예제 #1
0
    def __call__(self):
        """Run object detection and analysis for this position.

        The TimeHolder is created without an output filename (``None``),
        so no hdf5 file is written by this variant.
        """
        self.timeholder = TimeHolder(
            self.position,
            self._all_channel_regions,
            None,
            self.meta_data,
            self.settings,
            self._frames,
            self.plate_id,
            **self._hdf_options)

        # read the detection switch once, then hand everything to the analyzer
        detect = self.settings.get('Processing', 'objectdetection')
        analyzer = CellAnalyzer(timeholder=self.timeholder,
                                position=self.position,
                                create_images=True,
                                binning_factor=1,
                                detect_objects=detect)
        n_images = self._analyze(analyzer)
예제 #2
0
    def __call__(self):
        """Run object detection and analysis for this position.

        Results are written through a TimeHolder backed by an hdf5
        (.ch5) file named after the position.

        Returns the CellAnalyzer instance used for the run.
        """
        hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     hdf5_fname, self.meta_data, self.settings,
                                     self._frames, self.plate_id,
                                     **self._hdf_options)

        ca = CellAnalyzer(timeholder=self.timeholder,
                          position=self.position,
                          create_images=True,
                          binning_factor=1,
                          detect_objects=self.settings.get(
                              'Processing', 'objectdetection'))

        self._analyze(ca)
        return ca
예제 #3
0
파일: browser.py 프로젝트: waterponey/cecog
    def __call__(self):
        """Run object detection and analysis for this position.

        Results go through a TimeHolder backed by ``self.datafile``;
        well/site are not used in this variant (both ``None``).

        Returns the CellAnalyzer instance used for the run.
        """
        self.timeholder = TimeHolder(self.position,
                                     self._all_channel_regions,
                                     self.datafile,
                                     self.meta_data,
                                     self.settings,
                                     self._frames,
                                     self.plate_id,
                                     well=None,
                                     site=None,
                                     **self._hdf_options)

        ca = CellAnalyzer(timeholder=self.timeholder,
                          position=self.position,
                          create_images=True,
                          binning_factor=1,
                          detect_objects=self.settings.get(
                              'Processing', 'objectdetection'))

        self._analyze(ca)
        return ca
예제 #4
0
파일: position.py 프로젝트: imcf/cecog
    def __call__(self):
        """Run the full analysis pipeline for this position.

        Performs object detection, optional tracking and event
        selection, the configured exports, and gallery-image
        generation. Returns the number of processed image sets
        (0 when aborted).
        """
        # include hdf5 file name in hdf5_options
        # perhaps timeholder might be a good place to read out the options
        # file must not exist to proceed
        hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     hdf5_fname,
                                     self.meta_data, self.settings,
                                     self._frames,
                                     self.plate_id,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        # setup tracker and event selection only when tracking is enabled
        if self.settings.get('Processing', 'tracking'):
            # NOTE(review): 'region' is read but never used here
            region = self.settings.get('Tracking', 'tracking_regionname')
            tropts = (self.settings.get('Tracking', 'tracking_maxobjectdistance'),
                      self.settings.get('Tracking', 'tracking_maxsplitobjects'),
                      self.settings.get('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)
            self._tes = EventSelection(self._tracker.graph, **self._es_options)

        # stopwatch measures the whole run for the per-image timing log below
        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position = self.position,
                          create_images = True,
                          binning_factor = 1,
                          detect_objects = self.settings.get('Processing',
                                                             'objectdetection'))

        self.setup_classifiers()
        self.export_features = self.define_exp_features()
        n_images = self._analyze(ca)

        if n_images > 0:
            # invoke event selection
            if self.settings.get('Processing', 'tracking_synchronize_trajectories') and \
                    self.settings.get('Processing', 'tracking'):
                self.logger.debug("--- visitor start")
                self._tes.find_events()
                self.logger.debug("--- visitor ok")
                if self.is_aborted():
                    return 0 # number of processed images

            # save all the data of the position, no aborts from here on
            # want all processed data saved
            if self.settings.get('Output', 'export_object_counts'):
                self.export_object_counts()
            if self.settings.get('Output', 'export_object_details'):
                self.export_object_details()
            if self.settings.get('Output', 'export_file_names'):
                self.export_image_names()

            if self.settings.get('Processing', 'tracking'):
                self.export_tracks_hdf5()
                self.update_status({'text': 'export events...'})
                if self.settings.get('Processing', 'tracking_synchronize_trajectories'):
                    self.export_events()
                if self.settings.get('Output', 'export_track_data'):
                    self.export_full_tracks()
                if self.settings.get('Output', 'export_tracking_as_dot'):
                    self.export_graphviz()

            self.update_status({'text': 'export events...',
                                'max': 1,
                                'progress': 1})

            # remove all features from all channels to free memory
            # for the generation of gallery images
            self.timeholder.purge_features()
            if self.settings.get('Output', 'events_export_gallery_images'):
                self.export_gallery_images()

        # log average processing time per image set; ZeroDivisionError
        # covers the n_images == 0 case
        try:
            intval = stopwatch.stop()/n_images*1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info(" - %d image sets analyzed, %3d ms per image set" %
                             (n_images, intval))

        self.touch_finished()
#        self.clear()
        return n_images
예제 #5
0
파일: position.py 프로젝트: imcf/cecog
class PositionAnalyzer(PositionCore):
    """Analyzes one plate position: object detection, classification,
    tracking/event selection and the configured exports.

    Instances create their own output directory tree and a per-position
    log file on construction; the heavy lifting happens in __call__.
    """

    def __init__(self, *args, **kw):
        super(PositionAnalyzer, self).__init__(*args, **kw)

        # tracking needs a time series; force it off for non-timelapse data
        if not self.has_timelapse:
            self.settings.set('Processing', 'tracking', False)

        self._makedirs()
        self.add_file_handler(join(self._log_dir, "%s.log" %self.position),
                              self._lvl.DEBUG)

    def _makedirs(self):
        """Create the output directory tree.

        Every directory in the tree is also stored on the instance as
        ``_<basename>_dir`` (leading/trailing underscores stripped),
        e.g. ``_statistics_dir``, ``_finished_dir``.
        """
        assert isinstance(self.position, basestring)
        assert isinstance(self._out_dir, basestring)

        self._analyzed_dir = join(self._out_dir, "analyzed")
        if self.has_timelapse:
            self._position_dir = join(self._analyzed_dir, self.position)
        else:
            self._position_dir = self._analyzed_dir

        odirs = (self._analyzed_dir,
                 join(self._out_dir, "log"),
                 join(self._out_dir, "log", "_finished"),
                 join(self._out_dir, "hdf5"),
                 join(self._out_dir, "plots"),
                 join(self._position_dir, "statistics"),
                 join(self._position_dir, "gallery"),
                 join(self._position_dir, "channel_gallery"),
                 join(self._position_dir, "images"),
                 join(self._position_dir, "images","_labels"))

        for odir in odirs:
            try:
                makedirs(odir)
            except os.error: # no permissions (or directory already exists)
                self.logger.error("mkdir %s: failed" %odir)
            else:
                self.logger.info("mkdir %s: ok" %odir)
            setattr(self, "_%s_dir" %basename(odir.lower()).strip("_"), odir)

    def setup_classifiers(self):
        """Load a trained classifier for every processing channel that
        has classification enabled in the settings."""
        sttg = self.settings
        # processing channel, color channel
        for p_channel, c_channel in self.ch_mapping.iteritems():
            self.settings.set_section('Processing')
            if sttg.get2(self._resolve_name(p_channel, 'classification')):
                sttg.set_section('Classification')
                clf = CommonClassPredictor(
                    clf_dir=sttg.get2(self._resolve_name(p_channel,
                                                         'classification_envpath')),
                    name=p_channel,
                    channels=self._channel_regions(p_channel),
                    color_channel=c_channel)
                clf.importFromArff()
                clf.loadClassifier()
                self.classifiers[p_channel] = clf

    def _convert_tracking_duration(self, option_name):
        """Converts a tracking duration according to the unit and the
        mean time-lapse of the current position.
        Returns number of frames (rounded to the nearest int).

        Raises ValueError for an unknown duration unit.
        """
        value = self.settings.get(SECTION_NAME_TRACKING, option_name)
        unit = self.settings.get(SECTION_NAME_TRACKING,
                                  'tracking_duration_unit')

        # get mean and stddev for the current position
        info = self.meta_data.get_timestamp_info(self.position)
        if unit == TRACKING_DURATION_UNIT_FRAMES or info is None:
            result = value
        elif unit == TRACKING_DURATION_UNIT_MINUTES:
            # info[0] is presumably the mean time-lapse in seconds -- TODO confirm
            result = (value * 60.) / info[0]
        elif unit == TRACKING_DURATION_UNIT_SECONDS:
            result = value / info[0]
        else:
            raise ValueError("Wrong unit '%s' specified." %unit)
        return int(round(result))

    @property
    def _es_options(self):
        """Event-selection options assembled from the tracking settings.

        Returns a dict suitable as keyword arguments for EventSelection.
        """
        # NOTE(review): eval on a settings string -- assumes trusted config
        transitions = eval(self.settings.get2('tracking_labeltransitions'))
        # normalize a single transition pair to a tuple of pairs
        if not isinstance(transitions[0], tuple):
            transitions = (transitions, )
        evopts = {'transitions': transitions,
                  'backward_labels': map(int, self.settings.get2('tracking_backwardlabels').split(',')),
                  'forward_labels': map(int, self.settings.get2('tracking_forwardlabels').split(',')),
                  'backward_check': self._convert_tracking_duration('tracking_backwardCheck'),
                  'forward_check': self._convert_tracking_duration('tracking_forwardCheck'),
                  'backward_range': self._convert_tracking_duration('tracking_backwardrange'),
                  'forward_range': self._convert_tracking_duration('tracking_forwardrange'),
                  'backward_range_min': self.settings.get2('tracking_backwardrange_min'),
                  'forward_range_min': self.settings.get2('tracking_forwardrange_min'),
                  'max_in_degree': self.settings.get2('tracking_maxindegree'),
                  'max_out_degree': self.settings.get2('tracking_maxoutdegree')}
        return evopts

    def define_exp_features(self):
        """Determine which features to export per channel and region.

        Returns a dict mapping channel name -> {region: feature list},
        where None means "all features".
        """
        features = {}
        for name in self.processing_channels:
            region_features = {}
            for region in REGION_INFO.names[name.lower()]:
                # export all features extracted per regions
                if self.settings.get('Output', 'events_export_all_features') or \
                        self.settings.get('Output', 'export_track_data'):
                    # None means all features
                    region_features[region] = None
                # export selected features from settings
                else:
                    region_features[region] = \
                        self.settings.get('General',
                                          '%s_featureextraction_exportfeaturenames'
                                          % name.lower())
                features[name] = region_features
        return features

    def export_object_counts(self):
        """Export per-class object counts to a text file and render the
        population plots (primary channel only)."""
        fname = join(self._statistics_dir, 'P%s__object_counts.txt' % self.position)

        # at least the total count for primary is always exported
        ch_info = OrderedDict([('Primary', ('primary', [], []))])
        for name, clf in self.classifiers.iteritems():
            names = clf.class_names.values()
            colors = [clf.hexcolors[n] for n in names]
            ch_info[name] = (clf.regions, names, colors)

        self.timeholder.exportObjectCounts(fname, self.position, self.meta_data, ch_info)
        pplot_ymax = \
            self.settings.get('Output', 'export_object_counts_ylim_max')

        # plot only for primary channel so far!
        self.timeholder.exportPopulationPlots(fname, self._plots_dir, self.position,
                                              self.meta_data, ch_info['Primary'], pplot_ymax)


    def export_object_details(self):
        """Export per-object details, once in plain and once in
        Excel-friendly formatting."""
        fname = join(self._statistics_dir,
                        'P%s__object_details.txt' % self.position)
        self.timeholder.exportObjectDetails(fname, excel_style=False)
        fname = join(self._statistics_dir,
                        'P%s__object_details_excel.txt' % self.position)
        self.timeholder.exportObjectDetails(fname, excel_style=True)

    def export_image_names(self):
        """Export the names of all processed image files to the
        statistics directory."""
        self.timeholder.exportImageFileNames(self._statistics_dir,
                                             self.position,
                                             self._imagecontainer._importer,
                                             self.ch_mapping)

    def export_full_tracks(self):
        """Export full (unsynchronized) track data to the
        statistics/full directory."""
        odir = join(self._statistics_dir, 'full')
        exporter = EventExporter(self.meta_data)
        exporter.full_tracks(self.timeholder, self._tes.visitor_data,
                             self.position, odir)

    def export_graphviz(self, channel_name='Primary', region_name='primary'):
        """Export the tracking graph as a graphviz .dot file plus a CSV
        with per-sample tracking features."""
        filename = 'tracking_graph___P%s.dot' %self.position
        exporter = TrackExporter()
        exporter.graphviz_dot(join(self._statistics_dir, filename),
                              self._tracker)

        # collect the region sample holder of every frame
        sample_holders = OrderedDict()
        for frame in self.timeholder.keys():
            channel = self.timeholder[frame][channel_name]
            sample_holders[frame] = channel.get_region(region_name)

        filename = join(self._statistics_dir, filename.replace('.dot', '_features.csv'))
        exporter.tracking_data(filename, sample_holders)

    def export_gallery_images(self):
        """Cut gallery images for the selected events of every
        processing channel, then remove the source images."""
        for ch_name in self.processing_channels:
            cutter_in = join(self._images_dir, ch_name)
            if isdir(cutter_in):
                cutter_out = join(self._gallery_dir, ch_name)
                self.logger.info("running Cutter for '%s'..." %ch_name)
                image_size = \
                    self.settings.get('Output', 'events_gallery_image_size')
                EventGallery(self._tes, cutter_in, self.position, cutter_out,
                             self.meta_data, oneFilePerTrack=True,
                             size=(image_size, image_size))
            # FIXME: be careful here. normally only raw images are
            #        used for the cutter and can be deleted
            #        afterwards
            shutil.rmtree(cutter_in, ignore_errors=True)

    def export_tracks_hdf5(self):
        """Save tracking data to hdf file"""
        self.logger.debug("--- serializing tracking start")
        self.timeholder.serialize_tracking(self._tes.graph)
        self.logger.debug("--- serializing tracking ok")

    def export_events(self):
        """Export and save event selection data"""
        exporter = EventExporter(self.meta_data)
        # writes to the event folder
        odir = join(self._statistics_dir, "events")
        exporter.track_features(self.timeholder, self._tes.visitor_data,
                                self.export_features, self.position, odir)
        self.logger.debug("--- visitor analysis ok")
        # writes event data to hdf5
        self.timeholder.serialize_events(self._tes)
        self.logger.debug("--- serializing events ok")

    def __call__(self):
        """Run the full analysis pipeline for this position.

        Performs object detection, optional tracking and event
        selection, the configured exports, and gallery-image
        generation. Returns the number of processed image sets
        (0 when aborted).
        """
        # include hdf5 file name in hdf5_options
        # perhaps timeholder might be a good place to read out the options
        # file must not exist to proceed
        hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     hdf5_fname,
                                     self.meta_data, self.settings,
                                     self._frames,
                                     self.plate_id,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        # setup tracker and event selection only when tracking is enabled
        if self.settings.get('Processing', 'tracking'):
            # NOTE(review): 'region' is read but never used here
            region = self.settings.get('Tracking', 'tracking_regionname')
            tropts = (self.settings.get('Tracking', 'tracking_maxobjectdistance'),
                      self.settings.get('Tracking', 'tracking_maxsplitobjects'),
                      self.settings.get('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)
            self._tes = EventSelection(self._tracker.graph, **self._es_options)

        # stopwatch measures the whole run for the per-image timing log below
        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position = self.position,
                          create_images = True,
                          binning_factor = 1,
                          detect_objects = self.settings.get('Processing',
                                                             'objectdetection'))

        self.setup_classifiers()
        self.export_features = self.define_exp_features()
        n_images = self._analyze(ca)

        if n_images > 0:
            # invoke event selection
            if self.settings.get('Processing', 'tracking_synchronize_trajectories') and \
                    self.settings.get('Processing', 'tracking'):
                self.logger.debug("--- visitor start")
                self._tes.find_events()
                self.logger.debug("--- visitor ok")
                if self.is_aborted():
                    return 0 # number of processed images

            # save all the data of the position, no aborts from here on
            # want all processed data saved
            if self.settings.get('Output', 'export_object_counts'):
                self.export_object_counts()
            if self.settings.get('Output', 'export_object_details'):
                self.export_object_details()
            if self.settings.get('Output', 'export_file_names'):
                self.export_image_names()

            if self.settings.get('Processing', 'tracking'):
                self.export_tracks_hdf5()
                self.update_status({'text': 'export events...'})
                if self.settings.get('Processing', 'tracking_synchronize_trajectories'):
                    self.export_events()
                if self.settings.get('Output', 'export_track_data'):
                    self.export_full_tracks()
                if self.settings.get('Output', 'export_tracking_as_dot'):
                    self.export_graphviz()

            self.update_status({'text': 'export events...',
                                'max': 1,
                                'progress': 1})

            # remove all features from all channels to free memory
            # for the generation of gallery images
            self.timeholder.purge_features()
            if self.settings.get('Output', 'events_export_gallery_images'):
                self.export_gallery_images()

        # log average processing time per image set; ZeroDivisionError
        # covers the n_images == 0 case
        try:
            intval = stopwatch.stop()/n_images*1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info(" - %d image sets analyzed, %3d ms per image set" %
                             (n_images, intval))

        self.touch_finished()
#        self.clear()
        return n_images

    @property
    def hdf5_filename(self):
        # hdf5 filename as reported by the TimeHolder
        return self.timeholder.hdf5_filename

    def touch_finished(self, times=None):
        """Writes an empty file to mark this position as finished"""
        fname = join(self._finished_dir, '%s__finished.txt' % self.position)
        with open(fname, "w") as f:
            os.utime(fname, times)

    def clear(self):
        # closes hdf5
        self.timeholder.close_all()

    def _analyze(self, cellanalyzer):
        """Process every frame of the position with the given
        CellAnalyzer: detection, tracking, classification, rendering.

        Returns the number of processed image sets (0 when aborted).
        """
        super(PositionAnalyzer, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            if self.is_aborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                       len(self._frames))
                self.update_status({'progress': self._frames.index(frame)+1,
                                    'text': txt,
                                    'interval': stopwatch.interim()})

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()
            n_images += 1
            images = []

            if self.settings.get('Processing', 'tracking'):
                region = self.settings.get('Tracking', 'tracking_regionname')
                samples = self.timeholder[frame][PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if self.settings.get('Tracking', 'tracking_visualization'):
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    nframes = self.settings.get('Tracking', 'tracking_visualize_track_length')
                    radius = self.settings.get('Tracking', 'tracking_centroid_radius')
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, nframes, radius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            for clf in self.classifiers.itervalues():
                cellanalyzer.classify_objects(clf)

            ##############################################################
            # FIXME - part for browser
            if not self._myhack is None:
                self.render_browser(cellanalyzer)
            ##############################################################

            self.settings.set_section('General')
            self.render_classification_images(cellanalyzer, images, frame)
            self.render_contour_images(cellanalyzer, images, frame)

            if self.settings.get('Output', 'rendering_channel_gallery'):
                self.render_channel_gallery(cellanalyzer, frame)

            if self.settings.get('Output', 'rendering_labels_discwrite'):
                cellanalyzer.exportLabelImages(self._labels_dir)

            self.logger.info(" - Frame %d, duration (ms): %3d" \
                                 %(frame, stopwatch.interim()*1000))
            # free per-frame feature memory, keeping only exported features
            cellanalyzer.purge(features=self.export_features)

        return n_images

    def render_channel_gallery(self, cellanalyzer, frame):
        """Render a gallery image for every virtual channel of the
        given frame."""
        for channel in cellanalyzer.virtual_channels.itervalues():
            chgal = ChannelGallery(channel, frame, self._channel_gallery_dir)
            chgal.make_gallery()

    def render_contour_images(self, ca, images, frame):
        """Render contour overlay images for every configured rendering
        region and show them in the GUI (non-gallery regions only)."""
        for region, render_par in self.settings.get2('rendering').iteritems():
            out_dir = join(self._images_dir, region)
            write = self.settings.get('Output', 'rendering_contours_discwrite')

            if region not in self.CHANNELS.keys():
                img, fname = ca.render(out_dir, dctRenderInfo=render_par,
                                       writeToDisc=write, images=images)
                msg = 'PL %s - P %s - T %05d' %(self.plate_id, self.position,
                                                frame)
                self.set_image(img, msg, fname, region, 50)
            # gallery images are treated differently (always written)
            else:
                ca.render(out_dir, dctRenderInfo=render_par, writeToDisc=True)

    def render_classification_images(self, cellanalyzer, images, frame):
        """Render classification overlay images for every configured
        region and show them in the GUI."""
        for region, render_par in self.settings.get2('rendering_class').iteritems():
            out_images = join(self._images_dir, region)
            write = self.settings.get('Output', 'rendering_class_discwrite')
            img_rgb, fname = cellanalyzer.render(out_images,
                                                 dctRenderInfo=render_par,
                                                 writeToDisc=write,
                                                 images=images)

            msg = 'PL %s - P %s - T %05d' %(self.plate_id, self.position, frame)
            self.set_image(img_rgb, msg, fname, region, 50)

    def render_browser(self, cellanalyzer):
        """Push the current images and object contours to the browser
        hook (self._myhack)."""
        d = {}
        for name in cellanalyzer.get_channel_names():
            channel = cellanalyzer.get_channel(name)
            d[channel.strChannelId] = channel.meta_image.image
            self._myhack.show_image(d)

        channel_name, region_name = self._myhack._object_region
        channel = cellanalyzer.get_channel(channel_name)
        if channel.has_region(region_name):
            region = channel.get_region(region_name)
            coords = {}
            for obj_id, obj in region.iteritems():
                coords[obj_id] = obj.crack_contour
            self._myhack.set_coords(coords)
예제 #6
0
class PositionAnalyzer(PositionCore):
    """Runs the full analysis pipeline for one plate position: object
    detection, classification, tracking, event selection and the various
    statistics/image exports."""

    def __init__(self, *args, **kw):
        super(PositionAnalyzer, self).__init__(*args, **kw)

        # tracking needs a time axis; force it off for non-timelapse data
        if not self.has_timelapse:
            self.settings.set('Processing', 'tracking', False)

        self._makedirs()
        # one debug log file per position
        self.add_file_handler(join(self._log_dir, "%s.log" %self.position),
                              self.Levels.DEBUG)

    def _makedirs(self):
        """Create the output directory tree for this position and expose every
        directory as an attribute named ``_<basename>_dir`` (leading/trailing
        underscores of the basename stripped), e.g. ``self._statistics_dir``.
        """
        assert isinstance(self.position, basestring)
        assert isinstance(self._out_dir, basestring)

        self._analyzed_dir = join(self._out_dir, "analyzed")
        # without a time axis all positions share the "analyzed" directory
        if self.has_timelapse:
            self._position_dir = join(self._analyzed_dir, self.position)
        else:
            self._position_dir = self._analyzed_dir

        odirs = (self._analyzed_dir,
                 join(self._out_dir, "log"),
                 join(self._out_dir, "log", "_finished"),
                 join(self._out_dir, "hdf5"),
                 join(self._out_dir, "plots"),
                 join(self._position_dir, "statistics"),
                 join(self._position_dir, "tc3"),
                 join(self._position_dir, "gallery"),
                 join(self._position_dir, "channel_gallery"),
                 join(self._position_dir, "images"),
                 join(self._position_dir, "images","_labels"))

        for odir in odirs:
            try:
                makedirs(odir)
            except os.error: # no permissions
                # NOTE(review): also triggered when the directory already
                # exists -- logged as "failed" but processing continues
                self.logger.error("mkdir %s: failed" %odir)
            else:
                self.logger.info("mkdir %s: ok" %odir)
            # e.g. ".../images/_labels" -> self._labels_dir
            setattr(self, "_%s_dir" %basename(odir.lower()).strip("_"), odir)

    def setup_classifiers(self):
        """Instantiate one classifier per processing channel that has
        classification enabled and store it in ``self.classifiers``.

        Unsupervised event selection gets a cluster-based class definition;
        otherwise a trained classifier is imported and loaded from its
        environment path.
        """
        sttg = self.settings

        # processing channel, color channel
        for p_channel, c_channel in self.ch_mapping.iteritems():
            # get2() below resolves relative to the 'Processing' section
            self.settings.set_section('Processing')
            if sttg.get2(self._resolve_name(p_channel, 'classification')):
                chreg = self._channel_regions(p_channel)
                if sttg("EventSelection", "unsupervised_event_selection"):
                    nclusters = sttg("EventSelection", "num_clusters")
                    self.classifiers[p_channel] = ClassDefinitionUnsup( \
                        nclusters, chreg)
                else:
                    sttg.set_section('Classification')
                    clf = CommonClassPredictor(
                        clf_dir=sttg.get2(self._resolve_name(p_channel,
                                                             'classification_envpath')),
                        name=p_channel,
                        channels=chreg,
                        color_channel=c_channel)
                    clf.importFromArff()
                    clf.loadClassifier()
                    self.classifiers[p_channel] = clf

    @property
    def _transitions(self):
        """Class-label transitions for event selection.

        For unsupervised event selection the transition is fixed to (0, 1);
        otherwise it is parsed from the 'labeltransitions' setting and
        normalized to an (n, 2) array.

        Raises:
            RuntimeError: if the setting cannot be parsed into 2-int tuples.
        """
        if self.settings.get('EventSelection', 'unsupervised_event_selection'):
            transitions = np.array((0, 1))
        else:
            try:
                transitions = np.array(eval(self.settings.get('EventSelection', 'labeltransitions')))
                # BUG FIX: ndarray.reshape returns a *new* array; the original
                # code discarded the result, so the (n, 2) normalization (and
                # its size validation) never took effect.
                transitions = transitions.reshape((-1, 2))
            except Exception as e:
                raise RuntimeError(("Make sure that transitions are of the form "
                                    "'int, int' or '(int, int), (int, int)' i.e "
                                    "2-int-tuple  or a list of 2-int-tuples"))

        return transitions

    def setup_eventselection(self, graph):
        """Setup the method for event selection.

        Parameters:
            graph: tracking graph the event selection operates on.

        Returns:
            EventSelection (supervised) or UnsupervisedEventSelection
            instance.

        Raises:
            RuntimeError: if neither supervised nor unsupervised event
                selection is enabled (previously this path crashed with an
                unbound-local NameError on ``es``).
        """

        opts = {'transitions': self._transitions,
                'backward_range': self._convert_tracking_duration('backwardrange'),
                'forward_range': self._convert_tracking_duration('forwardrange'),
                'max_in_degree': self.settings.get('EventSelection', 'maxindegree'),
                'max_out_degree': self.settings.get('EventSelection', 'maxoutdegree')}

        if self.settings.get('EventSelection', 'supervised_event_selection'):
            opts.update({'backward_labels': [int(i) for i in self.settings.get('EventSelection', 'backwardlabels').split(',')],
                         'forward_labels': [int(i) for i in self.settings.get('EventSelection', 'forwardlabels').split(',')],
                         'backward_range_min': self.settings.get('EventSelection', 'backwardrange_min'),
                         'forward_range_min': self.settings.get('EventSelection', 'forwardrange_min'),
                         'backward_check': self._convert_tracking_duration('backwardCheck'),
                         'forward_check': self._convert_tracking_duration('forwardCheck')})
            es = EventSelection(graph, **opts)

        elif self.settings.get('EventSelection', 'unsupervised_event_selection'):
            cld = self.classifiers.values()[0] # only one classdef in case of UES
            opts.update({'forward_check': self._convert_tracking_duration('min_event_duration'),
                         'forward_labels': (1, ),
                         'backward_check': -1, # unused for unsupervised usecase
                         'backward_labels': (0, ),
                         'num_clusters': self.settings.get('EventSelection', 'num_clusters'),
                         'min_cluster_size': self.settings.get('EventSelection', 'min_cluster_size'),
                         'classdef': cld})
            es = UnsupervisedEventSelection(graph, **opts)

        else:
            # explicit error instead of falling through to NameError
            raise RuntimeError("Neither supervised nor unsupervised event "
                               "selection is enabled in the settings.")

        return es

    def _convert_tracking_duration(self, option_name):
        """Converts a tracking duration according to the unit and the
        mean time-lapse of the current position.
        Returns number of frames.
        """
        duration = self.settings.get('EventSelection', option_name)
        unit = self.settings.get('EventSelection', 'duration_unit')

        # (mean, stddev) time-lapse info of the current position; may be None
        timestamp_info = self.meta_data.get_timestamp_info(self.position)
        if timestamp_info is None or unit == TimeConverter.FRAMES:
            # already in frames, or no timing info to convert with
            frames = duration
        elif unit == TimeConverter.MINUTES:
            mean_timelapse = timestamp_info[0]
            frames = (duration * 60.) / mean_timelapse
        elif unit == TimeConverter.SECONDS:
            frames = duration / timestamp_info[0]
        else:
            raise ValueError("Wrong unit '%s' specified." %unit)
        return int(round(frames))

    def define_exp_features(self):
        """Return a mapping ``{channel: {region: feature_names}}`` of the
        features to export.

        A value of ``None`` means "export all features" of that region.
        For the merged channel the single key is the tuple of its component
        regions and the value lists features prefixed with
        ``<channel>_<region>_``.
        """
        features = {}
        for name in self.processing_channels:
            region_features = {}

            for region in MetaPluginManager().region_info.names[name.lower()]:
                # the merged channel is composed from its components below
                if name is self.MERGED_CHANNEL:
                    continue
                # export all features extracted per regions
                if self.settings.get('Output', 'events_export_all_features') or \
                        self.settings.get('Output', 'export_track_data'):
                    # None means all features
                    region_features[region] = None
                # export selected features from settings
                else:
                    region_features[region] = \
                        self.settings.get('General',
                                          '%s_featureextraction_exportfeaturenames'
                                          % name.lower())

                features[name] = region_features

            # special case for merged channel
            if name is self.MERGED_CHANNEL:
                mftrs = list()
                for channel, region in self._channel_regions(name).iteritems():
                    if features[channel][region] is None:
                        mftrs = None
                    else:
                        for feature in features[channel][region]:
                            mftrs.append("_".join((channel, region, feature)))
                # BUG FIX: in Python 2 dict.values() returns a list, which is
                # unhashable and raised TypeError when used as a dict key --
                # convert to a tuple.
                region_features[tuple(self._all_channel_regions[name].values())] = mftrs
                features[name] = region_features

        return features

    def export_object_counts(self):
        """Write per-class object counts to a text file and generate the
        population plots. Does nothing when no classifier is loaded."""
        ch_info = OrderedDict()
        for chname, clf in self.classifiers.iteritems():
            class_names = clf.class_names.values()
            hexcolors = [clf.hexcolors[cname] for cname in class_names]
            ch_info[chname] = (clf.regions, class_names, hexcolors)

        # if no classifier has been loaded, no counts can be exported.
        if not ch_info:
            return

        fname = join(self._statistics_dir,
                     'P%s__object_counts.txt' % self.position)
        self.timeholder.exportObjectCounts(fname, self.position,
                                           self.meta_data, ch_info)

        ymax = self.settings.get('Output', 'export_object_counts_ylim_max')
        self.timeholder.exportPopulationPlots(ch_info, self._plots_dir,
                                              self.plate_id, self.position,
                                              ymax=ymax)



    def export_object_details(self):
        """Dump per-object measurement details to a text file."""
        details_file = join(self._statistics_dir,
                            'P%s__object_details.txt' % self.position)
        self.timeholder.exportObjectDetails(details_file)

    def export_image_names(self):
        """Write the raw-image file names used for this position to the
        statistics directory."""
        self.timeholder.exportImageFileNames(
            self._statistics_dir, self.position,
            self._imagecontainer._importer, self.ch_mapping)

    def export_full_tracks(self):
        """Export complete trajectories (not only selected events) to the
        statistics/full directory."""
        target_dir = join(self._statistics_dir, 'full')
        EventExporter(self.meta_data).full_tracks(
            self.timeholder, self._tes.visitor_data, self.position, target_dir)

    def export_graphviz(self, channel_name='Primary', region_name='primary'):
        """Write the tracking graph as a graphviz .dot file plus a matching
        per-frame feature csv."""
        dot_name = 'tracking_graph___P%s.dot' %self.position
        exporter = TrackExporter()
        exporter.graphviz_dot(join(self._statistics_dir, dot_name),
                              self._tracker)

        # collect the sample holder of the requested region for every frame
        sample_holders = OrderedDict()
        for frame in self.timeholder.keys():
            frame_channel = self.timeholder[frame][channel_name]
            sample_holders[frame] = frame_channel.get_region(region_name)

        csv_path = join(self._statistics_dir,
                        dot_name.replace('.dot', '_features.csv'))
        exporter.tracking_data(csv_path, sample_holders)

    def export_gallery_images(self):
        """Cut event gallery images from the rendered channel images.

        For each processing channel the rendered images in
        ``images/<channel>`` are cut around the event centers and written to
        ``gallery/<channel>``; the source directory is removed afterwards.
        """
        for ch_name in self.processing_channels:
            cutter_in = join(self._images_dir, ch_name.lower())

            if not isdir(cutter_in):
                self.logger.warning('directory not found (%s)' %cutter_in)
                self.logger.warning('can not write the gallery images')
            else:
                cutter_out = join(self._gallery_dir, ch_name.lower())
                self.logger.info("running Cutter for '%s'..." %ch_name)
                image_size = \
                    self.settings.get('Output', 'events_gallery_image_size')
                TrackGallery(self._tes.centers(),
                             cutter_in, cutter_out, self.position, size=image_size)
            # FIXME: be careful here. normally only raw images are
            #        used for the cutter and can be deleted
            #        afterwards
            # NOTE: runs for every channel, even when cutter_in was missing
            # (harmless -- ignore_errors suppresses the failure)
            shutil.rmtree(cutter_in, ignore_errors=True)

    def export_tracks_hdf5(self):
        """Save tracking data (the full tracker graph) to the hdf5 file."""
        self.logger.debug("--- serializing tracking start")
        self.timeholder.serialize_tracking(self._tracker.graph)
        self.logger.debug("--- serializing tracking ok")

    def export_events(self):
        """Export and save event selection data (per-event track features
        written into the statistics/events folder)."""
        events_dir = join(self._statistics_dir, "events")
        EventExporter(self.meta_data).track_features(
            self.timeholder, self._tes.visitor_data,
            self.export_features, self.position, events_dir)
        self.logger.debug("--- serializing events ok")

    def export_events_hdf5(self):
        """Write the selected event data to the hdf5 file."""
        self.timeholder.serialize_events(self._tes)

    def export_tc3(self):
        """Export TC3 clustering results with the frame time step expressed
        in minutes."""
        mean_lapse = self.meta_data.get_timestamp_info(self.position)[0]
        converter = TimeConverter(mean_lapse, TimeConverter.SECONDS)
        frame_increment = self.settings('General', 'frameincrement')
        # effective time step between analyzed frames, in minutes
        step_minutes = converter.sec2min(mean_lapse) * frame_increment

        num_clusters = self.settings.get('EventSelection', 'num_clusters')
        exporter = TC3Exporter(self._tes.tc3data, self._tc3_dir, num_clusters,
                               step_minutes, TimeConverter.MINUTES,
                               self.position)
        exporter()

    def export_classlabels(self):
        """Save classlabels of each object to the hdf file.

        Works for both the supervised and the unsupervised case.
        """
        for channels in self.timeholder.itervalues():
            for chname, classifier in self.classifiers.iteritems():
                holder = channels[chname].get_region(classifier.regions)
                if classifier.feature_names is None:
                    # unsupervised classifiers have no feature names yet --
                    # take them from the region holder
                    classifier.feature_names = holder.feature_names
                self.timeholder.save_classlabels(channels[chname],
                                                 holder, classifier)

    def __call__(self):
        """Run the complete analysis for this position.

        Detects objects frame by frame, optionally tracks them, runs event
        selection and writes all requested exports.

        Returns:
            int: number of processed image sets (0 when aborted).
        """
        # include hdf5 file name in hdf5_options
        # perhaps timeholder might be a good place to read out the options
        # file does not have to exist to proceed
        hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     hdf5_fname,
                                     self.meta_data, self.settings,
                                     self._frames,
                                     self.plate_id,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        self.setup_classifiers()

        # setup tracker
        if self.settings('Processing', 'tracking'):
            tropts = (self.settings('Tracking', 'tracking_maxobjectdistance'),
                      self.settings('Tracking', 'tracking_maxsplitobjects'),
                      self.settings('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position = self.position,
                          create_images = True,
                          binning_factor = 1,
                          detect_objects = self.settings('Processing',
                                                         'objectdetection'))

        self.export_features = self.define_exp_features()
        n_images = self._analyze(ca)

        if n_images > 0:
            # invoke event selection
            if self.settings('Processing', 'eventselection') and \
                    self.settings('Processing', 'tracking'):

                evchannel = self.settings('EventSelection', 'eventchannel')
                region = self.classifiers[evchannel].regions
                # reuse the primary tracking graph when possible, otherwise
                # clone it for the selected event channel/region
                if self.settings('EventSelection', 'unsupervised_event_selection'):
                    graph = self._tracker.graph
                elif  evchannel != PrimaryChannel.NAME or \
                        region != self.settings("Tracking", "region"):
                    graph = self._tracker.clone_graph(self.timeholder,
                                                      evchannel,
                                                      region)
                else:
                    graph = self._tracker.graph

                self._tes = self.setup_eventselection(graph)
                self.logger.debug("--- visitor start")
                self._tes.find_events()
                self.logger.debug("--- visitor ok")
                if self.is_aborted():
                    return 0 # number of processed images

            # save all the data of the position, no aborts from here on
            # want all processed data saved
            if self.settings('Output', 'export_object_counts') and \
                    self.settings('EventSelection', 'supervised_event_selection'):
                # no object counts in case of unsupervised event selection
                self.export_object_counts()
            if self.settings('Output', 'export_object_details'):
                self.export_object_details()
            if self.settings('Output', 'export_file_names'):
                self.export_image_names()

            if self.settings('Processing', 'tracking'):
                self.export_tracks_hdf5()
                self.update_status({'text': 'export events...'})

                if self.settings('Output', 'hdf5_include_events'):
                    self.export_events_hdf5()

                if self.settings('Output', "export_events"):
                    if self.settings('Processing', 'eventselection'):
                        self.export_events()
                    if self.settings('EventSelection', 'unsupervised_event_selection'):
                        self.export_tc3()

                if self.settings('Output', 'export_track_data'):
                    self.export_full_tracks()
                if self.settings('Output', 'export_tracking_as_dot'):
                    self.export_graphviz(channel_name =PrimaryChannel.NAME,\
                                          region_name =self._all_channel_regions[PrimaryChannel.NAME][PrimaryChannel.NAME])

            self.export_classlabels()

            self.update_status({'text': 'export events...',
                                'max': 1,
                                'progress': 1})

            # remove all features from all channels to free memory
            # for the generation of gallery images
            self.timeholder.purge_features()
            if self.settings.get('Output', 'events_export_gallery_images') and \
                    self.settings.get('Processing', 'eventselection'):
                self.export_gallery_images()

        # log the average processing time per image set; the ZeroDivisionError
        # guard covers the aborted / nothing-processed case
        try:
            intval = stopwatch.stop()/n_images*1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info(" - %d image sets analyzed, %3d ms per image set" %
                             (n_images, intval))

        self.touch_finished()
        self.clear()
        return n_images

    @property
    def hdf5_filename(self):
        """Path of the hdf5 (.ch5) file the timeholder writes to."""
        return self.timeholder.hdf5_filename

    def touch_finished(self, times=None):
        """Create (or update the timestamp of) an empty marker file that
        flags this position as finished."""
        marker = join(self._finished_dir, '%s__finished.txt' % self.position)
        with open(marker, "w"):
            os.utime(marker, times)

    def clear(self):
        """Release resources held by this position."""
        # close hdf5 files held by the timeholder
        if self.timeholder is not None:
            self.timeholder.close_all()
        # close and remove handlers from logging object
        self.close()

    def _analyze(self, cellanalyzer):
        """Frame loop: segment, classify, track and render every time point.

        Returns:
            int: number of processed image sets; 0 when the user aborted.
        """
        super(PositionAnalyzer, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        # skip rendering/labels when data is being reused from an hdf5 file
        minimal_effort = self.settings.get('Output', 'minimal_effort') and self.settings.get('Output', 'hdf5_reuse')

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            if self.is_aborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                       len(self._frames))
                self.update_status({'progress': self._frames.index(frame)+1,
                                    'text': txt,
                                    'interval': stopwatch.interim()})

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            # object detection / segmentation for this frame
            cellanalyzer.process()

            self.logger.info(" - Frame %d, cellanalyzer.process (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            n_images += 1
            images = []

            if self.settings('Processing', 'tracking'):
                region = self.settings('Tracking', 'region')
                samples = self.timeholder[frame][PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if self.settings('Tracking', 'tracking_visualization'):
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    nframes = self.settings('Tracking', 'tracking_visualize_track_length')
                    radius = self.settings('Tracking', 'tracking_centroid_radius')
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, nframes, radius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            self.logger.info(" - Frame %d, Tracking (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            # can't cluster on a per frame basis
            if self.settings("EventSelection", "supervised_event_selection"):
                for clf in self.classifiers.itervalues():
                    cellanalyzer.classify_objects(clf)

            self.logger.info(" - Frame %d, Classification (ms): %3d" \
                             % (frame, stopwatch.interval()*1000))

            self.settings.set_section('General')
            # want emit all images at once
            if not minimal_effort:
                imgs = {}
                imgs.update(self.render_classification_images(cellanalyzer, images, frame))
                imgs.update(self.render_contour_images(cellanalyzer, images, frame))
                msg = 'PL %s - P %s - T %05d' %(self.plate_id, self.position, frame)
                self.set_image(imgs, msg, 50)

                if self.settings('Output', 'rendering_channel_gallery'):
                    self.render_channel_gallery(cellanalyzer, frame)

                if self.settings('Output', 'rendering_labels_discwrite'):
                    cellanalyzer.exportLabelImages(self._labels_dir)

            # free memory: drop everything except the features marked for export
            cellanalyzer.purge(features=self.export_features)
            self.logger.info(" - Frame %d, rest (ms): %3d" \
                                 %(frame, stopwatch.interval()*1000))
            self.logger.info(" - Frame %d, duration (ms): %3d" \
                                 %(frame, stopwatch.interim()*1000))


        return n_images

    def render_channel_gallery(self, cellanalyzer, frame):
        """Write a gallery image for every virtual channel of this frame."""
        for vchannel in cellanalyzer.virtual_channels.itervalues():
            gallery = ChannelGallery(vchannel, frame, self._channel_gallery_dir)
            gallery.make_gallery()

    def render_contour_images(self, ca, images, frame):
        """Render contour overlays for all configured regions.

        Returns a dict mapping region name to the rendered image. Regions
        matching a plain channel name are only written to disk (gallery
        input) and are not part of the returned dict.
        """
        rendered = {}
        for region, render_info in self.settings.get2('rendering').iteritems():
            target_dir = join(self._images_dir, region)

            if region in self.CHANNELS.keys():
                # gallery images are treated differently: always written
                ca.render(target_dir, dctRenderInfo=render_info,
                          writeToDisc=True)
            else:
                write = self.settings('Output',
                                      'rendering_contours_discwrite')
                image, _ = ca.render(target_dir, dctRenderInfo=render_info,
                                     writeToDisc=write, images=images)
                rendered[region] = image
        return rendered

    def render_classification_images(self, cellanalyzer, images, frame):
        """Render one classification overlay per configured region and return
        them as a dict keyed by region name."""
        rendered = {}
        for region, render_info in self.settings.get2('rendering_class').iteritems():
            target_dir = join(self._images_dir, region)
            write = self.settings('Output', 'rendering_class_discwrite')
            image, _ = cellanalyzer.render(target_dir,
                                           dctRenderInfo=render_info,
                                           writeToDisc=write,
                                           images=images)
            rendered[region] = image
        return rendered
# 예제 #7 (Example #7)
    def __call__(self):
        """Run the complete analysis for this position.

        Detects objects frame by frame, optionally tracks them, runs event
        selection and writes all requested exports.

        Returns:
            int: number of processed image sets (0 when aborted).
        """
        # include hdf5 file name in hdf5_options
        # perhaps timeholder might be a good place to read out the options
        # file does not have to exist to proceed
        hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     hdf5_fname,
                                     self.meta_data, self.settings,
                                     self._frames,
                                     self.plate_id,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        self.setup_classifiers()

        # setup tracker
        if self.settings('Processing', 'tracking'):
            tropts = (self.settings('Tracking', 'tracking_maxobjectdistance'),
                      self.settings('Tracking', 'tracking_maxsplitobjects'),
                      self.settings('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position = self.position,
                          create_images = True,
                          binning_factor = 1,
                          detect_objects = self.settings('Processing',
                                                         'objectdetection'))

        self.export_features = self.define_exp_features()
        n_images = self._analyze(ca)

        if n_images > 0:
            # invoke event selection
            if self.settings('Processing', 'eventselection') and \
                    self.settings('Processing', 'tracking'):

                evchannel = self.settings('EventSelection', 'eventchannel')
                region = self.classifiers[evchannel].regions
                # reuse the primary tracking graph when possible, otherwise
                # clone it for the selected event channel/region
                if self.settings('EventSelection', 'unsupervised_event_selection'):
                    graph = self._tracker.graph
                elif  evchannel != PrimaryChannel.NAME or \
                        region != self.settings("Tracking", "region"):
                    graph = self._tracker.clone_graph(self.timeholder,
                                                      evchannel,
                                                      region)
                else:
                    graph = self._tracker.graph

                self._tes = self.setup_eventselection(graph)
                self.logger.debug("--- visitor start")
                self._tes.find_events()
                self.logger.debug("--- visitor ok")
                if self.is_aborted():
                    return 0 # number of processed images

            # save all the data of the position, no aborts from here on
            # want all processed data saved
            if self.settings('Output', 'export_object_counts') and \
                    self.settings('EventSelection', 'supervised_event_selection'):
                # no object counts in case of unsupervised event selection
                self.export_object_counts()
            if self.settings('Output', 'export_object_details'):
                self.export_object_details()
            if self.settings('Output', 'export_file_names'):
                self.export_image_names()

            if self.settings('Processing', 'tracking'):
                self.export_tracks_hdf5()
                self.update_status({'text': 'export events...'})

                if self.settings('Output', 'hdf5_include_events'):
                    self.export_events_hdf5()

                if self.settings('Output', "export_events"):
                    if self.settings('Processing', 'eventselection'):
                        self.export_events()
                    if self.settings('EventSelection', 'unsupervised_event_selection'):
                        self.export_tc3()

                if self.settings('Output', 'export_track_data'):
                    self.export_full_tracks()
                if self.settings('Output', 'export_tracking_as_dot'):
                    self.export_graphviz(channel_name =PrimaryChannel.NAME,\
                                          region_name =self._all_channel_regions[PrimaryChannel.NAME][PrimaryChannel.NAME])

            self.export_classlabels()

            self.update_status({'text': 'export events...',
                                'max': 1,
                                'progress': 1})

            # remove all features from all channels to free memory
            # for the generation of gallery images
            self.timeholder.purge_features()
            if self.settings.get('Output', 'events_export_gallery_images') and \
                    self.settings.get('Processing', 'eventselection'):
                self.export_gallery_images()

        # log the average processing time per image set; the ZeroDivisionError
        # guard covers the aborted / nothing-processed case
        try:
            intval = stopwatch.stop()/n_images*1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info(" - %d image sets analyzed, %3d ms per image set" %
                             (n_images, intval))

        self.touch_finished()
        self.clear()
        return n_images
# 예제 #8 (Example #8)
class PositionAnalyzer(PositionCore):

    def __init__(self, *args, **kw):
        """Prepare the position analyzer: output directories and logging."""
        super(PositionAnalyzer, self).__init__(*args, **kw)

        # tracking requires a time axis; force it off for single-frame data
        if not self.has_timelapse:
            self.settings.set('Processing', 'tracking', False)

        # create the output directory tree first -- the file handler below
        # logs into a directory that _makedirs() sets up
        self._makedirs()
        self.add_file_handler(join(self._log_dir, "%s.log" %self.position),
                              self.Levels.DEBUG)

    def _makedirs(self):
        """Create the full output directory layout for this position.

        Every created (or attempted) directory is also exposed on the
        instance as ``_<basename>_dir`` (leading underscores of the
        basename stripped), e.g. ``log/_finished`` -> ``_finished_dir``.
        """
        assert isinstance(self.position, basestring)
        assert isinstance(self._out_dir, basestring)

        self._analyzed_dir = join(self._out_dir, "analyzed")
        # without a time axis all positions share the analyzed directory
        self._position_dir = (join(self._analyzed_dir, self.position)
                              if self.has_timelapse else self._analyzed_dir)

        targets = [self._analyzed_dir,
                   join(self._out_dir, "log"),
                   join(self._out_dir, "log", "_finished"),
                   join(self._out_dir, "hdf5"),
                   join(self._out_dir, "plots"),
                   join(self._position_dir, "statistics"),
                   join(self._position_dir, "tc3"),
                   join(self._position_dir, "gallery"),
                   join(self._position_dir, "channel_gallery"),
                   join(self._position_dir, "images"),
                   join(self._position_dir, "images", "_labels")]

        for target in targets:
            try:
                makedirs(target)
            except os.error: # no permissions
                self.logger.error("mkdir %s: failed" %target)
            else:
                self.logger.info("mkdir %s: ok" %target)
            # register the directory as an attribute regardless of success
            setattr(self, "_%s_dir" %basename(target.lower()).strip("_"), target)

    def setup_classifiers(self):
        """Instantiate one classifier per channel with classification enabled.

        Supervised mode loads a trained CommonClassPredictor from the
        configured environment path; unsupervised event selection uses a
        ClassDefinitionUnsup with the configured cluster count.  Results
        are stored in ``self.classifiers`` keyed by processing channel.
        """
        sttg = self.settings

        # processing channel, color channel
        for p_channel, c_channel in self.ch_mapping.iteritems():
            # NOTE: get2() reads from the currently active section, so the
            # section is switched explicitly before each lookup
            self.settings.set_section('Processing')
            if sttg.get2(self._resolve_name(p_channel, 'classification')):
                chreg = self._channel_regions(p_channel)
                if sttg("EventSelection", "unsupervised_event_selection"):
                    nclusters = sttg("EventSelection", "num_clusters")
                    self.classifiers[p_channel] = ClassDefinitionUnsup( \
                        nclusters, chreg)
                else:
                    sttg.set_section('Classification')
                    clf = CommonClassPredictor(
                        clf_dir=sttg.get2(self._resolve_name(p_channel,
                                                             'classification_envpath')),
                        name=p_channel,
                        channels=chreg,
                        color_channel=c_channel)
                    clf.importFromArff()
                    clf.loadClassifier()
                    self.classifiers[p_channel] = clf

    @property
    def _transitions(self):
        if self.settings.get('EventSelection', 'unsupervised_event_selection'):
            transitions = np.array(((0, 1), ))
        else:
            try:
                transitions = np.array(eval(self.settings.get('EventSelection', 'labeltransitions')))
                transitions.reshape((-1, 2))
            except Exception as e:
                raise RuntimeError(("Make sure that transitions are of the form "
                                    "'int, int' or '(int, int), (int, int)' i.e "
                                    "2-int-tuple  or a list of 2-int-tuples"))

        return transitions

    def setup_eventselection(self, graph):
        """Setup the method for event selection.

        Builds the common option dict from the settings and returns an
        EventSelection (supervised) or UnsupervisedEventSelection
        instance operating on *graph*.

        NOTE(review): if neither supervised nor unsupervised event
        selection is enabled, ``es`` is never bound and the final return
        raises UnboundLocalError -- presumably callers guarantee one of
        the two modes is active; confirm at the call sites.
        """

        opts = {'transitions': self._transitions,
                'backward_range': self._convert_tracking_duration('backwardrange'),
                'forward_range': self._convert_tracking_duration('forwardrange'),
                'max_in_degree': self.settings.get('EventSelection', 'maxindegree'),
                'max_out_degree': self.settings.get('EventSelection', 'maxoutdegree')}

        if self.settings.get('EventSelection', 'supervised_event_selection'):
            opts.update({'backward_labels': [int(i) for i in self.settings.get('EventSelection', 'backwardlabels').split(',')],
                         'forward_labels': [int(i) for i in self.settings.get('EventSelection', 'forwardlabels').split(',')],
                         'backward_range_min': self.settings.get('EventSelection', 'backwardrange_min'),
                         'forward_range_min': self.settings.get('EventSelection', 'forwardrange_min'),
                         'backward_check': self._convert_tracking_duration('backwardCheck'),
                         'forward_check': self._convert_tracking_duration('forwardCheck')})
            es = EventSelection(graph, **opts)

        elif self.settings.get('EventSelection', 'unsupervised_event_selection'):
            # only one classdef exists in the unsupervised case
            cld = self.classifiers.values()[0] # only one classdef in case of UES
            opts.update({'forward_check': self._convert_tracking_duration('min_event_duration'),
                         'forward_labels': (1, ),
                         'backward_check': -1, # unsused for unsupervised usecase
                         'backward_labels': (0, ),
                         'num_clusters': self.settings.get('EventSelection', 'num_clusters'),
                         'min_cluster_size': self.settings.get('EventSelection', 'min_cluster_size'),
                         'classdef': cld})
            es = UnsupervisedEventSelection(graph, **opts)

        return es

    def _convert_tracking_duration(self, option_name):
        """Convert a tracking duration setting into a frame count.

        Interprets the configured duration unit using the mean
        time-lapse of the current position and returns the rounded
        number of frames.  Falls back to the raw value when no
        timestamp information is available.
        """
        duration = self.settings.get('EventSelection', option_name)
        unit = self.settings.get('EventSelection', 'duration_unit')

        # mean and stddev of the time-lapse for the current position
        ts_info = self.meta_data.get_timestamp_info(self.position)
        if unit == TimeConverter.FRAMES or ts_info is None:
            nframes = duration
        elif unit == TimeConverter.MINUTES:
            nframes = (duration * 60.) / ts_info[0]
        elif unit == TimeConverter.SECONDS:
            nframes = duration / ts_info[0]
        else:
            raise ValueError("Wrong unit '%s' specified." %unit)
        return int(round(nframes))

    def define_exp_features(self):
        """Return the {channel: {region: feature_names}} export mapping.

        A value of None stands for 'all features'.  For the merged
        channel the feature names of its constituent channels are
        combined as '<channel>_<region>_<feature>'.
        """
        features = {}
        for name in self.processing_channels:
            region_features = {}

            for region in MetaPluginManager().region_info.names[name.lower()]:
                # the merged channel is assembled separately below
                if name is self.MERGED_CHANNEL:
                    continue
                # export all features extracted per regions
                if self.settings.get('Output', 'events_export_all_features') or \
                        self.settings.get('Output', 'export_track_data'):
                    # None means all features
                    region_features[region] = None
                # export selected features from settings
                else:
                    region_features[region] = \
                        self.settings.get('General',
                                          '%s_featureextraction_exportfeaturenames'
                                          % name.lower())

                features[name] = region_features

            # special case for merged channel
            if name is self.MERGED_CHANNEL:
                mftrs = list()
                for channel, region in self._channel_regions(name).iteritems():
                    # NOTE(review): once mftrs is set to None (a channel
                    # exporting all features), a later channel with a
                    # feature list would hit None.append -- presumably
                    # the 'export all' settings make every channel None
                    # at once; confirm
                    if features[channel][region] is None:
                        mftrs = None
                    else:
                        for feature in features[channel][region]:
                            mftrs.append("_".join((channel, region, feature)))
                region_features[self._all_channel_regions[name].values()] = mftrs
                features[name] = region_features

        return features

    def export_object_counts(self):
        """Write per-class object counts and the population plots.

        Skipped entirely when no classifier has been loaded, since
        class counts cannot be determined without one.
        """
        fname = join(self._statistics_dir, 'P%s__object_counts.txt' % self.position)

        ch_info = OrderedDict()
        for chname, clf in self.classifiers.iteritems():
            class_names = clf.class_names.values()
            ch_info[chname] = (clf.regions, class_names,
                               [clf.hexcolors[cn] for cn in class_names])

        # if no classifier has been loaded, no counts can be exported.
        if not ch_info:
            return

        self.timeholder.exportObjectCounts(fname, self.position, self.meta_data, ch_info)

        ymax = self.settings.get('Output', 'export_object_counts_ylim_max')
        self.timeholder.exportPopulationPlots(ch_info, self._plots_dir,
                                              self.plate_id, self.position,
                                              ymax=ymax)



    def export_object_details(self):
        fname = join(self._statistics_dir,
                        'P%s__object_details.txt' % self.position)
        self.timeholder.exportObjectDetails(fname)

    def export_image_names(self):
        self.timeholder.exportImageFileNames(self._statistics_dir,
                                             self.position,
                                             self._imagecontainer._importer,
                                             self.ch_mapping)

    def export_full_tracks(self):
        """Export features along complete tracks into the 'full' folder."""
        out_dir = join(self._statistics_dir, 'full')
        EventExporter(self.meta_data).full_tracks(
            self.timeholder, self._tes.visitor_data, self.position, out_dir)

    def export_graphviz(self, channel_name='Primary', region_name='primary'):
        """Write the tracking graph as graphviz .dot plus a feature csv."""
        dot_name = 'tracking_graph___P%s.dot' %self.position
        exporter = TrackExporter()
        exporter.graphviz_dot(join(self._statistics_dir, dot_name),
                              self._tracker)

        # one sample holder per frame for the feature table
        sample_holders = OrderedDict(
            (frame, self.timeholder[frame][channel_name].get_region(region_name))
            for frame in self.timeholder.keys())

        csv_name = join(self._statistics_dir,
                        dot_name.replace('.dot', '_features.csv'))
        exporter.tracking_data(csv_name, sample_holders)

    def export_gallery_images(self):
        """Cut event gallery images for every processing channel.

        The per-channel input directory (raw cut images) is removed
        afterwards whether or not the cutter ran.
        """
        for chname in self.processing_channels:
            src = join(self._images_dir, chname.lower())

            if isdir(src):
                dst = join(self._gallery_dir, chname.lower())
                self.logger.info("running Cutter for '%s'..." %chname)
                size = \
                    self.settings.get('Output', 'events_gallery_image_size')
                TrackGallery(self._tes.centers(),
                             src, dst, self.position, size=size)
            else:
                self.logger.warning('directory not found (%s)' %src)
                self.logger.warning('can not write the gallery images')
            # FIXME: be careful here. normally only raw images are
            #        used for the cutter and can be deleted
            #        afterwards
            shutil.rmtree(src, ignore_errors=True)

    def export_tracks_hdf5(self):
        """Serialize the tracker's graph into the hdf file."""
        self.logger.debug("--- serializing tracking start")
        graph = self._tracker.graph
        self.timeholder.serialize_tracking(graph)
        self.logger.debug("--- serializing tracking ok")

    def export_events(self):
        """Export and save event selection data."""
        # writes to the event folder
        events_dir = join(self._statistics_dir, "events")
        EventExporter(self.meta_data).track_features(
            self.timeholder, self._tes.visitor_data,
            self.export_features, self.position, events_dir)
        self.logger.debug("--- serializing events ok")

    def export_events_hdf5(self):
        # writes event data to hdf5
        self.timeholder.serialize_events(self._tes)

    def export_tc3(self):
        """Export TC3 clustering results (unsupervised event selection)."""
        t_mean = self.meta_data.get_timestamp_info(self.position)[0]
        converter = TimeConverter(t_mean, TimeConverter.SECONDS)
        # time step between analyzed frames in minutes
        increment = self.settings('General', 'frameincrement')
        t_step = converter.sec2min(t_mean) * increment

        nclusters = self.settings.get('EventSelection', 'num_clusters')
        tc3 = TC3Exporter(self._tes.tc3data, self._tc3_dir, nclusters,
                          t_step, TimeConverter.MINUTES, self.position)
        tc3()

    def export_classlabels(self):
        """Save classlabels of each object to the hdf file."""
        # function works for supervised and unuspervised case
        for channels in self.timeholder.itervalues():
            for chname, classifier in self.classifiers.iteritems():
                holder = channels[chname].get_region(classifier.regions)
                if classifier.feature_names is None:
                    # special for unsupervised case
                    # NOTE: mutates the classifier -- feature names are
                    # adopted from the first region holder encountered
                    classifier.feature_names = holder.feature_names
                self.timeholder.save_classlabels(channels[chname],
                                                 holder, classifier)

    def __call__(self):
        """Run the full analysis pipeline for this position.

        Sets up the timeholder/tracker/classifiers, analyzes all frames,
        performs event selection and exports everything according to the
        settings.  Returns the number of processed image sets (0 when
        aborted).
        """
        # include hdf5 file name in hdf5_options
        # perhaps timeholder might be a good place to read out the options
        # file does not have to exist to proceed
        hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     hdf5_fname,
                                     self.meta_data, self.settings,
                                     self._frames,
                                     self.plate_id,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        self.setup_classifiers()

        # setup tracker
        if self.settings('Processing', 'tracking'):
            tropts = (self.settings('Tracking', 'tracking_maxobjectdistance'),
                      self.settings('Tracking', 'tracking_maxsplitobjects'),
                      self.settings('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position = self.position,
                          create_images = True,
                          binning_factor = 1,
                          detect_objects = self.settings('Processing',
                                                         'objectdetection'))

        self.export_features = self.define_exp_features()
        # per-frame processing (segmentation, tracking, classification)
        n_images = self._analyze(ca)

        if n_images > 0:
            # invoke event selection
            if self.settings('Processing', 'eventselection') and \
                    self.settings('Processing', 'tracking'):

                evchannel = self.settings('EventSelection', 'eventchannel')
                region = self.classifiers[evchannel].regions
                # clone the graph only when events are selected on a
                # channel/region other than the one used for tracking
                if self.settings('EventSelection', 'unsupervised_event_selection'):
                    graph = self._tracker.graph
                elif  evchannel != PrimaryChannel.NAME or \
                        region != self.settings("Tracking", "region"):
                    graph = self._tracker.clone_graph(self.timeholder,
                                                      evchannel,
                                                      region)
                else:
                    graph = self._tracker.graph

                self._tes = self.setup_eventselection(graph)
                self.logger.debug("--- visitor start")
                self._tes.find_events()
                self.logger.debug("--- visitor ok")
                if self.is_aborted():
                    return 0 # number of processed images

            # save all the data of the position, no aborts from here on
            # want all processed data saved
            if self.settings('Output', 'export_object_counts') and \
                    self.settings('EventSelection', 'supervised_event_selection'):
                # no object counts in case of unsupervised event selection
                self.export_object_counts()
            if self.settings('Output', 'export_object_details'):
                self.export_object_details()
            if self.settings('Output', 'export_file_names'):
                self.export_image_names()

            if self.settings('Processing', 'tracking'):
                self.export_tracks_hdf5()
                self.update_status({'text': 'export events...'})

                if self.settings('Output', 'hdf5_include_events'):
                    self.export_events_hdf5()

                if self.settings('Output', "export_events"):
                    if self.settings('Processing', 'eventselection'):
                        self.export_events()
                    if self.settings('EventSelection', 'unsupervised_event_selection'):
                        self.export_tc3()

                if self.settings('Output', 'export_track_data'):
                    self.export_full_tracks()
                if self.settings('Output', 'export_tracking_as_dot'):
                    self.export_graphviz(channel_name =PrimaryChannel.NAME,\
                                          region_name =self._all_channel_regions[PrimaryChannel.NAME][PrimaryChannel.NAME])

            self.export_classlabels()

            self.update_status({'text': 'export events...',
                                'max': 1,
                                'progress': 1})

            # remove all features from all channels to free memory
            # for the generation of gallery images
            self.timeholder.purge_features()
            if self.settings.get('Output', 'events_export_gallery_images') and \
                    self.settings.get('Processing', 'eventselection'):
                self.export_gallery_images()

        # log average processing time per image set (skip when nothing ran)
        try:
            intval = stopwatch.stop()/n_images*1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info(" - %d image sets analyzed, %3d ms per image set" %
                             (n_images, intval))

        self.touch_finished()
        self.clear()
        return n_images

    @property
    def hdf5_filename(self):
        return self.timeholder.hdf5_filename

    def touch_finished(self, times=None):
        """Writes an empty file to mark this position as finished"""
        fname = join(self._finished_dir, '%s__finished.txt' % self.position)
        with open(fname, "w") as f:
            os.utime(fname, times)

    def clear(self):
        """Close the timeholder (if any) and detach logging handlers."""
        holder = self.timeholder
        if holder is not None:
            holder.close_all()
        # close and remove handlers from logging object
        self.close()

    def _analyze(self, cellanalyzer):
        """Process every frame of the position with *cellanalyzer*.

        Per frame: segmentation/feature extraction, tracking,
        (supervised) classification and image rendering.  Returns the
        number of processed image sets, or 0 if the run was aborted.
        """
        super(PositionAnalyzer, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        # skip all rendering work when data are only reused from hdf5
        minimal_effort = self.settings.get('Output', 'minimal_effort') and self.settings.get('Output', 'hdf5_reuse')

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            if self.is_aborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                       len(self._frames))
                self.update_status({'progress': self._frames.index(frame)+1,
                                    'text': txt,
                                    'interval': stopwatch.interim()})

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()

            self.logger.info(" - Frame %d, cellanalyzer.process (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            n_images += 1
            # overlay images collected for rendering (tracking lines etc.)
            images = []

            if self.settings('Processing', 'tracking'):
                region = self.settings('Tracking', 'region')
                samples = self.timeholder[frame][PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if self.settings('Tracking', 'tracking_visualization'):
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    nframes = self.settings('Tracking', 'tracking_visualize_track_length')
                    radius = self.settings('Tracking', 'tracking_centroid_radius')
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, nframes, radius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            self.logger.info(" - Frame %d, Tracking (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            # can't cluster on a per frame basis
            if self.settings("EventSelection", "supervised_event_selection"):
                for clf in self.classifiers.itervalues():
                    cellanalyzer.classify_objects(clf)

            self.logger.info(" - Frame %d, Classification (ms): %3d" \
                             % (frame, stopwatch.interval()*1000))

            self.settings.set_section('General')
            # want emit all images at once
            if not minimal_effort:
                imgs = {}
                imgs.update(self.render_classification_images(cellanalyzer, images, frame))
                imgs.update(self.render_contour_images(cellanalyzer, images, frame))
                msg = 'PL %s - P %s - T %05d' %(self.plate_id, self.position, frame)
                self.set_image(imgs, msg, 50)

                if self.settings('Output', 'rendering_channel_gallery'):
                    self.render_channel_gallery(cellanalyzer, frame)

                if self.settings('Output', 'rendering_labels_discwrite'):
                    cellanalyzer.exportLabelImages(self._labels_dir)

            # drop everything but the features configured for export
            cellanalyzer.purge(features=self.export_features)
            self.logger.info(" - Frame %d, rest (ms): %3d" \
                                 %(frame, stopwatch.interval()*1000))
            self.logger.info(" - Frame %d, duration (ms): %3d" \
                                 %(frame, stopwatch.interim()*1000))


        return n_images

    def render_channel_gallery(self, cellanalyzer, frame):
        """Write a gallery image for every virtual (merged) channel."""
        for vchannel in cellanalyzer.virtual_channels.itervalues():
            gallery = ChannelGallery(vchannel, frame, self._channel_gallery_dir)
            gallery.make_gallery()

    def render_contour_images(self, ca, images, frame):
        """Render contour overlay images for every configured rendering.

        Returns {region: image} for the non-gallery renderings; gallery
        renderings (regions listed in self.CHANNELS) are only written
        to disk.
        """
        rendered = dict()
        for region, render_par in self.settings.get2('rendering').iteritems():
            out_dir = join(self._images_dir, region)
            write = self.settings('Output', 'rendering_contours_discwrite')

            # gallery images are treated differenty
            if region in self.CHANNELS.keys():
                ca.render(out_dir, dctRenderInfo=render_par, writeToDisc=True)
            else:
                img, _ = ca.render(out_dir, dctRenderInfo=render_par,
                                   writeToDisc=write, images=images)
                rendered[region] = img
        return rendered

    def render_classification_images(self, cellanalyzer, images, frame):
        """Render classification overlay images per configured rendering.

        Returns {region: image}.  (Also normalizes the irregular
        indentation of the original implementation.)
        """
        rendered = dict()
        for region, render_par in self.settings.get2('rendering_class').iteritems():
            out_images = join(self._images_dir, region)
            write = self.settings('Output', 'rendering_class_discwrite')
            image, _ = cellanalyzer.render(out_images,
                                           dctRenderInfo=render_par,
                                           writeToDisc=write,
                                           images=images)
            rendered[region] = image
        return rendered
예제 #9
0
파일: position.py 프로젝트: imcf/cecog
class PositionAnalyzer(PositionCore):
    def __init__(self, *args, **kw):
        """Prepare the position analyzer: output directories and logging."""
        super(PositionAnalyzer, self).__init__(*args, **kw)

        # tracking requires a time axis; force it off for single-frame data
        if not self.has_timelapse:
            self.settings.set('Processing', 'tracking', False)

        # directories must exist before the log file handler is attached
        self._makedirs()
        self.add_file_handler(join(self._log_dir, "%s.log" % self.position),
                              self._lvl.DEBUG)

    def _makedirs(self):
        """Create the output directory tree for this position.

        Every created (or attempted) directory is also exposed on the
        instance as ``_<basename>_dir`` (leading underscores of the
        basename stripped), e.g. ``log/_finished`` -> ``_finished_dir``.
        """
        assert isinstance(self.position, basestring)
        assert isinstance(self._out_dir, basestring)

        self._analyzed_dir = join(self._out_dir, "analyzed")
        # without a time axis all positions share the analyzed directory
        self._position_dir = (join(self._analyzed_dir, self.position)
                              if self.has_timelapse else self._analyzed_dir)

        targets = [self._analyzed_dir,
                   join(self._out_dir, "log"),
                   join(self._out_dir, "log", "_finished"),
                   join(self._out_dir, "hdf5"),
                   join(self._out_dir, "plots"),
                   join(self._position_dir, "statistics"),
                   join(self._position_dir, "gallery"),
                   join(self._position_dir, "channel_gallery"),
                   join(self._position_dir, "images"),
                   join(self._position_dir, "images", "_labels")]

        for target in targets:
            try:
                makedirs(target)
            except os.error:  # no permissions
                self.logger.error("mkdir %s: failed" % target)
            else:
                self.logger.info("mkdir %s: ok" % target)
            # register the directory as an attribute regardless of success
            setattr(self, "_%s_dir" % basename(target.lower()).strip("_"), target)

    def setup_classifiers(self):
        """Load a trained classifier for every channel with
        classification enabled and store it in self.classifiers."""
        sttg = self.settings
        # processing channel, color channel
        for p_channel, c_channel in self.ch_mapping.iteritems():
            self.settings.set_section('Processing')
            if not sttg.get2(self._resolve_name(p_channel, 'classification')):
                continue
            # get2 below reads from the 'Classification' section
            sttg.set_section('Classification')
            clf_dir = sttg.get2(self._resolve_name(p_channel,
                                                   'classification_envpath'))
            clf = CommonClassPredictor(clf_dir=clf_dir,
                                       name=p_channel,
                                       channels=self._channel_regions(p_channel),
                                       color_channel=c_channel)
            clf.importFromArff()
            clf.loadClassifier()
            self.classifiers[p_channel] = clf

    def _convert_tracking_duration(self, option_name):
        """Convert a tracking duration setting into a frame count.

        Interprets the configured duration unit using the mean
        time-lapse of the current position and returns the rounded
        number of frames.  Falls back to the raw value when no
        timestamp information is available.
        """
        duration = self.settings.get(SECTION_NAME_TRACKING, option_name)
        unit = self.settings.get(SECTION_NAME_TRACKING,
                                 'tracking_duration_unit')

        # mean and stddev of the time-lapse for the current position
        ts_info = self.meta_data.get_timestamp_info(self.position)
        if unit == TRACKING_DURATION_UNIT_FRAMES or ts_info is None:
            nframes = duration
        elif unit == TRACKING_DURATION_UNIT_MINUTES:
            nframes = (duration * 60.) / ts_info[0]
        elif unit == TRACKING_DURATION_UNIT_SECONDS:
            nframes = duration / ts_info[0]
        else:
            raise ValueError("Wrong unit '%s' specified." % unit)
        return int(round(nframes))

    @property
    def _es_options(self):
        """Option dict for supervised event selection, read from settings."""
        transitions = eval(self.settings.get2('tracking_labeltransitions'))
        # a single '(a, b)' pair is normalized to a 1-tuple of pairs
        if not isinstance(transitions[0], tuple):
            transitions = (transitions, )

        to_frames = self._convert_tracking_duration
        get2 = self.settings.get2
        return {
            'transitions': transitions,
            'backward_labels':
                [int(x) for x in get2('tracking_backwardlabels').split(',')],
            'forward_labels':
                [int(x) for x in get2('tracking_forwardlabels').split(',')],
            'backward_check': to_frames('tracking_backwardCheck'),
            'forward_check': to_frames('tracking_forwardCheck'),
            'backward_range': to_frames('tracking_backwardrange'),
            'forward_range': to_frames('tracking_forwardrange'),
            'backward_range_min': get2('tracking_backwardrange_min'),
            'forward_range_min': get2('tracking_forwardrange_min'),
            'max_in_degree': get2('tracking_maxindegree'),
            'max_out_degree': get2('tracking_maxoutdegree'),
        }

    def define_exp_features(self):
        """Return the {channel: {region: feature_names}} export mapping.

        A value of None stands for 'all features'.
        """
        features = {}
        for chname in self.processing_channels:
            per_region = {}
            for region in REGION_INFO.names[chname.lower()]:
                if self.settings.get('Output', 'events_export_all_features') or \
                        self.settings.get('Output', 'export_track_data'):
                    # None means all features
                    per_region[region] = None
                else:
                    # only the features selected in the settings
                    per_region[region] = self.settings.get(
                        'General',
                        '%s_featureextraction_exportfeaturenames'
                        % chname.lower())
                features[chname] = per_region
        return features

    def export_object_counts(self):
        """Write per-class object counts and the population plots."""
        fname = join(self._statistics_dir,
                     'P%s__object_counts.txt' % self.position)

        # at least the total count for primary is always exported
        ch_info = OrderedDict([('Primary', ('primary', [], []))])
        for chname, clf in self.classifiers.iteritems():
            class_names = clf.class_names.values()
            ch_info[chname] = (clf.regions, class_names,
                               [clf.hexcolors[cn] for cn in class_names])

        self.timeholder.exportObjectCounts(fname, self.position,
                                           self.meta_data, ch_info)
        ymax = \
            self.settings.get('Output', 'export_object_counts_ylim_max')

        # plot only for primary channel so far!
        self.timeholder.exportPopulationPlots(fname, self._plots_dir,
                                              self.position, self.meta_data,
                                              ch_info['Primary'], ymax)

    def export_object_details(self):
        fname = join(self._statistics_dir,
                     'P%s__object_details.txt' % self.position)
        self.timeholder.exportObjectDetails(fname, excel_style=False)
        fname = join(self._statistics_dir,
                     'P%s__object_details_excel.txt' % self.position)
        self.timeholder.exportObjectDetails(fname, excel_style=True)

    def export_image_names(self):
        self.timeholder.exportImageFileNames(self._statistics_dir,
                                             self.position,
                                             self._imagecontainer._importer,
                                             self.ch_mapping)

    def export_full_tracks(self):
        """Export features along complete tracks into the 'full' folder."""
        out_dir = join(self._statistics_dir, 'full')
        EventExporter(self.meta_data).full_tracks(
            self.timeholder, self._tes.visitor_data, self.position, out_dir)

    def export_graphviz(self, channel_name='Primary', region_name='primary'):
        """Write the tracking graph as graphviz .dot plus a feature csv."""
        dot_name = 'tracking_graph___P%s.dot' % self.position
        exporter = TrackExporter()
        exporter.graphviz_dot(join(self._statistics_dir, dot_name),
                              self._tracker)

        # one sample holder per frame for the feature table
        sample_holders = OrderedDict(
            (frame, self.timeholder[frame][channel_name].get_region(region_name))
            for frame in self.timeholder.keys())

        csv_name = join(self._statistics_dir,
                        dot_name.replace('.dot', '_features.csv'))
        exporter.tracking_data(csv_name, sample_holders)

    def export_gallery_images(self):
        """Cut per-event gallery images for every processing channel.

        The rendered images in ``self._images_dir/<channel>`` serve as
        cutter input and are removed afterwards.
        """
        for ch_name in self.processing_channels:
            cutter_in = join(self._images_dir, ch_name)
            if isdir(cutter_in):
                cutter_out = join(self._gallery_dir, ch_name)
                self.logger.info("running Cutter for '%s'..." % ch_name)
                image_size = \
                    self.settings.get('Output', 'events_gallery_image_size')
                EventGallery(self._tes,
                             cutter_in,
                             self.position,
                             cutter_out,
                             self.meta_data,
                             oneFilePerTrack=True,
                             size=(image_size, image_size))
            # FIXME: be careful here. normally only raw images are
            #        used for the cutter and can be deleted
            #        afterwards
            # NOTE(review): the rmtree also runs when cutter_in did not
            # exist; harmless due to ignore_errors, but confirm intent.
            shutil.rmtree(cutter_in, ignore_errors=True)

    def export_tracks_hdf5(self):
        """Save tracking data to hdf file.

        NOTE(review): serializes ``self._tes.graph`` (the event-selection
        graph), unlike ``save_tracks`` elsewhere which uses
        ``self._tracker.graph`` -- confirm this is intended.
        """
        self.logger.debug("--- serializing tracking start")
        self.timeholder.serialize_tracking(self._tes.graph)
        self.logger.debug("--- serializing tracking ok")

    def export_events(self):
        """Export and save event selection data.

        Writes per-track feature tables into the ``events`` sub-directory
        of the statistics directory and serializes the event data into the
        hdf5 file.
        """
        exporter = EventExporter(self.meta_data)
        # writes to the event folder
        odir = join(self._statistics_dir, "events")
        exporter.track_features(self.timeholder, self._tes.visitor_data,
                                self.export_features, self.position, odir)
        self.logger.debug("--- visitor analysis ok")
        # writes event data to hdf5
        self.timeholder.serialize_events(self._tes)
        self.logger.debug("--- serializing events ok")

    def __call__(self):
        """Analyze one position end-to-end.

        Creates the TimeHolder (hdf5 backend), optionally sets up tracking
        and event selection, runs the per-frame analysis and finally writes
        all requested exports.  Returns the number of processed image sets
        (0 if aborted).
        """
        # include hdf5 file name in hdf5_options
        # perhaps timeholder might be a good place to read out the options
        # file must not exist to proceed
        hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     hdf5_fname, self.meta_data, self.settings,
                                     self._frames, self.plate_id,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        # setup tracker
        if self.settings.get('Processing', 'tracking'):
            # NOTE(review): 'region' is never used here -- the region name
            # is read again inside _analyze()
            region = self.settings.get('Tracking', 'tracking_regionname')
            tropts = (self.settings.get('Tracking',
                                        'tracking_maxobjectdistance'),
                      self.settings.get('Tracking',
                                        'tracking_maxsplitobjects'),
                      self.settings.get('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)
            self._tes = EventSelection(self._tracker.graph, **self._es_options)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position=self.position,
                          create_images=True,
                          binning_factor=1,
                          detect_objects=self.settings.get(
                              'Processing', 'objectdetection'))

        self.setup_classifiers()
        self.export_features = self.define_exp_features()
        n_images = self._analyze(ca)

        if n_images > 0:
            # invoke event selection
            if self.settings.get('Processing', 'tracking_synchronize_trajectories') and \
                    self.settings.get('Processing', 'tracking'):
                self.logger.debug("--- visitor start")
                self._tes.find_events()
                self.logger.debug("--- visitor ok")
                if self.is_aborted():
                    return 0  # number of processed images

            # save all the data of the position, no aborts from here on
            # want all processed data saved
            if self.settings.get('Output', 'export_object_counts'):
                self.export_object_counts()
            if self.settings.get('Output', 'export_object_details'):
                self.export_object_details()
            if self.settings.get('Output', 'export_file_names'):
                self.export_image_names()

            if self.settings.get('Processing', 'tracking'):
                self.export_tracks_hdf5()
                self.update_status({'text': 'export events...'})
                if self.settings.get('Processing',
                                     'tracking_synchronize_trajectories'):
                    self.export_events()
                if self.settings.get('Output', 'export_track_data'):
                    self.export_full_tracks()
                if self.settings.get('Output', 'export_tracking_as_dot'):
                    self.export_graphviz()

            self.update_status({
                'text': 'export events...',
                'max': 1,
                'progress': 1
            })

            # remove all features from all channels to free memory
            # for the generation of gallery images
            self.timeholder.purge_features()
            if self.settings.get('Output', 'events_export_gallery_images'):
                self.export_gallery_images()

        try:
            # average processing time per image set (milliseconds)
            intval = stopwatch.stop() / n_images * 1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info(
                " - %d image sets analyzed, %3d ms per image set" %
                (n_images, intval))

        self.touch_finished()
        #        self.clear()
        return n_images

    @property
    def hdf5_filename(self):
        """Path of the hdf5 file, delegated to the timeholder which owns
        the backend."""
        return self.timeholder.hdf5_filename

    def touch_finished(self, times=None):
        """Writes an empty file to mark this position as finished.

        *times* is passed through to ``os.utime`` (``None`` means 'now').
        """
        fname = join(self._finished_dir, '%s__finished.txt' % self.position)
        # open() creates/truncates the marker file; utime stamps it while
        # the handle is still open
        with open(fname, "w") as f:
            os.utime(fname, times)

    def clear(self):
        """Release the position's resources."""
        # closes hdf5
        self.timeholder.close_all()

    def _analyze(self, cellanalyzer):
        """Per-frame processing loop: segmentation, tracking,
        classification and rendering.

        Returns the number of processed image sets (0 on abort).
        """
        # NOTE(review): the parent implementation is invoked without the
        # cellanalyzer argument -- confirm the base class signature
        super(PositionAnalyzer, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position, self._frames,
                         list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            if self.is_aborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' % (frame, self._frames.index(frame) + 1,
                                        len(self._frames))
                self.update_status({
                    'progress': self._frames.index(frame) + 1,
                    'text': txt,
                    'interval': stopwatch.interim()
                })

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()
            n_images += 1
            images = []

            if self.settings.get('Processing', 'tracking'):
                # track the objects of the configured primary-channel region
                region = self.settings.get('Tracking', 'tracking_regionname')
                samples = self.timeholder[frame][
                    PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if self.settings.get('Tracking', 'tracking_visualization'):
                    # overlay connection/split track images on the renderings
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    nframes = self.settings.get(
                        'Tracking', 'tracking_visualize_track_length')
                    radius = self.settings.get('Tracking',
                                               'tracking_centroid_radius')
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, nframes, radius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            for clf in self.classifiers.itervalues():
                cellanalyzer.classify_objects(clf)

            ##############################################################
            # FIXME - part for browser
            if not self._myhack is None:
                self.render_browser(cellanalyzer)
            ##############################################################

            self.settings.set_section('General')
            self.render_classification_images(cellanalyzer, images, frame)
            self.render_contour_images(cellanalyzer, images, frame)

            if self.settings.get('Output', 'rendering_channel_gallery'):
                self.render_channel_gallery(cellanalyzer, frame)

            if self.settings.get('Output', 'rendering_labels_discwrite'):
                cellanalyzer.exportLabelImages(self._labels_dir)

            self.logger.info(" - Frame %d, duration (ms): %3d" \
                                 %(frame, stopwatch.interim()*1000))
            # drop everything except the features selected for export
            cellanalyzer.purge(features=self.export_features)

        return n_images

    def render_channel_gallery(self, cellanalyzer, frame):
        """Create a gallery image for every virtual channel of *frame*."""
        for chan in cellanalyzer.virtual_channels.itervalues():
            gallery = ChannelGallery(chan, frame, self._channel_gallery_dir)
            gallery.make_gallery()

    def render_contour_images(self, ca, images, frame):
        """Render contour overlay images for each 'rendering' setting.

        Channel-named regions (gallery images) are always written to disc;
        all other regions are also pushed to the GUI via ``set_image``.
        """
        for region, render_par in self.settings.get2('rendering').iteritems():
            out_dir = join(self._images_dir, region)
            write = self.settings.get('Output', 'rendering_contours_discwrite')

            if region in self.CHANNELS.keys():
                # gallery images are treated differently
                ca.render(out_dir, dctRenderInfo=render_par, writeToDisc=True)
            else:
                image, fname = ca.render(out_dir,
                                         dctRenderInfo=render_par,
                                         writeToDisc=write,
                                         images=images)
                msg = 'PL %s - P %s - T %05d' % (self.plate_id, self.position,
                                                 frame)
                self.set_image(image, msg, fname, region, 50)

    def render_classification_images(self, cellanalyzer, images, frame):
        """Render classification overlay images per 'rendering_class'
        setting and push them to the GUI."""
        # the status message only depends on the frame, not on the region
        msg = 'PL %s - P %s - T %05d' % (self.plate_id, self.position, frame)
        for region, render_par in self.settings.get2(
                'rendering_class').iteritems():
            out_dir = join(self._images_dir, region)
            to_disc = self.settings.get('Output', 'rendering_class_discwrite')
            rgb_image, fname = cellanalyzer.render(out_dir,
                                                   dctRenderInfo=render_par,
                                                   writeToDisc=to_disc,
                                                   images=images)
            self.set_image(rgb_image, msg, fname, region, 50)

    def render_browser(self, cellanalyzer):
        """Push the current channel images and the object contours of the
        selected region to the browser hack object.

        Fix: ``show_image`` is now called once with the complete image
        dict.  Previously it was called inside the loop, re-displaying a
        partially filled dict for every channel.
        """
        channel_images = {}
        for name in cellanalyzer.get_channel_names():
            channel = cellanalyzer.get_channel(name)
            channel_images[channel.strChannelId] = channel.meta_image.image
        # emit all channel images in a single call
        self._myhack.show_image(channel_images)

        channel_name, region_name = self._myhack._object_region
        channel = cellanalyzer.get_channel(channel_name)
        if channel.has_region(region_name):
            region = channel.get_region(region_name)
            coords = {}
            for obj_id, obj in region.iteritems():
                coords[obj_id] = obj.crack_contour
            self._myhack.set_coords(coords)
예제 #10
0
    def __call__(self):
        """Analyze one position: segmentation, tracking, optional event
        selection, and serialization of tracks/events/class labels into
        the cellh5 file.  Returns 0 on abort during event detection.
        """
        # NOTE(review): 'thread' is never used afterwards
        thread = QThread.currentThread()
        well, site = self._posinfo()

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     self.datafile, self.meta_data,
                                     self.settings, self._frames,
                                     self.plate_id, well, site,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        self.setup_classifiers()

        # setup tracker
        if self.settings('Processing', 'tracking'):
            tropts = (self.settings('Tracking', 'tracking_maxobjectdistance'),
                      self.settings('Tracking', 'tracking_maxsplitobjects'),
                      self.settings('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position=self.position,
                          create_images=True,
                          binning_factor=1,
                          detect_objects=self.settings('Processing',
                                                       'objectdetection'))

        self.export_features = self.define_exp_features()
        self._analyze(ca)

        # invoke event selection
        if self.settings('Processing', 'eventselection') and \
           self.settings('Processing', 'tracking'):

            evchannel = self.settings('EventSelection', 'eventchannel')
            region = self.classifiers[evchannel].regions

            # the tracking graph refers to the primary channel/region;
            # clone it when events are selected on a different one
            if evchannel != PrimaryChannel.NAME or region != self.settings(
                    "Tracking", "region"):
                graph = self._tracker.clone_graph(self.timeholder, evchannel,
                                                  region)
            else:
                graph = self._tracker.graph

            self._tes = self.setup_eventselection(graph)
            self.logger.info("Event detection")
            self._tes.find_events()
            if self.isAborted():
                return 0  # number of processed images

        # save all the data of the position, no aborts from here on
        # want all processed data saved
        if self.settings('Processing', 'tracking'):
            self.statusUpdate(text="Saving Tracking Data to cellh5...")
            self.save_tracks()

            if self.settings('Output', 'hdf5_include_events') and \
               self.settings('Processing', "eventselection"):
                self.statusUpdate(text="Saving Event Data to cellh5...")
                self.save_events()

        self.save_classification()
        self.timeholder.purge()

        try:
            # average processing time per image set (milliseconds)
            n = len(self._frames)
            intval = stopwatch.stop() / n * 1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info("%d images analyzed, %3d ms per image set" %
                             (n, intval))

        self.clear()

        # persist the plate layout once the file exists
        if isfile(self.datafile):
            with Ch5File(self.datafile, mode="r+") as ch5:
                ch5.savePlateLayout(self.layout, self.plate_id)
예제 #11
0
class PositionAnalyzer(PositionCore):
    def __init__(self, *args, **kw):
        """Position analyzer: disables tracking for non-timelapse data and
        sets up per-position debug logging."""
        super(PositionAnalyzer, self).__init__(*args, **kw)

        # tracking makes no sense without a time axis
        if not self.has_timelapse:
            self.settings.set('Processing', 'tracking', False)

        if self._writelogs:
            # one debug log file per position, in a 'log' directory next
            # to the data file's parent directory
            logfile = join(dirname(dirname(self.datafile)), "log",
                           "%s.log" % self.position)
            self.add_file_handler(logfile, self.Levels.DEBUG)
        self.logger.setLevel(self.Levels.DEBUG)

    def setup_classifiers(self):
        """Load a SupportVectorClassifier for every processing channel
        that has classification enabled in the settings."""
        sttg = self.settings

        # processing channel, color channel
        for p_channel, c_channel in self.ch_mapping.iteritems():
            self.settings.set_section('Processing')
            if sttg.get2(self._resolve_name(p_channel, 'classification')):
                chreg = self._channel_regions(p_channel)

                sttg.set_section('Classification')
                cpath = sttg.get2(
                    self._resolve_name(p_channel, 'classification_envpath'))
                # the classifier .hdf file lives inside its environment
                # directory and carries the same base name
                cpath = join(cpath, basename(cpath) + ".hdf")
                svc = SupportVectorClassifier(cpath,
                                              load=True,
                                              channels=chreg,
                                              color_channel=c_channel)
                # data is loaded; release the underlying file handle
                svc.close()
                self.classifiers[p_channel] = svc

    @property
    def _transitions(self):
        """Label transitions from the 'EventSelection' settings as an
        (n, 2) integer array.

        Raises RuntimeError when the setting cannot be parsed into
        2-int-tuples.
        """
        try:
            # NOTE(review): eval of a settings string -- trusted input only
            transitions = np.array(
                eval(self.settings.get('EventSelection', 'labeltransitions')))
            # keep the reshaped array: ndarray.reshape returns a new array,
            # the original discarded it and returned a 1-d array for a
            # single 'int, int' transition
            transitions = transitions.reshape((-1, 2))
        except Exception as e:
            raise RuntimeError(("Make sure that transitions are of the form "
                                "'int, int' or '(int, int), (int, int)' i.e "
                                "2-int-tuple  or a list of 2-int-tuples"))

        return transitions

    def setup_eventselection(self, graph):
        """Setup the method for event selection."""
        to_frames = self._convert_tracking_duration
        get = self.settings.get

        # all options in a single literal instead of dict + update
        opts = {
            'transitions': self._transitions,
            'backward_range': to_frames('backwardrange'),
            'forward_range': to_frames('forwardrange'),
            'max_in_degree': get('EventSelection', 'maxindegree'),
            'max_out_degree': get('EventSelection', 'maxoutdegree'),
            'backward_labels': [int(i) for i in
                                get('EventSelection',
                                    'backwardlabels').split(',')],
            'forward_labels': [int(i) for i in
                               get('EventSelection',
                                   'forwardlabels').split(',')],
            'backward_range_min': get('EventSelection', 'backwardrange_min'),
            'forward_range_min': get('EventSelection', 'forwardrange_min'),
            'backward_check': to_frames('backwardCheck'),
            'forward_check': to_frames('forwardCheck'),
        }
        return EventSelection(graph, **opts)

    def _convert_tracking_duration(self, option_name):
        """Converts a tracking duration according to the unit and the
        mean time-lapse of the current position.
        Returns number of frames.
        """
        value = self.settings.get('EventSelection', option_name)
        unit = self.settings.get('EventSelection', 'duration_unit')
        # (mean, stddev) of the time-lapse for the current position
        info = self.meta_data.get_timestamp_info(self.position)

        if info is None or unit == TimeConverter.FRAMES:
            frames = value
        elif unit == TimeConverter.MINUTES:
            frames = (value * 60.) / info[0]
        elif unit == TimeConverter.SECONDS:
            frames = value / info[0]
        else:
            raise ValueError("Wrong unit '%s' specified." % unit)
        return int(round(frames))

    def define_exp_features(self):
        """Assemble the per-channel mapping of region -> feature names
        for export.

        Returns a dict ``{channel: {region: [feature, ...]}}``; for the
        merged channel the region key is the tuple of its region names
        and the feature names are prefixed with ``channel_region_``.
        """
        features = {}
        for name in self.processing_channels:
            region_features = {}

            for region in MetaPluginManager().region_info.names[name.lower()]:
                # merged channel is handled below
                if name == self.MERGED_CHANNEL:
                    continue

                region_features[region] = \
                        self.settings.get('General',
                                          '%s_featureextraction_exportfeaturenames'
                                          % name.lower())

                features[name] = region_features

            # special case for merged channel
            if name == self.MERGED_CHANNEL:
                mftrs = list()
                for channel, region in self._channel_regions(name).iteritems():
                    if features[channel][region] is None:
                        # no explicit selection for one component channel
                        # -> export all features (previously the loop kept
                        # running and crashed appending to None)
                        mftrs = None
                        break
                    for feature in features[channel][region]:
                        mftrs.append("_".join((channel, region, feature)))
                # dict.values() returns a (unhashable) list -- convert to a
                # tuple so it can serve as a dict key
                region_features[
                    tuple(self._all_channel_regions[name].values())] = mftrs
                features[name] = region_features

        return features

    def save_tracks(self):
        """Save tracking data (the tracker's graph) to the hdf file."""
        self.logger.info("Save tracking data")
        self.timeholder.serialize_tracking(self._tracker.graph)

    def save_events(self):
        """Serialize the selected events into the hdf file."""
        self.logger.info("Save Event data")
        self.timeholder.serialize_events(self._tes)

    def save_classification(self):
        """Save classlabels of each object to the hdf file.

        Works for both the supervised and the unsupervised case.
        """
        for channels in self.timeholder.itervalues():
            for chname, classifier in self.classifiers.iteritems():
                holder = channels[chname].get_region(classifier.regions)
                # if classifier.feature_names is None:
                #     # special for unsupervised case
                #     classifier.feature_names = holder.feature_names
                self.timeholder.save_classlabels(channels[chname], holder,
                                                 classifier)

    def __call__(self):
        """Analyze one position end-to-end.

        Segments and tracks the objects of this position, optionally runs
        event selection, then serializes tracks, events and class labels
        into the cellh5 file.  Returns 0 when aborted during event
        detection.
        """
        # (an unused ``thread = QThread.currentThread()`` local was removed)
        well, site = self._posinfo()

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     self.datafile, self.meta_data,
                                     self.settings, self._frames,
                                     self.plate_id, well, site,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        self.setup_classifiers()

        # setup tracker
        if self.settings('Processing', 'tracking'):
            tropts = (self.settings('Tracking', 'tracking_maxobjectdistance'),
                      self.settings('Tracking', 'tracking_maxsplitobjects'),
                      self.settings('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position=self.position,
                          create_images=True,
                          binning_factor=1,
                          detect_objects=self.settings('Processing',
                                                       'objectdetection'))

        self.export_features = self.define_exp_features()
        self._analyze(ca)

        # invoke event selection
        if self.settings('Processing', 'eventselection') and \
           self.settings('Processing', 'tracking'):

            evchannel = self.settings('EventSelection', 'eventchannel')
            region = self.classifiers[evchannel].regions

            # the tracking graph refers to the primary channel/region;
            # clone it when events are selected on a different one
            if evchannel != PrimaryChannel.NAME or region != self.settings(
                    "Tracking", "region"):
                graph = self._tracker.clone_graph(self.timeholder, evchannel,
                                                  region)
            else:
                graph = self._tracker.graph

            self._tes = self.setup_eventselection(graph)
            self.logger.info("Event detection")
            self._tes.find_events()
            if self.isAborted():
                return 0  # number of processed images

        # save all the data of the position, no aborts from here on
        # want all processed data saved
        if self.settings('Processing', 'tracking'):
            self.statusUpdate(text="Saving Tracking Data to cellh5...")
            self.save_tracks()

            if self.settings('Output', 'hdf5_include_events') and \
               self.settings('Processing', "eventselection"):
                self.statusUpdate(text="Saving Event Data to cellh5...")
                self.save_events()

        self.save_classification()
        self.timeholder.purge()

        try:
            # average processing time per image set (milliseconds)
            n = len(self._frames)
            intval = stopwatch.stop() / n * 1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info("%d images analyzed, %3d ms per image set" %
                             (n, intval))

        self.clear()

        # persist the plate layout once the file exists
        if isfile(self.datafile):
            with Ch5File(self.datafile, mode="r+") as ch5:
                ch5.savePlateLayout(self.layout, self.plate_id)

    def clear(self):
        """Close the hdf5 backend and detach the logging handlers."""
        if self.timeholder is not None:
            self.timeholder.close_all()
        # close and remove handlers from logging object
        self.close()

    def _analyze(self, cellanalyzer):
        """Per-frame processing loop: segmentation, tracking, supervised
        classification and rendering of the result images."""
        # (an unused ``thread = QThread.currentThread()`` local was removed)
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position, self._frames,
                         list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            self.interruptionPoint()
            txt = '%s, %s, T %d (%d/%d)' \
                  %(self.plate_id, self.position, frame,
                    self._frames.index(frame)+1, len(self._frames))

            self.statusUpdate(text=txt,
                              interval=stopwatch.interim(),
                              increment=True)

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()

            self.logger.debug(" - Frame %d, cellanalyzer.process (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            images = []

            if self.settings('Processing', 'tracking'):
                apc = AppPreferences()
                region = self.settings('Tracking', 'region')
                samples = self.timeholder[frame][
                    PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if apc.display_tracks:
                    # overlay connection/split track images on the renderings
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, apc.track_length, apc.cradius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            self.logger.debug(" - Frame %d, Tracking (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            # can't cluster on a per frame basis
            if self.settings("EventSelection", "supervised_event_selection"):
                for channel, clf in self.classifiers.iteritems():
                    cellanalyzer.classify_objects(clf, channel)

            self.logger.debug(" - Frame %d, Classification (ms): %3d" \
                             % (frame, stopwatch.interval()*1000))

            self.settings.set_section('General')
            # want emit all images at once

            imgs = {}
            imgs.update(
                self.render_classification_images(cellanalyzer, images, frame))
            imgs.update(self.render_contour_images(cellanalyzer, images,
                                                   frame))
            msg = 'PL %s - P %s - T %05d' % (self.plate_id, self.position,
                                             frame)
            self.setImage(imgs, msg, 50)

            # drop everything except the features selected for export
            cellanalyzer.purge(features=self.export_features)
            self.logger.debug(" - Frame %d, duration (ms): %3d" \
                              %(frame, stopwatch.interim()*1000))

    def render_contour_images(self, ca, images, frame):
        """Render one contour image per 'rendering' setting entry and
        return them keyed by region name."""
        return dict(
            (region, ca.render(dctRenderInfo=render_par, images=images))
            for region, render_par
            in self.settings.get2('rendering').iteritems())

    def render_classification_images(self, cellanalyzer, images, frame):
        """Render one classification image per 'rendering_class' setting
        entry and return them keyed by region name."""
        return dict(
            (region, cellanalyzer.render(dctRenderInfo=render_par,
                                         images=images))
            for region, render_par
            in self.settings.get2('rendering_class').iteritems())