Example #1
    def track_features(self, timeholder, visitor_data, channel_regions,
                       position, outdir):
        shutil.rmtree(outdir, ignore_errors=True)
        makedirs(outdir)

        for tracks in visitor_data.itervalues():
            for startid, event_data in tracks.iteritems():
                if startid in ('_full_tracks', '_current_branch'):
                    continue
                for chname, region in channel_regions.iteritems():
                    for region_name, feature_names in region.iteritems():
                        try:
                            frame, obj_label, branch = Tracker.split_nodeid(
                                startid)
                        except ValueError:
                            frame, obj_label = Tracker.split_nodeid(startid)
                            branch = 1

                        filename = 'features__P%s__T%05d__O%04d__B%02d__C%s__R%s.txt' \
                            %(position, frame, obj_label, branch, chname, region_name)
                        filename = join(outdir, filename)

                        self._data_per_channel(timeholder, event_data,
                                               filename, chname, region_name,
                                               feature_names, position)
Example #2
File: exporter.py Project: imcf/cecog
    def full_tracks(self, timeholder, visitor_data, position, outdir):
        shutil.rmtree(outdir, ignore_errors=True)
        makedirs(outdir)

        for start_id, data in visitor_data.iteritems():
            for idx, track in enumerate(data['_full']):
                has_header = False
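                # the header is written once per file as three rows:
                # line1 = channel names, line2 = region ids, line3 = column keys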
                line1 = []
                line2 = []
                line3 = []

                frame, obj_label = Tracker.split_nodeid(start_id)[:2]
                filename = 'P%s__T%05d__O%04d__B%02d.txt' \
                    %(position, frame, obj_label, idx+1)
                f = open(join(outdir, filename), 'w')

                for node_id in track:
                    frame, obj_id = Tracker.split_nodeid(node_id)

                    coordinate = Coordinate(position=position, time=frame)
                    prefix = [frame, self.meta_data.get_timestamp_relative(coordinate), obj_id]
                    prefix_names = ['frame', 'time', 'objID']
                    items = []

                    for channel in timeholder[frame].values():
                        for region_id in channel.region_names():
                            region = channel.get_region(region_id)
                            if obj_id in region:
                                flkp = self._map_feature_names(region.feature_names)
                                if not has_header:
                                    keys = ['classLabel', 'className']
                                    if channel.NAME == 'Primary':
                                        keys += ['centerX', 'centerY']
                                    keys += flkp.keys()
                                    line1 += [channel.NAME.upper()] * len(keys)
                                    line2 += [str(region_id)] * len(keys)
                                    line3 += keys
                                obj = region[obj_id]
                                features = region.features_by_name(obj_id, flkp.values())
                                values = [x if x is not None else '' for x in [obj.iLabel, obj.strClassName]]
                                if channel.NAME == 'Primary':
                                    values += [obj.oCenterAbs[0], obj.oCenterAbs[1]]
                                values += list(features)
                                items.extend(values)

                    if not has_header:
                        has_header = True
                        prefix_str = [''] * len(prefix)
                        line1 = prefix_str + line1
                        line2 = prefix_str + line2
                        line3 = prefix_names + line3
                        f.write('%s\n' %CSVParams.sep.join(line1))
                        f.write('%s\n' %CSVParams.sep.join(line2))
                        f.write('%s\n' %CSVParams.sep.join(line3))

                    f.write('%s\n' %CSVParams.sep.join([str(i) for i in prefix + items]))
                f.close()
Example #3
 def start_nodes(self):
     """Return all start nodes i.e. nodes without incoming edges."""
     start_nodes = [
         node_id for node_id in self.graph.node_list()
         if self.graph.in_degree(node_id) == 0
     ]
     start_nodes.sort(key=lambda x: Tracker.split_nodeid(x)[0])
     return start_nodes
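
Note: every example here revolves around Tracker.split_nodeid. Its exact node-id format is not shown in these snippets; a minimal stand-in, assuming ids are '_'-joined integers of the form 'frame_objlabel' with an optional trailing branch, would be:

    def split_nodeid(nodeid):
        # hypothetical stand-in for Tracker.split_nodeid; the id format here
        # is an assumption, not taken from the cecog sources
        return tuple(int(part) for part in nodeid.split('_'))

    # split_nodeid('17_4')   == (17, 4)    -> (frame, obj_label)
    # split_nodeid('17_4_2') == (17, 4, 2) -> (frame, obj_label, branch)

This also explains the try/except in Examples #1 and #4: unpacking a 2-tuple into three names raises ValueError, which is caught to default the branch to 1.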
Example #4
File: exporter.py Project: imcf/cecog
    def track_features(self, timeholder, visitor_data, channel_regions,
                       position, outdir):
        shutil.rmtree(outdir, ignore_errors=True)
        makedirs(outdir)

        for tracks in visitor_data.itervalues():
            for startid, event_data in tracks.iteritems():
                if startid.startswith('_'):
                    continue
                for chname, region in channel_regions.iteritems():
                    for region_name, feature_names in region.iteritems():
                        try:
                            frame, obj_label, branch = Tracker.split_nodeid(startid)
                        except ValueError:
                            frame, obj_label = Tracker.split_nodeid(startid)
                            branch = 1
                        filename = 'features_P%s_T%05d_O%04d_B%02d_C%s_R%s.txt' \
                            %(position, frame, obj_label, branch, chname, region_name)
                        filename = join(outdir, filename)

                        self._data_per_channel(timeholder, event_data, filename, chname,
                                               region_name, feature_names, position)
Example #5
 def centers(self):
     """Return the a list of the object centers for each track."""
     centers = dict()
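      # maps each event start id to [(frame, object id, absolute center), ...]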
     for startid, eventdata in self.iterevents():
         if startid in ['_full_tracks', '_current_branch']:
             continue
         data = list()
         for nodeids in zip(*eventdata['tracks']):
             for nodeid in nodeids:
                 obj = self.graph.node_data(nodeid)
                 frame = Tracker.split_nodeid(nodeid)[0]
                 data.append((int(frame), obj.iId, obj.oCenterAbs))
         centers[startid] = data
     return centers
Example #6
    def bboxes(self, size=None, border=0):
        bboxes = {}
        for startid, eventdata in self.iterevents():
            if startid in ['_full', '_current']:
                continue
            data = []
            for nodeids in zip(*eventdata['tracks']):
                nodeid = nodeids[0]
                frame = Tracker.split_nodeid(nodeid)[0]
                objids = [Tracker.split_nodeid(n)[1] for n in nodeids]
                objects = [self.graph.node_data(n) for n in nodeids]

                minX = min([obj.oRoi.upperLeft[0] for obj in objects])
                minY = min([obj.oRoi.upperLeft[1] for obj in objects])
                maxX = max([obj.oRoi.lowerRight[0] for obj in objects])
                maxY = max([obj.oRoi.lowerRight[1] for obj in objects])
                width = maxX - minX + 1
                height = maxY - minY + 1
                centerX = int(
                    round(np.average([obj.oCenterAbs[0] for obj in objects])))
                centerY = int(
                    round(np.average([obj.oCenterAbs[1] for obj in objects])))
                data.append((frame, centerX, centerY, width, height, objids))
            data1 = np.array(data, 'O')
            if size is not None and len(size) == 2:
                diffX = int(size[0] / 2)
                diffY = int(size[1] / 2)
                oddX, oddY = size[0] % 2, size[1] % 2
            else:
                diffX = int(max(data1[:, 3]) / 2 + border)
                diffY = int(max(data1[:, 4]) / 2 + border)
                # the derived size is even by construction -> no parity term
                oddX = oddY = 0
            # build (frame, bbox, objids) tuples; int() guards against
            # numpy scalar types from the object array
            timedata = [(int(d[0]), (d[1] - diffX, d[2] - diffY,
                                     d[1] + diffX - 1 + oddX,
                                     d[2] + diffY - 1 + oddY), d[5])
                        for d in data1]
            bboxes[startid] = timedata
        return bboxes
Example #7
    def bboxes(self, size=None, border=0):
        bboxes = {}
        for startid, eventdata in self.iterevents():
            if startid in ['_full', '_current']:
                continue
            data = []
            for nodeids in zip(*eventdata['tracks']):
                nodeid = nodeids[0]
                frame = Tracker.split_nodeid(nodeid)[0]
                objids = [Tracker.split_nodeid(n)[1] for n in nodeids]
                objects = [self.graph.node_data(n) for n in nodeids]

                minX = min([obj.oRoi.upperLeft[0] for obj in objects])
                minY = min([obj.oRoi.upperLeft[1] for obj in objects])
                maxX = max([obj.oRoi.lowerRight[0] for obj in objects])
                maxY = max([obj.oRoi.lowerRight[1] for obj in objects])
                width  = maxX - minX + 1
                height = maxY - minY + 1
                centerX = int(round(np.average([obj.oCenterAbs[0] for obj in objects])))
                centerY = int(round(np.average([obj.oCenterAbs[1] for obj in objects])))
                data.append((frame, centerX, centerY, width, height, objids))
            data1 = np.array(data, 'O')
            if size is not None and len(size) == 2:
                diffX = int(size[0] / 2)
                diffY = int(size[1] / 2)
                oddX, oddY = size[0] % 2, size[1] % 2
            else:
                diffX = int(max(data1[:,3])/2 + border)
                diffY = int(max(data1[:,4])/2 + border)
                # the derived size is even by construction -> no parity term
                oddX = oddY = 0
            # build (frame, bbox, objids) tuples; int() guards against
            # numpy scalar types from the object array
            timedata = [(int(d[0]),
                         (d[1] - diffX,
                          d[2] - diffY,
                          d[1] + diffX - 1 + oddX,
                          d[2] + diffY - 1 + oddY),
                         d[5]) for d in data1]
            bboxes[startid] = timedata
        return bboxes
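
For a fixed size, the crop arithmetic above always yields a size[0] x size[1] box; a quick standalone check (numbers invented for illustration):

    size = (100, 100)
    centerX, centerY = 120, 80
    diffX, diffY = int(size[0] / 2), int(size[1] / 2)
    bbox = (centerX - diffX, centerY - diffY,
            centerX + diffX - 1 + size[0] % 2,
            centerY + diffY - 1 + size[1] % 2)
    assert (bbox[2] - bbox[0] + 1, bbox[3] - bbox[1] + 1) == size
    # bbox == (70, 30, 169, 129)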
Example #8
File: exporter.py Project: imcf/cecog
    def _data_per_channel(self, timeholder, event_data, filename, channel_name, region_name,
                          feature_names, position):

        eventid = event_data['eventId']
        event_frame, _ = Tracker.split_nodeid(eventid)
        has_split = 'splitId' in event_data

        header_names = ['Frame', 'Timestamp', 'isEvent']
        if has_split:
            header_names.append('isSplit')
            if event_data['splitId'] is not None:
                split_frame, _ = Tracker.split_nodeid(event_data['splitId'])
            else:
                split_frame = None

        table = []
        # zip nodes with same time together
        for nodeids in zip(*event_data['tracks']):
            objids = []
            frame = None
            for nodeid in nodeids:
                node_frame, objid = Tracker.split_nodeid(nodeid)
                if frame is None:
                    frame = node_frame
                else:
                    assert frame == node_frame
                objids.append(objid)

            channel = timeholder[frame][channel_name]
            sample_holder = channel.get_region(region_name)

            if feature_names is None:
                feature_names = sample_holder.feature_names

            if CSVParams.objId not in header_names:
                # setup header line
                header_names.append(CSVParams.objId)
                header_names += [CSVParams.class_ %x for x in ['name', 'label', 'probability']]
                # only feature_names scales according to settings
                header_names += [CSVParams.feature %fn for fn in feature_names]
                header_names += [CSVParams.tracking %tf for tf in CSVParams.tracking_features]

            coordinate = Coordinate(position=position, time=frame)
            data = {'Frame' : frame,
                    'Timestamp': self.meta_data.get_timestamp_relative(coordinate),
                    'isEvent': int(frame==event_frame)}

            if has_split:
                data['isSplit'] = int(frame==split_frame)

            #for iIdx, iObjId in enumerate(lstObjectIds):
            objid = objids[0]
            if objid in sample_holder:
                sample = sample_holder[objid]
                data[CSVParams.objId] = objid

                # classification data
                if sample.iLabel is not None:
                    data[CSVParams.class_ %'label'] = sample.iLabel
                    data[CSVParams.class_ %'name'] = sample.strClassName
                    data[CSVParams.class_ %'probability'] = \
                        ','.join(['%d:%.5f' % (int(x),y) for x,y in sample.dctProb.iteritems()])
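                    # e.g. '1:0.97000,2:0.03000' (class label:probability pairs)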

                common_ftr = [f for f in set(sample_holder.feature_names).intersection(feature_names)]
                features = sample_holder.features_by_name(objid, common_ftr)
                for feature, fname in zip(features, common_ftr):
                    data[CSVParams.feature %fname] = feature

                # features not calculated are exported as NAN
                diff_ftr = [f for f in set(feature_names).difference(sample_holder.feature_names)]
                for df in diff_ftr:
                    data[CSVParams.feature %df] = float("NAN")

                # object tracking data (absolute center)
                data[CSVParams.tracking %'center_x'] = sample.oCenterAbs[0]
                data[CSVParams.tracking %'center_y'] = sample.oCenterAbs[1]
                data[CSVParams.tracking %'upperleft_x'] = sample.oRoi.upperLeft[0]
                data[CSVParams.tracking %'upperleft_y'] = sample.oRoi.upperLeft[1]
                data[CSVParams.tracking %'lowerright_x'] = sample.oRoi.lowerRight[0]
                data[CSVParams.tracking %'lowerright_y'] = sample.oRoi.lowerRight[1]
            else:
                # skip the entire event if the object ID is not valid
                return
            table.append(data)

        if len(table) > 0:
            with open(filename, 'wb') as fp:
                writer = csv.DictWriter(fp, fieldnames=header_names,
                                        delimiter=CSVParams.sep)
                writer.writeheader()
                writer.writerows(table)
Example #9
    def __init__(self, eventselector, strPathIn, oP, strPathOut,
                 imageCompression="85",
                 imageSuffix=".jpg",
                 border=0,
                 writeSubdirs=True,
                 writeDescription=True,
                 size=None,
                 oneFilePerTrack=False):

        self._bHasImages = False
        dctTimePoints = {}

        for strStartId, lstTimeData in eventselector.bboxes( \
            size=size, border=border).iteritems():
            items = Tracker.split_nodeid(strStartId)
            iStartT, iObjId = items[:2]
            if len(items) == 3:
                branch_id = items[2]
            else:
                branch_id = 1

            if writeSubdirs:
                strPathOutEvent = os.path.join(strPathOut,
                                               self._format_name(oP, iStartT, iObjId, branch_id))
            else:
                strPathOutEvent = strPathOut
            makedirs(strPathOutEvent)

            if writeDescription:
                oFile = open(os.path.join(strPathOutEvent,
                                          "_%s.txt" % self._format_name(oP, iStartT, iObjId, branch_id)), "w")
                lstData = ["Frame", "ObjId", "x1", "y1", "x2", "y2"]
                oFile.write("%s\n" % "\t".join(map(str, lstData)))

            for iCnt, (iT, tplBoundingBox, lstObjIds) in enumerate(lstTimeData):

                if writeDescription:
                    lstData = [iT, ';'.join(map(str, lstObjIds))] + list(tplBoundingBox)
                    oFile.write("%s\n" % "\t".join(map(str, lstData)))
                if iT not in dctTimePoints:
                    dctTimePoints[iT] = []
                dctTimePoints[iT].append((strStartId, lstObjIds, iCnt, strPathOutEvent, tplBoundingBox))

            if writeDescription:
                oFile.close()

        for idx, (iT, lstItems) in enumerate(dctTimePoints.iteritems()):

            #print iT, lstItems
            imgXY = self._getImage(strPathIn, iT)

            for strStartId, lstObjIds, iCnt, strPathOutEvent, tplBoundingBox in lstItems:

                x1, y1, x2, y2 = tplBoundingBox
                x1Corr = 0 if x1 < 0 else x1
                y1Corr = 0 if y1 < 0 else y1
                x2Corr = imgXY.width-1 if x2 >= imgXY.width else x2
                y2Corr = imgXY.height-1 if y2 >= imgXY.height else y2

                imgSub = ccore.subImage(imgXY,
                                        ccore.Diff2D(x1Corr, y1Corr),
                                        ccore.Diff2D(x2Corr-x1Corr+1, y2Corr-y1Corr+1))

                if (x1 < 0 or y1 < 0 or
                    x2 >= imgXY.width or y2 >= imgXY.height):
                    imgSub2 = self.IMAGE_CLASS(size[0], size[1])
                    ccore.copySubImage(imgSub, imgSub2, ccore.Diff2D(x1Corr-x1, y1Corr-y1))
                    imgSub = imgSub2

                assert imgSub.width == size[0]
                assert imgSub.width == x2-x1+1
                assert imgSub.height == size[1]
                assert imgSub.height == y2-y1+1

                if self.PROCESS_LABEL:
                    lstImages = []
                    for iObjId in lstObjIds:
                        lstImages.append(ccore.copyImageIfLabel(imgSub, imgSub, iObjId))
                    imgSub = ccore.projectImage(lstImages, ccore.ProjectionType.MaxProjection)

                strFilenameImage = os.path.join(strPathOutEvent, "P%s__T%05d%s" % (oP, iT, imageSuffix))
                ccore.writeImage(imgSub, strFilenameImage)

        if oneFilePerTrack and os.path.isdir(strPathOut):
            self.convertToOneFilePerTrack(strPathOut, imageCompression)
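
The clipping block above guards against boxes that run off the image: the box is clipped to the image bounds, cut out, and, if clipping occurred, pasted into a blank image of the requested size. ccore is cecog's C++ imaging core; a plain numpy sketch of the same coordinate arithmetic (an illustration, not the cecog API):

    import numpy as np

    def clip_and_pad(img, x1, y1, x2, y2):
        # cut img over the box (x1, y1)-(x2, y2); if the box leaves the
        # image, paste the valid part into a zero-filled frame of box size
        h, w = img.shape
        x1c, y1c = max(x1, 0), max(y1, 0)
        x2c, y2c = min(x2, w - 1), min(y2, h - 1)
        sub = img[y1c:y2c + 1, x1c:x2c + 1]
        if (x1c, y1c, x2c, y2c) != (x1, y1, x2, y2):
            out = np.zeros((y2 - y1 + 1, x2 - x1 + 1), dtype=img.dtype)
            out[y1c - y1:y1c - y1 + sub.shape[0],
                x1c - x1:x1c - x1 + sub.shape[1]] = sub
            return out
        return sub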
Example #10
    def __call__(self):

        thread = QThread.currentThread()
        well, site = self._posinfo()

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     self.datafile, self.meta_data,
                                     self.settings, self._frames,
                                     self.plate_id, well, site,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        self.setup_classifiers()

        # setup tracker
        if self.settings('Processing', 'tracking'):
            tropts = (self.settings('Tracking', 'tracking_maxobjectdistance'),
                      self.settings('Tracking', 'tracking_maxsplitobjects'),
                      self.settings('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position=self.position,
                          create_images=True,
                          binning_factor=1,
                          detect_objects=self.settings('Processing',
                                                       'objectdetection'))

        self.export_features = self.define_exp_features()
        self._analyze(ca)

        # invoke event selection
        if self.settings('Processing', 'eventselection') and \
           self.settings('Processing', 'tracking'):

            evchannel = self.settings('EventSelection', 'eventchannel')
            region = self.classifiers[evchannel].regions

            if evchannel != PrimaryChannel.NAME or region != self.settings(
                    "Tracking", "region"):
                graph = self._tracker.clone_graph(self.timeholder, evchannel,
                                                  region)
            else:
                graph = self._tracker.graph

            self._tes = self.setup_eventselection(graph)
            self.logger.info("Event detection")
            self._tes.find_events()
            if self.isAborted():
                return 0  # number of processed images

        # save all the data of the position, no aborts from here on
        # want all processed data saved
        if self.settings('Processing', 'tracking'):
            self.statusUpdate(text="Saving Tracking Data to cellh5...")
            self.save_tracks()

            if self.settings('Output', 'hdf5_include_events') and \
               self.settings('Processing', "eventselection"):
                self.statusUpdate(text="Saving Event Data to cellh5...")
                self.save_events()

        self.save_classification()
        self.timeholder.purge()

        try:
            n = len(self._frames)
            intval = stopwatch.stop() / n * 1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info("%d images analyzed, %3d ms per image set" %
                             (n, intval))

        self.clear()

        if isfile(self.datafile):
            with Ch5File(self.datafile, mode="r+") as ch5:
                ch5.savePlateLayout(self.layout, self.plate_id)
Example #11
 def _split_nodeid(self, nodeid):
     ret = Tracker.split_nodeid(nodeid)
     if len(ret) == 2:
         ret = ret + (1, )
     return ret
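
Note: this helper and the try/except blocks in Examples #1 and #4 implement the same normalization; node ids without an explicit branch component default to branch 1.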
Example #12
File: position.py Project: imcf/cecog
class PositionAnalyzer(PositionCore):
    def __init__(self, *args, **kw):
        super(PositionAnalyzer, self).__init__(*args, **kw)

        if not self.has_timelapse:
            self.settings.set('Processing', 'tracking', False)

        self._makedirs()
        self.add_file_handler(join(self._log_dir, "%s.log" % self.position),
                              self._lvl.DEBUG)

    def _makedirs(self):
        assert isinstance(self.position, basestring)
        assert isinstance(self._out_dir, basestring)

        self._analyzed_dir = join(self._out_dir, "analyzed")
        if self.has_timelapse:
            self._position_dir = join(self._analyzed_dir, self.position)
        else:
            self._position_dir = self._analyzed_dir

        odirs = (self._analyzed_dir,
                 join(self._out_dir, "log"),
                 join(self._out_dir, "log", "_finished"),
                 join(self._out_dir, "hdf5"),
                 join(self._out_dir, "plots"),
                 join(self._position_dir, "statistics"),
                 join(self._position_dir, "gallery"),
                 join(self._position_dir, "channel_gallery"),
                 join(self._position_dir, "images"),
                 join(self._position_dir, "images", "_labels"))

        for odir in odirs:
            try:
                makedirs(odir)
            except os.error:  # no permissions
                self.logger.error("mkdir %s: failed" % odir)
            else:
                self.logger.info("mkdir %s: ok" % odir)
            setattr(self, "_%s_dir" % basename(odir.lower()).strip("_"), odir)

    def setup_classifiers(self):
        sttg = self.settings
        # processing channel, color channel
        for p_channel, c_channel in self.ch_mapping.iteritems():
            self.settings.set_section('Processing')
            if sttg.get2(self._resolve_name(p_channel, 'classification')):
                sttg.set_section('Classification')
                clf = CommonClassPredictor(
                    clf_dir=sttg.get2(
                        self._resolve_name(p_channel,
                                           'classification_envpath')),
                    name=p_channel,
                    channels=self._channel_regions(p_channel),
                    color_channel=c_channel)
                clf.importFromArff()
                clf.loadClassifier()
                self.classifiers[p_channel] = clf

    def _convert_tracking_duration(self, option_name):
        """Converts a tracking duration according to the unit and the
        mean time-lapse of the current position.
        Returns number of frames.
        """
        value = self.settings.get(SECTION_NAME_TRACKING, option_name)
        unit = self.settings.get(SECTION_NAME_TRACKING,
                                 'tracking_duration_unit')

        # get mean and stddev for the current position
        info = self.meta_data.get_timestamp_info(self.position)
        if unit == TRACKING_DURATION_UNIT_FRAMES or info is None:
            result = value
        elif unit == TRACKING_DURATION_UNIT_MINUTES:
            result = (value * 60.) / info[0]
        elif unit == TRACKING_DURATION_UNIT_SECONDS:
            result = value / info[0]
        else:
            raise ValueError("Wrong unit '%s' specified." % unit)
        return int(round(result))
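
    # Worked example (time-lapse values assumed): with a mean time-lapse of
    # info[0] == 120 s per frame, 30 MINUTES -> (30 * 60.) / 120 == 15 frames,
    # and 90 SECONDS -> 90 / 120 == 0.75, which rounds to 1 frame.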

    @property
    def _es_options(self):
        transitions = eval(self.settings.get2('tracking_labeltransitions'))
        if not isinstance(transitions[0], tuple):
            transitions = (transitions, )
        evopts = {
            'transitions': transitions,
            'backward_labels': map(
                int, self.settings.get2('tracking_backwardlabels').split(',')),
            'forward_labels': map(
                int, self.settings.get2('tracking_forwardlabels').split(',')),
            'backward_check': self._convert_tracking_duration('tracking_backwardCheck'),
            'forward_check': self._convert_tracking_duration('tracking_forwardCheck'),
            'backward_range': self._convert_tracking_duration('tracking_backwardrange'),
            'forward_range': self._convert_tracking_duration('tracking_forwardrange'),
            'backward_range_min': self.settings.get2('tracking_backwardrange_min'),
            'forward_range_min': self.settings.get2('tracking_forwardrange_min'),
            'max_in_degree': self.settings.get2('tracking_maxindegree'),
            'max_out_degree': self.settings.get2('tracking_maxoutdegree'),
        }
        return evopts

    def define_exp_features(self):
        features = {}
        for name in self.processing_channels:
            region_features = {}
            for region in REGION_INFO.names[name.lower()]:
                # export all features extracted per regions
                if self.settings.get('Output', 'events_export_all_features') or \
                        self.settings.get('Output', 'export_track_data'):
                    # None means all features
                    region_features[region] = None
                # export selected features from settings
                else:
                    region_features[region] = \
                        self.settings.get('General',
                                          '%s_featureextraction_exportfeaturenames'
                                          % name.lower())
                features[name] = region_features
        return features

    def export_object_counts(self):
        fname = join(self._statistics_dir,
                     'P%s__object_counts.txt' % self.position)

        # at least the total count for primary is always exported
        ch_info = OrderedDict([('Primary', ('primary', [], []))])
        for name, clf in self.classifiers.iteritems():
            names = clf.class_names.values()
            colors = [clf.hexcolors[n] for n in names]
            ch_info[name] = (clf.regions, names, colors)

        self.timeholder.exportObjectCounts(fname, self.position,
                                           self.meta_data, ch_info)
        pplot_ymax = \
            self.settings.get('Output', 'export_object_counts_ylim_max')

        # plot only for primary channel so far!
        self.timeholder.exportPopulationPlots(fname, self._plots_dir,
                                              self.position, self.meta_data,
                                              ch_info['Primary'], pplot_ymax)

    def export_object_details(self):
        fname = join(self._statistics_dir,
                     'P%s__object_details.txt' % self.position)
        self.timeholder.exportObjectDetails(fname, excel_style=False)
        fname = join(self._statistics_dir,
                     'P%s__object_details_excel.txt' % self.position)
        self.timeholder.exportObjectDetails(fname, excel_style=True)

    def export_image_names(self):
        self.timeholder.exportImageFileNames(self._statistics_dir,
                                             self.position,
                                             self._imagecontainer._importer,
                                             self.ch_mapping)

    def export_full_tracks(self):
        odir = join(self._statistics_dir, 'full')
        exporter = EventExporter(self.meta_data)
        exporter.full_tracks(self.timeholder, self._tes.visitor_data,
                             self.position, odir)

    def export_graphviz(self, channel_name='Primary', region_name='primary'):
        filename = 'tracking_graph___P%s.dot' % self.position
        exporter = TrackExporter()
        exporter.graphviz_dot(join(self._statistics_dir, filename),
                              self._tracker)

        sample_holders = OrderedDict()
        for frame in self.timeholder.keys():
            channel = self.timeholder[frame][channel_name]
            sample_holders[frame] = channel.get_region(region_name)

        filename = join(self._statistics_dir,
                        filename.replace('.dot', '_features.csv'))
        exporter.tracking_data(filename, sample_holders)

    def export_gallery_images(self):
        for ch_name in self.processing_channels:
            cutter_in = join(self._images_dir, ch_name)
            if isdir(cutter_in):
                cutter_out = join(self._gallery_dir, ch_name)
                self.logger.info("running Cutter for '%s'..." % ch_name)
                image_size = \
                    self.settings.get('Output', 'events_gallery_image_size')
                EventGallery(self._tes,
                             cutter_in,
                             self.position,
                             cutter_out,
                             self.meta_data,
                             oneFilePerTrack=True,
                             size=(image_size, image_size))
            # FIXME: be careful here. normally only raw images are
            #        used for the cutter and can be deleted
            #        afterwards
            shutil.rmtree(cutter_in, ignore_errors=True)

    def export_tracks_hdf5(self):
        """Save tracking data to hdf file"""
        self.logger.debug("--- serializing tracking start")
        self.timeholder.serialize_tracking(self._tes.graph)
        self.logger.debug("--- serializing tracking ok")

    def export_events(self):
        """Export and save event selceciton data"""
        exporter = EventExporter(self.meta_data)
        # writes to the event folder
        odir = join(self._statistics_dir, "events")
        exporter.track_features(self.timeholder, self._tes.visitor_data,
                                self.export_features, self.position, odir)
        self.logger.debug("--- visitor analysis ok")
        # writes event data to hdf5
        self.timeholder.serialize_events(self._tes)
        self.logger.debug("--- serializing events ok")

    def __call__(self):
        # include hdf5 file name in hdf5_options
        # perhaps timeholder might be a good place to read out the options;
        # the file must not exist to proceed
        hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     hdf5_fname, self.meta_data, self.settings,
                                     self._frames, self.plate_id,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        # setup tracker
        if self.settings.get('Processing', 'tracking'):
            region = self.settings.get('Tracking', 'tracking_regionname')
            tropts = (self.settings.get('Tracking',
                                        'tracking_maxobjectdistance'),
                      self.settings.get('Tracking',
                                        'tracking_maxsplitobjects'),
                      self.settings.get('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)
            self._tes = EventSelection(self._tracker.graph, **self._es_options)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position=self.position,
                          create_images=True,
                          binning_factor=1,
                          detect_objects=self.settings.get(
                              'Processing', 'objectdetection'))

        self.setup_classifiers()
        self.export_features = self.define_exp_features()
        n_images = self._analyze(ca)

        if n_images > 0:
            # invoke event selection
            if self.settings.get('Processing', 'tracking_synchronize_trajectories') and \
                    self.settings.get('Processing', 'tracking'):
                self.logger.debug("--- visitor start")
                self._tes.find_events()
                self.logger.debug("--- visitor ok")
                if self.is_aborted():
                    return 0  # number of processed images

            # save all the data of the position, no aborts from here on
            # want all processed data saved
            if self.settings.get('Output', 'export_object_counts'):
                self.export_object_counts()
            if self.settings.get('Output', 'export_object_details'):
                self.export_object_details()
            if self.settings.get('Output', 'export_file_names'):
                self.export_image_names()

            if self.settings.get('Processing', 'tracking'):
                self.export_tracks_hdf5()
                self.update_status({'text': 'export events...'})
                if self.settings.get('Processing',
                                     'tracking_synchronize_trajectories'):
                    self.export_events()
                if self.settings.get('Output', 'export_track_data'):
                    self.export_full_tracks()
                if self.settings.get('Output', 'export_tracking_as_dot'):
                    self.export_graphviz()

            self.update_status({
                'text': 'export events...',
                'max': 1,
                'progress': 1
            })

            # remove all features from all channels to free memory
            # for the generation of gallery images
            self.timeholder.purge_features()
            if self.settings.get('Output', 'events_export_gallery_images'):
                self.export_gallery_images()

        try:
            intval = stopwatch.stop() / n_images * 1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info(
                " - %d image sets analyzed, %3d ms per image set" %
                (n_images, intval))

        self.touch_finished()
        #        self.clear()
        return n_images

    @property
    def hdf5_filename(self):
        return self.timeholder.hdf5_filename

    def touch_finished(self, times=None):
        """Writes an empty file to mark this position as finished"""
        fname = join(self._finished_dir, '%s__finished.txt' % self.position)
        with open(fname, "w") as f:
            os.utime(fname, times)

    def clear(self):
        # closes hdf5
        self.timeholder.close_all()

    def _analyze(self, cellanalyzer):
        super(PositionAnalyzer, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position, self._frames,
                         list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            if self.is_aborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' % (frame, self._frames.index(frame) + 1,
                                        len(self._frames))
                self.update_status({
                    'progress': self._frames.index(frame) + 1,
                    'text': txt,
                    'interval': stopwatch.interim()
                })

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()
            n_images += 1
            images = []

            if self.settings.get('Processing', 'tracking'):
                region = self.settings.get('Tracking', 'tracking_regionname')
                samples = self.timeholder[frame][
                    PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if self.settings.get('Tracking', 'tracking_visualization'):
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    nframes = self.settings.get(
                        'Tracking', 'tracking_visualize_track_length')
                    radius = self.settings.get('Tracking',
                                               'tracking_centroid_radius')
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, nframes, radius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            for clf in self.classifiers.itervalues():
                cellanalyzer.classify_objects(clf)

            ##############################################################
            # FIXME - part for browser
            if self._myhack is not None:
                self.render_browser(cellanalyzer)
            ##############################################################

            self.settings.set_section('General')
            self.render_classification_images(cellanalyzer, images, frame)
            self.render_contour_images(cellanalyzer, images, frame)

            if self.settings.get('Output', 'rendering_channel_gallery'):
                self.render_channel_gallery(cellanalyzer, frame)

            if self.settings.get('Output', 'rendering_labels_discwrite'):
                cellanalyzer.exportLabelImages(self._labels_dir)

            self.logger.info(" - Frame %d, duration (ms): %3d" \
                                 %(frame, stopwatch.interim()*1000))
            cellanalyzer.purge(features=self.export_features)

        return n_images

    def render_channel_gallery(self, cellanalyzer, frame):
        for channel in cellanalyzer.virtual_channels.itervalues():
            chgal = ChannelGallery(channel, frame, self._channel_gallery_dir)
            chgal.make_gallery()

    def render_contour_images(self, ca, images, frame):
        for region, render_par in self.settings.get2('rendering').iteritems():
            out_dir = join(self._images_dir, region)
            write = self.settings.get('Output', 'rendering_contours_discwrite')

            if region not in self.CHANNELS.keys():
                img, fname = ca.render(out_dir,
                                       dctRenderInfo=render_par,
                                       writeToDisc=write,
                                       images=images)
                msg = 'PL %s - P %s - T %05d' % (self.plate_id, self.position,
                                                 frame)
                self.set_image(img, msg, fname, region, 50)
            # gallery images are treated differently
            else:
                ca.render(out_dir, dctRenderInfo=render_par, writeToDisc=True)

    def render_classification_images(self, cellanalyzer, images, frame):
        for region, render_par in self.settings.get2(
                'rendering_class').iteritems():
            out_images = join(self._images_dir, region)
            write = self.settings.get('Output', 'rendering_class_discwrite')
            img_rgb, fname = cellanalyzer.render(out_images,
                                                 dctRenderInfo=render_par,
                                                 writeToDisc=write,
                                                 images=images)

            msg = 'PL %s - P %s - T %05d' % (self.plate_id, self.position,
                                             frame)
            self.set_image(img_rgb, msg, fname, region, 50)

    def render_browser(self, cellanalyzer):
        d = {}
        for name in cellanalyzer.get_channel_names():
            channel = cellanalyzer.get_channel(name)
            d[channel.strChannelId] = channel.meta_image.image
            self._myhack.show_image(d)

        channel_name, region_name = self._myhack._object_region
        channel = cellanalyzer.get_channel(channel_name)
        if channel.has_region(region_name):
            region = channel.get_region(region_name)
            coords = {}
            for obj_id, obj in region.iteritems():
                coords[obj_id] = obj.crack_contour
            self._myhack.set_coords(coords)
Example #13
    def _data_per_channel(self, timeholder, event_data, filename, channel_name,
                          region_name, feature_names, position):

        eventid = event_data['eventId']
        event_frame, _ = Tracker.split_nodeid(eventid)
        has_split = 'splitId' in event_data

        header_names = ['Frame', 'Timestamp', 'isEvent']
        if has_split:
            header_names.append('isSplit')
            if event_data['splitId'] is not None:
                split_frame, _ = Tracker.split_nodeid(event_data['splitId'])
            else:
                split_frame = None

        table = []
        # zip nodes with same time together
        for nodeids in zip(*event_data['tracks']):
            objids = []
            frame = None
            for nodeid in nodeids:
                node_frame, objid = Tracker.split_nodeid(nodeid)
                if frame is None:
                    frame = node_frame
                else:
                    assert frame == node_frame
                objids.append(objid)

            channel = timeholder[frame][channel_name]
            sample_holder = channel.get_region(region_name)

            if feature_names is None:
                feature_names = sample_holder.feature_names

            if CSVParams.objId not in header_names:
                # setup header line
                header_names.append(CSVParams.objId)
                header_names += [
                    CSVParams.class_ % x
                    for x in ['name', 'label', 'probability']
                ]
                # only feature_names scales according to settings
                header_names += [
                    CSVParams.feature % fn for fn in feature_names
                ]
                header_names += [
                    CSVParams.tracking % tf
                    for tf in CSVParams.tracking_features
                ]

            coordinate = Coordinate(position=position, time=frame)
            data = {
                'Frame': frame,
                'Timestamp': self.meta_data.get_timestamp_relative(coordinate),
                'isEvent': int(frame == event_frame)
            }

            if has_split:
                data['isSplit'] = int(frame == split_frame)

            #for iIdx, iObjId in enumerate(lstObjectIds):
            objid = objids[0]
            if objid in sample_holder:
                sample = sample_holder[objid]
                data[CSVParams.objId] = objid

                # classification data
                if sample.iLabel is not None:
                    data[CSVParams.class_ % 'label'] = sample.iLabel
                    data[CSVParams.class_ % 'name'] = sample.strClassName
                    data[CSVParams.class_ %'probability'] = \
                        ','.join(['%d:%.5f' % (int(x),y) for x,y in
                                  sample.dctProb.iteritems()])

                common_ftr = [
                    f for f in set(sample_holder.feature_names).intersection(
                        feature_names)
                ]
                features = sample_holder.features_by_name(objid, common_ftr)
                for feature, fname in zip(features, common_ftr):
                    data[CSVParams.feature % fname] = feature

                # features not calculated are exported as NAN
                diff_ftr = [
                    f for f in set(feature_names).difference(
                        sample_holder.feature_names)
                ]
                for df in diff_ftr:
                    data[CSVParams.feature % df] = float("NAN")

                # object tracking data (absolute center)
                data[CSVParams.tracking % 'center_x'] = sample.oCenterAbs[0]
                data[CSVParams.tracking % 'center_y'] = sample.oCenterAbs[1]
                data[CSVParams.tracking % 'upperleft_x'] = sample.oRoi.upperLeft[0]
                data[CSVParams.tracking % 'upperleft_y'] = sample.oRoi.upperLeft[1]
                data[CSVParams.tracking % 'lowerright_x'] = sample.oRoi.lowerRight[0]
                data[CSVParams.tracking % 'lowerright_y'] = sample.oRoi.lowerRight[1]
            else:
                # skip the entire event if the object ID is not valid
                return
            table.append(data)

        if len(table) > 0:
            with open(filename, 'wb') as fp:
                writer = csv.DictWriter(fp,
                                        fieldnames=header_names,
                                        delimiter=CSVParams.sep)
                writer.writeheader()
                writer.writerows(table)
Example #14
class PositionAnalyzer(PositionCore):
    def __init__(self, *args, **kw):
        super(PositionAnalyzer, self).__init__(*args, **kw)

        if not self.has_timelapse:
            self.settings.set('Processing', 'tracking', False)

        if self._writelogs:
            logfile = join(dirname(dirname(self.datafile)), "log",
                           "%s.log" % self.position)
            self.add_file_handler(logfile, self.Levels.DEBUG)
        self.logger.setLevel(self.Levels.DEBUG)

    def setup_classifiers(self):
        sttg = self.settings

        # processing channel, color channel
        for p_channel, c_channel in self.ch_mapping.iteritems():
            self.settings.set_section('Processing')
            if sttg.get2(self._resolve_name(p_channel, 'classification')):
                chreg = self._channel_regions(p_channel)

                sttg.set_section('Classification')
                cpath = sttg.get2(
                    self._resolve_name(p_channel, 'classification_envpath'))
                cpath = join(cpath, basename(cpath) + ".hdf")
                svc = SupportVectorClassifier(cpath,
                                              load=True,
                                              channels=chreg,
                                              color_channel=c_channel)
                svc.close()
                self.classifiers[p_channel] = svc

    @property
    def _transitions(self):
        try:
            transitions = np.array(
                eval(self.settings.get('EventSelection', 'labeltransitions')))
            transitions = transitions.reshape((-1, 2))
        except Exception:
            raise RuntimeError("Make sure that transitions are of the form "
                               "'int, int' or '(int, int), (int, int)', i.e. "
                               "a 2-int-tuple or a list of 2-int-tuples")

        return transitions

    def setup_eventselection(self, graph):
        """Setup the method for event selection."""

        opts = {
            'transitions': self._transitions,
            'backward_range': self._convert_tracking_duration('backwardrange'),
            'forward_range': self._convert_tracking_duration('forwardrange'),
            'max_in_degree': self.settings.get('EventSelection',
                                               'maxindegree'),
            'max_out_degree': self.settings.get('EventSelection',
                                                'maxoutdegree')
        }

        opts.update({
            'backward_labels': [
                int(i) for i in self.settings.get(
                    'EventSelection', 'backwardlabels').split(',')],
            'forward_labels': [
                int(i) for i in self.settings.get(
                    'EventSelection', 'forwardlabels').split(',')],
            'backward_range_min': self.settings.get('EventSelection',
                                                    'backwardrange_min'),
            'forward_range_min': self.settings.get('EventSelection',
                                                   'forwardrange_min'),
            'backward_check': self._convert_tracking_duration('backwardCheck'),
            'forward_check': self._convert_tracking_duration('forwardCheck')})
        es = EventSelection(graph, **opts)

        return es

    def _convert_tracking_duration(self, option_name):
        """Converts a tracking duration according to the unit and the
        mean time-lapse of the current position.
        Returns number of frames.
        """
        value = self.settings.get('EventSelection', option_name)
        unit = self.settings.get('EventSelection', 'duration_unit')

        # get mean and stddev for the current position
        info = self.meta_data.get_timestamp_info(self.position)
        if unit == TimeConverter.FRAMES or info is None:
            result = value
        elif unit == TimeConverter.MINUTES:
            result = (value * 60.) / info[0]
        elif unit == TimeConverter.SECONDS:
            result = value / info[0]
        else:
            raise ValueError("Wrong unit '%s' specified." % unit)
        return int(round(result))

    def define_exp_features(self):
        features = {}
        for name in self.processing_channels:
            region_features = {}

            for region in MetaPluginManager().region_info.names[name.lower()]:
                if name is self.MERGED_CHANNEL:
                    continue

                region_features[region] = \
                        self.settings.get('General',
                                          '%s_featureextraction_exportfeaturenames'
                                          % name.lower())

                features[name] = region_features

            # special case for merged channel
            if name is self.MERGED_CHANNEL:
                mftrs = list()
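                # merged-channel features get '<channel>_<region>_<feature>' names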
                for channel, region in self._channel_regions(name).iteritems():
                    if features[channel][region] is None:
                        mftrs = None
                    else:
                        for feature in features[channel][region]:
                            mftrs.append("_".join((channel, region, feature)))
                region_features[
                    tuple(self._all_channel_regions[name].values())] = mftrs
                features[name] = region_features

        return features

    def save_tracks(self):
        """Save tracking data to hdf file"""
        self.logger.info("Save tracking data")
        self.timeholder.serialize_tracking(self._tracker.graph)

    def save_events(self):
        self.logger.info("Save Event data")
        self.timeholder.serialize_events(self._tes)

    def save_classification(self):
        """Save classlabels of each object to the hdf file."""
        # works for both the supervised and the unsupervised case
        for channels in self.timeholder.itervalues():
            for chname, classifier in self.classifiers.iteritems():
                holder = channels[chname].get_region(classifier.regions)
                # if classifier.feature_names is None:
                #     # special for unsupervised case
                #     classifier.feature_names = holder.feature_names
                self.timeholder.save_classlabels(channels[chname], holder,
                                                 classifier)

    def __call__(self):

        thread = QThread.currentThread()
        well, site = self._posinfo()

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     self.datafile, self.meta_data,
                                     self.settings, self._frames,
                                     self.plate_id, well, site,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        self.setup_classifiers()

        # setup tracker
        if self.settings('Processing', 'tracking'):
            tropts = (self.settings('Tracking', 'tracking_maxobjectdistance'),
                      self.settings('Tracking', 'tracking_maxsplitobjects'),
                      self.settings('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position=self.position,
                          create_images=True,
                          binning_factor=1,
                          detect_objects=self.settings('Processing',
                                                       'objectdetection'))

        self.export_features = self.define_exp_features()
        self._analyze(ca)

        # invoke event selection
        if self.settings('Processing', 'eventselection') and \
           self.settings('Processing', 'tracking'):

            evchannel = self.settings('EventSelection', 'eventchannel')
            region = self.classifiers[evchannel].regions

            if evchannel != PrimaryChannel.NAME or region != self.settings(
                    "Tracking", "region"):
                graph = self._tracker.clone_graph(self.timeholder, evchannel,
                                                  region)
            else:
                graph = self._tracker.graph

            self._tes = self.setup_eventselection(graph)
            self.logger.info("Event detection")
            self._tes.find_events()
            if self.isAborted():
                return 0  # number of processed images

        # save all the data of the position, no aborts from here on
        # want all processed data saved
        if self.settings('Processing', 'tracking'):
            self.statusUpdate(text="Saving Tracking Data to cellh5...")
            self.save_tracks()

            if self.settings('Output', 'hdf5_include_events') and \
               self.settings('Processing', "eventselection"):
                self.statusUpdate(text="Saving Event Data to cellh5...")
                self.save_events()

        self.save_classification()
        self.timeholder.purge()

        try:
            n = len(self._frames)
            intval = stopwatch.stop() / n * 1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info("%d images analyzed, %3d ms per image set" %
                             (n, intval))

        self.clear()

        if isfile(self.datafile):
            with Ch5File(self.datafile, mode="r+") as ch5:
                ch5.savePlateLayout(self.layout, self.plate_id)

    def clear(self):
        if self.timeholder is not None:
            self.timeholder.close_all()
        # close and remove handlers from logging object
        self.close()

    def _analyze(self, cellanalyzer):

        thread = QThread.currentThread()

        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position, self._frames,
                         list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            self.interruptionPoint()
            txt = '%s, %s, T %d (%d/%d)' \
                  %(self.plate_id, self.position, frame,
                    self._frames.index(frame)+1, len(self._frames))

            self.statusUpdate(text=txt,
                              interval=stopwatch.interim(),
                              increment=True)

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()

            self.logger.debug(" - Frame %d, cellanalyzer.process (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            images = []

            if self.settings('Processing', 'tracking'):
                apc = AppPreferences()
                region = self.settings('Tracking', 'region')
                samples = self.timeholder[frame][
                    PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if apc.display_tracks:
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, apc.track_length, apc.cradius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]
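                    # each tuple is (image, hex color, alpha): track connections
                    # in yellow, split events in cyan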

            self.logger.debug(" - Frame %d, Tracking (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            # can't cluster on a per frame basis
            if self.settings("EventSelection", "supervised_event_selection"):
                for channel, clf in self.classifiers.iteritems():
                    cellanalyzer.classify_objects(clf, channel)

            self.logger.debug(" - Frame %d, Classification (ms): %3d" \
                             % (frame, stopwatch.interval()*1000))

            self.settings.set_section('General')
            # want to emit all images at once

            imgs = {}
            imgs.update(
                self.render_classification_images(cellanalyzer, images, frame))
            imgs.update(self.render_contour_images(cellanalyzer, images,
                                                   frame))
            msg = 'PL %s - P %s - T %05d' % (self.plate_id, self.position,
                                             frame)
            self.setImage(imgs, msg, 50)

            cellanalyzer.purge(features=self.export_features)
            self.logger.debug(" - Frame %d, duration (ms): %3d" \
                              %(frame, stopwatch.interim()*1000))

    def render_contour_images(self, ca, images, frame):
        images_ = dict()
        for region, render_par in self.settings.get2('rendering').iteritems():
            img = ca.render(dctRenderInfo=render_par, images=images)
            images_[region] = img

        return images_

    def render_classification_images(self, cellanalyzer, images, frame):
        images_ = dict()
        for region, render_par in self.settings.get2(
                'rendering_class').iteritems():
            image = cellanalyzer.render(dctRenderInfo=render_par,
                                        images=images)
            images_[region] = image
        return images_
Example #17
0
File: position.py Project: imcf/cecog
class PositionAnalyzer(PositionCore):

    def __init__(self, *args, **kw):
        super(PositionAnalyzer, self).__init__(*args, **kw)

        if not self.has_timelapse:
            self.settings.set('Processing', 'tracking', False)

        self._makedirs()
        self.add_file_handler(join(self._log_dir, "%s.log" %self.position),
                              self._lvl.DEBUG)

    def _makedirs(self):
        assert isinstance(self.position, basestring)
        assert isinstance(self._out_dir, basestring)

        self._analyzed_dir = join(self._out_dir, "analyzed")
        if self.has_timelapse:
            self._position_dir = join(self._analyzed_dir, self.position)
        else:
            self._position_dir = self._analyzed_dir

        odirs = (self._analyzed_dir,
                 join(self._out_dir, "log"),
                 join(self._out_dir, "log", "_finished"),
                 join(self._out_dir, "hdf5"),
                 join(self._out_dir, "plots"),
                 join(self._position_dir, "statistics"),
                 join(self._position_dir, "gallery"),
                 join(self._position_dir, "channel_gallery"),
                 join(self._position_dir, "images"),
                 join(self._position_dir, "images","_labels"))

        for odir in odirs:
            try:
                makedirs(odir)
            except os.error: # no permissions
                self.logger.error("mkdir %s: failed" %odir)
            else:
                self.logger.info("mkdir %s: ok" %odir)
            setattr(self, "_%s_dir" %basename(odir.lower()).strip("_"), odir)

    def setup_classifiers(self):
        sttg = self.settings
        # processing channel, color channel
        for p_channel, c_channel in self.ch_mapping.iteritems():
            self.settings.set_section('Processing')
            if sttg.get2(self._resolve_name(p_channel, 'classification')):
                sttg.set_section('Classification')
                clf = CommonClassPredictor(
                    clf_dir=sttg.get2(self._resolve_name(p_channel,
                                                         'classification_envpath')),
                    name=p_channel,
                    channels=self._channel_regions(p_channel),
                    color_channel=c_channel)
                clf.importFromArff()
                clf.loadClassifier()
                self.classifiers[p_channel] = clf

    def _convert_tracking_duration(self, option_name):
        """Converts a tracking duration according to the unit and the
        mean time-lapse of the current position.
        Returns number of frames.
        """
        value = self.settings.get(SECTION_NAME_TRACKING, option_name)
        unit = self.settings.get(SECTION_NAME_TRACKING,
                                  'tracking_duration_unit')

        # get mean and stddev for the current position
        info = self.meta_data.get_timestamp_info(self.position)
        if unit == TRACKING_DURATION_UNIT_FRAMES or info is None:
            result = value
        elif unit == TRACKING_DURATION_UNIT_MINUTES:
            result = (value * 60.) / info[0]
        elif unit == TRACKING_DURATION_UNIT_SECONDS:
            result = value / info[0]
        else:
            raise ValueError("Wrong unit '%s' specified." %unit)
        return int(round(result))
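
    # Worked example (illustrative numbers, not from the source): with a mean
    # time-lapse of 120 s per frame, a duration of 10 minutes converts to
    # int(round((10 * 60.) / 120)) == 5 frames, and 90 seconds to
    # int(round(90 / 120.)) == 1 frame.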

    @property
    def _es_options(self):
        transitions = eval(self.settings.get2('tracking_labeltransitions'))
        if not isinstance(transitions[0], tuple):
            transitions = (transitions, )
        evopts = {'transitions': transitions,
                  'backward_labels': map(int, self.settings.get2('tracking_backwardlabels').split(',')),
                  'forward_labels': map(int, self.settings.get2('tracking_forwardlabels').split(',')),
                  'backward_check': self._convert_tracking_duration('tracking_backwardCheck'),
                  'forward_check': self._convert_tracking_duration('tracking_forwardCheck'),
                  'backward_range': self._convert_tracking_duration('tracking_backwardrange'),
                  'forward_range': self._convert_tracking_duration('tracking_forwardrange'),
                  'backward_range_min': self.settings.get2('tracking_backwardrange_min'),
                  'forward_range_min': self.settings.get2('tracking_forwardrange_min'),
                  'max_in_degree': self.settings.get2('tracking_maxindegree'),
                  'max_out_degree': self.settings.get2('tracking_maxoutdegree')}
        return evopts

    def define_exp_features(self):
        features = {}
        for name in self.processing_channels:
            region_features = {}
            for region in REGION_INFO.names[name.lower()]:
                # export all features extracted per regions
                if self.settings.get('Output', 'events_export_all_features') or \
                        self.settings.get('Output', 'export_track_data'):
                    # None means all features
                    region_features[region] = None
                # export selected features from settings
                else:
                    region_features[region] = \
                        self.settings.get('General',
                                          '%s_featureextraction_exportfeaturenames'
                                          % name.lower())
                features[name] = region_features
        return features
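
    # Illustrative result (feature names assumed, not from the source):
    #     {'Primary': {'primary': ['roisize', 'n2_avg']}}
    # where None instead of a list means "export all features".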

    def export_object_counts(self):
        fname = join(self._statistics_dir, 'P%s__object_counts.txt' % self.position)

        # at least the total count for primary is always exported
        ch_info = OrderedDict([('Primary', ('primary', [], []))])
        for name, clf in self.classifiers.iteritems():
            names = clf.class_names.values()
            colors = [clf.hexcolors[n] for n in names]
            ch_info[name] = (clf.regions, names, colors)
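        # ch_info now maps channel name -> (region(s), class names, hex colors)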

        self.timeholder.exportObjectCounts(fname, self.position, self.meta_data, ch_info)
        pplot_ymax = \
            self.settings.get('Output', 'export_object_counts_ylim_max')

        # plot only for primary channel so far!
        self.timeholder.exportPopulationPlots(fname, self._plots_dir, self.position,
                                              self.meta_data, ch_info['Primary'], pplot_ymax)


    def export_object_details(self):
        fname = join(self._statistics_dir,
                        'P%s__object_details.txt' % self.position)
        self.timeholder.exportObjectDetails(fname, excel_style=False)
        fname = join(self._statistics_dir,
                        'P%s__object_details_excel.txt' % self.position)
        self.timeholder.exportObjectDetails(fname, excel_style=True)

    def export_image_names(self):
        self.timeholder.exportImageFileNames(self._statistics_dir,
                                             self.position,
                                             self._imagecontainer._importer,
                                             self.ch_mapping)

    def export_full_tracks(self):
        odir = join(self._statistics_dir, 'full')
        exporter = EventExporter(self.meta_data)
        exporter.full_tracks(self.timeholder, self._tes.visitor_data,
                             self.position, odir)

    def export_graphviz(self, channel_name='Primary', region_name='primary'):
        filename = 'tracking_graph___P%s.dot' %self.position
        exporter = TrackExporter()
        exporter.graphviz_dot(join(self._statistics_dir, filename),
                              self._tracker)

        sample_holders = OrderedDict()
        for frame in self.timeholder.keys():
            channel = self.timeholder[frame][channel_name]
            sample_holders[frame] = channel.get_region(region_name)

        filename = join(self._statistics_dir, filename.replace('.dot', '_features.csv'))
        exporter.tracking_data(filename, sample_holders)
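
        # The .dot file can be rendered offline with the graphviz CLI
        # (assuming graphviz is installed; position id illustrative):
        #     dot -Tpng tracking_graph___P0037.dot -o tracking_graph.png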

    def export_gallery_images(self):
        for ch_name in self.processing_channels:
            cutter_in = join(self._images_dir, ch_name)
            if isdir(cutter_in):
                cutter_out = join(self._gallery_dir, ch_name)
                self.logger.info("running Cutter for '%s'..." %ch_name)
                image_size = \
                    self.settings.get('Output', 'events_gallery_image_size')
                EventGallery(self._tes, cutter_in, self.position, cutter_out,
                             self.meta_data, oneFilePerTrack=True,
                             size=(image_size, image_size))
            # FIXME: be careful here. normally only raw images are
            #        used for the cutter and can be deleted
            #        afterwards
            shutil.rmtree(cutter_in, ignore_errors=True)

    def export_tracks_hdf5(self):
        """Save tracking data to hdf file"""
        self.logger.debug("--- serializing tracking start")
        self.timeholder.serialize_tracking(self._tes.graph)
        self.logger.debug("--- serializing tracking ok")

    def export_events(self):
        """Export and save event selceciton data"""
        exporter = EventExporter(self.meta_data)
        # writes to the event folder
        odir = join(self._statistics_dir, "events")
        exporter.track_features(self.timeholder, self._tes.visitor_data,
                                self.export_features, self.position, odir)
        self.logger.debug("--- visitor analysis ok")
        # writes event data to hdf5
        self.timeholder.serialize_events(self._tes)
        self.logger.debug("--- serializing events ok")

    def __call__(self):
        # include hdf5 file name in hdf5_options
        # perhaps timeholder might be a good place to read out the options
        # file must not exist to proceed
        hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     hdf5_fname,
                                     self.meta_data, self.settings,
                                     self._frames,
                                     self.plate_id,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        # setup tracker
        if self.settings.get('Processing', 'tracking'):
            region = self.settings.get('Tracking', 'tracking_regionname')
            tropts = (self.settings.get('Tracking', 'tracking_maxobjectdistance'),
                      self.settings.get('Tracking', 'tracking_maxsplitobjects'),
                      self.settings.get('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)
            self._tes = EventSelection(self._tracker.graph, **self._es_options)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position=self.position,
                          create_images=True,
                          binning_factor=1,
                          detect_objects=self.settings.get('Processing',
                                                           'objectdetection'))

        self.setup_classifiers()
        self.export_features = self.define_exp_features()
        n_images = self._analyze(ca)

        if n_images > 0:
            # invoke event selection
            if self.settings.get('Processing', 'tracking_synchronize_trajectories') and \
                    self.settings.get('Processing', 'tracking'):
                self.logger.debug("--- visitor start")
                self._tes.find_events()
                self.logger.debug("--- visitor ok")
                if self.is_aborted():
                    return 0 # number of processed images

            # save all the data of the position, no aborts from here on
            # want all processed data saved
            if self.settings.get('Output', 'export_object_counts'):
                self.export_object_counts()
            if self.settings.get('Output', 'export_object_details'):
                self.export_object_details()
            if self.settings.get('Output', 'export_file_names'):
                self.export_image_names()

            if self.settings.get('Processing', 'tracking'):
                self.export_tracks_hdf5()
                self.update_status({'text': 'export events...'})
                if self.settings.get('Processing', 'tracking_synchronize_trajectories'):
                    self.export_events()
                if self.settings.get('Output', 'export_track_data'):
                    self.export_full_tracks()
                if self.settings.get('Output', 'export_tracking_as_dot'):
                    self.export_graphviz()

            self.update_status({'text': 'export events...',
                                'max': 1,
                                'progress': 1})

            # remove all features from all channels to free memory
            # for the generation of gallery images
            self.timeholder.purge_features()
            if self.settings.get('Output', 'events_export_gallery_images'):
                self.export_gallery_images()

        try:
            intval = stopwatch.stop()/n_images*1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info(" - %d image sets analyzed, %3d ms per image set" %
                             (n_images, intval))

        self.touch_finished()
#        self.clear()
        return n_images

    @property
    def hdf5_filename(self):
        return self.timeholder.hdf5_filename

    def touch_finished(self, times=None):
        """Writes an empty file to mark this position as finished"""
        fname = join(self._finished_dir, '%s__finished.txt' % self.position)
        with open(fname, "w") as f:
            os.utime(fname, times)
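        # note: with times=None, os.utime stamps the freshly created (empty)
        # marker file with the current time; an explicit (atime, mtime) tuple
        # would back-date it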

    def clear(self):
        # closes hdf5
        self.timeholder.close_all()

    def _analyze(self, cellanalyzer):
        super(PositionAnalyzer, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            if self.is_aborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                       len(self._frames))
                self.update_status({'progress': self._frames.index(frame)+1,
                                    'text': txt,
                                    'interval': stopwatch.interim()})

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()
            n_images += 1
            images = []

            if self.settings.get('Processing', 'tracking'):
                region = self.settings.get('Tracking', 'tracking_regionname')
                samples = self.timeholder[frame][PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if self.settings.get('Tracking', 'tracking_visualization'):
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    nframes = self.settings.get('Tracking', 'tracking_visualize_track_length')
                    radius = self.settings.get('Tracking', 'tracking_centroid_radius')
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, nframes, radius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            for clf in self.classifiers.itervalues():
                cellanalyzer.classify_objects(clf)

            ##############################################################
            # FIXME - part for browser
            if self._myhack is not None:
                self.render_browser(cellanalyzer)
            ##############################################################

            self.settings.set_section('General')
            self.render_classification_images(cellanalyzer, images, frame)
            self.render_contour_images(cellanalyzer, images, frame)

            if self.settings.get('Output', 'rendering_channel_gallery'):
                self.render_channel_gallery(cellanalyzer, frame)

            if self.settings.get('Output', 'rendering_labels_discwrite'):
                cellanalyzer.exportLabelImages(self._labels_dir)

            self.logger.info(" - Frame %d, duration (ms): %3d" \
                                 %(frame, stopwatch.interim()*1000))
            cellanalyzer.purge(features=self.export_features)

        return n_images

    def render_channel_gallery(self, cellanalyzer, frame):
        for channel in cellanalyzer.virtual_channels.itervalues():
            chgal = ChannelGallery(channel, frame, self._channel_gallery_dir)
            chgal.make_gallery()

    def render_contour_images(self, ca, images, frame):
        for region, render_par in self.settings.get2('rendering').iteritems():
            out_dir = join(self._images_dir, region)
            write = self.settings.get('Output', 'rendering_contours_discwrite')

            if region not in self.CHANNELS.keys():
                img, fname = ca.render(out_dir, dctRenderInfo=render_par,
                                       writeToDisc=write, images=images)
                msg = 'PL %s - P %s - T %05d' %(self.plate_id, self.position,
                                                frame)
                self.set_image(img, msg, fname, region, 50)
            # gallery images are treated differently
            else:
                ca.render(out_dir, dctRenderInfo=render_par, writeToDisc=True)

    def render_classification_images(self, cellanalyzer, images, frame):
        for region, render_par in self.settings.get2('rendering_class').iteritems():
            out_images = join(self._images_dir, region)
            write = self.settings.get('Output', 'rendering_class_discwrite')
            img_rgb, fname = cellanalyzer.render(out_images,
                                                 dctRenderInfo=render_par,
                                                 writeToDisc=write,
                                                 images=images)

            msg = 'PL %s - P %s - T %05d' %(self.plate_id, self.position, frame)
            self.set_image(img_rgb, msg, fname, region, 50)

    def render_browser(self, cellanalyzer):
        d = {}
        for name in cellanalyzer.get_channel_names():
            channel = cellanalyzer.get_channel(name)
            d[channel.strChannelId] = channel.meta_image.image
            self._myhack.show_image(d)

        channel_name, region_name = self._myhack._object_region
        channel = cellanalyzer.get_channel(channel_name)
        if channel.has_region(region_name):
            region = channel.get_region(region_name)
            coords = {}
            for obj_id, obj in region.iteritems():
                coords[obj_id] = obj.crack_contour
            self._myhack.set_coords(coords)
Example #18
0
class PositionAnalyzer(PositionCore):

    def __init__(self, *args, **kw):
        super(PositionAnalyzer, self).__init__(*args, **kw)

        if not self.has_timelapse:
            self.settings.set('Processing', 'tracking', False)

        self._makedirs()
        self.add_file_handler(join(self._log_dir, "%s.log" %self.position),
                              self.Levels.DEBUG)

    def _makedirs(self):
        assert isinstance(self.position, basestring)
        assert isinstance(self._out_dir, basestring)

        self._analyzed_dir = join(self._out_dir, "analyzed")
        if self.has_timelapse:
            self._position_dir = join(self._analyzed_dir, self.position)
        else:
            self._position_dir = self._analyzed_dir

        odirs = (self._analyzed_dir,
                 join(self._out_dir, "log"),
                 join(self._out_dir, "log", "_finished"),
                 join(self._out_dir, "hdf5"),
                 join(self._out_dir, "plots"),
                 join(self._position_dir, "statistics"),
                 join(self._position_dir, "tc3"),
                 join(self._position_dir, "gallery"),
                 join(self._position_dir, "channel_gallery"),
                 join(self._position_dir, "images"),
                 join(self._position_dir, "images","_labels"))

        for odir in odirs:
            try:
                makedirs(odir)
            except os.error: # no permissions
                self.logger.error("mkdir %s: failed" %odir)
            else:
                self.logger.info("mkdir %s: ok" %odir)
            setattr(self, "_%s_dir" %basename(odir.lower()).strip("_"), odir)

    def setup_classifiers(self):
        sttg = self.settings

        # processing channel, color channel
        for p_channel, c_channel in self.ch_mapping.iteritems():
            self.settings.set_section('Processing')
            if sttg.get2(self._resolve_name(p_channel, 'classification')):
                chreg = self._channel_regions(p_channel)
                if sttg("EventSelection", "unsupervised_event_selection"):
                    nclusters = sttg("EventSelection", "num_clusters")
                    self.classifiers[p_channel] = ClassDefinitionUnsup( \
                        nclusters, chreg)
                else:
                    sttg.set_section('Classification')
                    clf = CommonClassPredictor(
                        clf_dir=sttg.get2(self._resolve_name(p_channel,
                                                             'classification_envpath')),
                        name=p_channel,
                        channels=chreg,
                        color_channel=c_channel)
                    clf.importFromArff()
                    clf.loadClassifier()
                    self.classifiers[p_channel] = clf

    @property
    def _transitions(self):
        if self.settings.get('EventSelection', 'unsupervised_event_selection'):
            transitions = np.array(((0, 1), ))
        else:
            try:
                transitions = np.array(eval(self.settings.get('EventSelection', 'labeltransitions')))
                # reshape also validates that the labels pair up as (from, to)
                transitions = transitions.reshape((-1, 2))
            except Exception:
                raise RuntimeError(("Make sure that transitions are of the form "
                                    "'int, int' or '(int, int), (int, int)', i.e. "
                                    "a 2-int-tuple or a list of 2-int-tuples"))

        return transitions
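
    # Worked example (settings value assumed for illustration): a
    # 'labeltransitions' string of '(0, 1), (0, 2)' evals to ((0, 1), (0, 2))
    # and becomes a (2, 2) array, i.e. the two allowed class-label
    # transitions 0 -> 1 and 0 -> 2.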

    def setup_eventselection(self, graph):
        """Setup the method for event selection."""

        opts = {'transitions': self._transitions,
                'backward_range': self._convert_tracking_duration('backwardrange'),
                'forward_range': self._convert_tracking_duration('forwardrange'),
                'max_in_degree': self.settings.get('EventSelection', 'maxindegree'),
                'max_out_degree': self.settings.get('EventSelection', 'maxoutdegree')}

        if self.settings.get('EventSelection', 'supervised_event_selection'):
            opts.update({'backward_labels': [int(i) for i in self.settings.get('EventSelection', 'backwardlabels').split(',')],
                         'forward_labels': [int(i) for i in self.settings.get('EventSelection', 'forwardlabels').split(',')],
                         'backward_range_min': self.settings.get('EventSelection', 'backwardrange_min'),
                         'forward_range_min': self.settings.get('EventSelection', 'forwardrange_min'),
                         'backward_check': self._convert_tracking_duration('backwardCheck'),
                         'forward_check': self._convert_tracking_duration('forwardCheck')})
            es = EventSelection(graph, **opts)

        elif self.settings.get('EventSelection', 'unsupervised_event_selection'):
            cld = self.classifiers.values()[0] # only one classdef in case of UES
            opts.update({'forward_check': self._convert_tracking_duration('min_event_duration'),
                         'forward_labels': (1, ),
                         'backward_check': -1, # unused for the unsupervised use case
                         'backward_labels': (0, ),
                         'num_clusters': self.settings.get('EventSelection', 'num_clusters'),
                         'min_cluster_size': self.settings.get('EventSelection', 'min_cluster_size'),
                         'classdef': cld})
            es = UnsupervisedEventSelection(graph, **opts)

        return es
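
    # Minimal usage sketch (mirrors __call__ below):
    #     graph = self._tracker.graph
    #     self._tes = self.setup_eventselection(graph)
    #     self._tes.find_events()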

    def _convert_tracking_duration(self, option_name):
        """Converts a tracking duration according to the unit and the
        mean time-lapse of the current position.
        Returns number of frames.
        """
        value = self.settings.get('EventSelection', option_name)
        unit = self.settings.get('EventSelection', 'duration_unit')

        # get mean and stddev for the current position
        info = self.meta_data.get_timestamp_info(self.position)
        if unit == TimeConverter.FRAMES or info is None:
            result = value
        elif unit == TimeConverter.MINUTES:
            result = (value * 60.) / info[0]
        elif unit == TimeConverter.SECONDS:
            result = value / info[0]
        else:
            raise ValueError("Wrong unit '%s' specified." %unit)
        return int(round(result))

    def define_exp_features(self):
        features = {}
        for name in self.processing_channels:
            region_features = {}

            for region in MetaPluginManager().region_info.names[name.lower()]:
                if name is self.MERGED_CHANNEL:
                    continue
                # export all features extracted per regions
                if self.settings.get('Output', 'events_export_all_features') or \
                        self.settings.get('Output', 'export_track_data'):
                    # None means all features
                    region_features[region] = None
                # export selected features from settings
                else:
                    region_features[region] = \
                        self.settings.get('General',
                                          '%s_featureextraction_exportfeaturenames'
                                          % name.lower())

                features[name] = region_features

            # special case for merged channel
            if name is self.MERGED_CHANNEL:
                mftrs = list()
                for channel, region in self._channel_regions(name).iteritems():
                    if features[channel][region] is None:
                        mftrs = None
                    else:
                        for feature in features[channel][region]:
                            mftrs.append("_".join((channel, region, feature)))
                # a list is unhashable as a dict key, hence the tuple()
                region_features[tuple(self._all_channel_regions[name].values())] = mftrs
                features[name] = region_features
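                # e.g. a 'roisize' feature from the Secondary channel's
                # 'expanded' region is exported as 'Secondary_expanded_roisize'
                # (channel/region/feature names illustrative)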

        return features

    def export_object_counts(self):
        fname = join(self._statistics_dir, 'P%s__object_counts.txt' % self.position)

        ch_info = OrderedDict()
        for name, clf in self.classifiers.iteritems():
            names = clf.class_names.values()
            colors = [clf.hexcolors[n] for n in names]
            ch_info[name] = (clf.regions, names, colors)

        # if no classifier has been loaded, no counts can be exported.
        if len(ch_info) == 0:
            return

        self.timeholder.exportObjectCounts(fname, self.position, self.meta_data, ch_info)
        pplot_ymax = \
            self.settings.get('Output', 'export_object_counts_ylim_max')

        self.timeholder.exportPopulationPlots(ch_info, self._plots_dir,
                                              self.plate_id, self.position,
                                              ymax=pplot_ymax)

    def export_object_details(self):
        fname = join(self._statistics_dir,
                        'P%s__object_details.txt' % self.position)
        self.timeholder.exportObjectDetails(fname)

    def export_image_names(self):
        self.timeholder.exportImageFileNames(self._statistics_dir,
                                             self.position,
                                             self._imagecontainer._importer,
                                             self.ch_mapping)

    def export_full_tracks(self):
        odir = join(self._statistics_dir, 'full')
        exporter = EventExporter(self.meta_data)
        exporter.full_tracks(self.timeholder, self._tes.visitor_data,
                             self.position, odir)

    def export_graphviz(self, channel_name='Primary', region_name='primary'):
        filename = 'tracking_graph___P%s.dot' %self.position
        exporter = TrackExporter()
        exporter.graphviz_dot(join(self._statistics_dir, filename),
                              self._tracker)

        sample_holders = OrderedDict()
        for frame in self.timeholder.keys():
            channel = self.timeholder[frame][channel_name]
            sample_holders[frame] = channel.get_region(region_name)

        filename = join(self._statistics_dir, filename.replace('.dot', '_features.csv'))
        exporter.tracking_data(filename, sample_holders)

    def export_gallery_images(self):
        for ch_name in self.processing_channels:
            cutter_in = join(self._images_dir, ch_name.lower())

            if not isdir(cutter_in):
                self.logger.warning('directory not found (%s)' %cutter_in)
                self.logger.warning('can not write the gallery images')
            else:
                cutter_out = join(self._gallery_dir, ch_name.lower())
                self.logger.info("running Cutter for '%s'..." %ch_name)
                image_size = \
                    self.settings.get('Output', 'events_gallery_image_size')
                TrackGallery(self._tes.centers(),
                             cutter_in, cutter_out, self.position, size=image_size)
            # FIXME: be careful here. normally only raw images are
            #        used for the cutter and can be deleted
            #        afterwards
            shutil.rmtree(cutter_in, ignore_errors=True)

    def export_tracks_hdf5(self):
        """Save tracking data to hdf file"""
        self.logger.debug("--- serializing tracking start")
        self.timeholder.serialize_tracking(self._tracker.graph)
        self.logger.debug("--- serializing tracking ok")

    def export_events(self):
        """Export and save event selection data"""
        exporter = EventExporter(self.meta_data)
        # writes to the event folder
        odir = join(self._statistics_dir, "events")
        exporter.track_features(self.timeholder, self._tes.visitor_data,
                                self.export_features, self.position, odir)
        self.logger.debug("--- serializing events ok")

    def export_events_hdf5(self):
        # writes event data to hdf5
        self.timeholder.serialize_events(self._tes)

    def export_tc3(self):
        t_mean = self.meta_data.get_timestamp_info(self.position)[0]
        tu = TimeConverter(t_mean, TimeConverter.SECONDS)
        increment = self.settings('General', 'frameincrement')
        t_step = tu.sec2min(t_mean)*increment

        nclusters = self.settings.get('EventSelection', 'num_clusters')
        exporter = TC3Exporter(self._tes.tc3data, self._tc3_dir, nclusters,
                               t_step, TimeConverter.MINUTES, self.position)
        exporter()
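        # worked example (illustrative numbers, assuming sec2min converts
        # seconds to minutes): t_mean = 120 s and frameincrement = 2 give
        # t_step = (120 / 60.) * 2 = 4.0 minutes between analyzed frames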

    def export_classlabels(self):
        """Save classlabels of each object to the hdf file."""
        # works for both the supervised and the unsupervised case
        for channels in self.timeholder.itervalues():
            for chname, classifier in self.classifiers.iteritems():
                holder = channels[chname].get_region(classifier.regions)
                if classifier.feature_names is None:
                    # special for unsupervised case
                    classifier.feature_names = holder.feature_names
                self.timeholder.save_classlabels(channels[chname],
                                                 holder, classifier)

    def __call__(self):
        # include hdf5 file name in hdf5_options
        # perhaps timeholder might be a good place to read out the options
        # file does not have to exist to proceed
        hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     hdf5_fname,
                                     self.meta_data, self.settings,
                                     self._frames,
                                     self.plate_id,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        self.setup_classifiers()

        # setup tracker
        if self.settings('Processing', 'tracking'):
            tropts = (self.settings('Tracking', 'tracking_maxobjectdistance'),
                      self.settings('Tracking', 'tracking_maxsplitobjects'),
                      self.settings('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position=self.position,
                          create_images=True,
                          binning_factor=1,
                          detect_objects=self.settings('Processing',
                                                       'objectdetection'))

        self.export_features = self.define_exp_features()
        n_images = self._analyze(ca)

        if n_images > 0:
            # invoke event selection
            if self.settings('Processing', 'eventselection') and \
                    self.settings('Processing', 'tracking'):

                evchannel = self.settings('EventSelection', 'eventchannel')
                region = self.classifiers[evchannel].regions
                if self.settings('EventSelection', 'unsupervised_event_selection'):
                    graph = self._tracker.graph
                elif evchannel != PrimaryChannel.NAME or \
                        region != self.settings("Tracking", "region"):
                    graph = self._tracker.clone_graph(self.timeholder,
                                                      evchannel,
                                                      region)
                else:
                    graph = self._tracker.graph

                self._tes = self.setup_eventselection(graph)
                self.logger.debug("--- visitor start")
                self._tes.find_events()
                self.logger.debug("--- visitor ok")
                if self.is_aborted():
                    return 0 # number of processed images

            # save all the data of the position, no aborts from here on
            # want all processed data saved
            if self.settings('Output', 'export_object_counts') and \
                    self.settings('EventSelection', 'supervised_event_selection'):
                # no object counts in case of unsupervised event selection
                self.export_object_counts()
            if self.settings('Output', 'export_object_details'):
                self.export_object_details()
            if self.settings('Output', 'export_file_names'):
                self.export_image_names()

            if self.settings('Processing', 'tracking'):
                self.export_tracks_hdf5()
                self.update_status({'text': 'export events...'})

                if self.settings('Output', 'hdf5_include_events'):
                    self.export_events_hdf5()

                if self.settings('Output', "export_events"):
                    if self.settings('Processing', 'eventselection'):
                        self.export_events()
                    if self.settings('EventSelection', 'unsupervised_event_selection'):
                        self.export_tc3()

                if self.settings('Output', 'export_track_data'):
                    self.export_full_tracks()
                if self.settings('Output', 'export_tracking_as_dot'):
                    self.export_graphviz(
                        channel_name=PrimaryChannel.NAME,
                        region_name=self._all_channel_regions[PrimaryChannel.NAME][PrimaryChannel.NAME])

            self.export_classlabels()

            self.update_status({'text': 'export events...',
                                'max': 1,
                                'progress': 1})

            # remove all features from all channels to free memory
            # for the generation of gallery images
            self.timeholder.purge_features()
            if self.settings.get('Output', 'events_export_gallery_images') and \
                    self.settings.get('Processing', 'eventselection'):
                self.export_gallery_images()

        try:
            intval = stopwatch.stop()/n_images*1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info(" - %d image sets analyzed, %3d ms per image set" %
                             (n_images, intval))

        self.touch_finished()
        self.clear()
        return n_images

    @property
    def hdf5_filename(self):
        return self.timeholder.hdf5_filename

    def touch_finished(self, times=None):
        """Writes an empty file to mark this position as finished"""
        fname = join(self._finished_dir, '%s__finished.txt' % self.position)
        with open(fname, "w") as f:
            os.utime(fname, times)

    def clear(self):
        # closes hdf5
        if self.timeholder is not None:
            self.timeholder.close_all()
        # close and remove handlers from logging object
        self.close()

    def _analyze(self, cellanalyzer):
        super(PositionAnalyzer, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        minimal_effort = (self.settings.get('Output', 'minimal_effort') and
                          self.settings.get('Output', 'hdf5_reuse'))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            if self.is_aborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                       len(self._frames))
                self.update_status({'progress': self._frames.index(frame)+1,
                                    'text': txt,
                                    'interval': stopwatch.interim()})

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()

            self.logger.info(" - Frame %d, cellanalyzer.process (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            n_images += 1
            images = []

            if self.settings('Processing', 'tracking'):
                region = self.settings('Tracking', 'region')
                samples = self.timeholder[frame][PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if self.settings('Tracking', 'tracking_visualization'):
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    nframes = self.settings('Tracking', 'tracking_visualize_track_length')
                    radius = self.settings('Tracking', 'tracking_centroid_radius')
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, nframes, radius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            self.logger.info(" - Frame %d, Tracking (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            # can't cluster on a per frame basis
            if self.settings("EventSelection", "supervised_event_selection"):
                for clf in self.classifiers.itervalues():
                    cellanalyzer.classify_objects(clf)

            self.logger.info(" - Frame %d, Classification (ms): %3d" \
                             % (frame, stopwatch.interval()*1000))

            self.settings.set_section('General')
            # want to emit all images at once
            if not minimal_effort:
                imgs = {}
                imgs.update(self.render_classification_images(cellanalyzer, images, frame))
                imgs.update(self.render_contour_images(cellanalyzer, images, frame))
                msg = 'PL %s - P %s - T %05d' %(self.plate_id, self.position, frame)
                self.set_image(imgs, msg, 50)

                if self.settings('Output', 'rendering_channel_gallery'):
                    self.render_channel_gallery(cellanalyzer, frame)

                if self.settings('Output', 'rendering_labels_discwrite'):
                    cellanalyzer.exportLabelImages(self._labels_dir)

            cellanalyzer.purge(features=self.export_features)
            self.logger.info(" - Frame %d, rest (ms): %3d" \
                                 %(frame, stopwatch.interval()*1000))
            self.logger.info(" - Frame %d, duration (ms): %3d" \
                                 %(frame, stopwatch.interim()*1000))

        return n_images

    def render_channel_gallery(self, cellanalyzer, frame):
        for channel in cellanalyzer.virtual_channels.itervalues():
            chgal = ChannelGallery(channel, frame, self._channel_gallery_dir)
            chgal.make_gallery()

    def render_contour_images(self, ca, images, frame):
        images_ = dict()
        for region, render_par in self.settings.get2('rendering').iteritems():
            out_dir = join(self._images_dir, region)
            write = self.settings('Output', 'rendering_contours_discwrite')

            if region not in self.CHANNELS.keys():
                img, _ = ca.render(out_dir, dctRenderInfo=render_par,
                                   writeToDisc=write, images=images)
                images_[region] = img
            # gallery images are treated differently
            else:
                ca.render(out_dir, dctRenderInfo=render_par, writeToDisc=True)
        return images_

    def render_classification_images(self, cellanalyzer, images, frame):
        images_ = dict()
        for region, render_par in self.settings.get2('rendering_class').iteritems():
            out_images = join(self._images_dir, region)
            write = self.settings('Output', 'rendering_class_discwrite')
            image, _ = cellanalyzer.render(out_images,
                                           dctRenderInfo=render_par,
                                           writeToDisc=write,
                                           images=images)
            images_[region] = image
        return images_
Example #19
0
    def full_tracks(self, timeholder, visitor_data, position, outdir):
        shutil.rmtree(outdir, True)
        makedirs(outdir)

        for start_id, data in visitor_data.iteritems():
            for idx, track in enumerate(data['_full_tracks']):
                has_header = False
                line1 = []
                line2 = []
                line3 = []

                frame, obj_label = Tracker.split_nodeid(start_id)[:2]
                filename = 'P%s__T%05d__O%04d__B%02d.txt' \
                    %(position, frame, obj_label, idx+1)
                f = file(join(outdir, filename), 'w')
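                # e.g. position '0037', frame 12, object 42, first branch ->
                # 'P0037__T00012__O0042__B01.txt' (values illustrative)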

                for node_id in track:
                    frame, obj_id = Tracker.split_nodeid(node_id)

                    coordinate = Coordinate(position=position, time=frame)
                    prefix = [
                        frame,
                        self.meta_data.get_timestamp_relative(coordinate),
                        obj_id
                    ]
                    prefix_names = ['frame', 'time', 'objID']
                    items = []

                    for channel in timeholder[frame].values():
                        for region_id in channel.region_names():
                            region = channel.get_region(region_id)
                            if obj_id in region:
                                flkp = self._map_feature_names(
                                    region.feature_names)
                                if not has_header:
                                    keys = ['classLabel', 'className']
                                    if channel.NAME == 'Primary':
                                        keys += ['centerX', 'centerY']
                                    keys += flkp.keys()
                                    line1 += [channel.NAME.upper()] * len(keys)
                                    line2 += [str(region_id)] * len(keys)
                                    line3 += keys
                                obj = region[obj_id]
                                features = region.features_by_name(
                                    obj_id, flkp.values())
                                values = [
                                    x if x is not None else ''
                                    for x in [obj.iLabel, obj.strClassName]
                                ]
                                if channel.NAME == 'Primary':
                                    values += [
                                        obj.oCenterAbs[0], obj.oCenterAbs[1]
                                    ]
                                values += list(features)
                                items.extend(values)

                    if not has_header:
                        has_header = True
                        prefix_str = [''] * len(prefix)
                        line1 = prefix_str + line1
                        line2 = prefix_str + line2
                        line3 = prefix_names + line3
                        f.write('%s\n' % CSVParams.sep.join(line1))
                        f.write('%s\n' % CSVParams.sep.join(line2))
                        f.write('%s\n' % CSVParams.sep.join(line3))

                    f.write(
                        '%s\n' %
                        CSVParams.sep.join([str(i) for i in prefix + items]))
                f.close()
Example #20
0
    def _split_nodeid(self, nodeid):
        ret = Tracker.split_nodeid(nodeid)
        if len(ret) == 2:
            ret = ret + (1,)
        return ret
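
    # Usage sketch with hypothetical node ids: if Tracker.split_nodeid('23_42')
    # returns (23, 42), this helper pads it to (23, 42, 1); a three-part id
    # such as '23_42_2' passes through as (23, 42, 2).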
Example #21
0
class PositionAnalyzer(PositionCore):

    def __init__(self, *args, **kw):
        super(PositionAnalyzer, self).__init__(*args, **kw)

        if not self.has_timelapse:
            self.settings.set('Processing', 'tracking', False)

        self._makedirs()
        self.add_file_handler(join(self._log_dir, "%s.log" %self.position),
                              self.Levels.DEBUG)

    def _makedirs(self):
        assert isinstance(self.position, basestring)
        assert isinstance(self._out_dir, basestring)

        self._analyzed_dir = join(self._out_dir, "analyzed")
        if self.has_timelapse:
            self._position_dir = join(self._analyzed_dir, self.position)
        else:
            self._position_dir = self._analyzed_dir

        odirs = (self._analyzed_dir,
                 join(self._out_dir, "log"),
                 join(self._out_dir, "log", "_finished"),
                 join(self._out_dir, "hdf5"),
                 join(self._out_dir, "plots"),
                 join(self._position_dir, "statistics"),
                 join(self._position_dir, "tc3"),
                 join(self._position_dir, "gallery"),
                 join(self._position_dir, "channel_gallery"),
                 join(self._position_dir, "images"),
                 join(self._position_dir, "images","_labels"))

        for odir in odirs:
            try:
                makedirs(odir)
            except os.error: # no permissions
                self.logger.error("mkdir %s: failed" %odir)
            else:
                self.logger.info("mkdir %s: ok" %odir)
            setattr(self, "_%s_dir" %basename(odir.lower()).strip("_"), odir)

    def setup_classifiers(self):
        sttg = self.settings

        # processing channel, color channel
        for p_channel, c_channel in self.ch_mapping.iteritems():
            self.settings.set_section('Processing')
            if sttg.get2(self._resolve_name(p_channel, 'classification')):
                chreg = self._channel_regions(p_channel)
                if sttg("EventSelection", "unsupervised_event_selection"):
                    nclusters = sttg("EventSelection", "num_clusters")
                    self.classifiers[p_channel] = ClassDefinitionUnsup( \
                        nclusters, chreg)
                else:
                    sttg.set_section('Classification')
                    clf = CommonClassPredictor(
                        clf_dir=sttg.get2(self._resolve_name(p_channel,
                                                             'classification_envpath')),
                        name=p_channel,
                        channels=chreg,
                        color_channel=c_channel)
                    clf.importFromArff()
                    clf.loadClassifier()
                    self.classifiers[p_channel] = clf

    @property
    def _transitions(self):
        if self.settings.get('EventSelection', 'unsupervised_event_selection'):
            transitions = np.array(((0, 1), ))
        else:
            try:
                transitions = np.array(eval(self.settings.get('EventSelection', 'labeltransitions')))
                # reshape also validates that the labels pair up as (from, to)
                transitions = transitions.reshape((-1, 2))
            except Exception:
                raise RuntimeError(("Make sure that transitions are of the form "
                                    "'int, int' or '(int, int), (int, int)', i.e. "
                                    "a 2-int-tuple or a list of 2-int-tuples"))

        return transitions

    def setup_eventselection(self, graph):
        """Setup the method for event selection."""

        opts = {'transitions': self._transitions,
                'backward_range': self._convert_tracking_duration('backwardrange'),
                'forward_range': self._convert_tracking_duration('forwardrange'),
                'max_in_degree': self.settings.get('EventSelection', 'maxindegree'),
                'max_out_degree': self.settings.get('EventSelection', 'maxoutdegree')}

        if self.settings.get('EventSelection', 'supervised_event_selection'):
            opts.update({'backward_labels': [int(i) for i in self.settings.get('EventSelection', 'backwardlabels').split(',')],
                         'forward_labels': [int(i) for i in self.settings.get('EventSelection', 'forwardlabels').split(',')],
                         'backward_range_min': self.settings.get('EventSelection', 'backwardrange_min'),
                         'forward_range_min': self.settings.get('EventSelection', 'forwardrange_min'),
                         'backward_check': self._convert_tracking_duration('backwardCheck'),
                         'forward_check': self._convert_tracking_duration('forwardCheck')})
            es = EventSelection(graph, **opts)

        elif self.settings.get('EventSelection', 'unsupervised_event_selection'):
            cld = self.classifiers.values()[0] # only one classdef in case of UES
            opts.update({'forward_check': self._convert_tracking_duration('min_event_duration'),
                         'forward_labels': (1, ),
                         'backward_check': -1, # unused for the unsupervised use case
                         'backward_labels': (0, ),
                         'num_clusters': self.settings.get('EventSelection', 'num_clusters'),
                         'min_cluster_size': self.settings.get('EventSelection', 'min_cluster_size'),
                         'classdef': cld})
            es = UnsupervisedEventSelection(graph, **opts)

        return es

    def _convert_tracking_duration(self, option_name):
        """Converts a tracking duration according to the unit and the
        mean time-lapse of the current position.
        Returns number of frames.
        """
        value = self.settings.get('EventSelection', option_name)
        unit = self.settings.get('EventSelection', 'duration_unit')

        # get mean and stddev for the current position
        info = self.meta_data.get_timestamp_info(self.position)
        if unit == TimeConverter.FRAMES or info is None:
            result = value
        elif unit == TimeConverter.MINUTES:
            result = (value * 60.) / info[0]
        elif unit == TimeConverter.SECONDS:
            result = value / info[0]
        else:
            raise ValueError("Wrong unit '%s' specified." %unit)
        return int(round(result))
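
    # Worked example (illustrative): with a mean time-lapse of 120 s per
    # frame, a 'forwardrange' of 10 MINUTES converts to
    # int(round((10 * 60.) / 120)) == 5 frames; in FRAMES mode the value
    # is passed through unchanged.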

    def define_exp_features(self):
        features = {}
        for name in self.processing_channels:
            region_features = {}

            for region in MetaPluginManager().region_info.names[name.lower()]:
                if name is self.MERGED_CHANNEL:
                    continue
                # export all features extracted per regions
                if self.settings.get('Output', 'events_export_all_features') or \
                        self.settings.get('Output', 'export_track_data'):
                    # None means all features
                    region_features[region] = None
                # export selected features from settings
                else:
                    region_features[region] = \
                        self.settings.get('General',
                                          '%s_featureextraction_exportfeaturenames'
                                          % name.lower())

                features[name] = region_features

            # special case for merged channel
            if name is self.MERGED_CHANNEL:
                mftrs = list()
                for channel, region in self._channel_regions(name).iteritems():
                    # one missing feature list means "all features" (see above)
                    if features[channel][region] is None:
                        mftrs = None
                        break
                    for feature in features[channel][region]:
                        mftrs.append("_".join((channel, region, feature)))
                # a list is not hashable, hence the tuple of region names
                region_features[tuple(self._all_channel_regions[name].values())] = mftrs
                features[name] = region_features

        return features
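
    # Shape of the mapping returned above (channel/region/feature names are
    # placeholders):
    #   {'Primary':   {'primary': None},           # None -> export everything
    #    'Secondary': {'expanded': ['feat1', ...]}}
    # For the merged channel the key is the tuple of its channel regions.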

    def export_object_counts(self):
        fname = join(self._statistics_dir, 'P%s__object_counts.txt' % self.position)

        ch_info = OrderedDict()
        for name, clf in self.classifiers.iteritems():
            names = clf.class_names.values()
            colors = [clf.hexcolors[n] for n in names]
            ch_info[name] = (clf.regions, names, colors)

        # if no classifier has been loaded, no counts can be exported.
        if len(ch_info) == 0:
            return

        self.timeholder.exportObjectCounts(fname, self.position, self.meta_data, ch_info)
        pplot_ymax = \
            self.settings.get('Output', 'export_object_counts_ylim_max')

        self.timeholder.exportPopulationPlots(ch_info, self._plots_dir,
                                              self.plate_id, self.position,
                                              ymax=pplot_ymax)

    def export_object_details(self):
        fname = join(self._statistics_dir,
                     'P%s__object_details.txt' % self.position)
        self.timeholder.exportObjectDetails(fname)

    def export_image_names(self):
        self.timeholder.exportImageFileNames(self._statistics_dir,
                                             self.position,
                                             self._imagecontainer._importer,
                                             self.ch_mapping)

    def export_full_tracks(self):
        odir = join(self._statistics_dir, 'full')
        exporter = EventExporter(self.meta_data)
        exporter.full_tracks(self.timeholder, self._tes.visitor_data,
                             self.position, odir)

    def export_graphviz(self, channel_name='Primary', region_name='primary'):
        filename = 'tracking_graph___P%s.dot' %self.position
        exporter = TrackExporter()
        exporter.graphviz_dot(join(self._statistics_dir, filename),
                              self._tracker)

        sample_holders = OrderedDict()
        for frame in self.timeholder.keys():
            channel = self.timeholder[frame][channel_name]
            sample_holders[frame] = channel.get_region(region_name)

        filename = join(self._statistics_dir, filename.replace('.dot', '_features.csv'))
        exporter.tracking_data(filename, sample_holders)

    def export_gallery_images(self):
        for ch_name in self.processing_channels:
            cutter_in = join(self._images_dir, ch_name.lower())

            if not isdir(cutter_in):
                self.logger.warning('directory not found (%s)' %cutter_in)
                self.logger.warning('can not write the gallery images')
            else:
                cutter_out = join(self._gallery_dir, ch_name.lower())
                self.logger.info("running Cutter for '%s'..." %ch_name)
                image_size = \
                    self.settings.get('Output', 'events_gallery_image_size')
                TrackGallery(self._tes.centers(),
                             cutter_in, cutter_out, self.position, size=image_size)
            # FIXME: be careful here. normally only raw images are
            #        used for the cutter and can be deleted
            #        afterwards
            shutil.rmtree(cutter_in, ignore_errors=True)

    def export_tracks_hdf5(self):
        """Save tracking data to hdf file"""
        self.logger.debug("--- serializing tracking start")
        self.timeholder.serialize_tracking(self._tracker.graph)
        self.logger.debug("--- serializing tracking ok")

    def export_events(self):
        """Export and save event selection data"""
        exporter = EventExporter(self.meta_data)
        # writes to the event folder
        odir = join(self._statistics_dir, "events")
        exporter.track_features(self.timeholder, self._tes.visitor_data,
                                self.export_features, self.position, odir)
        self.logger.debug("--- serializing events ok")

    def export_events_hdf5(self):
        # writes event data to hdf5
        self.timeholder.serialize_events(self._tes)

    def export_tc3(self):
        t_mean = self.meta_data.get_timestamp_info(self.position)[0]
        tu = TimeConverter(t_mean, TimeConverter.SECONDS)
        increment = self.settings('General', 'frameincrement')
        t_step = tu.sec2min(t_mean)*increment

        nclusters = self.settings.get('EventSelection', 'num_clusters')
        exporter = TC3Exporter(self._tes.tc3data, self._tc3_dir, nclusters,
                               t_step, TimeConverter.MINUTES, self.position)
        exporter()

    def export_classlabels(self):
        """Save classlabels of each object to the hdf file."""
        # works for both the supervised and the unsupervised case
        for channels in self.timeholder.itervalues():
            for chname, classifier in self.classifiers.iteritems():
                holder = channels[chname].get_region(classifier.regions)
                if classifier.feature_names is None:
                    # special for unsupervised case
                    classifier.feature_names = holder.feature_names
                self.timeholder.save_classlabels(channels[chname],
                                                 holder, classifier)

    def __call__(self):
        # include hdf5 file name in hdf5_options
        # perhaps timeholder might be a good place to read out the options
        # file does not have to exist to proceed
        hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     hdf5_fname,
                                     self.meta_data, self.settings,
                                     self._frames,
                                     self.plate_id,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        self.setup_classifiers()

        # setup tracker
        if self.settings('Processing', 'tracking'):
            tropts = (self.settings('Tracking', 'tracking_maxobjectdistance'),
                      self.settings('Tracking', 'tracking_maxsplitobjects'),
                      self.settings('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position = self.position,
                          create_images = True,
                          binning_factor = 1,
                          detect_objects = self.settings('Processing',
                                                         'objectdetection'))

        self.export_features = self.define_exp_features()
        n_images = self._analyze(ca)

        if n_images > 0:
            # invoke event selection
            if self.settings('Processing', 'eventselection') and \
                    self.settings('Processing', 'tracking'):

                evchannel = self.settings('EventSelection', 'eventchannel')
                region = self.classifiers[evchannel].regions
                if self.settings('EventSelection', 'unsupervised_event_selection'):
                    graph = self._tracker.graph
                elif evchannel != PrimaryChannel.NAME or \
                        region != self.settings("Tracking", "region"):
                    graph = self._tracker.clone_graph(self.timeholder,
                                                      evchannel,
                                                      region)
                else:
                    graph = self._tracker.graph

                self._tes = self.setup_eventselection(graph)
                self.logger.debug("--- visitor start")
                self._tes.find_events()
                self.logger.debug("--- visitor ok")
                if self.is_aborted():
                    return 0 # number of processed images

            # save all the data of the position, no aborts from here on
            # want all processed data saved
            if self.settings('Output', 'export_object_counts') and \
                    self.settings('EventSelection', 'supervised_event_selection'):
                # no object counts in case of unsupervised event selection
                self.export_object_counts()
            if self.settings('Output', 'export_object_details'):
                self.export_object_details()
            if self.settings('Output', 'export_file_names'):
                self.export_image_names()

            if self.settings('Processing', 'tracking'):
                self.export_tracks_hdf5()
                self.update_status({'text': 'export events...'})

                if self.settings('Output', 'hdf5_include_events'):
                    self.export_events_hdf5()

                if self.settings('Output', "export_events"):
                    if self.settings('Processing', 'eventselection'):
                        self.export_events()
                    if self.settings('EventSelection', 'unsupervised_event_selection'):
                        self.export_tc3()

                if self.settings('Output', 'export_track_data'):
                    self.export_full_tracks()
                if self.settings('Output', 'export_tracking_as_dot'):
                    self.export_graphviz(
                        channel_name=PrimaryChannel.NAME,
                        region_name=self._all_channel_regions[PrimaryChannel.NAME][PrimaryChannel.NAME])

            self.export_classlabels()

            self.update_status({'text': 'export events...',
                                'max': 1,
                                'progress': 1})

            # remove all features from all channels to free memory
            # for the generation of gallery images
            self.timeholder.purge_features()
            if self.settings.get('Output', 'events_export_gallery_images') and \
                    self.settings.get('Processing', 'eventselection'):
                self.export_gallery_images()

        try:
            intval = stopwatch.stop()/n_images*1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info(" - %d image sets analyzed, %3d ms per image set" %
                             (n_images, intval))

        self.touch_finished()
        self.clear()
        return n_images

    @property
    def hdf5_filename(self):
        return self.timeholder.hdf5_filename

    def touch_finished(self, times=None):
        """Writes an empty file to mark this position as finished"""
        fname = join(self._finished_dir, '%s__finished.txt' % self.position)
        with open(fname, "w") as f:
            os.utime(fname, times)

    def clear(self):
        # closes
        if self.timeholder is not None:
            self.timeholder.close_all()
        # close and remove handlers from logging object
        self.close()

    def _analyze(self, cellanalyzer):
        super(PositionAnalyzer, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        minimal_effort = self.settings.get('Output', 'minimal_effort') and self.settings.get('Output', 'hdf5_reuse')

        for frame, channels in self._imagecontainer(
                crd, interrupt_channel=True, interrupt_zslice=True):

            if self.is_aborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                       len(self._frames))
                self.update_status({'progress': self._frames.index(frame)+1,
                                    'text': txt,
                                    'interval': stopwatch.interim()})

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()

            self.logger.info(" - Frame %d, cellanalyzer.process (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            n_images += 1
            images = []

            if self.settings('Processing', 'tracking'):
                region = self.settings('Tracking', 'region')
                samples = self.timeholder[frame][PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if self.settings('Tracking', 'tracking_visualization'):
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    nframes = self.settings('Tracking', 'tracking_visualize_track_length')
                    radius = self.settings('Tracking', 'tracking_centroid_radius')
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, nframes, radius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            self.logger.info(" - Frame %d, Tracking (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            # can't cluster on a per frame basis
            if self.settings("EventSelection", "supervised_event_selection"):
                for clf in self.classifiers.itervalues():
                    cellanalyzer.classify_objects(clf)

            self.logger.info(" - Frame %d, Classification (ms): %3d" \
                             % (frame, stopwatch.interval()*1000))

            self.settings.set_section('General')
            # we want to emit all images at once
            if not minimal_effort:
                imgs = {}
                imgs.update(self.render_classification_images(cellanalyzer, images, frame))
                imgs.update(self.render_contour_images(cellanalyzer, images, frame))
                msg = 'PL %s - P %s - T %05d' %(self.plate_id, self.position, frame)
                self.set_image(imgs, msg, 50)

                if self.settings('Output', 'rendering_channel_gallery'):
                    self.render_channel_gallery(cellanalyzer, frame)

                if self.settings('Output', 'rendering_labels_discwrite'):
                    cellanalyzer.exportLabelImages(self._labels_dir)

            cellanalyzer.purge(features=self.export_features)
            self.logger.info(" - Frame %d, rest (ms): %3d" \
                                 %(frame, stopwatch.interval()*1000))
            self.logger.info(" - Frame %d, duration (ms): %3d" \
                                 %(frame, stopwatch.interim()*1000))


        return n_images
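
    # Per-frame pipeline above, in order: initTimepoint -> register_channels
    # -> process (segmentation + features) -> track_next_frame -> supervised
    # classification -> rendering/export -> purge to free memory.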

    def render_channel_gallery(self, cellanalyzer, frame):
        for channel in cellanalyzer.virtual_channels.itervalues():
            chgal = ChannelGallery(channel, frame, self._channel_gallery_dir)
            chgal.make_gallery()

    def render_contour_images(self, ca, images, frame):
        images_ = dict()
        for region, render_par in self.settings.get2('rendering').iteritems():
            out_dir = join(self._images_dir, region)
            write = self.settings('Output', 'rendering_contours_discwrite')

            if region not in self.CHANNELS.keys():
                img, _ = ca.render(out_dir, dctRenderInfo=render_par,
                                   writeToDisc=write, images=images)
                images_[region] = img
            # gallery images are treated differently
            else:
                ca.render(out_dir, dctRenderInfo=render_par, writeToDisc=True)
        return images_

    def render_classification_images(self, cellanalyzer, images, frame):
        images_ = dict()
        for region, render_par in self.settings.get2('rendering_class').iteritems():
            out_images = join(self._images_dir, region)
            write = self.settings('Output', 'rendering_class_discwrite')
            image, _ = cellanalyzer.render(out_images,
                                           dctRenderInfo=render_par,
                                           writeToDisc=write,
                                           images=images)
            images_[region] = image
        return images_
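
A minimal standalone sketch of the label-transition parsing done by the _transitions property above; ast.literal_eval stands in for the snippet's bare eval purely for this illustration, and the accepted string formats are the same:

import ast
import numpy as np

def parse_transitions(text):
    # accepts 'int, int' or '(int, int), (int, int)' as written in the
    # settings file, and normalizes the result to one row per transition
    try:
        transitions = np.array(ast.literal_eval(text))
        return transitions.reshape((-1, 2))
    except (ValueError, SyntaxError):
        raise RuntimeError("Make sure that transitions are of the form "
                           "'int, int' or '(int, int), (int, int)', i.e. "
                           "a 2-int tuple or a list of 2-int tuples")

print(parse_transitions("(0, 1), (1, 2)"))  # -> [[0 1] [1 2]]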
Example #22
0
File: position.py Project: imcf/cecog
    def __call__(self):
        # include hdf5 file name in hdf5_options
        # perhaps timeholder might be a good place to read out the options
        # file does not have to exist to proceed
        hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     hdf5_fname,
                                     self.meta_data, self.settings,
                                     self._frames,
                                     self.plate_id,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        # setup tracker
        if self.settings.get('Processing', 'tracking'):
            region = self.settings.get('Tracking', 'tracking_regionname')
            tropts = (self.settings.get('Tracking', 'tracking_maxobjectdistance'),
                      self.settings.get('Tracking', 'tracking_maxsplitobjects'),
                      self.settings.get('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)
            self._tes = EventSelection(self._tracker.graph, **self._es_options)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position = self.position,
                          create_images = True,
                          binning_factor = 1,
                          detect_objects = self.settings.get('Processing',
                                                             'objectdetection'))

        self.setup_classifiers()
        self.export_features = self.define_exp_features()
        n_images = self._analyze(ca)

        if n_images > 0:
            # invoke event selection
            if self.settings.get('Processing', 'tracking_synchronize_trajectories') and \
                    self.settings.get('Processing', 'tracking'):
                self.logger.debug("--- visitor start")
                self._tes.find_events()
                self.logger.debug("--- visitor ok")
                if self.is_aborted():
                    return 0 # number of processed images

            # save all the data of the position, no aborts from here on
            # want all processed data saved
            if self.settings.get('Output', 'export_object_counts'):
                self.export_object_counts()
            if self.settings.get('Output', 'export_object_details'):
                self.export_object_details()
            if self.settings.get('Output', 'export_file_names'):
                self.export_image_names()

            if self.settings.get('Processing', 'tracking'):
                self.export_tracks_hdf5()
                self.update_status({'text': 'export events...'})
                if self.settings.get('Processing', 'tracking_synchronize_trajectories'):
                    self.export_events()
                if self.settings.get('Output', 'export_track_data'):
                    self.export_full_tracks()
                if self.settings.get('Output', 'export_tracking_as_dot'):
                    self.export_graphviz()

            self.update_status({'text': 'export events...',
                                'max': 1,
                                'progress': 1})

            # remove all features from all channels to free memory
            # for the generation of gallery images
            self.timeholder.purge_features()
            if self.settings.get('Output', 'events_export_gallery_images'):
                self.export_gallery_images()

        try:
            intval = stopwatch.stop()/n_images*1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info(" - %d image sets analyzed, %3d ms per image set" %
                             (n_images, intval))

        self.touch_finished()
#        self.clear()
        return n_images
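
Both __call__ variants finish by dropping an empty marker file; a self-contained sketch of the pattern used by touch_finished above (file naming as in the snippets):

import os
from os.path import join

def touch_finished(finished_dir, position, times=None):
    # create (or truncate) the marker file, then stamp it; times=None means
    # "now", otherwise an (atime, mtime) tuple as accepted by os.utime
    fname = join(finished_dir, '%s__finished.txt' % position)
    with open(fname, 'w'):
        pass
    os.utime(fname, times)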
Example #23
0
    def __call__(self):
        # include hdf5 file name in hdf5_options
        # perhaps timeholder might be a good place to read out the options
        # file does not have to exist to proceed
        hdf5_fname = join(self._hdf5_dir, '%s.ch5' % self.position)

        self.timeholder = TimeHolder(self.position, self._all_channel_regions,
                                     hdf5_fname,
                                     self.meta_data, self.settings,
                                     self._frames,
                                     self.plate_id,
                                     **self._hdf_options)

        self.settings.set_section('Tracking')
        self.setup_classifiers()

        # setup tracker
        if self.settings('Processing', 'tracking'):
            tropts = (self.settings('Tracking', 'tracking_maxobjectdistance'),
                      self.settings('Tracking', 'tracking_maxsplitobjects'),
                      self.settings('Tracking', 'tracking_maxtrackinggap'))
            self._tracker = Tracker(*tropts)

        stopwatch = StopWatch(start=True)
        ca = CellAnalyzer(timeholder=self.timeholder,
                          position = self.position,
                          create_images = True,
                          binning_factor = 1,
                          detect_objects = self.settings('Processing',
                                                         'objectdetection'))

        self.export_features = self.define_exp_features()
        n_images = self._analyze(ca)

        if n_images > 0:
            # invoke event selection
            if self.settings('Processing', 'eventselection') and \
                    self.settings('Processing', 'tracking'):

                evchannel = self.settings('EventSelection', 'eventchannel')
                region = self.classifiers[evchannel].regions
                if self.settings('EventSelection', 'unsupervised_event_selection'):
                    graph = self._tracker.graph
                elif evchannel != PrimaryChannel.NAME or \
                        region != self.settings("Tracking", "region"):
                    graph = self._tracker.clone_graph(self.timeholder,
                                                      evchannel,
                                                      region)
                else:
                    graph = self._tracker.graph

                self._tes = self.setup_eventselection(graph)
                self.logger.debug("--- visitor start")
                self._tes.find_events()
                self.logger.debug("--- visitor ok")
                if self.is_aborted():
                    return 0 # number of processed images

            # save all the data of the position, no aborts from here on
            # want all processed data saved
            if self.settings('Output', 'export_object_counts') and \
                    self.settings('EventSelection', 'supervised_event_selection'):
                # no object counts in case of unsupervised event selection
                self.export_object_counts()
            if self.settings('Output', 'export_object_details'):
                self.export_object_details()
            if self.settings('Output', 'export_file_names'):
                self.export_image_names()

            if self.settings('Processing', 'tracking'):
                self.export_tracks_hdf5()
                self.update_status({'text': 'export events...'})

                if self.settings('Output', 'hdf5_include_events'):
                    self.export_events_hdf5()

                if self.settings('Output', "export_events"):
                    if self.settings('Processing', 'eventselection'):
                        self.export_events()
                    if self.settings('EventSelection', 'unsupervised_event_selection'):
                        self.export_tc3()

                if self.settings('Output', 'export_track_data'):
                    self.export_full_tracks()
                if self.settings('Output', 'export_tracking_as_dot'):
                    self.export_graphviz(
                        channel_name=PrimaryChannel.NAME,
                        region_name=self._all_channel_regions[PrimaryChannel.NAME][PrimaryChannel.NAME])

            self.export_classlabels()

            self.update_status({'text': 'export events...',
                                'max': 1,
                                'progress': 1})

            # remove all features from all channels to free memory
            # for the generation of gallery images
            self.timeholder.purge_features()
            if self.settings.get('Output', 'events_export_gallery_images') and \
                    self.settings.get('Processing', 'eventselection'):
                self.export_gallery_images()

        try:
            intval = stopwatch.stop()/n_images*1000
        except ZeroDivisionError:
            pass
        else:
            self.logger.info(" - %d image sets analyzed, %3d ms per image set" %
                             (n_images, intval))

        self.touch_finished()
        self.clear()
        return n_images
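
Finally, a condensed sketch of the event-graph selection inside __call__; settings, tracker, timeholder and classifiers stand in for the instance attributes used above, and the callable settings style mirrors the snippets:

def pick_event_graph(settings, tracker, timeholder, classifiers,
                     primary_name='Primary'):
    # unsupervised selection always clusters on the primary tracking graph;
    # otherwise the graph is cloned whenever events are selected on a
    # channel/region different from the one that was tracked
    evchannel = settings('EventSelection', 'eventchannel')
    region = classifiers[evchannel].regions
    if settings('EventSelection', 'unsupervised_event_selection'):
        return tracker.graph
    if evchannel != primary_name or region != settings('Tracking', 'region'):
        return tracker.clone_graph(timeholder, evchannel, region)
    return tracker.graph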