Example #1
File: position.py Project: imcf/cecog
    def _analyze(self, cellanalyzer):
        super(PositionPicker, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):
            if self.is_aborted():
                return 0
            else:
                txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                       len(self._frames))
                self.update_status({'progress': self._frames.index(frame)+1,
                                   'text': txt,
                                   'interval': stopwatch.interim()})

            stopwatch.reset(start=True)
            # initTimepoint clears channel_registry
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)
            image = cellanalyzer.collectObjects(self.plate_id,
                                                self.position,
                                                self.sample_readers,
                                                self.learner,
                                                byTime=True)

            if image is not None:
                n_images += 1
                msg = 'PL %s - P %s - T %05d' %(self.plate_id, self.position,
                                                frame)
                self.set_image(image[self._qthread.renderer],
                               msg, region=self._qthread.renderer)
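All of the examples below time their frame loop with a StopWatch supporting start=True, interim(), reset(start=True), interval() and stop(). The class is cecog's own; purely for orientation, a minimal stand-in with those semantics might look like this (the exact units and the reading of interval() as a lap time are assumptions):

    import time

    class StopWatch(object):
        # Minimal sketch, not the cecog implementation: interim() is
        # assumed to return seconds since the last reset, interval()
        # the lap time since the previous interval()/reset call.
        def __init__(self, start=False):
            self._t0 = self._tlast = time.time() if start else None

        def reset(self, start=False):
            self._t0 = self._tlast = time.time() if start else None

        def interim(self):
            return time.time() - self._t0

        def interval(self):
            now = time.time()
            lap, self._tlast = now - self._tlast, now
            return lap

        def stop(self):
            return self.interim()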
Example #2
    def _analyze(self, cellanalyzer):
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position, self._frames,
                         list(set(self.ch_mapping.values())))


        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            if self.isAborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' % (frame, self._frames.index(frame) + 1,
                                        len(self._frames))
                self.statusUpdate(progress=self._frames.index(frame) + 1,
                                  text=txt,
                                  interval=stopwatch.interim())

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)

            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()
            self.setup_classifiers()

            for chname, clf in self.classifiers.iteritems():
                try:
                    cellanalyzer.classify_objects(clf, chname)
                except KeyError:
                    pass
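A portability note: iteritems() here (and itervalues() in later examples) pins these snippets to Python 2. On Python 3 the same loop, including the guard for channels without a matching classifier, would read:

    # Python 3 form of the loop above; behavior unchanged
    for chname, clf in self.classifiers.items():
        try:
            cellanalyzer.classify_objects(clf, chname)
        except KeyError:
            pass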
Example #3
    def _analyze(self, cellanalyzer):
        super(PositionPicker, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):
            if self.is_aborted():
                return 0
            else:
                txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                       len(self._frames))
                self.update_status({'progress': self._frames.index(frame)+1,
                                   'text': txt,
                                   'interval': stopwatch.interim()})

            stopwatch.reset(start=True)
            # initTimepoint clears channel_registry
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)
            image = cellanalyzer.collectObjects(self.plate_id,
                                                self.position,
                                                self.sample_readers,
                                                self.learner,
                                                byTime=True)

            if image is not None:
                n_images += 1
                msg = 'PL %s - P %s - T %05d' %(self.plate_id, self.position,
                                                frame)
                self.set_image(image, msg)
Example #4
    def _analyze(self, cellanalyzer):

        thread = QThread.currentThread()
        imax = sum([len(n) for n in self.sample_positions.values()])
        thread.statusUpdate(min=0, max=imax)

        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            thread.interruption_point()
            txt = '%s, %s, T %d, (%d/%d)' \
                  %(self.plate_id, self.position,
                    frame, self._frames.index(frame)+1, len(self._frames))

            thread.statusUpdate(meta="Classifier training: ",
                                text=txt, interval=stopwatch.interim(), increment=True)

            stopwatch.reset(start=True)
            # initTimepoint clears channel_registry
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.collectObjects(self.plate_id, self.position, self.sample_readers,
                                        self.learner)
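Every example builds the same Coordinate from plate, position, the frame list and the deduplicated channel names of ch_mapping. The real class is cecog's; as a mental model only, it behaves roughly like a record (the field names here are assumptions):

    from collections import namedtuple

    # hypothetical stand-in for illustration, not the cecog class
    Coordinate = namedtuple('Coordinate',
                            ['plate', 'position', 'time', 'channel'])

    crd = Coordinate('plate01', '0038', [1, 2, 3], ['rfp', 'gfp'])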
Example #5
File: position.py Project: imcf/cecog
    def _analyze(self, cellanalyzer):
        super(PositionAnalyzer, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            if self.is_aborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                       len(self._frames))
                self.update_status({'progress': self._frames.index(frame)+1,
                                    'text': txt,
                                    'interval': stopwatch.interim()})

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()
            n_images += 1
            images = []

            if self.settings.get('Processing', 'tracking'):
                region = self.settings.get('Tracking', 'tracking_regionname')
                samples = self.timeholder[frame][PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if self.settings.get('Tracking', 'tracking_visualization'):
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    nframes = self.settings.get('Tracking', 'tracking_visualize_track_length')
                    radius = self.settings.get('Tracking', 'tracking_centroid_radius')
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, nframes, radius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            for clf in self.classifiers.itervalues():
                cellanalyzer.classify_objects(clf)

            ##############################################################
            # FIXME - part for browser
            if self._myhack is not None:
                self.render_browser(cellanalyzer)
            ##############################################################

            self.settings.set_section('General')
            self.render_classification_images(cellanalyzer, images, frame)
            self.render_contour_images(cellanalyzer, images, frame)

            if self.settings.get('Output', 'rendering_channel_gallery'):
                self.render_channel_gallery(cellanalyzer, frame)

            if self.settings.get('Output', 'rendering_labels_discwrite'):
                cellanalyzer.exportLabelImages(self._labels_dir)

            self.logger.info(" - Frame %d, duration (ms): %3d" \
                                 %(frame, stopwatch.interim()*1000))
            cellanalyzer.purge(features=self.export_features)

        return n_images
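Note that the progress reporting above calls self._frames.index(frame) twice per iteration, an O(n) list scan each time. If one assumes the image container yields frames in the order of self._frames, enumerate() avoids the rescans; a sketch of the loop header under that assumption:

    # sketch only; assumes frames arrive in self._frames order
    for i, (frame, channels) in enumerate(
            self._imagecontainer(crd, interrupt_channel=True,
                                 interrupt_zslice=True), start=1):
        txt = 'T %d (%d/%d)' % (frame, i, len(self._frames))
        self.update_status({'progress': i, 'text': txt,
                            'interval': stopwatch.interim()})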
Example #6
    def _analyze(self, cellanalyzer):
        super(PositionAnalyzer, self)._analyze()
        n_images = 0
        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position,
                         self._frames, list(set(self.ch_mapping.values())))

        minimal_effort = (self.settings.get('Output', 'minimal_effort') and
                          self.settings.get('Output', 'hdf5_reuse'))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            if self.is_aborted():
                self.clear()
                return 0
            else:
                txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                       len(self._frames))
                self.update_status({'progress': self._frames.index(frame)+1,
                                    'text': txt,
                                    'interval': stopwatch.interim()})

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()

            self.logger.info(" - Frame %d, cellanalyzer.process (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            n_images += 1
            images = []

            if self.settings('Processing', 'tracking'):
                region = self.settings('Tracking', 'region')
                samples = self.timeholder[frame][PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if self.settings('Tracking', 'tracking_visualization'):
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    nframes = self.settings('Tracking', 'tracking_visualize_track_length')
                    radius = self.settings('Tracking', 'tracking_centroid_radius')
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, nframes, radius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            self.logger.info(" - Frame %d, Tracking (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            # can't cluster on a per frame basis
            if self.settings("EventSelection", "supervised_event_selection"):
                for clf in self.classifiers.itervalues():
                    cellanalyzer.classify_objects(clf)

            self.logger.info(" - Frame %d, Classification (ms): %3d" \
                             % (frame, stopwatch.interval()*1000))

            self.settings.set_section('General')
            # we want to emit all images at once
            if not minimal_effort:
                imgs = {}
                imgs.update(self.render_classification_images(cellanalyzer, images, frame))
                imgs.update(self.render_contour_images(cellanalyzer, images, frame))
                msg = 'PL %s - P %s - T %05d' %(self.plate_id, self.position, frame)
                self.set_image(imgs, msg, 50)

                if self.settings('Output', 'rendering_channel_gallery'):
                    self.render_channel_gallery(cellanalyzer, frame)

                if self.settings('Output', 'rendering_labels_discwrite'):
                    cellanalyzer.exportLabelImages(self._labels_dir)

            cellanalyzer.purge(features=self.export_features)
            self.logger.info(" - Frame %d, rest (ms): %3d" \
                                 %(frame, stopwatch.interval()*1000))
            self.logger.info(" - Frame %d, duration (ms): %3d" \
                                 %(frame, stopwatch.interim()*1000))


        return n_images
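Example #5 reads options through settings.get('Section', 'option'), while this example mostly calls settings('Section', 'option') directly; the two access styles belong to different cecog revisions. A tolerant stand-in that accepts both (illustrative only, not cecog code):

    # illustrative only: a settings object supporting both the
    # .get('Section', 'option') and the callable access style
    class Settings(object):
        def __init__(self, data):
            self._data = data  # {'Section': {'option': value}}

        def get(self, section, option):
            return self._data[section][option]

        __call__ = get

    settings = Settings({'Processing': {'tracking': True}})
    assert settings.get('Processing', 'tracking') == settings('Processing', 'tracking')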
Example #7
File: training.py Project: imcf/cecog
    def _run(self):
        c_begin, c_end, c_step = -5, 15, 2
        c_info = c_begin, c_end, c_step

        g_begin, g_end, g_step = -15, 3, 2
        g_info = g_begin, g_end, g_step

        status = {
            'stage': 0,
            'text': '',
            'min': 0,
            'max': 1,
            'meta': 'Classifier training:',
            'item_name': 'round',
            'progress': 0
        }
        self.update_status(status)

        i = 0
        best_accuracy = -1
        best_log2c = None
        best_log2g = None
        best_conf = None
        is_abort = False
        stopwatch = StopWatch(start=True)

        if self._learner.has_nan_features():
            self._learner.filter_nans(apply=True)

        t0 = time.time()
        for info in self._learner.iterGridSearchSVM(c_info=c_info,
                                                    g_info=g_info):
            n, log2c, log2g, conf = info
            status.update({'min': 1,
                           'max': n,
                           'progress': i+1,
                           'text': 'log2(C)=%d, log2(g)=%d' % \
                               (log2c, log2g),
                           'interval': stopwatch.interim(),
                           })
            self.update_status(status, stime=50)
            stopwatch.reset(start=True)
            i += 1
            accuracy = conf.ac_sample
            if accuracy > best_accuracy:
                best_accuracy = accuracy
                best_log2c = log2c
                best_log2g = log2g
                best_conf = conf
                self.conf_result.emit(log2c, log2g, conf)

            if self.is_aborted():
                is_abort = True
                break
        # overwrite only if grid-search was not aborted by the user
        if not is_abort:
            self._learner.train(2**best_log2c, 2**best_log2g)
            self._learner.exportConfusion(best_log2c, best_log2g, best_conf)
            self._learner.exportRanges()

            # FIXME: in case the meta-data (colors, names, zero-insert) changed
            #        the ARFF file has to be written again
            #        -> better store meta-data outside ARFF
            self._learner.exportToArff()
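The (begin, end, step) triples span the classic libsvm-style grid in log2 space: C from 2**-5 to 2**15 and gamma from 2**-15 to 2**3, both in steps of 2 in the exponent. Conceptually the iterator walks that grid and reports a cross-validation accuracy per point; a bare-bones equivalent, with a hypothetical evaluate() standing in for the cross-validation:

    # conceptual sketch of the log2 grid walked by iterGridSearchSVM;
    # evaluate(C, gamma) is a hypothetical cross-validation callback
    def grid_search(c_info, g_info, evaluate):
        c_begin, c_end, c_step = c_info
        g_begin, g_end, g_step = g_info
        best = (-1.0, None, None)
        for log2c in range(c_begin, c_end + 1, c_step):
            for log2g in range(g_begin, g_end + 1, g_step):
                accuracy = evaluate(2**log2c, 2**log2g)
                if accuracy > best[0]:
                    best = (accuracy, log2c, log2g)
        return best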
Example #8
    def _build_dimension_lookup(self):
        s = StopWatch(start=True)
        lookup = {}
        has_xy = False
        positions = []
        times = []
        channels = []
        zslices = []

        dimension_items = self._get_dimension_items()
        print("Get dimensions: %s" %s.interim())
        s.reset(start=True)

        # if use_frame_indices is set in the ini file,
        # we make a first scan of the items and determine for each position
        # the list of timepoints.
        # Then, we can assign to each position a dictionary that assigns to each timepoint
        # its index (after ordering).
        if self.use_frame_indices:
            #all_times = list(set([int(item[DIMENSION_NAME_TIME]) if DIMENSION_NAME_TIME in item else 0
            #                      for item in dimension_items]))
            #all_times.sort()
            first_pass = {}
            for item in dimension_items:
                position = item[DIMENSION_NAME_POSITION]
                if not position in first_pass:
                    first_pass[position] = []

                if DIMENSION_NAME_TIME in item:
                    time_val = int(item[DIMENSION_NAME_TIME])
                else:
                    time_val = 0
                first_pass[position].append(time_val)

            time_index_correspondence = {}
            for pos in first_pass.keys():
                first_pass[pos].sort()
                time_index_correspondence[pos] = dict(zip(first_pass[pos],
                                                          range(len(first_pass[pos]))))

        for item in dimension_items:
            # import image info only once
            if not has_xy:
                has_xy = True
                info = ccore.ImageImportInfo(os.path.join(self.path,
                                                          item['filename']))
                self.meta_data.set_image_info(info)
                self.has_multi_images = False #info.images > 1

            # position
            position = item[DIMENSION_NAME_POSITION]
            if not position in lookup:
                lookup[position] = {}

            # time
            if DIMENSION_NAME_TIME in item:
                time_from_filename = int(item[DIMENSION_NAME_TIME])
            else:
                time_from_filename = 0
                item[DIMENSION_NAME_TIME] = str(time_from_filename)

            if self.use_frame_indices:
                time = time_index_correspondence[position][time_from_filename]
            else:
                time = time_from_filename
            if not time in lookup[position]:
                lookup[position][time] = {}

            # channels
            if DIMENSION_NAME_CHANNEL in item:
                channel = item[DIMENSION_NAME_CHANNEL]
            else:
                channel = '1'
                item[DIMENSION_NAME_CHANNEL] = channel
            if not channel in lookup[position][time]:
                lookup[position][time][channel] = {}

            # leave zslice optional.
            # in case of multi-images it must not be defined
            if DIMENSION_NAME_ZSLICE in item:
                zslice = item[DIMENSION_NAME_ZSLICE]
            else:
                zslice = 0
                item[DIMENSION_NAME_ZSLICE] = zslice
            if zslice == '':
                zslice = None
            if zslice is not None:
                zslice = int(zslice)
            if not zslice in lookup[position][time][channel]:
                lookup[position][time][channel][zslice] = item['filename']

            # allow to read timestamps from file if not present
            if META_INFO_TIMESTAMP in item:
                timestamp = float(item[META_INFO_TIMESTAMP])
                self.meta_data.append_absolute_time(position, time, timestamp)
            elif self.timestamps_from_file in ['mtime', 'ctime']:
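                # note: on Linux, getctime() returns the inode-change time,
                # not the file creation time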
                filename_full = os.path.join(self.path, item['filename'])
                if self.timestamps_from_file == 'mtime':
                    timestamp = os.path.getmtime(filename_full)
                else:
                    timestamp = os.path.getctime(filename_full)
                item[META_INFO_TIMESTAMP] = timestamp
                self.meta_data.append_absolute_time(position, time, timestamp)

            if META_INFO_WELL in item:
                well = item[META_INFO_WELL]
                subwell = item.get(META_INFO_SUBWELL, None)
                self.meta_data.append_well_subwell_info(position, well, subwell)

            if (self.has_multi_images and
                self.multi_image == self.MULTIIMAGE_USE_ZSLICE):
                if zslice is not None:
                    raise ValueError('Multi-image assigned for zslice conflicts'
                                     ' with zslice token in filename!')
                zslices.extend(range(1,info.images+1))
            else:
                zslices.append(zslice)

            positions.append(position)
            times.append(time)
            channels.append(channel)

        self.meta_data.positions = tuple(sorted(set(positions)))

        # ensure that all items of one dimension have the same length
        times = set(times)
        channels = set(channels)
        zslices = set(zslices)
        # find overall valid number of frames
        for p in lookup:
            times = times.intersection(lookup[p].keys())
        # find overall valid channels/zslices based on overall valid frames
        for p in lookup:
            for t in times:
                channels = channels.intersection(lookup[p][t].keys())
                for c in channels:
                    zslices = zslices.intersection(lookup[p][t][c].keys())
        self.meta_data.times = sorted(times)
        self.meta_data.channels = sorted(channels)
        self.meta_data.zslices = sorted(zslices)
        self.meta_data.image_files = len(dimension_items)

        print('Build time: %s' % s.stop())
        return lookup
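For reference, the returned lookup is a nested mapping position -> time -> channel -> zslice -> filename, and the first-pass block above merely densifies raw timepoints into per-position indices. Restated as a standalone helper:

    # standalone restatement of the per-position time-index mapping
    def time_index_correspondence(times_per_position):
        # {'P1': [12, 3, 7]} -> {'P1': {3: 0, 7: 1, 12: 2}}
        mapping = {}
        for pos, times in times_per_position.items():
            mapping[pos] = {t: i for i, t in enumerate(sorted(times))}
        return mapping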
Example #9
    def _build_dimension_lookup(self):
        s = StopWatch(start=True)
        lookup = {}
        has_xy = False
        positions = []
        times = []
        channels = []
        zslices = []

        dimension_items = self._get_dimension_items()
        print("Get dimensions: %s" % s.interim())
        s.reset(start=True)

        # if use_frame_indices is set in the ini file,
        # we make a first scan of the items and determine for each position
        # the list of timepoints.
        # Then, we can assign to each position a dictionary that assigns to each timepoint
        # its index (after ordering).
        if self.use_frame_indices:
            #all_times = list(set([int(item[Dimensions.Time]) if Dimensions.Time in item else 0
            #                      for item in dimension_items]))
            #all_times.sort()
            first_pass = {}
            for item in dimension_items:
                position = item[Dimensions.Position]
                if not position in first_pass:
                    first_pass[position] = []

                if Dimensions.Time in item:
                    time_val = int(item[Dimensions.Time])
                else:
                    time_val = 0
                first_pass[position].append(time_val)

            time_index_correspondence = {}
            for pos in first_pass.keys():
                first_pass[pos].sort()
                time_index_correspondence[pos] = dict(
                    zip(first_pass[pos],
                        range(len(first_pass[pos]))))

        for item in dimension_items:
            # import image info only once
            if not has_xy:
                has_xy = True
                info = ccore.ImageImportInfo(
                    os.path.join(self.path, item['filename']))

                self.meta_data.set_image_info(info)
                self.has_multi_images = False  #info.images > 1

            # position
            position = item[Dimensions.Position]
            if not position in lookup:
                lookup[position] = {}

            # time
            if Dimensions.Time in item:
                time_from_filename = int(item[Dimensions.Time])
            else:
                time_from_filename = 0
                item[Dimensions.Time] = str(time_from_filename)

            if self.use_frame_indices:
                time = time_index_correspondence[position][time_from_filename]
            else:
                time = time_from_filename
            if not time in lookup[position]:
                lookup[position][time] = {}

            # channels
            if Dimensions.Channel in item:
                channel = item[Dimensions.Channel]
            else:
                channel = '1'
                item[Dimensions.Channel] = channel
            if not channel in lookup[position][time]:
                lookup[position][time][channel] = {}

            # leave zslice optional.
            # in case of multi-images it must not be defined
            if Dimensions.ZSlice in item:
                zslice = item[Dimensions.ZSlice]
            else:
                zslice = 0
                item[Dimensions.ZSlice] = zslice
            if zslice == '':
                zslice = None
            if zslice is not None:
                zslice = int(zslice)
            if not zslice in lookup[position][time][channel]:
                lookup[position][time][channel][zslice] = item['filename']

            # allow to read timestamps from file if not present
            if MetaInfo.Timestamp in item:
                timestamp = float(item[MetaInfo.Timestamp])
                self.meta_data.append_absolute_time(position, time, timestamp)
            elif self.timestamps_from_file in ['mtime', 'ctime']:
                filename_full = os.path.join(self.path, item['filename'])
                if self.timestamps_from_file == 'mtime':
                    timestamp = os.path.getmtime(filename_full)
                else:
                    timestamp = os.path.getctime(filename_full)
                item[MetaInfo.Timestamp] = timestamp
                self.meta_data.append_absolute_time(position, time, timestamp)

            if MetaInfo.Well in item:
                well = item[MetaInfo.Well]
                subwell = item.get(MetaInfo.Subwell, None)
                self.meta_data.append_well_subwell_info(
                    position, well, subwell)

            if (self.has_multi_images
                    and self.multi_image == self.MULTIIMAGE_USE_ZSLICE):
                if zslice is not None:
                    raise ValueError(
                        'Multi-image assigned for zslice conflicts'
                        ' with zslice token in filename!')
                zslices.extend(range(1, info.images + 1))
            else:
                zslices.append(zslice)

            positions.append(position)
            times.append(time)
            channels.append(channel)

        self.meta_data.positions = tuple(sorted(set(positions)))

        # ensure that all items of one dimension have the same length
        times = set(times)
        channels = set(channels)
        zslices = set(zslices)
        # find overall valid number of frames
        for p in lookup:
            times = times.intersection(lookup[p].keys())
        # find overall valid channels/zslices based on overall valid frames
        for p in lookup:
            for t in times:
                channels = channels.intersection(lookup[p][t].keys())
                for c in channels:
                    zslices = zslices.intersection(lookup[p][t][c].keys())
        self.meta_data.times = sorted(times)
        self.meta_data.channels = sorted(channels)
        self.meta_data.zslices = sorted(zslices)
        self.meta_data.image_files = len(dimension_items)

        print('Build time: %s' % s.stop())
        return lookup
Example #10
    def _analyze(self, cellanalyzer):

        thread = QThread.currentThread()

        stopwatch = StopWatch(start=True)
        crd = Coordinate(self.plate_id, self.position, self._frames,
                         list(set(self.ch_mapping.values())))

        for frame, channels in self._imagecontainer( \
            crd, interrupt_channel=True, interrupt_zslice=True):

            self.interruptionPoint()
            txt = '%s, %s, T %d (%d/%d)' \
                  %(self.plate_id, self.position, frame,
                    self._frames.index(frame)+1, len(self._frames))

            self.statusUpdate(text=txt,
                              interval=stopwatch.interim(),
                              increment=True)

            stopwatch.reset(start=True)
            cellanalyzer.initTimepoint(frame)
            self.register_channels(cellanalyzer, channels)

            cellanalyzer.process()

            self.logger.debug(" - Frame %d, cellanalyzer.process (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            images = []

            if self.settings('Processing', 'tracking'):
                apc = AppPreferences()
                region = self.settings('Tracking', 'region')
                samples = self.timeholder[frame][
                    PrimaryChannel.NAME].get_region(region)
                self._tracker.track_next_frame(frame, samples)

                if apc.display_tracks:
                    size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                    img_conn, img_split = self._tracker.render_tracks(
                        frame, size, apc.track_length, apc.cradius)
                    images += [(img_conn, '#FFFF00', 1.0),
                               (img_split, '#00FFFF', 1.0)]

            self.logger.debug(" - Frame %d, Tracking (ms): %3d" \
                             %(frame, stopwatch.interval()*1000))

            # can't cluster on a per frame basis
            if self.settings("EventSelection", "supervised_event_selection"):
                for channel, clf in self.classifiers.iteritems():
                    cellanalyzer.classify_objects(clf, channel)

            self.logger.debug(" - Frame %d, Classification (ms): %3d" \
                             % (frame, stopwatch.interval()*1000))

            self.settings.set_section('General')
            # we want to emit all images at once

            imgs = {}
            imgs.update(
                self.render_classification_images(cellanalyzer, images, frame))
            imgs.update(self.render_contour_images(cellanalyzer, images,
                                                   frame))
            msg = 'PL %s - P %s - T %05d' % (self.plate_id, self.position,
                                             frame)
            self.setImage(imgs, msg, 50)

            cellanalyzer.purge(features=self.export_features)
            self.logger.debug(" - Frame %d, duration (ms): %3d" \
                              %(frame, stopwatch.interim()*1000))
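self.interruptionPoint() gives the worker a cooperative cancel: the GUI requests an interruption and the loop polls for it once per frame. With modern Qt (5.2 or later) the idea can be sketched as below; that the method raises to unwind out of _analyze is an assumption, not taken from the cecog source:

    # sketch of a cooperative interruption point (PyQt5, Qt >= 5.2);
    # raising here is assumed to unwind the frame loop
    from PyQt5.QtCore import QThread

    class AnalysisThread(QThread):
        def interruption_point(self):
            # the GUI side calls thread.requestInterruption()
            if self.isInterruptionRequested():
                raise RuntimeError('analysis aborted by user')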