def track_features(self, timeholder, visitor_data, channel_regions,
                   position, outdir):
    """Export one feature table per event, channel and region.

    The output directory is wiped and recreated before any file is
    written.
    """
    shutil.rmtree(outdir, True)
    makedirs(outdir)
    skip_keys = ('_full_tracks', '_current_branch')
    for tracks in visitor_data.itervalues():
        for startid, event_data in tracks.iteritems():
            # bookkeeping entries are not real events
            if startid in skip_keys:
                continue
            for chname, region in channel_regions.iteritems():
                for region_name, feature_names in region.iteritems():
                    try:
                        frame, obj_label, branch = \
                            Tracker.split_nodeid(startid)
                    except ValueError:
                        # node id without a branch component
                        frame, obj_label = Tracker.split_nodeid(startid)
                        branch = 1
                    fname = ('features__P%s__T%05d__O%04d__B%02d__C%s__R%s.txt'
                             % (position, frame, obj_label, branch,
                                chname, region_name))
                    self._data_per_channel(timeholder, event_data,
                                           join(outdir, fname), chname,
                                           region_name, feature_names,
                                           position)
def _makedirs(self):
    """Create the full output directory layout for one position."""
    assert isinstance(self.position, basestring)
    assert isinstance(self._out_dir, basestring)
    self._analyzed_dir = join(self._out_dir, "analyzed")
    # without time-lapse data there is no per-position subdirectory
    if self.has_timelapse:
        self._position_dir = join(self._analyzed_dir, self.position)
    else:
        self._position_dir = self._analyzed_dir
    targets = (
        self._analyzed_dir,
        join(self._out_dir, "log"),
        join(self._out_dir, "log", "_finished"),
        join(self._out_dir, "hdf5"),
        join(self._out_dir, "plots"),
        join(self._position_dir, "statistics"),
        join(self._position_dir, "gallery"),
        join(self._position_dir, "channel_gallery"),
        join(self._position_dir, "images"),
        join(self._position_dir, "images", "_labels"),
    )
    for target in targets:
        try:
            makedirs(target)
        except os.error:  # no permissions
            self.logger.error("mkdir %s: failed" % target)
        else:
            self.logger.info("mkdir %s: ok" % target)
        # expose e.g. "<...>/images/_labels" as self._labels_dir
        attr = "_%s_dir" % basename(target.lower()).strip("_")
        setattr(self, attr, target)
def _on_saveas_classifier(self, path=None):
    """Save the classifier (class definitions + annotations) to a directory.

    Prompts for the target directory when *path* is not given. Existing
    annotation XML files are copied into a time-stamped backup folder and
    removed before the current annotations are exported.
    """
    learner = self._learner
    if path is None:
        path = os.path.expanduser("~")
        result = QFileDialog.getExistingDirectory(
            self, 'Save to classifier directory', os.path.abspath(path))
    else:
        result = path
    if result:
        if self._save_classifier(result):
            try:
                path2 = learner.annotations_dir
                filenames = os.listdir(path2)
                filenames = [os.path.join(path2, f) for f in filenames
                             if os.path.isfile(os.path.join(path2, f)) and
                             os.path.splitext(f)[1].lower() == '.xml']
                fmt = time.strftime('_backup__%Y%m%d_%H%M%S')
                path_backup = os.path.join(path2, fmt)
                makedirs(path_backup)
                # backup-then-delete keeps the original timestamps (copy2)
                for filename in filenames:
                    shutil.copy2(filename, path_backup)
                    os.remove(filename)
                self._annotations.export_to_xml(path2, learner.class_labels,
                                                self._imagecontainer)
            # FIX: was a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt
            except Exception:
                exception(self, "Problems saving annotation data...")
            else:
                information(self, "Classifier successfully saved",
                            "Class definitions and annotations "
                            "successfully saved to '%s'." % result)
            finally:
                # restore the browser's plate selection in any case
                coord = self.browser.get_coordinate()
                self._imagecontainer.set_plate(coord.plate)
def submit_job(self, job_type, settings, path_out, emails, nr_items=1, batch_size=1, version=CECOG_DEFAULT_VERSION): path_out = str(path_out.replace('\\', '/')) settings = settings.replace('\\', '/') path_out = os.path.normpath(path_out) path_out_settings = os.path.join(path_out, 'settings') makedirs(path_out_settings) filename_settings = os.path.join(path_out_settings, 'cecog_settings.conf') f = file(filename_settings, 'w') f.write(settings) f.close() args = ['-s', filename_settings] # adjust the number of job items according to the batch size nr_items = int(nr_items / batch_size) # for modulo > 0 add one more item for the rest if nr_items % batch_size > 0: nr_items += 1 is_bulk_job = True #if nr_items > 1 else False jt = self._session.createJobTemplate() jt = cecog_job_template(jt, path_out, args, emails, version, batch_size, is_bulk_job) if is_bulk_job: job_id = self._session.runBulkJobs(jt, 1, nr_items, 1) else: job_id = self._session.runJob(jt) print job_id return job_id
def _makedirs(self):
    """Create the output directory layout (incl. tc3) for one position."""
    assert isinstance(self.position, basestring)
    assert isinstance(self._out_dir, basestring)
    self._analyzed_dir = join(self._out_dir, "analyzed")
    # only time-lapse data gets a per-position subdirectory
    if self.has_timelapse:
        self._position_dir = join(self._analyzed_dir, self.position)
    else:
        self._position_dir = self._analyzed_dir
    targets = (
        self._analyzed_dir,
        join(self._out_dir, "log"),
        join(self._out_dir, "log", "_finished"),
        join(self._out_dir, "hdf5"),
        join(self._out_dir, "plots"),
        join(self._position_dir, "statistics"),
        join(self._position_dir, "tc3"),
        join(self._position_dir, "gallery"),
        join(self._position_dir, "channel_gallery"),
        join(self._position_dir, "images"),
        join(self._position_dir, "images", "_labels"),
    )
    for target in targets:
        try:
            makedirs(target)
        except os.error:  # no permissions
            self.logger.error("mkdir %s: failed" % target)
        else:
            self.logger.info("mkdir %s: ok" % target)
        attr = "_%s_dir" % basename(target.lower()).strip("_")
        setattr(self, attr, target)
def draw_annotation_images(self, plate, training_set, container, learner,
                           rid=""):
    """Export an image/mask pair per annotated object and mark it.

    For every object of the training set a sample image and mask are
    written to the per-class sample directory and the object is marked
    (colored contour + filled center dot) in the container's RGB image.
    """
    # one output directory per class name
    cldir = dict([(cname, join(learner.samples_dir, cname)) \
                  for cname in learner.class_names.values()])
    # create dir per class name
    for dir_ in cldir.values():
        makedirs(dir_)
    for obj in training_set.itervalues():
        rgb_value = ccore.RGBValue(*hex2rgb(obj.strHexColor))
        # base file name encodes plate, position, time and object center
        file_ = 'PL%s___P%s___T%05d___X%04d___Y%04d' \
                % (plate, self.P, self._iT,
                   obj.oCenterAbs[0], obj.oCenterAbs[1])
        obj.file = file_
        # '%s' placeholder is filled with "img" / "msk" below
        file_ = join(cldir[obj.strClassName],
                     '%s___%s.png' % (file_, rid + "_%s"))
        container.exportObject(obj.iId, file_ % "img", file_ % "msk")
        container.markObjects([obj.iId], rgb_value, False, True)
        # dot of radius 3 at the object center in the class color
        ccore.drawFilledCircle(ccore.Diff2D(*obj.oCenterAbs), 3,
                               container.img_rgb, rgb_value)
def full_tracks(self, timeholder, visitor_data, position, outdir):
    """Export one text file per full track.

    Each file contains three header lines (channel, region, feature
    names) followed by one row per track node with class label, class
    name, center coordinates (primary channel only) and feature values.
    The output directory is removed and recreated first.
    """
    shutil.rmtree(outdir, True)
    makedirs(outdir)
    for start_id, data in visitor_data.iteritems():
        # FIX: the full-track list is stored under the key '_full_tracks'
        # (the same key track_features() skips and the one used by the
        # sibling implementation), not '_full'
        for idx, track in enumerate(data['_full_tracks']):
            has_header = False
            line1 = []
            line2 = []
            line3 = []
            frame, obj_label = Tracker.split_nodeid(start_id)[:2]
            filename = 'P%s__T%05d__O%04d__B%02d.txt' \
                       % (position, frame, obj_label, idx+1)
            f = file(join(outdir, filename), 'w')
            for node_id in track:
                frame, obj_id = Tracker.split_nodeid(node_id)
                coordinate = Coordinate(position=position, time=frame)
                prefix = [frame,
                          self.meta_data.get_timestamp_relative(coordinate),
                          obj_id]
                prefix_names = ['frame', 'time', 'objID']
                items = []
                for channel in timeholder[frame].values():
                    for region_id in channel.region_names():
                        region = channel.get_region(region_id)
                        if obj_id in region:
                            flkp = self._map_feature_names(
                                region.feature_names)
                            if not has_header:
                                keys = ['classLabel', 'className']
                                if channel.NAME == 'Primary':
                                    keys += ['centerX', 'centerY']
                                keys += flkp.keys()
                                line1 += [channel.NAME.upper()] * len(keys)
                                line2 += [str(region_id)] * len(keys)
                                line3 += keys
                            obj = region[obj_id]
                            features = region.features_by_name(
                                obj_id, flkp.values())
                            # None -> empty cell
                            values = [x if not x is None else ''
                                      for x in [obj.iLabel, obj.strClassName]]
                            if channel.NAME == 'Primary':
                                values += [obj.oCenterAbs[0],
                                           obj.oCenterAbs[1]]
                            values += list(features)
                            items.extend(values)
                if not has_header:
                    # write the three header lines once, padded for the
                    # prefix columns
                    has_header = True
                    prefix_str = [''] * len(prefix)
                    line1 = prefix_str + line1
                    line2 = prefix_str + line2
                    line3 = prefix_names + line3
                    f.write('%s\n' % CSVParams.sep.join(line1))
                    f.write('%s\n' % CSVParams.sep.join(line2))
                    f.write('%s\n' % CSVParams.sep.join(line3))
                f.write('%s\n' % CSVParams.sep.join(
                    [str(i) for i in prefix + items]))
            f.close()
def exportLabelImages(self, pathOut, compression='LZW'):
    """Write one label image (TIFF) per processed channel and region."""
    # no segmentaion in virtual channels --> no label images
    for channel in self.proc_channels.itervalues():
        cid = channel.strChannelId
        for region, container in channel.containers.iteritems():
            target = join(pathOut, cid, region)
            makedirs(target)
            fname = join(target, 'P%s_T%05d.tif' % (self.P, self._iT))
            container.exportLabelImage(fname, compression)
def exportLabelImages(self, pathOut, compression='LZW'):
    """Write one label image (TIFF) per processed channel and region."""
    # no segmentaion in virtual channels --> no label images
    for channel in self.proc_channels.itervalues():
        cid = channel.strChannelId
        for region, container in channel.containers.iteritems():
            target = join(pathOut, cid, region)
            makedirs(target)
            fname = join(target, 'P%s_T%05d.tif' % (self.P, self._iT))
            container.exportLabelImage(fname, compression)
def clf_dir(self, path):
    """Set the classifier base directory and derive its sub-directories.

    Raises IOError when *path* does not exist. For every name in
    self._subdirs a '<name>_dir' attribute is set and the directory is
    created.
    """
    if not isdir(path):
        raise IOError("Path to classifier '%s' does not exist." % path)
    self._clf_dir = path
    for name in self._subdirs:
        target = join(path, name)
        setattr(self, "%s_dir" % name, target)
        makedirs(target)
def _get_path_out(self, path, prefix):
    """Return (and create) the output directory for the grouping mode."""
    # suffix reflects the group-by setting
    if self._settings.get2('groupby_oligoid'):
        tag = 'byoligo'
    elif self._settings.get2('groupby_genesymbol'):
        tag = 'bysymbol'
    else:
        tag = 'bypos'
    path_out = join(path, '%s_%s' % (prefix, tag))
    makedirs(path_out)
    return path_out
def write_center_tables(self):
    """Write a csv file with frame number and bounding boxes."""
    info_dir = join(self._outdir, "_info_")
    makedirs(info_dir)
    header = ["Frame", "ObjId", "centerX", "centerY"]
    for startid, centers in self.centers.iteritems():
        name = "P%s__T%05d__O%04d__B%02d.csv" \
               % ((self.position,) + self._split_nodeid(startid))
        with open(join(info_dir, name), "w") as fp:
            writer = csv.writer(fp, delimiter=",")
            writer.writerow(header)
            for frame, objid, center in centers:
                writer.writerow((frame, objid) + center)
def write_center_tables(self):
    """Write a csv file with frame number and bounding boxes."""
    info_dir = join(self._outdir, '_info_')
    makedirs(info_dir)
    header = ['Frame', 'ObjId', 'centerX', 'centerY']
    for startid, centers in self.centers.iteritems():
        name = 'P%s__T%05d__O%04d__B%02d.csv' \
               % ((self.position,) + self._split_nodeid(startid))
        with open(join(info_dir, name), 'w') as fp:
            writer = csv.writer(fp, delimiter=',')
            writer.writerow(header)
            for frame, objid, center in centers:
                writer.writerow((frame, objid) + center)
def _makedirs(self):
    """Make output directories (analyzed, dumps and log)"""
    for name in ("analyzed", "hdf5", "plots", "log"):
        path = join(self._out_dir, name)
        try:
            makedirs(path)
        except os.error:  # no permissions
            self.logger.error("mkdir %s: failed" % path)
        else:
            self.logger.info("mkdir %s: ok" % path)
        # expose e.g. "analyzed" as self._analyzed_dir
        setattr(self, "_%s_dir" % basename(name).lower(), path)
def _makedirs(self):
    """Make output directories (analyzed, dumps and log)"""
    for name in ("analyzed", "hdf5", "plots", "log"):
        path = join(self._out_dir, name)
        try:
            makedirs(path)
        except os.error:  # no permissions
            self.logger.error("mkdir %s: failed" % path)
        else:
            self.logger.info("mkdir %s: ok" % path)
        # expose e.g. "plots" as self._plots_dir
        setattr(self, "_%s_dir" % basename(name).lower(), path)
def _makedirs(self):
    """Create the cellh5 (and optionally log) output directories."""
    targets = [join(self._outdir, "cellh5")]
    # the log directory is only wanted when logging to files is enabled
    if AppPreferences().write_logs:
        targets.append(join(self._outdir, "log"))
    for target in targets:
        try:
            makedirs(target)
        except os.error:  # no permissions
            self.logger.error("mkdir %s: failed" % target)
        else:
            self.logger.info("mkdir %s: ok" % target)
        attr = "_%s_dir" % basename(target.lower()).strip("_")
        setattr(self, attr, target)
def _makedirs(self):
    """Create the cellh5 (and optionally log) output directories."""
    targets = [join(self._outdir, "cellh5")]
    # log directory only when file logging is switched on
    if AppPreferences().write_logs:
        targets.append(join(self._outdir, "log"))
    for target in targets:
        try:
            makedirs(target)
        except os.error:  # no permissions
            self.logger.error("mkdir %s: failed" % target)
        else:
            self.logger.info("mkdir %s: ok" % target)
        attr = "_%s_dir" % basename(target.lower()).strip("_")
        setattr(self, attr, target)
def cecog_job_template(jt, path_out, args, emails, version, batch_size=1, is_bulk_job=False): job_name = 'cecog_batch_analyzer' env_variables = ['PATH', 'LD_LIBRARY_PATH'] base_path = os.path.join(CECOG_VERSIONS_PATH, version) batch_path = os.path.join(base_path, 'pysrc', 'cecog', 'batch') jt.jobName = job_name jt.workingDirectory = batch_path print jt.workingDirectory # I want the almost the same environment as for the gateway! if os.environ.has_key('PYTHONPATH'): os.environ["PYTHONPATH"] = os.path.join(base_path, "pysrc")+os.pathsep+ \ os.environ["PYTHONPATH"] else: os.putenv("PYTHONPATH", os.path.join(base_path, "pysrc")) if os.environ.has_key('PYTHON_BIN'): pybin = os.environ['PYTHON_BIN'] else: pybin = 'python' jt.jobEnvironment = os.environ print jt.jobEnvironment jt.remoteCommand = pybin print jt.remoteCommand jt.args = ['cecog_batch.py'] + args jt.joinFiles = True jt.email = emails jt.nativeSpecification = '-m bea -q gerlich.q -P cellcognition' path_out_cluster = os.path.join(path_out, 'log_cluster') makedirs(path_out_cluster) path_out_cluster = ':' + path_out_cluster if is_bulk_job: jt.outputPath = path_out_cluster # FIXME: another DRMAA hack: the PARAMETRIC_INDEX # is NOT resolved in args! jt.args += ['--cluster_index', 'SGE_TASK_ID', '--batch_size', str(batch_size)] else: jt.outputPath = path_out_cluster return jt
def draw_annotation_images(self, plate, training_set, container, learner,
                           rid=""):
    """Export an image/mask pair per annotated object and mark it.

    For every object of the training set a sample image and mask are
    written to the per-class sample directory and the object is marked
    (colored contour + filled center dot) in the container's RGB image.
    """
    # one output directory per class name
    cldir = dict([(cname, join(learner.samples_dir, cname)) \
                  for cname in learner.class_names.values()])
    # create dir per class name
    for dir_ in cldir.values():
        makedirs(dir_)
    for obj in training_set.itervalues():
        rgb_value = ccore.RGBValue(*hex2rgb(obj.strHexColor))
        # base file name encodes plate, position, time and object center
        file_ = 'PL%s___P%s___T%05d___X%04d___Y%04d' \
                %(plate, self.P, self._iT,
                  obj.oCenterAbs[0], obj.oCenterAbs[1])
        obj.file = file_
        # '%s' placeholder is filled with "img" / "msk" below
        file_ = join(cldir[obj.strClassName],
                     '%s___%s.png' %(file_, rid+"_%s"))
        container.exportObject(obj.iId, file_ %"img", file_ %"msk")
        container.markObjects([obj.iId], rgb_value, False, True)
        # dot of radius 3 at the object center in the class color
        ccore.drawFilledCircle(ccore.Diff2D(*obj.oCenterAbs), 3,
                               container.img_rgb, rgb_value)
def _makedirs(self):
    """Create/setup output directories.

    -) <analyzed-dir>/analyzed (already exists)
    -) <analysis-dir>/hmm
    -) <analysis-dir>/hmm/gallery
    """
    assert isinstance(self._outdir, basestring)
    self._analyzed_dir = join(self._outdir, "analyzed")
    targets = (join(self._outdir, "hmm"),
               join(self._outdir, "hmm", "gallery"))
    for target in targets:
        try:
            makedirs(target)
        except os.error:  # no permissions
            raise OSError("Missing permissions to create dir\n(%s)" % target)
        else:
            attr = "_%s_dir" % basename(target.lower()).strip("_")
            setattr(self, attr, target)
def _makedirs(self):
    """Create/setup output directories.

    -) <analyzed-dir>/analyzed (already exists)
    -) <analysis-dir>/hmm
    -) <analysis-dir>/hmm/gallery
    """
    assert isinstance(self._outdir, basestring)
    self._analyzed_dir = join(self._outdir, "analyzed")
    targets = [join(self._outdir, "hmm")]
    # gallery directory only when gallery export is enabled
    if self.ecopts.write_gallery:
        targets.append(join(self._outdir, "hmm", "gallery"))
    for target in targets:
        try:
            makedirs(target)
        except os.error:  # no permissions
            raise OSError("Missing permissions to create dir\n(%s)" % target)
        else:
            attr = "_%s_dir" % basename(target.lower()).strip("_")
            setattr(self, attr, target)
def track_features(self, timeholder, visitor_data, channel_regions,
                   position, outdir):
    """Write one feature table per event, channel and region.

    The output directory is wiped and recreated before any file is
    written.
    """
    shutil.rmtree(outdir, True)
    makedirs(outdir)
    for tracks in visitor_data.itervalues():
        for startid, event_data in tracks.iteritems():
            # keys starting with '_' are bookkeeping, not events
            if startid.startswith('_'):
                continue
            for chname, region in channel_regions.iteritems():
                for region_name, feature_names in region.iteritems():
                    try:
                        frame, obj_label, branch = \
                            Tracker.split_nodeid(startid)
                    except ValueError:
                        # node id without branch information
                        frame, obj_label = Tracker.split_nodeid(startid)
                        branch = 1
                    fname = ('features_P%s_T%05d_O%04d_B%02d_C%s_R%s.txt'
                             % (position, frame, obj_label, branch,
                                chname, region_name))
                    self._data_per_channel(timeholder, event_data,
                                           join(outdir, fname), chname,
                                           region_name, feature_names,
                                           position)
def _on_saveas_classifier(self, path=None):
    """Save the classifier (class definitions + annotations) to a directory.

    Prompts for the target directory when *path* is not given. Existing
    annotation XML files are copied into a time-stamped backup folder and
    removed before the current annotations are exported.
    """
    learner = self._learner
    if path is None:
        path = os.path.expanduser("~")
        result = QFileDialog.getExistingDirectory(
            self, 'Save to classifier directory', os.path.abspath(path))
    else:
        result = path
    if result:
        if self._save_classifier(result):
            try:
                path2 = learner.annotations_dir
                filenames = os.listdir(path2)
                filenames = [
                    os.path.join(path2, f) for f in filenames
                    if os.path.isfile(os.path.join(path2, f)) and
                    os.path.splitext(f)[1].lower() == '.xml'
                ]
                fmt = time.strftime('_backup__%Y%m%d_%H%M%S')
                path_backup = os.path.join(path2, fmt)
                makedirs(path_backup)
                # backup-then-delete keeps the original timestamps (copy2)
                for filename in filenames:
                    shutil.copy2(filename, path_backup)
                    os.remove(filename)
                self._annotations.export_to_xml(path2, learner.class_labels,
                                                self._imagecontainer)
            # FIX: was a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt
            except Exception:
                exception(self, "Problems saving annotation data...")
            else:
                information(
                    self, "Classifier successfully saved",
                    "Class definitions and annotations "
                    "successfully saved to '%s'." % result)
            finally:
                # restore the browser's plate selection in any case
                coord = self.browser.get_coordinate()
                self._imagecontainer.set_plate(coord.plate)
def convertToOneFilePerTrack(cls, path_out, image_compression=''):
    """Stitch per-event gallery images into one image per track.

    For every event sub-directory of *path_out* the cutter images are
    stitched horizontally into a single file named after the event id;
    the event's info text file is moved to '_info' and the event
    directory is removed afterwards.
    NOTE(review): *image_compression* is accepted but not used in the
    visible body — confirm against callers.
    """
    for event_id in os.listdir(path_out):
        event_path = os.path.join(path_out, event_id)
        #print event_path
        if os.path.isdir(event_path):
            # get all image cutter files
            filenames = collect_files(event_path,
                                      extensions=['.jpg', '.png', '.tif'],
                                      absolute=True)
            if len(filenames) > 0:
                img_out = None
                # determine file extension
                ext = os.path.splitext(filenames[0])[1]
                # stitch image horizontally
                for idx, filename in enumerate(filenames):
                    img = cls.read_image(filename)
                    if img_out is None:
                        # all frames are assumed to share the first
                        # frame's size; allocate the full-width canvas
                        size = img.width, img.height
                        img_out = cls.IMAGE_CLASS(size[0] * len(filenames),
                                                  size[1])
                    ccore.copySubImage(img, ccore.Diff2D(0, 0),
                                       ccore.Diff2D(size[0], size[1]),
                                       img_out,
                                       ccore.Diff2D(size[0]*idx, 0))
                # save a one file with event_id (P,T,O) + extension
                filename_out = os.path.join(path_out, event_id) + ext
                #print filename_out
                ccore.writeImage(img_out, filename_out)
                # NOTE(review): the info-copy and cleanup below are kept
                # inside the len(filenames) > 0 branch as in the original
                # layout — confirm that empty event dirs should survive
                path_out_info = os.path.join(path_out, '_info')
                makedirs(path_out_info)
                shutil.copy2(os.path.join(event_path, '_%s.txt' % event_id),
                             os.path.join(path_out_info,
                                          '_%s.txt' % event_id))
                shutil.rmtree(event_path, ignore_errors=True)
def full_tracks(self, timeholder, visitor_data, position, outdir):
    """Export one text file per full track.

    Each file contains three header lines (channel, region, feature
    names) followed by one row per track node with class label, class
    name, center coordinates (primary channel only) and feature values.
    The output directory is removed and recreated first.
    """
    shutil.rmtree(outdir, True)
    makedirs(outdir)
    for start_id, data in visitor_data.iteritems():
        for idx, track in enumerate(data['_full_tracks']):
            has_header = False
            line1 = []
            line2 = []
            line3 = []
            frame, obj_label = Tracker.split_nodeid(start_id)[:2]
            filename = 'P%s__T%05d__O%04d__B%02d.txt' \
                       %(position, frame, obj_label, idx+1)
            f = file(join(outdir, filename), 'w')
            for node_id in track:
                frame, obj_id = Tracker.split_nodeid(node_id)
                coordinate = Coordinate(position=position, time=frame)
                # prefix columns shared by every row
                prefix = [
                    frame,
                    self.meta_data.get_timestamp_relative(coordinate),
                    obj_id
                ]
                prefix_names = ['frame', 'time', 'objID']
                items = []
                for channel in timeholder[frame].values():
                    for region_id in channel.region_names():
                        region = channel.get_region(region_id)
                        if obj_id in region:
                            flkp = self._map_feature_names(
                                region.feature_names)
                            if not has_header:
                                # collect header columns for this
                                # channel/region once
                                keys = ['classLabel', 'className']
                                if channel.NAME == 'Primary':
                                    keys += ['centerX', 'centerY']
                                keys += flkp.keys()
                                line1 += [channel.NAME.upper()] * len(keys)
                                line2 += [str(region_id)] * len(keys)
                                line3 += keys
                            obj = region[obj_id]
                            features = region.features_by_name(
                                obj_id, flkp.values())
                            # None -> empty cell
                            values = [
                                x if not x is None else ''
                                for x in [obj.iLabel, obj.strClassName]
                            ]
                            if channel.NAME == 'Primary':
                                values += [
                                    obj.oCenterAbs[0], obj.oCenterAbs[1]
                                ]
                            values += list(features)
                            items.extend(values)
                if not has_header:
                    # write the three header lines once, padded for the
                    # prefix columns
                    has_header = True
                    prefix_str = [''] * len(prefix)
                    line1 = prefix_str + line1
                    line2 = prefix_str + line2
                    line3 = prefix_names + line3
                    f.write('%s\n' % CSVParams.sep.join(line1))
                    f.write('%s\n' % CSVParams.sep.join(line2))
                    f.write('%s\n' % CSVParams.sep.join(line3))
                f.write(
                    '%s\n' %
                    CSVParams.sep.join([str(i) for i in prefix + items]))
            f.close()
def make_target_dir(self):
    """Create the output directory named after the merged regions."""
    name = "-".join(self._channel.merge_regions).lower()
    makedirs(join(self._outdir, name))
def render(self, strPathOut, dctRenderInfo=None, strFileSuffix='.jpg',
           strCompression='98', writeToDisc=True, images=None):
    """Compose an RGB overlay image and optionally write it to disk.

    Without *dctRenderInfo* every channel's raw image plus its region
    contours are blended; otherwise the render-info dict controls which
    raw images, contours, class colors and labels are drawn.
    Returns (imgRgb, strFilePath); strFilePath is '' when not written.
    NOTE(review): when no image ends up in the blend list the function
    falls through and implicitly returns None — confirm callers handle
    that.
    """
    lstImages = []
    if not images is None:
        lstImages += images
    if dctRenderInfo is None:
        # default: blend each channel's raw image with its region contours
        for name, oChannel in self._channel_registry.iteritems():
            for strRegion, oContainer in oChannel.containers.iteritems():
                strHexColor, fAlpha = oChannel.dctAreaRendering[strRegion]
                imgRaw = oChannel.meta_image.image
                imgCon = ccore.Image(imgRaw.width, imgRaw.height)
                ccore.drawContour(oContainer.getBinary(), imgCon, 255, False)
                lstImages.append((imgRaw, strHexColor, 1.0))
                lstImages.append((imgCon, strHexColor, fAlpha))
    else:
        for channel_name, dctChannelInfo in dctRenderInfo.iteritems():
            if channel_name in self._channel_registry:
                oChannel = self._channel_registry[channel_name]
                if 'raw' in dctChannelInfo:
                    strHexColor, fAlpha = dctChannelInfo['raw']
                    # special casing for virtual channel to mix
                    # raw images together
                    if oChannel.is_virtual():
                        lstImages.extend(oChannel.meta_images(fAlpha))
                    else:
                        lstImages.append((oChannel.meta_image.image,
                                          strHexColor, 1.0))
                if 'contours' in dctChannelInfo:
                    # transform the old dict-style to the new tuple-style,
                    # which allows multiple definitions for one region
                    if isinstance(dctChannelInfo['contours'], dict):
                        lstContourInfos = [(k,)+v for k, v in
                                           dctChannelInfo['contours'].iteritems()]
                    else:
                        lstContourInfos = dctChannelInfo['contours']
                    for tplData in lstContourInfos:
                        strRegion, strNameOrColor, fAlpha, bShowLabels = \
                            tplData[:4]
                        # draw contours only if region is present
                        if oChannel.has_region(strRegion):
                            # optional 5th tuple entry: thick contours
                            if len(tplData) > 4:
                                bThickContours = tplData[4]
                            else:
                                bThickContours = False
                            imgRaw = oChannel.meta_image.image
                            if strNameOrColor == 'class_label':
                                # color each object by its class; group
                                # object ids per class label first
                                oContainer = oChannel.containers[strRegion]
                                oRegion = oChannel.get_region(strRegion)
                                dctLabels = {}
                                dctColors = {}
                                for iObjId, oObj in oRegion.iteritems():
                                    iLabel = oObj.iLabel
                                    if not iLabel is None:
                                        if not iLabel in dctLabels:
                                            dctLabels[iLabel] = []
                                        dctLabels[iLabel].append(iObjId)
                                        dctColors[iLabel] = oObj.strHexColor
                                imgCon2 = ccore.Image(imgRaw.width,
                                                      imgRaw.height)
                                for iLabel, lstObjIds in dctLabels.iteritems():
                                    imgCon = ccore.Image(imgRaw.width,
                                                         imgRaw.height)
                                    # Flip this and use drawContours with fill
                                    # option enables to get black background
                                    oContainer.drawContoursByIds(
                                        lstObjIds, 255, imgCon,
                                        bThickContours, False)
                                    # oContainer.drawContoursByIds(lstObjIds, 255, imgCon, bThickContours, True)
                                    lstImages.append((imgCon,
                                                      dctColors[iLabel],
                                                      fAlpha))
                                    if isinstance(bShowLabels, bool) and bShowLabels:
                                        # all label texts share one overlay
                                        oContainer.drawTextsByIds(
                                            lstObjIds,
                                            [str(iLabel)]*len(lstObjIds),
                                            imgCon2)
                                lstImages.append((imgCon2, '#FFFFFF', 1.0))
                            else:
                                # single color (or hidden) contours for
                                # all objects of the region
                                oContainer = oChannel.containers[strRegion]
                                oRegion = oChannel.get_region(strRegion)
                                lstObjIds = oRegion.keys()
                                imgCon = ccore.Image(imgRaw.width,
                                                     imgRaw.height)
                                if not strNameOrColor is None:
                                    oContainer.drawContoursByIds(
                                        lstObjIds, 255, imgCon,
                                        bThickContours, False)
                                else:
                                    strNameOrColor = '#FFFFFF'
                                lstImages.append((imgCon, strNameOrColor,
                                                  fAlpha))
                                if bShowLabels:
                                    imgCon2 = ccore.Image(imgRaw.width,
                                                          imgRaw.height)
                                    oContainer.drawLabelsByIds(lstObjIds,
                                                               imgCon2)
                                    lstImages.append((imgCon2, '#FFFFFF', 1.0))
    if len(lstImages) > 0:
        # alpha-blend all collected (image, color, alpha) layers
        imgRgb = ccore.makeRGBImage([x[0].getView() for x in lstImages],
                                    [ccore.RGBValue(*hex2rgb(x[1]))
                                     for x in lstImages],
                                    [x[2] for x in lstImages])
        if writeToDisc:
            strFilePath = join(strPathOut,
                               "P%s_T%05d%s" % (self.P, self._iT,
                                                strFileSuffix))
            makedirs(strPathOut)
            ccore.writeImage(imgRgb, strFilePath, strCompression)
            self.logger.debug("* rendered image written '%s'" % strFilePath)
        else:
            strFilePath = ''
        return imgRgb, strFilePath
def render(self, strPathOut, dctRenderInfo=None, strFileSuffix='.jpg',
           strCompression='98', writeToDisc=True, images=None):
    """Compose an RGB overlay image and optionally write it to disk.

    Without *dctRenderInfo* every channel's raw image plus its region
    contours are blended; otherwise the render-info dict controls which
    raw images, contours, class colors and labels are drawn.
    Returns (imgRgb, strFilePath); strFilePath is '' when not written.
    NOTE(review): when no image ends up in the blend list the function
    falls through and implicitly returns None — confirm callers handle
    that.
    """
    lstImages = []
    if not images is None:
        lstImages += images
    if dctRenderInfo is None:
        # default: blend each channel's raw image with its region contours
        for name, oChannel in self._channel_registry.iteritems():
            for strRegion, oContainer in oChannel.containers.iteritems():
                strHexColor, fAlpha = oChannel.dctAreaRendering[strRegion]
                imgRaw = oChannel.meta_image.image
                imgCon = ccore.Image(imgRaw.width, imgRaw.height)
                ccore.drawContour(oContainer.getBinary(), imgCon, 255, False)
                lstImages.append((imgRaw, strHexColor, 1.0))
                lstImages.append((imgCon, strHexColor, fAlpha))
    else:
        for channel_name, dctChannelInfo in dctRenderInfo.iteritems():
            if channel_name in self._channel_registry:
                oChannel = self._channel_registry[channel_name]
                if 'raw' in dctChannelInfo:
                    strHexColor, fAlpha = dctChannelInfo['raw']
                    # special casing for virtual channel to mix
                    # raw images together
                    if oChannel.is_virtual():
                        lstImages.extend(oChannel.meta_images(fAlpha))
                    else:
                        lstImages.append(
                            (oChannel.meta_image.image, strHexColor, 1.0))
                if 'contours' in dctChannelInfo:
                    # transform the old dict-style to the new tuple-style,
                    # which allows multiple definitions for one region
                    if isinstance(dctChannelInfo['contours'], dict):
                        lstContourInfos = [
                            (k, ) + v
                            for k, v in dctChannelInfo['contours'].iteritems()
                        ]
                    else:
                        lstContourInfos = dctChannelInfo['contours']
                    for tplData in lstContourInfos:
                        strRegion, strNameOrColor, fAlpha, bShowLabels = \
                            tplData[:4]
                        # draw contours only if region is present
                        if oChannel.has_region(strRegion):
                            # optional 5th tuple entry: thick contours
                            if len(tplData) > 4:
                                bThickContours = tplData[4]
                            else:
                                bThickContours = False
                            imgRaw = oChannel.meta_image.image
                            if strNameOrColor == 'class_label':
                                # color each object by its class; group
                                # object ids per class label first
                                oContainer = oChannel.containers[strRegion]
                                oRegion = oChannel.get_region(strRegion)
                                dctLabels = {}
                                dctColors = {}
                                for iObjId, oObj in oRegion.iteritems():
                                    iLabel = oObj.iLabel
                                    if not iLabel is None:
                                        if not iLabel in dctLabels:
                                            dctLabels[iLabel] = []
                                        dctLabels[iLabel].append(iObjId)
                                        dctColors[iLabel] = oObj.strHexColor
                                imgCon2 = ccore.Image(imgRaw.width,
                                                      imgRaw.height)
                                for iLabel, lstObjIds in \
                                        dctLabels.iteritems():
                                    imgCon = ccore.Image(imgRaw.width,
                                                         imgRaw.height)
                                    # Flip this and use drawContours with fill
                                    # option enables to get black background
                                    oContainer.drawContoursByIds(
                                        lstObjIds, 255, imgCon,
                                        bThickContours, False)
                                    # oContainer.drawContoursByIds(lstObjIds, 255, imgCon, bThickContours, True)
                                    lstImages.append(
                                        (imgCon, dctColors[iLabel], fAlpha))
                                    if isinstance(bShowLabels, bool) and bShowLabels:
                                        # all label texts share one overlay
                                        oContainer.drawTextsByIds(
                                            lstObjIds,
                                            [str(iLabel)] * len(lstObjIds),
                                            imgCon2)
                                lstImages.append((imgCon2, '#FFFFFF', 1.0))
                            else:
                                # single color (or hidden) contours for
                                # all objects of the region
                                oContainer = oChannel.containers[strRegion]
                                oRegion = oChannel.get_region(strRegion)
                                lstObjIds = oRegion.keys()
                                imgCon = ccore.Image(imgRaw.width,
                                                     imgRaw.height)
                                if not strNameOrColor is None:
                                    oContainer.drawContoursByIds(
                                        lstObjIds, 255, imgCon,
                                        bThickContours, False)
                                else:
                                    strNameOrColor = '#FFFFFF'
                                lstImages.append(
                                    (imgCon, strNameOrColor, fAlpha))
                                if bShowLabels:
                                    imgCon2 = ccore.Image(imgRaw.width,
                                                          imgRaw.height)
                                    oContainer.drawLabelsByIds(
                                        lstObjIds, imgCon2)
                                    lstImages.append(
                                        (imgCon2, '#FFFFFF', 1.0))
    if len(lstImages) > 0:
        # alpha-blend all collected (image, color, alpha) layers
        imgRgb = ccore.makeRGBImage(
            [x[0].getView() for x in lstImages],
            [ccore.RGBValue(*hex2rgb(x[1])) for x in lstImages],
            [x[2] for x in lstImages])
        if writeToDisc:
            strFilePath = join(
                strPathOut,
                "P%s_T%05d%s" % (self.P, self._iT, strFileSuffix))
            makedirs(strPathOut)
            ccore.writeImage(imgRgb, strFilePath, strCompression)
            self.logger.debug("* rendered image written '%s'" % strFilePath)
        else:
            strFilePath = ''
        return imgRgb, strFilePath
def _run_plate(self, plate_id):
    """Run the R-based HMM error correction for one plate.

    Reads the run_hmm.R template, substitutes its '#PLACEHOLDER' lines
    with the current settings, writes the result as cecog_hmm_input.R
    and executes it via QProcess; afterwards optionally composes
    galleries and opens the HTML report.
    """
    filename = self._settings.get2('filename_to_R')
    cmd = self.get_cmd(filename)
    path_out = self._imagecontainer.get_path_out()
    wd = abspath(join(CecogEnvironment.R_SOURCE_DIR, 'hmm'))
    f = file(join(wd, 'run_hmm.R'), 'r')
    lines = f.readlines()
    f.close()
    path_analyzed = self._join(path_out, 'analyzed')
    path_out_hmm = self._join(path_out, 'hmm')
    # don't do anything if the 'hmm' folder already exists and
    # the skip-option is on
    if isdir(path_out_hmm) and self._settings.get2('skip_processed_plates'):
        return
    makedirs(path_out_hmm)
    region_name_primary = self._settings.get(
        'Classification', 'primary_classification_regionname')
    region_name_secondary = self._settings.get(
        'Classification', 'secondary_classification_regionname')
    path_out_hmm_region = self._convert(
        self._get_path_out(path_out_hmm,
                           '%s_%s' % ('primary', region_name_primary)))
    # take mapping file for plate or generate dummy mapping file
    # for the R script
    if plate_id in self._mapping_files:
        # convert path for R
        mapping_file = self._convert(self._mapping_files[plate_id])
    else:
        mapping_file = self._generate_mapping(wd, path_out_hmm,
                                              path_analyzed)
    if self._settings.get2('overwrite_time_lapse'):
        time_lapse = self._settings.get2('timelapse')
    else:
        # derive the time lapse (minutes) from the image meta data
        meta_data = self._imagecontainer.get_meta_data()
        if meta_data.has_timestamp_info:
            time_lapse = meta_data.plate_timestamp_info[0] / 60.
        else:
            raise ValueError("Plate '%s' has not time-lapse info.\n"
                             "Please define (overwrite) the value manually."
                             % plate_id)
    if self._settings.get2('compose_galleries'):
        gallery_names = ['primary'] + \
            [x for x in ['secondary', 'tertiary']
             if self._settings.get('Processing',
                                   '%s_processchannel' % x)]
    else:
        gallery_names = None
    # replace the placeholder comment lines of the R template in place
    for i in range(len(lines)):
        line2 = lines[i].strip()
        if line2 == '#WORKING_DIR':
            lines[i] = "WORKING_DIR = '%s'\n" % self._convert(wd)
        elif line2 == '#FILENAME_MAPPING':
            lines[i] = "FILENAME_MAPPING = '%s'\n" % mapping_file
        elif line2 == '#PATH_INPUT':
            lines[i] = "PATH_INPUT = '%s'\n" % path_analyzed
        elif line2 == '#GROUP_BY_GENE':
            lines[i] = "GROUP_BY_GENE = %s\n" \
                % str(self._settings.get2('groupby_genesymbol')).upper()
        elif line2 == '#GROUP_BY_OLIGOID':
            lines[i] = "GROUP_BY_OLIGOID = %s\n" \
                % str(self._settings.get2('groupby_oligoid')).upper()
        elif line2 == '#TIMELAPSE':
            lines[i] = "TIMELAPSE = %s\n" % time_lapse
        elif line2 == '#MAX_TIME':
            lines[i] = "MAX_TIME = %s\n" % self._settings.get2('max_time')
        elif line2 == '#SINGLE_BRANCH':
            lines[i] = "SINGLE_BRANCH = %s\n" \
                % str(self._settings.get2('ignore_tracking_branches')).upper()
        elif line2 == '#GALLERIES':
            if gallery_names is None:
                lines[i] = "GALLERIES = NULL\n"
            else:
                lines[i] = "GALLERIES = c(%s)\n" \
                    % ','.join(["'%s'" % x for x in gallery_names])
        # a primary classifier is mandatory for the error correction
        if len(self._learner_dict) == 0 or \
                'primary' not in self._learner_dict:
            raise RuntimeError(('Classifier not found. Please check '
                                'your classifications settings...'))
        ##
        if 'primary' in self._learner_dict:
            if self._settings.get2('constrain_graph'):
                primary_graph = self._convert(
                    self._settings.get2('primary_graph'))
            else:
                primary_graph = self._generate_graph(
                    'primary', wd, path_out_hmm, region_name_primary)
            if line2 == '#FILENAME_GRAPH_P':
                lines[i] = "FILENAME_GRAPH_P = '%s'\n" % primary_graph
            elif line2 == '#CLASS_COLORS_P':
                learner = self._learner_dict['primary']
                colors = ",".join(["'%s'" % learner.hexcolors[x] \
                                   for x in learner.class_names.values()])
                lines[i] = "CLASS_COLORS_P = c(%s)\n" % colors
            elif line2 == '#REGION_NAME_P':
                lines[i] = "REGION_NAME_P = '%s'\n" % region_name_primary
            elif line2 == '#SORT_CLASSES_P':
                if self._settings.get2('enable_sorting'):
                    lines[i] = "SORT_CLASSES_P = c(%s)\n" \
                        % self._settings.get2('sorting_sequence')
                else:
                    lines[i] = "SORT_CLASSES_P = NULL\n"
            elif line2 == "#PATH_OUT_P":
                lines[i] = "PATH_OUT_P = '%s'\n" % path_out_hmm_region
        ##
        if 'secondary' in self._learner_dict:
            if self._settings.get2('constrain_graph'):
                secondary_graph = self._convert(
                    self._settings.get2('secondary_graph'))
            else:
                secondary_graph = self._generate_graph(
                    'secondary', wd, path_out_hmm, region_name_secondary)
            if line2 == '#FILENAME_GRAPH_S':
                lines[i] = "FILENAME_GRAPH_S = '%s'\n" % secondary_graph
            elif line2 == '#CLASS_COLORS_S':
                learner = self._learner_dict['secondary']
                colors = ",".join(["'%s'" % learner.hexcolors[x] \
                                   for x in learner.class_names.values()])
                lines[i] = "CLASS_COLORS_S = c(%s)\n" % colors
            elif line2 == '#REGION_NAME_S':
                lines[i] = "REGION_NAME_S = '%s'\n" % region_name_secondary
            elif line2 == '#SORT_CLASSES_S':
                secondary_sort = self._settings.get2('secondary_sort')
                if secondary_sort == '':
                    lines[i] = "SORT_CLASSES_S = NULL\n"
                else:
                    lines[i] = "SORT_CLASSES_S = c(%s)\n" % secondary_sort
            elif line2 == "#PATH_OUT_S":
                lines[i] = "PATH_OUT_S = '%s'\n" % \
                    self._convert(self._get_path_out(
                        path_out_hmm, '%s_%s' \
                        % ('secondary', region_name_secondary)))
    # write the patched script and execute it with R in batch mode
    input_filename = join(path_out_hmm, 'cecog_hmm_input.R')
    f = file(input_filename, 'w')
    f.writelines(lines)
    f.close()
    self._process = QtCore.QProcess()
    self._process.setWorkingDirectory(wd)
    self._process.start(cmd, ['BATCH', '--silent', '-f', input_filename])
    self._process.readyReadStandardOutput.connect(self._on_stdout)
    self._process.waitForFinished(-1)
    if self._process.exitCode() != 0:
        # forward R's stderr to the GUI and abort
        self._process.setReadChannel(QtCore.QProcess.StandardError)
        msg = str(self._process.readLine()).rstrip()
        msg = ''.join(list(self._process.readAll()))
        self.analyzer_error.emit(msg)
        self.abort()
    elif self._settings.get2('compose_galleries') and not self._abort:
        # -1 means "no sub-sampling"
        sample = self._settings.get2('compose_galleries_sample')
        if sample == -1:
            sample = None
        for group_name in compose_galleries(path_out, path_out_hmm_region,
                                            sample=sample):
            self._logger.debug('gallery finished for group: %s'
                               % group_name)
            if self._abort:
                break
    if self._settings.get2('show_html') and not self._abort:
        QtGui.QDesktopServices.openUrl(
            QtCore.QUrl('file://' + join(path_out_hmm_region, 'index.html'),
                        QtCore.QUrl.TolerantMode))
def _run_plate(self, plate_id):
    """Run the configured post-processing analyses for one plate.

    Builds class color/name lookup tables from the 'primary' learner,
    then — depending on the post-processing settings section — runs an
    IBB analysis and/or a Securin analysis on the plate's 'analyzed'
    output directory.

    :param plate_id: key into self._mapping_files identifying the plate.
    """
    path_out = self._imagecontainer.get_path_out()
    path_analyzed = join(path_out, 'analyzed')
    makedirs(path_analyzed)
    mapping_file = self._mapping_files[plate_id]
    # Map class index -> hex color and class index -> class name, both
    # taken from the primary channel's learner.
    class_colors = {}
    for i, name in self._learner_dict['primary'].class_names.items():
        class_colors[i] = self._learner_dict['primary'].hexcolors[name]
    class_names = {}
    for i, name in self._learner_dict['primary'].class_names.items():
        class_names[i] = name
    self._settings.set_section(SECTION_NAME_POST_PROCESSING)
    if self._settings.get2('ibb_analysis'):
        # Collect IBBAnalysis keyword options straight from the settings.
        ibb_options = {}
        ibb_options['ibb_ratio_signal_threshold'] = \
            self._settings.get2('ibb_ratio_signal_threshold')
        ibb_options['ibb_range_signal_threshold'] = \
            self._settings.get2('ibb_range_signal_threshold')
        ibb_options['ibb_onset_factor_threshold'] = \
            self._settings.get2('ibb_onset_factor_threshold')
        ibb_options['nebd_onset_factor_threshold'] = \
            self._settings.get2('nebd_onset_factor_threshold')
        ibb_options['single_plot'] = self._settings.get2('single_plot')
        ibb_options['single_plot_max_plots'] = \
            self._settings.get2('single_plot_max_plots')
        ibb_options['single_plot_ylim_range'] = \
            self._settings.get2('single_plot_ylim_low'), \
            self._settings.get2('single_plot_ylim_high')
        # Encode which 'group by' flag is set as an integer index: the four
        # booleans are concatenated into a binary string (e.g.
        # (True, False, False, False) -> '1000'), parsed base-2, and
        # log2 (+0.5 for rounding) yields the bit position of the highest
        # set flag: 3 = group, 2 = genesymbol, 1 = oligoid, 0 = position.
        # Presumably the GUI enforces exactly one flag set — TODO confirm.
        tmp = (self._settings.get2('group_by_group'),
               self._settings.get2('group_by_genesymbol'),
               self._settings.get2('group_by_oligoid'),
               self._settings.get2('group_by_position'))
        ibb_options['group_by'] = \
            int(np.log2(int(reduce(lambda x,y: str(x)+str(y),
                                   np.array(tmp).astype(np.uint8)),2))+0.5)
        # Same flag-to-index encoding for the color sorting selection.
        tmp = (self._settings.get2('color_sort_by_group'),
               self._settings.get2('color_sort_by_genesymbol'),
               self._settings.get2('color_sort_by_oligoid'),
               self._settings.get2('color_sort_by_position'))
        ibb_options['color_sort_by'] = \
            int(np.log2(int(reduce(lambda x,y: str(x)+str(y),
                                   np.array(tmp).astype(np.uint8)),2))+0.5)
        # The grouping criterion must be strictly more general (higher
        # index) than the color sorting criterion.
        # NOTE(review): in the original source this message literal is
        # split across a physical line; reconstructed here with a
        # backslash line continuation inside the string.
        if not ibb_options['group_by'] < ibb_options['color_sort_by']:
            raise AttributeError(('Group by selection must be more general '
                                  ' than the color sorting! \
(%d !> %d)' % (ibb_options['group_by'], ibb_options['color_sort_by'])))
        ibb_options['color_sort_by'] = \
            IBBAnalysis.COLOR_SORT_BY[ibb_options['color_sort_by']]
        # NOTE(review): key spelled 'timeing' (sic) — must match the
        # option name expected by IBBAnalysis; do not "fix" the spelling.
        ibb_options['timeing_ylim_range'] = \
            self._settings.get2('plot_ylim1_low'), \
            self._settings.get2('plot_ylim1_high')
        path_out_ibb = join(path_out, 'ibb')
        makedirs(path_out_ibb)
        ibb_analyzer = IBBAnalysis(path_analyzed,
                                   path_out_ibb,
                                   plate_id,
                                   mapping_file,
                                   class_colors,
                                   class_names,
                                   **ibb_options)
        ibb_analyzer.run()
    if self._settings.get2('securin_analysis'):
        path_out_securin = join(path_out, 'sec')
        makedirs(path_out_securin)
        # No extra options are configured for the Securin analysis.
        securin_options = {}
        securin_analyzer = SecurinAnalysis(path_analyzed,
                                           path_out_securin,
                                           plate_id,
                                           mapping_file,
                                           class_colors,
                                           class_names,
                                           **securin_options)
        securin_analyzer.run()
def _run_plate(self, plate_id):
    """Run the configured post-processing analyses for one plate.

    Builds class color/name lookup tables from the 'primary' learner and,
    according to the post-processing settings section, runs an IBB
    analysis and/or a Securin analysis on the plate's 'analyzed' output.

    :param plate_id: key into self._mapping_files identifying the plate.
    """
    path_out = self._imagecontainer.get_path_out()
    path_analyzed = join(path_out, 'analyzed')
    makedirs(path_analyzed)
    mapping_file = self._mapping_files[plate_id]
    # class index -> hex color / class index -> class name, from the
    # primary channel's learner.
    class_colors = {}
    for i, name in self._learner_dict['primary'].class_names.items():
        class_colors[i] = self._learner_dict['primary'].hexcolors[name]
    class_names = {}
    for i, name in self._learner_dict['primary'].class_names.items():
        class_names[i] = name
    self._settings.set_section(SECTION_NAME_POST_PROCESSING)
    if self._settings.get2('ibb_analysis'):
        # Gather IBBAnalysis keyword options from the settings.
        ibb_options = {}
        ibb_options['ibb_ratio_signal_threshold'] = \
            self._settings.get2('ibb_ratio_signal_threshold')
        ibb_options['ibb_range_signal_threshold'] = \
            self._settings.get2('ibb_range_signal_threshold')
        ibb_options['ibb_onset_factor_threshold'] = \
            self._settings.get2('ibb_onset_factor_threshold')
        ibb_options['nebd_onset_factor_threshold'] = \
            self._settings.get2('nebd_onset_factor_threshold')
        ibb_options['single_plot'] = self._settings.get2('single_plot')
        ibb_options['single_plot_max_plots'] = \
            self._settings.get2('single_plot_max_plots')
        ibb_options['single_plot_ylim_range'] = \
            self._settings.get2('single_plot_ylim_low'), \
            self._settings.get2('single_plot_ylim_high')
        # Turn the four mutually-exclusive 'group by' booleans into an
        # index: concatenate them to a binary string, parse base-2, and
        # take log2 (+0.5 rounds) -> bit position of the highest set flag
        # (3 = group, 2 = genesymbol, 1 = oligoid, 0 = position).
        # Presumably exactly one flag is set — TODO confirm.
        tmp = (self._settings.get2('group_by_group'),
               self._settings.get2('group_by_genesymbol'),
               self._settings.get2('group_by_oligoid'),
               self._settings.get2('group_by_position'))
        ibb_options['group_by'] = \
            int(np.log2(int(reduce(lambda x,y: str(x)+str(y),
                                   np.array(tmp).astype(np.uint8)),2))+0.5)
        # Same encoding for the color-sorting flags.
        tmp = (self._settings.get2('color_sort_by_group'),
               self._settings.get2('color_sort_by_genesymbol'),
               self._settings.get2('color_sort_by_oligoid'),
               self._settings.get2('color_sort_by_position'))
        ibb_options['color_sort_by'] = \
            int(np.log2(int(reduce(lambda x,y: str(x)+str(y),
                                   np.array(tmp).astype(np.uint8)),2))+0.5)
        # Grouping must be strictly more general than color sorting.
        # NOTE(review): the message literal spans a physical line in the
        # original source; reconstructed with a backslash continuation.
        if not ibb_options['group_by'] < ibb_options['color_sort_by']:
            raise AttributeError(
                ('Group by selection must be more general '
                 ' than the color sorting! \
(%d !> %d)' % (ibb_options['group_by'], ibb_options['color_sort_by'])))
        ibb_options['color_sort_by'] = \
            IBBAnalysis.COLOR_SORT_BY[ibb_options['color_sort_by']]
        # NOTE(review): 'timeing' (sic) is the option name IBBAnalysis
        # expects; do not correct the spelling here.
        ibb_options['timeing_ylim_range'] = \
            self._settings.get2('plot_ylim1_low'), \
            self._settings.get2('plot_ylim1_high')
        path_out_ibb = join(path_out, 'ibb')
        makedirs(path_out_ibb)
        ibb_analyzer = IBBAnalysis(path_analyzed,
                                   path_out_ibb,
                                   plate_id,
                                   mapping_file,
                                   class_colors,
                                   class_names,
                                   **ibb_options)
        ibb_analyzer.run()
    if self._settings.get2('securin_analysis'):
        path_out_securin = join(path_out, 'sec')
        makedirs(path_out_securin)
        # The Securin analysis currently takes no extra options.
        securin_options = {}
        securin_analyzer = SecurinAnalysis(path_analyzed,
                                           path_out_securin,
                                           plate_id,
                                           mapping_file,
                                           class_colors,
                                           class_names,
                                           **securin_options)
        securin_analyzer.run()
def __init__(self, eventselector, strPathIn, oP, strPathOut,
             imageCompression="85", imageSuffix=".jpg", border=0,
             writeSubdirs=True, writeDescription=True,
             size=None, oneFilePerTrack=False):
    """Cut per-event gallery images from the raw frames of one position.

    Iterates the bounding boxes produced by ``eventselector.bboxes()``,
    groups them by time point (so each source frame is loaded only once
    via ``self._getImage``), crops each event's bounding box with ccore,
    pads crops that fall outside the frame, and writes one image per
    event and time point under *strPathOut*.

    :param eventselector: provides ``bboxes(size, border)`` mapping a
        start-node id to a list of (frame, bounding box, object ids).
    :param strPathIn: directory the raw frame images are read from.
    :param oP: position identifier used in output file/dir names.
    :param strPathOut: root output directory for the gallery images.
    :param imageCompression: JPEG quality passed to the one-file-per-track
        conversion (string, e.g. "85").
    :param imageSuffix: file extension of the written crops.
    :param border: border passed through to ``bboxes()``.
    :param writeSubdirs: write each event into its own subdirectory.
    :param writeDescription: also write a per-event tab-separated text
        file listing frame, object ids and bounding box.
    :param size: (width, height) of the output crops.
        NOTE(review): the asserts and the padding branch below index
        ``size``, so ``size=None`` raises TypeError there — presumably
        callers always pass an explicit size; confirm.
    :param oneFilePerTrack: merge each track's images into a single file
        at the end.
    """
    self._bHasImages = False  # NOTE(review): set here but not read in this block
    dctTimePoints = {}
    # First pass: per event, write the description file and bucket the
    # (event, objects, bbox) tuples by time point.
    for strStartId, lstTimeData in eventselector.bboxes( \
        size=size, border=border).iteritems():
        items = Tracker.split_nodeid(strStartId)
        iStartT, iObjId = items[:2]
        # Older node ids carry no branch component; default to branch 1.
        if len(items) == 3:
            branch_id = items[2]
        else:
            branch_id = 1
        if writeSubdirs:
            strPathOutEvent = os.path.join(
                strPathOut,
                self._format_name(oP, iStartT, iObjId, branch_id))
        else:
            # NOTE(review): without subdirs all events share strPathOut and
            # the per-frame file names below do not include the event id —
            # events at the same time point would overwrite each other;
            # presumably writeSubdirs=True is the supported mode. Confirm.
            strPathOutEvent = strPathOut
        makedirs(strPathOutEvent)
        if writeDescription:
            oFile = file(os.path.join(
                strPathOutEvent,
                "_%s.txt" % self._format_name(oP, iStartT, iObjId,
                                              branch_id)), "w")
            lstData = ["Frame", "ObjId", "x1", "y1", "x2", "y2"]
            oFile.write("%s\n" % "\t".join(map(str, lstData)))
        for iCnt, (iT, tplBoundingBox, lstObjIds) in enumerate(lstTimeData):
            if writeDescription:
                lstData = [iT, ';'.join(map(str, lstObjIds))] \
                          + list(tplBoundingBox)
                oFile.write("%s\n" % "\t".join(map(str, lstData)))
            if not iT in dctTimePoints:
                dctTimePoints[iT] = []
            dctTimePoints[iT].append(
                (strStartId, lstObjIds, iCnt, strPathOutEvent,
                 tplBoundingBox))
        if writeDescription:
            oFile.close()
    # Second pass: load each frame once and cut all events touching it.
    for idx, (iT, lstItems) in enumerate(dctTimePoints.iteritems()):
        #print iT, lstItems
        imgXY = self._getImage(strPathIn, iT)
        for strStartId, lstObjIds, iCnt, strPathOutEvent, tplBoundingBox \
                in lstItems:
            x1, y1, x2, y2 = tplBoundingBox
            # Clamp the requested box to the frame dimensions.
            x1Corr = 0 if x1 < 0 else x1
            y1Corr = 0 if y1 < 0 else y1
            x2Corr = imgXY.width-1 if x2 >= imgXY.width else x2
            y2Corr = imgXY.height-1 if y2 >= imgXY.height else y2
            imgSub = ccore.subImage(
                imgXY,
                ccore.Diff2D(x1Corr, y1Corr),
                ccore.Diff2D(x2Corr-x1Corr+1, y2Corr-y1Corr+1))
            # If the box was clipped, paste the clipped crop into a
            # blank full-size image at the offset that restores the
            # requested geometry.
            if (x1 < 0 or y1 < 0 or
                    x2 >= imgXY.width or y2 >= imgXY.height):
                imgSub2 = self.IMAGE_CLASS(size[0], size[1])
                ccore.copySubImage(imgSub, imgSub2,
                                   ccore.Diff2D(x1Corr-x1, y1Corr-y1))
                imgSub = imgSub2
            # The result must match both the requested size and the
            # bounding-box extent.
            assert imgSub.width == size[0]
            assert imgSub.width == x2-x1+1
            assert imgSub.height == size[1]
            assert imgSub.height == y2-y1+1
            if self.PROCESS_LABEL:
                # Label images: mask every tracked object id and max-project
                # the per-object masks into one crop.
                lstImages = []
                for iObjId in lstObjIds:
                    lstImages.append(
                        ccore.copyImageIfLabel(imgSub, imgSub, iObjId))
                imgSub = ccore.projectImage(
                    lstImages, ccore.ProjectionType.MaxProjection)
            strFilenameImage = os.path.join(
                strPathOutEvent,
                "P%s__T%05d%s" % (oP, iT, imageSuffix))
            ccore.writeImage(imgSub, strFilenameImage)
    if oneFilePerTrack and os.path.isdir(strPathOut):
        self.convertToOneFilePerTrack(strPathOut, imageCompression)
def compose_galleries(path, path_hmm, quality="90", one_daughter=True,
                      sample=30):
    """Stack per-event gallery images into one overview image per group.

    Reads the trajectory index files from ``<path_hmm>/_index``, picks
    (optionally a random sample of) trajectories, loads the matching
    per-event gallery JPEGs from ``<path>/analyzed/<pos>/gallery`` and
    stacks them vertically into ``<path_hmm>/_gallery/<name>/<group>.jpg``.
    Yields each group name after its images have been written; yields
    nothing (returns early) when the index directory is missing.

    :param path: analysis root containing the 'analyzed' tree.
    :param path_hmm: error-correction output directory with '_index'.
    :param quality: JPEG quality handed to ccore.writeImage (string).
    :param one_daughter: keep only trajectories of branch 'B01'.
    :param sample: number of trajectories to draw at random, or None to
        use all of them.
    """
    logger = logging.getLogger('compose_galleries')
    column_name = 'Trajectory'
    path_index = os.path.join(path_hmm, '_index')
    if not os.path.isdir(path_index):
        logger.warning(("Index path '%s' does not exist. Make sure the error"
                        " correction was executed successfully." % path_index))
        return

    for filename in os.listdir(path_index):
        logger.info('Creating gallery overview for %s' % filename)
        group_name = os.path.splitext(filename)[0]

        records = read_table(os.path.join(path_index, filename))[1]
        records.reverse()
        if one_daughter:
            # restrict to the first daughter branch ('B01')
            records = [rec for rec in records
                       if rec[column_name].split('__')[4] == 'B01']

        total = len(records)
        if sample is not None and sample <= total:
            # draw a reproducibly-ordered random subset
            chosen = sorted(random.sample(xrange(total), sample))
            selection = [records[i] for i in chosen]
        else:
            selection = records
        count = len(selection)

        # gallery name -> composite RGB image (one row per trajectory)
        results = {}
        for row, record in enumerate(selection):
            parts = record[column_name].split('__')
            pos = parts[1][1:]
            key = '__'.join(parts[1:5])
            gallery_path = os.path.join(path, 'analyzed', pos, 'gallery')
            if not os.path.isdir(gallery_path):
                continue
            for gallery_name in os.listdir(gallery_path):
                img = ccore.readImageRGB(
                    os.path.join(gallery_path, gallery_name,
                                 '%s.jpg' % key))
                if gallery_name not in results:
                    # allocate the composite lazily, sized from the first
                    # tile that is read for this gallery
                    results[gallery_name] = ccore.RGBImage(
                        img.width, img.height * count)
                ccore.copySubImage(img,
                                   ccore.Diff2D(0, 0),
                                   ccore.Diff2D(img.width, img.height),
                                   results[gallery_name],
                                   ccore.Diff2D(0, img.height * row))

        for gallery_name in results:
            path_out = os.path.join(path_hmm, '_gallery', gallery_name)
            makedirs(path_out)
            image_name = os.path.join(path_out, '%s.jpg' % group_name)
            ccore.writeImage(results[gallery_name], image_name, quality)
            logger.debug("Gallery image '%s' successfully written."
                         % image_name)

        yield group_name