def draw_annotation_images(self, plate, training_set, container, learner, rid=""):
    """Export one image/mask crop per annotated object into a directory per
    class name and mark each object in the container's RGB overlay."""
    cldir = dict([(cname, join(learner.samples_dir, cname))
                  for cname in learner.class_names.values()])
    # create dir per class name
    for dir_ in cldir.values():
        makedirs(dir_)

    for obj in training_set.itervalues():
        rgb_value = ccore.RGBValue(*hex2rgb(obj.strHexColor))

        file_ = 'PL%s___P%s___T%05d___X%04d___Y%04d' \
                % (plate, self.P, self._iT,
                   obj.oCenterAbs[0], obj.oCenterAbs[1])
        obj.file = file_
        file_ = join(cldir[obj.strClassName],
                     '%s___%s.png' % (file_, rid + "_%s"))
        container.exportObject(obj.iId, file_ % "img", file_ % "msk")

        container.markObjects([obj.iId], rgb_value, False, True)
        ccore.drawFilledCircle(ccore.Diff2D(*obj.oCenterAbs),
                               3, container.img_rgb, rgb_value)
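# `hex2rgb` is referenced above but not defined in this listing. Below is a
# minimal sketch of what such a helper could look like, assuming '#RRGGBB'
# colour strings; the name `hex2rgb_sketch` and the exact parsing are
# assumptions for illustration, not the project's actual implementation.
def hex2rgb_sketch(hex_color):
    """Convert a '#RRGGBB' string into an (r, g, b) tuple of ints."""
    hex_color = hex_color.lstrip('#')
    return tuple(int(hex_color[i:i + 2], 16) for i in (0, 2, 4))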
def render_tracks(self, frame, size, n=5, thick=True, radius=3):
    """Draw the track segments of the last `n` frames up to `frame` into two
    images: one for simple one-to-one connections and one for split events
    (nodes with more than one outgoing edge). Returns (img_conn, img_split)."""
    img_conn = ccore.Image(*size)
    img_split = ccore.Image(*size)

    if n < 0 or frame - n + 1 < self.start_frame:
        current = self.start_frame
        n = frame - current + 1
    else:
        current = frame - n + 1

    found = False
    for i in range(n):
        # scale intensity with recency (older frames darker), capped at 255
        col = int(min(255. * (i + 1) / n, 255))

        if current in self._frame_data:
            preframe = self.closest_preceding_frame(current)
            if preframe is not None:
                found = True
                for objIdP in self._frame_data[preframe]:
                    nodeIdP = self.node_id(preframe, objIdP)
                    objP = self.graph.node_data(nodeIdP)

                    # split events are drawn into their own image
                    if self.graph.out_degree(nodeIdP) > 1:
                        img = img_split
                    else:
                        img = img_conn

                    for edgeId in self.graph.out_arcs(nodeIdP):
                        nodeIdC = self.graph.tail(edgeId)
                        objC = self.graph.node_data(nodeIdC)
                        ccore.drawLine(ccore.Diff2D(*objP.oCenterAbs),
                                       ccore.Diff2D(*objC.oCenterAbs),
                                       img, col, thick=thick)
                        ccore.drawFilledCircle(ccore.Diff2D(*objC.oCenterAbs),
                                               radius, img_conn, col)
        current += 1

    # no preceding frames found: at least mark the objects of the current frame
    if not found and frame in self._frame_data:
        for objId in self._frame_data[frame]:
            nodeId = self.node_id(frame, objId)
            obj = self.graph.node_data(nodeId)
            ccore.drawFilledCircle(ccore.Diff2D(*obj.oCenterAbs),
                                   radius, img_conn, col)

    return img_conn, img_split
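# A hypothetical usage sketch of render_tracks(); the tracker instance, frame
# number and image size below are assumptions for illustration only:
#
#     img_conn, img_split = tracker.render_tracks(frame=42, size=(1392, 1040), n=5)
#
# img_conn then holds the simple one-to-one track segments and img_split the
# segments of nodes with more than one outgoing edge (e.g. division events);
# both can be merged into an RGB overlay for visual inspection.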
def collectObjects(self, plate_id, P, lstReader, oLearner, byTime=True):
    """Match manual annotations from the readers to segmented objects, export
    an image/mask sample per annotated object and feed the labelled objects
    into the learner. Returns the annotated RGB overlay image."""
    #channel_name = oLearner.strChannelId
    strRegionId = oLearner.strRegionId
    img_rgb = None

    self._oLogger.debug('* collecting samples...')

#        bSuccess = True
#        channels = sorted(self._channel_registry.values())
#        primary_cChannel = None
#        for channel2 in lstChannels:
#
#            self.time_holder.prepare_raw_image(channel)
#            self.time_holder.apply_segmentation(oChannel2, oPrimaryChannel)
#
#            if oPrimaryChannel is None:
#                assert oChannel2.RANK == 1
#                oPrimaryChannel = oChannel2

    self.process(apply=False, extract_features=False)

    oChannel = self._channel_registry[oLearner.channel_name]
    oContainer = oChannel.get_container(strRegionId)
    objects = oContainer.getObjects()

    object_lookup = {}
    for oReader in lstReader:
        lstCoordinates = None
        if byTime and P == oReader.getPosition() and self._iT in oReader:
            lstCoordinates = oReader[self._iT]
        elif not byTime and P in oReader:
            lstCoordinates = oReader[P]

        if lstCoordinates is not None:
            for dctData in lstCoordinates:
                label = dctData['iClassLabel']
                if (label in oLearner.dctClassNames and
                        0 <= dctData['iPosX'] < oContainer.width and
                        0 <= dctData['iPosY'] < oContainer.height):
                    center1 = ccore.Diff2D(dctData['iPosX'],
                                           dctData['iPosY'])

                    # test for the obj_id "under" the annotated pixel first
                    obj_id = oContainer.img_labels[center1]

                    # if not background: valid obj_id found
                    if obj_id > 0:
                        dict_append_list(object_lookup, label, obj_id)

                    # otherwise try to find the nearest object within a search
                    # radius of 30 pixel (compatibility with CellCounter)
                    else:
                        dists = []
                        for obj_id, obj in objects.iteritems():
                            diff = obj.oCenterAbs - center1
                            dist_sq = diff.squaredMagnitude()
                            # limit to 30 pixel radius
                            if dist_sq < 900:
                                dists.append((obj_id, dist_sq))
                        if len(dists) > 0:
                            dists.sort(lambda a, b: cmp(a[1], b[1]))
                            obj_id = dists[0][0]
                            dict_append_list(object_lookup, label, obj_id)

    # remove all objects that were not annotated
    object_ids = set(flatten(object_lookup.values()))
    objects_del = set(objects.keys()) - object_ids
    for obj_id in objects_del:
        oContainer.delObject(obj_id)

    self.time_holder.apply_features(oChannel)
    region = oChannel.get_region(strRegionId)

    learner_objects = []
    for label, object_ids in object_lookup.iteritems():
        class_name = oLearner.dctClassNames[label]
        hex_color = oLearner.dctHexColors[class_name]
        rgb_value = ccore.RGBValue(*hexToRgb(hex_color))
        for obj_id in object_ids:
            obj = region[obj_id]
            obj.iLabel = label
            obj.strClassName = class_name
            obj.strHexColor = hex_color

            if (obj.oRoi.upperLeft[0] >= 0 and
                    obj.oRoi.upperLeft[1] >= 0 and
                    obj.oRoi.lowerRight[0] < oContainer.width and
                    obj.oRoi.lowerRight[1] < oContainer.height):
                iCenterX, iCenterY = obj.oCenterAbs

                strPathOutLabel = os.path.join(oLearner.dctEnvPaths['samples'],
                                               oLearner.dctClassNames[label])
                safe_mkdirs(strPathOutLabel)

                strFilenameBase = 'PL%s___P%s___T%05d___X%04d___Y%04d' \
                                  % (plate_id, self.P, self._iT,
                                     iCenterX, iCenterY)

                obj.sample_id = strFilenameBase
                learner_objects.append(obj)

                strFilenameImg = os.path.join(strPathOutLabel,
                                              '%s___img.png' % strFilenameBase)
                strFilenameMsk = os.path.join(strPathOutLabel,
                                              '%s___msk.png' % strFilenameBase)
                # FIXME: exportObject is segfaulting for objects whose bounding
                #        box touches the image border, i.e. one corner point
                #        equals zero!
                oContainer.exportObject(obj_id,
                                        strFilenameImg,
                                        strFilenameMsk)

                oContainer.markObjects([obj_id], rgb_value, False, True)
                ccore.drawFilledCircle(ccore.Diff2D(iCenterX, iCenterY),
                                       3, oContainer.img_rgb, rgb_value)

    if len(learner_objects) > 0:
        oLearner.applyObjects(learner_objects)
        # we don't want to apply None for feature names
        oLearner.setFeatureNames(oChannel.lstFeatureNames)

    strPathOut = os.path.join(oLearner.dctEnvPaths['controls'])
    safe_mkdirs(strPathOut)
    oContainer.exportRGB(os.path.join(strPathOut,
                                      "P%s_T%05d_C%s_R%s.jpg"
                                      % (self.P, self._iT,
                                         oLearner.strChannelId,
                                         oLearner.strRegionId)),
                         '90')
    img_rgb = oContainer.img_rgb

    return img_rgb
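# `dict_append_list` and `flatten` are used in collectObjects() above but not
# defined in this listing. Minimal sketches of what such helpers could look
# like follow; the `_sketch` names mark them as assumptions rather than the
# project's own utility functions.
def dict_append_list_sketch(dct, key, value):
    """Append `value` to the list stored under `key`, creating it if needed."""
    dct.setdefault(key, []).append(value)


def flatten_sketch(list_of_lists):
    """Flatten one level of nesting, e.g. [[1, 2], [3]] -> [1, 2, 3]."""
    return [item for sublist in list_of_lists for item in sublist]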