def mk_traxel(x, y, z, id):
    """Build a pgmlink.Traxel with the given ID and a 3-entry "com"
    (center-of-mass) feature array holding (x, y, z)."""
    traxel = pgmlink.Traxel()
    traxel.ID = id
    traxel.add_feature_array("com", 3)
    for axis, coordinate in enumerate((x, y, z)):
        traxel.set_feature_value("com", axis, coordinate)
    return traxel
def test_property_maps(self):
    """A traxel stored in a HypothesesGraph node-traxel map must be
    retrievable unchanged through the same node key."""
    traxel = pgmlink.Traxel()
    traxel.Id = 33
    graph = pgmlink.HypothesesGraph()
    node = graph.addNode(0)
    traxel_map = graph.addNodeTraxelMap()
    traxel_map[node] = traxel
    self.assertEqual(traxel_map[node].Id, 33)
def generate_traxelstore(h5file,
                         options,
                         feature_path,
                         time_range,
                         x_range,
                         y_range,
                         z_range,
                         size_range,
                         x_scale=1.0,
                         y_scale=1.0,
                         z_scale=1.0,
                         with_div=True,
                         median_object_size=None,
                         max_traxel_id_at=None,
                         with_merger_prior=True,
                         max_num_mergers=1,
                         ext_probs=None):
    """
    Legacy way of creating the "traxelstore", that can handle the old drosophila and rapoport ilastik project files
    and load the stored probabilities.

    Reads region features (RegionCenter, Coord<Minimum/Maximum>, Count) and
    the stored division/object-count probabilities from `h5file`, filters
    objects by the given spatial/size ranges, and creates one traxel per
    surviving object.

    Falls back to hytra.core.probabilitygenerator (and a dummy traxelstore)
    when pgmlink is not importable.

    Returns (ts, fs, max_traxel_id_at, division_probabilities,
    detection_probabilities).  `median_object_size`, if given, is a
    one-element list updated in place with the median object size.
    """
    logging.getLogger('hypotheses_graph_to_json.py').info("generating traxels")
    logging.getLogger('hypotheses_graph_to_json.py').info(
        "filling traxelstore")
    # prefer pgmlink; if unavailable, use the pure-python probability generator
    try:
        import pgmlink as track
        ts = track.TraxelStore()
        fs = track.FeatureStore()
        max_traxel_id_at = track.VectorOfInt()
        withPgmlink = True
    except:
        import hytra.core.probabilitygenerator as track
        withPgmlink = False
        ts, fs = None, None
        max_traxel_id_at = []
    logging.getLogger('hypotheses_graph_to_json.py').info(
        "fetching region features and division probabilities")
    logging.getLogger('hypotheses_graph_to_json.py').debug(
        "region features path: {}".format(options.obj_count_path))
    logging.getLogger('hypotheses_graph_to_json.py').debug(
        "division features path: {}".format(options.div_prob_path))
    logging.getLogger('hypotheses_graph_to_json.py').debug("{}, {}".format(
        h5file.filename, feature_path))
    detection_probabilities = []
    division_probabilities = []
    if with_div:
        logging.getLogger('hypotheses_graph_to_json.py').debug(
            options.div_prob_path)
        divProbs = h5file[options.div_prob_path]
    if with_merger_prior:
        detProbs = h5file[options.obj_count_path]
    # None-ranges mean "no filtering" along that axis
    if x_range is None:
        x_range = [0, sys.maxint]
    if y_range is None:
        y_range = [0, sys.maxint]
    if z_range is None:
        z_range = [0, sys.maxint]
    # one HDF5 group per timestep; restrict to the requested time window
    shape_t = len(h5file[options.obj_count_path].keys())
    keys_sorted = range(shape_t)
    if time_range is not None:
        if time_range[1] == -1:
            time_range[1] = shape_t
        keys_sorted = [
            key for key in keys_sorted
            if time_range[0] <= int(key) < time_range[1]
        ]
    else:
        time_range = (0, shape_t)

    # use this as Traxelstore dummy if we're not using pgmlink
    if not withPgmlink:
        class TSDummy:
            traxels = []
            def bounding_box(self):
                return [time_range[0], 0, 0, 0, time_range[1], 1, 1, 1]
            def add(self, fs, traxel):
                self.traxels.append(traxel)
        ts = TSDummy()

    filtered_labels = {}
    obj_sizes = []
    total_count = 0
    empty_frame = False
    for t in keys_sorted:
        feats_name = options.feats_path % (t, t + 1, 'RegionCenter')
        # region_centers = np.array(feats[t]['0']['RegionCenter'])
        region_centers = np.array(h5file[feats_name])
        feats_name = options.feats_path % (t, t + 1, 'Coord<Minimum>')
        lower = np.array(h5file[feats_name])
        feats_name = options.feats_path % (t, t + 1, 'Coord<Maximum>')
        upper = np.array(h5file[feats_name])
        # row 0 is the background label; drop it
        if region_centers.size:
            region_centers = region_centers[1:, ...]
            lower = lower[1:, ...]
            upper = upper[1:, ...]
        feats_name = options.feats_path % (t, t + 1, 'Count')
        # pixel_count = np.array(feats[t]['0']['Count'])
        pixel_count = np.array(h5file[feats_name])
        if pixel_count.size:
            pixel_count = pixel_count[1:, ...]
        logging.getLogger('hypotheses_graph_to_json.py').info(
            "at timestep {}, {} traxels found".format(t,
                                                      region_centers.shape[0]))
        count = 0
        filtered_labels[t] = []
        for idx in range(region_centers.shape[0]):
            # for 2d data there is no z coordinate; pad with 0
            if len(region_centers[idx]) == 2:
                x, y = region_centers[idx]
                z = 0
            elif len(region_centers[idx]) == 3:
                x, y, z = region_centers[idx]
            else:
                raise Exception(
                    "The RegionCenter feature must have dimensionality 2 or 3.")
            size = pixel_count[idx]
            # filter out objects outside the field of view or size window
            if (x < x_range[0] or x >= x_range[1] or y < y_range[0]
                    or y >= y_range[1] or z < z_range[0] or z >= z_range[1]
                    or size < size_range[0] or size >= size_range[1]):
                filtered_labels[t].append(int(idx + 1))
                continue
            else:
                count += 1
            traxel = track.Traxel()
            if withPgmlink:
                traxel.set_feature_store(fs)
            traxel.set_x_scale(x_scale)
            traxel.set_y_scale(y_scale)
            traxel.set_z_scale(z_scale)
            traxel.Id = int(idx + 1)
            traxel.Timestep = int(t)
            traxel.add_feature_array("com", 3)
            for i, v in enumerate([x, y, z]):
                traxel.set_feature_value('com', i, float(v))
            if with_div:
                traxel.add_feature_array("divProb", 1)
                prob = 0.0
                prob = float(divProbs[str(t)][idx + 1][1])
                # idx+1 because region_centers and pixel_count start from 1, divProbs starts from 0
                traxel.set_feature_value("divProb", 0, prob)
                division_probabilities.append(prob)
            if with_merger_prior and ext_probs is None:
                traxel.add_feature_array("detProb", max_num_mergers + 1)
                probs = []
                for i in range(len(detProbs[str(t)][idx + 1])):
                    probs.append(float(detProbs[str(t)][idx + 1][i]))
                # fold all states beyond the allowed maximum number of mergers
                # into the last allowed state
                probs[max_num_mergers] = sum(probs[max_num_mergers:])
                for i in range(max_num_mergers + 1):
                    traxel.set_feature_value("detProb", i, float(probs[i]))
                detection_probabilities.append([
                    traxel.get_feature_value("detProb", i)
                    for i in range(max_num_mergers + 1)
                ])
            traxel.add_feature_array("count", 1)
            traxel.set_feature_value("count", 0, float(size))
            if median_object_size is not None:
                obj_sizes.append(float(size))
            ts.add(fs, traxel)
        logging.getLogger('hypotheses_graph_to_json.py').info(
            "at timestep {}, {} traxels passed filter".format(t, count))
        max_traxel_id_at.append(int(region_centers.shape[0]))
        if count == 0:
            empty_frame = True
        total_count += count
    if median_object_size is not None:
        # report back via the mutable one-element list argument
        median_object_size[0] = np.median(np.array(obj_sizes),
                                          overwrite_input=True)
        logging.getLogger('hypotheses_graph_to_json.py').info(
            'median object size = {}'.format(median_object_size[0]))
    return ts, fs, max_traxel_id_at, division_probabilities, detection_probabilities
def generate_traxelstore(h5file,
                         options,
                         feature_path,
                         time_range,
                         x_range,
                         y_range,
                         z_range,
                         size_range,
                         x_scale=1.0,
                         y_scale=1.0,
                         z_scale=1.0,
                         with_div=True,
                         with_local_centers=False,
                         median_object_size=None,
                         max_traxel_id_at=None,
                         with_merger_prior=True,
                         max_num_mergers=1,
                         with_optical_correction=False,
                         ext_probs=None):
    """Fill a pgmlink TraxelStore/FeatureStore from the features and
    probabilities stored in `h5file`.

    Variant of generate_traxelstore that additionally supports optically
    corrected region centers ("com_corrected") and, when
    `options.raw_filename` is set, recomputes region features directly from
    the raw data via track.extract_region_features.

    Returns (ts, fs, max_traxel_id_at, division_probabilities,
    detection_probabilities).  `median_object_size`, if given, is a
    one-element list updated in place.

    NOTE(review): `with_local_centers=True` raises "not yet implemented".
    """
    print "generating traxels"
    print "filling traxelstore"
    ts = track.TraxelStore()
    fs = track.FeatureStore()
    max_traxel_id_at = track.VectorOfInt()
    print "fetching region features and division probabilities"
    print h5file.filename, feature_path
    detection_probabilities = []
    division_probabilities = []
    if with_div:
        print options.div_prob_path
        divProbs = h5file[options.div_prob_path]
    if with_merger_prior:
        detProbs = h5file[options.obj_count_path]
    if with_local_centers:
        localCenters = None  # self.RegionLocalCenters(time_range).wait()
    # None-ranges mean "no filtering" along that axis
    if x_range is None:
        x_range = [0, sys.maxint]
    if y_range is None:
        y_range = [0, sys.maxint]
    if z_range is None:
        z_range = [0, sys.maxint]
    # one HDF5 group per timestep; restrict to the requested time window
    shape_t = len(h5file[options.obj_count_path].keys())
    keys_sorted = range(shape_t)
    if time_range is not None:
        if time_range[1] == -1:
            time_range[1] = shape_t
        keys_sorted = [
            key for key in keys_sorted
            if time_range[0] <= int(key) < time_range[1]
        ]
    else:
        time_range = (0, shape_t)
    filtered_labels = {}
    obj_sizes = []
    total_count = 0
    empty_frame = False
    for t in keys_sorted:
        feats_name = options.feats_path % (t, t + 1, 'RegionCenter')
        #region_centers = np.array(feats[t]['0']['RegionCenter'])
        region_centers = np.array(h5file[feats_name])
        feats_name = options.feats_path % (t, t + 1, 'Coord<Minimum>')
        lower = np.array(h5file[feats_name])
        feats_name = options.feats_path % (t, t + 1, 'Coord<Maximum>')
        upper = np.array(h5file[feats_name])
        # row 0 is the background label; drop it
        if region_centers.size:
            region_centers = region_centers[1:, ...]
            lower = lower[1:, ...]
            upper = upper[1:, ...]
        if with_optical_correction:
            try:
                feats_name = options.feats_path % (t, t + 1,
                                                   'RegionCenter_corr')
                region_centers_corr = np.array(h5file[feats_name])
            except:
                raise Exception, 'cannot consider optical correction since it has not been computed'
            if region_centers_corr.size:
                region_centers_corr = region_centers_corr[1:, ...]
        feats_name = options.feats_path % (t, t + 1, 'Count')
        #pixel_count = np.array(feats[t]['0']['Count'])
        pixel_count = np.array(h5file[feats_name])
        if pixel_count.size:
            pixel_count = pixel_count[1:, ...]
        print "at timestep ", t, region_centers.shape[0], "traxels found"
        count = 0
        filtered_labels[t] = []
        for idx in range(region_centers.shape[0]):
            # for 2d data there is no z coordinate; pad with 0
            if len(region_centers[idx]) == 2:
                x, y = region_centers[idx]
                z = 0
            elif len(region_centers[idx]) == 3:
                x, y, z = region_centers[idx]
            else:
                raise Exception, "The RegionCenter feature must have dimensionality 2 or 3."
            size = pixel_count[idx]
            # filter out objects outside the field of view or size window
            if (x < x_range[0] or x >= x_range[1] or y < y_range[0]
                    or y >= y_range[1] or z < z_range[0] or z >= z_range[1]
                    or size < size_range[0] or size >= size_range[1]):
                filtered_labels[t].append(int(idx + 1))
                continue
            else:
                count += 1
            traxel = track.Traxel()
            traxel.set_feature_store(fs)
            traxel.set_x_scale(x_scale)
            traxel.set_y_scale(y_scale)
            traxel.set_z_scale(z_scale)
            traxel.Id = int(idx + 1)
            traxel.Timestep = int(t)
            traxel.add_feature_array("com", 3)
            for i, v in enumerate([x, y, z]):
                traxel.set_feature_value('com', i, float(v))
            if with_optical_correction:
                traxel.add_feature_array("com_corrected", 3)
                for i, v in enumerate(region_centers_corr[idx]):
                    traxel.set_feature_value("com_corrected", i, float(v))
                # pad z of 2d corrected centers with 0, as for "com"
                if len(region_centers_corr[idx]) == 2:
                    traxel.set_feature_value("com_corrected", 2, 0.)
            if with_div:
                traxel.add_feature_array("divProb", 1)
                prob = 0.0
                prob = float(divProbs[str(t)][idx + 1][1])
                # idx+1 because region_centers and pixel_count start from 1, divProbs starts from 0
                traxel.set_feature_value("divProb", 0, prob)
                division_probabilities.append(prob)
            if with_local_centers:
                # unreachable follow-up code kept for reference
                raise Exception, "not yet implemented"
                traxel.add_feature_array("localCentersX",
                                         len(localCenters[t][idx + 1]))
                traxel.add_feature_array("localCentersY",
                                         len(localCenters[t][idx + 1]))
                traxel.add_feature_array("localCentersZ",
                                         len(localCenters[t][idx + 1]))
                for i, v in enumerate(localCenters[t][idx + 1]):
                    traxel.set_feature_value("localCentersX", i, float(v[0]))
                    traxel.set_feature_value("localCentersY", i, float(v[1]))
                    traxel.set_feature_value("localCentersZ", i, float(v[2]))
            if with_merger_prior and ext_probs is None:
                traxel.add_feature_array("detProb", max_num_mergers + 1)
                probs = []
                for i in range(len(detProbs[str(t)][idx + 1])):
                    probs.append(float(detProbs[str(t)][idx + 1][i]))
                # fold all states beyond the allowed maximum number of mergers
                # into the last allowed state
                probs[max_num_mergers] = sum(probs[max_num_mergers:])
                for i in range(max_num_mergers + 1):
                    traxel.set_feature_value("detProb", i, float(probs[i]))
                detection_probabilities.append([
                    traxel.get_feature_value("detProb", i)
                    for i in range(max_num_mergers + 1)
                ])
            traxel.add_feature_array("count", 1)
            traxel.set_feature_value("count", 0, float(size))
            if median_object_size is not None:
                obj_sizes.append(float(size))
            ts.add(fs, traxel)
        print "at timestep ", t, count, "traxels passed filter"
        max_traxel_id_at.append(int(region_centers.shape[0]))
        if count == 0:
            empty_frame = True
        total_count += count

    # load features from raw data
    if len(options.raw_filename) > 0:
        print("Computing Features from Raw Data: {}".format(
            options.raw_filename))
        start_time = time.time()
        with h5py.File(options.raw_filename, 'r') as raw_h5:
            # derive (t, x, y, z) shape from the label image group
            shape = h5file['/'.join(
                options.label_img_path.split('/')[:-1])].values()[0].shape[1:4]
            shape = (len(h5file['/'.join(
                options.label_img_path.split('/')[:-1])].values()), ) + shape
            print("Shape is {}".format(shape))
            # loop over all frames and compute features for all traxels per frame
            for timestep in xrange(max(0, time_range[0]),
                                   min(shape[0], time_range[1])):
                print("\tFrame {}".format(timestep))
                # TODO: handle smaller FOV instead of looking at full frame
                label_image_path = options.label_img_path % (
                    timestep, timestep + 1, shape[1], shape[2], shape[3])
                label_image = np.array(
                    h5file[label_image_path][0, ...,
                                             0]).squeeze().astype(np.uint32)
                raw_image = np.array(raw_h5['/'.join(
                    options.raw_path.split('/'))][timestep, ...,
                                                  0]).squeeze().astype(
                                                      np.float32)
                max_traxel_id = track.extract_region_features(
                    raw_image, label_image, fs, timestep)
                # uncomment the following if no features are taken from the ilp file any more
                # #max_traxel_id_at.append(max_traxel_id)
                # for idx in xrange(1, max_traxel_id):
                #     traxel = track.Traxel()
                #     traxel.set_x_scale(x_scale)
                #     traxel.set_y_scale(y_scale)
                #     traxel.set_z_scale(z_scale)
                #     traxel.Id = idx
                #     traxel.Timestep = timestep
                #     ts.add(fs, traxel)
        end_time = time.time()
        print("Feature computation for a dataset of shape {} took {} secs".
              format(shape, end_time - start_time))
        #fs.dump()
    if median_object_size is not None:
        # report back via the mutable one-element list argument
        median_object_size[0] = np.median(np.array(obj_sizes),
                                          overwrite_input=True)
        print 'median object size = ' + str(median_object_size[0])
    return ts, fs, max_traxel_id_at, division_probabilities, detection_probabilities
def _generate_traxelstore(self,
                          time_range,
                          x_range,
                          y_range,
                          z_range,
                          size_range,
                          x_scale=1.0,
                          y_scale=1.0,
                          z_scale=1.0,
                          with_div=False,
                          with_local_centers=False,
                          median_object_size=None,
                          max_traxel_id_at=None):
    """Build a pgmlink.TraxelStore from this operator's ObjectFeatures slot.

    Filters objects by the given spatial/size ranges and creates one traxel
    per surviving object, optionally attaching division probabilities
    (from the ClassMapping slot) and local centers.

    Returns (ts, filtered_labels, empty_frame), where `filtered_labels`
    maps timestep -> list of filtered-out label ids and `empty_frame`
    flags any timestep in which no traxel passed the filter.
    `median_object_size`, if given, is a one-element list updated in place.
    """
    if not self.Parameters.ready():
        raise Exception("Parameter slot is not ready")
    # persist the filter settings in the Parameters slot
    parameters = self.Parameters.value
    parameters['scales'] = [x_scale, y_scale, z_scale]
    parameters['time_range'] = [min(time_range), max(time_range)]
    parameters['x_range'] = x_range
    parameters['y_range'] = y_range
    parameters['z_range'] = z_range
    parameters['size_range'] = size_range
    print "generating traxels"
    print "fetching region features and division probabilities"
    feats = self.ObjectFeatures(time_range).wait()
    if with_div:
        divProbs = self.ClassMapping(time_range).wait()
    if with_local_centers:
        localCenters = self.RegionLocalCenters(time_range).wait()
    print "filling traxelstore"
    ts = pgmlink.TraxelStore()
    max_traxel_id_at = pgmlink.VectorOfInt()
    filtered_labels = {}
    obj_sizes = []
    total_count = 0
    empty_frame = False
    for t in feats.keys():
        rc = feats[t][default_features_key]['RegionCenter']
        # row 0 is the background label; drop it
        if rc.size:
            rc = rc[1:, ...]
        ct = feats[t][default_features_key]['Count']
        if ct.size:
            ct = ct[1:, ...]
        print "at timestep ", t, rc.shape[0], "traxels found"
        count = 0
        filtered_labels[t] = []
        for idx in range(rc.shape[0]):
            # for 2d data, set z-coordinate to 0:
            if len(rc[idx]) == 2:
                x, y = rc[idx]
                z = 0
            elif len(rc[idx]) == 3:
                x, y, z = rc[idx]
            else:
                raise Exception, "The RegionCenter feature must have dimensionality 2 or 3."
            size = ct[idx]
            # filter out objects outside the field of view or size window
            if (x < x_range[0] or x >= x_range[1] or y < y_range[0]
                    or y >= y_range[1] or z < z_range[0] or z >= z_range[1]
                    or size < size_range[0] or size >= size_range[1]):
                filtered_labels[t].append(int(idx + 1))
                continue
            else:
                count += 1
            tr = pgmlink.Traxel()
            tr.set_x_scale(x_scale)
            tr.set_y_scale(y_scale)
            tr.set_z_scale(z_scale)
            tr.Id = int(idx + 1)
            tr.Timestep = t
            # pgmlink expects always 3 coordinates, z=0 for 2d data
            tr.add_feature_array("com", 3)
            for i, v in enumerate([x, y, z]):
                tr.set_feature_value('com', i, float(v))
            if with_div:
                tr.add_feature_array("divProb", 1)
                # idx+1 because rc and ct start from 1, divProbs starts from 0
                tr.set_feature_value("divProb", 0,
                                     float(divProbs[t][idx + 1][1]))
            # FIXME: check whether it is 2d or 3d data!
            if with_local_centers:
                tr.add_feature_array("localCentersX",
                                     len(localCenters[t][idx + 1]))
                tr.add_feature_array("localCentersY",
                                     len(localCenters[t][idx + 1]))
                tr.add_feature_array("localCentersZ",
                                     len(localCenters[t][idx + 1]))
                for i, v in enumerate(localCenters[t][idx + 1]):
                    tr.set_feature_value("localCentersX", i, float(v[0]))
                    tr.set_feature_value("localCentersY", i, float(v[1]))
                    tr.set_feature_value("localCentersZ", i, float(v[2]))
            tr.add_feature_array("count", 1)
            tr.set_feature_value("count", 0, float(size))
            if median_object_size is not None:
                obj_sizes.append(float(size))
            ts.add(tr)
        print "at timestep ", t, count, "traxels passed filter"
        max_traxel_id_at.append(int(rc.shape[0]))
        if count == 0:
            empty_frame = True
        total_count += count
    if median_object_size is not None:
        # report back via the mutable one-element list argument
        median_object_size[0] = np.median(np.array(obj_sizes),
                                          overwrite_input=True)
        print 'median object size = ' + str(median_object_size[0])
    return ts, filtered_labels, empty_frame
def _generate_traxelstore(self, time_range, x_range, y_range, z_range, size_range, x_scale=1.0, y_scale=1.0, z_scale=1.0, with_div=False, with_local_centers=False, median_object_size=None, max_traxel_id_at=None, with_opt_correction=False, with_coordinate_list=False, with_classifier_prior=False, coordinate_map=None): if not self.Parameters.ready(): raise Exception("Parameter slot is not ready") if coordinate_map is not None and not with_coordinate_list: coordinate_map.initialize() parameters = self.Parameters.value parameters['scales'] = [x_scale, y_scale, z_scale] parameters['time_range'] = [min(time_range), max(time_range)] parameters['x_range'] = x_range parameters['y_range'] = y_range parameters['z_range'] = z_range parameters['size_range'] = size_range logger.info("generating traxels") logger.info("fetching region features and division probabilities") feats = self.ObjectFeatures(time_range).wait() if with_div: if not self.DivisionProbabilities.ready() or len( self.DivisionProbabilities([0]).wait()[0]) == 0: raise Exception, "Classifier not yet ready. Did you forget to train the Division Detection Classifier?" divProbs = self.DivisionProbabilities(time_range).wait() if with_local_centers: localCenters = self.RegionLocalCenters(time_range).wait() if with_classifier_prior: if not self.DetectionProbabilities.ready() or len( self.DetectionProbabilities([0]).wait()[0]) == 0: raise Exception, "Classifier not yet ready. Did you forget to train the Object Count Classifier?" detProbs = self.DetectionProbabilities(time_range).wait() logger.info("filling traxelstore") ts = pgmlink.TraxelStore() max_traxel_id_at = pgmlink.VectorOfInt() filtered_labels = {} obj_sizes = [] total_count = 0 empty_frame = False for t in feats.keys(): rc = feats[t][default_features_key]['RegionCenter'] lower = feats[t][default_features_key]['Coord<Minimum>'] upper = feats[t][default_features_key]['Coord<Maximum>'] if rc.size: rc = rc[1:, ...] lower = lower[1:, ...] upper = upper[1:, ...] 
if with_opt_correction: try: rc_corr = feats[t][ config.features_vigra_name]['RegionCenter_corr'] except: raise Exception, 'cannot consider optical correction since it has not been computed before' if rc_corr.size: rc_corr = rc_corr[1:, ...] ct = feats[t][default_features_key]['Count'] if ct.size: ct = ct[1:, ...] logger.info("at timestep {}, {} traxels found".format( t, rc.shape[0])) count = 0 filtered_labels_at = [] for idx in range(rc.shape[0]): # for 2d data, set z-coordinate to 0: if len(rc[idx]) == 2: x, y = rc[idx] z = 0 elif len(rc[idx]) == 3: x, y, z = rc[idx] else: raise Exception, "The RegionCenter feature must have dimensionality 2 or 3." size = ct[idx] if (x < x_range[0] or x >= x_range[1] or y < y_range[0] or y >= y_range[1] or z < z_range[0] or z >= z_range[1] or size < size_range[0] or size >= size_range[1]): filtered_labels_at.append(int(idx + 1)) continue else: count += 1 tr = pgmlink.Traxel() tr.set_x_scale(x_scale) tr.set_y_scale(y_scale) tr.set_z_scale(z_scale) tr.Id = int(idx + 1) tr.Timestep = t # pgmlink expects always 3 coordinates, z=0 for 2d data tr.add_feature_array("com", 3) for i, v in enumerate([x, y, z]): tr.set_feature_value('com', i, float(v)) if with_opt_correction: tr.add_feature_array("com_corrected", 3) for i, v in enumerate(rc_corr[idx]): tr.set_feature_value("com_corrected", i, float(v)) if len(rc_corr[idx]) == 2: tr.set_feature_value("com_corrected", 2, 0.) if with_div: tr.add_feature_array("divProb", 1) # idx+1 because rc and ct start from 1, divProbs starts from 0 tr.set_feature_value("divProb", 0, float(divProbs[t][idx + 1][1])) if with_classifier_prior: tr.add_feature_array("detProb", len(detProbs[t][idx + 1])) for i, v in enumerate(detProbs[t][idx + 1]): val = float(v) if val < 0.0000001: val = 0.0000001 if val > 0.99999999: val = 0.99999999 tr.set_feature_value("detProb", i, float(v)) # FIXME: check whether it is 2d or 3d data! 
if with_local_centers: tr.add_feature_array("localCentersX", len(localCenters[t][idx + 1])) tr.add_feature_array("localCentersY", len(localCenters[t][idx + 1])) tr.add_feature_array("localCentersZ", len(localCenters[t][idx + 1])) for i, v in enumerate(localCenters[t][idx + 1]): tr.set_feature_value("localCentersX", i, float(v[0])) tr.set_feature_value("localCentersY", i, float(v[1])) tr.set_feature_value("localCentersZ", i, float(v[2])) tr.add_feature_array("count", 1) tr.set_feature_value("count", 0, float(size)) if median_object_size is not None: obj_sizes.append(float(size)) ts.add(tr) # add coordinate lists if with_coordinate_list and coordinate_map is not None: # store coordinates in arma::mat # generate roi: assume the following order: txyzc n_dim = len(rc[idx]) roi = [0] * 5 roi[0] = slice(int(t), int(t + 1)) roi[1] = slice(int(lower[idx][0]), int(upper[idx][0] + 1)) roi[2] = slice(int(lower[idx][1]), int(upper[idx][1] + 1)) if n_dim == 3: roi[3] = slice(int(lower[idx][2]), int(upper[idx][2] + 1)) else: assert n_dim == 2 image_excerpt = self.LabelImage[roi].wait() if n_dim == 2: image_excerpt = image_excerpt[0, ..., 0, 0] elif n_dim == 3: image_excerpt = image_excerpt[0, ..., 0] else: raise Exception, "n_dim = %s instead of 2 or 3" pgmlink.extract_coordinates(coordinate_map, image_excerpt, lower[idx].astype(np.int64), tr) if len(filtered_labels_at) > 0: filtered_labels[str(int(t) - time_range[0])] = filtered_labels_at logger.info("at timestep {}, {} traxels passed filter".format( t, count)) max_traxel_id_at.append(int(rc.shape[0])) if count == 0: empty_frame = True total_count += count if median_object_size is not None: median_object_size[0] = np.median(np.array(obj_sizes), overwrite_input=True) logger.info('median object size = ' + str(median_object_size[0])) self.FilteredLabels.setValue(filtered_labels, check_changed=False) return ts, empty_frame
def _generate_traxelstore(self,
                          time_range,
                          x_range,
                          y_range,
                          z_range,
                          size_range,
                          x_scale=1.0,
                          y_scale=1.0,
                          z_scale=1.0,
                          with_div=False,
                          with_local_centers=False,
                          median_object_size=None,
                          max_traxel_id_at=None,
                          with_opt_correction=False,
                          with_coordinate_list=False,
                          with_classifier_prior=False):
    """Build pgmlink TraxelStore/FeatureStore from this operator's slots.

    Variant that stores bounding-box features ("CoordMinimum"/"CoordMaximum")
    on each traxel and raises DatasetConstraintError with user-facing
    guidance when a required classifier has not been trained.

    Side effects: stores the filter settings in the Parameters slot and
    the filtered label ids in the FilteredLabels slot.

    Returns (fs, ts, empty_frame, max_traxel_id_at).  `median_object_size`,
    if given, is a one-element list updated in place.
    """
    if not self.Parameters.ready():
        raise Exception("Parameter slot is not ready")
    # persist the filter settings in the Parameters slot
    parameters = self.Parameters.value
    parameters['scales'] = [x_scale, y_scale, z_scale]
    parameters['time_range'] = [min(time_range), max(time_range)]
    parameters['x_range'] = x_range
    parameters['y_range'] = y_range
    parameters['z_range'] = z_range
    parameters['size_range'] = size_range
    logger.info("generating traxels")
    logger.info("fetching region features and division probabilities")
    feats = self.ObjectFeatures(time_range).wait()
    if with_div:
        if not self.DivisionProbabilities.ready() or len(
                self.DivisionProbabilities([0]).wait()[0]) == 0:
            msgStr = "\nDivision classifier has not been trained! " + \
                     "Uncheck divisible objects if your objects don't divide or " + \
                     "go back to the Division Detection applet and train it."
            raise DatasetConstraintError("Tracking", msgStr)
        divProbs = self.DivisionProbabilities(time_range).wait()
    if with_local_centers:
        localCenters = self.RegionLocalCenters(time_range).wait()
    if with_classifier_prior:
        if not self.DetectionProbabilities.ready() or len(
                self.DetectionProbabilities([0]).wait()[0]) == 0:
            msgStr = "\nObject count classifier has not been trained! " + \
                     "Go back to the Object Count Classification applet and train it."
            raise DatasetConstraintError("Tracking", msgStr)
        detProbs = self.DetectionProbabilities(time_range).wait()
    logger.info("filling traxelstore")
    ts = pgmlink.TraxelStore()
    fs = pgmlink.FeatureStore()
    max_traxel_id_at = pgmlink.VectorOfInt()
    filtered_labels = {}
    obj_sizes = []
    total_count = 0
    empty_frame = False
    for t in feats.keys():
        rc = feats[t][default_features_key]['RegionCenter']
        lower = feats[t][default_features_key]['Coord<Minimum>']
        upper = feats[t][default_features_key]['Coord<Maximum>']
        # row 0 is the background label; drop it
        if rc.size:
            rc = rc[1:, ...]
            lower = lower[1:, ...]
            upper = upper[1:, ...]
        if with_opt_correction:
            try:
                rc_corr = feats[t][
                    config.features_vigra_name]['RegionCenter_corr']
            except:
                raise Exception, 'Can not consider optical correction since it has not been computed before'
            if rc_corr.size:
                rc_corr = rc_corr[1:, ...]
        ct = feats[t][default_features_key]['Count']
        if ct.size:
            ct = ct[1:, ...]
        logger.debug("at timestep {}, {} traxels found".format(t, rc.shape[0]))
        count = 0
        filtered_labels_at = []
        for idx in range(rc.shape[0]):
            # for 2d data, set z-coordinate to 0:
            if len(rc[idx]) == 2:
                x, y = rc[idx]
                z = 0
            elif len(rc[idx]) == 3:
                x, y, z = rc[idx]
            else:
                raise DatasetConstraintError(
                    "Tracking",
                    "The RegionCenter feature must have dimensionality 2 or 3.")
            size = ct[idx]
            # filter out objects outside the field of view or size window
            if (x < x_range[0] or x >= x_range[1] or y < y_range[0]
                    or y >= y_range[1] or z < z_range[0] or z >= z_range[1]
                    or size < size_range[0] or size >= size_range[1]):
                filtered_labels_at.append(int(idx + 1))
                continue
            else:
                count += 1
            tr = pgmlink.Traxel()
            tr.set_feature_store(fs)
            tr.set_x_scale(x_scale)
            tr.set_y_scale(y_scale)
            tr.set_z_scale(z_scale)
            tr.Id = int(idx + 1)
            tr.Timestep = int(t)
            # pgmlink expects always 3 coordinates, z=0 for 2d data
            tr.add_feature_array("com", 3)
            for i, v in enumerate([x, y, z]):
                tr.set_feature_value('com', i, float(v))
            # bounding box corners of the object
            tr.add_feature_array("CoordMinimum", 3)
            for i, v in enumerate(lower[idx]):
                tr.set_feature_value("CoordMinimum", i, float(v))
            tr.add_feature_array("CoordMaximum", 3)
            for i, v in enumerate(upper[idx]):
                tr.set_feature_value("CoordMaximum", i, float(v))
            if with_opt_correction:
                tr.add_feature_array("com_corrected", 3)
                for i, v in enumerate(rc_corr[idx]):
                    tr.set_feature_value("com_corrected", i, float(v))
                # pad z of 2d corrected centers with 0, as for "com"
                if len(rc_corr[idx]) == 2:
                    tr.set_feature_value("com_corrected", 2, 0.)
            if with_div:
                tr.add_feature_array("divProb", 1)
                # idx+1 because rc and ct start from 1, divProbs starts from 0
                tr.set_feature_value("divProb", 0,
                                     float(divProbs[t][idx + 1][1]))
            if with_classifier_prior:
                tr.add_feature_array("detProb", len(detProbs[t][idx + 1]))
                for i, v in enumerate(detProbs[t][idx + 1]):
                    # clamp probabilities away from exact 0/1
                    val = float(v)
                    if val < 0.0000001:
                        val = 0.0000001
                    if val > 0.99999999:
                        val = 0.99999999
                    tr.set_feature_value("detProb", i, float(val))
            # FIXME: check whether it is 2d or 3d data!
            if with_local_centers:
                tr.add_feature_array("localCentersX",
                                     len(localCenters[t][idx + 1]))
                tr.add_feature_array("localCentersY",
                                     len(localCenters[t][idx + 1]))
                tr.add_feature_array("localCentersZ",
                                     len(localCenters[t][idx + 1]))
                for i, v in enumerate(localCenters[t][idx + 1]):
                    tr.set_feature_value("localCentersX", i, float(v[0]))
                    tr.set_feature_value("localCentersY", i, float(v[1]))
                    tr.set_feature_value("localCentersZ", i, float(v[2]))
            tr.add_feature_array("count", 1)
            tr.set_feature_value("count", 0, float(size))
            if median_object_size is not None:
                obj_sizes.append(float(size))
            ts.add(fs, tr)
        if len(filtered_labels_at) > 0:
            filtered_labels[str(int(t) - time_range[0])] = filtered_labels_at
        logger.debug("at timestep {}, {} traxels passed filter".format(
            t, count))
        max_traxel_id_at.append(int(rc.shape[0]))
        if count == 0:
            empty_frame = True
        total_count += count
    if median_object_size is not None:
        # report back via the mutable one-element list argument
        median_object_size[0] = np.median(np.array(obj_sizes),
                                          overwrite_input=True)
        logger.info('median object size = ' + str(median_object_size[0]))
    self.FilteredLabels.setValue(filtered_labels, check_changed=True)
    return fs, ts, empty_frame, max_traxel_id_at
def fillTraxels(self,
                usePgmlink=True,
                ts=None,
                fs=None,
                dispyNodeIps=None,
                turnOffFeatures=None):
    """
    Compute all the features and predict object count as well as division probabilities.
    Store the resulting information (and all other features) in the given pgmlink::TraxelStore,
    or create a new one if ts=None.

    usePgmlink: boolean whether pgmlink should be used and a pgmlink.TraxelStore and pgmlink.FeatureStore returned
    ts: an initial pgmlink.TraxelStore (only used if usePgmlink=True)
    fs: an initial pgmlink.FeatureStore (only used if usePgmlink=True)
    dispyNodeIps: optional list of dispy node IPs forwarded to feature extraction
    turnOffFeatures: optional list of feature names to skip during extraction

    returns (ts, fs) but only if usePgmlink=True, otherwise it fills self.TraxelsPerFrame
    """
    # BUGFIX: the defaults were mutable lists ([]), which are shared across
    # calls; use None sentinels instead (behavior unchanged for callers).
    if dispyNodeIps is None:
        dispyNodeIps = []
    if turnOffFeatures is None:
        turnOffFeatures = []
    if usePgmlink:
        import pgmlink
        if ts is None:
            ts = pgmlink.TraxelStore()
            fs = pgmlink.FeatureStore()
        else:
            assert (fs is not None)
    getLogger().info("Extracting features...")
    self._featuresPerFrame = self._extractAllFeatures(
        dispyNodeIps=dispyNodeIps, turnOffFeatures=turnOffFeatures)
    getLogger().info("Creating traxels...")
    progressBar = ProgressBar(stop=len(self._featuresPerFrame))
    progressBar.show(increase=0)
    for frame, features in self._featuresPerFrame.iteritems():
        # predict random forests
        if self._countClassifier is not None:
            objectCountProbabilities = self._countClassifier.predictProbabilities(
                features=None, featureDict=features)
        # no division prediction for the last frame (no successor to divide into)
        if self._divisionClassifier is not None and frame + 1 < self.timeRange[
                1]:
            divisionProbabilities = self._divisionClassifier.predictProbabilities(
                features=None, featureDict=features)
        # create traxels for all objects
        for objectId in range(1, features.values()[0].shape[0]):
            # print("Frame {} Object {}".format(frame, objectId))
            pixelSize = features['Count'][objectId]
            # skip empty objects and those outside the configured size window
            if pixelSize == 0 or (self._options.sizeFilter is not None \
                    and (pixelSize < self._options.sizeFilter[0] \
                         or pixelSize > self._options.sizeFilter[1])):
                continue
            # create traxel
            if usePgmlink:
                traxel = pgmlink.Traxel()
            else:
                traxel = Traxel()
            traxel.Id = objectId
            traxel.Timestep = frame
            # add raw features
            for key, val in features.iteritems():
                if key == 'id':
                    traxel.idInSegmentation = val[objectId]
                elif key == 'filename':
                    traxel.segmentationFilename = val[objectId]
                else:
                    try:
                        if isinstance(
                                val, list):  # polygon feature returns a list!
                            featureValues = val[objectId]
                        else:
                            featureValues = val[objectId, ...]
                    except:
                        getLogger().error(
                            "Could not get feature values of {} for key {} from matrix with shape {}"
                            .format(objectId, key, val.shape))
                        raise AssertionError()
                    try:
                        self._setTraxelFeatureArray(traxel, featureValues, key)
                        # mirror RegionCenter under the "com" name pgmlink expects
                        if key == 'RegionCenter':
                            self._setTraxelFeatureArray(
                                traxel, featureValues, 'com')
                    except:
                        getLogger().error(
                            "Could not add feature array {} for {}".format(
                                featureValues, key))
                        raise AssertionError()
            # add random forest predictions
            if self._countClassifier is not None:
                self._setTraxelFeatureArray(
                    traxel, objectCountProbabilities[objectId, :],
                    self.detectionProbabilityFeatureName)
            if self._divisionClassifier is not None and frame + 1 < self.timeRange[
                    1]:
                self._setTraxelFeatureArray(
                    traxel, divisionProbabilities[objectId, :],
                    self.divisionProbabilityFeatureName)
            # set other parameters
            traxel.set_x_scale(self.x_scale)
            traxel.set_y_scale(self.y_scale)
            traxel.set_z_scale(self.z_scale)
            if usePgmlink:
                # add to pgmlink's traxelstore
                ts.add(fs, traxel)
            else:
                self.TraxelsPerFrame.setdefault(frame, {})[objectId] = traxel
        progressBar.show()
    if usePgmlink:
        return ts, fs