def get_data(self, image_lst, loc_lst, visualize=False):
    """ Prepare data for the training phase. """
    data = []
    labels = []
    result = []
    for image, locs in zip(image_lst, loc_lst):
        demo_img = self.imread(image, 1)
        processed_img = self.preprocess(demo_img)
        segments = self.segment(processed_img)
        segments = [self.preprocess_segment(s) for s in segments]
        locations = list(locs)
        # Collect the image region of every candidate segment.
        for seg in segments:
            data.append(seg.get_region(demo_img))
            result.append(seg)
        # Check if each segment is close to one true cell and label it.
        self.eval_segments(segments, locs)
        labels.extend([1 if s.detected else 0 for s in segments])
        if visualize:
            # Draw all counted objects and visualize the true cells.
            com.visualize_segments(demo_img, segments, locations)
            com.debug_im(processed_img)
            com.debug_im(demo_img, True)
    return data, labels, result
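# The repo's eval_segments is not shown here; the sketch below is only one
# plausible reading of the "check if each segment is close to one true cell"
# step: a segment counts as detected when its center lies within max_dist
# pixels of some annotated cell location. The names eval_segments_sketch,
# seg.center and max_dist are illustrative assumptions, not the repo's API.
import numpy as np

def eval_segments_sketch(segments, locs, max_dist=10):
    locs = np.asarray(locs, dtype=np.float32)
    for seg in segments:
        if len(locs) == 0:
            seg.detected = False
            continue
        cx, cy = seg.center                          # assumed (x, y) attribute
        d = np.sqrt(((locs - (cx, cy)) ** 2).sum(axis=1))
        seg.detected = bool(d.min() <= max_dist)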
def test(self, image, loc_lst, viz=False):
    """ Test an image. """
    demo = self.imread(image)
    assert demo is not None
    correct = 0
    locations = list(loc_lst)
    data, labels, segments = self.get_data([image], [loc_lst])
    total_segments = len(data)
    # Compute (histogram, HOG) feature pairs for every segment region.
    testing_data = [self._extraction.compute(im) for im in data]
    hist_data, hog_data = zip(*testing_data)
    print "total of segments: ", total_segments
    hist_data = np.array(hist_data, dtype=np.float32)
    hog_data = np.array(hog_data, dtype=np.float32)
    ############################
    # Normalize the histogram feature
    hist_data = normalize(hist_data, axis=1)
    ##############################
    # RandomizedPCA for the hog feature
    hog_data = self._pca.transform(hog_data)
    ##############################
    # Fusion of classifiers: combine the per-class probabilities of the
    # two classifiers with the configured operator.
    y_proba_lbp = self.clf_lbp.predict_proba(hog_data)
    y_proba_hist = self.clf_hist.predict_proba(hist_data)
    y_proba = None
    if self.op == "sum":
        y_proba = y_proba_hist + y_proba_lbp
    elif self.op == "max":
        y_proba = np.maximum(y_proba_hist, y_proba_lbp)
    elif self.op == "min":
        y_proba = np.minimum(y_proba_hist, y_proba_lbp)
    elif self.op == "mul":
        y_proba = np.multiply(y_proba_hist, y_proba_lbp)
    result = np.argmax(y_proba, axis=1)
    # Count correct predictions and mark the matching segments as detected.
    for predicted, expected, s in zip(result, labels, segments):
        if predicted == expected:
            correct += 1
            s.detected = True
    if viz:
        com.visualize_segments(demo, segments, locations)
        com.debug_im(demo)
    return total_segments, correct
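# A minimal, self-contained sketch of the probability-level fusion used above,
# assuming two predict_proba outputs of shape (n_samples, n_classes). The name
# fuse_probabilities is illustrative, not part of the repo.
import numpy as np

def fuse_probabilities(p_a, p_b, op="sum"):
    """Combine two per-class probability matrices with a fixed fusion rule."""
    if op == "sum":
        fused = p_a + p_b
    elif op == "max":
        fused = np.maximum(p_a, p_b)
    elif op == "min":
        fused = np.minimum(p_a, p_b)
    elif op == "mul":
        fused = np.multiply(p_a, p_b)
    else:
        raise ValueError("unknown fusion op: %s" % op)
    # The predicted class is the column with the highest fused score.
    return np.argmax(fused, axis=1)

# Toy usage: two classifiers over 2 samples and 2 classes.
p_hist = np.array([[0.9, 0.1], [0.2, 0.8]])
p_lbp = np.array([[0.3, 0.7], [0.6, 0.4]])
fused_labels = fuse_probabilities(p_hist, p_lbp, op="sum")   # -> array([0, 1])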
def test(self, image, loc_lst, viz=False):
    """ Test an image. """
    demo = self.imread(image)
    correct = 0
    locations = list(loc_lst)
    data, labels, segments = self.get_data([image], [loc_lst])
    total_segments = len(data)
    # Extract a feature vector from each segment region and classify it.
    testing_data = [self._extraction.compute(im)[0] for im in data]
    testing_data = np.vstack(testing_data)
    result = self._classifier.predict(testing_data)
    # Count correct predictions and mark the matching segments as detected.
    for predicted, expected, s in zip(result, labels, segments):
        if predicted == expected:
            correct += 1
            s.detected = True
    if viz:
        com.visualize_segments(demo, segments, locations)
        com.debug_im(demo)
    return correct, total_segments
def test(self, image, loc_lst, viz=False):
    """ Test an image. """
    demo = self.imread(image)
    locations = loc_lst
    data, l, segments = self.get_data([image], [loc_lst])
    num_samples = len(data)
    labels = []
    features = []
    kept_segments = []
    # Keep only segments whose local descriptors could be computed, so that
    # features, labels and segments stay aligned.
    for idx, im in enumerate(data):
        f = self._extraction.compute(im, self._extraction.detect(im))[1]
        if f is not None:
            features.append(f)
            labels.append(l[idx])
            kept_segments.append(segments[idx])
    # Sparse-code each descriptor set against the learned dictionary and
    # max-pool the coefficients into one fixed-length vector per segment.
    testing_data = []
    for feature in features:
        coeffs = sparse.encode(feature, self.dictionary, self.alpha)
        vector = pooling.max_pooling(coeffs)
        testing_data.append(vector)
    testing_data = np.vstack(testing_data)
    testing_data = np.float32(testing_data)
    labels = np.float32(labels)
    result = self._classifier.predict(testing_data)
    for predicted, expected, s in zip(result, labels, kept_segments):
        if predicted == expected:
            s.detected = True
    correct = len(filter(lambda x: x.detected, segments))
    if viz:
        com.visualize_segments(demo, segments, locations)
        com.debug_im(demo)
    return correct, num_samples
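# A minimal sketch of the encode-then-pool step above, using scikit-learn's
# SparseCoder in place of the repo's sparse.encode / pooling.max_pooling
# helpers (whose exact signatures are not shown). The dictionary D is assumed
# to have shape (n_atoms, descriptor_dim); encode_and_pool is an illustrative
# name, not part of the repo.
import numpy as np
from sklearn.decomposition import SparseCoder

def encode_and_pool(descriptors, D, alpha):
    """Sparse-code a set of local descriptors and max-pool the absolute
    coefficients into a single fixed-length vector of size n_atoms."""
    coder = SparseCoder(dictionary=D, transform_algorithm="lasso_lars",
                        transform_alpha=alpha)
    coeffs = coder.transform(descriptors)        # (n_descriptors, n_atoms)
    return np.max(np.abs(coeffs), axis=0)        # (n_atoms,)

# Toy usage: 5 random 32-dimensional descriptors, a 16-atom dictionary.
rng = np.random.RandomState(0)
D = rng.randn(16, 32)
D /= np.linalg.norm(D, axis=1, keepdims=True)
descriptors = rng.randn(5, 32)
pooled = encode_and_pool(descriptors, D, alpha=1.0)   # shape (16,)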