Example #1
    def test(self, image, loc_lst, viz=False):
        """ test an image """
        demo = self.imread(image)
        correct = 0
        if viz:
            for loc in loc_lst:
                cv2.circle(demo, loc, 2, (0, 0, 255), 3)

        data, labels, segments = self.get_data([image], [loc_lst])
        total_segments = len(data)
        testing_data = [self.preprocess_pca(im) for im in data]

        testing_data = np.vstack(testing_data)
        testing_data = self._pca.transform(testing_data)

        result = self._classifier.predict(testing_data)

        for predicted, expected, s in zip(result, labels, segments):
            print "predicted: ", predicted
            print "expected: ", expected
            if predicted == expected:
                correct += 1
                if viz:
                    s.draw(demo, (0, 255, 0), 1)
            else:
                if viz:
                    s.draw(demo, (255, 255, 0), 1)
        if viz:
            com.debug_im(demo)
        return total_segments, correct
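A minimal driver for this method might look like the sketch below; the class name PCACounter, the image path, and the point list are placeholders, and the instance is assumed to have been trained already so that _pca and _classifier are fitted.

# Hypothetical usage sketch; names and values below are placeholders.
true_locations = [(120, 45), (88, 210)]      # manually annotated object centres
counter = PCACounter()                       # assumed trained instance exposing test()
total, correct = counter.test("sample.png", true_locations, viz=True)
print "accuracy: ", float(correct) / total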
Example #2
    def get_data(self, image_lst, loc_lst, visualize=False):
        """
        prepare data for training phase
        """
        data = []
        labels = []
        result = []

        for image, locs in zip(image_lst, loc_lst):

            demo_img = self.imread(image, 1)
            processed_img = self.preprocess(demo_img)
            segments = self.segment(processed_img)
            segments = [self.preprocess_segment(s) for s in segments]
            locations = list(locs)
            # draw all counted objects in the image
            # visualize true cells
            # check if each segment is close to one true cell

            for seg in segments:
                data.append(seg.get_region(demo_img))
                result.append(seg)
            self.eval_segments(segments, locs)
            labels.extend([1 if s.detected else 0 for s in segments])

            if visualize:
                com.visualize_segments(demo_img, segments, locations)
                com.debug_im(processed_img)
                com.debug_im(demo_img, True)

        return data, labels, result
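The tuple returned here (segment regions, 1/0 labels, and the segment objects themselves) is what the training step consumes. A rough sketch of that step, assuming scikit-learn's PCA and SVC as the estimators (the project may use different ones) and reusing preprocess_pca from Example #1:

# Training sketch; trainer, train_images and train_locations are placeholders.
import numpy as np
from sklearn.decomposition import PCA
from sklearn.svm import SVC

data, labels, segments = trainer.get_data(train_images, train_locations)
X = np.vstack([trainer.preprocess_pca(im) for im in data])
pca = PCA(n_components=50).fit(X)            # component count chosen arbitrarily here
clf = SVC().fit(pca.transform(X), labels)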
Example #3
    def test(self, image, loc_lst, viz=False):
        """ test an image """
        demo = self.imread(image)
        assert demo is not None
        correct = 0
        locations = list(loc_lst)
        data, labels, segments = self.get_data([image], [loc_lst])
        total_segments = len(data)
        testing_data = [self._extraction.compute(im) for im in data]
        hist_data, hog_data = zip(*testing_data)
        print "total of segments: ", total_segments

        hist_data = np.array(hist_data, dtype=np.float32)
        hog_data = np.array(hog_data, dtype=np.float32)
        ############################
        # Normalize the histogram feature
        hist_data = normalize(hist_data, axis=1)

        ##############################
        # RandomizedPCA for the hog feature
        hog_data = self._pca.transform(hog_data)

        ##############################
        # Fusion of classifiers

        y_proba_lbp = self.clf_lbp.predict_proba(hog_data)
        y_proba_hist = self.clf_hist.predict_proba(hist_data)
        y_proba = None

        if self.op == "sum":
            y_proba = (y_proba_hist + y_proba_lbp)
        elif self.op == "max":
            y_proba = np.maximum(y_proba_hist, y_proba_lbp)
        elif self.op == "min":
            y_proba = np.minimum(y_proba_hist, y_proba_lbp)
        elif self.op == "mul":
            y_proba = np.multiply(y_proba_hist, y_proba_lbp)

        # print "y proba:", y_proba
        result = np.argmax(y_proba, axis=1)

        # score = accuracy_score(labels_test, predicted)
        # visualization
        for predicted, expected, s in zip(result, labels, segments):
            if predicted == expected:
                correct += 1
                s.detected = True

        if viz:
            com.visualize_segments(demo, segments, locations)
            com.debug_im(demo)
        return total_segments, correct
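The probability-fusion step above is self-contained enough to pull into a small helper; a sketch (the name fuse_probabilities is not from the original code):

import numpy as np

def fuse_probabilities(proba_a, proba_b, op="sum"):
    """ combine two (n_samples, n_classes) probability arrays and return class indices """
    if op == "sum":
        fused = proba_a + proba_b
    elif op == "max":
        fused = np.maximum(proba_a, proba_b)
    elif op == "min":
        fused = np.minimum(proba_a, proba_b)
    elif op == "mul":
        fused = np.multiply(proba_a, proba_b)
    else:
        raise ValueError("unknown fusion operator: %s" % op)
    return np.argmax(fused, axis=1)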
Example #4
    def test(self, image, loc_lst, viz=False):
        """ test an image """
        demo = self.imread(image)
        correct = 0
        locations = list(loc_lst)

        data, labels, segments = self.get_data([image], [loc_lst])
        total_segments = len(data)
        testing_data = [self._extraction.compute(im)[0] for im in data]

        testing_data = np.vstack(testing_data)

        result = self._classifier.predict(testing_data)

        for predicted, expected, s in zip(result, labels, segments):
            if predicted == expected:
                correct += 1
                s.detected = True
        if viz:
            com.visualize_segments(demo, segments, locations)
            com.debug_im(demo)
        return correct, total_segments
Example #5
def watershed(image):
    """ the watershed algorithm """
    if len(image.shape) != 2:
        raise TypeError("The input image must be grayscale")

    # thresholding
    _, thres = cv2.threshold(image, 0, 255,
                             cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    kernel = np.ones((1, 1), np.uint8)
    opening = cv2.morphologyEx(thres, cv2.MORPH_OPEN, kernel, iterations=1)
    bg = cv2.dilate(opening, kernel, iterations=2)

    dist_transform = cv2.distanceTransform(thres, cv2.cv.CV_DIST_L2, 3)
    com.debug_im(dist_transform)
    _, fg = cv2.threshold(dist_transform, 0.7*dist_transform.max(), 255, 0)

    fg = np.uint8(fg)

    unknown = cv2.subtract(bg, fg)
    markers, _ = label(fg)

    markers = markers + 1

    markers[unknown == 255] = 0
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    cv2.watershed(image, markers)
    markers[markers == -1] = 0
    markers = markers.astype(np.uint8)

    contours, _ = cv2.findContours(markers,
                                   cv2.RETR_LIST,
                                   cv2.CHAIN_APPROX_SIMPLE)

    segments = [Contour(points_lst) for points_lst in contours]
    for s in segments:
        s.draw(image, (255, 255, 0), 1)
    com.debug_im(image)
    return segments
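A minimal way to exercise this function; the file name is a placeholder, and the debug windows come from com.debug_im inside watershed itself:

# Usage sketch; "cells.png" is a placeholder path.
gray = cv2.imread("cells.png", 0)            # 0 = load as single-channel grayscale
assert gray is not None
segments = watershed(gray)
print "number of segments: ", len(segments)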
Example #6
    def test(self, image, loc_lst, viz=False):
        """ test an image """
        demo = self.imread(image)
        locations = loc_lst
        data, all_labels, segments = self.get_data([image], [loc_lst])
        num_samples = len(data)
        labels = []
        features = []

        for idx, im in enumerate(data):
            f = self._extraction.compute(im, self._extraction.detect(im))[1]
            if f is not None:
                features.append(f)
                labels.append(all_labels[idx])

        testing_data = []

        for feature in features:
            coeffs = sparse.encode(feature, self.dictionary, self.alpha)
            vector = pooling.max_pooling(coeffs)
            testing_data.append(vector)

        testing_data = np.vstack(testing_data)
        testing_data = np.float32(testing_data)

        labels = np.float32(labels)
        result = self._classifier.predict(testing_data)

        for predicted, expected, s in zip(result, labels, segments):
            if predicted == expected:
                s.detected = True
        correct = len(filter(lambda x: x.detected, segments))
        if viz:
            com.visualize_segments(demo, segments, locations)
            com.debug_im(demo)
        return correct, num_samples
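The sparse.encode and pooling.max_pooling helpers are project-specific and not shown here. A rough equivalent of that encode-and-pool step built on scikit-learn's SparseCoder (an assumption, not the project's actual implementation) could be:

import numpy as np
from sklearn.decomposition import SparseCoder

def encode_and_pool(feature, dictionary, alpha):
    """ sparse-code the descriptor rows against `dictionary`, then max-pool the codes """
    coder = SparseCoder(dictionary=dictionary,
                        transform_algorithm="lasso_lars",
                        transform_alpha=alpha)
    coeffs = coder.transform(feature)        # shape: (n_descriptors, n_atoms)
    return np.max(np.abs(coeffs), axis=0)    # one pooled vector per segment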
Example #7
    def get_data(self, image_lst, loc_lst, visualize=False):
        """
        prepare data for training phase
        """
        data = []
        labels = []
        result = []
        for image, locs in zip(image_lst, loc_lst):

            demo_img = self.imread(image, 1)
            processed_img, gray_img = self.preprocess(demo_img)
            segments = self.segment(processed_img, gray_img, demo_img)

            correct = 0
            # draw all counted objects in the image

            if visualize:
                for seg in segments:
                    seg.draw(demo_img, (0, 255, 0), 1)
                for loc in locs:
                    cv2.circle(demo_img, loc, 2, (0, 255, 0), 1)

            # visualize true cells
            # check if each segment is close to one true cell
            for seg in segments:
                data.append(seg.get_region(gray_img))
                result.append(seg)

                if len(locs) == 0:
                    labels.append(-1)
                    continue

                point, dist = com.nearest_point(seg.center, locs)

                if dist <= self._db.tol:
                    locs.remove(point)
                    correct += 1
                    labels.append(1)
                    if visualize:
                        seg.draw(demo_img, (255, 255, 0), 1)
                else:
                    labels.append(-1)

            if visualize:
                com.debug_im(gray_img)
                com.debug_im(processed_img)
                com.debug_im(demo_img, True)

        return data, labels, result
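The matching above relies on com.nearest_point, whose implementation is not shown; a plausible version (an assumption, for illustration only) returns the closest annotated point and its Euclidean distance:

import math

def nearest_point(center, points):
    """ return (closest_point, distance) for `center` among `points` (assumed non-empty) """
    best = min(points, key=lambda p: math.hypot(p[0] - center[0], p[1] - center[1]))
    return best, math.hypot(best[0] - center[0], best[1] - center[1])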
Example #8
# data_train = np.transpose(data_train)
data_test = np.array(data_test)
# data_test = np.transpose(data_test)
# Randomized PCA
n_components = 400
# pca = SparsePCA(n_components, n_jobs=-1).fit(data_train)
# pca = RandomizedPCA(n_components, whiten=True).fit(data_train)
print data_train.shape
pca = PCA(n_components=n_components).fit(data_train)
print "components size: ", pca.components_.shape
pca_features_train = pca.transform(data_train)
pca_features_test = pca.transform(data_test)
print pca_features_train.shape

for feature in pca_features_train:
    com.debug_im(feature.reshape(20, 20))

# grid_param = {"kernel": ("rbf", "poly", "sigmoid"),
                # "C": np.logspace(-5, -3, num=8, base=2),
                # "gamma": np.logspace(-15, 3, num=8, base=2)}

# clf = GridSearchCV(SVC(class_weight="auto"), grid_param)
# clf = clf.fit(pca_features_train, labels_train)

# print clf.best_estimator_

# y_pred = clf.predict(pca_features_test)

# score = accuracy_score(labels_test, y_pred)
# scores.append(score)
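Run end to end, the commented-out evaluation above would look roughly like the sketch below; the hyper-parameter grid is copied from the comments, the imports are added, and labels_train / labels_test are assumed to exist in the surrounding script.

import numpy as np
from sklearn.svm import SVC
from sklearn.grid_search import GridSearchCV      # sklearn.model_selection in newer releases
from sklearn.metrics import accuracy_score

grid_param = {"kernel": ("rbf", "poly", "sigmoid"),
              "C": np.logspace(-5, -3, num=8, base=2),
              "gamma": np.logspace(-15, 3, num=8, base=2)}
clf = GridSearchCV(SVC(class_weight="auto"), grid_param)
clf = clf.fit(pca_features_train, labels_train)
y_pred = clf.predict(pca_features_test)
print "accuracy: ", accuracy_score(labels_test, y_pred)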