Example #1
    def show_gray_histogram(self):
        numbins = 256
        ranges = [0.0, 256.0]  # upper bound is exclusive, so 256.0 covers pixel value 255

        width = 256
        height = 256

        bytes_per_line = 3 * width

        hist_image = np.zeros([height, width, 3], np.uint8)

        # hist_image = np.zeros((256,256,3))  # all-zero image used to draw the histogram

        bins = np.arange(numbins).reshape(numbins, 1)  # x-coordinate of each histogram bin

        color = [(255, 0, 0)]  # a single BGR color (blue) for the one grayscale channel

        for ch, col in enumerate(color):
            origin_hist = cv2.calcHist([self.gray_image], [ch], None, [numbins], ranges)
            cv2.normalize(origin_hist, origin_hist, 0, 255 * 0.9, cv2.NORM_MINMAX)
            hist = np.int32(np.around(origin_hist))
            pts = np.column_stack((bins, hist))
            cv2.polylines(hist_image, [pts], False, col)

        # print(type(hist_image.data))
        hist_image = np.flipud(hist_image)
        # cv2.imshow("histogram", hist_image)
        demo_utils.show_cvimage_to_label(hist_image, self.gray_histogram_label)
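Every example here hands its result to demo_utils.show_cvimage_to_label, whose source is not included. Below is a minimal sketch of what such a helper could look like, assuming PyQt5 and a BGR OpenCV image; the unused bytes_per_line variables scattered through the snippets hint at exactly this QImage conversion, but the details are an assumption, not the project's actual code.

    import cv2
    from PyQt5.QtGui import QImage, QPixmap

    def show_cvimage_to_label(cv_image, label):
        # Promote grayscale frames to 3 channels so one code path handles both.
        if cv_image.ndim == 2:
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_GRAY2BGR)
        # OpenCV stores color as BGR; Qt expects RGB.
        rgb = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
        height, width, _ = rgb.shape
        bytes_per_line = 3 * width
        qimage = QImage(rgb.data, width, height, bytes_per_line, QImage.Format_RGB888)
        label.setPixmap(QPixmap.fromImage(qimage))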
Example #2
    def load_image(self):
        fname, _ = QFileDialog.getOpenFileName(self, 'Open file', './images', "Image files (*.jpg *.png)")

        if len(fname) > 0:
            self.image = cv2.imread(fname)
            # cv2.imshow(fname, image)
            demo_utils.show_cvimage_to_label(self.image, self.image_label)
            self.show_histogram()
Example #3
    def classify_sklearn_svm(self):

        svc = SVC()
        svc.fit(self.training_data, self.responses_data)

        predicted = svc.predict(self.test_data)
        print("Confusion matrix:\n%s" %
        metrics.confusion_matrix(self.test_responses_data,
                                   predicted))
        print("Accuracy: %0.4f" % metrics.accuracy_score(self.test_responses_data,
                                                     predicted))

        output = self.image.copy()

        font = cv2.FONT_HERSHEY_SIMPLEX

        # fontScale
        fontScale = 0.5

        # Blue color in BGR
        color1 = (255, 0, 0)

        # Line thickness of 1 px
        thickness = 1

        self.features = classify_utils.extract_feautre(self.threshold_image)
        print(self.features)

        for i in range(0, len(self.features)):

            sample_data = np.array([self.features[i][0]], dtype=np.float32)

            # print(sample_data.shape)

            result = svc.predict(sample_data)

            print(self.features[i][0])
            print(result[0])

            label = int(result[0])

            org = (int(self.features[i][1][0]), int(self.features[i][1][1]))

            if label == 0:
                color = (255, 0, 0)
                text = "NUT"
            elif label == 1:
                color = (0, 255, 0)
                text = "RING"
            else:
                color = (0, 0, 255)
                text = "SCREW"

            output = cv2.putText(output, text, org, font,
                                 fontScale, color, thickness)

        demo_utils.show_cvimage_to_label(output, self.result_label2)
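classify_utils.extract_feautre (the spelling follows the calls above) is not part of these snippets either. Judging only from how its return value is used, features[i][0] must be a feature row that predict() accepts as float32 and features[i][1] an (x, y) position for the text label. A hypothetical sketch along those lines; the concrete features chosen here, area and bounding-box aspect ratio, are assumptions rather than the original implementation:

    import cv2
    import numpy as np

    def extract_feautre(threshold_image):
        gray = cv2.cvtColor(threshold_image, cv2.COLOR_BGR2GRAY)
        num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(gray)

        features = []
        for i in range(1, num_labels):        # label 0 is the background
            area = stats[i][cv2.CC_STAT_AREA]
            if area < 10:                     # ignore tiny noise blobs
                continue
            w = stats[i][cv2.CC_STAT_WIDTH]
            h = stats[i][cv2.CC_STAT_HEIGHT]
            feature_vector = [float(area), float(w) / float(h)]
            features.append((feature_vector, centroids[i]))
        return features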
Example #4
    def remove_noise(self):

        height, width, bytes_per_component = self.image.shape
        bytes_per_line = 3 * width

        self.noise_image = np.zeros(self.image.shape, np.uint8)

        cv2.medianBlur(self.image, 3, self.noise_image)

        demo_utils.show_cvimage_to_label(self.noise_image, self.noise_label)
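cv2.medianBlur also returns its result, so pre-allocating the output buffer is optional; the same step can be written more compactly as:

    self.noise_image = cv2.medianBlur(self.image, 3)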
Example #5
    def load_image(self):
        fname, _ = QFileDialog.getOpenFileName(self, 'Open file', './classify_data', "Image files (*.jpg *.png *.pgm)")

        if len(fname) > 0:
            self.image = cv2.imread(fname)

            demo_utils.show_cvimage_to_label(self.image, self.image_label)
            self.light_image = cv2.imread('./classify_data/pattern.pgm')

            self.threshold_image = classify_utils.preprocess_image(self.image, self.light_image)

            demo_utils.show_cvimage_to_label(self.threshold_image, self.threshold_label)
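classify_utils.preprocess_image is not shown in these snippets. A plausible sketch, assembled from the remove_noise, remove_light and apply_threshold steps of Examples #4, #7 and #8 (the kernel size and threshold value are carried over from those snippets; composing them into one helper is an assumption):

    import cv2
    import numpy as np

    def preprocess_image(image, light_pattern):
        # 1. Suppress salt-and-pepper noise.
        denoised = cv2.medianBlur(image, 3)
        # 2. Remove uneven illumination by dividing by the light pattern.
        img32 = np.float32(denoised)
        light32 = np.float32(light_pattern)
        light_removed = np.uint8(abs(255 * (1 - img32 / light32)))
        # 3. Binarize the corrected image.
        _, threshold_image = cv2.threshold(light_removed, 50, 255, cv2.THRESH_BINARY)
        return threshold_image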
Example #6
    def show_histogram(self):
        # hist = cv2.calcHist([self.image],
        #                     [0],    # channel to use
        #                     None,   # no mask used
        #                     [256],  # histSize
        #                     [0.0,255.0])

        b, g, r = cv2.split(self.image)
        numbins = 256
        ranges = [0.0, 256.0]

        b_hist = cv2.calcHist([b], [0], None, [numbins], ranges)
        g_hist = cv2.calcHist([g], [0], None, [numbins], ranges)
        r_hist = cv2.calcHist([r], [0], None, [numbins], ranges)

        print(b_hist.shape)

        width = 256
        height = 256

        hist_image = np.zeros([height, width, 3], np.uint8)

        cv2.normalize(b_hist, b_hist, 0, height * 0.9, cv2.NORM_MINMAX)
        cv2.normalize(g_hist, g_hist, 0, height * 0.9, cv2.NORM_MINMAX)
        cv2.normalize(r_hist, r_hist, 0, height * 0.9, cv2.NORM_MINMAX)

        for i in range(1, numbins, 1):
            cv2.line(hist_image,
                     (i - 1, height - np.int32(np.around(b_hist[i - 1][0]))),
                     (i, height - np.int32(np.around(b_hist[i][0]))),
                     (255, 0, 0)
                     )
            cv2.line(hist_image,
                     (i - 1, height - np.int32(np.around(g_hist[i - 1][0]))),
                     (i, height - np.int32(np.around(g_hist[i][0]))),
                     (0, 255, 0)
                     )
            cv2.line(hist_image,
                     (i - 1, height - np.int32(np.around(r_hist[i - 1][0]))),
                     (i, height - np.int32(np.around(r_hist[i][0]))),
                     (0, 0, 255)
                     )

        # cv2.imshow("Histogram", hist_image)
        demo_utils.show_cvimage_to_label(hist_image, self.histogram_label)

        self.show_histogram2()
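The [0.0, 256.0] range matters because calcHist treats the upper bound as exclusive, so 256 bins over that range give exactly one bin per gray level. A standalone cross-check against np.histogram, which uses the same half-open convention (the image path below is hypothetical):

    import cv2
    import numpy as np

    image = cv2.imread('./images/sample.jpg')  # hypothetical path
    b, _, _ = cv2.split(image)
    b_counts, _ = np.histogram(b, bins=256, range=(0, 256))
    b_hist = cv2.calcHist([b], [0], None, [256], [0.0, 256.0]).ravel()
    assert np.array_equal(b_counts, b_hist.astype(np.int64))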
Example #7
    def remove_light(self):

        height, width, bytes_per_component = self.image.shape
        bytes_per_line = 3 * width

        img32 = np.float32(self.noise_image)
        light32 = np.float32(self.light_image)
        divided = np.divide(img32, light32)

        sub_result = 1 - divided

        aux = abs(255 * sub_result)
        self.light_removed_image = np.uint8(aux)

        # light_removed_image = abs(light32 - img32)
        # light_removed_image = np.uint8(light_removed_image)

        # light_removed_image = np.zeros(self.image.shape, np.uint8)
        # cv2.medianBlur(self.image, 3, light_removed_image)

        demo_utils.show_cvimage_to_label(self.light_removed_image,
                                         self.light_removed_label)
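The correction above computes 255 * (1 - I / P): an evenly lit background maps to roughly zero and darker objects stand out against it. A standalone sketch of the same step with a guard against zero pixels in the light pattern (the epsilon is an assumption, not present in the original code):

    import numpy as np

    def remove_light_by_division(image, light_pattern, eps=1e-6):
        img32 = np.float32(image)
        light32 = np.float32(light_pattern)
        corrected = 255.0 * (1.0 - img32 / np.maximum(light32, eps))
        return np.uint8(np.abs(corrected))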
Example #8
    def apply_threshold(self):

        height, width, bytes_per_component = self.image.shape
        bytes_per_line = 3 * width

        self.threshold_image = np.zeros(self.image.shape, np.uint8)
        cv2.threshold(self.light_removed_image, 50, 255, cv2.THRESH_BINARY,
                      self.threshold_image)

        demo_utils.show_cvimage_to_label(self.threshold_image,
                                         self.threshold_label)

        imgray = cv2.cvtColor(self.threshold_image, cv2.COLOR_BGR2GRAY)

        result = cv2.connectedComponentsWithStats(imgray)

        # cv2.CC_STAT_AREA

        print(" %d objects found" % result[0])
        num_labels = result[0]
        # The second cell is the label matrix
        labels = result[1]
        stats = result[2]
        controlids = result[3]

        output = np.zeros(self.image.shape, np.uint8)

        random.seed()

        font = cv2.FONT_HERSHEY_SIMPLEX

        # org
        org = (50, 50)

        # fontScale
        fontScale = 0.5

        # Blue color in BGR
        color1 = (255, 0, 0)

        # Line thickness of 1 px
        thickness = 1

        for i in range(1, num_labels):  # skip label 0, the background
            area = stats[i][cv2.CC_STAT_AREA]
            if area > 10:
                print("object:%d with area %d" % (i, area))
                mask = labels == i

                color = (random.randint(40, 220), random.randint(40, 220),
                         random.randint(40, 220))

                # np.putmask(output, mask, [color])

                output[mask] = color

                # Label each blob with its area using cv2.putText().

                print(centroids[i])

                org = centroids[i].astype(int)

                output = cv2.putText(output, "area: %d" % (area), tuple(org),
                                     font, fontScale, color, thickness)

        demo_utils.show_cvimage_to_label(output, self.segment_label)
Example #9
    def classify_cv_svm(self):

        self.svm = cv2.ml.SVM_create()
        self.svm.setType(cv2.ml.SVM_C_SVC)
        self.svm.setNu(0.05)
        self.svm.setKernel(cv2.ml.SVM_CHI2)
        self.svm.setDegree(1.0)

        # self.svm.setC(2.67)
        self.svm.setGamma(2.0)
        # equivalent to the C++ TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6)
        self.svm.setTermCriteria((cv2.TERM_CRITERIA_MAX_ITER, 100, 1.e-06))

        self.svm.train(self.training_data, cv2.ml.ROW_SAMPLE, self.responses_data)
        # self.svm.save('svm_data.dat')

        result = self.svm.predict(self.test_data)

        print(result[1].shape)

        print(self.test_responses_data.shape)

        # result = np.aarray(result, dtype=np.int32)

        result1 = np.array([x[0] for x in result[1]], dtype=np.int32)

        print(result1.shape)

        mask = result1 == self.test_responses_data
        correct = np.count_nonzero(mask)
        print("Accuracy cv svm: %0.4f" % (correct * 100.0 / len(result1)))
        output = self.image.copy()

        font = cv2.FONT_HERSHEY_SIMPLEX

        # fontScale
        fontScale = 0.5

        # Blue color in BGR
        color1 = (255, 0, 0)

        # Line thickness of 1 px
        thickness = 1

        self.features = classify_utils.extract_feautre(self.threshold_image)
        print(self.features)

        for i in range(0, len(self.features)):

            sample_data = np.array([self.features[i][0]], dtype=np.float32)

            # print(sample_data.shape)

            result = self.svm.predict(sample_data)

            print(self.features[i][0])
            print(result[1][0][0])

            label = int(result[1][0][0])

            org = (int(self.features[i][1][0]), int(self.features[i][1][1]))

            if label == 0:
                color = (255, 0, 0)
                text = "NUT"
            elif label == 1:
                color = (0, 255, 0)
                text = "RING"
            else:
                color = (0, 0, 255)
                text = "SCREW"

            output = cv2.putText(output, text, org, font,
                                 fontScale, color, thickness)

        demo_utils.show_cvimage_to_label(output, self.result_label)
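None of the examples show how self.training_data and self.responses_data are assembled. A minimal sketch of the expected shapes and dtypes, with dummy feature values purely for illustration (the 0/1/2 labels follow the NUT/RING/SCREW mapping above; cv2.ml additionally requires float32 rows passed with cv2.ml.ROW_SAMPLE, as in the train call):

    import numpy as np

    # One row per training sample, one column per feature.
    training_data = np.array([[420.0, 1.1],   # dummy values, shown only for shape
                              [380.0, 0.9],
                              [900.0, 3.2]], dtype=np.float32)
    # Integer class labels: 0 = NUT, 1 = RING, 2 = SCREW.
    responses_data = np.array([0, 1, 2], dtype=np.int32)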