Example #1
def grade_2(image_part):
    # grade the 15 questions of part 2 against the FINAL_ANSWER_2 key
    correct = 0
    questions = get_questions(image_part)
    for i in range(15):
        if debug:
            show_image(questions[i])
        choices_list = get_choices_2(questions[i])
        correct += grade_question(choices_list, FINAL_ANSWER_2[i])
        if debug:
            print(correct)
    return correct
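The helpers get_questions, get_choices_2, grade_question and the FINAL_ANSWER_2 key all come from the surrounding project and are not shown here. A minimal sketch of what grade_question might look like, under the assumption that choices_list holds one fill score per bubble and the key stores the index of the correct bubble:

def grade_question(choices_list, correct_answer):
    # pick the bubble with the strongest fill and award a point if it matches the key
    marked = max(range(len(choices_list)), key=lambda i: choices_list[i])
    return 1 if marked == correct_answer else 0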
Example #2
def grade_all(warped):
    total_grade = 0

    # the input may already be single-channel; cv2.cvtColor raises in that case
    try:
        gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        thresh = cv2.threshold(gray, 0, 255,
                               cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    except cv2.error:
        thresh = cv2.threshold(warped, 0, 255,
                               cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

    if debug:
        show_image(thresh)
    first_part, second_part, third_part = crop_image(thresh)
    total_grade = (grade_1(first_part) + grade_2(second_part) +
                   grade_3(third_part))

    return total_grade
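The show_image debug helper is referenced by every example on this page but defined in none of them. A minimal sketch of such a helper using plain OpenCV (the window name and the block-until-keypress behavior are assumptions):

def show_image(image, title='debug'):
    # display the image and wait for any key before continuing
    cv2.imshow(title, image)
    cv2.waitKey(0)
    cv2.destroyWindow(title)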
Example #3
def image_trans(image):
    if DEBUG:
        show_image(image)

    edged = cv2.Canny(image, 75, 200)
    if DEBUG:
        show_image(edged)

    thresh = cv2.threshold(image, 0, 255,
                           cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    # combine the edge and threshold maps; bitwise OR avoids uint8 overflow
    hybrid = cv2.bitwise_or(edged, thresh)

    # OpenCV 3.x signature: findContours returns (image, contours, hierarchy)
    if CASE == 0:
        img, cnts, hierarchy = cv2.findContours(hybrid.copy(), cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
    elif CASE == 1:
        img, cnts, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)

    docCnt = None
    if len(cnts) > 0:
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        for c in cnts:
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.01 * peri, True)

            if len(approx) == 4:
                docCnt = approx
                break

    if docCnt is None:
        if DEBUG:
            print "No contour is found"
        return None, None

    warped = four_point_transform(IMAGE, docCnt.reshape(4, 2))
    if DEBUG:
        cv2.drawContours(image, [docCnt], -1, 255, -1)
        show_image(image)
    return warped, cv2.contourArea(docCnt)
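The findContours calls above use the three-value return signature of OpenCV 3.x; OpenCV 4.x returns only (contours, hierarchy). If this snippet has to run on either version, one common pattern is imutils.grab_contours, sketched here (not part of the original code):

cnts = cv2.findContours(hybrid.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)  # extracts the contour list on OpenCV 3 and 4 alike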
Example #4
    def option1_callback(self):
        show.show_image(self.pic_inf.file_name)
Example #5
    def feature_extraction_single_latent(self, img_file, output_path=None, show_processes=False):
        block = True  # blocking display for the debug figures below
        img = io.imread(img_file)

        name = os.path.basename(img_file)
        mask_CNN, _ = self.ROI_model.run(img)
        h, w = mask_CNN.shape
        latent_template = template.Template()

        minu_thr = 0.3

        # template set 1: no ROI or enhancement is required;
        # the texture image is used for coarse segmentation

        descriptor_imgs = []
        texture_img = preprocessing.FastCartoonTexture(img, sigma=2.5, show=False)

        descriptor_imgs.append(texture_img)

        contrast_img_guassian = preprocessing.local_constrast_enhancement_gaussian(img)

        quality_map, _, _ = get_maps.get_quality_map_dict(texture_img, self.dict_all, self.dict_ori,
                                                          self.dict_spacing, block_size=16, process=False)
        quality_map_pixel = cv2.resize(quality_map, (0, 0), fx=16, fy=16)
        mask_coarse = quality_map_pixel > 0.3
        mask_coarse = mask_coarse.astype(int)  # np.int is deprecated in recent NumPy
        mask = mask_coarse * mask_CNN

        AEC_img = self.enhancement_model.run(texture_img)
        quality_map, dir_map, fre_map = get_maps.get_quality_map_dict(AEC_img, self.dict_all, self.dict_ori,
                                                                      self.dict_spacing, block_size=16, process=False)

        blkH, blkW = dir_map.shape

        if show_processes:
            show.show_orientation_field(img, dir_map, mask=mask, fname='OF.jpeg')

        enh_contrast_img = filtering.gabor_filtering_pixel(contrast_img_guassian, dir_map + math.pi / 2, fre_map,
                                                           mask=mask, block_size=16, angle_inc=3)

        enh_texture_img = filtering.gabor_filtering_pixel(texture_img, dir_map + math.pi / 2, fre_map,
                                                          mask=mask,
                                                          block_size=16, angle_inc=3)

        if show_processes:
            show.show_image(texture_img, mask=mask, block=True, fname='cropped_texture_image.jpeg')
            show.show_image(AEC_img, mask=mask, block=True, fname='cropped_AEC_image.jpeg')
            show.show_image(enh_contrast_img, mask=mask, block=True, fname='cropped_enh_image.jpeg')

        descriptor_imgs.append(enh_contrast_img)

        quality_map2, _, _ = get_maps.get_quality_map_dict(enh_contrast_img, self.dict_all, self.dict_ori,
                                                           self.dict_spacing, block_size=16, process=False)
        quality_map_pixel2 = cv2.resize(quality_map2, (0, 0), fx=16, fy=16)

        mask2 = quality_map_pixel2 > 0.50

        minutiae_sets = []
        mnt = self.minu_model.run(contrast_img_guassian, minu_thr=0.05)
        mnt = self.remove_spurious_minutiae(mnt, mask)
        minutiae_sets.append(mnt)
        if show_processes:
            fname = 'minutiae_texture_img.jpeg'
            show.show_minutiae(texture_img, mnt, mask=mask, block=block, fname=fname)

        mnt = self.minu_model.run(AEC_img, minu_thr=0.3)
        mnt = self.remove_spurious_minutiae(mnt, mask2)
        minutiae_sets.append(mnt)
        if show_processes:
            fname = 'minutiae_AEC_img.jpeg'
            show.show_minutiae(AEC_img, mnt, mask=mask, block=block, fname=fname)

        mnt = self.minu_model.run(enh_contrast_img, minu_thr=0.3)
        mnt = self.remove_spurious_minutiae(mnt, mask2)
        minutiae_sets.append(mnt)
        if show_processes:
            fname = 'minutiae_enh_contrast_img.jpeg'
            show.show_minutiae(enh_contrast_img, mnt, mask=mask, block=block, fname=fname)

        mnt = self.minu_model.run(enh_texture_img, minu_thr=0.3)
        mnt = self.remove_spurious_minutiae(mnt, mask2)
        minutiae_sets.append(mnt)

        # minutiae template 1
        des = descriptor.minutiae_descriptor_extraction(texture_img, minutiae_sets[0], self.patch_types, self.des_models,
                                                         self.patchIndexV, batch_size=128)

        minu_template = template.MinuTemplate(h=h, w=w, blkH=blkH, blkW=blkW, minutiae=minutiae_sets[0],
                                                   des=des, oimg=dir_map, mask=mask)
        latent_template.add_minu_template(minu_template)

        # minutiae template 2
        des = descriptor.minutiae_descriptor_extraction(texture_img, minutiae_sets[1], self.patch_types,
                                                        self.des_models,
                                                        self.patchIndexV, batch_size=128)

        minu_template = template.MinuTemplate(h=h, w=w, blkH=blkH, blkW=blkW, minutiae=minutiae_sets[1],
                                              des=des, oimg=dir_map, mask=mask)
        latent_template.add_minu_template(minu_template)

        # minutiae template 3
        des = descriptor.minutiae_descriptor_extraction(enh_texture_img, minutiae_sets[2], self.patch_types,
                                                        self.des_models,
                                                        self.patchIndexV, batch_size=128)

        minu_template = template.MinuTemplate(h=h, w=w, blkH=blkH, blkW=blkW, minutiae=minutiae_sets[2],
                                              des=des, oimg=dir_map, mask=mask)
        latent_template.add_minu_template(minu_template)

        # minutiae template 4
        des = descriptor.minutiae_descriptor_extraction(enh_texture_img, minutiae_sets[3], self.patch_types,
                                                        self.des_models,
                                                        self.patchIndexV, batch_size=128)

        minu_template = template.MinuTemplate(h=h, w=w, blkH=blkH, blkW=blkW, minutiae=minutiae_sets[3],
                                              des=des, oimg=dir_map, mask=mask)
        latent_template.add_minu_template(minu_template)

        return latent_template
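remove_spurious_minutiae is a method of the same class and is not shown here. A plausible minimal sketch, assuming each minutia is a row starting with its (x, y) pixel location and the mask is a per-pixel foreground map:

    def remove_spurious_minutiae(self, mnt, mask):
        # keep only minutiae whose location falls inside the foreground mask
        h, w = mask.shape[:2]
        kept = [m for m in mnt
                if 0 <= int(m[0]) < w and 0 <= int(m[1]) < h and mask[int(m[1]), int(m[0])] > 0]
        return np.array(kept)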
Example #6
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # object detection
        cascade = cv2.CascadeClassifier(cascade_path)
        # detectMultiScale finds every face in the image and returns each one
        # as a rectangle (coordinates and size); there may be several
        facerect = cascade.detectMultiScale(frame_gray,
                                            scaleFactor=1.2,
                                            minNeighbors=3,
                                            minSize=(10, 10))
        if len(facerect) > 0:
            print('face detected')
            color = (255, 255, 255)  # white
            for rect in facerect:

                x, y = rect[0:2]  # first two entries: top-left coordinates
                width, height = rect[2:4]  # last two entries: size
                image = frame[y - 10:y + height,
                              x:x + width]  # crop only the face region from the full frame
                result = _predict(model, image)
                if result == 0:  # boss
                    print('someone is approaching')
                    show_image()
                else:
                    print('Nobody')

        k = cv2.waitKey(100)  # pressing Esc (key code 27) stops the program
        if k == 27:
            break

    cap.release()
    cv2.destroyAllWindows()
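This fragment sits inside a capture loop whose setup is not shown; cap, frame, model and cascade_path come from the enclosing function. A minimal skeleton of the missing scaffolding, with the camera index and model loading left as assumptions:

cap = cv2.VideoCapture(0)  # assumed: default camera
while True:
    ret, frame = cap.read()
    if not ret:
        break
    # ... detection and prediction code from the example goes here ...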