Example #1
    def _remove_background_right(self, img, boundary):
        orig_shape = img.shape
        new_img = rotate_image(img, boundary.angle)
        new_img[:, (boundary.x + 1):] = 0
        right_cropped = rotate_image(new_img, -1 * boundary.angle)

        return self._remove_zero_padding(right_cropped, orig_shape)
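None of these snippets define rotate_image itself, and its signature varies from repo to repo (an angle in degrees here, a file handle or a pygame Surface elsewhere). For reference, a common OpenCV-based implementation consistent with this snippet's usage (rotation about the centre that grows the canvas with zero padding, which is why _remove_zero_padding is needed afterwards) might look like the sketch below; this is an assumption, not the author's actual helper.

import cv2
import numpy as np

def rotate_image(image, angle):
    # Rotate `image` by `angle` degrees about its centre, expanding the
    # canvas so no pixels are clipped; the new area is zero-padded.
    h, w = image.shape[:2]
    center = (w / 2, h / 2)
    M = cv2.getRotationMatrix2D(center, angle, 1.0)
    cos, sin = abs(M[0, 0]), abs(M[0, 1])
    new_w = int(h * sin + w * cos)
    new_h = int(h * cos + w * sin)
    # shift the rotation centre to the centre of the enlarged canvas
    M[0, 2] += new_w / 2 - center[0]
    M[1, 2] += new_h / 2 - center[1]
    return cv2.warpAffine(image, M, (new_w, new_h))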
Example #2
def augment_data(image, face, landmarks):
    '''Produce two types of augmentation, rotation and mirroring;
    returns a list of 3 tuples (image, landmarks, region).'''
    assert (type(face) is Region)

    h, w = image.shape[:2]
    angle = 30
    pivot = face.center()

    # 30 degree rotation
    A = utils.rotate_image(image, angle, pivot)
    B = utils.rotate_landmarks(landmarks, pivot, angle)
    R1 = utils.points_region(B)

    # -30 degree rotation
    C = utils.rotate_image(image, -angle, pivot)
    D = utils.rotate_landmarks(landmarks, pivot, -angle)
    R2 = utils.points_region(D)

    # mirroring
    E = utils.flip_image(image)
    F = utils.naive_flip_landmarks(landmarks, w)
    R3 = face.flip(width=w)

    return [(A, B, R1), (C, D, R2), (E, F, R3)]
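rotate_landmarks is not shown in this example either; below is a minimal sketch of such a helper, assuming points are (x, y) pairs and that the sign convention matches whatever rotate_image does to the pixels.

import numpy as np

def rotate_landmarks(points, pivot, angle):
    # Hypothetical helper: rotate (x, y) landmark coordinates by `angle`
    # degrees around `pivot`, mirroring the image rotation above.
    theta = np.deg2rad(angle)
    c, s = np.cos(theta), np.sin(theta)
    rot = np.array([[c, -s], [s, c]])
    pivot = np.asarray(pivot, dtype=float)
    pts = np.asarray(points, dtype=float) - pivot
    return pts @ rot.T + pivot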
Example #3
    def _remove_background_left(self, img, boundary):
        orig_shape = img.shape
        new_img = rotate_image(img, boundary.angle)
        if boundary.x > 1:
            new_img[:, :(boundary.x - 1)] = 0

        left_cropped = rotate_image(new_img, -1 * boundary.angle)

        return self._remove_zero_padding(left_cropped, orig_shape)
Example #4
def process_image(image, angle):
    image_h, image_w = image.shape[0], image.shape[1]
    image_rotated = rotate_image(image, angle)
    image_rotated = crop_around_center(image_rotated, image_h, image_w)
    processed_image = np.vstack((image, image_rotated))

    return processed_image
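crop_around_center is not defined in this snippet; a minimal version consistent with how it is used here (recovering the original height and width after the rotation enlarged the canvas) could be, as an assumption:

def crop_around_center(image, height, width):
    # Hypothetical helper: crop a (height x width) window centred on the
    # image centre, so the rotated image regains the original shape.
    h, w = image.shape[:2]
    y0 = max((h - height) // 2, 0)
    x0 = max((w - width) // 2, 0)
    return image[y0:y0 + height, x0:x0 + width]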
Example #5
    def _find_angle(self, edges):
        plt.figure(figsize=(20, 15))

        score_df = pd.DataFrame([], columns=['score'])
        # plot_index = 1
        for angle in range(-self._max_angle_deviation, self._max_angle_deviation):

            rotated_edges = rotate_image(edges, angle)
            score_df.loc[angle] = self._get_score(rotated_edges)

            # plt.subplot(self._max_angle_deviation, 2, plot_index)
            # plt.imshow(rotated_edges)

            # bin_count = self._get_bin_count(rotated_edges)
            # y_val = self._get_y_from_bin(self._get_y_min_bin_index(bin_count))

            # x_min = 0
            # x_max = rotated_edges.shape[0]

            # plt.plot([y_val, y_val], [x_min, x_max])
            # plt.title('Angle {} Score:{}'.format(angle * 180 / math.pi, score_df.loc[angle].values[0]))
            # plot_index += 1

        # plt.show()
        score_df = score_df.sort_values('score')
        return score_df.index[-1]
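Since the DataFrame is sorted ascending by score, score_df.index[-1] is the angle with the highest score. Up to tie-breaking, the last two lines could be written more directly as this one-line sketch:

        return score_df['score'].idxmax()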
Example #6
def get_person_image(personId):
    "生成并返回人物图像的filepath"
    conn = db.get_connection()
    try:
        sql = ("select face.id, photo.path, face.rotation, face.location"
               " from tbl_face face"
               " inner join tbl_photo photo on face.photo_id = photo.id"
               " where face.person_id = %s"
               " order by photo.taken_time desc"
               " limit 1")
        with conn.cursor() as cursor:
            cursor.execute(sql, (personId, ))
            row = cursor.fetchone()
            if row is None:
                return None
            faceId = row[0]
            photoPath = row[1]
            rotation = row[2]
            faceBox = row[3]
            # crop out the face region
            image = cv2.imread(photoPath)
            logger = get_logger()
            logger.info(f"photoPath is {photoPath}")
            logger.info(f"rotation is {rotation} type {type(rotation)}")
            image = rotate_image(image, rotation)
            (top, right, bottom, left) = faceBox
            faceImage = image[top:bottom, left:right]
            # save to disk
            os.makedirs(settings.PERSON_IMAGES_HOME, exist_ok=True)
            path = os.path.join(settings.PERSON_IMAGES_HOME, f"{faceId}.jpg")
            cv2.imwrite(path, faceImage)
            return path
    finally:
        db.put_connection(conn)
Example #7
def justify_front(path):
    print(path)
    img = cv2.imdecode(np.fromfile(path, dtype=np.uint8), 1)

    # for i in range(100, 200):
    #     for j in range(50, 200):
    #         binary_img = utils.getCanny(img, i, j, 15, 0)
    #         print(i, j)
    #         utils.show(binary_img, 1)

    binary_img = utils.getCanny(img, 100, 150, 15, 0)
    # utils.show(binary_img, 0)
    contours, hierarchy = cv2.findContours(binary_img, cv2.RETR_CCOMP,
                                           cv2.CHAIN_APPROX_NONE)
    flag = False
    for i in range(len(contours)):
        x, y, w, h = cv2.boundingRect(contours[i])
        area = w * h
        if (area > 20000) & (abs(w - h) < 20):
            if (x > 400) & (y > 270):
                print(path)
                flag = True
            # print(x, y, w, h)
            # img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # utils.show(img, 0)

    if flag:
        img = utils.rotate_image(img, 180)
        utils.show(img, 1)
        cv2.imencode('.jpg', img)[1].tofile(path)
Example #8
    def get_score(self, mask, degree):

        rotated_mask = rotate_image(mask, degree)
        left_score = self._get_left_score(degree, rotated_mask.copy())
        right_score = self._get_right_score(degree, rotated_mask.copy())

        return (left_score, right_score)
Example #9
def get_places():
    """
    This API analyses an image and returns the scenic place classification
    ---
    tags:
      - Image Analyzer
    consumes:
      - multipart/form-data
    produces:
      - application/json
    parameters:
      - in: formData
        name: upfile
        type: file
        required: true
        description: Image file to analyse
    responses:
      500:
        description: Error, something went wrong!
      200:
        description: Detection success!
    """
    #logger=logging.getLogger(__name__)
    #logger.debug("Calling /places")
    model = Places()
    query_class = request.args.get('class')
    iFile = request.files.getlist('upfile')[0]
    img = load_image(iFile)
    img = rotate_image(img, iFile)
    img = limit_size_image(img)
    response = model.get_tags(img, query_class)
    return jsonify(response)
Example #10
def food_detection():
    """
    This API returns whether the image uploaded is a food or not.
    Call this api passing a coloured image.
    ---
    tags:
      - Image Analyzer
    consumes:
      - multipart/form-data
    produces:
      - application/json
    parameters:
      - in: formData
        name: upfile
        type: file
        required: true
        description: Upload your file
    responses:
      500:
        description: Error, something went wrong!
      200:
        description: Detection success!
    """
    #logger=logging.getLogger(__name__)
    #logger.info("Calling /aesthetics")
    response = None
    iFile = request.files.getlist('upfile')[0]
    print(iFile)
    img = load_image(iFile)
    img = rotate_image(img, iFile)
    img = cv2.resize(img, (227, 227))
    img = img.transpose(2, 0, 1)
    model = Food(img)
    response = model.get_prediction()
    return jsonify(response)
Example #11
def get_gender():
    """
    This API returns the predicted gender of all faces detected in an image.
    Call this api passing a coloured image.
    ---
    tags:
      - Image Analyzer
    consumes:
      - multipart/form-data
    produces:
      - application/json
    parameters:
      - in: formData
        name: upfile
        type: file
        required: true
        description: Upload your file
    responses:
      500:
        description: Error, something went wrong!
      200:
        description: Detection success!
    """
    response = None
    iFile = request.files.getlist('upfile')[0]
    img = load_image(iFile)
    img = rotate_image(img, iFile)
    img = limit_size_image(img)
    model = Gender(img)
    response = model.get_prediction()
    return jsonify(response)
Example #12
def get_faces():
    """
    This API searches an image for faces and returns the location of each face
    ---
    tags:
      - Image Analyzer
    consumes:
      - multipart/form-data
    produces:
      - application/json
    parameters:
      - in: formData
        name: upfile
        type: file
        required: true
        description: Image file to analyse
    responses:
      500:
        description: Error, something went wrong!
      200:
        description: Detection success!
    """
    #logger=logging.getLogger(__name__)
    #logger.info("Calling prediction data for /people")
    response = None
    model = People()
    query_class = request.args.get('class')
    iFile = request.files.getlist('upfile')[0]
    img = load_image(iFile)
    img = rotate_image(img, iFile)
    img = limit_size_image(img)
    response = model.get_tags_coords(img, query_class)
    return jsonify(response)
Example #13
def detect_face(path):
    img = cv2.imdecode(np.fromfile(path, dtype=np.uint8), 1)
    img = utils.img_resize(img)
    height, width = img.shape[0:2]
    print(height, width)
    print("========== detect face ===========")
    for i in range(3):
        img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        front_face_detected = frontFaceClassifier.detectMultiScale(
            img_grey,
            scaleFactor=1.2,
            minNeighbors=5,
            flags=cv2.CASCADE_SCALE_IMAGE)

        if len(front_face_detected) != 0:
            print(path)
            print("rotate " + str(i) + " times")
            print(img.shape)
            for x, y, w, h in front_face_detected:
                print(x, y, w, h)
            #     cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)
            # show(img, 0)
            if i != 0:
                cv2.imencode('.jpg', img)[1].tofile(path)
            return True
        else:
            img = utils.rotate_image(img, 90)
    return False
Example #14
def crop_main(img_path):
    # base_path=img_path.split("/")
    img = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), 1)
    img = utils.img_resize(img, 900)
    binary_img = utils.getCanny(img, 20, 50, 3, 0)
    # max_contour, max_area = utils.findMaxContour(binary_img)
    contours, _ = cv2.findContours(binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for i in range(len(contours)):
        area = cv2.contourArea(contours[i])
        rotate = False
        if area > 5000:
            x, y, w, h = cv2.boundingRect(contours[i])
            if w > h:
                if (w / h < 1) | (w / h > 2):
                    break
                else:
                    print(w, h)
            else:
                if (h / w < 1) | (h / w > 2):
                    break
                else:
                    print(w, h)
                    rotate = True
            image = img[y - 10:y + h + 10, x - 10:x + w + 10]
            print(image.shape)
            if rotate:
                image = utils.rotate_image(image, 90)
            image = cv2.resize(image, (856, 540), interpolation=cv2.INTER_CUBIC)
            print(image.shape)
            if len(image.shape) == 3:
                image_grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            else:
                image_grey = image

            front_face_detected = frontFaceClassifier.detectMultiScale(image_grey, scaleFactor=1.2,
                                                                       minNeighbors=5, flags=cv2.CASCADE_SCALE_IMAGE)

            if len(front_face_detected) != 0:
                for x, y, w, h in front_face_detected:
                    if x < 856 / 2:
                        image = utils.rotate_image(image, 180)
                    # cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 255), 2)
            utils.show(image, 1)
            name = img_path.split("\\")[1]
            # save the cropped ID-card image to a separate file
            cv2.imencode('.jpg', image)[1].tofile(
                "E:/KingT/staff/test/all身份证/" + name + "_" + str(i) + img_path.split("\\")[2])
Example #15
    def update(self):
        self.get_keys()
        mouse_dir = conf.vec(self.game.camera.mouseadjustment(pg.mouse.get_pos())) - conf.vec(self.pos)
        self.rot = mouse_dir.angle_to(conf.vec(1,0))
        self.pos += self.vel * self.game.dt
        if self.vel != conf.vec(0,0):
            self.image = utils.rotate_image(self.game.player_imgs[1], self.rot)
        else:
            self.image = utils.rotate_image(self.game.player_imgs[0], self.rot)
        self.rect = self.image.get_rect(center = self.rect.center)
        self.rect.center = self.pos
        self.hit_rect.centerx = self.pos.x
        utils.collide_with_walls(self, self.game.walls, 'x')
        self.hit_rect.centery = self.pos.y
        utils.collide_with_walls(self, self.game.walls, 'y')
        self.rect.center = self.hit_rect.center

        # mob hits
        hits = pg.sprite.spritecollide(self, self.game.mobs, False, utils.collide_hit_rect)
        for hit in hits:
            if conf.random.random() < 0.7:
                self.game.soundManager.play_sound_effect(conf.random.choice(self.game.player_hit_sounds))
            self.health -= conf.MOB_DAMAGE
            hit.vel = conf.vec(0,0)
            if self.health <= 0:
                self.game.playing = False
        if hits:
            self.pos += conf.vec(conf.MOB_KNOCKBACK, 0).rotate(-hits[0].rot)

        # player picks up health
        hits = pg.sprite.spritecollide(self, self.game.items, False, utils.collide_hit_rect)
        for hit in hits:
            if hit.type == 'health' and self.health < conf.PLAYER_HEALTH:
                hit.kill()
                self.game.soundManager.play_sound_effect(self.game.effect_sounds['health_up'])
                self.add_health(conf.ITEM_HEALTH_AMOUNT)
            if (hit.type in conf.WEAPONS):
                hit.kill()
                self.game.soundManager.play_sound_effect(self.game.effect_sounds[hit.type])
                self.secondary_weapon = hit.type
                self.secondary_weapon_bullets = conf.WEAPONS[hit.type]['ammo']
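This example is pygame rather than OpenCV, so utils.rotate_image must return a rotated Surface; the caller then re-centres the new, larger rect. A one-line sketch of such a helper (an assumption; the real one is not shown):

import pygame as pg

def rotate_image(image, angle):
    # Hypothetical pygame helper: rotozoom rotates with smoothing at scale 1.
    return pg.transform.rotozoom(image, angle, 1)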
Example #16
    def _split_image(self, img):
        # First work is to find the optimal angle to rotate
        edges = self._get_edges(img)
        angle = self._find_angle(edges)

        # After angle is found
        rotated_edges = rotate_image(edges, angle)

        bin_count = self._get_bin_count(rotated_edges)
        y_min_bin_index = self._get_y_min_bin_index(bin_count)
        y_val_in_rectangle = int(self._get_y_from_bin(y_min_bin_index))

        y = self._get_y_wrt_center(y_val_in_rectangle, rotated_edges)

        dst = rotate_image(img, angle)
        y_val = self._get_y_wrt_left_top_corner(y, dst)

        first_image = dst[:, :(y_val + self._buffer), :]
        second_image = dst[:, (y_val - self._buffer):, :]

        return (first_image, second_image)
Example #17
def test_augment():
    utils.init_face_detector(True, 321)
    utils.load_shape_predictor("dlib/shape_predictor_68_face_landmarks.dat")

    for img in utils.images_inside("trainset"):
        points = utils.detect_landmarks(img, region(img))

        angle = 30
        h, w = img.shape[:2]
        center = (w / 2, h / 2)

        # 30 degree rotation
        rot1 = utils.rotate_image(img, angle)
        rot_pts1 = utils.rotate_landmarks(points, center, angle)

        # -30 degree rotation
        rot2 = utils.rotate_image(img, -angle)
        rot_pts2 = utils.rotate_landmarks(points, center, -angle)

        # mirroring
        mir = utils.flip_image(img)
        mir_pts = utils.detect_landmarks(mir, region(mir))

        utils.draw_points(img, points)
        utils.draw_points(rot1, rot_pts1, color=Colors.cyan)
        utils.draw_points(rot2, rot_pts2, color=Colors.purple)
        utils.draw_points(mir, mir_pts, color=Colors.green)

        while True:
            cv2.imshow("image", img)
            cv2.imshow("mirrored", mir)
            cv2.imshow("rotated30", rot1)
            cv2.imshow("rotated-30", rot2)

            key = cv2.waitKey(20) & 0xFF

            if key == Keys.ESC:
                break
            elif key == Keys.Q:
                return cv2.destroyAllWindows()
Example #18
def recognize_information(path, config):
    image = cv2.imdecode(np.fromfile(path, dtype=np.uint8), 1)
    image = utils.img_resize(image)
    print("\n-----------------------------------")
    print(path)
    print(image.shape)
    for i in range(3):
        print("+++" + str(i) + "+++")
        if config == 1:
            imagegray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
            retval, img = cv2.threshold(imagegray, 120, 255,
                                        cv2.THRESH_OTSU + cv2.THRESH_BINARY)
        elif config == 2:
            img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        elif config == 3:
            img = image
        utils.show(img, 1)
        text = pytesseract.image_to_string(img, lang='chi_sim')
        print(text)
        if (text.find("学 位") != -1) | (text.find("学 士") != -1):
            print("=====学位证=====")
            # cv2.imencode('.jpg', image)[1].tofile(path)
            # utils.show(img, 1)
            return 1
        elif (text.find("人 力") != -1) | (text.find("资 源") != -1) | (text.find("人 事") != -1) \
                | (text.find("工 作") != -1) | (text.find("甲 方") != -1) | (text.find("劳 动") != -1) \
                | (text.find("双 方") != -1) | (text.find("符 合") != -1) | (text.find("知 识") != -1):
            return 5
        elif (text.find("成 人") != -1) | (text.find("高 等") != -1) | (text.find("毕 业") != -1) \
                | (text.find("合 格") != -1) | (text.find("半 业") != -1) | (text.find("课 程") != -1) \
                | (text.find("单 业") != -1) | (text.find("毗 业") != -1) | (text.find("注 册") != -1):
            print("=====毕业证=====")
            # cv2.imencode('.jpg', image)[1].tofile(path)
            # utils.show(img, 1)
            return 2
        elif text.find("合 同") != -1:
            return 4
        elif (text.find("居 民") != -1) | (text.find("公 民") != -1) | (text.find("民 身") != -1) \
                | (text.find("份 证") != -1) | (text.find("身 休") != -1) | (text.find("休 证") != -1):
            print("=====身份证=====")
            cv2.imencode('.jpg', image)[1].tofile(path)
            utils.show(img, 1)
            return 3
        else:
            image = utils.rotate_image(image, 90)
            utils.show(img, 1)

    print("=====----=====")
    return -1
Example #19
            def _collate_func(batch):
                batch = default_collate(batch)
                err_message = "A batch must contain two tensors: images, labels."
                assert len(batch) == 2, err_message

                images = np.asarray(batch[0])
                new_images, new_targets = [], []
                for img in images:
                    for target in range(0, 4):
                        rotated_image = rotate_image(img, angle=90*target)
                        new_images.append(rotated_image)
                        new_targets.append(target)
                images_array = np.array(new_images)
                targets_array = np.array(new_targets)
                return (torch.Tensor(images_array), torch.LongTensor(targets_array))
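This collate function turns batching into a rotation-prediction pretext task: every image yields four copies labelled 0-3 by their 90-degree rotation. A hedged usage sketch, assuming the enclosing code hands _collate_func to a standard torch DataLoader over some dataset:

from torch.utils.data import DataLoader

# each batch of 16 images becomes 64 (rotated image, rotation class) pairs
loader = DataLoader(dataset, batch_size=16, collate_fn=_collate_func)
images, targets = next(iter(loader))  # targets take values in {0, 1, 2, 3}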
Example #20
    def detect_roofs(self, img_name, in_path=None):
        in_path = self.in_path if in_path is None else in_path 
        try:
            rgb_unrotated = cv2.imread(in_path+img_name, flags=cv2.IMREAD_COLOR)
            gray = cv2.cvtColor(rgb_unrotated, cv2.COLOR_BGR2GRAY)
            gray = cv2.equalizeHist(gray)

            if self.downsized:
                rgb_unrotated = utils.resize_rgb(rgb_unrotated, h=rgb_unrotated.shape[0] // 2, w=rgb_unrotated.shape[1] // 2)
                gray = utils.resize_grayscale(gray, w=gray.shape[1] // 2, h=gray.shape[0] // 2)

        except IOError as e:
            print(e)
            sys.exit(-1)
        else:
            for roof_type, detectors in self.roof_detectors.items():
                for i, detector in enumerate(detectors):
                    for angle in self.angles:
                        # for thatch we only need one angle
                        if (not self.rotate_detectors[i] and angle > 0) or (roof_type == 'thatch' and angle > 0):
                            continue

                        print('Detecting with detector: ' + str(i))
                        print('ANGLE ' + str(angle))

                        with Timer() as t:
                            rotated_image = utils.rotate_image(gray, angle) if angle > 0 else gray
                            # the unrotated fallback should be the RGB frame, not `gray`
                            delete_image = utils.rotate_image_RGB(rgb_unrotated, angle) if angle > 0 else rgb_unrotated
                            detections, _ = self.detect_and_rectify(detector, rotated_image, angle, rgb_unrotated.shape[:2], rgb_rotated=delete_image)
                            if self.downsized:
                                detections = detections*2
                            self.viola_detections.set_detections(roof_type=roof_type, img_name=img_name, 
                                    angle=angle, detection_list=detections, img=rotated_image)

                        print('Time detection: {0}'.format(t.secs))
                        self.viola_detections.total_time += t.secs
                        if DEBUG:
                            rgb_to_write = cv2.imread(in_path+img_name, flags=cv2.IMREAD_COLOR)
                            utils.draw_detections(detections, rgb_to_write, color=(255,0,0))
                            cv2.imwrite('{0}{3}{1}_{2}.jpg'.format('', img_name[:-4], angle, roof_type), rgb_to_write)
            return rgb_unrotated
Example #21
def detect_face(img):
    image_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # utils.show(image_grey, 0)
    front_face_detected = frontFaceClassifier.detectMultiScale(
        image_grey,
        scaleFactor=1.2,
        minNeighbors=2,
        flags=cv2.CASCADE_SCALE_IMAGE)
    print(len(front_face_detected))
    if len(front_face_detected) != 0:
        for x, y, w, h in front_face_detected:
            print(x, y, w, h)
            # img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # utils.show(img, 0)
            if (w < 100) | (y < 100):
                return False, img
            if x < 428:
                img = utils.rotate_image(img, 180)
                print("rotate")
            return True, img
    return False, img
Example #22
    def upright_image(self, page, config):
        """
        Rotates an image by 90 degree increments until it is upright.

        Args:
            page (numpy.ndarray): An ndarray representing the test image.
            config (dict): A dictionary containing the config file values.

        Returns:
            page (numpy.ndarray): An ndarray representing the upright test 
                image.

        """
        if self.image_is_upright(page, config):
            return page
        else:
            for _ in range(3):
                page = utils.rotate_image(page, 90)
                if self.image_is_upright(page, config):
                    return page
        return None
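image_is_upright is not shown in this example. One plausible implementation, assuming Tesseract is available, is to trust its orientation-and-script detection; this is a guess at the missing helper, not the author's code:

import pytesseract

def image_is_upright(self, page, config):
    # Hypothetical check: ask Tesseract for the page orientation and
    # treat 0 degrees as upright.
    osd = pytesseract.image_to_osd(page, output_type=pytesseract.Output.DICT)
    return osd["orientation"] == 0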
Example #23
def get_face_id(repo):
    """
    This API searches an image for faces and returns the name of the matching person face group in the DB for the repo.
    ---
    paths:
      /people/{repo}
    tags:
      - Image Analyzer
    consumes:
      - multipart/form-data
    produces:
      - application/json
    parameters:
      - in: formData
        name: upfile
        type: file
        required: true
        description: Image file to analyse
      - in: path
        name: repo
        schema:
          type: integer
        required: true
    responses:
      500:
        description: Error, something went wrong!
      200:
        description: Detection success!
    """
    #logger=logging.getLogger(__name__)
    #logger.info("Calling prediction data for /faces")
    response = None
    model = People(repo)
    query_class = request.args.get('class')
    iFile = request.files.getlist('upfile')[0]
    img = load_image(iFile)
    img = rotate_image(img, iFile)
    img = limit_size_image(img)
    response = model.get_tags_face(img, query_class)
    return jsonify(response)
Example #24
            if len(faces):
                for f in faces:
                    # Crop out the face
                    x, y, w, h = [v for v in f]
                    CROPPED_FACE = GRAY_FRAME[y:y + h, x:x + w]
                    CROPPED_FACE = cv2.flip(CROPPED_FACE, 1)

                    name_to_display = predict(CROPPED_FACE)

                    cv2.rectangle(ROTATED_FRAME, (x, y), (x + w, y + h),
                                  (0, 255, 0))
                    cv2.putText(ROTATED_FRAME, name_to_display, (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0))

                # rotate the frame back and trim the black paddings
                PROCESSED_FRAME = utils.rotate_image(ROTATED_FRAME, r * (-1))
                PROCESSED_FRAME = utils.trim(PROCESSED_FRAME, FRAME_SCALE)

                # reset the optimized rotation map
                CURRENT_ROTATION_MAP = utils.get_rotation_map(r)
                FACEFOUND = True

        if FACEFOUND:
            FRAME_SKIP_RATE = 0
        else:
            FRAME_SKIP_RATE = SKIP_FRAME
    else:
        FRAME_SKIP_RATE -= 1

    cv2.putText(PROCESSED_FRAME, "Press ESC or 'q' to quit.", (5, 15),
                cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255))
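The rotate-back-and-trim pattern here (and in the later webcam examples) relies on a trim helper that crops away the black padding introduced by rotating and un-rotating a frame. A sketch of what such a helper might do, assuming FRAME_SCALE is the (height, width) of the pre-rotation frame:

def trim(frame, dim):
    # Hypothetical helper: crop the centre window of the original size
    # back out of the padded frame.
    h, w = frame.shape[:2]
    th, tw = int(dim[0]), int(dim[1])
    y0 = max((h - th) // 2, 0)
    x0 = max((w - tw) // 2, 0)
    return frame[y0:y0 + th, x0:x0 + tw]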
Example #25
def detect_two(path1, path2):
    global null_list
    print("=====detect two=====")
    print(path1)
    print(path2)
    null_flag = False
    img1 = cv2.imdecode(np.fromfile(path1, dtype=np.uint8), 1)
    img2 = cv2.imdecode(np.fromfile(path2, dtype=np.uint8), 1)
    img_list = []
    for img in [img1, img2]:

        # img = utils.img_resize(img)
        # for i in range(0, 100):
        #     for j in range(0, 100):
        #         binary_img = utils.getCanny(img, i, j, 6, 0)
        #         print(i, j)
        #         utils.show(binary_img, 10)

        binary_img = utils.getCanny(img, 20, 60, 20, 0)
        # utils.show(utils.img_resize(binary_img), 0)
        contours, _ = cv2.findContours(binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        for i in range(len(contours)):
            rotate = False
            x, y, w, h = cv2.boundingRect(contours[i])
            # print(x, y, w, h)
            # m = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # utils.show(utils.img_resize(m), 0)
            if (w * h > 25000) & ((w > 275) | (h > 275)) & (x > 0) & (y > 0):
                if w > h:
                    if (w / h < 1) | (w / h > 2):
                        break
                    else:
                        print(w, h)
                else:
                    if (h / w < 1) | (h / w > 2):
                        break
                    else:
                        print(w, h)
                        rotate = True
                if (y - 10 > 0) & (x - 10 > 0):
                    image = img[y - 10:y + h + 10, x - 10:x + w + 10]
                else:
                    image = img[y:y + h, x:x + w]
                if rotate:
                    image = utils.rotate_image(image, 90)
                image = cv2.resize(image, (856, 540), interpolation=cv2.INTER_CUBIC)
                # utils.show(image, 0)
                img_list.append(image)

    while len(img_list) < 2:
        print(str(len(img_list)), path1, path2)
        print("null")
        null_flag = True
        null_list.append(path1)
        img_list.append(np.zeros((540, 856, 3), dtype=np.uint8))

    fname = path1.split("\\")[1]
    out_folder_path = "E:/KingT/staff/result/身份证null/" + fname
    if null_flag:
        pic_name1 = path1.split("\\")[2]
        pic_name2 = path2.split("\\")[2]
        if not os.path.exists(out_folder_path):
            os.mkdir(out_folder_path)
        # shutil.copy(path1, out_folder_path + "/" + pic_name1)
        # shutil.copy(path2, out_folder_path + "/" + pic_name2)
    else:
        if os.path.exists(out_folder_path):
            # ...
            shutil.rmtree(out_folder_path)
    # detect the faces
    for i in range(2):
        image_grey = cv2.cvtColor(img_list[0], cv2.COLOR_BGR2GRAY)
        front_face_detected = frontFaceClassifier.detectMultiScale(image_grey, scaleFactor=1.25,
                                                                   minNeighbors=5,
                                                                   flags=cv2.CASCADE_SCALE_IMAGE)
        if len(front_face_detected) != 0:
            for x, y, w, h in front_face_detected:
                # print(x, y, w, h)
                if (w > 120) & (x > 300):
                    # cv2.rectangle(img_list[0], (x, y), (x + w, y + h), (0, 255, 255), 2)
                    # utils.show(img_list[1], 200)
                    # utils.show(img_list[0], 200)
                    return img_list[1], img_list[0]
                else:
                    if i == 1:
                        return img_list[0], img_list[1]
                    else:
                        img_list[0] = utils.rotate_image(img_list[0], 180)

        else:
            return img_list[0], img_list[1]
Example #26
                                                 FACE_DIM,
                                                 interpolation=cv2.INTER_AREA)
                    face_to_predict = cv2.cvtColor(face_to_predict,
                                                   cv2.COLOR_BGR2GRAY)
                    name_to_display = svm.predict(clf, pca, face_to_predict,
                                                  face_profile_names)

                    # Display frame
                    cv2.rectangle(rotated_frame, (x, y), (x + w, y + h),
                                  (0, 255, 0))
                    cv2.putText(rotated_frame, name_to_display, (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0))

                # rotate the frame back and trim the black paddings
                processed_frame = ut.trim(
                    ut.rotate_image(rotated_frame, rotation * (-1)),
                    frame_scale)

                # reset the optimized rotation map
                current_rotation_map = get_rotation_map(rotation)

                faceFound = True

                break

        if faceFound:
            frame_skip_rate = 0
            # print "Face Found"
        else:
            frame_skip_rate = SKIP_FRAME
            # print "Face Not Found"
Example #27
                if len(eyes) == 0:
                    ag = push_val(0)

                average = avg_eyeq()
                if average > 30:
                    print("Eye_X: ", average)
                else:
                    print("---------------------", average)
                    cv2.putText(frame_rotated, "Warning !", (120, 400),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (18, 255, 255), 2)
                    # cv2ImgAddText(frame_rotated, "警告", 140, 60, (255, 255, 0), 200)
                    # winsound.Beep(1000, 100)

                # rotate the frame back to its original position and trim the black padding
                resized_frame = ut.trim(
                    ut.rotate_image(frame_rotated, rotation * (-1)),
                    frame_scale)
                # reset the rotation map
                current_rotation_map = get_rotation_map(rotation)

                faceFound = True

                break

        if faceFound:
            frame_skip_rate = 0
            print("Face Found")
        else:
            frame_skip_rate = SKIP_FRAME
            print("Face Not Found")
Example #28
                    # Crop out the face
                    x, y, w, h = f  # unpack the bounding box (no rescaling here)
                    cropped_face = rotated_frame[y: y + h, x: x + w]   # img[y: y + h, x: x + w]
                    cropped_face = cv2.resize(cropped_face, DISPLAY_FACE_DIM, interpolation = cv2.INTER_AREA)

                    # Name Prediction
                    face_to_predict = cv2.resize(cropped_face, FACE_DIM, interpolation = cv2.INTER_AREA)
                    face_to_predict = cv2.cvtColor(face_to_predict, cv2.COLOR_BGR2GRAY)
                    name_to_display = svm.predict(clf, pca, face_to_predict, face_profile_names)

                    # Display frame
                    cv2.rectangle(rotated_frame, (x,y), (x+w,y+h), (0,255,0))
                    cv2.putText(rotated_frame, name_to_display, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0,255,0))

                # rotate the frame back and trim the black paddings
                processed_frame = ut.trim(ut.rotate_image(rotated_frame, rotation * (-1)), frame_scale)

                # reset the optimized rotation map
                current_rotation_map = get_rotation_map(rotation)

                faceFound = True


                break

        if faceFound:
            frame_skip_rate = 0
            # print ("Face Found")
        else:
            frame_skip_rate = SKIP_FRAME
            # print ("Face Not Found")
Example #29
    # loading the data flips the images vertically!

    #block = io.read_block()
    block = neo.Block()
    seg = neo.Segment(name='segment 0', index=0)
    block.segments.append(seg)
    print('block', block)
    print('seg', block.segments[0])

    block.segments[0].imagesequences.append(imageSequences)

    # change data orientation to be top=ventral, right=lateral
    imgseq = block.segments[0].imagesequences[0]
    imgseq = flip_image(imgseq, axis=-2)
    imgseq = rotate_image(imgseq, rotation=-90)
    block.segments[0].imagesequences[0] = imgseq

    # Transform into analogsignals
    block.segments[0].analogsignals = []
    block = ImageSequence2AnalogSignal(block)

    block.segments[0].analogsignals[0] = time_slice(
        block.segments[0].analogsignals[0], args.t_start, args.t_stop)

    if args.annotations is not None:
        block.segments[0].analogsignals[0].annotations.\
                                    update(parse_string2dict(args.annotations))

    block.segments[0].analogsignals[0].annotations.update(
        orientation_top=args.orientation_top)
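Here rotate_image operates on a neo ImageSequence with a keyword rotation in 90-degree steps, so it is presumably a thin wrapper over np.rot90 applied to the spatial axes. A sketch under that assumption (re-attaching neo metadata is omitted):

import numpy as np

def rotate_image(imgseq, rotation=90):
    # Hypothetical helper: rotate every frame of a (time, y, x) sequence
    # in 90-degree steps over the last two axes; negative values rotate
    # the other way, as with np.rot90 itself.
    k = int(rotation // 90)
    return np.rot90(np.asarray(imgseq), k=k, axes=(-2, -1))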
Example #30
    def _augmentation_function(self, images, labels):
        '''
        Function for augmentation of minibatches. It will transform a set of images and corresponding labels
        by a number of optional transformations. Each image/mask pair in the minibatch will be separately transformed
        with random parameters.
        :param images: A numpy array of shape [minibatch, X, Y, (Z), nchannels]
        :param labels: A numpy array containing a corresponding label mask
        :param do_rotations: Rotate the input images by a random angle between -15 and 15 degrees.
        :param do_scaleaug: Do scale augmentation by sampling one length of a square, then cropping and upsampling the image
                            back to the original size.
        :param do_fliplr: Perform random flips with a 50% chance in the left right direction.
        :return: A mini batch of the same size but with transformed images and masks.
        '''
        def get_option(name, default):
            return self.augmentation_options.get(name, default)

        try:
            import cv2
        except ImportError:
            return False
        else:

            if images.ndim > 4:
                raise AssertionError(
                    'Augmentation will only work with 2D images')

            # If segmentation labels also augment them, otherwise don't
            augment_labels = labels.ndim > 1

            do_rotations = get_option('do_rotations', False)
            do_scaleaug = get_option('do_scaleaug', False)
            do_fliplr = get_option('do_fliplr', False)
            do_flipud = get_option('do_flipud', False)
            do_elasticaug = get_option('do_elasticaug', False)
            # 2 means augment half of the images; 1 means augment every image
            augment_every_nth = get_option('augment_every_nth', 2)

            if do_rotations or do_scaleaug or do_elasticaug:
                nlabels = get_option('nlabels', None)
                if not nlabels:
                    raise AssertionError(
                        "When doing augmentations with rotations, scaling, or elastic transformations "
                        "the parameter 'nlabels' must be provided.")

            new_images = []
            new_labels = []
            num_images = images.shape[0]

            for ii in range(num_images):

                img = np.squeeze(images[ii, ...])
                lbl = np.squeeze(labels[ii, ...])

                coin_flip = np.random.randint(augment_every_nth)
                if coin_flip == 0:

                    # ROTATE
                    if do_rotations:

                        angles = get_option('rot_degrees', 10.0)
                        random_angle = np.random.uniform(-angles, angles)
                        img = utils.rotate_image(img, random_angle)

                        if augment_labels:
                            if nlabels <= 4:
                                lbl = utils.rotate_image_as_onehot(
                                    lbl, random_angle, nlabels=nlabels)
                            else:
                                # If there are more than 4 labels open CV can no longer handle one-hot interpolation
                                lbl = utils.rotate_image(
                                    lbl,
                                    random_angle,
                                    interp=cv2.INTER_NEAREST)

                    # RANDOM CROP SCALE
                    if do_scaleaug:

                        offset = get_option('offset', 30)
                        n_x, n_y = img.shape
                        # np.random.random_integers is deprecated; randint's upper bound is exclusive
                        r_y = np.random.randint(n_y - offset, n_y + 1)
                        p_x = np.random.randint(0, n_x - r_y + 1)
                        p_y = np.random.randint(0, n_y - r_y + 1)

                        img = utils.resize_image(
                            img[p_y:(p_y + r_y), p_x:(p_x + r_y)], (n_x, n_y))
                        if augment_labels:
                            if nlabels <= 4:
                                lbl = utils.resize_image_as_onehot(
                                    lbl[p_y:(p_y + r_y),
                                        p_x:(p_x + r_y)], (n_x, n_y),
                                    nlabels=nlabels)
                            else:
                                lbl = utils.resize_image(
                                    lbl[p_y:(p_y + r_y),
                                        p_x:(p_x + r_y)], (n_x, n_y),
                                    interp=cv2.INTER_NEAREST)

                    # RANDOM ELASTIC DEFORMATIONS (like in U-Net)
                    if do_elasticaug:

                        mu = 0
                        sigma = 10
                        n_x, n_y = img.shape

                        dx = np.random.normal(mu, sigma, 9)
                        dx_mat = np.reshape(dx, (3, 3))
                        dx_img = utils.resize_image(dx_mat, (n_x, n_y),
                                                    interp=cv2.INTER_CUBIC)

                        dy = np.random.normal(mu, sigma, 9)
                        dy_mat = np.reshape(dy, (3, 3))
                        dy_img = utils.resize_image(dy_mat, (n_x, n_y),
                                                    interp=cv2.INTER_CUBIC)

                        img = utils.dense_image_warp(img, dx_img, dy_img)

                        if augment_labels:

                            if nlabels <= 4:
                                lbl = utils.dense_image_warp_as_onehot(
                                    lbl, dx_img, dy_img, nlabels=nlabels)
                            else:
                                lbl = utils.dense_image_warp(
                                    lbl,
                                    dx_img,
                                    dy_img,
                                    interp=cv2.INTER_NEAREST,
                                    do_optimisation=False)

                # RANDOM FLIP
                if do_fliplr:
                    coin_flip = np.random.randint(
                        max(2, augment_every_nth)
                    )  # Flipping wouldn't make sense if you do it always
                    if coin_flip == 0:
                        img = np.fliplr(img)
                        if augment_labels:
                            lbl = np.fliplr(lbl)

                if do_flipud:
                    coin_flip = np.random.randint(max(2, augment_every_nth))
                    if coin_flip == 0:
                        img = np.flipud(img)
                        if augment_labels:
                            lbl = np.flipud(lbl)

                new_images.append(img[...])
                new_labels.append(lbl[...])

            sampled_image_batch = np.asarray(new_images)
            sampled_label_batch = np.asarray(new_labels)

            return sampled_image_batch, sampled_label_batch
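A hedged sketch of how this function might be configured from within the same class; all option names come from the get_option calls above, but the values are illustrative only:

# augment roughly half of each minibatch with rotations of up to
# +/-15 degrees plus random horizontal flips
self.augmentation_options = {
    'do_rotations': True,
    'rot_degrees': 15.0,
    'do_fliplr': True,
    'augment_every_nth': 2,  # 1 would augment every image
    'nlabels': 4,            # required whenever rotations, scaling, or elastic are on
}
images_aug, labels_aug = self._augmentation_function(images, labels)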
Example #31
                                                           minSize=(5, 5), maxSize=(80, 80),
                                                           flags=cv2.CASCADE_SCALE_IMAGE)
                # for big
                # faceRects_mouth=classifier_mouth.detectMultiScale(img_facehalf_bottom,scaleFactor=1.3,minNeighbors=10,minSize=(40,40),maxSize=(80,80),flags=cv2.CASCADE_SCALE_IMAGE)

                if len(mouths) > 0:
                    for mouth in mouths:
                        xm1, ym1, wm1, hm1 = mouth
                        cv2.rectangle(mouth_color, (xm1, ym1), (xm1 + wm1, ym1 + hm1), (0, 0, 255), 2)

                num += 1
                cv2.putText(frame_rotated, 'num:%d' % num, (x + 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255),
                            2)

                # rotate the frame back to its original position and trim the black padding
                frame_resized = ut.trim(ut.rotate_image(frame_rotated, rotation * (-1)), frame_scale)
                # reset the rotation map
                current_rotation_map = get_rotation_map(rotation)

                faceFound = True
                break

        if faceFound:
            frame_skip_rate = 0
            print("Face Found")
        else:
            frame_skip_rate = SKIP_FRAME
            print("Face Not Found")

    else:
        frame_skip_rate -= 1
Example #32
            # for f in faces:
            #     x, y, w, h = [ v*SCALE_FACTOR for v in f ] # scale the bounding box back to original frame size
            #     cv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0))
            #     cv2.putText(frame, "Training Face", (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0,255,0))

            if len(faces):
                for f in faces:
                    x, y, w, h = f  # unpack the bounding box (no rescaling here)
                    cropped_face = rotated_frame[y : y + h, x : x + w]  # img[y: y + h, x: x + w]
                    cropped_face = cv2.resize(cropped_face, FACE_DIM, interpolation=cv2.INTER_AREA)
                    cv2.rectangle(rotated_frame, (x, y), (x + w, y + h), (0, 255, 0))
                    cv2.putText(rotated_frame, "Training Face", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0))

                # rotate the frame back and trim the black paddings
                processed_frame = ut.trim(ut.rotate_image(rotated_frame, rotation * (-1)), frame_scale)

                # reset the optimized rotation map
                current_rotation_map = get_rotation_map(rotation)

                faceFound = True

                break

        if faceFound:
            frame_skip_rate = 0
            # print "Face Found"
        else:
            frame_skip_rate = SKIP_FRAME
            # print "Face Not Found"