Example #1
def process_1_img(im_path, model):
    resized = False
    frame = cv2.imread(im_path)
    if frame is None:
        return 2  # image could not be read
    if frame.shape[0] > 1000:
        frame = my_resize(frame, width=900, height=900)
        resized = True
    im_grids_final, points_grids, list_transform_matrix = main_grid_detector_img(
        frame, resized=resized)
    if im_grids_final is None:
        return 3  # no grid detected
    grids_matrix = process_extract_digits(im_grids_final,
                                          model,
                                          save_images_digit=False)
    if all(elem is None for elem in grids_matrix):
        return 4  # digit extraction failed
    grids_solved = main_solve_grids(grids_matrix)

    if grids_solved is None:
        return 5  # solver failed

    ims_filled_grid = write_solved_grids(im_grids_final, grids_matrix,
                                         grids_solved)
    im_final = recreate_img_filled(frame, ims_filled_grid, points_grids,
                                   list_transform_matrix)

    # NOTE: assumes a "solved" subdirectory already exists next to the input image
    cv2.imwrite(
        os.path.dirname(im_path) + "/solved/" +
        os.path.splitext(os.path.basename(im_path))[0] + "_solved.jpg",
        im_final)

    return 1  # success
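
A minimal caller sketch for the status codes above; the input path and the message strings are illustrative, and the digit model is assumed to be loaded elsewhere.

STATUS_MESSAGES = {
    1: "solved image written to the 'solved' subfolder",
    2: "image could not be read",
    3: "no grid detected",
    4: "digit extraction failed",
    5: "solver failed",
}

ret = process_1_img("images/sudoku.jpg", model)  # hypothetical path
print(STATUS_MESSAGES.get(ret, "unknown status code"))
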
Example #2
def main_grid_detector_img(frame,
                           resized=True,
                           display=False,
                           using_webcam=False,
                           use_hough=False):
    if not resized:
        frame_resize = my_resize(frame,
                                 width=param_resize_width,
                                 height=param_resize_height)
    else:
        frame_resize = frame
    # height ratio between the original frame and the resized working copy
    ratio = frame.shape[0] / frame_resize.shape[0]
    prepro_im_edges = preprocess_im(frame_resize, using_webcam)

    if display:
        extreme_points_biased, img_lines, img_contour = get_lines_and_corners(
            frame_resize.copy(),
            prepro_im_edges,
            use_hough=use_hough,
            display=display)
        show_big_image(frame_resize, prepro_im_edges, img_lines, img_contour,
                       use_hough)

    else:
        # without display, get_lines_and_corners returns only the corner points
        extreme_points_biased = get_lines_and_corners(frame_resize.copy(),
                                                      prepro_im_edges,
                                                      use_hough=use_hough,
                                                      display=display)

    if extreme_points_biased is None:
        return None, None, None
    grids_final, points_grids, transfo_matrix = undistorted_grids(
        frame, extreme_points_biased, ratio)
    return grids_final, points_grids, transfo_matrix
Example #3
def show_big_image(img, im_prepro, im_contours, pre_filled, display_annot=False):
    from src.useful_functions import my_resize
    color_text = (0, 0, 255)  # red in BGR; BLACK is expected to be a module-level constant
    my_font = cv2.FONT_HERSHEY_SIMPLEX
    my_font_scale = 1.2
    m_thickness = 2

    # Build a 2x2 montage: initial | preprocessed on top, detection | identification below
    top = np.concatenate((img, cv2.cvtColor(im_prepro, cv2.COLOR_GRAY2BGR)), axis=1)
    bot = np.concatenate((im_contours, pre_filled), axis=1)
    im_res = np.concatenate((top, bot), axis=0)

    if display_annot:
        h_im, w_im, _ = im_res.shape

        text1 = "0/ Initial Grid"
        text2 = "1/ Preprocessed Grid"
        text3 = "2/ Digits Detection"
        text4 = "3/ Digits Identification"

        (text_width, text_height) = cv2.getTextSize(text1, my_font, fontScale=my_font_scale, thickness=m_thickness)[0]
        cv2.rectangle(im_res, (0, 0),
                      (text_width + 15, text_height + 15),
                      BLACK, cv2.FILLED)
        cv2.putText(im_res, text1,
                    (5, text_height + 5),
                    my_font, my_font_scale, color_text, m_thickness)

        (text_width, text_height) = cv2.getTextSize(text2, my_font, fontScale=my_font_scale, thickness=m_thickness)[0]
        cv2.rectangle(im_res, (w_im // 2, 0),
                      (w_im // 2 + text_width + 15, text_height + 15),
                      BLACK, cv2.FILLED)
        cv2.putText(im_res, text2,
                    (w_im // 2 + 5, text_height + 5),
                    my_font, my_font_scale, color_text, m_thickness)

        (text_width, text_height) = cv2.getTextSize(text3, my_font, fontScale=my_font_scale, thickness=m_thickness)[0]
        cv2.rectangle(im_res, (0, h_im // 2),
                      (text_width + 15, h_im // 2 + text_height + 15),
                      BLACK, cv2.FILLED)
        cv2.putText(im_res, text3,
                    (5, h_im // 2 + text_height + 5),
                    my_font, my_font_scale, color_text, m_thickness)

        (text_width, text_height) = cv2.getTextSize(text4, my_font, fontScale=my_font_scale, thickness=m_thickness)[0]
        cv2.rectangle(im_res, (w_im // 2, h_im // 2),
                      (w_im // 2 + text_width + 15, h_im // 2 + text_height + 15),
                      BLACK, cv2.FILLED)
        cv2.putText(im_res, text4,
                    (w_im // 2 + 5, h_im // 2 + text_height + 5),
                    my_font, my_font_scale, color_text, m_thickness)

    cv2.imshow('res', my_resize(im_res, height=600))
Example #4
    # Inner function of a decorator: `func` is the wrapped function captured by the
    # enclosing decorator; the `img` keyword argument is resized before the call and
    # the result is scaled back to the original shape afterwards.
    def wrapper(*args, **kwargs):
        img = kwargs["img"]
        if img.shape[0] > 1000 or img.shape[0] < 800:
            old_shape = img.shape
            kwargs["img"] = my_resize(img,
                                      width=param_resize_width,
                                      height=param_resize_height)
        else:
            old_shape = None

        im_final = func(*args, **kwargs)
        if old_shape is not None:
            # cv2.resize expects (width, height), hence the reversed (h, w) slice
            im_final = cv2.resize(im_final, old_shape[:2][::-1])

        return im_final
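
For context, a minimal sketch of how such a wrapper is usually wired into a decorator; the decorator name, the wrapped function and the parameter values are assumptions, not taken from the original project.

import cv2
from src.useful_functions import my_resize

param_resize_width, param_resize_height = 900, 900  # placeholder values


def keep_original_size(func):
    # Resize the `img` keyword argument before calling `func`, then restore the original size.
    def wrapper(*args, **kwargs):
        img = kwargs["img"]
        if img.shape[0] > 1000 or img.shape[0] < 800:
            old_shape = img.shape
            kwargs["img"] = my_resize(img,
                                      width=param_resize_width,
                                      height=param_resize_height)
        else:
            old_shape = None
        im_final = func(*args, **kwargs)
        if old_shape is not None:
            im_final = cv2.resize(im_final, old_shape[:2][::-1])
        return im_final
    return wrapper


@keep_original_size
def identity(img=None):
    # hypothetical wrapped function: anything that takes and returns an image via `img`
    return img
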
Example #5
def show_hough(edges):
    # cv2.imshow("edges", edges)
    # old_values = [-1,-1,-1]
    # The 'track' window and its trackbars must be created by the caller beforehand
    # (see the setup sketch after this example); press Esc to exit the loop.
    while True:
        w = cv2.getTrackbarPos('width', 'track')
        edges_resize = my_resize(edges, width=max(100, w))
        cv2.imshow("edges_resize", edges_resize)

        A = cv2.getTrackbarPos('thresh', 'track')
        B = cv2.getTrackbarPos('minLineLength', 'track')
        C = cv2.getTrackbarPos('maxLineGa', 'track')
        rho = max(1, cv2.getTrackbarPos('rho', 'track'))
        theta = max(1, cv2.getTrackbarPos('theta', 'track')) * np.pi / 180
        my_lines = []

        img_lines = np.zeros((edges_resize.shape[:2]), np.uint8)

        lines_raw = cv2.HoughLinesP(edges_resize,
                                    rho=rho,
                                    theta=theta,
                                    threshold=A,
                                    minLineLength=B,
                                    maxLineGap=C)

        img_binary_lines = cv2.cvtColor(edges_resize, cv2.COLOR_GRAY2BGR)
        if lines_raw is not None:
            for line in lines_raw:
                my_lines.append(MyHoughPLines(line))

            for line in my_lines:
                x1, y1, x2, y2 = line.get_limits()
                cv2.line(img_lines, (x1, y1), (x2, y2), 255, 2)

            for line in my_lines:
                x1, y1, x2, y2 = line.get_limits()
                cv2.line(img_binary_lines, (x1, y1), (x2, y2), (0, 0, 255), 2)

        # cv2.imshow('img_lines', resize(img_lines, width=900))
        cv2.imshow('img_lines', img_lines)
        # cv2.imshow('img_binary_lines', resize(img_binary_lines, width=900))
        cv2.imshow('img_binary_lines', img_binary_lines)
        k = cv2.waitKey(10) & 0xFF
        if k == 27:
            break
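
A minimal setup sketch for the 'track' window assumed above; the initial values, slider ranges and the input path are placeholders.

import cv2

cv2.namedWindow('track')
cv2.createTrackbar('width', 'track', 400, 1200, lambda v: None)
cv2.createTrackbar('thresh', 'track', 50, 300, lambda v: None)
cv2.createTrackbar('minLineLength', 'track', 50, 300, lambda v: None)
cv2.createTrackbar('maxLineGa', 'track', 10, 100, lambda v: None)
cv2.createTrackbar('rho', 'track', 1, 10, lambda v: None)
cv2.createTrackbar('theta', 'track', 1, 10, lambda v: None)

gray = cv2.imread('sudoku.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical input image
edges = cv2.Canny(gray, 50, 150)
show_hough(edges)
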
Example #6
def process_single_img(frame, model, save=False):
    # Resizing image
    if frame.shape[0] > 1000 or frame.shape[0] < 800:
        old_shape = frame.shape
        frame = my_resize(frame,
                          width=param_resize_width,
                          height=param_resize_height)
    else:
        old_shape = None

    # Extracting grids
    im_grids_final, points_grids, list_transform_matrix = main_grid_detector_img(
        frame)
    if im_grids_final is None:
        return frame

    # Generate matrix representing digits in grids
    grids_matrix = process_extract_digits(im_grids_final, model)
    if all(elem is None for elem in grids_matrix):
        return frame

    # Solving grids
    grids_solved = main_solve_grids(grids_matrix)

    if grids_solved is None:
        return frame

    ims_filled_grid = write_solved_grids(im_grids_final, grids_matrix,
                                         grids_solved)
    im_final = recreate_img_filled(frame, ims_filled_grid, points_grids,
                                   list_transform_matrix)

    if old_shape is not None:
        im_final = cv2.resize(im_final, old_shape[:2][::-1])

    if save:
        # NOTE: `save_folder` and `im_path` are not defined in this function's scope;
        # they are expected as module-level names for this branch to run as written.
        if not os.path.isdir(save_folder):
            os.makedirs(save_folder)
        cv2.imwrite(
            save_folder + os.path.splitext(os.path.basename(im_path))[0] +
            "_solved.jpg", im_final)

    return im_final
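
Because this variant returns the annotated frame instead of exiting, it fits a simple capture loop; a minimal sketch, assuming the digit model has been loaded elsewhere.

import cv2

cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow("sudoku", process_single_img(frame, model))
    if cv2.waitKey(1) & 0xFF == 27:  # Esc to quit
        break
cap.release()
cv2.destroyAllWindows()
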
Example #7
def show_big_image(frame_resize,
                   prepro_im,
                   img_lines,
                   img_contour,
                   use_hough=False):
    # `font`, `font_scale_normal`, `thickness_normal`, `WHITE` and `RED` are
    # module-level constants in the original project.
    if not use_hough:
        im_res = np.concatenate((frame_resize, img_contour), axis=0)
    else:
        top = np.concatenate(
            (frame_resize, cv2.cvtColor(prepro_im, cv2.COLOR_GRAY2BGR)),
            axis=1)
        bot = np.concatenate(
            (cv2.cvtColor(img_lines, cv2.COLOR_GRAY2BGR), img_contour), axis=1)
        im_res = np.concatenate((top, bot), axis=0)
        h_im, w_im, _ = im_res.shape

        text1 = "0/ Initial Image"
        text2 = "1/ Preprocessed Image"
        text3 = "2/ Hough Transform"
        text4 = "3/ Grids Extraction"

        (text_width,
         text_height) = cv2.getTextSize(text1,
                                        font,
                                        fontScale=font_scale_normal,
                                        thickness=thickness_normal)[0]
        # cv2.putText(im_res, text1,
        #             (w_im // 2 - text_width - 30, h_im // 2 - 30),
        #             font, font_scale_normal, WHITE, thickness_normal * 3)
        cv2.rectangle(im_res, (0, 0), (text_width + 30, text_height + 30),
                      WHITE, cv2.FILLED)
        cv2.putText(im_res, text1, (10, text_height + 10), font,
                    font_scale_normal, RED, thickness_normal)

        (text_width,
         text_height) = cv2.getTextSize(text2,
                                        font,
                                        fontScale=font_scale_normal,
                                        thickness=thickness_normal)[0]
        cv2.rectangle(im_res, (w_im // 2, 0),
                      (w_im // 2 + text_width + 30, text_height + 30), WHITE,
                      cv2.FILLED)
        cv2.putText(im_res, text2, (w_im // 2 + 10, text_height + 10), font,
                    font_scale_normal, RED, thickness_normal)

        (text_width,
         text_height) = cv2.getTextSize(text3,
                                        font,
                                        fontScale=font_scale_normal,
                                        thickness=thickness_normal)[0]
        cv2.rectangle(im_res, (0, h_im // 2),
                      (text_width + 30, h_im // 2 + text_height + 30), WHITE,
                      cv2.FILLED)
        cv2.putText(im_res, text3, (10, h_im // 2 + text_height + 10), font,
                    font_scale_normal, RED, thickness_normal)

        (text_width,
         text_height) = cv2.getTextSize(text4,
                                        font,
                                        fontScale=font_scale_normal,
                                        thickness=thickness_normal)[0]
        cv2.rectangle(
            im_res, (w_im // 2, h_im // 2),
            (w_im // 2 + text_width + 30, h_im // 2 + text_height + 30), WHITE,
            cv2.FILLED)
        cv2.putText(im_res, text4,
                    (w_im // 2 + 10, h_im // 2 + text_height + 10), font,
                    font_scale_normal, RED, thickness_normal)

    cv2.imshow('res', my_resize(im_res, height=900))
Example #8
def main_process_img(im_path,
                     model,
                     save=False,
                     display=False,
                     use_hough=True,
                     save_images_digit=False):
    init = time.time()
    # TODO: check whether the image is well oriented (EXIF orientation data)
    frame = cv2.imread(im_path)
    init0 = time.time()
    if frame is None:
        logger.error("This path doesn't lead to a frame")
        sys.exit(3)
    if frame.shape[0] > 1000 or frame.shape[0] < 800:
        frame = my_resize(frame,
                          width=param_resize_width,
                          height=param_resize_height)
    im_grids_final, points_grids, list_transform_matrix = main_grid_detector_img(
        frame, display=display, use_hough=use_hough)
    found_grid_time = time.time()
    if im_grids_final is None:
        logger.error("No grid found")
        sys.exit(3)
    logger.info("Grid(s) found")
    grids_matrix = process_extract_digits(im_grids_final,
                                          model,
                                          display=display,
                                          display_digit=False,
                                          save_images_digit=save_images_digit)
    if all(elem is None for elem in grids_matrix):
        logger.error("Failed during digits extraction")
        sys.exit(3)
    logger.info("Extraction done")
    extract_time = time.time()
    grids_solved = main_solve_grids(grids_matrix)
    logger.info("Solving done")

    if grids_solved is None:
        print(grids_matrix)
        cv2.imshow('grid_extract', im_grids_final[0])
        cv2.imwrite(
            save_folder + os.path.splitext(os.path.basename(im_path))[0] +
            "_failed.jpg", im_grids_final[0])
        cv2.waitKey()
        sys.exit(3)

    solve_time = time.time()

    ims_filled_grid = write_solved_grids(im_grids_final, grids_matrix,
                                         grids_solved)
    im_final = recreate_img_filled(frame, ims_filled_grid, points_grids,
                                   list_transform_matrix)
    final_time = time.time()

    if save:
        if not os.path.isdir(save_folder):
            os.makedirs(save_folder)
        cv2.imwrite(
            save_folder + os.path.splitext(os.path.basename(im_path))[0] +
            "_solved.jpg", im_final)

    total_time = final_time - init

    load_time = init0 - init
    logger.info("Load Image\t\t\t{:03.1f}% - {:05.2f}ms".format(
        100 * load_time / total_time, 1000 * load_time))
    founding_time = found_grid_time - init0
    logger.info("Grid Research \t\t{:03.1f}% - {:05.2f}ms".format(
        100 * founding_time / total_time, 1000 * founding_time))
    extraction_duration = extract_time - found_grid_time
    logger.info("Digits Extraction \t{:03.1f}% - {:05.2f}ms".format(
        100 * extraction_duration / total_time, 1000 * extraction_duration))
    solving_duration = solve_time - extract_time
    logger.info("Grid Solving \t\t{:03.1f}% - {:05.2f}ms".format(
        100 * solving_duration / total_time, 1000 * solving_duration))
    recreation_duration = final_time - solve_time
    logger.info("Image recreation \t{:03.1f}% - {:05.2f}ms".format(
        100 * recreation_duration / total_time, 1000 * recreation_duration))
    logger.info("PROCESS DURATION \t{:.2f}s".format(final_time - init0))
    logger.info("EVERYTHING DONE \t{:.2f}s".format(total_time))
    # print(grid)
    # print(grid_solved)

    if len(ims_filled_grid) == 1:
        cv2.imshow('img', frame)
        cv2.imshow('grid_extract', im_grids_final[0])
        cv2.imshow('grid_filled', ims_filled_grid[0])
    cv2.imshow('im_final', im_final)
    cv2.waitKey()