# Example 1
def example_face_ortho(filename_actor, filename_obj, filename_3dmarkers=None):
    """Fit a 3D face model to an actor photo with an orthographic projection.

    Detects 68 facial landmarks in *filename_actor*, estimates an orthographic
    pose (rotation, translation, per-axis scale) against the 3D model in
    *filename_obj*, renders the model at that pose and blends it over the
    photo.  Writes the blended result and the raw render to images/output/.

    :param filename_actor: path to the input photograph
    :param filename_obj: path to the 3D face model (.obj)
    :param filename_3dmarkers: optional path to 3D marker coordinates
    """
    D = detector_landmarks.detector_landmarks(
        '..//_weights//shape_predictor_68_face_landmarks.dat',
        filename_3dmarkers)
    image_actor = cv2.imread(filename_actor)

    # Off-screen renderer sized to the photo; 'O' selects orthographic mode.
    R = tools_GL3D.render_GL3D(filename_obj=filename_obj,
                               W=image_actor.shape[1],
                               H=image_actor.shape[0],
                               is_visible=False,
                               projection_type='O',
                               scale=(1, 1, 1))
    R.inverce_transform_model('Z')
    L = D.get_landmarks(image_actor)
    L3D = D.model_68_points

    rvec, tvec, scale_factor = D.get_pose_ortho(image_actor,
                                                L,
                                                L3D,
                                                R.mat_trns,
                                                do_debug=True)
    print('[ %1.2f, %1.2f, %1.2f], [%1.2f,  %1.2f,  %1.2f], [%1.2f,%1.2f]' %
          (rvec[0], rvec[1], rvec[2], tvec[0], tvec[1], tvec[2],
           scale_factor[0], scale_factor[1]))

    image_3d = R.get_image_ortho(rvec, tvec, scale_factor, do_debug=True)
    # FIX: numpy.int was removed in NumPy 1.24; the builtin int is the
    # documented replacement and behaves identically here.
    clr = (255 * numpy.array(R.bg_color)).astype(int)
    result = tools_image.blend_avg(image_actor, image_3d, clr, weight=0)
    cv2.imwrite('./images/output/face_GL_ortho.png', result)
    cv2.imwrite('./images/output/face_image_3d.png', image_3d)

    return
# Example 2
def example_03_find_homography_by_keypoints(detector='SIFT', matchtype='knn'):
    """Estimate a homography between a left/right image pair from keypoint
    matches, warp both images into a common frame, blend them, and save the
    warped and blended results to images/output/.

    :param detector: keypoint detector name passed to tools_alg_match
    :param matchtype: matching strategy passed to tools_alg_match
    """
    src_dir = 'images/ex_keypoints/'
    image_left = cv2.imread(src_dir + 'left.jpg')
    image_right = cv2.imread(src_dir + 'rght.jpg')

    gray_left = tools_image.desaturate(image_left)
    gray_right = tools_image.desaturate(image_right)

    dst_dir = 'images/output/'
    filename_left = dst_dir + 'left_transformed_homography.png'
    filename_right = dst_dir + 'rght_transformed_homography.png'
    filename_blend = dst_dir + 'blended_homography.png'

    # Start from an empty output folder either way.
    if os.path.exists(dst_dir):
        tools_IO.remove_files(dst_dir)
    else:
        os.makedirs(dst_dir)

    kp_left, desc_left = tools_alg_match.get_keypoints_desc(image_left, detector)
    kp_right, desc_right = tools_alg_match.get_keypoints_desc(image_right, detector)

    homography = tools_calibrate.get_homography_by_keypoints_desc(
        kp_left, desc_left, kp_right, desc_right, matchtype)
    match1, match2, distance = tools_alg_match.get_matches_from_keypoints_desc(
        kp_left, desc_left, kp_right, desc_right, matchtype)

    # Mark matched keypoints: red on the left image, cyan on the right.
    for pt in match1:
        gray_left = tools_draw_numpy.draw_circle(
            gray_left, int(pt[1]), int(pt[0]), 3, [0, 0, 255])
    for pt in match2:
        gray_right = tools_draw_numpy.draw_circle(
            gray_right, int(pt[1]), int(pt[0]), 3, [255, 255, 0])

    warped_left, warped_right = tools_calibrate.get_stitched_images_using_homography(
        gray_left,
        gray_right,
        homography,
        background_color=(255, 255, 255))
    blended = tools_image.blend_avg(warped_left,
                                    warped_right,
                                    background_color=(255, 255, 255))

    cv2.imwrite(filename_left, warped_left)
    cv2.imwrite(filename_right, warped_right)
    cv2.imwrite(filename_blend, blended)
    return
# Example 3
def example_04_blend_white_avg():
    """Average-blend two white-background test images and save the result
    to images/output/avg.png."""
    src_dir = 'images/ex_blend/'
    left = cv2.imread(src_dir + 'white_L.png')
    right = cv2.imread(src_dir + 'white_R.png')

    dst_dir = 'images/output/'
    # Ensure an empty output folder before writing.
    if os.path.exists(dst_dir):
        tools_IO.remove_files(dst_dir)
    else:
        os.makedirs(dst_dir)

    blended = tools_image.blend_avg(left, right, (255, 255, 255))
    cv2.imwrite(dst_dir + 'avg.png', blended)

    return
# Example 4
def example_face_perspective(filename_actor,
                             filename_obj,
                             filename_3dmarkers=None,
                             do_debug=False):
    """Fit a 3D face model to an actor photo with a perspective projection.

    Detects 68 facial landmarks, estimates a perspective pose (rvec/tvec)
    against the flattened 3D model, renders and blends the model over the
    photo, and also draws the CV-side model-view-projection reprojection for
    comparison.  Results are written to images/output/.

    :param filename_actor: path to the input photograph
    :param filename_obj: path to the 3D face model (.obj)
    :param filename_3dmarkers: optional path to 3D marker coordinates
    :param do_debug: forwarded to the GL renderer's debug output
    """
    D = detector_landmarks.detector_landmarks(
        '..//_weights//shape_predictor_68_face_landmarks.dat',
        filename_3dmarkers)

    image_actor = cv2.imread(filename_actor)
    image_actor = tools_image.smart_resize(image_actor, 640, 640)

    R = tools_GL3D.render_GL3D(filename_obj=filename_obj,
                               W=image_actor.shape[1],
                               H=image_actor.shape[0],
                               is_visible=False,
                               projection_type='P',
                               scale=(1, 1, 0.25))

    L = D.get_landmarks(image_actor)
    L3D = D.model_68_points
    # Flatten the model to the z=0 plane before the perspective pose fit.
    L3D[:, 2] = 0

    rvec, tvec = D.get_pose_perspective(image_actor, L, L3D, R.mat_trns)

    print('[ %1.2f, %1.2f, %1.2f], [%1.2f,  %1.2f,  %1.2f]' %
          (rvec[0], rvec[1], rvec[2], tvec[0], tvec[1], tvec[2]))

    image_3d = R.get_image_perspective(rvec, tvec, do_debug=do_debug)
    # FIX: numpy.int was removed in NumPy 1.24; the builtin int is the
    # documented replacement and behaves identically here.
    clr = (255 * numpy.array(R.bg_color)).astype(int)
    result = tools_image.blend_avg(image_actor, image_3d, clr, weight=0)
    cv2.imwrite('./images/output/face_GL.png', result)

    # Rebuild the model-view matrix from the fitted pose and reproject the
    # 3D landmarks with the CV pipeline as a cross-check of the GL render.
    M = pyrr.matrix44.multiply(pyrr.matrix44.create_from_eulers(rvec),
                               pyrr.matrix44.create_from_translation(tvec))
    R.mat_model, R.mat_view = tools_pr_geom.decompose_model_view(M)
    result = tools_render_CV.draw_points_numpy_MVP(L3D, image_actor,
                                                   R.mat_projection,
                                                   R.mat_view, R.mat_model,
                                                   R.mat_trns)
    result = D.draw_landmarks_v2(result, L)
    cv2.imwrite('./images/output/face_CV_MVP.png', result)

    return
# Example 5
def blend_by_coord(image,
                   coord1,
                   coord2,
                   window_size,
                   fill_declines=False,
                   background_color=(128, 128, 128)):
    """Reproject matched coordinate pairs within *image* and average-blend
    the reprojection with the original, masked to the reprojected region.

    :param image: source image (used as both match sources)
    :param coord1: first set of match coordinates
    :param coord2: second set of match coordinates
    :param window_size: patch size passed to the reprojection
    :param fill_declines: forwarded to tools_alg_match.reproject_matches
    :param background_color: color treated as background in mask and blend
    :return: blended image
    """
    reproject1, reproject2, m1, m2 = tools_alg_match.reproject_matches(
        image,
        image,
        coord1,
        coord2,
        fill_declines=fill_declines,
        window_size=window_size,
        background_color=background_color)

    # Keep the original only where the reprojection has content; everything
    # outside the mask becomes background before blending.
    base = image.copy()
    mask = tools_image.get_mask(reproject1, background_color)
    base[mask == 0] = background_color

    return tools_image.blend_avg(reproject1,
                                 base,
                                 background_color=background_color)
# Example 6
def example_project_GL_vs_CV_acuro():
    """Compare OpenGL and OpenCV renderings of a cube posed on an ArUco marker.

    Detects a marker in a fixed test frame, recovers its pose, then draws the
    cube both with the CV projection (cube_CV.png) and with the GL renderer
    blended over the frame (cube_GL.png) so the two pipelines can be compared
    visually.  The recovered pose is printed to stdout.
    """
    marker_length = 1
    aperture_x, aperture_y = 0.5, 0.5

    frame = cv2.imread('./images/ex_aruco/01.jpg')
    R = tools_GL3D.render_GL3D(filename_obj='./images/ex_GL/box/box.obj',
                               W=frame.shape[1],
                               H=frame.shape[0],
                               is_visible=False,
                               projection_type='P')

    mat_camera = tools_pr_geom.compose_projection_mat_3x3(
        frame.shape[1], frame.shape[0], aperture_x, aperture_y)

    # numpy.zeros(4) stands in for zero lens-distortion coefficients.
    axes_image, r_vec, t_vec = tools_aruco.detect_marker_and_draw_axes(
        frame, marker_length, mat_camera, numpy.zeros(4))
    cv2.imwrite(
        './images/output/cube_CV.png',
        tools_render_CV.draw_cube_numpy(axes_image, mat_camera, numpy.zeros(4),
                                        r_vec.flatten(), t_vec.flatten(),
                                        (0.5, 0.5, 0.5)))

    image_3d = R.get_image_perspective(r_vec.flatten(),
                                       t_vec.flatten(),
                                       aperture_x,
                                       aperture_y,
                                       scale=(0.5, 0.5, 0.5),
                                       do_debug=True)
    # FIX: numpy.int was removed in NumPy 1.24; the builtin int is the
    # documented replacement and behaves identically here.
    clr = (255 * numpy.array(R.bg_color)).astype(int)
    cv2.imwrite('./images/output/cube_GL.png',
                tools_image.blend_avg(frame, image_3d, clr, weight=0))

    r_vec, t_vec = r_vec.flatten(), t_vec.flatten()
    print('[ %1.2f, %1.2f, %1.2f], [%1.2f,  %1.2f,  %1.2f],  %1.2f' %
          (r_vec[0], r_vec[1], r_vec[2], t_vec[0], t_vec[1], t_vec[2],
           aperture_x))
    return
# Example 7
def E2E_detect_patterns(folder_in,
                        filename_in_field,
                        folder_out,
                        pattern_height,
                        pattern_width,
                        max_period,
                        window_size=25,
                        step=10):
    """End-to-end repeated-pattern detection in a field image.

    Pipeline: build (or reload a cached) self-hit map, pick pattern candidates
    at local peaks of the map, select the best candidate pattern, template-match
    it back against the image, visualize the hits, and reproject the pattern.
    All intermediate and final images are written into *folder_out*.

    :param folder_in: directory containing the input image
    :param filename_in_field: input image filename inside folder_in
    :param folder_out: output directory for all debug/result images
    :param pattern_height: expected pattern height in pixels (may be refined
        by get_self_hits when the hitmap is recomputed)
    :param pattern_width: expected pattern width in pixels (ditto)
    :param max_period: maximum repetition period passed to get_self_hits
    :param window_size: matching window size; also used as a filename prefix
    :param step: scan step passed to get_self_hits
    """
    print(folder_in)

    image = cv2.imread(folder_in + filename_in_field)
    image_gray = tools_image.desaturate(image)

    # peak_factor shrinks the min peak spacing; cut_factor shrinks the crop
    # around each candidate relative to the nominal pattern size.
    peak_factor = 0.25
    cut_factor = 0.95

    #pairs hitmap
    # Reuse a previously computed hitmap if present (it is expensive to build);
    # otherwise compute it from scratch.  NOTE(review): in the cached branch
    # pattern_height/pattern_width keep their passed-in values, while the
    # fresh branch lets get_self_hits override them.
    if os.path.isfile(folder_out + 'hitmap.png'):
        hitmap = cv2.imread(folder_out + 'hitmap.png', 0)
        # Clear stale outputs, then re-save the hitmap (remove_files deleted it).
        tools_IO.remove_files(folder_out)
        cv2.imwrite(folder_out + '%d-hitmap_00.png' % (window_size),
                    tools_image.hitmap2d_to_jet(hitmap))
        cv2.imwrite(folder_out + 'hitmap.png', hitmap)
    else:
        tools_IO.remove_files(folder_out)
        hitmap, pattern_height, pattern_width = get_self_hits(
            image,
            max_period,
            pattern_height,
            pattern_width,
            window_size,
            step,
            folder_debug=folder_out)
        cv2.imwrite(folder_out + '%d-hitmap_00.png' % (window_size),
                    tools_image.hitmap2d_to_jet(hitmap))
        cv2.imwrite(folder_out + 'hitmap.png', hitmap)

    #candidates to patterns
    # Local maxima of the hitmap are candidate pattern centers; the minimum
    # peak spacing is a peak_factor fraction of the pattern size.
    coord = tools_alg_grid_templates.find_local_peaks(
        hitmap, int(pattern_height * peak_factor),
        int(pattern_width * peak_factor))
    for i in range(0, coord.shape[0]):
        image_gray = tools_draw_numpy.draw_circle(image_gray, coord[i, 0],
                                                  coord[i, 1], 5, (0, 64, 255))
    cv2.imwrite(folder_out + '%d-hits_00.png' % window_size, image_gray)
    # Crop slightly-reduced (cut_factor) patches from both the image and the
    # hitmap at each candidate location.
    patterns_candidates = tools_alg_grid_templates.coordinates_to_images(
        image, coord, cut_factor * pattern_height, cut_factor * pattern_width)
    hitmap_candidates = tools_alg_grid_templates.coordinates_to_images(
        hitmap, coord, cut_factor * pattern_height, cut_factor * pattern_width)

    #best pattern
    pattern = get_best_pattern(patterns_candidates, hitmap_candidates)
    cv2.imwrite(folder_out + '%d-pattern_00.png' % (window_size), pattern)

    #template match with pattern
    # Second-pass hitmap: match the chosen pattern back against the full image
    # with a small rotation tolerance.
    hitmap = tools_alg_match.calc_hit_field_basic(image,
                                                  pattern,
                                                  rotation_tol=10,
                                                  rotation_step=0.5)
    cv2.imwrite(folder_out + '%d-hitmap_01_jet.png' % (window_size),
                tools_image.hitmap2d_to_jet(hitmap))
    cv2.imwrite(folder_out + '%d-hitmap_01_vrd.png' % (window_size),
                tools_image.hitmap2d_to_viridis(hitmap))

    #visualize hits
    # Accept only peaks above min_value; color each accepted hit by mapping
    # its score onto a 0-85 hue range (red..green in HSV).
    min_value = 200
    coord, image_miss = tools_alg_grid_templates.find_local_peaks_greedy(
        hitmap, int(1.0 * pattern_height), int(1.0 * pattern_width), min_value)
    image_gray = tools_image.desaturate(image)
    for i in range(0, coord.shape[0]):
        value = int(85 * (hitmap[coord[i, 0], coord[i, 1]] - min_value) /
                    (255 - min_value))
        image_gray = tools_draw_numpy.draw_circle(
            image_gray, coord[i, 0], coord[i, 1], 9,
            tools_image.hsv2bgr((value, 255, 255)))

    # Overlay the missed-detection layer, then soften it with a 50/50 blend.
    image_gray_and_miss = tools_image.put_layer_on_image(image_gray,
                                                         image_miss,
                                                         background_color=(0,
                                                                           0,
                                                                           0))
    image_gray_and_miss = tools_image.blend_avg(image_gray,
                                                image_gray_and_miss,
                                                weight=0.5)
    cv2.imwrite(folder_out + '%d-hits_01.png' % window_size,
                image_gray_and_miss)

    #reproject pattern
    image_gen = generate_input(image, coord, pattern)
    cv2.imwrite(folder_out + '%d-gen.png' % window_size, image_gen)

    return


# ----------------------------------------------------------------------------------------------------------------------