Code Example #1
    def createAlgsList(self):
        # First we populate the list of algorithms with those created
        # extending GeoAlgorithm directly (those that execute GDAL
        # using the console)
        self.preloadedAlgs = [nearblack(), information(), warp(), translate(),
                              rgb2pct(), pct2rgb(), merge(), buildvrt(), polygonize(), gdaladdo(),
                              ClipByExtent(), ClipByMask(), contour(), rasterize(), proximity(),
                              sieve(), fillnodata(), ExtractProjection(), gdal2xyz(),
                              hillshade(), slope(), aspect(), tri(), tpi(), roughness(),
                              ColorRelief(), GridInvDist(), GridAverage(), GridNearest(),
                              GridDataMetrics(), gdaltindex(), gdalcalc(), rasterize_over(),
                              # ----- OGR tools -----
                              OgrInfo(), Ogr2Ogr(), Ogr2OgrClip(), Ogr2OgrClipExtent(),
                              Ogr2OgrToPostGis(), Ogr2OgrToPostGisList(), Ogr2OgrPointsOnLines(),
                              Ogr2OgrBuffer(), Ogr2OgrDissolve(), Ogr2OgrOneSideBuffer(),
                              Ogr2OgrTableToPostGisList(), OgrSql(),
                              ]

        # And then we add those that are created as python scripts
        folder = self.scriptsFolder()
        if os.path.exists(folder):
            for descriptionFile in os.listdir(folder):
                if descriptionFile.endswith('py'):
                    try:
                        fullpath = os.path.join(self.scriptsFolder(),
                                                descriptionFile)
                        alg = GdalScriptAlgorithm(fullpath)
                        self.preloadedAlgs.append(alg)
                    except WrongScriptException as e:
                        ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, e.msg)
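
As a standalone illustration of the same pattern, here is a minimal, self-contained sketch of scanning a folder for *.py scripts and wrapping each one as an algorithm, with the same skip-on-error behaviour. ScriptAlgorithm and BadScriptError are hypothetical stand-ins for GdalScriptAlgorithm and WrongScriptException.

import os

class BadScriptError(Exception):
    """Hypothetical stand-in for WrongScriptException."""
    def __init__(self, msg):
        self.msg = msg

class ScriptAlgorithm:
    """Hypothetical stand-in for GdalScriptAlgorithm."""
    def __init__(self, path):
        if not path.endswith('.py'):
            raise BadScriptError('not a python script: ' + path)
        self.path = path

def load_script_algs(folder):
    algs = []
    if os.path.exists(folder):
        for name in os.listdir(folder):
            if name.endswith('.py'):
                try:
                    algs.append(ScriptAlgorithm(os.path.join(folder, name)))
                except BadScriptError as e:
                    # a broken script is logged and skipped, not fatal
                    print('skipping broken script:', e.msg)
    return algs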
Code Example #2
def warp_target(subject, target):
    if (target.shape[0] > subject.shape[0]):
        print('bigger target')
        new_subject = np.zeros(
            (target.shape[0] - subject.shape[0], subject.shape[1], 3),
            dtype=subject.dtype)
        subject = np.vstack((subject, new_subject))
    else:
        print('bigger subject')
        # pad target to match the subject's height
        new_target = np.zeros(
            (subject.shape[0] - target.shape[0], target.shape[1], 3),
            dtype=target.dtype)
        target = np.vstack((target, new_target))

    # keep both heights even (presumably required by warp.warp)
    if subject.shape[0] % 2 != 0:
        zero_layer = np.zeros((1, target.shape[1], 3), dtype=target.dtype)
        target = np.vstack((target, zero_layer))
        subject = np.vstack((subject, zero_layer))

    #cv2.imshow('s', subject)
    #cv2.imshow('t', target)
    #cv2.waitKey(0)

    warped_target = warp.warp(target, subject)
    #cv2.imshow('new', warped_target)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()

    return subject, warped_target
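
A minimal usage sketch of the padding bookkeeping above, assuming warp.warp expects two images of equal (and even) height. The arrays are synthetic stand-ins for real face crops; warp.warp itself is not exercised here.

import numpy as np

# synthetic stand-ins: a 99-row "subject" and a 120-row "target"
subject = np.zeros((99, 80, 3), dtype=np.uint8)
target = np.zeros((120, 80, 3), dtype=np.uint8)

# pad the shorter image so both share the same height
pad = np.zeros((target.shape[0] - subject.shape[0], subject.shape[1], 3),
               dtype=subject.dtype)
subject = np.vstack((subject, pad))
assert subject.shape[0] == target.shape[0]

# add one extra zero row to both if the shared height is odd
if subject.shape[0] % 2 != 0:
    row = np.zeros((1, subject.shape[1], 3), dtype=subject.dtype)
    subject = np.vstack((subject, row))
    target = np.vstack((target, row))
assert subject.shape == target.shape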
Code Example #3
    def createAlgsList(self):
        # First we populate the list of algorithms with those created
        # extending GeoAlgorithm directly (those that execute GDAL
        # using the console)
        self.preloadedAlgs = [nearblack(), information(), warp(), translate(),
            rgb2pct(), pct2rgb(), merge(), polygonize(), gdaladdo(),
            ClipByExtent(), ClipByMask(), contour(), rasterize(), proximity(),
            sieve(), fillnodata(), ExtractProjection(), gdal2xyz(),
            hillshade(), slope(), aspect(), tri(), tpi(), roughness(),
            ColorRelief(), GridInvDist(), GridAverage(), GridNearest(),
            GridDataMetrics(),
            # ----- OGR tools -----
            OgrInfo(), Ogr2Ogr(), OgrSql(),
            ]

        # And then we add those that are created as python scripts
        folder = self.scriptsFolder()
        if os.path.exists(folder):
            for descriptionFile in os.listdir(folder):
                if descriptionFile.endswith('py'):
                    try:
                        fullpath = os.path.join(self.scriptsFolder(),
                                descriptionFile)
                        alg = GdalScriptAlgorithm(fullpath)
                        self.preloadedAlgs.append(alg)
                    except WrongScriptException as e:
                        ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, e.msg)
Code Example #4
def main3():
    init = util.readImage('input/TestSeq/Shift0.png')
    final = util.readImage('input/TestSeq/ShiftR2.png')
    U, V = lk_flow(init, final)
    warped = warp.warp(final, U, V)
    cv2.imshow("original", init)
    cv2.imshow("modified", warped)
    import time
    time.sleep(30)
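
The warp.warp(final, U, V) call above is project code; a plausible reading (an assumption, not the project's actual implementation) is a backward warp that samples the image at positions displaced by the flow field, which cv2.remap expresses directly:

import numpy as np
import cv2

def backward_warp(img, U, V):
    # sample img at (x + U, y + V) for every output pixel
    h, w = img.shape[:2]
    xs, ys = np.meshgrid(np.arange(w, dtype=np.float32),
                         np.arange(h, dtype=np.float32))
    map_x = xs + U.astype(np.float32)
    map_y = ys + V.astype(np.float32)
    return cv2.remap(img, map_x, map_y, interpolation=cv2.INTER_LINEAR)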
Code Example #5
    def createAlgsList(self):
        # First we populate the list of algorithms with those created
        # extending GeoAlgorithm directly (those that execute GDAL
        # using the console)
        self.preloadedAlgs = [
            nearblack(),
            information(),
            warp(),
            translate(),
            rgb2pct(),
            pct2rgb(),
            merge(),
            buildvrt(),
            polygonize(),
            gdaladdo(),
            ClipByExtent(),
            ClipByMask(),
            contour(),
            rasterize(),
            proximity(),
            sieve(),
            fillnodata(),
            ExtractProjection(),
            gdal2xyz(),
            hillshade(),
            slope(),
            aspect(),
            tri(),
            tpi(),
            roughness(),
            ColorRelief(),
            GridInvDist(),
            GridAverage(),
            GridNearest(),
            GridDataMetrics(),
            gdaltindex(),
            gdalcalc(),
            rasterize_over(),
            retile(),
            gdal2tiles(),
            # ----- OGR tools -----
            OgrInfo(),
            Ogr2Ogr(),
            Ogr2OgrClip(),
            Ogr2OgrClipExtent(),
            Ogr2OgrToPostGis(),
            Ogr2OgrToPostGisList(),
            Ogr2OgrPointsOnLines(),
            Ogr2OgrBuffer(),
            Ogr2OgrDissolve(),
            Ogr2OgrOneSideBuffer(),
            Ogr2OgrTableToPostGisList(),
            OgrSql(),
        ]
Code Example #6
def q3helper(image1, image2, part):
    gauPyr1 = pyramid.gaussPyramid(image1, 3)
    gauPyr2 = pyramid.gaussPyramid(image2, 3)
    for i in range(len(gauPyr1)):
        plotDisplacements(gauPyr1[i].astype(np.float32),
                          gauPyr2[i].astype(np.float32),
                          'output/ps5-3-a-%d-%d.png' % (part, i))
        U, V = lk_flow(gauPyr1[i].astype(np.float32),
                       gauPyr2[i].astype(np.float32))
        warped = warp.warp(gauPyr2[i].astype(np.float32), U, V)
        diff = warped - gauPyr1[i].astype(np.float32)
        cv2.imwrite("output/ps5-3-a-%d-%d.png" % (part + 1, i), upscale(diff))
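
pyramid.gaussPyramid is project code; a common implementation (an assumption here) blurs and downsamples repeatedly, which cv2.pyrDown does in one call:

import cv2

def gauss_pyramid(img, levels):
    # level 0 is the original image; each further level is blurred and halved
    pyr = [img]
    for _ in range(levels):
        pyr.append(cv2.pyrDown(pyr[-1]))
    return pyr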
Code Example #7
def make_prediction(detector, detection_probability, classifier,
                    data_transform):
    frame_name = []
    emotion_predictions = []
    device = torch.device("cpu")
    list_frames = os.listdir(TEST_DIR)
    list_frames.sort(key=natural_keys)
    for frame in list_frames:
        print("Start warping frame {} ...".format(frame))
        frame_name.append(frame)
        input_path = os.path.join(TEST_DIR, frame)
        output_path = os.path.join(OUTPUT_TEST_DIR, "detected_" + frame)

        detections, extracted_objects_array = warp(detector,
                                                   detection_probability,
                                                   input_path, output_path)

        if not detections:
            print("No one has been found on frame {}".format(frame))
            emotion_predictions.append("Unknown")
            continue
        else:
            path_to_input = ""
            for detection, object_path in zip(detections,
                                              extracted_objects_array):
                print(object_path)
                print(
                    detection["name"],
                    " : ",
                    detection["percentage_probability"],
                    " : ",
                    detection["box_points"],
                )
                print(
                    "---------------------------------------------------------\n"
                )
                path_to_input = object_path
                if detection["name"] == "Tom":
                    break
            # Send a warped image to the input of a classifier
            print("Sending a warped image to the input of a classifier...")
            with torch.set_grad_enabled(False):
                input_image = _image_loader(data_transform,
                                            path_to_input).to(device)
                classifier.to(device)
                preds = classifier(input_image)
                preds_class = preds.argmax(dim=1)
            emotion_predictions.append(class_names[preds_class])
    return (frame_name, emotion_predictions)
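
natural_keys is assumed here to be the usual natural-sort key, so that frame2.png sorts before frame10.png; a typical definition:

import re

def natural_keys(text):
    # split "frame10.png" into ['frame', 10, '.png'] so numbers compare numerically
    return [int(tok) if tok.isdigit() else tok
            for tok in re.split(r'(\d+)', text)]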
Code Example #8
def hierarchical_lk(init, final, levels=4):
    Lk = pyramid.gaussPyramid(init, levels)
    Rk = pyramid.gaussPyramid(final, levels)
    #for i in range(levels):
    #    Lk = pyramid.reduce(Lk)
    #    Rk = pyramid.reduce(Rk)
    U = np.zeros(Lk[-1].shape, dtype=np.float32)
    V = np.zeros(Lk[-1].shape, dtype=np.float32)
    for i in range(levels, -1, -1):
        print(U.shape, V.shape, Lk[i].shape)
        Wk = warp.warp(Lk[i], -U, -V).astype(np.float32)
        print(Wk.dtype, Rk[i].dtype)
        Dx, Dy = lk_flow(Wk, Rk[i].astype(np.float32))
        U = U + Dx
        V = V + Dy
        U = pyramid.expand(U) * 2
        V = pyramid.expand(V) * 2
    return U, V
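
lk_flow is project code; a minimal dense Lucas-Kanade sketch (an assumption about its behaviour) solves the per-pixel 2x2 normal equations of the brightness-constancy constraint over a box window:

import numpy as np
import cv2

def lk_flow(img1, img2, win=15):
    # spatial derivatives of the first image, temporal difference between frames
    Ix = cv2.Sobel(img1, cv2.CV_32F, 1, 0, ksize=3)
    Iy = cv2.Sobel(img1, cv2.CV_32F, 0, 1, ksize=3)
    It = img2.astype(np.float32) - img1.astype(np.float32)
    # windowed sums of the normal-equation terms
    k = (win, win)
    Ixx = cv2.boxFilter(Ix * Ix, -1, k)
    Ixy = cv2.boxFilter(Ix * Iy, -1, k)
    Iyy = cv2.boxFilter(Iy * Iy, -1, k)
    Ixt = cv2.boxFilter(Ix * It, -1, k)
    Iyt = cv2.boxFilter(Iy * It, -1, k)
    # closed-form 2x2 solve per pixel; flat regions get zero flow
    det = Ixx * Iyy - Ixy * Ixy
    det = np.where(np.abs(det) < 1e-6, np.inf, det)
    U = (-Iyy * Ixt + Ixy * Iyt) / det
    V = (Ixy * Ixt - Ixx * Iyt) / det
    return U, V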
Code Example #9
File: chage_age.py Project: zx4829468/magic_face
def warp_target(subject, target):
    if (target.shape[0] > subject.shape[0]):
        print('bigger target')
        new_subject = np.zeros(
            (target.shape[0] - subject.shape[0], subject.shape[1], 3),
            dtype=subject.dtype)
        subject = np.vstack((subject, new_subject))
    else:
        print('bigger subject')
        new_target = np.zeros(
            (subject.shape[0] - target.shape[0], target.shape[1], 3),
            dtype=target.dtype)
        target = np.vstack((target, new_target))

    if (subject.shape[0] % 2 != 0):
        zero_layer = np.zeros((1, target.shape[1], 3), dtype=target.dtype)
        target = np.vstack((target, zero_layer))
        subject = np.vstack((subject, zero_layer))
    warped_target = warp.warp(target, subject)
    return subject, warped_target
Code Example #10
File: lanefinding.py Project: andreas28/CarND
def main():

    # Load camera matrix and distortion parameters from pickle file
    calib_mtx, calib_dist = load_calib_data("calib_data.p")

    # Make a list of test images
    images = glob.glob('test_images/*.jpg')
    src, dst, M, Minv = prepare_perspective_transform()

    for img_file in images:
        img = cv2.imread(img_file)
        img_undistorted = cv2.undistort(img, calib_mtx, calib_dist, None,
                                        calib_mtx)
        thres = thresholding(img_undistorted, weight=(0.5, 0.5), thres=60)

        img_warped = warp(thres, M)

        hist = initial_sliding_window(img_warped)
        plt.plot(hist)  # plot the lane-pixel histogram for each test image
        plt.show()

    cv2.destroyAllWindows()
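
prepare_perspective_transform and warp are project helpers; a plausible sketch follows. The quad coordinates are hypothetical values for a 1280x720 dashcam frame, not the project's actual calibration:

import numpy as np
import cv2

def prepare_perspective_transform():
    # hypothetical trapezoid around the lane, mapped to a rectangle
    src = np.float32([[580, 460], [700, 460], [1040, 680], [240, 680]])
    dst = np.float32([[300, 0], [980, 0], [980, 720], [300, 720]])
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    return src, dst, M, Minv

def warp(img, M):
    # bird's-eye view of the thresholded frame
    return cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))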
Code Example #11
def pipeline(img,
             mtx,
             dist,
             M,
             Minv,
             lane_centers,
             debug=False,
             raw_fname=None):
    # the pipeline to process images.
    undistorted = cv2.undistort(img, mtx, dist, None, mtx)

    binary = binary_for_lanes(undistorted)
    # remove distraction by cropping to keep on the region of interests
    cropped = keep_region_of_interest(binary)
    # cropped = binary
    if debug:
        mpimg.imsave("./output_images/binary_" + os.path.basename(raw_fname),
                     cropped,
                     cmap='gray')

    warped = warp.warp(cropped, M)
    if debug:
        saved_name = './output_images/' + 'warped_' + os.path.basename(
            raw_fname)
        mpimg.imsave(saved_name, warped, cmap='gray')

    lanes_on_warped, left_fit, right_fit, curverad = tracker.lanesDetection(
        warped, lane_centers)
    result, offset, lanes_on_warped = lane_centers.display_detected(
        left_fit, right_fit, 5, img, Minv, curverad, lanes_on_warped)
    result = compose_diagScreen(curverad=curverad,
                                offset=offset,
                                mainDiagScreen=result,
                                diag1=binary,
                                diag2=cropped,
                                diag4=warped,
                                diag5=undistorted,
                                diag7=lanes_on_warped)
    return result, binary, warped, undistorted
Code Example #12
def main():
    path_to_model = args.model
    detector = load_detector(path_to_model, detection_config_path)

    mode = args.set
    probability = args.probability

    print("Warp frames from the {} set: \n".format(mode))

    if mode == "train":
        input = TRAIN_PATH
        output = TRAIN_OUTPUT_PATH

    elif mode == "val":
        input = VAL_PATH
        output = VAL_OUTPUT_PATH

    for frame in os.listdir(input_dir):
        print("Start warping frame {} ...".format(frame))
        input_path = os.path.join(input_dir, frame)
        output_path = os.path.join(output_dir, "detected_" + frame)
        detections, extracted_objects_array = warp(detector, probability,
                                                   input_path, output_path)

        print("Detections: ", detections)
        print("Extracted object arrays: ", extracted_objects_array)

        for detection, object_path in zip(detections, extracted_objects_array):
            print(object_path)
            print(
                detection["name"],
                " : ",
                detection["percentage_probability"],
                " : ",
                detection["box_points"],
            )
            print(
                "---------------------------------------------------------\n")
Code Example #13
def generate_overlay(img_dst, floor_data, mat_data, status_bar_data,
                     overlay_data, mode):

    # Setup
    overlay_max_width = img_dst.shape[1] - overlay_data[
        'status_bar_min_width']  # Maximum overlay width (status bar dependent)
    overlay_max_height = int(
        img_dst.shape[0] / 100 * overlay_data['overlay_max_height']
    )  # Maximum overlay height (percentage (!) of frame height)

    if overlay_data['overlay_position'] not in (0, 1, 2, 3):  # Check overlay position validity
        print('Invalid overlay position.')
        sys.exit(-1)

    # Source image generation
    img_src, h, warp_offset, map_dim = warp(img_dst, floor_data, mat_data,
                                            mode)  # Generate map source image
    src_width = img_src.shape[1]  # Map source original width
    src_height = img_src.shape[0]  # Map source original height

    # Map size check
    if img_src.shape[1] + overlay_data[
            'border_thickness'] * 2 > overlay_max_width:  # Check if map width is too large
        new_width = overlay_max_width - overlay_data['border_thickness'] * 2
        new_height = int(overlay_max_width / img_src.shape[1] *
                         img_src.shape[0])
        new_width = max(new_width, 1)  # in extreme cases either might become 0, so 1 is the minimum
        new_height = max(new_height, 1)
        img_src = cv2.resize(img_src,
                             (new_width, new_height))  # Resize map accordingly

    if img_src.shape[0] + overlay_data[
            'border_thickness'] * 2 > overlay_max_height:  # Check if map height is too large
        new_height = overlay_max_height
        new_width = int(overlay_max_height / img_src.shape[0] *
                        img_src.shape[1])
        new_width = max(new_width, 1)  # in extreme cases either might become 0, so 1 is the minimum
        new_height = max(new_height, 1)
        img_src = cv2.resize(img_src,
                             (new_width, new_height))  # Resize map accordingly

    # Map background
    if img_src.shape[0] < overlay_data[
            'status_bar_min_height']:  # Add empty background (= border) to map if it's too small with respect to the status bar
        offset = int(
            (overlay_data['status_bar_min_height'] -
             (img_src.shape[0] + overlay_data['border_thickness'] * 2 - 2)) / 2
        ) - 1  # An additional top border will introduce an offset that must be considered for a correct people position drawing
        if offset < 0:
            offset = 0
        if (overlay_data['status_bar_min_height'] -
            (img_src.shape[0] + overlay_data['border_thickness'] * 2 -
             1)) % 2 == 0:  # Compute and add top and bottom border
            img_src = cv2.copyMakeBorder(
                img_src,
                offset + 1,
                offset + 1,
                0,
                0,
                cv2.BORDER_CONSTANT,
                value=status_bar_data['status_bar_background'])
        else:
            img_src = cv2.copyMakeBorder(
                img_src,
                offset,
                offset + 1,
                0,
                0,
                cv2.BORDER_CONSTANT,
                value=status_bar_data['status_bar_background'])
        overlay_data['ver_offset'] += int(offset / 2)
    else:
        offset = 0

    # Map external border creation
    img_src = cv2.copyMakeBorder(
        img_src,
        overlay_data['border_thickness'],
        0,
        overlay_data['border_thickness'],
        0,
        cv2.BORDER_CONSTANT,
        value=status_bar_data['overlay_left_top_border'])
    img_src = cv2.copyMakeBorder(
        img_src,
        0,
        overlay_data['border_thickness'],
        0,
        overlay_data['border_thickness'],
        cv2.BORDER_CONSTANT,
        value=status_bar_data['overlay_right_bottom_border'])

    overlay_width = img_src.shape[1]
    overlay_height = img_src.shape[0]

    # Map position
    corners = []
    if overlay_data['overlay_position'] == 0:  # Top left
        start_point = [0, 0]
        end_point = [overlay_width, overlay_height]
        corners.extend([(end_point[0], 0), (img_dst.shape[1] - 1, 0),
                        (img_dst.shape[1] - 1,
                         overlay_data['status_bar_min_height']),
                        (end_point[0], overlay_data['status_bar_min_height'])])
    elif overlay_data['overlay_position'] == 1:  # Top right
        start_point = [img_dst.shape[1] - overlay_width, 0]
        end_point = [img_dst.shape[1], overlay_height]
        corners.extend([(0, 0), (start_point[0] - 1, 0),
                        (start_point[0] - 1,
                         overlay_data['status_bar_min_height']),
                        (0, overlay_data['status_bar_min_height'])])
    elif overlay_data['overlay_position'] == 2:  # Bottom right
        start_point = [
            img_dst.shape[1] - overlay_width, img_dst.shape[0] - overlay_height
        ]
        end_point = [img_dst.shape[1], img_dst.shape[0]]
        corners.extend([
            (0, start_point[1] - overlay_data['status_bar_min_height'] +
             overlay_height - 1),
            (start_point[0] - 1, start_point[1] -
             overlay_data['status_bar_min_height'] + overlay_height - 1),
            (start_point[0] - 1, img_dst.shape[0] - 1),
            (0, img_dst.shape[0] - 1)
        ])
    elif overlay_data['overlay_position'] == 3:  # Bottom left
        start_point = (0, img_dst.shape[0] - overlay_height)
        end_point = (overlay_width, img_dst.shape[0])
        corners.extend([
            (end_point[0], start_point[1] -
             overlay_data['status_bar_min_height'] + overlay_height - 1),
            (img_dst.shape[1] - 1, start_point[1] -
             overlay_data['status_bar_min_height'] + overlay_height - 1),
            (img_dst.shape[1] - 1, img_dst.shape[0] - 1),
            (end_point[0], img_dst.shape[0] - 1)
        ])

    # Map ratios (needed for correct further transformations)
    width_ratio = src_width / img_src.shape[1]
    height_ratio = src_height / img_src.shape[0]

    overlay_additional_data = {
        'img_src': img_src,
        'overlay_dim': [overlay_width, overlay_height],
        'start_end_points': [start_point, end_point],
        'corners': corners,
        'h': h,
        'width_height_ratio': [width_ratio, height_ratio],
        'map_offset': offset,
        'warp_offset': warp_offset,
        'map_dim': map_dim
    }

    overlay_data.update(
        overlay_additional_data
    )  # Update overlay_data dictionary with the map and its data

    return
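
generate_overlay only fills the overlay_data dictionary; a minimal sketch of the consumption the caller presumably performs (hypothetical, based only on the fields stored above):

def paste_overlay(img_dst, overlay_data):
    # place the bordered map at the corner chosen by overlay_position
    sp, ep = overlay_data['start_end_points']
    img_dst[sp[1]:ep[1], sp[0]:ep[0]] = overlay_data['img_src']
    return img_dst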
Code Example #14
File: main.py Project: andreas28/CarND
def main():

    # Load camera matrix and distortion parameters from pickle file
    calib_mtx, calib_dist = load_calib_data("calib_data.p")


    # Load Video
    clip1 = VideoFileClip("project_video.mp4")
    #clip1 = clip1.subclip(38)
    #clip1 = VideoFileClip("challenge_video.mp4")

    #Create Video writer
    #clip_out = VideoFileClip("out.mp4")

    #initial threshold for color/sobel
    g_thres = 40
    left_not_plausible_frames = 50
    right_not_plausible_frames = 50

    #prepare forward and inverse matrix
    src, dst, M, Minv = prepare_perspective_transform()

    #store previous coefficients
    prev_left_coeffs = None
    prev_right_coeffs = None
    prev_left_curvature = None
    prev_right_curvature = None

    for img in clip1.iter_frames():
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

        #Undistort image
        img_undistorted = cv2.undistort(img, calib_mtx, calib_dist, None, calib_mtx)

        #Apply thresholding
        #thres = thresholding(img_undistorted, weight=(0.5,5.0), thres=g_thres)
        thres = thresholding(img_undistorted, weight=(0.2,0.8), thres=g_thres)

        #Warp image
        img_warped = warp(thres, M)

        #Calculate new threshold depending on number of non-zero pixels
        nonzeroes = (np.sum(img_warped) / (255*img_warped.shape[0]*img_warped.shape[1]))
        if nonzeroes <= 0.065:  # earlier threshold: 0.07
            g_thres = g_thres - 2
        if nonzeroes >= 0.08:  # earlier threshold: 0.075
            g_thres = g_thres + 2

        #print (nonzeroes)
        #print (g_thres)

        #Search lanes
        if (left_not_plausible_frames == 0) or (right_not_plausible_frames == 0) or prev_right_coeffs is None or prev_left_coeffs is None:
            left_coeffs, right_coeffs, left_curverad, right_curverad = initial_sliding_window(img_warped)
        else:
            left_coeffs, right_coeffs, left_curverad, right_curverad = search_in_margin(img_warped, prev_left_coeffs, prev_right_coeffs)


        #Calculate distance to left and right lane
        left_center, right_center = calculate_center(left_coeffs, right_coeffs)

        #print (left_curverad)
        #print (right_curverad)
        #print("-------")
        #print ("1: %f 2: %f 3: %f" % (right_coeffs[0], right_coeffs[1], right_curverad))
        #print ("1: %f 2: %f 3: %f" % (left_coeffs[0], left_coeffs[1], left_curverad))
        #print ("left_center %f right_center %f" % (left_center, right_center))


        # Check left and right lane for plausibility, if not plausible, use last plausible lane for at least 100 frames
        if (left_not_plausible_frames > 0) and not plausible_pixel_pos(left_coeffs, prev_left_coeffs, y_positions=[0, 700], diffs_thresholds=[100, 100] ):
            left_coeffs = prev_left_coeffs
            left_curverad = prev_left_curvature
            left_not_plausible_frames -= 1
        else:
            left_not_plausible_frames = 50
            prev_left_coeffs = left_coeffs
            prev_left_curvature = left_curverad

        if (right_not_plausible_frames > 0) and not plausible_pixel_pos(right_coeffs, prev_right_coeffs, y_positions=[0, 700], diffs_thresholds=[100, 100] ):
            right_coeffs = prev_right_coeffs
            right_curverad = prev_right_curvature
            right_not_plausible_frames -= 1
        else:
            right_not_plausible_frames = 50
            prev_right_coeffs = right_coeffs
            prev_right_curvature = right_curverad


        #Draw polynoms in image
        img_poly = np.zeros_like(img_warped)
        draw_polynom(img_poly, left_coeffs, right_coeffs)

        #Draw area
        img_area = np.zeros_like(img_warped)
        img_area = np.dstack((img_area, img_area, img_area))
        draw_area(img_area, left_coeffs, right_coeffs)

        #Warp back to image
        unwarped_poly = cv2.warpPerspective(img_poly, Minv, (img_undistorted.shape[1], img_undistorted.shape[0]))
        unwarped_area = cv2.warpPerspective(img_area, Minv, (img_undistorted.shape[1], img_undistorted.shape[0]))

        #Draw red lane lines on image
        img_undistorted[unwarped_poly > 1] = [0,0,255]

        #Draw area
        img_undistorted = cv2.addWeighted(unwarped_area, 0.2, img_undistorted, 0.8, 1)

        #Write text on image
        write_on_image(img_undistorted, left_center, right_center, left_curverad, right_curverad)

        #plt.plot(hist)
        #plt.show()

        cv2.imshow("org", img_undistorted)
        cv2.imshow("warp", img_warped)
        cv2.imshow("thres", thres)
        cv2.waitKey(10)


    cv2.destroyAllWindows()
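
thresholding is project code; a plausible sketch (an assumption, not the project's implementation) blends scaled horizontal-gradient magnitude with the HLS saturation channel and binarises at thres, returning the 0/255 image that the non-zero-pixel statistic above expects:

import numpy as np
import cv2

def thresholding(img, weight=(0.5, 0.5), thres=60):
    # scaled horizontal-gradient magnitude
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sobelx = np.abs(cv2.Sobel(gray, cv2.CV_32F, 1, 0, ksize=3))
    sobelx = 255.0 * sobelx / max(float(sobelx.max()), 1e-6)
    # saturation channel picks up coloured lane paint
    s = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)[:, :, 2].astype(np.float32)
    blended = weight[0] * sobelx + weight[1] * s
    out = np.zeros_like(gray)
    out[blended > thres] = 255
    return out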
Code Example #15
import cv2
import numpy as np

def click(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        param.append(np.array([y, x]))


# lists that collect the clicked (y, x) correspondences for each window
img1Clicks = []
img2Clicks = []

cv2.namedWindow('image')
cv2.setMouseCallback('image', click, img1Clicks)
cv2.namedWindow('image2')
cv2.setMouseCallback('image2', click, img2Clicks)

# Get correspondences from mouse clicks
while True:
    cv2.imshow('image', img)
    cv2.imshow('image2', img2)
    k = cv2.waitKey(1)
    if k > 0:
        break

H = homography(img2Clicks, img1Clicks)
imOut = warp(img2, H)

imOut[0:img.shape[0], 0:img.shape[1]] = img
while True:
    cv2.imshow('output', imOut)
    k = cv2.waitKey(1) & 0XFF
    if k > 0:
        break

cv2.waitKey(0)
cv2.destroyAllWindows()
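
homography and warp are course helpers. Since click stores points as (y, x), a plausible sketch (an assumption about the helper's behaviour) converts to (x, y) and delegates to cv2.findHomography:

import numpy as np
import cv2

def homography(src_pts, dst_pts):
    # click() appended points as (y, x); OpenCV expects (x, y)
    src = np.float32([p[::-1] for p in src_pts])
    dst = np.float32([p[::-1] for p in dst_pts])
    H, _ = cv2.findHomography(src, dst, method=0)  # plain DLT, no RANSAC
    return H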
Code Example #16
File: swap_faces.py Project: jthirani/FaceSwapping
def swap_faces(source, target):
    # Get the frame from the target image
    target_image, target_points = detect(target)
    old_gray = cv2.cvtColor(target_image, cv2.COLOR_BGR2GRAY)
    face_det = dlib.get_frontal_face_detector()
    face_pred = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    video = cv2.VideoCapture(source)
    retval, frame = video.read()

    # Output video parameters
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    height = len(frame)
    width = len(frame[0])

    fps = video.get(cv2.CAP_PROP_FPS)
    output = cv2.VideoWriter('output5.avi', fourcc, fps, (width, height))

    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))
    # Getting the first frame for optical flow
    gray_previous = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    dets = face_det(gray_previous, 1)
    for (i, det) in enumerate(dets):
        points_previous = face_utils.shape_to_np(face_pred(gray_previous, det))

    count = 0
    while True:
        print(count)
        count += 1
        retval, frame_new = video.read()
        if retval:
            gray_frame_new = cv2.cvtColor(frame_new, cv2.COLOR_BGR2GRAY)
            points_previous = points_previous.reshape(68, 2).astype(np.float32)
            of_points_new, status, err = cv2.calcOpticalFlowPyrLK(
                gray_previous, gray_frame_new, points_previous, None,
                **lk_params)
            dets_new = face_det(gray_frame_new, 1)
            for (i, det) in enumerate(dets_new):
                points_new = face_utils.shape_to_np(
                    face_pred(gray_frame_new, det))

            # Getting the coordinates of the features in the frame through optical flow and dlib facial detection
            new_points = np.zeros((68, 2))
            for i in range(68):
                # Case where features are not easily visible - Eg: Side view, bad lighting
                if (len(points_new) != 68 and status[i] == 1):
                    new_points[i, :] = of_points_new[i, :]
                # Case where each of the facial features is visible
                elif (len(points_new) == 68 and status[i] == 1):
                    new_points[i, :] = 0.5 * of_points_new[
                        i, :] + 0.5 * points_new[i, :]
                # Otherwise, take average of new facial features and old ones
                else:
                    new_points[i, :] = 0.5 * points_previous[
                        i, :] + 0.5 * points_new[i, :]

            swapped = warp(frame_new, target_image, new_points, target_points)
            # filter out landmark points that fall outside the frame
            print(np.max(new_points.shape), 1)
            source_r = np.zeros(new_points.shape)
            count2 = 0
            for i in range(new_points.shape[0]):
                if (not (new_points[i][0] >= frame_new.shape[1]
                         or new_points[i][1] >= frame_new.shape[0]
                         or new_points[i][0] < 0 or new_points[i][1] < 0)):
                    source_r[count2, :] = new_points[i, :]
                    count2 = count2 + 1
                else:
                    print("got one")
            print(count2)
            source_r = source_r[:count2]
            print(source_r.shape)
            new_points = np.array(source_r).astype(int)
            print(new_points.shape)
            hull = cv2.convexHull(np.array(new_points), False).astype(np.int32)
            mask = np.zeros(frame_new.shape, dtype=np.uint8)
            cv2.fillConvexPoly(mask, hull, (255, 255, 255))

            boundingRect = cv2.boundingRect(np.float32(hull))
            width = boundingRect[2]
            height = boundingRect[3]
            centreX = int(boundingRect[0] + width / 2)
            centreY = int(boundingRect[1] + height / 2)
            center = (centreX, centreY)
            swapped = cv2.seamlessClone(swapped, frame_new, mask, center,
                                        cv2.NORMAL_CLONE)

            output.write(swapped)
            gray_previous = gray_frame_new
            points_previous = points_new
        else:
            break

    video.release()
    output.release()