Example #1
from moviepy.editor import VideoFileClip


def video_convert_fl(input_name, channel, out_suf='', is_sub=False, sub_time=5):
    white_output = 'test_videos_output' + out_suf + '/' + input_name + '.mp4'
    clip1 = VideoFileClip("test_videos/" + input_name + ".mp4")
    if is_sub:
        # Process only the first sub_time seconds for quicker testing.
        clip1 = clip1.subclip(0, sub_time)
    # channel is a frame filter with signature (get_frame, t) -> frame.
    white_clip = clip1.fl(channel)
    white_clip.write_videofile(white_output, audio=False)
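For reference, a minimal hedged call sketch; the file name and the identity filter below are illustrative, not taken from the original snippet:

# Hypothetical usage: run an identity filter over the first 5 seconds of
# test_videos/solidWhiteRight.mp4 (the file name is an assumption).
video_convert_fl('solidWhiteRight', lambda get_frame, t: get_frame(t),
                 is_sub=True, sub_time=5)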
Example #2
    def process(self, sub_clip=None):
        """
        Process the video clip.
        :param sub_clip: optionally specify a sub clip (start, end)
        :return: None
        """
        clip = VideoFileClip(self.input_file)
        if sub_clip:
            clip = clip.subclip(sub_clip[0], sub_clip[1])
        # Replace each frame with the result of self._process_image.
        out_clip = clip.fl(lambda gf, t: self._process_image(gf(t), t))
        out_clip.write_videofile(self.output_file, audio=False)
Example #3
def run_pipeline(video_file, duration=None, end=False):
    """Runs pipeline on a video and writes it to temp folder"""
    print('processing video file {}'.format(video_file))
    clip = VideoFileClip(video_file)

    if duration is not None:
        if end:
            clip = clip.subclip(clip.duration - duration)
        else:
            clip = clip.subclip(0, duration)

    line_history = LineHistory()
    # fl's second argument is apply_to; an empty list means the frame filter
    # is not applied to the clip's mask or audio.
    processed = clip.fl(lambda gf, t: pipeline(gf(t), line_history), [])
    processed.write_videofile('temp/' + video_file, audio=False)
Example #4
def main(video_file, duration=None, end=False):
    """Runs pipeline on a video and writes it to temp folder"""
    print('processing video file {}'.format(video_file))
    clip = VideoFileClip(video_file)

    if duration is not None:
        if end:
            clip = clip.subclip(clip.duration - duration)
        else:
            clip = clip.subclip(0, duration)

    # load SVM classifier & scaler from file
    clf, scaler = train_classifier.load_classifier()
    history = HeatMapHistory()
    processed = clip.fl(lambda gf, t: pipeline(gf(t), history, clf, scaler),
                        [])
    processed.write_videofile('output.mp4', audio=False)
Example #5
    def ProcessVideoClip(self, input_file):
        """
        Apply the FindLaneLines() function to each frame in a given video file.
        Save the results to a new video file in the same location using the
        same filename but with "_lanes" appended.

        Args:
            input_file (str): Process this video file.

        Returns:
            None

        To speed up the testing process or for debugging we can use a subclip
        of the video. To do so add

            .subclip(start_second, end_second)

        to the end of the line below, where start_second and end_second are
        integer values representing the start and end of the subclip.
        """
        file_name, ext = os.path.splitext(input_file)
        # Optional location for modified video frames.
        self.video_dir = file_name + '_lanes' if WRITE_OUTPUT_FRAMES else None

        # Open the video file.
        input_clip = VideoFileClip(input_file)  # .subclip(40, 45)

        # For each frame in the video clip, replace the frame image with the
        # result of applying the 'FindLaneLines' function.
        # NOTE: this function expects color images!!
        self.current_frame = 0
        output_clip = input_clip.fl(self.FindLaneLines)

        # Save the resulting, modified, video clip to a file.
        output_file = file_name + '_lanes' + ext
        output_clip.write_videofile(output_file, audio=False)

        # Cleanup: release the ffmpeg reader processes. Guard the audio
        # readers, since a clip without an audio track has audio == None.
        input_clip.reader.close()
        if input_clip.audio is not None:
            input_clip.audio.reader.close_proc()
        del input_clip
        output_clip.reader.close()
        if output_clip.audio is not None:
            output_clip.audio.reader.close_proc()
        del output_clip
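As the docstring above suggests, a subclip keeps debugging runs short. A minimal hedged sketch; the 40-45 second window mirrors the commented-out hint in the code:

# Hypothetical debugging variant: process only seconds 40 to 45 of the input.
input_clip = VideoFileClip(input_file).subclip(40, 45)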
Example #6
class Tracker(object):
    def __init__(self):
        self.cumheat = []
        self.input_video_clip = VideoFileClip(video_input)
        self.output_video_clip = self.input_video_clip.fl(self.pipeline)

    def pipeline(self, gf, t):
        img = gf(t)
        aux = np.copy(img)
        # The frame being searched is JPEG-style (0 to 255); the classifier
        # was trained on PNGs loaded by mpimg (0 to 1), so rescale to match.
        img = img.astype(np.float32) / 255
        windows = slide_variable_window(img, x_start, y_start, xy_window,
                                        x_overlap, y_overlap)

        hot_windows = search_windows(img,
                                     windows,
                                     svc,
                                     X_scaler,
                                     color_space=color_space,
                                     spatial_size=spatial_size,
                                     hist_bins=hist_bins,
                                     orient=orient,
                                     pix_per_cell=pix_per_cell,
                                     cell_per_block=cell_per_block,
                                     hog_channel=hog_channel,
                                     spatial_feat=spatial_feat,
                                     hist_feat=hist_feat,
                                     hog_feat=hog_feat)

        heat = np.zeros_like(img[:, :, 0]).astype(np.float64)
        heat = add_heat(heat, hot_windows)  # Add heat to each box in box list
        self.cumheat.append(heat)
        # Threshold the heat summed over the last n_frames to help remove
        # false positives.
        heat = apply_threshold(sum(self.cumheat[-n_frames:]), heat_thres)
        heatmap = np.clip(heat, 0, 255)  # Clip the heatmap for display

        # Find final boxes from the heatmap using the label function.
        labels = label(heatmap)
        draw_img = draw_labeled_bboxes(aux, labels)

        return draw_img
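A hedged usage sketch for the class above; `video_input` and the detection globals (svc, X_scaler, the window parameters, n_frames, heat_thres) are read directly from module scope, so they must exist before instantiation:

# Hypothetical driver code; all names besides Tracker are assumptions.
video_input = 'project_video.mp4'
tracker = Tracker()
tracker.output_video_clip.write_videofile('output_tracked.mp4', audio=False)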
Example #7
    def process(self, sub_clip=None, frame_divisor=4):
        """
        Process the video clip.
        :param frame_divisor: process one in every x frames (set to 0 or lower to disable)
        :param sub_clip: optionally specify a sub clip (start, end)
        :return: None
        """
        self.current_frame = 0

        def handle_frame(img, t):
            # Keep a running frame counter; this assumes fl renders frames in
            # order, which holds when writing the clip straight to a file.
            img = self._process_image(img, self.current_frame, frame_divisor)
            self.current_frame += 1
            return img

        clip = VideoFileClip(self.input_file)
        if sub_clip:
            clip = clip.subclip(sub_clip[0], sub_clip[1])
        out_clip = clip.fl(lambda gf, t: handle_frame(gf(t), t))
        out_clip.write_videofile(self.output_file, audio=False)
Example #8
    def get_outputs(self):
        super(ExtractSlideClipJob, self).get_outputs()

        warp_slide = self.warp_slides.get_outputs()
        enhance_contrast = self.enhance_contrast.get_outputs()

        # not doing the cut here but rather in the final video composition
        clip = VideoFileClip(os.path.join(self.video_location, self.video_filename))

        def apply_effects(get_frame, t):
            """Function that chains together all the post processing effects."""
            frame = get_frame(t)

            warped = warp_slide(frame)
            contrast_enhanced = enhance_contrast(warped, t)

            return contrast_enhanced

        # fl keeps the clip's duration by default (keep_duration=True).
        return clip.fl(apply_effects)
Example #9
def process_video_with_time(path_of_video,
                            effect_function=None,
                            save_to=None,
                            preview: bool = False):
    def return_the_same_frame(get_frame, t: float):
        return get_frame(t)

    if effect_function is None:
        effect_function = return_the_same_frame
    if save_to is None:
        file_ = Path(path_of_video)
        # Use a plain string path for compatibility with older moviepy.
        save_to = str(file_.with_name(file_.stem + "_modified.mp4"))

    clip = VideoFileClip(path_of_video)
    modified_clip = clip.fl(effect_function)

    if preview:
        modified_clip.preview()
    else:
        modified_clip.write_videofile(save_to)
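A hedged usage sketch with a custom effect; the fade-to-gray filter and the file name below are illustrative, not from the original snippet:

import numpy as np

def fade_to_gray(get_frame, t: float):
    # Blend each frame toward its grayscale version over the first 3 seconds.
    frame = get_frame(t)
    gray = np.repeat(frame.mean(axis=2, keepdims=True), 3, axis=2)
    alpha = min(t / 3.0, 1.0)
    return ((1 - alpha) * frame + alpha * gray).astype('uint8')

process_video_with_time('input.mp4', effect_function=fade_to_gray)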
Example #10
def process_video(fname):
    # Import everything needed to edit/save/watch video clips
    from moviepy.editor import VideoFileClip

    # nowStr for the filenames of the outputs
    import datetime
    now = datetime.datetime.now()
    nowStr = now.strftime("%Y-%m-%d_%H-%M-%S")
    output_fname = 'output/' + os.path.splitext(fname)[0] + '_' + nowStr + '.mp4'
    ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
    ## To do so add .subclip(start_second,end_second) to the end of the line below
    ## Where start_second and end_second are integer values representing the start and end of the subclip
    ## You may also uncomment the following line for a subclip of the first 5 seconds
    ##clip = VideoFileClip(fname).subclip(0,5)
    # project_video.mp4
    # clip = VideoFileClip(fname).subclip(20, 27)
    # clip = VideoFileClip(fname).subclip(20, 22)
    # clip = VideoFileClip(fname).subclip(38, 43)
    # clip = VideoFileClip(fname).subclip(48.5, None)
    clip = VideoFileClip(fname)
    clip = clip.fl(process_video_frame)  # NOTE: this function expects color images!
    if args.output:
        clip.write_videofile(output_fname, audio=False)
Example #11
def process_frame(get_frame, t):
    # (The beginning of this example is truncated in the source: `img`,
    # `windows`, `pt`, `f`, `f_smooth`, and `heatmap_history` are defined in
    # the missing part. The signature is inferred from the call to
    # clip1.fl(process_frame) below.)
    prob_thres = 1  # probability threshold
    bbox_list = []
    for bbox, bit in zip(windows, pt):
        if bit >= prob_thres:
            bbox_list.append(bbox)
    pp = pt[(pt >= prob_thres)]

    heatmap = add_heat2(img, bbox_list, pp)

    # Keep a rolling window of the last f_smooth heatmaps.
    heatmap_history[f % f_smooth, :, :] = heatmap

    heatmap_med = np.median(heatmap_history, axis=0)

    # heatmap_med[(heatmap_med < 1)] = 0
    # heatmap_med[(heatmap_med > 5)] = 5
    heatmap_med = cv2.GaussianBlur(heatmap_med, (11, 11), 0)

    # heatmap[(heatmap > 2)] = 2

    labels = label(heatmap_med)  # Find final boxes from the median heatmap
    draw_img = draw_labeled_bboxes(np.copy(get_frame(t)), labels)

    # draw_img = draw_boxes(get_frame(t), bbox_list, color=(0, 0, 255), thick=6)
    return draw_img

video_output = 'output_images/project_video.mp4'
clip1 = VideoFileClip("project_video.mp4")
white_clip = clip1.fl(process_frame)
white_clip.write_videofile(video_output, audio=False)
Example #12
# (The opening of this example is truncated in the source; the line below is
# the tail of a call, presumably the detector's construction.)
    threshold=0.2)

vid = VideoFileClip(video_filename)
fps = vid.fps

object_data = json.load(
    open(os.path.join('videos/incoming', data_filename), 'r'))


def transform(get_frame, time):
    frame = get_frame(time)

    idx = int(time * fps + start_time)
    result = object_data[idx]

    if result is None:
        result = detector.detect(frame)

    return detector.add_boxes(frame, result)


vid2 = vid.fl(transform)

if args.output:
    vid2.write_videofile(args.output)
else:
    tmp_file_name = '/tmp/tmp_video_style_transfer.mp4'
    vid2.write_videofile(tmp_file_name)

    os.rename(tmp_file_name, video_filename)
Example #13
def transform(image):
    binary_warped, undist, M = thresh_warp(image,
                                           mtx,
                                           dist,
                                           ch_thresh=(170, 255),
                                           sx_thresh=(30, 100))
    lane_obj = lane_lines(binary_warped, image, undist, inv(M))
    left_fit = lane_obj['left_fit']
    right_fit = lane_obj['right_fit']
    left_curverad = lane_obj['left_curverad']
    right_curverad = lane_obj['right_curverad']
    curverad = (left_curverad + right_curverad) / 2
    drift = lane_obj['drift']
    out_img = lane_obj['out_img']
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(out_img, 'Radius of Curvature = ' + str(int(curverad)) + '(m)',
                (0, 130), font, 1, (200, 255, 155), 2, cv2.LINE_AA)
    if drift > 0:
        pos = 'left'
    else:
        pos = 'right'
    cv2.putText(out_img,
                'Vehicle is {0:.3f}m {1} of center'.format(abs(drift), pos),
                (0, 230), font, 1, (200, 255, 155), 2, cv2.LINE_AA)
    return out_img


clip = VideoFileClip('project_video.mp4')
newclip = clip.fl(lambda gf, t: transform(gf(t)))
newclip.write_videofile("final_video.mp4")
Example #14
class LaneLineTracker(object):

    def __init__(self, video_path, calibration, source_points, destination_points, output_path='../output.mp4', output_debug_image=False):
        self.output_path = output_path
        self.source_points = source_points
        self.destination_points = destination_points
        self.calibration = calibration
        self.left_lines = []
        self.right_lines = []
        self.curvatures = []
        self.frame_number = 0
        self.output_debug_image = output_debug_image
        if video_path is not None:
            self.input_video_clip = VideoFileClip(video_path)
            self.output_video_clip = self.input_video_clip.fl(self.process_frame)

    def process_video(self):
        self.output_video_clip.write_videofile(self.output_path, audio=False)

    def draw_text(self, frame, text, x, y):
        cv2.putText(frame, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .8, (255, 255, 255), 2)

    def process_frame(self, gf, t):
        image = gf(t)
        return self.process_image(image)

    def process_image(self, image):
        undistorted = cv2.undistort(image, self.calibration.camera_matrix, self.calibration.distortion_coefficients, None)
        filtered = get_edge_mask(undistorted, input_image_color_space='rgb')
        warped = warp_birds_eye(filtered, self.source_points, self.destination_points)
        windows, left_window_points, right_window_points = sliding_window(warped)
        left_line = LaneLine(warped, left_window_points)
        right_line = LaneLine(warped, right_window_points)

        left_line.fit()
        right_line.fit()

        moving_n = 8

        self.left_lines.append(left_line)
        self.right_lines.append(right_line)

        original_birds_eye = warp_birds_eye(undistorted, self.source_points, self.destination_points)
        filtered_birds_eye = warp_birds_eye(filtered, self.source_points, self.destination_points)

        y = left_line.generate_y()

        lane_drawing = np.zeros_like(original_birds_eye)
        left_x = np.median(np.array([ l.evaluate() for l in self.left_lines[-moving_n:] ]), axis=0)
        right_x = np.median(np.array([ l.evaluate() for l in self.right_lines[-moving_n:] ]), axis=0)

        left_points = np.vstack([left_x, y]).T
        right_points = np.vstack([right_x, y]).T

        all_points = np.concatenate([left_points, right_points[::-1], left_points[:1]])

        cv2.fillConvexPoly(lane_drawing, np.int32([all_points]), (0, 255, 0))

        unwarped_lane_drawing = warp_birds_eye(lane_drawing, self.source_points, self.destination_points, reverse=True)
        original_perspective_windows = warp_birds_eye(windows, self.source_points, self.destination_points, reverse=True)

        frame = cv2.addWeighted(undistorted, 1.0, unwarped_lane_drawing, 0.2, 0)

        l = np.average(np.array([line.camera_distance() for line in self.left_lines[-moving_n:]]))
        r = np.average(np.array([line.camera_distance() for line in self.right_lines[-moving_n:]]))
        if l - r > 0:
            self.draw_text(frame, '{:.3} cm right of center'.format((l - r) * 100), 20, 115)
        else:
            self.draw_text(frame, '{:.3} cm left of center'.format((r - l) * 100), 20, 115)

        self.curvatures.append(np.mean([left_line.curvature_radius(), right_line.curvature_radius()]))
        curvature = np.average(self.curvatures[-moving_n:])
        self.draw_text(frame, 'Radius of curvature:  {:.3} km'.format(curvature / 1000), 20, 80)

        if self.output_debug_image:
            window_centroids = find_window_centroids(warped)
            edges_with_windows = draw_windows(warped, window_centroids)
            color_edge_mask = get_edge_mask(undistorted, input_image_color_space='rgb', return_all_channels=True)
            color_edge_mask_birds_eye = warp_birds_eye(color_edge_mask, self.source_points, self.destination_points)
            gray_edge_mask_birds_eye = warp_birds_eye(filtered, self.source_points, self.destination_points)
            windows_perspective = warp_birds_eye(edges_with_windows, self.destination_points, self.source_points)
            plt.imsave('../video_output/' + '{:03}'.format(self.frame_number) + '-0-undistorted.jpg', undistorted)
            plt.imsave('../video_output/' + '{:03}'.format(self.frame_number) + '-1-edges.jpg', color_edge_mask)
            plt.imsave('../video_output/' + '{:03}'.format(self.frame_number) + '-2-undistorted-birds-eye.jpg', original_birds_eye)
            plt.imsave('../video_output/' + '{:03}'.format(self.frame_number) + '-3-edges-birds-eye.jpg', color_edge_mask_birds_eye)
            plt.imsave('../video_output/' + '{:03}'.format(self.frame_number) + '-4-windows.jpg', edges_with_windows)
            plt.imsave('../video_output/' + '{:03}'.format(self.frame_number) + '-5-windows-birds-eye.jpg', windows_perspective)
            plt.imsave('../video_output/' + '{:03}'.format(self.frame_number) + '-6-synthetic-lane-birds-eye.jpg', lane_drawing)
            plt.imsave('../video_output/' + '{:03}'.format(self.frame_number) + '-7-output.jpg', frame)

        self.frame_number += 1

        return frame
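A hedged usage sketch; the calibration object and the perspective points come from camera-calibration and setup steps outside this excerpt:

# Hypothetical driver code; `calibration`, `src`, and `dst` are assumptions.
tracker = LaneLineTracker('project_video.mp4', calibration, src, dst,
                          output_path='../output.mp4')
tracker.process_video()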
Example #15
    def write_frame(self, get_frame, t):
        # (The class head is truncated in the source; this method signature
        # and the `if` guard are inferred from the driver code below.)
        img = get_frame(t)
        if self.output_dir is not None:
            output_file = os.path.join(self.output_dir,
                                       'frame{:06d}.jpg'.format(self.current_frame))
            filled_lane_cv2 = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            cv2.imwrite(output_file, filled_lane_cv2)

        # Return the modified image.
        return img


wf = WriteFrame()
wf.output_dir = './project_video_frames'

current_frame = 0
input_file = './project_video.mp4'
input_clip = VideoFileClip(input_file)

output_clip = input_clip.fl(wf.write_frame)

# Save the resulting, modified, video clip to a file.
head, ext = os.path.splitext(input_file)
root, file_name = os.path.split(head)
output_file = os.path.join(wf.output_dir, file_name + '_lanes' + ext)
output_clip.write_videofile(output_file, audio=False)

# Cleanup: release the ffmpeg reader processes. Guard the audio readers,
# since a clip without an audio track has audio == None.
input_clip.reader.close()
if input_clip.audio is not None:
    input_clip.audio.reader.close_proc()
del input_clip
output_clip.reader.close()
if output_clip.audio is not None:
    output_clip.audio.reader.close_proc()
del output_clip