Example #1
def main():
    path = './inputvideos/'
    video = 'project_video_1'
    white_output = 'outputvideos/{}_done.mp4'.format(video)
    clip1 = VideoFileClip('{}.mp4'.format(path+video))#.subclip(0,6)
    white_clip = clip1.fl_image(process_image)  # NOTE: this function expects color images!!
    white_clip.write_videofile(white_output, audio=False)
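This example assumes a process_image helper defined elsewhere in the repository. fl_image calls it once per frame with an RGB numpy array of shape (H, W, 3) and expects an array of the same shape back; a minimal stub sketching that contract (the actual lane-finding logic is omitted):

def process_image(frame):
    # fl_image supplies each frame as an RGB uint8 array of shape (H, W, 3)
    # and expects an array of the same shape in return.
    annotated = frame.copy()
    # ... lane-detection drawing would be applied to `annotated` here ...
    return annotated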
Example #2
def save_detected_lane_on_video(video_path):
    """Saves annotated video with lane detection"""
    video = VideoFileClip(video_path)
    video_annotated = video.fl_image(lane_detector.process_image)
    name = os.path.basename(video_path)
    video_annotated_file_name = '{}{}'.format(name.split('.')[0], ANNOTATED_VIDEO_SUFFIX)
    video_annotated.write_videofile(video_annotated_file_name, audio=False)
Example #3
def car_detection_on_video(in_path, out_path):
    """ Performs car detection on each frame of the video """

    car_detector = CarDetector()
    clip = VideoFileClip(in_path)

    video = clip.fl_image(car_detector.detect_cars)
    video.write_videofile(out_path, audio=False)
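A typical call, with hypothetical paths:

car_detection_on_video('project_video.mp4', 'project_video_out.mp4')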
Example #4
def main():
    # video = 'harder_challenge_video'
    # video = 'challenge_video'
    video = 'project_video'
    white_output = '{}_done_2.mp4'.format(video)
    clip1 = VideoFileClip('{}.mp4'.format(video)).subclip(30, 51)
    white_clip = clip1.fl_image(process_image)  # NOTE: this function expects color images!!
    white_clip.write_videofile(white_output, audio=False)
Example #5
def main():
    # clip1 = VideoFileClip(args["input"])
    # project_clip = clip1.fl_image(find_lane)
    clip1 = VideoFileClip(VIDEOS[SELECTED_VIDEO])
    project_clip = clip1.fl_image(find_lane)

    project_output = VIDEOS[SELECTED_VIDEO][:-4] + '_ann.mp4'
    project_clip.write_videofile(project_output, audio=False)
Example #6
def video(input_video):
    """
    Build a video with vehicle-detection boxes and the number of vehicles detected.
    """
    original_video = '{}.mp4'.format(input_video)
    video = VideoFileClip(original_video)
    output_video = video.fl_image(process)
    output_video.write_videofile('{}_output.mp4'.format(input_video),
                                 audio=False)
Example #7
def annotate_video(video_path, save=False):
    """Returns or saves annotated video"""
    video = VideoFileClip(video_path)
    video_annotated = video.fl_image(lane_detector.process_image)
    if save:
        video_annotated_file_name = '{}{}'.format(video_path.split('.')[0], ANNOTATED_VIDEO_SUFFIX)
        video_annotated.write_videofile(video_annotated_file_name, audio=False)
    else:
        return video_annotated
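With save=False the annotated clip is returned rather than written, so it can be rendered later; for example (hypothetical paths):

clip = annotate_video('project_video.mp4')
clip.write_videofile('preview.mp4', audio=False)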
Example #8
def test_using_video():

    video_output = '..\\project_video_out.mp4'
    clip1 = VideoFileClip('..\\project_video.mp4')

    ctr[0] = 0
    video_clip = clip1.fl_image(
        process_image)  #NOTE: this function expects color images!!
    video_clip.write_videofile(video_output, audio=False)
Example #9
def main_video():
    camera = prepare()
    warper = Warper()
    s = lane.LaneSearch(window_count=15)
    clip = VideoFileClip('project_video.mp4')
    #clip = clip.subclip(t_start=38, t_end=43)
    #clip = clip.subclip(t_start=35, t_end=44)
    result = clip.fl_image(process(camera, warper, s))
    result.write_videofile('out.mp4', audio=False)
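Here process(camera, warper, s) must return the one-argument callable that fl_image expects, with the camera, warper, and lane search captured in a closure. A sketch of such a factory (the method names undistort, warp, and find_lanes are assumptions, not necessarily this repo's API):

def process(camera, warper, search):
    def process_frame(frame):
        undistorted = camera.undistort(frame)  # assumed method name
        warped = warper.warp(undistorted)      # assumed method name
        return search.find_lanes(warped)       # assumed method name; must return an RGB frame
    return process_frame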
Example #10
def main(video_name='other_video'):
    if video_name.endswith('.mp4'):
        video_name = video_name.rsplit('.', 1)[0]

    white_output = '{}_done.mp4'.format(video_name)
    # Uncomment the end of the line to analyze a subclip of the video.
    clip1 = VideoFileClip('{}.mp4'.format(video_name))  #.subclip(0, 5)
    white_clip = clip1.fl_image(
        process_image)  # NOTE: this function expects color images!!
    white_clip.write_videofile(white_output, audio=False)
Example #11
def process_video(video_file_name, input_video_dir, output_video_dir):
	if not os.path.exists(output_directory + '/' + output_video_dir):
		os.makedirs(output_directory + '/' + output_video_dir)
	clip = VideoFileClip(input_video_dir + '/' + video_file_name)
	output_clip = clip.fl_image(process_image)
	video_file_output = output_directory + '/' + output_video_dir + '/' + video_file_name
	output_clip.write_videofile(video_file_output, audio = False)
	display(
		HTML("""<video width="960" height="540" controls><source src="{0}"></video>""".format(video_file_output))
	)
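This snippet assumes a notebook context: display and HTML come from IPython (from IPython.display import display, HTML), and output_directory and process_image are globals defined elsewhere in the notebook.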
Example #12
    def detect_on_video(self, video_in_path=Constants.VIDEOS_IN_PATH):
        dataset_utils = DatasetUtils(
            detections_path=Constants.VIDEO_DETECTIONS_PATH)
        video = VideoFileClip(video_in_path)
        output_video = video.fl_image(self.detect)
        extension = video_in_path.split(".")[-1]
        filename = video_in_path.split('/')[-1].split('.')[0]
        filename = filename + "_result." + extension
        video_out_path = Constants.VIDEOS_OUT_PATH + filename
        output_video.write_videofile(video_out_path, audio=False)
        dataset_utils.save_detections(in_paths=[video_in_path],
                                      out_paths=[video_out_path])
Example #13
def process_video(name):
    if not os.path.exists(name):
        print("Can't find file:", name)
        return
    basename = os.path.basename(name)
    head, ext = os.path.splitext(basename)
    output_video = head + '_output' + ext

    svc, scaler, dcspace, spatial_size, hist_bins, orient, pix_per_cell, cell_per_block, hog_channel, spatial_feat, hist_feat, hog_feat = load_classifier()

    clip1 = VideoFileClip(name)
    output_clip = clip1.fl_image(lambda frame: process_frame(frame, svc, scaler, dcspace, spatial_size,
                                                             hist_bins, orient, pix_per_cell, cell_per_block, hog_channel, spatial_feat, hist_feat, hog_feat))
    output_clip.write_videofile(output_video, audio=False)
Example #14
def main():
    # Input file name, without the extension

    video = 'project_video'

    path = './inputvideos/'
    video_in = path + video
    time_name = time.strftime("%Y%m%d-%H%M%S")
    white_output = 'output_videos/{}_{}.webm'.format(video, time_name)
    # Change the subclip range as needed
    clip1 = VideoFileClip('{}.mp4'.format(video_in)).subclip(1, 6)

    white_clip = clip1.fl_image(
        process_image)  # NOTE: this function expects color images!!
    white_clip.write_videofile(white_output, audio=False, codec='libvpx')
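The explicit codec='libvpx' matches the .webm container; MoviePy would infer the same VP8 encoder from the file extension, so passing it here only makes the choice explicit.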
Example #15
    def process_video(self,
                      video_file,
                      file_out,
                      t_start=None,
                      t_end=None,
                      process_pool=None):

        input_clip = VideoFileClip(video_file)

        if t_start is not None:
            input_clip = input_clip.subclip(t_start=t_start, t_end=t_end)

        if self.debug:
            self.processed_frames = []

            stage_idx = 0

            output_clip = input_clip.fl_image(
                lambda frame: self.process_frame_stage(frame, stage_idx,
                                                       process_pool))
            output_clip.write_videofile(file_out, audio=False)

            if len(self.processed_frames) > 0:
                out_file_path = os.path.split(file_out)
                out_file_name = out_file_path[1].split('.')
                for _ in range(len(self.processed_frames[0]) - 1):
                    self.frame_count = 0
                    stage_idx += 1
                    stage_file = '{}.{}'.format(
                        os.path.join(out_file_path[0], out_file_name[0]) +
                        '_' + str(stage_idx), out_file_name[1])
                    output_clip.write_videofile(stage_file, audio=False)
        else:
            output_clip = input_clip.fl_image(
                lambda frame: self.process_frame(frame, process_pool))
            output_clip.write_videofile(file_out, audio=False)
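Note that the stage lambda reads stage_idx at call time rather than at definition time, so incrementing stage_idx and calling write_videofile again renders the clip through the next pipeline stage on each pass.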
Example #16
def process_video():
    pipeline = get_pipeline()

    clip = VideoFileClip(filename="./project_video.mp4")

    class Processor:
        def __init__(self):
            self.heatmap = None

        def bounding_box(self, image):
            thresholded = np.multiply(np.copy(self.heatmap.thresholded), 255).round().astype('uint8')
            thresholded = cv2.cvtColor(thresholded, cv2.COLOR_RGB2GRAY)

            ret, thresh = cv2.threshold(thresholded, 0, 255, cv2.THRESH_BINARY)
            im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

            for contour in contours:
                M = cv2.moments(contour)
                x, y, w, h = cv2.boundingRect(contour)
                image = cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

            return image

        def process(self, image):
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            if self.heatmap is None:
                self.heatmap = Heatmap(
                    image=image,
                    cooldown_rate=0.05,
                    warmup_rate=0.1,
                    threshold=0.8
                )
            pipeline.process(image, self.heatmap)
            image = self.bounding_box(image)
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            return image

    clip = clip.fl_image(Processor().process)

    clip.write_videofile(filename="./project_video_output.mp4", audio=False)
    pipeline.feature_extractor_cache.save()
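cv2.COLOR_BGR2RGB simply reverses the channel order, so the two cvtColor calls flip the RGB frames MoviePy supplies into BGR for the OpenCV-style pipeline and flip them back before each frame is returned.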
Example #17
def main(_):
    model = DCSCN.SuperResolution(FLAGS, model_name=FLAGS.model_name)
    model.build_graph()
    model.build_optimizer()
    model.build_summary_saver()

    model.init_all_variables()
    model.load_model()

    video = FLAGS.file
    lrclip = VideoFileClip(video).subclip(0, 20)
    audioclip = lrclip.audio
    audioclip.write_audiofile('audio' + video[:-4] + '.mp3')
    hrclip = lrclip.fl_image(model.doframe)
    # Uncomment these lines to preview the clips in a Jupyter notebook:
    # hrclip.ipython_display()
    # lrclip.ipython_display()
    hroutput = 'hr' + video
    hrclip.write_videofile(hroutput, audio='audio' + video[:-4] + '.mp3', threads=8, progress_bar=False)
Example #18
def main():
    # Load parameters
    parser = argparse.ArgumentParser(
        description=
        'Locates cars in video and places bounding boxes around them.',
        usage='%(prog)s [ -vi & -vo | -img ]? [extra_options]')
    parser.add_argument('-vi',
                        '--video_in',
                        type=str,
                        default='./data/test_videos/test_video.mp4',
                        help='Video to find cars in.')
    parser.add_argument('-vo',
                        '--video_out',
                        type=str,
                        help='Where to save video to.')
    parser.add_argument(
        '-img',
        '--images_in',
        type=str,
        help=
        'Search path (glob style) to test images. Cars will be found in images rather than video.'
    )
    parser.add_argument(
        '-clf',
        '--clf_savefile',
        type=str,
        default='./data/trained_classifier.pkl',
        help="File path to pickled trained classifier made by 'train.py'")
    parser.add_argument(
        '-sc',
        '--scaler_savefile',
        type=str,
        default='./data/Xy_scaler.pkl',
        help="File path to pickled StandardScaler made by 'train.py'")
    parser.add_argument(
        '-viz',
        '--visualization',
        type=str,
        default='cars',
        help=
        "'cars' to draw bounding box around cars or 'windows' to show all the detected windows."
    )
    parser.add_argument('-st',
                        '--start',
                        type=int,
                        default=0,
                        help="Timestamp (seconds) to start video.")
    parser.add_argument('-nd',
                        '--end',
                        type=int,
                        default=None,
                        help="Timestamp (seconds) to end video.")
    args = parser.parse_args()
    name, ext = args.video_in.split('/')[-1].rsplit('.', 1)
    if args.video_out is None:
        args.video_out = './output/{}_{}.{}'.format(name, args.visualization, ext)

    # Set up car finder
    print("Loading classifier from '{}'.".format(args.clf_savefile))
    clf = joblib.load(args.clf_savefile)
    print("Loading scaler from '{}'.".format(args.scaler_savefile))
    scaler = joblib.load(args.scaler_savefile)
    fvb = CarFeatureVectorBuilder(feature_scaler=scaler)
    car_finder = CarFinder(clf, fvb, args.visualization)

    # Find cars in...
    if args.images_in is not None:  # run on images
        print('\nSearching for cars in images...')
        imgs = []
        files = sorted(glob(args.images_in))
        for imgf in files:
            # Find cars
            image = plt.imread(imgf)
            display_img = car_finder.find_cars(image, single=True)
            imgs.append(display_img)

        n_col = 3
        fig, axes = plt.subplots(len(files) // n_col + 1, n_col)
        for ax, f, img in zip(axes.flatten(), files, imgs):
            ax.imshow(img)
            ax.set_title(f)
            ax.axis('off')
        plt.show()

    else:  # run on video
        print("\nFinding cars in '{}',\nthen saving to  '{}'...".format(
            args.video_in, args.video_out))
        input_video = VideoFileClip(args.video_in)
        output_video = input_video.fl_image(car_finder.find_cars).subclip(
            args.start, args.end)
        output_video.write_videofile(args.video_out, audio=False)
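Calling subclip after fl_image still avoids wasted work: MoviePy evaluates fl_image lazily, so only frames inside the start/end range are processed when the file is written.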
Example #19
#
# undist = undistort(image, calib_params)
#
# # Warp
# warped, M = bird_view(undist)
#
# # Threshold
# binary_image = threshold_image(warped)

# Plot the result
# result = binary_image
# f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
# f.tight_layout()
#
# ax1.imshow(image)
# ax1.set_title('Original Image', fontsize=40)
#
# ax2.imshow(result, cmap='gray')
# ax2.set_title('Pipeline Result', fontsize=40)
# plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# plt.savefig('result.jpg')
# plt.show()

# Process video
Left = Line("left")
Right = Line("right")
video_output = 'result.mp4'
clip1 = VideoFileClip("project_video.mp4")  #.subclip(40, 43)
white_clip = clip1.fl_image(process)
white_clip.write_videofile(video_output, audio=False)
Example #20
def process_and_save_video(input, output, pipeline):
    clip = VideoFileClip(input)
    white_clip = clip.fl_image(pipeline)
    white_clip.write_videofile(output, audio=False)
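Usage, with a hypothetical frame-processing function as the pipeline:

process_and_save_video('project_video.mp4', 'project_video_out.mp4', process_image)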
Example #21
def process_image(base_img):
    global BASE_IMG, CANNY_IMG
    BASE_IMG = base_img
    ysize = base_img.shape[0]
    xsize = base_img.shape[1]
    image = to_hsv(base_img)
    image = gaussian_blur(image, 3)
    image = filter_color(image)
    image = canny(image, 30, 130)
    CANNY_IMG = image
    image = region_of_interest(
        image,
        np.array([[(40, ysize), (xsize / 2, ysize / 2 + 40),
                   (xsize / 2, ysize / 2 + 40), (xsize - 40, ysize)]],
                 dtype=np.int32))
    image = hough_lines(image, 1, np.pi / 90, 10, 15, 10)

    # return image
    return weighted_img(image, base_img, β=250.)


# src_img = (matplotlib.image.imread('../vlcsnap-error105.png') * 255).astype('uint8')
# src_img = process_image(src_img)
# plt.imshow(src_img, cmap='hsv_r')
# plt.show()

white_output = 'challengeDone.mp4'
clip1 = VideoFileClip('challenge.mp4')  # .subclip(14, 16)
white_clip = clip1.fl_image(
    process_image)  # NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)
Example #22
def pipe(img):
    draw_img = np.copy(img)
    box_list = []
    out_img, _list = find_cars(img, draw_img, 1, classifier.svc, classifier.X_scaler, classifier.orient,
                               classifier.pix_per_cell, classifier.cell_per_block, classifier.spatial,
                               classifier.histbin)
    box_list = box_list + _list

    out_img, _list = find_cars(img, draw_img, 1.5, classifier.svc, classifier.X_scaler, classifier.orient,
                               classifier.pix_per_cell, classifier.cell_per_block, classifier.spatial,
                               classifier.histbin)
    box_list = box_list + _list

    out_img, _list = find_cars(img, draw_img, 2, classifier.svc, classifier.X_scaler, classifier.orient,
                               classifier.pix_per_cell, classifier.cell_per_block, classifier.spatial,
                               classifier.histbin)
    box_list = box_list + _list

    draw_img, heatmap = heat_map(img, box_list)
    return draw_img
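pipe runs find_cars at three window scales (1, 1.5, and 2) so that vehicles at different apparent sizes are matched; heat_map then consolidates the combined box list and suppresses overlapping or spurious detections.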

if __name__ == '__main__':
    # clip = VideoFileClip("test_video.mp4")
    # processed_clip = clip.fl_image(pipe)  # NOTE: this function expects color images!!
    # processed_clip.write_videofile("test_video_out.mp4", audio=False)

    clip = VideoFileClip("project_video.mp4")
    processed_clip = clip.fl_image(pipe)  # NOTE: this function expects color images!!
    processed_clip.write_videofile("project_video_out.mp4", audio=False)

Example #23
FRAME_SHAPE = (1280, 720)
HIST_STEPS = 10
OFFSET = 250
FRAME_MEMORY = 7
SRC = np.float32([(132, 703), (540, 466), (740, 466), (1147, 703)])

DST = np.float32([(SRC[0][0] + OFFSET, 720), (SRC[0][0] + OFFSET, 0),
                  (SRC[-1][0] - OFFSET, 0), (SRC[-1][0] - OFFSET, 720)])

VIDEOS = [
    "../videos/project_video.mp4", "../videos/challenge_video.mp4",
    "../videos/harder_challenge_video.mp4"
]
SELECTED_VIDEO = 0

if __name__ == '__main__':
    cam_calibration = get_camera_calibration()
    cam_calibrator = CameraCalibrator(FRAME_SHAPE, cam_calibration)

    ld = LaneDetector(SRC,
                      DST,
                      n_frames=FRAME_MEMORY,
                      cam_calibration=cam_calibrator,
                      transform_offset=OFFSET)

    clip1 = VideoFileClip(VIDEOS[SELECTED_VIDEO])
    project_clip = clip1.fl_image(ld.process_frame)

    project_output = VIDEOS[SELECTED_VIDEO][:-4] + '_ann.mp4'
    project_clip.write_videofile(project_output, audio=False)
Example #24
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    # Get color and gradient thresholds
    thresh_img = get_thresh(img)
    top_down, Minv = perspective_transform(thresh_img)
    yvals, left_fitx, right_fitx, left_curve, right_curve = collect_points(
        top_down)
    result = display_rewarp(yvals, left_fitx, right_fitx, img, top_down, Minv,
                            undist, left_curve, right_curve)
    return result


# Four instances of the Line class: two for the current lanes (left and right),
# and two for storing information about all detected lines
left_line = Line()
right_line = Line()
left_curr = Line()
right_curr = Line()

video_output = 'video_out_0129_2.mp4'
clip1 = VideoFileClip("project_video.mp4")

# Load calibration matrices
with open("camera_cal/camera_dist_pickle.p", mode='rb') as f:
    pfile = pickle.load(f)
mtx = pfile["mtx"]
dist = pfile["dist"]

# Perform image processing on each frame and save new video
video_clip = clip1.fl_image(process_frame)
video_clip.write_videofile(video_output, audio=False)
Example #25
offset = img_size[0] * 0.25

destination_points = np.float32(
	[
		[offset, 0],
		[img_size[0] - offset, 0],
		[offset, img_size[1]],
		[img_size[0] - offset, img_size[1]],
	]
)

# Calculating the perspective transform M and its inverse based on the source points and the destination points.
M = cv2.getPerspectiveTransform(source_points, destination_points)
M_inverse = cv2.getPerspectiveTransform(destination_points, source_points)


video_clip1 = clip1.fl_image(process_image)
video_clip1.write_videofile(output_video1, audio=False)

# output_video1 = 'test_videos_output/challenge_video_tracked.mp4'
# input_video1 = 'test_videos/challenge_video.mp4'
# clip1 = VideoFileClip(input_video1)
# video_clip1 = clip1.fl_image(process_image)
# video_clip1.write_videofile(output_video1, audio=False)
#
# output_video1 = 'test_videos_output/harder_challenge_video_tracked.mp4'
# input_video1 = 'test_videos/harder_challenge_video.mp4'
# clip1 = VideoFileClip(input_video1)
# video_clip1 = clip1.fl_image(process_image)
# video_clip1.write_videofile(output_video1, audio=False)
Example #26
        lane.get_curvature(), lane.get_position())
    cv2.putText(undistorted,
                radius_label, (0, 24),
                cv2.FONT_HERSHEY_SIMPLEX,
                1, (255, 0, 0),
                thickness=2)
    unwarped_lane_projection = unwarp(lane.get_projection(image))
    return cv2.addWeighted(undistorted, 1, unwarped_lane_projection, 0.3, 0)


if __name__ == '__main__':
    test_images = glob.glob('../test_images/*.jpg')
    output_images = []

    for test_image in test_images:
        image = cv2.imread(test_image)
        result = process_image(image,
                               smooth=False,
                               threshold=0.0,
                               use_mask=False)

        output_images.append(image)
        output_images.append(result)

    collage = util.collage(output_images, len(test_images), 2)
    cv2.imwrite('../output_images/pipeline.png', collage)

    input_clip = VideoFileClip('../project_video.mp4')
    output_clip = input_clip.fl_image(process_image)
    output_clip.write_videofile('../output.mp4', audio=False)

Example #27
hough_img = list(map(linedetect, canny_img))


def weightSum(input_set):
    img = list(input_set)
    return cv2.addWeighted(img[0], 1, img[1], 0.8, 0)


result_img = list(map(weightSum, zip(hough_img, imageList)))

# display_images(result_img)


def processImage(image):
    interest = roi(image)
    filterimg = color_filter(interest)
    canny = cv2.Canny(grayscale(filterimg), 50, 120)
    myline = hough_lines(canny, 1, np.pi / 180, 10, 20, 5)
    weighted_img = cv2.addWeighted(myline, 1, image, 0.8, 0)
    return weighted_img


# A camera feed could be used instead of a video clip (port 1 is open for a USB camera).
output1 = 'test_videos_output/challenge.mp4'
clip1 = VideoFileClip("test_videos/challenge.mp4")
pclip1 = clip1.fl_image(
    processImage)  # NOTE: this function expects color images!!
pclip1.write_videofile(output1, audio=False)
Example #28
def processVideo(input_path, output_path, detection_method, audio=False):
    video = VideoFileClip(input_path)
    output_video = video.fl_image(detection_method)
    output_video.write_videofile(output_path, audio=audio)
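A typical call, passing a detector's frame method (hypothetical names):

processVideo('project_video.mp4', 'project_video_out.mp4', car_detector.detect_cars)

Passing audio=True keeps the input soundtrack, since the flag is forwarded to write_videofile.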
Example #29
from moviepy.video.io.VideoFileClip import VideoFileClip
from lane_finder import LaneFinder

easy = "./videos/project_video.mp4"
medium = "./videos/challenge_video.mp4"
hard = "./videos/harder_challenge_video.mp4"

input_video = hard
debug = False  # output a debugging version of the video

if __name__ == '__main__':
    """
    Process each frame of a video to detect lanes and render output video
    """
    lane_finder = LaneFinder()

    video_clip = VideoFileClip(input_video)

    if debug:
        processed_clip = video_clip.fl_image(lane_finder.debug_frame)
    else:
        processed_clip = video_clip.fl_image(lane_finder.process_frame)

    # save video
    processed_clip.write_videofile(input_video[:-4] + '_processed.mp4',
                                   audio=False)

    print('Done')
Example #30
                               ytop_draw + win_draw + ystart)))
    return boxes


def pipeline_keras(image):
    scale = 1.5
    boxes = find_cars(image, 400, 670, 0, 1280, scale, model, X_scaler, orient,
                      pix_per_cell, cell_per_block, spatial_size, hist_bins)
    scale = 2
    boxes.extend(
        find_cars(image, 400, 670, 0, 1280, scale, model, X_scaler, orient,
                  pix_per_cell, cell_per_block, spatial_size, hist_bins))
    print(boxes)
    image = draw_boxes(image, boxes, (0, 255, 0), 1)
    return image


white_output = 'output_keras_v1.mp4'
clip1 = VideoFileClip("project_video.mp4")
# white_clip = clip1.subclip(2,7).fl_image(process_imagev2) #NOTE: this function expects color images!!
white_clip = clip1.fl_image(
    pipeline_keras)  #NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)

file_name = 'test_images/test17.jpg'


def pipeline(file_name):
    image = mpimg.imread(file_name)
    return pipeline_keras(image)