Example #1
def info(nombre):
    clip = VideoFileClip(nombre).subclip(0, NUM_SECONDS)
    print("info: h: {}, w: {}".format(clip.h, clip.w))
Example #2
#test_image = mpimg.imread("challenge_frames/frame137.jpg")
#test_image = mpimg.imread("test_images/solidWhiteCurve.jpg")
#test_image = mpimg.imread("test_images/solidYellowLeft.jpg")
#show_image(test_image)
#
#plt.imshow(processImage(test_image))
#plt.show()
#plt.pause(0.5)
    
#test_images = os.listdir("test_images/")
#for index, test_image_str in enumerate(test_images):
#   test_image_abs_path = ''. join(["test_images/", test_image_str])
#   test_image_output_abs_path = ''. join(["test_images_output/", test_image_str])
#   test_image = mpimg.imread(test_image_abs_path)
#   processed_image = processImage(test_image)
#   cv2.imwrite(test_image_output_abs_path, processed_image)

#clip_solid_white_output = 'test_videos_output/solidWhiteRight.mp4'
#clip_solid_white = VideoFileClip("test_videos/solidWhiteRight.mp4")
#challenge_clip = clip_solid_white.fl_image(processImage) #NOTE: this function expects color images!!
#challenge_clip.write_videofile(clip_solid_white_output, audio=False)

clip_solid_yellow_output = 'test_videos_output/solidYellowLeft.mp4'
clip_solid_yellow = VideoFileClip("test_videos/solidYellowLeft.mp4")
challenge_clip = clip_solid_yellow.fl_image(processImage) #NOTE: this function expects color images!!
challenge_clip.write_videofile(clip_solid_yellow_output, audio=False)
#
#clip_challenge_output = 'test_videos_output/challenge.mp4'
#clip_challenge = VideoFileClip("test_videos/challenge.mp4")
#challenge_clip = clip_challenge.fl_image(processImage) #NOTE: this function expects color images!!
#challenge_clip.write_videofile(clip_challenge_output, audio=False)
Example #3
def crop_one(vid_path):
    clip = VideoFileClip(str(vid_path))
    x = get_split(clip.get_frame(2))
    if x: return clip.crop(x2=x), clip.crop(x1=x)
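# Hedged follow-up sketch (not in the original): how crop_one's two halves might
# be written out. The file names are placeholders, and get_split is assumed to
# return the x-coordinate of the split (or a falsy value when none is found).
halves = crop_one("session_01.mp4")
if halves:
    left_half, right_half = halves
    left_half.write_videofile("session_01_left.mp4", audio=False)
    right_half.write_videofile("session_01_right.mp4", audio=False)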
def video_pipeline(self):
    clip1 = VideoFileClip(self.video)
    white_clip = clip1.fl_image(process_video_image)
    white_clip.write_videofile(''.join(['processed_', self.video]),
                               audio=False)
Example #5
print("EVALUATE")
deeplabcut.evaluate_network(path_config_file,plotting=True)

print("CUT SHORT VIDEO AND ANALYZE")

# Make super short video (so the analysis is quick!)
vname='brief'
newvideo=os.path.join(cfg['project_path'],'videos',vname+'.mp4')
try: #you need ffmpeg command line interface    
    subprocess.call(['ffmpeg','-i',video[0],'-ss','00:00:00','-to','00:00:00.4','-c','copy',newvideo])
except:
    #for windows:
    import moviepy
    from moviepy.editor import VideoFileClip,VideoClip
    clip = VideoFileClip(video[0])
    clip.reader.initialize()
    def make_frame(t):
        return clip.get_frame(1)
    
    newclip = VideoClip(make_frame, duration=1)
    newclip.write_videofile(newvideo,fps=30)

deeplabcut.analyze_videos(path_config_file,[newvideo])

print("CREATE VIDEO")
deeplabcut.create_labeled_video(path_config_file,[newvideo])


print("EXTRACT OUTLIERS")
deeplabcut.extract_outlier_frames(path_config_file,[newvideo],outlieralgorithm='jump',epsilon=0,automatic=True)
Example #6
def extract_features(input_dir, output_dir, model_type='inceptionv3', batch_size=32):
    """
    Extracts features from a CNN trained on ImageNet classification from all
    videos in a directory.
    Args:
        input_dir (str): Input directory of videos to extract from.
        output_dir (str): Directory where features should be stored.
        model_type (str): Model type to use.
        batch_size (int): Batch size to use when processing.
    """

    input_dir = os.path.expanduser(input_dir)
    output_dir = os.path.expanduser(output_dir)

    if not os.path.isdir(input_dir):
        sys.stderr.write("Input directory '%s' does not exist!\n" % input_dir)
        sys.exit(1)


    # Load desired ImageNet model
    
    # Note: import Keras only when needed so we don't waste time revving up
    #       Theano/TensorFlow needlessly in case of an error

    model = None
    shape = (224, 224)  # default input size; overridden below for Inception/Xception

    if model_type.lower() == 'inceptionv3':
        from keras.applications import InceptionV3
        model = InceptionV3(include_top=True, weights='imagenet')
    elif model_type.lower() == 'xception':
        from keras.applications import Xception
        model = Xception(include_top=True, weights='imagenet')
    elif model_type.lower() == 'resnet50':
        from keras.applications import ResNet50
        model = ResNet50(include_top=True, weights='imagenet')
    elif model_type.lower() == 'vgg16':
        from keras.applications import VGG16
        model = VGG16(include_top=True, weights='imagenet')
    elif model_type.lower() == 'vgg19':
        from keras.applications import VGG19
        model = VGG19(include_top=True, weights='imagenet')
    else:
        sys.stderr.write("'%s' is not a valid ImageNet model.\n" % model_type)
        sys.exit(1)

    if model_type.lower() == 'inceptionv3' or model_type.lower() == 'xception':
        shape = (299, 299)

    # Get outputs of model from layer just before softmax predictions

    from keras.models import Model
    model = Model(inputs=model.inputs, outputs=model.layers[-2].output)


    # Create output directories

    visual_dir = os.path.join(output_dir, 'visual') # RGB features
    #motion_dir = os.path.join(output_dir, 'motion') # Spatiotemporal features
    #opflow_dir = os.path.join(output_dir, 'opflow') # Optical flow features

    for directory in [visual_dir]:#, motion_dir, opflow_dir]:
        if not os.path.exists(directory):
            os.makedirs(directory)


    # Find all videos that need to have features extracted

    def is_video(x):
        return x.endswith('.mp4') or x.endswith('.avi') or x.endswith('.mov')

    vis_existing = [x.split('.')[0] for x in os.listdir(visual_dir)]
    #mot_existing = [os.path.splitext(x)[0] for x in os.listdir(motion_dir)]
    #flo_existing = [os.path.splitext(x)[0] for x in os.listdir(opflow_dir)]

    video_filenames = [x for x in sorted(os.listdir(input_dir))
                       if is_video(x) and os.path.splitext(x)[0] not in vis_existing]


    # Go through each video and extract features

    from keras.applications.imagenet_utils import preprocess_input

    for video_filename in tqdm(video_filenames):

        # Open video clip for reading
        try:
            clip = VideoFileClip( os.path.join(input_dir, video_filename) )
        except Exception as e:
            sys.stderr.write("Unable to read '%s'. Skipping...\n" % video_filename)
            sys.stderr.write("Exception: {}\n".format(e))
            continue

        # Sample frames at 1fps
        fps = int( np.round(clip.fps) )
        frames = [scipy.misc.imresize(crop_center(x.astype(np.float32)), shape)
                  for idx, x in enumerate(clip.iter_frames()) if idx % fps == fps//2]


        n_frames = len(frames)

        frames_arr = np.empty((n_frames,)+shape+(3,), dtype=np.float32)
        for idx, frame in enumerate(frames):
            frames_arr[idx,:,:,:] = frame

        frames_arr = preprocess_input(frames_arr)

        features = model.predict(frames_arr, batch_size=batch_size)

        name, _ = os.path.splitext(video_filename)
        feat_filepath = os.path.join(visual_dir, name+'.npy')

        with open(feat_filepath, 'wb') as f:
            np.save(f, features)
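# Hedged usage sketch (not part of the original): the directories and model
# choice below are placeholders for illustration only.
extract_features('~/videos/raw', '~/videos/features',
                 model_type='resnet50', batch_size=16)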
Example #7
def ExtractFramesbasedonPreselection(
    Index,
    extractionalgorithm,
    Dataframe,
    dataname,
    video,
    cfg,
    config,
    opencv=True,
    cluster_resizewidth=30,
    cluster_color=False,
    savelabeled=True,
):
    from deeplabcut.create_project import add

    start = cfg["start"]
    stop = cfg["stop"]
    numframes2extract = cfg["numframes2pick"]
    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, "all")

    videofolder = str(Path(video).parents[0])
    vname = str(Path(video).stem)
    tmpfolder = os.path.join(cfg["project_path"], "labeled-data", vname)
    if os.path.isdir(tmpfolder):
        print("Frames from video", vname,
              " already extracted (more will be added)!")
    else:
        auxiliaryfunctions.attempttomakefolder(tmpfolder)

    nframes = len(Dataframe)
    print("Loading video...")
    if opencv:
        vid = VideoWriter(video)
        fps = vid.fps
        duration = vid.calc_duration()
    else:
        from moviepy.editor import VideoFileClip

        clip = VideoFileClip(video)
        fps = clip.fps
        duration = clip.duration

    if cfg["cropping"]:  # one might want to adjust
        coords = (cfg["x1"], cfg["x2"], cfg["y1"], cfg["y2"])
    else:
        coords = None

    print("Duration of video [s]: ", duration, ", recorded @ ", fps, "fps!")
    print("Overall # of frames: ", nframes,
          "with (cropped) frame dimensions: ")
    if extractionalgorithm == "uniform":
        if opencv:
            frames2pick = frameselectiontools.UniformFramescv2(
                vid, numframes2extract, start, stop, Index)
        else:
            frames2pick = frameselectiontools.UniformFrames(
                clip, numframes2extract, start, stop, Index)
    elif extractionalgorithm == "kmeans":
        if opencv:
            frames2pick = frameselectiontools.KmeansbasedFrameselectioncv2(
                vid,
                numframes2extract,
                start,
                stop,
                cfg["cropping"],
                coords,
                Index,
                resizewidth=cluster_resizewidth,
                color=cluster_color,
            )
        else:
            if cfg["cropping"]:
                clip = clip.crop(y1=cfg["y1"],
                                 y2=cfg["x2"],
                                 x1=cfg["x1"],
                                 x2=cfg["x2"])
            frames2pick = frameselectiontools.KmeansbasedFrameselection(
                clip,
                numframes2extract,
                start,
                stop,
                Index,
                resizewidth=cluster_resizewidth,
                color=cluster_color,
            )

    else:
        print(
            "Please implement this method yourself! Currently the options are 'kmeans', 'jump', 'uniform'."
        )
        frames2pick = []

    # Extract frames + frames with plotted labels and store them in a folder (named after the video) under labeled-data
    print("Let's select frames indices:", frames2pick)
    colors = visualization.get_cmap(len(bodyparts), cfg["colormap"])
    strwidth = int(np.ceil(np.log10(nframes)))  # width for strings
    for index in frames2pick:  ##tqdm(range(0,nframes,10)):
        if opencv:
            PlottingSingleFramecv2(
                vid,
                cfg["cropping"],
                coords,
                Dataframe,
                bodyparts,
                tmpfolder,
                index,
                cfg["dotsize"],
                cfg["pcutoff"],
                cfg["alphavalue"],
                colors,
                strwidth,
                savelabeled,
            )
        else:
            PlottingSingleFrame(
                clip,
                Dataframe,
                bodyparts,
                tmpfolder,
                index,
                cfg["dotsize"],
                cfg["pcutoff"],
                cfg["alphavalue"],
                colors,
                strwidth,
                savelabeled,
            )
        plt.close("all")

    # close videos
    if opencv:
        vid.close()
    else:
        clip.close()
        del clip

    # Extract annotations based on DeepLabCut and store in the folder (with name derived from video name) under labeled-data
    if len(frames2pick) > 0:
        DF = Dataframe.loc[frames2pick]
        DF.index = [
            os.path.join("labeled-data", vname,
                         "img" + str(index).zfill(strwidth) + ".png")
            for index in DF.index
        ]  # exchange index number by file names.

        machinefile = os.path.join(
            tmpfolder, "machinelabels-iter" + str(cfg["iteration"]) + ".h5")
        if Path(machinefile).is_file():
            Data = pd.read_hdf(machinefile, "df_with_missing")
            DataCombined = pd.concat([Data, DF])
            # drop duplicate labels:
            DataCombined = DataCombined[~DataCombined.index.duplicated(
                keep="first")]

            DataCombined.to_hdf(machinefile, key="df_with_missing", mode="w")
            DataCombined.to_csv(
                os.path.join(tmpfolder, "machinelabels.csv")
            )  # this is always the most current one (as reading is from h5)
        else:
            DF.to_hdf(machinefile, key="df_with_missing", mode="w")
            DF.to_csv(os.path.join(tmpfolder, "machinelabels.csv"))
        try:
            if cfg["cropping"]:
                add.add_new_videos(
                    config, [video],
                    coords=[coords])  # make sure you pass coords as a list
            else:
                add.add_new_videos(config, [video], coords=None)
        except:  # can we make a catch here? - in fact we should drop indices from DataCombined if they are in CollectedData.. [ideal behavior; currently this is pretty unlikely]
            print(
                "AUTOMATIC ADDING OF VIDEO TO CONFIG FILE FAILED! You need to do this manually for including it in the config.yaml file!"
            )
            print("Videopath:", video, "Coordinates for cropping:", coords)
            pass

        print(
            "The outlier frames are extracted. They are stored in the subdirectory labeled-data\%s."
            % vname)
        print(
            "Once you extracted frames for all videos, use 'refine_labels' to manually correct the labels."
        )
    else:
        print("No frames were extracted.")
Example #8
import numpy as np  # for numerical operations
from moviepy.editor import VideoFileClip, concatenate
from core.models import Video

video = Video.objects.get(id=1)
print(video.video.name)

clip = VideoFileClip(video.video.name)
cut = lambda i: clip.audio.subclip(i, i + 1).to_soundarray(fps=22000)
volume = lambda array: np.sqrt(((1.0 * array)**2).mean())
volumes = [volume(cut(i)) for i in range(0, int(clip.duration - 1))]

print('volumes: %s' % volumes)

averaged_volumes = np.array(
    [sum(volumes[i:i + 10]) / 10 for i in range(len(volumes) - 10)])

print('averaged_volumes: %s' % averaged_volumes)

increases = np.diff(averaged_volumes)[:-1] >= 0
decreases = np.diff(averaged_volumes)[1:] <= 0
peaks_times = (increases * decreases).nonzero()[0]
peaks_vols = averaged_volumes[peaks_times]
peaks_times = peaks_times[peaks_vols > np.percentile(peaks_vols, 90)]

print('peaked: %s' % peaks_times)

final_times = [peaks_times[0]]
for t in peaks_times:
    if (t - final_times[-1]) < 60:
        if averaged_volumes[t] > averaged_volumes[final_times[-1]]:
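            # --- Hedged completion: the original snippet is truncated here. ---
            # Following the usual moviepy "highlights" recipe, keep the louder of
            # two peaks that fall within 60 s of each other, otherwise start a new
            # highlight; the output file name below is a placeholder.
            final_times[-1] = t
    else:
        final_times.append(t)

final_times.append(clip.duration)
final = concatenate([clip.subclip(max(t - 5, 0), min(t + 5, clip.duration))
                     for t in final_times])
final.write_videofile('highlights.mp4')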
Example #9
print('proceeding to compile final video')
pathlist = Path(new_path + '/').glob('**/*.mp4')
final_video = []
for path in pathlist:
    path_in_str = str(path)
    print('adding:', path_in_str)
    idx = path_in_str.index(
        '.'
    )  # the key is derived from the file path; collisions are unlikely but possible
    mykey = path_in_str[idx - 5:idx] + path_in_str[0]
    try:
        title, author = title_author[mykey]
    except KeyError:  # a KeyError is expected here: it means this video from today's file was not among the videos scraped on this run
        continue
    try:
        clip = VideoFileClip(path_in_str).resize(
            (1920, 1080))  # original was width=1600 or height=1080
        final_title = ''
        x = 0
        while len(title) > 30:
            part = title[:30]
            try:
                idx = part[::-1].index(' ')
            except:
                # the title has no whitespace (e.g. "AAAAAAAAAAA") but is still longer than 30 characters
                title = title[:27] + '...'
                break
            part = part[::-1][idx:][::-1]
            final_title += part + '\n'
            title = title.replace(part, '')
            x += 0.5
        final_title += title
Example #10
def transfer_and_split_in_sequences(input_s3_bucket, input_file_key,
                                    output_s3_bucket, output_key_prefix,
                                    subvid_duration_in_sec, dynamodb_region_id,
                                    dynamodb_tableId):
    """
    Gets a video from S3, splits it into sub-videos of the requested duration, and uploads the resulting files back to S3.
    You must have access rights to both buckets.

    :param input_s3_bucket: name of the S3 bucket to get the input video from
    :param input_file_key: key of the file to get in the bucket
    :param output_s3_bucket: name of the S3 bucket to write to
    :param output_key_prefix: prefix to use for the keys of the files written to the bucket;
                              should contain "{video_name}", which will be replaced by the input video name.
                              final keys will be: output_key_prefix.format(video_name)/video_name_i.video_format
    :param subvid_duration_in_sec: length of each sub-video in seconds; the last one may be shorter
    :param dynamodb_region_id: AWS region of the DynamoDB table that tracks video metadata
    :param dynamodb_tableId: name of that DynamoDB table
    :return: None
    """

    # get video from s3
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    get_video_from_s3(input_s3_bucket, input_file_key, temp_file.name)
    video_name = os.path.basename(input_file_key)
    video_format = video_name[video_name.rfind('.') + 1:]
    video_name = video_name[:video_name.rfind('.')]

    print("starting time-based split")
    video_object = VideoFileClip(temp_file.name)
    upload_video_information = {
        "VideoId": generate_row_id(),
        "process_steps": [{
            "step": "upload",
            "state": "processing"
        }],
        "creation_time": datetime.datetime.now().isoformat(),
        "bucket": input_s3_bucket,
        "key": input_file_key,
        "name": video_name,
        "extension": video_format,
        "size": video_object.size,
        "fps": video_object.fps,
        "duration": video_object.duration,
        "audio": (video_object.audio is not None),
        "sub_videos": []
    }
    send_video_info_to_dynamo_db(dynamodb_region_id, dynamodb_tableId,
                                 upload_video_information)

    current_start = 0
    current_index = 0

    while current_start < video_object.duration:
        # get new subvid and write it to s3
        new_subvid = video_object.subclip(
            current_start,
            min(current_start + subvid_duration_in_sec, video_object.duration))
        output_key = os.path.join(
            output_key_prefix.format(video_name=video_name),
            "{}_{}.{}".format(video_name, current_index, video_format))
        subvid_temp = tempfile.NamedTemporaryFile(
            delete=False, prefix="/tmp/", suffix='.{}'.format(video_format))
        new_subvid.write_videofile(subvid_temp.name)
        put_video_to_s3(subvid_temp.name, output_s3_bucket, output_key)

        subvideo_information = {
            "VideoId": generate_row_id(),
            "process_steps": [{
                "step": "timesplit",
                "state": "done"
            }],
            "creation_time": datetime.datetime.now().isoformat(),
            "bucket": output_s3_bucket,
            "key": output_key,
            "name": os.path.basename(output_key),
            "extension": video_format,
            "size": new_subvid.size,
            "fps": new_subvid.fps,
            "duration": new_subvid.duration,
            "audio": (new_subvid.audio is not None),
            "parent_video": upload_video_information["VideoId"]
        }
        send_video_info_to_dynamo_db(dynamodb_region_id, dynamodb_tableId,
                                     subvideo_information)
        upload_video_information["sub_videos"].append(
            subvideo_information["VideoId"])

        os.remove(subvid_temp.name)
        current_index += 1
        current_start += subvid_duration_in_sec

    upload_video_information["process_steps"][0]["state"] = "done"
    send_video_info_to_dynamo_db(dynamodb_region_id, dynamodb_tableId,
                                 upload_video_information)
    # clean
    os.remove(temp_file.name)
    print("split finished")
    slideWindow_img, result = helpers.fit_polynomial(warped_binary, undist)
    resultwText = helpers.addTextResult(result)  # Put text on resulting image
    final_result = helpers.addOverlay(resultwText, slideWindow_img)
    return final_result


# Calibrate camera with sample images
mtx, dist = helpers.calibrateCamera()

# Create line objects (leftline and rightLine)
helpers.leftLine = Line()
helpers.rightLine = Line()

# Video processing only
video = '/Users/sumedhinamdar/Documents/GitHub/CarND-Advanced-Lane-Lines/challenge_video.mp4'
clip1 = VideoFileClip(video)
# NOTE: this function expects color images!!
white_clip = clip1.fl_image(process_image)
white_output = video.replace('.mp4', '_lanesFound.mp4')
white_clip.write_videofile(white_output, audio=False)

# # Image processing only
# # Pass in test images to imageList
# imageList = glob.glob(
#     '/Users/sumedhinamdar/Documents/GitHub/CarND-Advanced-Lane-Lines/test_images/*.jpg')
#
# for image in imageList:
#     img = mpimg.imread(image)
#     # print('image: ' + image.strip('/Users/sumedhinamdar/Documents/GitHub/CarND-Advanced-Lane-Lines/test_images/'))
#     # Step 1: Undistort images using mtx and dist coefficients from calibrateCamera
#     undist = cv2.undistort(img, mtx, dist, None, mtx)
Example #12
File: main.py  Project: Ramzesovich66/CarND
    if cfg.compute_calib_params:
        camera_calibration()

    # 2. Test image processing
    if 0 == cfg.video_mode:
        images = glob.glob(cfg.test_img_folder + '*.jpg')
        for idx, fname in enumerate(images):
            img = mpimg.imread(fname)
            pipeline(img, fname)
    else:
        # 3. Processing a video frame

        # 3.1 Store the annotated video frames into a file
        if 1 == cfg.store_video:
            if cfg.clip_video:
                clip = VideoFileClip(cfg.video_file_name + '.mp4').subclip(
                    cfg.clip_start, cfg.clip_end)
            else:
                clip = VideoFileClip(cfg.video_file_name + '.mp4')
            processed_video = clip.fl_image(pipeline)
            temp = cfg.video_file_name
            processed_video.write_videofile(
                cfg.output_video_folder + 'out_' + temp[3::] + '_' +
                str(cfg.num_of_frames) + '_frames.mp4',
                audio=False)

        else:
            # 3.2 Display the annotated video on the fly without storing it to a file
            cap = cv2.VideoCapture(cfg.video_file_name + '.mp4')
            while cap.isOpened():
                ret, color_frame = cap.read()
                if ret:
Example #13
    with open(PERSPECTIVE_FILE_NAME, 'rb') as f:
        perspective_data = pickle.load(f)

    perspective_transform = perspective_data["perspective_transform"]
    pixels_per_meter = perspective_data['pixels_per_meter']
    orig_points = perspective_data["orig_points"]

    input_dir = "test_images"
    output_dir = "output_images"
    '''
    for image_file in os.listdir(input_dir):
        if image_file.endswith("jpg"):
            img = mpimg.imread(os.path.join(input_dir, image_file))
            lf = LaneFinder(settings.ORIGINAL_SIZE, settings.UNWARPED_SIZE, cam_matrix, dist_coeffs,
                perspective_transform, pixels_per_meter, "warning.png")
            img = lf.process_image(img, True, show_period=1, blocking=False)
    '''
    video_files = ['Test.mp4']
    output_path = "output_videos"

    for file in video_files:
        lf = LaneFinder(settings.ORIGINAL_SIZE, settings.UNWARPED_SIZE,
                        cam_matrix, dist_coeffs, perspective_transform,
                        pixels_per_meter, "warning.png")
        output = os.path.join(output_path, "lane_" + file)
        clip2 = VideoFileClip(file)
        challenge_clip = clip2.fl_image(
            lambda x: lf.process_image(x, reset=False, show_period=20))
        challenge_clip.write_videofile(output, audio=False)
Example #14
def usePlan():
    with open('plan.index', 'r') as f:
        i = 0
        print('Checking plan')
        for line in f:
            if (str(line) != ""):
                if (str(line) != None):
                    linedata = line.split(' ')[0]
                    if (linedata == "end"):
                            print('Found end of script')
                            break
                    if (linedata == "using"):
                            print('Found "using" identifier')
                            usingdata = line.split(' ')[1]
                            print('Setting "filmindex" variable')
                            filmindex = str(line.split(' ')[1])
                            usingoperator = line.split(' ')[2]
                            if (usingoperator == "as"):
                                print('Assigning "filmindex" variable to ' + str(line.split(' ')[3]).splitlines()[0])
                                usingFiIdentity = line.split(' ')[3]
                                fiIdentity = str(line.split(' ')[3].splitlines()[0])
                    if (linedata == "play"):
                        playtype = line.split(' ')[1]
                        if (playtype == "settings"):
                            print('Found "settings" block')
                            settingslist = line.split(' ')[2]
                            settingslist = settingslist.split(',')
                            if (settingslist[0] == "inorder"):
                                inOrder = "true"
                                print('inOrder: ' + inOrder)
                            if (settingslist[0] == "ogquality"):
                                ogQuality = "true"
                                print('ogQuality: ' + ogQuality)
                            if (settingslist[0] == "noads"):
                                noAds = "true"
                                print('noAds: ' + noAds)
                            if (settingslist[0] == "notrailers"):
                                noTrailersTillEnd = "true"
                                print('noTrailersTillEnd: ' + noTrailersTillEnd)
                            if (settingslist[1] == "inorder"):
                                inOrder = "true"
                                print('inOrder: ' + inOrder)
                            if (settingslist[1] == "ogquality"):
                                ogQuality = "true"
                                print('ogQuality: ' + ogQuality)
                            if (settingslist[1] == "noads"):
                                noAds = "true"
                                print('noAds: ' + noAds)
                            if (settingslist[1] == "notrailers"):
                                noTrailersTillEnd = "true"
                                print('noTrailersTillEnd: ' + noTrailersTillEnd)
                            if (settingslist[2] == "inorder"):
                                inOrder = "true"
                                print('inOrder: ' + inOrder)
                            if (settingslist[2] == "ogquality"):
                                ogQuality = "true"
                                print('ogQuality: ' + ogQuality)
                            if (settingslist[2] == "noads"):
                                noAds = "true"
                                print('noAds: ' + noAds)
                            if (settingslist[2] == "notrailers"):
                                noTrailersTillEnd = "true"
                                print('noTrailersTillEnd: ' + noTrailersTillEnd)
                            if (settingslist[3] == "inorder"):
                                inOrder = "true"
                                print('inOrder: ' + inOrder)
                            if (settingslist[3] == "ogquality"):
                                ogQuality = "true"
                                print('ogQuality: ' + ogQuality)
                            if (settingslist[3] == "noads"):
                                noAds = "true"
                                print('noAds: ' + noAds)
                            if (settingslist[3] == "notrailers"):
                                noTrailersTillEnd = "true"
                                print('noTrailersTillEnd: ' + noTrailersTillEnd)
                        if (playtype == "viewing"):
                            index = line.split(' ')[2]
                            index = index.split(':')
                            useIndex = index[0]
                            FilmID.append(index[1])
                            print('Found "viewing" block with index ' + useIndex + ' and film ID ' + FilmID[-1])
                else:
                    print('Line ' + str(i) + ' empty, ignoring...')
                i = i + 1
    print('RECAP:')
    print('  Film Index File: ' + filmindex)
    print('  Registered Film Index: ' + fiIdentity)
    print('  inorder setting: ' + inOrder)
    print('  ogquality setting: ' + ogQuality)
    print('  noads setting: ' + noAds)
    print('  notrailers setting: ' + noTrailersTillEnd)
    print('  Last Used Film Index: ' + useIndex)
    print('  Detected Film Array: ' + str(FilmID))
    print('  Film Array Length: ' + str(len(FilmID)))
    print('Checking registered film index file...')
    while True:
        try:
            index = open(str(filmindex), 'r')
            print('Registered film index OK')
            break
        except:
            print('Registered film index ERROR')
            time.sleep(5)
    print('Starting screening...')
    closeFfmpeg()
    time.sleep(1)
    closeFfmpeg()
    time.sleep(1)
    closeFfmpeg()
    print(filmindex)
    totalIDs = len(FilmID) - 1
    print(totalIDs)
    i = 0
    
    while i <= totalIDs:
        with open(filmindex, 'r') as index:
            for line in index:
                if line.split(' ')[0].startswith(FilmID[i]):
                    print('playing film from location ' + line.split(';;;')[1])
                    sendMSG("Switching stream, please wait")
                    print("Closing any other possible streams")
                    closeFfmpeg()
                    closeFfmpeg()
                    time.sleep(10)
                    print("Starting new stream instance: " + "films/" + str(line.split(';;;')[1]))
                    os.system('start /min ffmpeg -re -stream_loop -1 -i "' + "films/" + str(line.split(';;;')[1]) + '" -vcodec libx264 -profile:v main -preset:v medium -r 30 -g 60 -keyint_min 60 -sc_threshold 0 -b:v 2500k -maxrate 2500k -bufsize 2500k -filter:v scale="trunc(oh*a/2)*2:720" -sws_flags lanczos+accurate_rnd -acodec aac -b:a 96k -ar 48000 -ac 2 -f flv rtmp://live.twitch.tv/app/live_191842162_wbSygqVSvBa4G2aQ2qivMswluXUZ7L')
                    sendMSG("Playback started of " + line.split(';;;')[2])
                    changeTitle("Playing: " + line.split(';;;')[2])
                    clip = VideoFileClip("films/" + str(line.split(';;;')[1]))
                    video_duration = clip.duration - 3
                    print(video_duration)
                    print("Waiting " + str(video_duration) + " for film to finish")
                    try:
                        time.sleep(video_duration)
                    except:
                        pass
                    closeFfmpeg()
                    closeFfmpeg()
        i=i+1
    print('Plan finished, switching to twitch control...')
    twitchControl()
def run(argv):
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)
    global epochs
    global batch_size
    global lr
    global beta

    epochs = int(argv[0])
    batch_size = int(argv[1])
    lr = float(argv[2])
    beta = float(argv[3])
    print("Epochs: ", epochs, " Batch Size: ", batch_size, " Learning Rate: ",
          lr, " Beta: ", beta)
    print(
        "Output will go to ",
        'result_epochs_{}_batchsz_{}_lr_{}_beta_{}.mp4'.format(
            epochs, batch_size, lr, beta))

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    clip = VideoFileClip('driving.mp4')

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        #get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)
        get_batches_fn = my_gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # Build NN using load_vgg, layers, and optimize function
        vgg_input, vgg_keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(
            sess, vgg_path)
        #print("vgg_input: ", vgg_input)

        nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out,
                               num_classes)
        #print("nn_last_layer: ", nn_last_layer)

        correct_label = tf.placeholder(tf.float32,
                                       [None, None, None, num_classes],
                                       name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')
        #print("tf.trainable_variables: ", tf.trainable_variables())
        logits, train_op, cross_entropy_loss = optimize(
            nn_last_layer, correct_label, learning_rate, num_classes)

        init = tf.global_variables_initializer()
        # Add ops to save and restore all the variables.
        saver = tf.train.Saver()

        sess.run(init)
        #saver.restore(sess, "/tmp/model.ckpt")
        #print("Model restored.")
        # Train NN using the train_nn function
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 cross_entropy_loss, vgg_input, correct_label, vgg_keep_prob,
                 learning_rate)

        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, vgg_keep_prob, vgg_input)

        # Save the variables to disk.
        save_path = saver.save(sess, "/tmp/model.ckpt")
        print("Model saved in path: %s" % save_path)
        # Restore variables from disk.
        #saver.restore(sess, "/tmp/model.ckpt")
        #print("Model restored.")

        # Apply the trained model to a video
        new_clip = clip.fl_image(
            pipeline(sess, logits, vgg_keep_prob, vgg_input))

        # write to file
        new_clip.write_videofile(
            'result_epochs_{}_batchsz_{}_lr_{}_beta_{}.mp4'.format(
                epochs, batch_size, lr, beta))
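# Hedged sketch (not part of the original): clip.fl_image expects a function that
# maps one RGB frame to one RGB frame, so pipeline(sess, ...) above presumably
# returns such a closure. A minimal shape for it might look like the following;
# the image size, threshold, and overlay colour are assumptions.
import numpy as np
import scipy.misc
import tensorflow as tf


def pipeline(sess, logits, keep_prob, image_input, image_shape=(160, 576)):
    softmax_op = tf.nn.softmax(logits)  # build the op once, not per frame

    def process_frame(frame):
        img = scipy.misc.imresize(frame, image_shape)
        probs = sess.run(softmax_op, {keep_prob: 1.0, image_input: [img]})
        road = (probs[:, 1] > 0.5).reshape(image_shape[0], image_shape[1], 1)
        overlay = (road * np.array([[[0, 255, 0]]])).astype(np.uint8)
        blended = np.clip(img.astype(np.int32) + overlay, 0, 255).astype(np.uint8)
        return scipy.misc.imresize(blended, frame.shape[:2])

    return process_frame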
Example #16
        video_clip_list.append(tempclip)
        video_list[choosen_vid].t = video_list[choosen_vid].t + gap
    else:
        tempclip = VideoClip(video_list[choosen_vid].video_clip,
                             video_list[choosen_vid].t,
                             video_list[choosen_vid].duration)
        video_clip_list.append(tempclip)
        video_list[choosen_vid].t = video_list[choosen_vid].duration
        count_videos = count_videos - 1
        video_list.remove(video_list[choosen_vid])

for clip_temp in video_clip_list:
    print(clip_temp.video_clip_name)
    print(clip_temp.video_start)
    print(clip_temp.video_end)

finalVideoClip = VideoFileClip(video_clip_list[0].video_clip_name).subclip(
    video_clip_list[0].video_start, video_clip_list[0].video_end).resize(
        (1280, 720))

i = 1
while i < len(video_clip_list):
    final_video_temp = VideoFileClip(
        video_clip_list[i].video_clip_name).subclip(
            video_clip_list[i].video_start,
            video_clip_list[i].video_end).resize((1280, 720))
    finalVideoClip = concatenate_videoclips([finalVideoClip, final_video_temp])
    i = i + 1

finalVideoClip.write_videofile("random_mashup.mp4")
Example #17
    R_left, R_right = detector.get_radii()
    R_mean = 2.0 / (1/R_left + 1/R_right)
    d,W = detector.calc_distance_from_center()

    tr.put_line("RL=%7.2fm  RR=%7.2fm  R=%7.2fm W=%2.2fm  POS=%2.2fm" % (R_left, R_right, R_mean, W, d))
    tr.text_at("%02d.%02d" % (counter//frame_rate,counter%frame_rate), (20,h-40))

    counter += 1

    if args.render:
        new_frame = bgr2rgb(new_frame)

    return new_frame


clip = VideoFileClip(args.video_file)
counter = 0
frame_skip = 1
start_frame = args.t1
scale = args.scale
detector.scale = scale
key_wait = args.delay

if args.render:
    out_file_name = args.video_file.split(".")[0] + "_annotated.mp4"
    annotated_clip = clip.fl_image(process_frame)
    annotated_clip.write_videofile(out_file_name, fps=frame_rate, audio=False)
else:
    for frame in clip.iter_frames():
        if (counter % frame_skip) or (counter < start_frame):
            counter += 1
Example #18
from moviepy.editor import VideoFileClip, CompositeVideoClip

from src.utils.lane_processor import LaneProcessor

clip = VideoFileClip("../project_video.mp4")  # .subclip(15, 30)
laneProcessor = LaneProcessor()

undistorted = clip.fl_image(laneProcessor.undistort)
thresholded = undistorted.fl_image(laneProcessor.threshold)
birdseye = undistorted.fl_image(laneProcessor.birdseye)
# laneProcessor.clear_lanes()
lane = birdseye.fl_image(laneProcessor.detect_lanes)
final = undistorted.fl_image(laneProcessor.draw_lanes)

combo = CompositeVideoClip([
    final,
    thresholded.resize(0.3).set_pos((502, 10)),
    lane.resize(0.3).set_pos((886, 10))
])
combo.write_videofile("../output_video.mp4", audio=False)
def procss6Imgs():
    for i in range(6)[:]:
        vFrame = VideoFileClip('./project_video.mp4').get_frame(30.0+i*5)
        x = processImg(vFrame, saveFlev=4, imgWrt='./output_images/'+str(i)+'_', dbg=True)
Example #20
def store_test_resnet_output_chunks(dlc_cfg,
                                    nc=200,
                                    chunk_size=1000,
                                    allow_growth=True,
                                    debug_key=""):
    # debug_key = "nt_{}".format(nt_chunk)
    #
    #%%
    from deeplabcut.pose_estimation_tensorflow.nnet.net_factory import pose_net
    import tensorflow.contrib.slim as slim
    from tqdm import tqdm
    from skimage.util import img_as_ubyte
    from PoseDataLoader import TestDataLoader

    #%%
    clip = VideoFileClip(str(dlc_cfg.video_path))
    ny_raw, nx_raw = clip.size  # note: moviepy's clip.size is (width, height)
    fps = clip.fps
    #%%
    nframes = clip.duration * clip.fps
    nframes_fsec = nframes - int(nframes)
    #%%
    if (nframes_fsec < 1 / clip.fps):
        nframes = np.floor(nframes).astype('int')
    else:
        nframes = np.ceil(nframes).astype('int')
        print('Warning. Check the number of frames')
    #%%
    # Build graph to pass frames through resnet
    TF.reset_default_graph()
    inputs = TF.placeholder(tf.float32, shape=[1, nx_raw, ny_raw, 3])
    pn = pose_net(dlc_cfg)
    net, end_points = pn.extract_features(inputs)
    # heads = pn.prediction_layers(net, end_points)
    # %%
    # restore from snapshot
    if 'snapshot' in dlc_cfg.init_weights:
        variables_to_restore = slim.get_variables_to_restore()
    else:
        variables_to_restore = slim.get_variables_to_restore(
            include=["resnet_v1"])

    restorer = TF.train.Saver(variables_to_restore)

    # Init session
    config_TF = TF.ConfigProto()
    config_TF.gpu_options.allow_growth = allow_growth
    sess = TF.Session(config=config_TF)

    sess.run(TF.global_variables_initializer())
    sess.run(TF.local_variables_initializer())
    # %%
    # Restore the one variable from disk
    restorer.restore(sess, dlc_cfg.init_weights)
    #%%
    nx_out, ny_out = net.shape.as_list()[1:3]
    #%%
    if nc is None:
        # load all channels
        nchannels = 2048
        nc = 2048
    elif isinstance(nc, int):
        nchannels = nc
    elif isinstance(nc, np.ndarray):
        nchannels = len(nc)
    else:
        raise Exception('Check nc')
    # %%

    num_chunks_tvideo = int(np.ceil(nframes / chunk_size))
    print('Video is split in {} resnet_out files'.format(num_chunks_tvideo))

    #%% Make test dataset
    test_data = TestDataLoader(dlc_cfg, debug_key=debug_key)
    #%%
    if not test_data.video_data_chunks_dir.exists():
        os.makedirs(test_data.video_data_chunks_dir)

    if not test_data.resnet_output_chunks_dir.exists():
        os.makedirs(test_data.resnet_output_chunks_dir)

    #%%
    for chunk_id, video_start in enumerate(np.arange(0, nframes, chunk_size)):
        video_end = min(video_start + chunk_size, nframes)

        nvideoframes = video_end - video_start

        #%% Make movie file:
        start_sec = np.round(video_start / fps, 5)
        end_sec = np.round(video_end / fps, 5)
        bonus = (nvideoframes - (end_sec - start_sec) * fps) / fps
        if bonus < 0:
            end_sec += 2 * bonus

        mini_clip = clip.subclip(t_start=start_sec, t_end=end_sec)
        n_frames = sum(1 for x in mini_clip.iter_frames())
        if n_frames != nvideoframes:
            raise Exception('Frame count mismatch in chunk {}'.format(chunk_id))

        video_fname = test_data.get_video_data_chunks_fname(chunk_id)
        mini_clip.write_videofile(str(video_fname))
        #print('Wrote file:\n {}'.format(video_fname))
        #%%
        # Make resnet output file:
        indices = np.arange(video_start, video_end)
        resnet_outputs = np.zeros((nvideoframes, nx_out, ny_out, nchannels),
                                  dtype="float32")
        pbar = tqdm(total=nvideoframes,
                    desc='Pass through resnet chunk {}'.format(chunk_id))
        step = nvideoframes // 3
        for counter, index in enumerate(indices):
            ff = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
            [net_output] = sess.run([net],
                                    feed_dict={inputs: ff[None, :, :, :]})
            if isinstance(nc, int):
                resnet_outputs[counter, :, :] = net_output[0, :, :, :nc]
            elif isinstance(nc, np.ndarray):
                resnet_outputs[counter, :, :] = net_output[0, :, :, nc]
            else:
                raise Exception('Not proper resnet channel selection')

            if (counter % step == 0) or (counter == nvideoframes):
                pbar.update(min(counter, step))
        pbar.close()

        resnet_fname = test_data.get_resnet_output_chunks_fname(chunk_id)
        with h5py.File(str(resnet_fname), 'w') as f:
            f.create_dataset("resnet_out", data=resnet_outputs)
            #f.create_dataset("resnet_idx", data=frames_in_chunk)
            #f.create_dataset("pv", data=pv_chunk)
            #f.create_dataset("ph", data=ph_chunk)
            f.create_dataset("start", data=video_start)
            f.create_dataset("stop", data=video_end)

    #print('Stored resnet output in:\n{}'.format(
    #    chunk_id, str(image_path)))

    return
Example #21
def save_movie():
    for video_path in data.get_video_paths():
        video_clip = VideoFileClip(video_path)
        white_clip = video_clip.fl_image(pipeline)
        white_clip.write_videofile("output_" + video_path, audio=False)
Example #22
            fig.add_subplot(339), plt.imshow(result,
                                             cmap='gray'), plt.title('Final')
            plt.show()

        return white, yellow, color, sobel, combined, result


if __name__ == "__main__":
    # Calibrate the camera
    calibration = calib.calibrate_camera('camera_cal', (9, 6), (720, 1280, 3))

    test_mode = 'video'

    if test_mode == 'image':
        # Find the lanes
        from scipy.misc import imread, imsave
        images = glob('test_images/*')
        for idx, img_path in enumerate(images):
            img = imread(img_path)
            ld = LaneFinder(cam_calibration=calibration,
                            debug_loc='output_images',
                            debug_level=2)
            res = ld.process(img)
            imsave('output_images/test' + str(idx) + '.png', res)
    else:
        from moviepy.editor import VideoFileClip
        ld = LaneFinder(cam_calibration=calibration)
        project_output = 'project_video_out.mp4'
        clip1 = VideoFileClip('project_video.mp4')
        project_clip = clip1.fl_image(ld.process)
        project_clip.write_videofile(project_output, audio=False)
Example #23
##################################################

# videofolder='../videos/' #where your folder with videos is.

os.chdir(videofolder)
videos = np.sort([fn for fn in os.listdir(os.curdir) if (videotype in fn)])
print("Starting ", videofolder, videos)
for video in videos:
	dataname = video.split('.')[0] + scorer + '.h5'
	try:
		# Attempt to load data...
		pd.read_hdf(dataname)
		print("Video already analyzed!", dataname)
	except:
		print("Loading ", video)
		clip = VideoFileClip(video)
		ny, nx = clip.size  # dimensions of frame (height, width)
		fps = clip.fps
		frame_buffer = 10
		# nframes_approx = np.sum(1 for j in clip.iter_frames()) + frame_buffer # add some frames to ensure none are missed at the end
		nframes_approx = round(clip.duration * clip.fps) + frame_buffer

		if cropping:
			clip = clip.crop(
				y1=y1, y2=y2, x1=x1, x2=x2)  # one might want to adjust

		print("Duration of video [s]: ", clip.duration, ", recorded with ", fps,
			  "fps!")
		print("Approximate # of frames: ", nframes_approx-frame_buffer,
			  "with cropped frame dimensions: ", clip.size)
Example #24
    x_rightLaneBegin = int((y_topRight - b) / a)
    x_rightLaneEnd = int((y_bottomRight - b) / a)
    for x in range(x_leftLaneBegin, x_leftLaneEnd, 1):
        cv2.line(line_image, (x, int(poly_leftLane(x))),
                 (x + 1, int(poly_leftLane(x + 1))), [255, 0, 0], 12)
    for x in range(x_rightLaneBegin, x_rightLaneEnd, 1):
        cv2.line(line_image, (x, int(poly_rightLane(x))),
                 (x + 1, int(poly_rightLane(x + 1))), [255, 0, 0], 12)

    # Create a "color" binary image to combine with line image
    color_edges = np.dstack((edges, edges, edges))

    # Draw the lines on the edge image
    lines_edges = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)

    # Draw the lines on the input image
    lines_image = cv2.addWeighted(image, 0.8, line_image, 1, 0)
    return lines_image


white_output = 'solidWhiteRight-result.mp4'
clip1 = VideoFileClip("solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image)
white_clip.write_videofile(white_output, audio=False)

yellow_output = 'solidYellowLeft-result.mp4'
clip2 = VideoFileClip('solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
yellow_clip.write_videofile(yellow_output, audio=False)
Example #25
  pix_per_cell = dist_pickle["pix_per_cell"]
  cell_per_block = dist_pickle["cell_per_block"]
  spatial_size = dist_pickle["spatial_size"]
  hist_bins = dist_pickle["hist_bins"]
  feature_color = dist_pickle["feature_color"]
  hog_channel = dist_pickle['hog_channel']
  spatial_feat = dist_pickle['spatial_feat']
  hist_feat = dist_pickle['hist_feat']
  hog_feat = dist_pickle['hog_feat']
  enabled_features = [spatial_feat, hist_feat, hog_feat]
  # Raw search area / scales  
  ystart = 360
  ystop  = 640
  scales = [1.3, 1.5, 2.0]      
  print(orient, pix_per_cell, cell_per_block, spatial_size, hist_bins, feature_color, enabled_features, hog_channel)
 
  # SVM Detection
  VTracking = SVM_VehicleTracking(svc,X_scaler,orient,pix_per_cell,cell_per_block,
               spatial_size,hist_bins,feature_color,hog_channel,
               spatial_feat,hist_feat,hog_feat,ystart,ystop,scales)
  
  # Video processing
  drive_output = 'project_video_out.mp4'
  clip1 = VideoFileClip("project_video.mp4")
  drive_clip = clip1.fl_image(VTracking.TrackingPipeline) #NOTE: this function expects jpg color images!!
  drive_clip.write_videofile(drive_output, audio=False)
                            min_line_length, max_line_gap)
    right_line, left_line = extrapolate_lines(lines, image.shape[0])
    ave_right = average_lines(right_line, smooth_right)
    ave_left = average_lines(left_line, smooth_left)

    # Draw the lanes on the img and return to video
    projection = draw_lane_lines(frame1.color, ave_right, ave_left)
    final_img = weighted_img(projection, frame1.color)

    return final_img


'''------------------------------
        Import Video
---------------------------------'''
# Define the output file locations
white_output = ("test_videos_output/solidWhiteRight.mp4")
yellow_output = ('test_videos_output/solidYellowLeft.mp4')

# Define the input file locations
clip1 = VideoFileClip('test_videos/solidWhiteRight.mp4')
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')

# Process each frame in the video
white_clip = clip1.fl_image(process_image)
yellow_clip = clip2.fl_image(process_image)

# Write the video to file
white_clip.write_videofile(white_output, audio=False)
yellow_clip.write_videofile(yellow_output, audio=False)
Example #27
    if segment_index < len(song_segments) - 1:
        segment_duration = song_segments[
            segment_index + 1][0] - song_segments[segment_index][0]
    else:
        segment_duration = song.duration - song_segments[segment_index][0]

    print('segment duration: ' + str(segment_duration))

    if song_segments[segment_index][1] in ['verse', 'hook', 'bridge', 'outro']:
        to_add = []
        to_add_length = 0
        while to_add_length < segment_duration:
            print('preparing clip: ' +
                  raw_filler_file_names[get_filler_index()])
            clip = VideoFileClip(filler_folder +
                                 raw_filler_file_names[get_filler_index()])
            to_add_length += clip.duration
            to_add.append(clip)
            filler_index += 1
        ratio = segment_duration / to_add_length
        for clip in to_add:
            clip = clip.set_duration(clip.duration * ratio)
            videos.append(clip)
            print('added clip of length ' + str(clip.duration))
            total_video_time += clip.duration
        segment_index += 1
    elif song_segments[segment_index][1] in ['prechorus']:
        to_add = []
        to_add_length = 0
        orig_clip_index = clip_index
        clip_count = math.floor(segment_duration / partial_period)
    global args
    script_start_time = time.time()

    parser = argparse.ArgumentParser(description='DetectNet - DIGITS')

    ### Positional arguments

    parser.add_argument('caffemodel',   help='Path to a .caffemodel')
    parser.add_argument('deploy_file',  help='Path to the deploy file')
    parser.add_argument('video_file',   help='Path to a video')
    parser.add_argument('output_video_file',   help='Path to output video name')

    ### Optional arguments

    parser.add_argument('-m', '--mean',
            help='Path to a mean file (*.npy)')
    parser.add_argument('--batch-size',
                        type=int)
    parser.add_argument('--nogpu',
            action='store_true',
            help="Don't use the GPU")
 
    args = vars(parser.parse_args())

    project_output = args['output_video_file']
    clip1 = VideoFileClip(args['video_file'])
    white_clip = clip1.fl_image(detect_car)
    white_clip.write_videofile(project_output, audio=False)

print('Video took %f seconds.' % (time.time() - script_start_time))
Example #29
def getVideoDuration(videoURL):
    clip = VideoFileClip(videoURL)
    duration = clip.duration
    clip.close()  # release the ffmpeg reader before returning
    return duration
Example #30
def create_video():
    clip_output = 'output_videos/test_video.mp4'
    clip = VideoFileClip("test_videos/test_video.mp4")
    clip_process = clip.fl_image(process_video)
    clip_process.write_videofile(clip_output, audio=False)