Example 1
def get_clip(video_title, start=(0,0), seconds=5):
    video = VideoFileClip(video_title)

    end = (start[0], start[1]+seconds)
    clip = video.subclip(start, end)

    return clip
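A minimal usage sketch for the helper above, assuming moviepy is installed; "input.mp4" and "excerpt.mp4" are placeholder filenames, and the (minutes, seconds) start tuple is the format moviepy accepts:

from moviepy.editor import VideoFileClip  # import required by get_clip above

clip = get_clip("input.mp4", start=(1, 30), seconds=5)  # 5-second excerpt starting at 1:30
clip.write_videofile("excerpt.mp4")                      # codec is inferred from the extension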
Example 2
def make_small_gif(gif_path="F:\\input_gif.gif"):
    """
    shrink gif
    """
    output_file = 'F:\\output.gif'
    video = VideoFileClip(gif_path).resize(0.6)
    length = int(video.fps * video.duration)
    
    video.write_gif(output_file,
                   fps=video.fps,
                   program="ffmpeg")
    

    new_frame_speed = int(1/(video.fps/2) * 100)
          
    options = ["gifsicle",
               output_file,
               '-O3',
               '-o', output_file
               
               ]
    result = subprocess.Popen(options,
                              stdout = subprocess.PIPE,
                              stderr = subprocess.STDOUT)
    for l in result.stdout.readlines():
        print l    
Example 3
def create_video_thumbnails(repo, file_id, path, size, thumbnail_file, file_size):

    t1 = timeit.default_timer()
    token = seafile_api.get_fileserver_access_token(repo.id,
            file_id, 'view', '', use_onetime=False)

    if not token:
        return (False, 500)

    inner_path = gen_inner_file_get_url(token, os.path.basename(path))
    clip = VideoFileClip(inner_path)
    tmp_path = str(os.path.join(tempfile.gettempdir(), '%s.png' % file_id[:8]))

    clip.save_frame(tmp_path, t=THUMBNAIL_VIDEO_FRAME_TIME)
    t2 = timeit.default_timer()
    logger.debug('Create thumbnail of [%s](size: %s) takes: %s' % (path, file_size, (t2 - t1)))

    try:
        ret = _create_thumbnail_common(tmp_path, thumbnail_file, size)
        os.unlink(tmp_path)
        return ret
    except Exception as e:
        logger.error(e)
        os.unlink(tmp_path)
        return (False, 500)
Example 4
def save_out(tracks, outfile=None, filetype='mp4'):

    out = []

    vids = [t for t in tracks if t['type'] == 'vid']
    texts = [t for t in tracks if t['type'] == 'text']

    for v in vids:
        c = VideoFileClip(v['content']).subclip(v['in'], v['in'] + v['duration'])
        c = c.set_start(v['start'])
        out.append(c)

    size = out[0].size

    for t in texts:
        c = create_sub(t['content'], size, rect_offset=195, min_height=55)
        c = c.set_start(t['start'])
        c = c.set_duration(t['duration'])
        out.append(c)

    final_clip = CompositeVideoClip(out)
    if outfile is None:
        outfile = 'msg_' + str(int(time.time())) + '.mp4'
    if filetype == 'gif':
        outfile = outfile.replace('.mp4', '.gif')
        final_clip.speedx(1.7).write_gif(outfile, fps=7, loop=1)
    else:
        final_clip.write_videofile(outfile, fps=24, codec='libx264')
    return outfile
def create_movie_dataset(movie_path, target_folder):
  if not os.path.isdir(target_folder): os.makedirs(target_folder)
  video = VideoFileClip(movie_path)
  num_frames = int(video.fps * video.duration)
  video = video.set_fps(1).set_duration(num_frames).resize(0.5)
  first_frame = 650
  num_cpus = multiprocessing.cpu_count()

  saved_frames = set(
      int(digits) if digits else 0
      for digits in (''.join(ch for ch in name if ch.isdigit())
                     for name in os.listdir(target_folder)))
  num_done = len(saved_frames)
  if num_done == 0:
    offsets = np.random.randint(0, 10, num_frames - first_frame - 9)
    offset_file = os.path.join(target_folder, 'offsets.npz')
    np.savez_compressed(offset_file, offsets=offsets)

  frames_per_process = (num_frames - first_frame) / num_cpus
  for i in xrange(num_cpus):
    start_i = i * frames_per_process + first_frame
    end_i = num_frames if i == num_cpus - 1 else start_i + frames_per_process
    print start_i, end_i
    multiprocessing.Process(
      target=create_movie_process,
      args=(video, target_folder, start_i, end_i, first_frame, i, saved_frames)
    ).start()

  return True
Example 6
class MoviePyReader(FramesSequence):
    class_priority = 4
    @classmethod
    def class_exts(cls):
        return {'mov', 'mp4', 'avi', 'mpeg', 'wmv', 'mkv'}
    def __init__(self, filename):
        if VideoFileClip is None:
            raise ImportError('The MoviePyReader requires moviepy to work.')
        self.clip = VideoFileClip(filename)
        self.filename = filename
        self._fps = self.clip.fps
        self._len = int(self.clip.fps * self.clip.end)

        first_frame = self.clip.get_frame(0)
        self._shape = first_frame.shape
        self._dtype = first_frame.dtype

    def get_frame(self, i):
        return Frame(self.clip.get_frame(i / self._fps), frame_no=i)

    def __len__(self):
        return self._len

    @property
    def frame_shape(self):
        return self._shape

    @property
    def frame_rate(self):
        return self._fps

    @property
    def pixel_type(self):
        return self._dtype  
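A short usage sketch for the reader class above, assuming pims (for Frame/FramesSequence) and moviepy are installed; "movie.mp4" is a placeholder path:

reader = MoviePyReader("movie.mp4")
print(len(reader), reader.frame_rate, reader.frame_shape, reader.pixel_type)
frame = reader.get_frame(10)   # pims Frame wrapping the frame at index 10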
def processProjectVideo():
    CHALLENGEVIDEOOUTPUT = 'test_videos_output/ChallengeVideoOutput%04d.mp4'
    PROJECTVIDEOOUTPUT = './output_images/video/ProjectVideoOutput_%04d-%04d.mp4'
    CLIPLENGTH=10 # process 10 second clips
    ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
    ## To do so add .subclip(start_second,end_second) to the end of the line below
    ## Where start_second and end_second are integer values representing the start and end of the subclip
    ## You may also uncomment the following line for a subclip of the first 5 seconds
    ##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
    
    video = VideoFileClip('./project_video.mp4')
    #video = VideoFileClip("./test_video.mp4")

    duration = video.duration
    #duration=31.4
    numberOfClips=int(round(duration/float(CLIPLENGTH)+0.5))
    print ("processProjectVideo-duration:", duration, ", CLIPLENGTH:", CLIPLENGTH, ", numberOfClips:", numberOfClips)
    for clipNumber in range(0, numberOfClips): # 
        clipStart=clipNumber*CLIPLENGTH
        clipStop=min((clipNumber+1)*CLIPLENGTH,duration)
        print ("processProjectVideo-clipNumber:", clipNumber, ", clipStart:", clipStart, ", clipStop:", clipStop)
        videoClip = video.subclip(clipStart,clipStop)
        annotatedClip = videoClip.fl_image(process_image)
        videoFileName=PROJECTVIDEOOUTPUT % (clipStart,clipStop)
        print ("processProjectVideo-videoFileName:", videoFileName)
        annotatedClip.write_videofile(videoFileName, audio=False)
    def add_intro(self):
        for _, _, filenames in os.walk('../intros'):
            choice = os.path.join("../intros", random.choice(filenames))
            sys.stdout.write("Adding intro: %s\n" % choice)
            clip = VideoFileClip(choice)
            clip = clip.set_start(0)
 
        return clip
def decrypt_video(filename, t0=56):
	vid = VideoFileClip(filename)

	vid.save_frame("frame.png", t=t0+0.05)

	img = Image.open("frame.png").convert(mode='RGB')
	msg = stepic.decode(img)

	return msg 
Example 10
def avi_to_gif(odbname):
    """converts an avi video file to an animated gif so that it can easily be
        easily inserted into a pptx
        avidir = 'customBeamExample'
    """
    avinames=glob.glob(os.path.dirname(odbname)+'/*.avi')
    for aviname in avinames:
        clip = VideoFileClip(aviname)
        clip.write_gif(os.path.splitext(aviname)[0]+'.gif')
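A hedged usage example, assuming glob, os and moviepy are imported as the function requires; only the directory part of odbname is used, and the path below is a placeholder:

avi_to_gif('customBeamExample/model.odb')   # converts every .avi found next to the .odb into a .gif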
Example 11
def gen_gif(gif_video_id):
    gif_video = GifVideo.objects.get(pk=gif_video_id)
    vid = download_video(gif_video.video)

    clip = VideoFileClip(vid).subclip((0, 0), (0, 5)).resize((320, 240))
    clip.to_gif(
        "./gif_gallery/static/gif_gallery/gifs/{}.gif".format(vid),
        program='ffmpeg',
        fps=5)

    gif_video.gif_uri = '{}.gif'.format(vid)
    gif_video.save()
    os.remove(vid)
Example 12
def average_video(filepath, outpath, start=None, end=None, sample_every=1):
    """Calculate average of video frames"""

    # Load video
    vid = VideoFileClip(filepath, audio=False)
    width = vid.w
    height = vid.h

    if start is None and end is None:
        frame_generator = vid.iter_frames(progress_bar=True, dtype=np.uint8)
    else:
        if start is None:
            start = 0
        if end is None:
            end = vid.duration
        # compute time increment for sampling by frames
        sample_inc = sample_every / vid.fps
        frame_generator = tqdm(vid.get_frame(f) for f in frange(start, end, sample_inc))

    # create starting matrix of zeros
    sum_fs = np.zeros(shape=(height, width, 3), dtype=int)
    ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)
    prev_f = np.zeros(shape=(height, width, 3), dtype=int)
    sum_delta_fs = np.zeros(shape=(height, width, 3), dtype=int)

    n_frames = 0
    for f in frame_generator:
        delta = f - prev_f
        sum_delta_fs += delta
        sum_fs += f

        ma_sum_fs += f
        if divmod(n_frames, 100)[1] == 0 and n_frames > 0:
            ma_f = ma_sum_fs / 100
            Image.fromarray(ma_f.astype(np.uint8))\
                .save(os.path.join(outpath, 'movavg_{}.png'.format(n_frames)))
            ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)

        n_frames += 1
        prev_f = f

    # average out the values for each frame
    average_delta_f = sum_delta_fs / n_frames
    average_f = sum_fs / n_frames

    # Create images
    delta_img = Image.fromarray(average_delta_f.astype(np.uint8))
    delta_img.save(os.path.join(outpath, 'average_delta.png'))
    final_img = Image.fromarray(average_f.astype(np.uint8))
    final_img.save(os.path.join(outpath, 'average.png'))
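A usage sketch, assuming the modules used above (numpy, PIL.Image, tqdm, moviepy) are available and that frange is the float-range helper referenced in the sampling branch but not shown in this snippet; paths are placeholders:

average_video("input.mp4", "out_frames",           # writes average.png / average_delta.png into out_frames
              start=10, end=60, sample_every=2)     # sample every 2nd frame between 10 s and 60 s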
Example 13
def create_compilation(filename, index):
    dims = get_video_dimensions(filename)
    subclips = []
    video_file = VideoFileClip(filename)
    for label in sorted(index.keys()):
        label_img_filename = create_title_frame(label_as_title(label), dims)
        label_clip = ImageClip(label_img_filename, duration=2)
        os.remove(label_img_filename)
        subclips.append(label_clip)
        for region in index[label]:
            subclip = video_file.subclip(*region)
            subclips.append(subclip)
    if not subclips: return None
    return concatenate_videoclips(subclips)
def create_trump_dataset(movie_path, target_folder):
  # To write video to file: clip.write_videofile(outfile, codec='libx264', audio_codec='aac', temp_audiofile='china-%02d.m4a' % i, remove_temp=True)
  # moviepy help: http://zulko.github.io/blog/2014/06/21/some-more-videogreping-with-python/ 
  #               https://zulko.github.io/moviepy/ref/ref.html
  cuts = [(1.7, 2.5), (4.2, 4.6), (4.7, 5.2), (5.35, 5.93), (5.95, 6.45), (6.45, 6.95), (7, 7.34), (7.38, 7.82), (7.85, 8.24), (8.44, 9.04), (9.43, 9.7), (16.44, 16.7), (16.77, 17), (17, 17.31), (17.39, 17.67), (17.9, 18), (18.56, 18.8), (19, 19.4), (19.41, 19.75), (19.78, 20), (20.75, 21), (21, 21.52), (21.9, 22.41), (23, 23.52), (23.7, 23.96), (24.4, 24.7), (24.73, 24.98), (25, 25.38), (26.63, 27.15), (30, 30.36), (31.3, 31.77), (31.9, 32.16), (32.2, 32.5), (32.9, 33.16), (33.23, 33.4), (33.47, 33.79), (33.81, 34.25), (34.3, 34.65), (34.75, 35.23), (35.27, 35.95), (36.03, 36.59), (36.63, 37.04), (38.66, 39.1), (39.85, 40.3), (40.4, 40.75), (40.83, 41.271), (41.59, 41.95), (42.96, 43.33), (43.58, 43.88), (44, 44.6), (47, 47.48), (50.45, 50.75), (51, 51.33), (52.15, 52.48), (58.3, 58.55), (59, 59.4), (60, 60.4), (61.35, 61.71), (62.44, 62.8), (64.3, 64.6), (65.15, 65.58), (67.45, 67.8), (68.8, 69.15), (69.27, 69.6), (70.63, 70.97), (71, 71.4), (72.35, 72.8), (73.3, 73.7), (74.2, 74.61), (76, 76.9), (80.3, 80.65), (81.1, 81.4), (82.4, 82.75), (83.52, 84), (84.14, 84.49), (85.3, 85.6), (86.1, 86.4), (86.8, 87), (87.1, 87.48), (88, 88.2), (88.9, 89.37), (90.3, 90.7), (90.9, 91.2), (91.3, 91.5), (91.55, 91.78), (91.79, 92.06), (92.33, 92.67), (93.3, 93.55), (94.2, 94.5), (96.6, 96.96), (98, 98.44), (98.9, 99.1), (99.14, 99.53), (100.68, 100.92), (100.93, 101.25), (101.45, 101.8), (102.7, 102.96), (103.7, 104), (105.2, 105.7), (105.88, 106.1), (106.2, 106.6), (106.65, 107), (107.05, 107.85), (108.57, 109), (109.1, 109.48), (110.24, 110.74), (113.5, 113.85), (115.12, 115.4), (115.8, 116.25), (116.56, 116.95), (117.95, 118.35), (118.9, 119.3), (119.6, 120.2), (120.4, 120.9), (121.48, 121.9), (122.95, 123.25), (124.25, 124.65), (125, 125.39), (129.58, 129.9), (130.9, 131.3), (131.8, 132.15), (135, 135.5), (135.75, 136.1), (136.2, 136.65), (137, 137.4), (138.55, 138.8), (145.3, 145.75), (152.1, 152.5), (154.8, 155.25), (156.68, 156.95), (157.3, 157.8), (159.4, 159.78), (159.8, 160), (160.46, 160.8), (162.6, 163), (163.9, 164.18), (164.25, 164.63), (164.64, 165.1), (165.33, 165.7), (165.73, 166.1), (166.28, 166.58), (166.6, 167.06), (167.27, 167.65), (167.69, 168), (168.05, 168.45), (168.93, 169.25), (169.28, 169.6), (169.7, 170.15), (171.82, 172.24), (172.8, 173.1), (173.2, 173.6), (174.6, 175.04), (175.2, 175.6), (177, 177.35), (178.55, 178.97)]
  video = VideoFileClip(movie_path)
  subclips = [video.subclip(start, end) for (start, end) in cuts]
  for i in xrange(len(subclips)):
    clip = subclips[i]
    video_outfile = os.path.join(target_folder, 'video', 'china-%03d.mp4' % i)
    audio_outfile = os.path.join(target_folder, 'audio', 'china-%03d.m4a' % i)
    clip.write_videofile(video_outfile, codec='libx264', audio=False)
    clip.audio.write_audiofile(audio_outfile, codec='aac')
  return True
Example 15
def video2rollscan(videofile, focus, start=0, end=None, savefile=None):
    """
    
    Makes a scan of the roll from the video.
    Requires the Python module MoviePy
    
    Parameters
    -----------
    
    videofile
        Any video file that MoviePy (FFMPEG) can read.
        
    focus
        A function ( f(image)->rectangular image ). For instance
        if the line of interest is defined by y=15 and x=10...230
        
        >>> focus = lambda im : im[ [15], 10:230 ]
        
    start,end
        Where to start and stop, each one either in seconds, or in
        format `(minutes, seconds)`. By default `start=0` and `end`
        is the end of the video.
        
    savefile
        If provided, the scan image will be saved under this name.
    
    Returns
    --------
    
      A W*H*3 RGB picture of the piano roll made by stacking the focus
      lines of the different frames under one another.
    """

    from moviepy.editor import VideoFileClip

    video = VideoFileClip(videofile, audio=False)
    if end is None:
        end = video.duration
    video = video.subclip(start, end)

    tt = np.arange(0, video.duration, 1.0 / video.fps)
    result = np.vstack([focus(video.get_frame(t)) for t in tt])

    if savefile:
        import matplotlib.pyplot as plt

        plt.imsave(savefile, result)

    return result
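A usage sketch following the docstring, assuming "piano.mp4" is a placeholder video and the roll line of interest is y=15, x=10..230:

focus = lambda im: im[[15], 10:230]                   # one-pixel-high strip, as in the docstring
scan = video2rollscan("piano.mp4", focus,
                      start=(0, 5), end=(1, 30),      # scan the roll from 0:05 to 1:30
                      savefile="roll_scan.png")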
Example 16
def create_summary(filename, regions):
    """ Join segments

    Args:
        filename(str): filename
        regions(list): list of (start, end) pairs in seconds
    Returns:
        VideoFileClip: joined subclips in segment

    """
    subclips = []
    input_video = VideoFileClip(filename)
    last_end = 0
    for (start, end) in regions:
        subclip = input_video.subclip(start, end)
        subclips.append(subclip)
        last_end = end
    return concatenate_videoclips(subclips)
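A usage sketch for create_summary, assuming VideoFileClip and concatenate_videoclips are imported from moviepy.editor; "lecture.mp4" and "summary.mp4" are placeholder names:

regions = [(0, 4.5), (12, 18), (40, 47)]         # (start, end) pairs in seconds
summary = create_summary("lecture.mp4", regions)
summary.write_videofile("summary.mp4")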
Example 17
def convert_to_mp4_file(src_path, dest_path,
                        video_codec=DEFAULT_VIDEO_CODEC,
                        audio_codec=DEFAULT_VIDEO_AUDIO_CODEC):
    """
    Takes an existing movie file and converts it to mp4

    src_path (str): full path to the video file to be copied/processed

    dest_path (str): full path to where the video file should be copied

    video_codec (str): optional string to specify a codec. Otherwise
        all copied videos will be mp4 regardless of extension

    returns: dest_path
    """
    movie = VideoFileClip(src_path)
    movie.write_videofile(dest_path, codec=video_codec, audio_codec=audio_codec)
    return dest_path
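A usage sketch; DEFAULT_VIDEO_CODEC and DEFAULT_VIDEO_AUDIO_CODEC are constants defined elsewhere in the original project, so typical values are passed explicitly here for illustration:

convert_to_mp4_file("recording.mov", "recording.mp4",
                    video_codec="libx264", audio_codec="aac")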
Example 18
def average_video(filepath, outpath, start=None, end=None, sample_every=1):
    """Calculate average of video frames"""

    # Load video
    vid = VideoFileClip(filepath, audio=False).resize(width=66)
    width = vid.w
    height = vid.h

    if start is None and end is None:
        frame_generator = vid.iter_frames(progress_bar=True, dtype=np.uint8)
    else:
        if start is None:
            start = 0
        if end is None:
            end = vid.duration
        # compute time increment for sampling by frames
        sample_inc = sample_every / vid.fps
        frame_generator = tqdm(vid.get_frame(f) for f in frange(start, end, sample_inc))

    # create starting matrix of zeros
    sum_fs = np.zeros(shape=(height, width, 3), dtype=int)
    ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)
    prev_f = np.zeros(shape=(height, width, 3), dtype=int)
    sum_delta_fs = np.zeros(shape=(height, width, 3), dtype=int)

    n_frames = 0
    for f in frame_generator:
        #delta = f - prev_f
        #sum_delta_fs += delta
        #sum_fs += f

        #ma_sum_fs += f
        #if divmod(n_frames, 100)[1] == 0 and n_frames > 0:
        #    ma_f = ma_sum_fs / 100
        #    Image.fromarray(ma_f.astype(np.uint8))\
        #        .save(os.path.join(outpath, 'movavg_{}.png'.format(n_frames)))
        #    ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)

        #n_frames += 1
        #prev_f = f
        print len(f)
        time.sleep(1.0/float(sample_every))
Example 19
    def __init__(self, filename):
        if VideoFileClip is None:
            raise ImportError('The MoviePyReader requires moviepy to work.')
        self.clip = VideoFileClip(filename)
        self.filename = filename
        self._fps = self.clip.fps
        self._len = int(self.clip.fps * self.clip.end)

        first_frame = self.clip.get_frame(0)
        self._shape = first_frame.shape
        self._dtype = first_frame.dtype
Example 20
def excerpt_and_compile_video_file(src_path, dest_path, timestamps,
                                  left_padding=0.01,
                                  right_padding=0.01,
                                  video_codec=DEFAULT_VIDEO_CODEC,
                                  audio_codec=DEFAULT_VIDEO_AUDIO_CODEC):
    """
    creates a new video compiled from cuts of `src_path`

    timestamps (list): a sequence of tuples, in (start, end) (in seconds)
    """
    video = VideoFileClip(src_path)
    max_ts = video.duration
    clips = []
    for ts in timestamps:
        x = max(0, ts[0] - left_padding)
        y = min(max_ts, ts[1] + right_padding)
        clips.append(video.subclip(x, y))
    allclips = concatenate_videoclips(clips)
    allclips.write_videofile(dest_path, codec=video_codec, audio_codec=audio_codec)
    return dest_path
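A usage sketch, assuming "interview.mp4" and "highlights.mp4" are placeholder paths and each cut is padded slightly on both sides:

timestamps = [(12.0, 15.5), (42.3, 50.0), (61.0, 64.2)]   # (start, end) in seconds
excerpt_and_compile_video_file("interview.mp4", "highlights.mp4", timestamps,
                               left_padding=0.25, right_padding=0.25)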
Example 21
def run_moving_crash(args, target, outfile):
    """Runs a moving crash based on moving (gif/mp4) inputs"""
    video = VideoFileClip(target)
    img = video.get_frame(t=0)  # first frame of the video
    bounds = foreground.get_fg_bounds(img.shape[1], args.max_depth)
    max_depth = bounds.max_depth
    crash_params = crash.CrashParams(
        max_depth, args.threshold, args.bg_value, args.rgb_select)
    options = _options(args.reveal_foreground, args.reveal_background,
                       args.crash, args.reveal_quadrants, args.bg_value)
    frames = video.iter_frames(fps=video.fps)

    def make_frame(_):
        frame = next(frames)
        fg, bounds = foreground.find_foreground(frame, crash_params)
        return _process_img(frame, fg, bounds, options)

    output_video = VideoClip(
        make_frame, duration=video.duration-(4/video.fps))  # trim last 4 frms
    output_video.write_videofile(
        outfile, preset=args.compression, fps=video.fps,
        threads=args.in_parallel)
Example 22
def make_clip(path,out_dir,framerate,start,end):
    """
    create gifs - given path and timecode-
    """
    video = VideoFileClip(path)
    print "loaded"
    clip = video.subclip(start,end).without_audio()
    frame = clip.get_frame(1)

    """get average luma"""
    lumas = []
    for l in frame:
        for f in l:
            r,g,b = f
            luma =  0.2126*r + 0.7152*g + 0.0722*b
            lumas.append(luma)
            
    average_luma = round(sum(lumas)/float(len(lumas)),2)
    
    if average_luma < 40: #gets rid of dark scenes (bad scenes)
        return 0

    mp4_file = os.path.join(out_dir,'clip_{0}.mp4'.format(start))
    gif_file = os.path.join(out_dir,'clip_{0}.gif'.format(start))
    
    score = 0
    print "creating clip from {0} to {1}".format(start,end)
    
    if os.path.isfile(mp4_file) == False:
        clip.write_videofile(mp4_file,fps=framerate)
        score += 0.5

    if os.path.isfile(gif_file) == False:
        clip.write_gif(gif_file,
                       fps=framerate,
                       program="ffmpeg")
        score += 0.5
    return score
    def get_previous_bests_vid(self, option, currloc):
        dpoint = None
        dpointvidfldr = None

        sys.stdout.write("Searching for previous intervals best vine...\n")
        for prev_date in self.PREV_DATES:
            vidfolderloc = os.path.join(SECTIONS_PATH, option, 'videos', \
                                                                str(prev_date))
            datafolderloc = os.path.join(SECTIONS_PATH, option, 'data', \
                                                                str(prev_date))
         
            for _, _, filenames in os.walk(datafolderloc):
                for idx, item in enumerate(filenames):
                    with open(os.path.join(datafolderloc, item)) as json_file:
                        json_data = json.load(json_file)
         
                    clip = VideoFileClip(os.path.join(vidfolderloc, \
                                          str(json_data["postId"]) + ".mp4"))

                    if clip.duration > 5:
                        if not dpoint:
                            dpoint = json_data
                            dpointvidfldr = vidfolderloc
                        elif dpoint["loops"]["count"] < json_data["loops"]\
                                                                    ["count"]:
                            dpoint = json_data
                            dpointvidfldr = vidfolderloc
                    elif not dpoint and idx == len(filenames) - 1:
                        dpoint = json_data
                        dpointvidfldr = vidfolderloc
     
        vidfile = os.path.join(dpointvidfldr, str(dpoint["postId"]) + ".mp4")
        sys.stdout.write("Adding previous best vid: %s\n" % vidfile)
        clip = (VideoFileClip(vidfile).resize((1070, 640)).\
                                    set_start(currloc + 6).crossfadein(1).\
                                    set_position((450, 255)).set_duration(5))
         
        return clip.without_audio()
Example 24
def process_video(filename, video_height=480, overwrite=False):

    gif_name = 'gifs/' + filename + '.gif'

    if isfile(gif_name) and overwrite == False:
        print "Skipping " + gif_name + " as it already exists."
        return 
    
    video_file = VideoFileClip(filename)

    try:
        assert_approx_equal(float(video_file.w)/float(video_file.h),16.0/9.0)
        video_file = video_file.crop(x1=video_file.w/8, x2=7*video_file.w/8)
    except:
        print "Not resizing video."


    video_file = video_file.resize(height=video_height)

    end_image = video_file.to_ImageClip(0).set_duration(0.7)
    
    video_file = concatenate([video_file, end_image])

    logo_size = video_height/6
    text = ImageClip(
        expanduser("~/dropbox/bslparlour/twitter_logo2.png")).set_duration(
            video_file.duration).resize(width=logo_size).set_pos(
                (video_file.w-logo_size,video_file.h-logo_size))


    composite_video_file = CompositeVideoClip([video_file, text])
    composite_video_file.write_gif(gif_name,fps=20)

    fuzz_amt = 5
    commands = 'gifsicle "'+gif_name+'" -O3 | convert -fuzz '+str(fuzz_amt)+'% - -ordered-dither o8x8,16 -layers optimize-transparency "'+gif_name+'"'

    process = call(commands, shell=True)

    if getsize(gif_name) > 5*1024**2:
        process_video(filename, video_height=video_height*0.75, overwrite=True)
Example 25
def run_video():
    print("Run Video")

    from moviepy.editor import VideoFileClip

    file = "videos/challenge_video"
    
    clip = VideoFileClip("./" + file + ".mp4")
    output_video = "./" + file + "_processed.mp4"
    
    data_dir = './data'
    num_classes = 2

    global g_session
    global g_logits
    global g_keep_prob
    global g_input_image
    
    with tf.Session() as g_session:
        vgg_path = os.path.join(data_dir, 'vgg')

        correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')

        g_input_image, g_keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(g_session, vgg_path)
        layer_output = layers(layer3_out, layer4_out, layer7_out, num_classes)
        g_logits, train_op, cross_entropy_loss = optimize(layer_output, correct_label, learning_rate, num_classes)
        
        print("Restoring model...")
        saver = tf.train.Saver()
        saver.restore(g_session, "./model/semantic_segmentation_model.ckpt")
        print("Model restored.")

        output_clip = clip.fl_image(process_video_image)
        # output_clip = clip.subclip(0, 1).fl_image(process_video_image)
        output_clip.write_videofile(output_video, audio=False)
Example 26
def process_video(filename, overwrite=False, max_width=1600, max_height=1600, max_file_size=5*1024**2, gifdir='gifs/'):

    gif_name = gifdir + filename + '.gif'

    if isfile(gif_name) and overwrite == False:
        print "Skipping " + gif_name + " as it already exists."
        return 
    
    video_file = VideoFileClip(filename)

    try:
        assert_approx_equal(float(video_file.w)/float(video_file.h),16.0/9.0)
        video_file = video_file.crop(x1=video_file.w/8, x2=7*video_file.w/8)
    except:
        print "Not resizing video."

    if video_file.h > max_height:
        video_file = video_file.resize(height=max_height)

    if video_file.w > max_width:
        video_file = video_file.resize(width=max_width)

    end_image = video_file.to_ImageClip(video_file.end-(1/video_file.fps)).set_duration(0.7)
    
    video_file = concatenate([video_file, end_image])
    fadein_video_file = CompositeVideoClip(
        [video_file,
         (video_file.to_ImageClip()
          .set_duration(0.7)
          .crossfadein(0.4)
          .set_start(video_file.duration-0.7)),
     ]
    )
    
    logo_size = video_file.h/6
    text = ImageClip(
        expanduser("~/dropbox/bslparlour/twitter_logo2.png")).set_duration(
            video_file.duration).resize(width=logo_size).set_pos(
                (video_file.w-logo_size,video_file.h-logo_size))


    composite_video_file = CompositeVideoClip([fadein_video_file, text])
    composite_video_file.write_gif(gif_name,fps=20)

    fuzz_amt = 5
    commands = 'gifsicle "'+gif_name+'" -O3 | convert -fuzz '+str(fuzz_amt)+'% - -ordered-dither o8x8,16 -layers optimize-transparency "'+gif_name+'"'

    process = call(commands, shell=True)

    if getsize(gif_name) > max_file_size:
        process_video(filename,
                      max_height=video_file.h*0.95,
                      overwrite=True,
                      gifdir=gifdir,
                      max_file_size=max_file_size)
Example 27
	def openVideoFile(self):
		# open file explorer
		root = Tkinter.Tk()
		root.withdraw()
		# get video filename
		filename = tkFileDialog.askopenfilename(parent=root, title='Open file to encrypt')

		try:
			self.video = VideoFileClip(filename)
			print self.video.fps
		except Exception:
			print "Error in opening file - " + filename
			return None
		# create default txt filename
		self.txtFilename = filename[:filename.rfind(".")] + "_label.txt"
		# display txt filename
		self.ids.PathSaveFile.text = self.txtFilename
		# create temp directory for frames from video
		# self.dirPath = mkdtemp()
		self.dirPath = "asdf"
		makedirs("asdf")
		print self.video.duration
		# set number of frame
		self.numberOfFrame = 0
		# change winow size
		Window.size = (self.video.size[0], self.video.size[1] + pad)
		self.size = (self.video.size[0], self.video.size[1] + pad)

		self.firstFrame = True
		# find rectangle
		for obj in self.canvas.children:
			if type(obj) == Rectangle:
				self.rect = obj
		# set rectangle size
		self.rect.size = (self.size[0], self.size[1] - pad)
		# set frame to rectangle
		self.rect.source = self.GetFrameFilename()
Example 28
def video(filename, username, t0):
	# Orignal Video
	original = VideoFileClip("static/videos/"+filename+".mp4")

	first_half = VideoFileClip("static/videos/"+filename+".mp4").subclip(0, t0)
	second_half = VideoFileClip("static/videos/"+filename+".mp4").subclip(t0+1, original.duration)

	original.save_frame("static/videos/frame.png", t=t0)

	img = Image.open("static/videos/frame.png").convert(mode='RGB')
	stepic.encode_inplace(img, username)
	msg = stepic.decode(img)
	print(msg)
	img.save("static/videos/frame.png")

	encoded_clip = ImageClip('static/videos/frame.png', duration=1)

	new_mov = CompositeVideoClip([first_half.set_start(0),
								  encoded_clip.set_start(t0),
								  second_half.set_start(t0+1)])

	# Write the result to a file (many options available !)
	new_mov.write_videofile("static/"+username+"_"+filename+".avi", codec='png')
Example 29
        leftlane.recent_fitted = []
        rightlane.recent_fitted = []
        itr = 0
    return out_img


#######################
#      MAIN           #
#######################

# STEP 1: Calibrate camera
[objpoints, imgpoints] = calibrate()
leftlane = Line()
rightlane = Line()

### STEP 2: Process image/video

if (DEBUG):
    for file in glob.glob('test_images/*.jpg'):
        print(file)
        img = mpimg.imread(file)
        img = cv2.resize(img, (1280, 720), interpolation=cv2.INTER_AREA)
        out_img = process_image(img)
        cv2.imwrite('./output_images\\' + file, out_img)

else:
    white_output = 'output_harder_challenge1_video.mp4'
    clip1 = VideoFileClip("harder_challenge_video.mp4")
    white_clip = clip1.fl_image(process_image)
    white_clip.write_videofile(white_output, audio=False)
    max_line_gap = 20  # maximum gap in pixels between connectable line segments

    # Run Hough on edge detected image
    # Output "lines" is an array containing endpoints of detected line segments
    lines = hough_lines(
        masked_edges, rho, theta, threshold, min_line_length, max_line_gap,
        calibrationData
    )  #,lowerLeftVertex, upperLeftVertex, upperRightVertex, lowerRightVertex)

    return weighted_img(lines, myImage)


if __name__ == '__main__':

    yellow_output = 'test_videos_output/solidYellowLeftWithLanes.mp4'
    #yellow_output = 'test_videos_output/solidWhiteRightWithLanes.mp4'
    ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
    ## To do so add .subclip(start_second,end_second) to the end of the line below
    ## Where start_second and end_second are integer values representing the start and end of the subclip
    ## You may also uncomment the following line for a subclip of the first 5 seconds
    clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')  #.subclip(12,17)
    #clip2 = VideoFileClip('test_videos/solidWhiteRight.mp4')#.subclip(12,17)
    yellow_clip = clip2.fl_image(findLanes)
    yellow_clip.write_videofile(yellow_output, audio=False)

    HTML("""
    <video width="960" height="540" controls>
      <source src="{0}">
    </video>
    """.format(yellow_output))
Example 31
                                                 ystart=y_start,
                                                 ystop=y_stop)
        lanefinder = Lanefinder()
        processed_image = lanefinder.distortion_correction(image)
        processed_image = frame_classificator.classify_frame(processed_image)

        processed_image = cv2.cvtColor(processed_image, cv2.COLOR_BGR2RGB)
        processed_image = lanefinder.process_image(processed_image)
        processed_image = cv2.cvtColor(processed_image, cv2.COLOR_RGB2BGR)

        cv2.imwrite(path.join('./output_images', 'combi_' + test_image_name),
                    processed_image)

if PROCESS_MOVIES:
    for test_movie_name in glob.glob('*.mp4'):
        clip = VideoFileClip(test_movie_name)
        frame_classificator = FrameClassificator((clip.h, clip.w),
                                                 SETTINGS['feature'],
                                                 svm,
                                                 dt,
                                                 x_scaler,
                                                 x_pca,
                                                 scales=scales,
                                                 ystart=y_start,
                                                 ystop=y_stop,
                                                 convert_color=True,
                                                 draw_debug=True)
        new_clip = clip.fl_image(frame_classificator.classify_frame)
        new_clip.write_videofile(path.join('./output_videos', test_movie_name),
                                 audio=False)
Example 32
# should separate the training and predicting in two different processes
clf, scaler = train_vehicle_detection_classifier(cars,
                                                 notcars,
                                                 color_space=color_space,
                                                 orient=orient,
                                                 pix_per_cell=pix_per_cell,
                                                 cell_per_block=cell_per_block,
                                                 hog_channel=hog_channel,
                                                 spatial_size=spatial_size,
                                                 hist_bins=hist_bins,
                                                 spatial_feat=spatial_feat,
                                                 hist_feat=hist_feat,
                                                 hog_feat=hog_feat,
                                                 test_size=test_size)

# 3) store the classifier in pickle file which will be used later
joblib.dump(clf, 'classifer.pkl')
joblib.dump(scaler, 'scaler.pkl')

print('saved classifier and scaler')

# 4) generate videos
# video read in and process frame by frame
project_output = 'project_video_out1.mp4'
clip = VideoFileClip("project_video.mp4")
test_clip = clip.fl_image(process_img)
test_clip.write_videofile(project_output, audio=False)

print('done generating video output')
Example 33
def video_duration(video_path):
    clip = VideoFileClip(video_path)
    return clip.duration
        return result


if __name__ == '__main__':

    cal_input = ROOT + 'models/calibration.pkl'
    calibrator = Calibrator(path=cal_input)
    unwarp_input = ROOT + 'models/unwarp.pkl'
    unwarper = Unwarper(path=unwarp_input)
    line_detector = LineDetector(calibrator=calibrator,
                                 unwarper=unwarper,
                                 keep_n=10,
                                 alpha=0.6)

    model_path = ROOT +\
        'data/faster_rcnn/checkpoints/fasterrcnn_caffe_pretrain.pth'
    print('Loading model in {}...'.format(model_path))
    car_detector = FasterRCNNCarDetector(model_path)

    oracle_detector = OracleDetector(line_detector=line_detector,
                                     car_detector=car_detector)

    video_input = ROOT + 'video/project_video.mp4'
    video_output = ROOT + 'video/project_video_labeled_fasterrcnn.mp4'
    clip1 = VideoFileClip(video_input)

    # NOTE: this function expects color images!!
    white_clip = clip1.fl_image(oracle_detector.process)
    white_clip.write_videofile(video_output, audio=False)
Example 35
    args = parser.parse_args()
    mtx, dist = calibrate_camera()
    Left = Line()
    Right = Line()

    SSD_GRAPH_FILE = './models/ssd_inception_v2_coco_2017_11_17/frozen_inference_graph.pb'

    categories = ['person','bicycle','car','motorcycle','airplane','bus','train','truck','boat','traffic light',
                  'fire hydrant','','stop sign','parking meter','bench','bird','cat','dog','horse','sheep','cow',
                  'elephant','bear','zebra','giraffe','backpack','umbrella','handbag','tie','suitcase','frisbee',
                  'skis','snowboard','sports ball']

    cmap = ImageColor.colormap
    COLOR_LIST = sorted([c for c in cmap.keys()])

    detection_graph = load_graph(SSD_GRAPH_FILE)

    with tf.Session(graph=detection_graph) as sess:
        image_tensor = sess.graph.get_tensor_by_name('image_tensor:0')
        detection_boxes = sess.graph.get_tensor_by_name('detection_boxes:0')
        detection_scores = sess.graph.get_tensor_by_name('detection_scores:0')
        detection_classes = sess.graph.get_tensor_by_name('detection_classes:0')
        if args.mode == 'video':
            clip = VideoFileClip(args.input)
            video_output = args.output
            output_clip = clip.fl_image(pipeline)
            output_clip.write_videofile(video_output, audio=False)
        else:
            image = mpimg.imread(args.input)
            mpimg.imsave(args.output, pipeline(image))
    return blend_output


if __name__ == '__main__':

    # first things first: calibrate the camera
    ret, mtx, dist, rvecs, tvecs = distortionCorrection(
        calib_images_dir='camera_cal')

    mode = 'video'

    if mode == 'video':

        selector = 'project'
        clip = VideoFileClip('{}_video.mp4'.format(selector)).fl_image(
            lambda image: laneDetection(image, mtx, dist, keep_state=True))
        clip.write_videofile('out_{}_{}.mp4'.format(selector, time_window),
                             audio=False)

    else:

        test_img_dir = 'test_images'
        for test_img in os.listdir(test_img_dir):

            frame = cv2.imread(os.path.join(test_img_dir, test_img))

            blend = laneDetection(frame, mtx, dist, keep_state=False)

            cv2.imwrite('output_images/{}'.format(test_img), blend)

            plt.imshow(cv2.cvtColor(blend, code=cv2.COLOR_BGR2RGB))
Example 37
                    bbox = ((np.min(nonzerox), np.min(nonzeroy)),
                            (np.max(nonzerox), np.max(nonzeroy)))

                    # Draw the box on the image
                    cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)
                else:
                    bbox = None

            else:
                bbox = None

            # Add the final drawn box to the accumulated respective vehicle
            self.vehicles[vehicle_number - 1].update_pos(bbox)

        # Return the image
        return img


vehicle_detector = VehicleDetector()


def vehicle_detect(img):
    vehicle_detector.process_frame(np.copy(img))
    return vehicle_detector.smooth_vehicle_boxes(img)


video_file = 'project_video.mp4'
track_output = 'tracked_' + video_file
clip = VideoFileClip(video_file)  #.subclip(7)
video_clip = clip.fl_image(vehicle_detect)
video_clip.write_videofile(track_output, audio=False)
Example 38
        _, _ = im_detect(net, im)

    im_dir = '/home/student/cmpe295-masters-project/faster-rcnn-resnet/data/input/'
    im_dir += '/*'
    bsdr = glob.glob(im_dir)

    if args.video_mode:
        ## Process video files as an input
        vid_out_dir = '/home/student/cmpe295-masters-project/faster-rcnn-resnet/data/output/'
        vid_dir = '/home/student/cmpe295-masters-project/faster-rcnn-resnet/data/input/'
        vid_dir += '*'
        v_dir = glob.glob(vid_dir)
        # clip = VideoFileClip("/home/student/cmpe295-masters-project/faster-rcnn-resnet/data/demo/P1_example.mp4")
        for video in v_dir:
            # print 'Demo for video {}'.format(video)
            clip = VideoFileClip(video)
            c = clip.duration
            frames = clip.fps
            print('duration of the file is {}'.format(c))
            print('Frames per second =  {}'.format(frames))

            start = time.time()
            # Transform video and perform image flip
            new_clip = clip.fl_image(demoVideo)

            ## Preview the processed video
            # new_clip.preview(fps=frames)
            # new_clip.preview()

            # Write a video to a file
            vid_out_dir = '/home/student/cmpe295-masters-project/faster-rcnn-resnet/data/output/'
Example 39
    right_fit = prediction[3:]

    fity = np.linspace(0, other.shape[0] - 1, other.shape[0])
    fit_leftx = left_fit[0] * fity**2 + left_fit[1] * fity + left_fit[2]
    fit_rightx = right_fit[0] * fity**2 + right_fit[1] * fity + right_fit[2]

    warp_zero = np.zeros_like(other).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    pts_left = np.array([np.transpose(np.vstack([fit_leftx, fity]))])
    pts_right = np.array(
        [np.flipud(np.transpose(np.vstack([fit_rightx, fity])))])
    pts = np.hstack((pts_left, pts_right))

    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    newwarp = cv2.warpPerspective(color_warp, Minv,
                                  (image.shape[1], image.shape[0]))

    result = cv2.addWeighted(image, 1, newwarp, 0.3, 0)

    return result


vid_output = 'reg_vid.mp4'

clip1 = VideoFileClip("Videos/1.MOV")

vid_clip = clip1.fl_image(road_lines)
vid_clip.write_videofile(vid_output, audio=False)
Example 40
def process_video(in_clip_name, out_clip_name):
    """process the video, this must be run command line!"""
    in_clip = VideoFileClip(in_clip_name)  #.subclip(0,20)
    out_clip = in_clip.fl_image(pl.process_image)
    out_clip.write_videofile(out_clip_name, audio=False)
    print('total bad {0}'.format(pl.total_bad))
Example 41
def extract_frames(config,mode='automatic',algo='kmeans',crop=False,checkcropping=False, userfeedback=True,cluster_step=1,cluster_resizewidth=30,cluster_color=False, Screens=1,scale_w=.8,scale_h=.8,opencv=True):
    """
    Extracts frames from the videos in the config.yaml file. Only the videos in the config.yaml will be used to select the frames.\n
    Use the function ``add_new_video`` at any stage of the project to add new videos to the config file and extract their frames.
    
    The provided function either selects frames from the videos in a random but temporally uniformly distributed way (uniform), \n 
    by clustering based on visual appearance (k-means), or by manual selection. 
    
    Three important parameters for automatic extraction: numframes2pick, start and stop are set in the config file. 
    
    Please refer to the user guide for more details on methods and parameters https://www.biorxiv.org/content/biorxiv/early/2018/11/24/476531.full.pdf
    
    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.
        
    mode : string
        String containing the mode of extraction. It must be either ``automatic`` or ``manual``.
        
    algo : string 
        String specifying the algorithm to use for selecting the frames. Currently, deeplabcut supports either ``kmeans`` or ``uniform`` based selection. This flag is
        only required for ``automatic`` mode and the default is ``uniform``. For uniform, frames are picked in temporally uniform way, kmeans performs clustering on downsampled frames (see user guide for details).
        Note: color information is discarded for kmeans, thus e.g. for camouflaged octopus clustering one might want to change this. 
        
    crop : bool, optional
        If this is set to True, the selected frames are cropped based on the ``crop`` parameters in the config.yaml file. 
        The default is ``False``; if provided it must be either ``True`` or ``False``.
        
    checkcropping: bool, optional
        If this is set to True, the cropping parameters are overlayed in a plot of the first frame to check and the user can decide if the program should proceed 
        with those parameters, or perhaps edit them. The default is ``False``; if provided it must be either ``True`` or ``False``.
    
    userfeedback: bool, optional
        If this is set to false during automatic mode then frames for all videos are extracted. The user can set this to true, which will result in a dialog,
        where the user is asked for each video if (additional/any) frames from this video should be extracted. Use this, e.g. if you have already labeled
        some folders and want to extract data for new videos. 
    
    cluster_resizewidth: number, default: 30
        For k-means one can change the width to which the images are downsampled (aspect ratio is fixed).
    
    cluster_step: number, default: 1
        By default each frame is used for clustering, but for long videos one could only use every nth frame (set by: cluster_step). This saves memory before clustering can start, however, 
        reading the individual frames takes longer due to the skipping.
    
    cluster_color: bool, default: False
        If false then each downsampled image is treated as a grayscale vector (discarding color information). If true, then the color channels are considered. This increases 
        the computational complexity. 
    
    opencv: bool, default: True
        Uses OpenCV for loading & extracting (otherwise moviepy (legacy))
        

    The three parameters Screens=1,scale_w=.8,scale_h=.8 define the relative height (scale_h), relative width (scale_w) and number of screens (horizontally) and thereby 
    affect the dimensions of the manual frame extraction GUI.
        
    Examples
    --------
    for selecting frames automatically with 'kmeans' and want to crop the frames based on the ``crop`` parameters in config.yaml
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',True)
    --------
    for selecting frames automatically with 'kmeans' and considering the color information.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',cluster_color=True)
    --------
    for selecting frames automatically with 'uniform' and want to crop the frames based on the ``crop`` parameters in config.yaml
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic',crop=True)
    --------
    for selecting frames automatically with 'uniform', want to crop the frames based on the ``crop`` parameters in config.yaml and check for cropping
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic',crop=True,checkcropping=True)
    --------
    for selecting frames manually,
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual')
    
    While selecting the frames manually, you do not need to specify the ``crop`` parameter in the command. Rather, you will get a prompt in the graphic user interface to choose 
    if you need to crop or not.
    --------
    
    """
    import os
    import sys
    import yaml
    import numpy as np
    from pathlib import Path
    from skimage import io
    from skimage.util import img_as_ubyte
    import matplotlib.pyplot as plt
    import matplotlib.patches as patches
    from deeplabcut.utils import frameselectiontools

    if mode == "manual":
        wd = Path(config).resolve().parents[0]
        os.chdir(str(wd))
        from deeplabcut.generate_training_dataset import frame_extraction_toolbox 
        frame_extraction_toolbox.show(config,Screens,scale_w,scale_h)
        
    elif mode == "automatic":
        config_file = Path(config).resolve()
        with open(str(config_file), 'r') as ymlfile:
            cfg = yaml.load(ymlfile)
        print("Config file read successfully.")
        
        numframes2pick = cfg['numframes2pick']
        start = cfg['start']
        stop = cfg['stop']
        
        # Check for variable correctness
        if start>1 or stop>1 or start<0 or stop<0 or start>=stop:
            raise Exception("Erroneous start or stop values. Please correct it in the config file.")
        if numframes2pick<1 and not int(numframes2pick):
            raise Exception("Perhaps consider extracting more, or a natural number of frames.")
        
        videos = cfg['video_sets'].keys()
        if opencv:
            import cv2
        else:
            from moviepy.editor import VideoFileClip
        for vindex,video in enumerate(videos):
            #plt.close("all")
            coords = cfg['video_sets'][video]['crop'].split(',')
            
            if userfeedback:
                print("Do you want to extract (perhaps additional) frames for video:", video, "?")
                askuser = input("yes/no")
            else:
                askuser="******"
                
            if askuser=='y' or askuser=='yes' or askuser=='Ja' or askuser=='ha': # multilanguage support :)
                #indexlength = int(np.ceil(np.log10(clip.duration * clip.fps)))
                if opencv:
                    cap=cv2.VideoCapture(video)
                    fps = cap.get(5) #https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-get
                    nframes = int(cap.get(7))
                    duration=nframes*1./fps
                else:
                    #Moviepy:
                    clip = VideoFileClip(video)
                    fps=clip.fps
                    duration=clip.duration
                    nframes=int(np.ceil(clip.duration*1./fps))
                indexlength = int(np.ceil(np.log10(nframes)))
                if crop==True:
                    print("Make sure you change the crop parameters in the config.yaml file. The default parameters are set to the video dimensions.")
                    if opencv:
                        cap.set(2,start*duration)
                        ret, frame = cap.read()
                        if ret:
                            image=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    else:
                        image = clip.get_frame(start*clip.duration) #frame is accessed by index *1./clip.fps (fps cancels)
                    
                    fname = Path(video)
                    output_path = Path(config).parents[0] / 'labeled-data' / fname.stem
                    
                    if output_path.exists() and checkcropping==True:
                        fig,ax = plt.subplots(1)
                        # Display the image
                        ax.imshow(image)
                        # Create a Rectangle patch
                        rect = patches.Rectangle((int(coords[0]),int(coords[2])),int(coords[1])-int(coords[0]),int(coords[3])-int(coords[2]),linewidth=3,edgecolor='r',facecolor='none')
                        # Add the patch to the Axes
                        ax.add_patch(rect)
                        plt.show()
                        
                        print("The red boundary indicates how the cropped image will look.")
                        #saveimg = str(Path(config).parents[0] / Path('labeled-data','IsCroppingOK_'+fname.stem +".png")) 
                        #io.imsave(saveimg, image)
                        
                        msg = input("Is the cropping ok? (yes/no): ")
                        if msg == "yes" or msg == "y" or msg =="Yes" or msg == "Y":
                          if len(os.listdir(output_path))==0: #check if empty
                                #store full frame from random location (good for augmentation)
                                index=int(start*duration+np.random.rand()*duration*(stop-start))
                                if opencv:
                                    cap.set(1,index)
                                    ret, frame = cap.read()
                                    if ret:
                                        image=img_as_ubyte(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                                else:
                                    image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
                                    clip=clip.crop(y1 = int(coords[2]),y2 = int(coords[3]),x1 = int(coords[0]), x2 = int(coords[1])) #now crop clip
                                    
                                saveimg = str(output_path) +'/img'+ str(index).zfill(indexlength) + ".png"
                                io.imsave(saveimg, image)

                          else:
                              askuser=input ("The directory already contains some frames. Do you want to add to it?(yes/no): ")
                              if askuser=='y' or askuser=='yes' or askuser=='Y' or askuser=='Yes':
                                  #clip=clip.crop(y1 = int(coords[2]),y2 = int(coords[3]),x1 = int(coords[0]), x2 = int(coords[1]))
                                  pass
                              else:
                                  sys.exit("Delete the frames and try again later!")
                        else:
                          sys.exit("Correct the crop parameters in the config.yaml file and try again!")
                    
                    elif output_path.exists(): #cropping without checking:
                            index=int(start*duration+np.random.rand()*duration*(stop-start))
                            if opencv:
                                cap.set(1,index)
                                ret, frame = cap.read()
                                if ret:
                                    image=img_as_ubyte(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                            else:
                                image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
                                clip=clip.crop(y1 = int(coords[2]),y2 = int(coords[3]),x1 = int(coords[0]), x2 = int(coords[1]))
                            
                            saveimg = str(output_path) +'/img'+ str(index).zfill(indexlength) + ".png"
                            io.imsave(saveimg, image)
                            
                else:
                    numframes2pick=cfg['numframes2pick']+1 # without cropping a full size frame will not be extracted >> thus one more frame should be selected in next stage.
                    
                print("Extracting frames based on %s ..." %algo)
                if algo =='uniform': #extract n-1 frames (0 was already stored)
                    if opencv:
                        frames2pick=frameselectiontools.UniformFramescv2(cap,numframes2pick-1,start,stop)
                    else:
                        frames2pick=frameselectiontools.UniformFrames(clip,numframes2pick-1,start,stop)
                elif algo =='kmeans':
                    if opencv:
                        frames2pick=frameselectiontools.KmeansbasedFrameselectioncv2(cap,numframes2pick-1,start,stop,crop,coords,step=cluster_step,resizewidth=cluster_resizewidth,color=cluster_color)
                    else:
                        frames2pick=frameselectiontools.KmeansbasedFrameselection(clip,numframes2pick-1,start,stop,step=cluster_step,resizewidth=cluster_resizewidth,color=cluster_color)
                else:
                    print("Please implement this method yourself and send us a pull request! Otherwise, choose 'uniform' or 'kmeans'.")
                    frames2pick=[]
                
                output_path = Path(config).parents[0] / 'labeled-data' / Path(video).stem
                if opencv:
                    for index in frames2pick:
                            cap.set(1,index) #extract a particular frame
                            ret, frame = cap.read()
                            if ret:
                                image=img_as_ubyte(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                                img_name = str(output_path) +'/img'+ str(index).zfill(indexlength) + ".png"
                                if crop:
                                    io.imsave(img_name,image[int(coords[2]):int(coords[3]),int(coords[0]):int(coords[1]),:]) #y1 = int(coords[2]),y2 = int(coords[3]),x1 = int(coords[0]), x2 = int(coords[1]
                                else:
                                    io.imsave(img_name,image)
                            else:
                                print("Frame", index, " not found!")
                    cap.release()
                else:
                    for index in frames2pick:
                        try:
                            image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
                            img_name = str(output_path) +'/img'+ str(index).zfill(indexlength) + ".png"
                            io.imsave(img_name,image)
                            if np.var(image)==0: #constant image
                                print("Seems like black/constant images are extracted from your video. Perhaps consider using opencv under the hood, by setting: opencv=True")
                                
                        except FileNotFoundError:
                            print("Frame # ", index, " does not exist.")
                    
                    #close video. 
                    clip.close()
                    del clip
    else:
        print("Invalid MODE. Choose either 'manual' or 'automatic'. Check ``help(deeplabcut.extract_frames)`` on python and ``deeplabcut.extract_frames?`` \
              for ipython/jupyter notebook for more details.")
    
    print("\nFrames were selected.\nYou can now label the frames using the function 'label_frames' (if you extracted enough frames for all videos).")
Esempio n. 42
0
    warp_zero = np.zeros_like(binary_warped).astype(np.uint8)

    draw_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Draw the lane onto the warped blank image
    cv2.fillPoly(draw_warp, np.int_([pts]), (0,255, 0))
    

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    final_warp = perspectiveInv(draw_warp)
    
    # Combine the result with the original image
    result = cv2.addWeighted(img, 1, final_warp, 0.3, 0)
    
    line.detected = True
    line.leftx = leftx
    line.lefty = lefty
    line.righty = righty
    line.rightx = rightx
    return result

    
### Last step: Apply pipeline to project video

line = Lane_Line()

video_output = 'laneLineText.mp4'
clip1 = VideoFileClip("project_video.mp4")
video_clip = clip1.fl_image(pipeline) #NOTE: this function expects color images!!
%time video_clip.write_videofile(video_output, audio=False)
Esempio n. 43
0
print('')
print('Retraining with best hyper-parameters')

scaler = StandardScaler().fit(X_features)
X_features = scaler.transform(X_features)
svc = LinearSVC(C=best_c, penalty=best_penalty, loss=best_loss).fit(X_features, y_features)

vehicle_detector = vehicle.VehicleDetector(color_space=color_space,
                                  orient=orient,
                                  pix_per_cell=pix_per_cell,
                                  cell_per_block=cell_per_block,
                                  hog_channel=hog_channel,
                                  spatial_size=spatial_size,
                                  hist_bins=hist_bins,
                                  spatial_feat=spatial_feat,
                                  hist_feat=hist_feat,
                                  hog_feat=hog_feat,
                                  y_start_stop=y_start_stop,
                                  x_start_stop=x_start_stop,
                                  xy_window=xy_window,
                                  xy_overlap=xy_overlap,
                                  heat_threshold = 15,
                                  scaler=scaler,
                                  classifier=svc)

output_file = './processed_project_video.mp4'
input_file = './project_video.mp4'

clip = VideoFileClip(input_file)
out_clip = clip.fl_image(vehicle_detector.detect)
out_clip.write_videofile(output_file, audio=False)
Esempio n. 44
0
 def process_clip(self, path):
     video = VideoFileClip(path)  # .subclip(0.0, 0.2)
     processed = video.fl_image(self.process_frame)
     return processed
Esempio n. 45
0
from moviepy.editor import VideoFileClip
from process_image import process_image

white_output = 'solidYellowLeft.mp4'
#solidWhiteRight.mp4
#solidYellowLeft.mp4
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are numeric values (in seconds) marking the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds

#clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(1.95,2.1)
clip1 = VideoFileClip("test_videos/solidYellowLeft.mp4")

white_clip = clip1.fl_image(
    process_image)  #NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)
Esempio n. 46
0
def getLength(stt_id):
    filename = str(stt_id) + "/input/input.mp4"
    clip = VideoFileClip(filename)

    return clip.duration
Esempio n. 47
0
async def media_to_pic(event, reply, noedits=False):  # sourcery no-metrics
    mediatype = media_type(reply)
    if mediatype not in [
        "Photo",
        "Round Video",
        "Gif",
        "Sticker",
        "Video",
        "Voice",
        "Audio",
        "Document",
    ]:
        return event, None
    if not noedits:
        catevent = await edit_or_reply(
            event, "`Transfiguration Time! Converting to ....`"
        )

    else:
        catevent = event
    catmedia = None
    catfile = os.path.join("./temp/", "meme.png")
    if os.path.exists(catfile):
        os.remove(catfile)
    if mediatype == "Photo":
        catmedia = await reply.download_media(file="./temp")
        im = Image.open(catmedia)
        im.save(catfile)
    elif mediatype in ["Audio", "Voice"]:
        await event.client.download_media(reply, catfile, thumb=-1)
    elif mediatype == "Sticker":
        catmedia = await reply.download_media(file="./temp")
        if catmedia.endswith(".tgs"):
            catcmd = f"lottie_convert.py --frame 0 -if lottie -of png '{catmedia}' '{catfile}'"
            stdout, stderr = (await runcmd(catcmd))[:2]
            if stderr:
                LOGS.info(stdout + stderr)
        elif catmedia.endswith(".webp"):
            im = Image.open(catmedia)
            im.save(catfile)
    elif mediatype in ["Round Video", "Video", "Gif"]:
        await event.client.download_media(reply, catfile, thumb=-1)
        if not os.path.exists(catfile):
            catmedia = await reply.download_media(file="./temp")
            clip = VideoFileClip(catmedia)  # use the downloaded media file (catmedia)
            try:
                clip.save_frame(catfile, 0.1)
            except Exception:
                clip.save_frame(catfile, 0)
    elif mediatype == "Document":
        mimetype = reply.document.mime_type
        mtype = mimetype.split("/")
        if mtype[0].lower() == "image":
            catmedia = await reply.download_media(file="./temp")
            im = Image.open(catmedia)
            im.save(catfile)
    if catmedia and os.path.lexists(catmedia):
        os.remove(catmedia)
    if os.path.lexists(catfile):
        return catevent, catfile, mediatype
    return catevent, None
Esempio n. 48
0
    x1 = int(box[1] * width)
    y1 = int(box[0] * height)
    x2 = int(box[3] * width)
    y2 = int(box[2] * height)

    return frame[y1:y2, x1:x2, :]


parser = argparse.ArgumentParser(description='Extracts traffic signs')
parser.add_argument('--input', help='video stream')
parser.add_argument('--output', help='output folder',
                    default='output/traffic_signs')

args = parser.parse_args()

clip = VideoFileClip(args.input)
object_data = ObjectData()
object_data.load(args.input + '.json')

instances = object_data.get_instances_key_appears('Traffic sign')

idx = 0
signs = []


for i, data in tqdm(enumerate(instances)):
    frame = clip.get_frame(float(data['time']) / clip.fps)
    for box in data['boxes']:
        sign = extract_box_from_frame(frame, box)
        cv2.imwrite(os.path.join(args.output, "sign{:05d}.jpg".format(idx)),
                    cv2.cvtColor(sign, cv2.COLOR_RGB2BGR))
        idx += 1  # one file per detected box, so boxes in the same frame do not overwrite each other
Esempio n. 49
0
from utils import *
from moviepy.editor import VideoFileClip
from IPython.display import HTML

test_output = 'test.mp4'
# clip = VideoFileClip('test_video.mp4')
clip = VideoFileClip('project_video.mp4')
test_clip = clip.fl_image(process_image)
test_clip.write_videofile(test_output, audio=False)
Esempio n. 50
0
 def setPath(self, filename):
     self.__file_name__ = filename
     self.__file_cv2__ = cv2.VideoCapture(filename)
     self.__file_ffmpeg__ = VideoFileClip(filename)
Esempio n. 51
0
    """
        Using pipeline to detect lane lines and vehicles on video
        """
    args = parse_arg(sys.argv)
    # load camera parameters (mtx, dist)
    mtx, dist = [], []
    with open('./AdvancedLaneLines/camera_cal/wide_dist_pickle.p', 'rb') as f:
        data = pickle.load(f)
        mtx, dist = data['mtx'], data['dist']
    # add matrix for perspective transform
    M, Minv = get_warped()

    # load the parameter of vehicles classifier
    param = None
    with open("./VehicleDetection/svc_pickle.p", "rb") as f:
        param = pickle.load(f)

    run = do_process()
    run.clf = param
    run.lane_param = (mtx, dist, M, Minv)

    # assign file name
    fn = args.inputFile

    project_output = args.outputFile
    clip1 = VideoFileClip(fn)
    proj_clip = clip1.fl_image(run.process_image)
    proj_clip.write_videofile(project_output, audio=False)
    plt.plot(run.n_box)
    plt.show()
Esempio n. 52
0
class VideoCheck():
    def __init__(self):
        self.__cap_prop_fps__ = cv2.CAP_PROP_FPS
        self.__cap_prop_frame_count__ = cv2.CAP_PROP_FRAME_COUNT
        self.__cap_frame_width__ = cv2.CAP_PROP_FRAME_WIDTH
        self.__cap_frame_height__ = cv2.CAP_PROP_FRAME_HEIGHT
        self.__file_name__ = None
        self.__file_cv2__ = None
        self.__file_ffmpeg__ = None
        pass

    def setPath(self, filename):
        self.__file_name__ = filename
        self.__file_cv2__ = cv2.VideoCapture(filename)
        self.__file_ffmpeg__ = VideoFileClip(filename)

    def getFileSize(self):
        u"""
        Get the file size (M: megabytes).
        """
        file_byte = os.path.getsize(self.__file_name__)
        return self.__sizeConvert__(file_byte)

    def getFileTimes(self):
        u"""
        Get the video duration (s: seconds).
        """
        clip = self.__file_ffmpeg__
        return self.__timeConvert__(clip.duration)

    def getFileAudio(self):
        audio = self.__file_ffmpeg__.audio
        t = 0.2  # time (seconds)
        audio_frame = audio.get_frame(t)
        print type(audio_frame)
        return audio

    def getFileFPS(self):
        fps = self.__file_cv2__.get(self.__cap_prop_fps__)
        return fps

    def getFileCount(self):
        count = self.__file_cv2__.get(self.__cap_prop_frame_count__)
        return int(count)

    def getFrameWidth(self):
        return int(self.__file_cv2__.get(self.__cap_frame_width__))

    def getFrameHeight(self):
        return int(self.__file_cv2__.get(self.__cap_frame_height__))

    def getFrameAtTime(self, t):
        tm_total = self.__file_ffmpeg__.duration  # duration in seconds
        print tm_total
        assert t < tm_total, 'video tm: {}'.format(tm_total)
        img_rgb = self.__file_ffmpeg__.get_frame(t)
        # return img_rgb[:,:,::-1]
        return img_rgb[:, :, (2, 1, 0)]

    def __sizeConvert__(self, size):  # unit conversion
        K, M, G = 1024, 1024**2, 1024**3
        size *= 1.0
        if size >= G:
            return '%.2f G Bytes' % (size / G)
        elif size >= M:
            return '%.2f M Bytes' % (size / M)
        elif size >= K:
            return '%.2f K Bytes' % (size / K)
        else:
            return str(size) + 'Bytes'

    def __timeConvert__(self, size):  # unit conversion
        M, H = 60, 60**2
        if size < M:
            return str(size) + u'秒'
        if size < H:
            return u'%s分钟%s秒' % (int(size / M), int(size % M))
        else:
            hour = int(size / H)
            mine = int(size % H / M)
            second = int(size % H % M)
            tim_srt = u'%s小时%s分钟%s秒' % (hour, mine, second)
            return tim_srt
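
A minimal usage sketch for the VideoCheck class above, assuming a local placeholder file 'sample.mp4' (Python 2 print style, matching the snippet):

checker = VideoCheck()
checker.setPath('sample.mp4')
print checker.getFileSize()    # e.g. '12.34 M Bytes'
print checker.getFileTimes()   # formatted duration string
print checker.getFileFPS(), checker.getFileCount(), checker.getFrameWidth()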
Esempio n. 53
0
    data = pickle.load(open(args.model, "rb"))

    files = glob.glob(args.file, recursive=True)
    for file in files:
        print("Processing", file)
        output_file = output_dir + os.sep + os.path.basename(file)

        carTracker = CarTracker(y_start, y_stop, scale, data['clf'], data['X_scaler'], color_space=data['color_space'],
                                spatial_features=data['spatial_features'], spatial_size=data['spatial_size'],
                                hist_features=data['hist_features'], hist_bins=data['hist_bins'],
                                hog_features=data['hog_features'], hog_orient=data['hog_orient'],
                                hog_pix_per_cell=data['hog_pix_per_cell'],
                                hog_cell_per_block=data['hog_cell_per_block'],
                                hog_channel=data['hog_channel'])

        _, file_extension = os.path.splitext(file)
        if ".jpg" == file_extension.lower():
            img = mpimg.imread(file)
            out_img = carTracker.next_image(img)
            cv2.imwrite(output_file, cv2.cvtColor(out_img, cv2.COLOR_RGB2BGR))
        elif ".mp4" == file_extension.lower():
            carTracker.smooth_over = 5  # Smooth detection over 1sec/25fps*5=0.2 secs
            clip = VideoFileClip(file)
            output_clip = clip.fl_image(carTracker.next_image)
            output_clip.write_videofile(output_file, audio=False)
        else:
            print("Unknown file format: " + args.file)
            continue

        print("Result saved to", output_file)
Esempio n. 54
0
 def mergeMp3ToMp4(self, mp3Path, mp4Path, rstName='merge.mp4'):
     video = VideoFileClip(mp4Path)
     video.write_videofile(rstName, audio=mp3Path)
Esempio n. 55
0
def video_to_audio(video_path):
    file_name = str(video_path.split('/')[-1]).split('.')[0]
    video = VideoFileClip(video_path)
    audio = video.audio
    audio.write_audiofile(
        os.path.join('instance', 'audios', file_name + '.wav'))
Esempio n. 56
0
captionCC = list(cp)

CCPointList = []
for i in range(len(captionCC)):

    if captionCC[i].content.startswith("what") or captionCC[i].content.startswith("george"):
        CCPointList.append(i)
        print(captionCC[i].content)
        print(captionCC[i].start)

print(CCPointList)

filename = "Podcast1.mp4"
n = 0
for j in range(len(CCPointList)):
    lastNumber = CCPointList[-1]
    n += 1
    if CCPointList[j] >= 0 and CCPointList[j] != lastNumber:
        starttime = srt.timedelta_to_srt_timestamp(captionCC[CCPointList[j]].start)
        endtime = srt.timedelta_to_srt_timestamp(captionCC[CCPointList[j + 1]].start)
        clip = VideoFileClip(filename).subclip(starttime, endtime)
        clip.write_videofile(f"clip{n}.mp4")
        clip.close()

    elif CCPointList[j] == CCPointList[-1]:
        starttime = srt.timedelta_to_srt_timestamp(captionCC[CCPointList[j]].start)
        endtime = srt.timedelta_to_srt_timestamp(captionCC[-1].end)
        clip1 = VideoFileClip(filename).subclip(starttime, endtime)
        clip1.write_videofile(f"clip{n}.mp4")
        clip1.close()
Esempio n. 57
0
    l_line.add_fit(l_fit, l_lane_inds)
    r_line.add_fit(r_fit, r_lane_inds)

    if l_line.best_fit is not None and r_line.best_fit is not None:
        img_out = DrawLane(image, undist, ploty, l_fit_x, r_fit_x, Minv)

    else:
        img_out = image

    left_curverad, right_curverad, center_dist = Calculations(
        undist, l_fit, r_fit, l_lane_inds, r_lane_inds)

    img_out = Data(img_out, left_curverad, right_curverad, center_dist)
    #image = DrawLane(image, undist, ploty, left_fitx, right_fitx, Minv)
    #undistortedTestImages.append(undist)

    #PlotImages(undistortedTestImages)
    return img_out


if (__name__ == "__main__"):
    #PipeLine()
    #Debug()
    #VideoProcess()
    l_line = Line()
    r_line = Line()
    video_output1 = 'project_video_output.mp4'
    video_input1 = VideoFileClip('project_video.mp4')  #.subclip(22,26)
    processed_video = video_input1.fl_image(PipeLine)
    processed_video.write_videofile(video_output1, audio=False)
Esempio n. 58
0
    _, _ = im_detect(net, im)

    im_dir = '/home/student/cmpe295-masters-project/faster-rcnn-resnet/data/demo/'
    im_dir += '/*.jpg'
    bsdr = glob.glob(im_dir)
   
    """
    for im_name in bsdr:
        print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
        print 'Demo for {}'.format(im_name)
        print(matplotlib.backends.backend)
        demo(net, im_name)
    """
    
    ### Process video files as an input
    clip = VideoFileClip("/home/student/cmpe295-masters-project/faster-rcnn-resnet/data/demo/demo_input.mp4")
    
    start = time.time()
    # Transform video and perform image flip
    new_clip = clip.fl_image(demoVideo)

    # Write a video to a file
    new_clip.write_videofile("demo_input_lr.mp4", audio=False)

    end = time.time()
    total_time = (end - start)

    clip_len = time.strftime("%H:%M:%S", time.gmtime(clip.duration))
    print((('Image transformations took {:.3f}s for '
            '{} long video').format(total_time, clip_len)))
    
Esempio n. 59
0
    result = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)

    return result

#%%
#import glob
#img_list = glob.glob('test_images/*.jpg')
#for i in range(len(img_list)):
#    process(cv2.imread(img_list[i]))

#%%
#import glob
#img_list = glob.glob('test_images/*.jpg')
#i = 0
#
#for img_path in img_list:
#    img = cv2.imread(img_path)
#    output_img = process(img)
#    print(output_img.shape)
#    i += 1
#    cv2.imwrite('test_images_output/output_image_'+str(i)+'.jpg', output_img)

#%%
video = VideoFileClip('project_video.mp4')
output_clip = video.fl_image(process)
output_clip.write_videofile('project_video_output_test.mp4', audio=False)




Esempio n. 60
0
def playVideo():
    #pygame.mixer.quit()  # (traceback reference: moviepy/audio/io/preview.py, line 45, in preview)
    clip = VideoFileClip(f'videos/{eyeDesign}video.mp4', target_resolution=(480,800))
    clip.preview()