def demo(filename, tracking, output, t_start=0., t_end=None, shift=0.,
         labels=None, landmark=None, height=200):
    """Render a tracking-overlay demo video with the original audio.

    Parameters
    ----------
    filename : str
        Path to the input video file.
    tracking : str
        Path to the tracking file consumed by `get_make_frame`.
    output : str
        Path of the video file to write.
    t_start, t_end : float, optional
        Clip boundaries in seconds; `t_end` defaults to the full duration.
    shift : float, optional
        Temporal shift forwarded to `get_make_frame`.
    labels : str, optional
        Path to a mapping file with one "<int identifier> <label>" per line.
    landmark : optional
        Landmark data forwarded to `get_make_frame`.
    height : int, optional
        Output frame height forwarded to `get_make_frame`.
    """

    # parse label file: one "<identifier> <label>" pair per line.
    # maxsplit=1 lets the label itself contain whitespace (the original
    # unbounded split() raised ValueError on multi-word labels).
    if labels is not None:
        mapping = {}
        with open(labels, 'r') as f:
            for line in f:
                identifier, label = line.strip().split(None, 1)
                mapping[int(identifier)] = label
        labels = mapping

    video = Video(filename)

    # moviepy locates ffmpeg through imageio; point it at the system binary.
    import os
    os.environ['IMAGEIO_FFMPEG_EXE'] = 'ffmpeg'

    from moviepy.editor import VideoClip, AudioFileClip

    make_frame = get_make_frame(video, tracking, landmark=landmark,
                                labels=labels, height=height, shift=shift)
    video_clip = VideoClip(make_frame, duration=video.duration)
    audio_clip = AudioFileClip(filename)
    clip = video_clip.set_audio(audio_clip)

    if t_end is None:
        t_end = video.duration

    clip.subclip(t_start, t_end).write_videofile(output, fps=video.frame_rate)
Exemple #2
0
def pyannote_shot(videoPath):
    """
    Perform shot segmentation using pyannote-video and return output
    Input : videoPath - Path to video file
    Output : shotBoundaries - list of [shot start, shot end] frame indices
    """
    video = Video(videoPath)
    # NOTE(review): relies on the private `_fps` attribute of Video —
    # presumably frames per second; verify against pyannote-video.
    fps = video._fps
    # Convert each shot's start/end times (seconds) to frame indices.
    return [[int(fps * shot.start), int(fps * shot.end)]
            for shot in Shot(video)]
Exemple #3
0
def pyannote_shot(videoPath):
    """
    Perform shot segmentation using pyannote-video and return output
    Input : videoPath - Path to video file
    Output : shotBoundaries - list of [shot start, shot end]

    """
    clip = Video(videoPath)
    frame_rate = clip._fps
    detector = Shot(clip)

    boundaries = []
    # Map each detected shot's start/end (seconds) onto frame indices.
    for segment in detector:
        start_frame = int(frame_rate * segment.start)
        end_frame = int(frame_rate * segment.end)
        boundaries.append([start_frame, end_frame])

    return boundaries
def get_movie_infos():
    """Scan every series folder for .mkv episodes and collect metadata.

    Iterates over `current_directory`, opens each .mkv with `Video`, and
    returns a DataFrame with one row per episode:
    name, season, episode, duration, frame_rate, width, height.
    Also prints a progress line per folder and a final `describe()` summary.
    """
    columns = ['name', 'season', 'episode', 'duration', 'frame_rate',
               'width', 'height']
    # Hard-coded exclusions preserved from the original code —
    # reason not recorded; presumably unreadable files. TODO confirm.
    skipped = ('GameOfThrones.Season06.Episode09.mkv',
               'GameOfThrones.Season06.Episode10.mkv')

    rows = []
    nb_total_serie = get_nb_series()
    for id_serie, folder_serie in enumerate(current_directory.iterdir()):
        print(str(id_serie) + '/' + str(nb_total_serie), str(folder_serie))
        for current_file in folder_serie.iterdir():
            current_file = str(current_file)
            if '.mkv' not in current_file:
                continue
            if any(name in current_file for name in skipped):
                continue
            vid = Video(current_file)
            # Expected file name shape: <Name>.Season<SS>.Episode<EE>.mkv
            parts = current_file.split('/')[-1].split('.')
            rows.append([
                parts[0],                        # name
                parts[1].split('Season')[1],     # season
                parts[2].split('Episode')[1],    # episode
                float(vid.duration),
                float(vid.frame_rate),
                float(vid.frame_size[0]),        # width
                float(vid.frame_size[1]),        # height
            ])

    # Build the DataFrame once at the end: the original called pd.concat
    # inside the loop, which copies all accumulated rows each iteration
    # (accidentally O(n^2)).
    videos = pd.DataFrame(rows, columns=columns)

    print(videos.describe())

    return videos
                        # Raw Image
                        os.makedirs(os.path.dirname(target_file_path_raw_img))

                    if os.path.exists(target_file_path):
                        continue

                    # process video and audio files
                    #video_filename = '/cvhci/data/QA/Movie_Description_Dataset/MOVIE_DESCRIPTION_DATASET/0001_American_Beauty/0001_American_Beauty_01.55.05.110-01.55.21.331.avi'
                    #audio_filename = '/cvhci/data/QA/Movie_Description_Dataset/MOVIE_DESCRIPTION_DATASET/0001_American_Beauty/0001_American_Beauty_01.55.05.110-01.55.21.331.wav'
                    video_filename = os.path.join(root,
                                                  basefile_name_w_ext)  # Video
                    audio_filename = os.path.join(root, basefile_name +
                                                  '.wav')  # Audio

                    video = Video(video_filename)
                    num_frames = video._nframes
                    width = video._width
                    height = video._height
                    channels = 3
                    black_border_removed_indexes = 0  # Used afterwards
                    step_size_imagenet = 5

                    # Buffer memory to store frames for for fast accessing
                    frames_buffer_i3d = torch.zeros([
                        num_frames, channels, i3d_imagenet_expected_size,
                        i3d_imagenet_expected_size
                    ],
                                                    dtype=torch.float32)
                    frames_buffer_i3d = frames_buffer_i3d.to(device)
Exemple #6
0
    clip.subclip(t_start, t_end).write_videofile(output, fps=video.frame_rate)


if __name__ == '__main__':

    # parse command line arguments
    version = 'pyannote-face {version}'.format(version=__version__)
    arguments = docopt(__doc__, version=version)

    # initialize video
    filename = arguments['<video>']

    verbose = arguments['--verbose']

    video = Video(filename, verbose=verbose)

    # face tracking
    if arguments['track']:

        shot = arguments['<shot.json>']
        tracking = arguments['<tracking>']
        detect_min_size = float(arguments['--min-size'])
        detect_every = float(arguments['--every'])
        track_min_overlap_ratio = float(arguments['--min-overlap'])
        track_min_confidence = float(arguments['--min-confidence'])
        track_max_gap = float(arguments['--max-gap'])
        track(video,
              shot,
              tracking,
              detect_min_size=detect_min_size,
    clip.subclip(t_start, t_end).write_videofile(output, fps=video.frame_rate)

if __name__ == '__main__':

    # parse command line arguments
    version = 'pyannote-face {version}'.format(version=__version__)
    arguments = docopt(__doc__, version=version)

    # initialize video
    filename = arguments['<video>']
    ffmpeg = arguments['--ffmpeg']

    verbose = arguments['--verbose']

    video = Video(filename, ffmpeg=ffmpeg, verbose=verbose)

    # face tracking
    if arguments['track']:

        shot = arguments['<shot.json>']
        tracking = arguments['<tracking>']
        detect_min_size = float(arguments['--min-size'])
        detect_every = float(arguments['--every'])
        track_min_overlap_ratio = float(arguments['--min-overlap'])
        track_min_confidence = float(arguments['--min-confidence'])
        track_max_gap = float(arguments['--max-gap'])
        track(video, shot, tracking,
              detect_min_size=detect_min_size,
              detect_every=detect_every,
              track_min_overlap_ratio=track_min_overlap_ratio,
Exemple #8
0
def readInputFiles(arguments):
    """Parse every input file named on the command line.

    Parameters
    ----------
    arguments : sequence of str
        sys.argv-style list: [script, trs_path, xgtf_path, face_track_path,
        video_path, landmarks_path, output_dir, facetrack_talkingface_dir].

    Returns
    -------
    tuple
        (trs_file, xgtf_file, face_track_file, video, video_id,
         face_landmarks_file, output_dir, facetrack_talkingface_dir)

    Raises
    ------
    AssertionError
        If the argument count is wrong.
    Exception
        Any parse/read error is re-raised after printing which file failed.
    """
    # NOTE(review): assert is stripped under `python -O`; kept as-is so
    # callers expecting AssertionError are unaffected.
    assert len(
        arguments
    ) == 8, "Error with number of argument : python xml_parser.py <TRS file path> <XGTF file path> <Face track file path> <video file path> <Face landmark file path> <output folder path> <facetrack_talkingface folder path>"

    # Each reader prints which file failed and re-raises. `except Exception`
    # (not a bare `except:`) so SystemExit/KeyboardInterrupt are not caught
    # and reported as file-read errors.
    try:
        # Read TRS file
        trs_file = etree.parse(arguments[1])
    except Exception:
        print("Error reading TRS file ")
        raise

    try:
        # Read XGTF file (parsed with minidom, unlike the TRS file)
        xgtf_file = minidom.parse(arguments[2])
    except Exception:
        print("Error reading XGTF file")
        raise

    try:
        # Read face track file: space-separated, no header row.
        face_track_file = pd.read_csv(arguments[3], sep=" ", header=None)
        face_track_file.columns = [
            "time", "id", "left", "top", "right", "bottom", "state"
        ]
        # Zero the state column — presumably filled in downstream; verify.
        face_track_file["state"] = 0
    except Exception:
        print("Error reading Face track file")
        raise

    try:
        # Read MPG video file to generate images from it
        video = Video(filename=arguments[4])

        # Extract video id from video file path argument
        video_id = arguments[4].split('/')[LAST_ELEMENT].split('.')[0]
    except Exception:
        print("Error reading Video file ")
        raise

    try:
        # Read face landmarks file
        face_landmarks_file = np.loadtxt(arguments[5])
    except Exception:
        print("Error reading Face landmarks file")
        raise

    # Create the output folder if needed; exist_ok avoids the
    # isdir-then-makedirs race of the original.
    os.makedirs(arguments[6], exist_ok=True)
    output_dir = arguments[6]

    facetrack_talkingface_dir = arguments[7]

    return trs_file, xgtf_file, face_track_file, video, video_id, face_landmarks_file, output_dir, facetrack_talkingface_dir
        os.makedirs(output_picdir) 
    if not os.path.exists(output_cropped_picdir): 
        os.makedirs(output_cropped_picdir) 
    else:
        print(f'{file_name}_cropped_frames directory already exist')
        seen.append(file_name)
        continue
    
    track = get_track(embedding_file)
    if track is None: continue
    
    # e.g. './snippets/2018-03-30_0100_US_CNN_Anderson_Cooper_360_merged_Christine_Quinn.track.txt'
    df_one = choose_track(tracking_file, track)
    
    # cut
    video = Video(input_video)

    for i, row in df_one.iterrows():
        
        print('frames extracting...')
        output_pic = f'{output_picdir}/{i}_{file_name}.jpg'
        output_cropped_pic = f'{output_cropped_picdir}/{i}_{file_name}.jpg'
        if os.path.exists(output_pic) :continue
        
        time = row['t']
        print(f'extracting the frame at {time}...')
        extract_frame(time, input_video, output_pic)
        print(f'{output_pic} is done!')
        
        print('frames cropping...')
        img = cv2.imread(output_pic)