Example 1
import json
import logging
import os

from moviepy.editor import VideoFileClip


def dump_frames(video_path, output_directory, frames_per_second):
    """Dump frames at frames_per_second from a video to output_directory.

    If frames_per_second is None, the clip's fps attribute is used instead."""
    clip = VideoFileClip(video_path)
    # Work around moviepy mis-reporting the dimensions of 90-degree-rotated
    # videos (https://github.com/Zulko/moviepy/issues/586).
    if clip.rotation == 90:
        clip = clip.resize(clip.size[::-1])
        clip.rotation = 0
    info_path = '{}/info.json'.format(output_directory)
    name_format = '{}/frame%04d.png'.format(output_directory)

    if frames_per_second is None:
        frames_per_second = clip.fps
    def frames_already_dumped_helper():
        # frames_already_dumped is assumed to be defined elsewhere in this
        # module; presumably it checks that info.json and the frame images
        # already exist for these settings.
        return frames_already_dumped(video_path, output_directory,
                                     frames_per_second, info_path,
                                     name_format, clip.duration)
    if frames_already_dumped_helper():
        logging.info('Frames for {} exist, skipping...'.format(video_path))
        return

    successfully_wrote_images = False
    try:
        # name_format already has output_directory substituted in above, so
        # it can be passed to write_images_sequence as-is.
        clip.write_images_sequence(name_format, fps=frames_per_second)
        successfully_wrote_images = True
    except Exception as e:
        logging.error("Failed to dump images for %s", video_path)
        logging.error(e)

    if successfully_wrote_images:
        info = {
            'frames_per_second': frames_per_second,
            'input_video_path': os.path.abspath(video_path)
        }
        with open(info_path, 'w') as info_file:
            json.dump(info, info_file)

        if not frames_already_dumped_helper():
            logging.error(
                "Images for {} don't seem to be dumped properly!".format(
                    video_path))
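
The function above depends on a frames_already_dumped helper that is not part of this example. A minimal usage sketch, with a hypothetical stub for that check (the paths and the stub's logic are assumptions, not part of the original code), might look like this:

import json
import os

def frames_already_dumped(video_path, output_directory, frames_per_second,
                          info_path, name_format, duration):
    # Hypothetical stub: treat the frames as already dumped if info.json
    # exists and records the same fps. The real helper presumably also
    # verifies that the expected frame images are present.
    if not os.path.isfile(info_path):
        return False
    with open(info_path) as info_file:
        info = json.load(info_file)
    return info.get('frames_per_second') == frames_per_second

output_directory = '/tmp/video_frames'        # placeholder paths
os.makedirs(output_directory, exist_ok=True)
dump_frames('video.mp4', output_directory, frames_per_second=None)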
Example 2
import os

import mtcnn_detect_face
import tensorflow as tf
from keras import backend as K
from moviepy.editor import VideoFileClip


def create_mtcnn(sess, model_path):
    # The pnet stage is assumed to mirror the rnet/onet stages below
    # (det1.npy weights, variable-size input); the original snippet starts
    # inside the rnet block.
    with tf.variable_scope('pnet'):
        data = tf.placeholder(tf.float32, (None,None,None,3), 'input')
        pnet = mtcnn_detect_face.PNet({'data':data})
        pnet.load(os.path.join(model_path, 'det1.npy'), sess)
    with tf.variable_scope('rnet'):
        data = tf.placeholder(tf.float32, (None,24,24,3), 'input')
        rnet = mtcnn_detect_face.RNet({'data':data})
        rnet.load(os.path.join(model_path, 'det2.npy'), sess)
    with tf.variable_scope('onet'):
        data = tf.placeholder(tf.float32, (None,48,48,3), 'input')
        onet = mtcnn_detect_face.ONet({'data':data})
        onet.load(os.path.join(model_path, 'det3.npy'), sess)
    return pnet, rnet, onet

# WEIGHTS_PATH (the directory holding the det1/det2/det3.npy weight files)
# is assumed to be defined earlier in the script.
sess = K.get_session()
with sess.as_default():
    pnet, rnet, onet = create_mtcnn(sess, WEIGHTS_PATH)

# Wrap each stage as a Keras function: the input is an image batch, the
# outputs are the box-regression tensors, the landmark tensor (onet only)
# and the face-probability tensors.
pnet = K.function([pnet.layers['data']], [pnet.layers['conv4-2'], pnet.layers['prob1']])
rnet = K.function([rnet.layers['data']], [rnet.layers['conv5-2'], rnet.layers['prob1']])
onet = K.function([onet.layers['data']], [onet.layers['conv6-2'], onet.layers['conv6-3'], onet.layers['prob1']])

# Smoothing coefficient for the bounding-box moving average, presumably used
# by process_video to stabilise detections across frames.
bbox_moving_avg_coef = 0.65

# clip_loc, output_clip_loc (input/output video paths) and process_video
# (the per-frame face-swapping function) are assumed to be defined elsewhere.
clip = VideoFileClip(clip_loc)

# Workaround for https://github.com/Zulko/moviepy/issues/682,
# using https://github.com/Zulko/moviepy/issues/586
if clip.rotation == 90:
    clip = clip.resize(clip.size[::-1])       # 90 degree rotate
    clip.rotation = 0

clip = clip.fl_image(process_video)           # Apply process_video to swap the face in every frame of the video
clip.write_videofile(output_clip_loc, audio=False)
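
For a rough sense of how the wrapped stage callables are used: each K.function takes a list containing an image batch and returns a list of output arrays. A small sanity-check sketch follows; the dummy batch and the expected output shapes are assumptions based on the standard MTCNN layer sizes, not output from the original script.

import numpy as np

dummy_crops = np.zeros((2, 24, 24, 3), dtype=np.float32)   # two blank 24x24 crops
box_reg, face_prob = rnet([dummy_crops])
print(box_reg.shape)     # expected (2, 4): per-crop bounding-box regression
print(face_prob.shape)   # expected (2, 2): face / non-face probabilities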