def detection_video(file_name, file_id):
    """Run object detection on an uploaded video and notify the client.

    Renders an annotated copy of the uploaded file, converts it to MP4
    (mirrored once during transcode, then mirrored back so the final clip
    has the original orientation), deletes the intermediate detection
    output, and emits a download link over the socket.

    :param file_name: string
    :param file_id: string
    :return: dict
    """
    with graph.as_default():
        source_path = f'media/new-files/{file_name}'
        stamp = str(math.ceil(time.time()))
        avi_target = f'media/video-object-detection/{stamp}'
        mp4_path = os.path.join(execution_path,
                                f'media/video-object-detection/{stamp}.mp4')
        flipped_path = os.path.join(execution_path,
                                    f'media/video-object-detection/flip-{stamp}.mp4')

        # Render the annotated detection video (writes next to avi_target).
        rendered = detector.detectObjectsFromVideo(
            input_file_path=os.path.join(execution_path, source_path),
            output_file_path=os.path.join(execution_path, avi_target),
            frames_per_second=20,
            log_progress=True)

        # Pass 1: mirror the rendered file while transcoding it to MP4.
        ffmpeg.run(ffmpeg.output(ffmpeg.input(rendered).video.hflip(), mp4_path))

        # Pass 2: mirror again so the published MP4 matches the source orientation.
        ffmpeg.run(ffmpeg.output(ffmpeg.hflip(ffmpeg.input(mp4_path)), flipped_path))

        os.remove(rendered)
        print('end recognition')
        socketio.emit('video-object-detection-end',
                      {'link': f'http://localhost:5000/media/video-object-detection/flip-{stamp}.mp4',
                       'id': file_id})
# ---- Example #2 (score: 0) ----
def flip_video(file, orientation):
	"""Flip a video and write the result back over the source path.

	:param file: path of the video to flip (also used as the output path)
	:param orientation: "horizontal" for a left-right mirror; any other
		value produces a vertical flip
	"""
	stream = ffmpeg.input(file)
	# Fixed: the string literal was missing its closing quote (SyntaxError).
	if orientation == "horizontal":
		stream = ffmpeg.hflip(stream)
	else:
		stream = ffmpeg.vflip(stream)
	stream = ffmpeg.output(stream, file)
	# Fixed: the pipeline was built but never executed, so the function
	# had no effect. overwrite_output passes -y since the target exists.
	# NOTE(review): ffmpeg reading and writing the same path is unsafe in
	# general — consider writing to a temp file and renaming; confirm with callers.
	ffmpeg.run(stream, overwrite_output=True)
# ---- Example #3 (score: 0) ----
def mirror(path):
    """Write a horizontally mirrored copy of every .mp4 file in *path*.

    Each clip ``name.mp4`` is saved alongside the original as
    ``name_mirrored.mp4``.
    """
    clips = [entry for entry in os.listdir(path) if ".mp4" in entry]

    for clip in clips:
        base = clip.split('.')[0]
        target = os.path.join(path, base + "_mirrored.mp4")
        pipeline = ffmpeg.input(os.path.join(path, clip))
        pipeline = ffmpeg.hflip(pipeline)
        pipeline = ffmpeg.output(pipeline, target)
        ffmpeg.run(pipeline)
# ---- Example #4 (score: 0) ----
 def record(self):
     """Record a fixed-length clip from the camera and save it.

     Captures frames for ``Recorder.record_length`` seconds, writes them
     to an XVID .avi under static/recordings/, then renders a horizontally
     flipped .mp4 copy with ffmpeg. ``Recorder.isRecording`` is set True
     for the duration of the call.
     """
     Recorder.isRecording = True
     print("Recording started")
     camera = Camera()
     fourcc = cv2.VideoWriter_fourcc(*'XVID')
     # Fixed: the previous format "%d\%m\%Y_%H:%M:%S" relied on invalid
     # escape sequences (\m, \Y kept a literal backslash) and produced
     # '\' and ':' characters, which are illegal in Windows filenames.
     # NOTE(review): anything that parses these filenames must be updated
     # to the new pattern — confirm downstream consumers.
     timestamp = time.strftime("%d-%m-%Y_%H-%M-%S")
     directory = 'static/recordings/'
     frames = []
     start = time.time()
     # Grab frames for the configured duration; the 0.03 s sleep targets
     # roughly 30 fps, but the real rate is measured below.
     while int(time.time() - start) < Recorder.record_length:
         frames.append(camera.get_frame())
         time.sleep(0.03)
     # Derive the actual capture rate so playback speed matches real time.
     fps = len(frames) / Recorder.record_length
     out = cv2.VideoWriter(directory + timestamp + '.avi', fourcc, fps,
                           (640, 480))
     for frame in frames:
         out.write(frame)
     out.release()
     # Transcode to a mirrored MP4 alongside the AVI.
     stream = ffmpeg.input(directory + timestamp + '.avi')
     stream = ffmpeg.hflip(stream)
     stream = ffmpeg.output(stream, directory + timestamp + '.mp4')
     ffmpeg.run(stream)
     Recorder.isRecording = False
# ---- Example #5 (score: 0) ----
import ffmpeg
import sys

# Mirror the video given as the first CLI argument and save it as output.mp4.
pipeline = ffmpeg.hflip(ffmpeg.input(sys.argv[1]))
ffmpeg.run(ffmpeg.output(pipeline, 'output.mp4'))
# ---- Example #6 (score: 0) ----
if __name__ == '__main__':
    # Parse CLI arguments; --path_out defaults to the input directory.
    args = docopt(__doc__)
    videos_path_in = join(os.getcwd(), args['--path_in'])
    videos_path_out = join(
        os.getcwd(),
        args['--path_out']) if args.get('--path_out') else videos_path_in
    # Training script expects videos in MP4 format
    VIDEO_EXT = '.mp4'

    # Create directory to save flipped videos
    os.makedirs(videos_path_out, exist_ok=True)

    for video in os.listdir(videos_path_in):
        # Fixed: skip non-.mp4 entries instead of feeding every directory
        # entry to ffmpeg (matches the filtering done elsewhere in this file).
        if not video.endswith(VIDEO_EXT):
            continue
        print(f'Processing video: {video}')
        # Fixed: splitext keeps dots inside the base name intact, unlike
        # video.split('.')[0], which truncated e.g. "clip.v2.mp4" to "clip".
        flipped_video_name = os.path.splitext(video)[0] + '_flipped' + VIDEO_EXT
        # Original video as input
        original_video = ffmpeg.input(join(videos_path_in, video))
        # Do horizontal flip
        flipped_video = ffmpeg.hflip(original_video)
        # Get flipped video output
        flipped_video_output = ffmpeg.output(flipped_video,
                                             filename=join(
                                                 videos_path_out,
                                                 flipped_video_name))
        # Run to render and save video
        ffmpeg.run(flipped_video_output)

    print("Processing complete!")
# ---- Example #7 (score: 0) ----
	def __init__(self):
		"""Transcode the recorded webcam .webm capture into a horizontally
		flipped .mp4, then delete the original .webm file."""
		pipeline = ffmpeg.input(WEBCAM_WEBM_FILE_NAME)
		pipeline = ffmpeg.hflip(pipeline)
		target = WEBCAM_WEBM_FILE_NAME.replace('.webm', '.mp4')
		ffmpeg.run(ffmpeg.output(pipeline, target))
		os.remove(WEBCAM_WEBM_FILE_NAME)
def read_clips_from_video(dirname, model_settings):
    """Decode one randomly augmented clip from a single video file.

    Picks a random temporal window, applies a randomly translated square
    crop, rescales to the network input size, and mirrors the clip with
    50% probability.

    :param dirname: path to the video file (probed and decoded via ffmpeg)
    :param model_settings: dict with keys 'frames_per_batch', 'video_fps',
        'crop_size', 'np_mean', 'trans_max'
    :return: numpy array of shape
        (frames_per_batch, crop_size, crop_size, 3) with np_mean subtracted
    """
    print('Dirname: ', dirname)
    # Input size for the network
    frames_per_batch = model_settings['frames_per_batch']
    video_fps = model_settings['video_fps']
    crop_size = model_settings['crop_size']
    np_mean = model_settings['np_mean']
    trans_max = model_settings['trans_max']

    # Data augmentation randoms: coin flip for mirroring, and a random
    # crop translation in [-trans_max, trans_max].
    horizontal_flip = random.random()
    trans_factor = random.randint(-trans_max, trans_max)

    # Video information: take the first probed stream that reports a
    # width (i.e. the video stream, not audio).
    probe = ffmpeg.probe(dirname)
    for video_info in probe['streams']:
        if 'width' in video_info:
            break
    # NOTE(review): if no stream has 'width', video_info is just the last
    # stream — assumed not to happen for valid video inputs; confirm.
    video_width = video_info["width"]
    video_height = video_info["height"]
    video_duration = float(video_info["duration"])
    num_frame = int(video_info["nb_frames"])

    # Select which portion of the video will be input: the largest start
    # frame that still leaves frames_per_batch frames at video_fps.
    rand_max = int(num_frame - ((num_frame / video_duration) *
                                (frames_per_batch / video_fps)))

    start_frame = random.randint(0, max(rand_max - 1, 0))
    # end_frame = ceil(start_frame + (num_frame / video_duration) * frames_per_batch / video_fps + 1)
    video_start = (video_duration / num_frame) * start_frame
    # Read one extra frame's worth of time as slack; clamp to video end.
    video_end = min(video_duration,
                    video_start + ((frames_per_batch + 1) / video_fps))

    # Cropping factor: a square of the short side, centred and then
    # shifted by trans_factor along the longer axis (clamped at 0).
    x_pos = max(video_width - video_height + 2 * trans_factor, 0) // 2
    y_pos = max(video_height - video_width + 2 * trans_factor, 0) // 2
    crop_size1 = min(video_height, video_width)
    # Read specified times of the video
    ff = ffmpeg.input(dirname, ss=video_start, t=video_end - video_start)
    # Trim video -> did not work :(
    # ff = ff.trim(end_frame='50')
    # Divide into frames
    ff = ffmpeg.filter(ff, 'fps', video_fps)
    # Crop
    ff = ffmpeg.crop(ff, x_pos, y_pos, crop_size1, crop_size1)
    # Subsample
    ff = ffmpeg.filter(ff, 'scale', crop_size, crop_size)
    # Horizontal flip with some probability
    if horizontal_flip > 0.5:
        ff = ffmpeg.hflip(ff)
    # Output the video
    ff = ffmpeg.output(ff, 'pipe:', format='rawvideo', pix_fmt='rgb24')
    # Run Process in quiet mode
    out, _ = ffmpeg.run(ff, capture_stdout=True, quiet=True)
    # Extract to numpy array: raw RGB24 bytes -> (frames, H, W, 3)
    video = np.frombuffer(out, np.uint8). \
        reshape([-1, crop_size, crop_size, 3])

    # Copies last frame if # of frames < frames_per_batch,
    # then subtracts the dataset mean.
    num_frames = video.shape[0]
    if num_frames < frames_per_batch:
        last_frame = video[-1]
        num_frame_repeat = frames_per_batch - num_frames
        # print('Frames repeated: ', num_frame_repeat)
        last_repeat = np.repeat(last_frame[np.newaxis],
                                num_frame_repeat,
                                axis=0)
        video = np.concatenate((video, last_repeat), axis=0) - np_mean
    else:
        # Truncate any extra decoded frames before mean subtraction.
        video = video[:frames_per_batch] - np_mean

    return video
# ---- Example #9 (score: 0) ----
def flip_videos():
    """
    Flip all videos of a class horizontally into a counterpart class, and
    copy the tags of selected original videos over to the flipped copies.

    Reads the project/class names and the tag-copy selection from the
    request JSON; writes flipped videos and tag files into the
    counterpart class directories for every split in SPLITS.
    """
    data = request.json
    project = data['projectName']
    path = project_utils.lookup_project_path(project)
    config = project_utils.load_project_config(path)
    counterpart_class_name = str(data['counterpartClassName'])
    original_class_name = str(data['originalClassName'])
    copy_video_tags = data['videosToCopyTags']

    # Register the counterpart class in the project config, inheriting the
    # original class's tag list only when tags are actually being copied.
    if counterpart_class_name not in config['classes']:
        config['classes'][counterpart_class_name] = config['classes'][original_class_name] \
            if copy_video_tags['train'] or copy_video_tags['valid'] else []
        project_utils.write_project_config(path, config)

    for split in SPLITS:
        videos_path_in = os.path.join(path, f'videos_{split}', original_class_name)
        videos_path_out = os.path.join(path, f'videos_{split}', counterpart_class_name)
        original_tags_path = os.path.join(path, f'tags_{split}', original_class_name)
        counterpart_tags_path = os.path.join(path, f'tags_{split}', counterpart_class_name)

        # Create directory to save flipped videos
        os.makedirs(videos_path_out, exist_ok=True)
        os.makedirs(counterpart_tags_path, exist_ok=True)

        video_list = [video for video in os.listdir(videos_path_in) if video.endswith(VIDEO_EXT)]

        for video in video_list:
            # Re-list the output directory every iteration so a video
            # written earlier in this loop is seen by the skip check below.
            output_video_list = [op_video for op_video in os.listdir(videos_path_out) if op_video.endswith(VIDEO_EXT)]
            print(f'Processing video: {video}')
            # Flipping a '_flipped' video restores the original name;
            # otherwise append the '_flipped' suffix.
            if '_flipped' in video:
                flipped_video_name = ''.join(video.split('_flipped'))
            else:
                flipped_video_name = video.split('.')[0] + '_flipped' + VIDEO_EXT

            # Skip videos whose flipped counterpart already exists.
            if flipped_video_name not in output_video_list:
                # Original video as input
                original_video = ffmpeg.input(os.path.join(videos_path_in, video))
                # Do horizontal flip
                flipped_video = ffmpeg.hflip(original_video)
                # Get flipped video output
                flipped_video_output = ffmpeg.output(flipped_video,
                                                     filename=os.path.join(videos_path_out, flipped_video_name))
                # Run to render and save video
                ffmpeg.run(flipped_video_output)

                # Copy tags of original video to flipped video (in train/valid set)
                if video in copy_video_tags[split]:
                    original_tags_file = os.path.join(original_tags_path, video.replace(VIDEO_EXT, '.json'))
                    flipped_tags_file = os.path.join(counterpart_tags_path,
                                                     flipped_video_name.replace(VIDEO_EXT, '.json'))

                    # Rewrite the 'file' field so the copied tags point at
                    # the flipped video instead of the original.
                    if os.path.exists(original_tags_file):
                        with open(original_tags_file) as f:
                            original_video_tags = json.load(f)
                        original_video_tags['file'] = flipped_video_name
                        with open(flipped_tags_file, 'w') as f:
                            f.write(json.dumps(original_video_tags, indent=2))

    print("Processing complete!")
    return jsonify(status=True, url=url_for("project_details", project=project))