Example #1
def detect_video(conf, video_file, out_path, yolo, level=0):
    """Use yolo v3 to detect video.
    # Argument:
        video: video file.
        yolo: YOLO, yolo model.
        level : on which resolution to run detection, 
        range[1 - 7], default is 416
        the resolution list is in conf.resolutions
    """
    videogen = io.vreader(video_file)
    metadata = io.ffprobe(video_file)
    frame_rate = int(
        int(metadata['video']['@avg_frame_rate'].split('/')[0]) /
        int(metadata['video']['@avg_frame_rate'].split('/')[1]))
    frame = next(videogen)
    shape = (frame.shape[1], frame.shape[0])
    video_writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'XVID'),
                                   frame_rate, shape)
    for frame in tqdm(videogen, total=int(metadata['video']['@nb_frames'])):
        detected_frame = np.array(
            yolo.detect_on_img(conf, Image.fromarray(frame),
                               level=level))[..., ::-1]
        video_writer.write(detected_frame)
    video_writer.release()
    videogen.close()
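Several of these examples parse ffprobe's @avg_frame_rate / @r_frame_rate field, which skvideo returns as a fraction string such as "30000/1001". As a minimal sketch of that pattern, a small helper (the name parse_frame_rate is made up here) that lets fractions.Fraction do the parsing:

from fractions import Fraction

from skvideo.io import ffprobe


def parse_frame_rate(video_path):
    """Return the average frame rate of a video as a float.

    Assumes '@avg_frame_rate' is a string like '30000/1001' or '25/1';
    Fraction handles both forms without manual splitting.
    """
    rate_str = ffprobe(video_path)['video']['@avg_frame_rate']
    return float(Fraction(rate_str))


# hypothetical usage:
# fps = parse_frame_rate('input.mp4')  # e.g. 29.97 for NTSC material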
Example #2
def read_video(video_path):
    """
    Read a video file as a numpy array

    Resizes frames so that the minimum side is 256 pixels

    Args:
        video_path: Path to video file

    Returns:
        video: Numpy data array

    """
    vinfo = ffprobe(video_path)['video']
    width = int(vinfo['@width'])
    height = int(vinfo['@height'])

    scaling = 256.0 / min(width, height)
    new_width = int(math.ceil(scaling * width))
    new_height = int(math.ceil(scaling * height))

    # Resize frames
    reader = FFmpegReader(video_path,
                          outputdict={'-s': "{}x{}".format(new_width,
                                                           new_height) })

    frames = []
    for frame in reader.nextFrame():
        frames.append(frame)
    reader.close()
    return np.array(frames)
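Passing '-s' in outputdict asks FFmpeg to resize before the frames reach Python, so no per-frame resize is needed afterwards. A brief usage sketch (the file name is a placeholder):

import numpy as np

# hypothetical input file
video = np.asarray(read_video("clip.mp4"))
print(video.shape)  # (num_frames, height, width, 3); the shorter side is ~256 px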
Example #3
def comma(base_dir):
    speed_path = r"processed_log\CAN\speed"
    video_file = r"video.mp4"
    speeds_file = "value"
    output_speed_file = "speed.txt"

    folders_paths = get_paths(base_dir, output_speed_file)
    print(f"Total folders to process {len(folders_paths)}")

    bar = tqdm(folders_paths)
    for fpath in bar:
        video_path = join(fpath, video_file)
        frame_path = join(fpath, "frames-160x320")
        frame_template = join(frame_path, "frame-{}.jpg")
        makedirs(frame_path, exist_ok=True)
        # Convert video to frames.
        bar.write(f"Processing video {video_path}")
        video_to_frames(video_path=video_path, frame_template=frame_template)
        # Convert speed.
        video_meta = ffprobe(video_path)["video"]
        output_speed_path = join(fpath, output_speed_file)
        speeds = load(join(fpath, speed_path, speeds_file))
        video_frames = int(video_meta["@nb_frames"])

        speed_ids = round_(linspace(0, speeds.shape[0] - 1,
                                    num=video_frames)).astype("int32")
        savetxt(output_speed_path, speeds[speed_ids], "%.8f", "\n")
Example #4
def get_fps(video_path):
    vinfo = ffprobe(video_path)['video']
    fps = vinfo['@avg_frame_rate']
    fps = fps.split('/')
    fps = int(fps[0]) / int(fps[1])

    return fps
Example #5
def generate_dataset(video_dir):
    if not os.path.exists(video_dir):
        raise ValueError("video dir does not exist")
    video_files = os.listdir(video_dir)
    image_index = 1
    for video in video_files:
        if 'rmvb' in video:
            continue
        print(video)
        video_path = os.path.join(video_dir, video)
        metadata = ffprobe(video_path)
        frame_info = metadata["video"]["@avg_frame_rate"].split('/')
        rate = int(frame_info[0])/int(frame_info[1])

        try:
            frame_array = vreader(video_path)
            frame_index = 0
            for frame in frame_array:
                if frame_index % (rate * interval) == 0:
                    image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                    image = cv2.resize(image, (image_height, image_width))
                    cv2.imwrite(test_raw_file + str(image_index) + '.jpg', image)
                    cv2.imwrite(test_copy_file + str(image_index) + '.jpg', image)
                    print("write image %d" % image_index)
                    image_index += 1
                elif frame_index % (rate * interval) == 1:
                    image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                    image = cv2.resize(image, (image_height, image_width))
                    cv2.imwrite(test_copy_file + str(image_index -1) + '.jpg', image)
                frame_index += 1
        except RuntimeError:
            continue
Example #6
def annotate_video(file_path, coordinates):
    """
    Annotates supplied video from predicted coordinates.
    
    Args:
        file_path: path
            System path of video to annotate
        coordinates: list
            Predicted body part coordinates for each frame in the video
    """

    # Load raw video
    from skvideo.io import vreader, ffprobe, FFmpegWriter
    videogen = vreader(file_path)
    video_metadata = ffprobe(file_path)['video']
    fps = video_metadata['@r_frame_rate']
    frame_height, frame_width = next(vreader(file_path)).shape[:2]
    frame_side = frame_width if frame_width >= frame_height else frame_height

    # Initialize annotated video
    vcodec = 'libvpx-vp9'  #'libx264'
    writer = FFmpegWriter(normpath(file_path.split('.')[0] + '_tracked.mp4'),
                          inputdict={'-r': fps},
                          outputdict={
                              '-r': fps,
                              '-bitrate': '-1',
                              '-vcodec': vcodec,
                              '-pix_fmt': 'yuv420p',
                              '-lossless': '1'
                          })  #'-lossless': '1'

    # Annotate video
    from PIL import Image, ImageDraw
    i = 0
    while True:
        try:
            frame = next(videogen)
            image = Image.fromarray(frame)
            image_draw = ImageDraw.Draw(image)
            image_coordinates = coordinates[i]
            image = helpers.display_body_parts(image,
                                               image_draw,
                                               image_coordinates,
                                               image_height=frame_height,
                                               image_width=frame_width,
                                               marker_radius=int(frame_side /
                                                                 150))
            image = helpers.display_segments(image,
                                             image_draw,
                                             image_coordinates,
                                             image_height=frame_height,
                                             image_width=frame_width,
                                             segment_width=int(frame_side /
                                                               100))
            writer.writeFrame(np.array(image))
            i += 1
        except:  # StopIteration at the end of the video (bare except also hides other errors)
            break

    writer.close()
Example #7
def PreProcessVideo(fmt, filename, output, start=250, n_frames=5):
    info = ffprobe(filename)
    vinfo = info['video']

    v = FFmpegReader(filename, outputdict={'-pix_fmt': fmt})

    X = np.ndarray((int(vinfo['@height']) * int(vinfo['@width']) * n_frames, 6))
    n = 0
    t = 0
    frames = v.nextFrame()
    for t, frame in enumerate(frames):
        if t < start:
            continue
        if t >= start + n_frames:
            break
        print(t)
        sys.stdout.flush()
        printed = False
        for row_n, line in enumerate(frame):
            for col_n, pixel in enumerate(line):
                c1, c2, c3 = pixel
                t_scaled = (float(t - start) / float(n_frames)) * 255.0
                x_scaled = (float(col_n) / float(vinfo['@width'])) * 255.0
                y_scaled = (float(row_n) / float(vinfo['@height'])) * 255.0
                X[n] = np.array([t_scaled, x_scaled, y_scaled, c1, c2, c3])
                n += 1

    print("Done with the encode part")
    np.save(output, X, allow_pickle=False, fix_imports=False)
Example #8
def process_file(filepath):
    filename = os.path.basename(filepath)
    video = vreader.vread(filepath)
    metadata = {
        'total_frames': video.shape[0],
        'rows': video.shape[1],
        'cols': video.shape[2],
        'channels': video.shape[3]
    }
    extras = vreader.ffprobe(filepath).get('video', {})
    metadata['frame_rate'] = extras.get('@avg_frame_rate', '')
    metadata['duration'] = extras.get('@duration', '')
    payload = {'video_id': filename}
    # now we would send each video in frames
    for idx, frame in enumerate(video):
        metadata['id'] = idx
        img_str = base64.b64encode(frame.flatten()).decode(
            'utf8')  # decode to plain string
        payload['data'] = img_str
        payload['metadata'] = metadata
        print('%s, sending frame: %s' % (filename, idx))
        producer.send(TOPIC, json.dumps(payload).encode('utf8'))
        print('%s, sent frame: %s' % (filename, idx))
    # once done, move the file to the completed folder
    print('%s, moving to completed folder' % (filename, ))
    shutil.move(filepath, COMPLETED_DIR)
Example #9
def extract_frames(fname):
    meta = skvio.ffprobe(fname)
    fps = int(meta['video']['@r_frame_rate'].split('/')[0])
    size = (int(meta['video']['@width']), int(meta['video']['@height']))
    assert fps > 0, 'Broken video %s' % fname
    frames = list(skvio.vread(fname))
    return frames, fps, size
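Note that taking only the numerator of @r_frame_rate is exact for integer rates like "25/1" but over-reports fractional rates ("30000/1001" would become 30000 fps). A hedged variant that keeps the fraction exact (the function name is made up):

from fractions import Fraction

import skvideo.io as skvio


def extract_frames_exact_fps(fname):
    """Variant of extract_frames that preserves fractional frame rates."""
    meta = skvio.ffprobe(fname)
    # '@r_frame_rate' is a fraction string such as '30000/1001'
    fps = float(Fraction(meta['video']['@r_frame_rate']))
    size = (int(meta['video']['@width']), int(meta['video']['@height']))
    assert fps > 0, 'Broken video %s' % fname
    frames = list(skvio.vread(fname))
    return frames, fps, size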
Example #10
    def _make_data_set(root, video_paths, class_to_idx, init_shuffle,
                       video_index):
        def _is_video_file(filename_):
            return any(
                filename_.endswith(extension)
                for extension in VIDEO_EXTENSIONS)

        if init_shuffle and not video_index:
            list_shuffle(video_paths)  # shuffle

        videos = list()
        frames_per_video = list()
        frames_per_class = [0] * len(class_to_idx)
        frames_counter = 0
        for filename in tqdm(video_paths, ncols=80):
            class_ = filename.split('/')[0]
            data_path = join(root, filename)
            if _is_video_file(data_path):
                video_meta = ffprobe(data_path)
                start_idx = frames_counter
                frames = int(video_meta['video'].get('@nb_frames'))
                frames_per_video.append(frames)
                frames_per_class[class_to_idx[class_]] += frames
                frames_counter += frames
                item = ((frames_counter - 1, start_idx),
                        (filename, class_to_idx[class_]))
                videos.append(item)

        sleep(0.5)  # allows for progress bar completion
        return videos, frames_counter, frames_per_video, frames_per_class
Example #11
    def __init__(self, data_path, motion_path=None):
        self.data_path = data_path
        self.motion_path = motion_path

        self.meta = skvio.ffprobe(self.data_path)
        fps = int(self.meta['video']['@r_frame_rate'].split('/')[0])
        assert fps > 0, 'Broken video %s' % self.data_path
        self._size = (int(self.meta['video']['@nb_frames']),
                      self.meta['video']['@height'],
                      self.meta['video']['@width'])
Example #12
def extract_frames_from_vid(vid_path):
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    data = io.ffprobe(vid_path)['video']
    rate = int(data['@r_frame_rate'].split('/')[0])
    out = None
    for frame in tqdm(io.vreader(vid_path), unit=' frame'):
        if out is None:
            out = cv2.VideoWriter('out_' + vid_path, fourcc, rate,
                                  frame.shape[:2][::-1])
        frame = frame_face_blur(frame)
        out.write(frame)
    out.release()
Example #13
def main():

    # parse the arguments
    parser = argparse.ArgumentParser(description='Retouch video.')
    parser.add_argument('--src_path',
                        type=str,
                        default='../retouch_temporal_videos',
                        help='source path')
    parser.add_argument('--dst_path',
                        type=str,
                        default='../',
                        help='destination path')
    parser.add_argument('--intensity',
                        type=str,
                        default='strong',
                        help='strong or weak')
    args = parser.parse_args()

    # source directory validation check
    src_path = args.src_path
    dst_path = args.dst_path
    intensity = args.intensity

    # method validation check
    methods = ["blur", "median", "noise"]

    # set destination directory name
    for method in methods:
        try:
            makedirs(join(dst_path, intensity, method))
            pass
        except FileExistsError:
            pass

    counter = 1
    fnames = glob.glob(join(src_path, intensity, "*.mp4"))
    print("%8s| file name" % "counter")

    for method in methods:
        fnames = glob.glob(join(src_path, intensity, method, "*.mp4"))
        for fname in fnames:
            vid = np.array(vio.vread(fname))
            vid_meta = vio.ffprobe(fname)
            # print(vid_meta)
            output_file = join(dst_path, intensity, method,
                               fname.split("\\")[-1].split(".")[0] + ".png")
            img = Image.fromarray(vid[0], 'RGB')
            img.save(output_file)
            print("%8d: %s" % (counter, output_file))
            counter += 1

    print("Process end on directory \"%s\"" % src_path)
Example #14
 def process_video(self,
                   video_p: Path,
                   output_p: Path,
                   reduce_rate: int = 1):
     meta = ffprobe(video_p)
     nb_frames = int(meta["video"]["@nb_frames"])
     frames = vreader(str(video_p))
     writer = FFmpegWriter(str(output_p),
                           outputdict={"-r": str(int(30 / reduce_rate))})
     for i, frame in enumerate(tqdm(frames, total=nb_frames)):
         if i % reduce_rate == 0:
             frame = self.process_frame(frame)
             writer.writeFrame(frame)
     writer.close()
Example #15
def get_single_statistics(video_path):
    try:
        meta_data = skvid.ffprobe(video_path)['video']
    except KeyError:
        return False, None, None, None

    retry = True
    while retry:
        try:
            height, width, frames, retry = get_info(meta_data)
        except KeyError:
            height, width, frames, retry = get_info(meta_data)

    return True, int(height), int(width), int(frames)
Example #16
    def _calc_bpp(self, video):
        # calculate video bits-per-pixel
        meta = sk.ffprobe(video)['video']

        # read video metadata
        w = float(meta['@width'])
        h = float(meta['@height'])

        # file_size in bits
        bits = os.path.getsize(video) * 8

        # calc bpp
        bpp = bits / (self.n_gop * w * h)

        return bpp
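A quick worked check of the bits-per-pixel formula, with illustrative numbers only:

# illustrative numbers only: 1920x1080 frames, n_gop = 12, 311,040-byte file
bits = 311_040 * 8               # 2,488,320 bits
bpp = bits / (12 * 1920 * 1080)  # = 0.1 bits per pixel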
Example #17
 def __init__(self, path, bw=False):
     self.path = os.path.realpath(path)
     self.probedata = ffprobe(self.path)
     self.num_frames = int(self.probedata['video']['@duration_ts'])
     fr = self.probedata['video']['@avg_frame_rate'].split("/")
     self.fps = float(fr[0])/float(fr[1])
     self.duration = float(self.num_frames)/self.fps
     self.width = int(self.probedata['video']['@width'])
     self.height = int(self.probedata['video']['@height'])
     if bw:
         self.depth = 1
     else:
         self.depth = 3
     self.framesize = self.width*self.height*self.depth
     self.pix_fmt = self.probedata['video']['@pix_fmt']
Example #18
    def wanted(self, filename):
        info = ffprobe(filename)
        vinfo = info['video']

        avg = vinfo['@r_frame_rate']
        num = float(avg.split('/')[0])
        den = float(avg.split('/')[1])
        fps = float(num / den)

        duration = float(vinfo['@duration']) * 0.90
        n_frames = int(fps * duration)
        start = int(math.floor(float(vinfo['@duration']) * fps * 0.05))
        step = int(math.floor(n_frames / self.grab_n_frames))

        wanted = [n for n in range(start, n_frames, step)]
        return set(wanted[:self.grab_n_frames])
Example #19
def get_video_info(video_path):

    cap = sk.vreader(video_path)
    seg_l = 4

    metadata = sk.ffprobe(video_path)
    # print (json.dumps(metadata, indent=4))
    # print (json.dumps(metadata["video"], indent=4))
    """
    fps : @r_frame_rate
    length : @duration
    frames : @nb_frames
    """
    length = float(metadata["video"]["@duration"])
    # fnum = float(metadata["video"]["@nb_frames"])
    num, den = metadata["video"]["@r_frame_rate"].split('/')
    fps = float(num) / float(den)
    fnum = int(np.ceil(length * fps))

    print('length : %.5f / frames : %d / fps : %.2f' % (length, fnum, fps))

    img_id = []
    frame_list = []
    id = 0
    for frame in cap:
        frame = cv2.resize(frame, dsize=(224, 224))
        frame_list.append(frame)
        img_id.append(id)
        id += 1

    segs = [img_id[i:i + seg_l] for i in range(len(img_id) - seg_l + 1)]
    segs = reduce(lambda x, y: x + y, segs)

    feat = []

    for seg in segs:
        feat.append(frame_list[seg])

    idx = np.arange(fps, fnum, fps)
    idx = np.floor(idx)
    idx = idx.tolist()
    idx = map(int, idx)

    return feat, fnum, fps, length, img_id, idx
Example #20
def ucf101_stats():
    """UCF101 statistics"""
    collects = {'nb_frames': [], 'heights': [], 'widths': [], 
                'aspect_ratios': [], 'frame_rates': []}

    for filename in glob('../data/ucf101/video/*/*.avi'):
        metadata = ffprobe(filename)['video']
        collects['nb_frames'].append(eval(metadata['@nb_frames']))
        collects['heights'].append(eval(metadata['@height']))
        collects['widths'].append(eval(metadata['@width']))
        collects['aspect_ratios'].append(metadata['@display_aspect_ratio'])
        collects['frame_rates'].append(eval(metadata['@avg_frame_rate']))

    stats = {key: sorted(list(set(collects[key]))) for key in collects.keys()}
    stats['nb_frames'] = [stats['nb_frames'][0], stats['nb_frames'][-1]]

    pprint(stats)
Example #21
def getFrames(path, interval=1):
    """ extract some frames from a video

    :param path: storage location of video
    :param interval: a frame every interval seconds will be retrieved
    :return: a list of length no_frames with ndarrays of frames with shape (frame_height, frame_width, 3)
    """

    # known bug in OpenCV 3, can't do video capture from file
    # https://github.com/ContinuumIO/anaconda-issues/issues/121
    if cv2.__version__.startswith("2."):
        print("capturing video:", path)
        cap = cv2.VideoCapture(path)
        assert cap.isOpened(), "Couldn't open capture for " + path
        frames_count = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
        try:
            fps = int(round(cap.get(cv2.cv.CV_CAP_PROP_FPS), 0))
        except ValueError:  # could not retrieve fps
            fps = 24

        def getFrame(frame_id):
            success, f_ = cap.read(frame_id)
            assert success, "could not capture frame " + str(
                frame_id * fps) + "\n" + str(frame_id)
            return f_
    else:
        # this solution does not work on pythonanywhere as there is no imageio installed.
        from imageio import get_reader
        from skvideo.io import ffprobe

        vid = get_reader(path, "ffmpeg")
        videometadata = ffprobe(path)
        rates = videometadata['video']['@avg_frame_rate'].split("/")
        fps = int(rates[0]) // int(rates[1])
        frames_count = int(videometadata['video']['@nb_frames'])

        def getFrame(frame_id):
            return vid.get_data(frame_id)

    height, width, _ = getFrame(0).shape
    rate = fps * interval  # every rate-th frame is extracted
    no_frames = frames_count // rate  # so many frames will be extracted
    return [getFrame(n * rate) for n in range(no_frames)]
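A brief usage sketch of getFrames (the file name is a placeholder; OpenCV 2 or imageio plus skvideo must be available, as the function itself requires):

# hypothetical input file; grab one frame every 2 seconds
frames = getFrames("lecture.mp4", interval=2)
print(len(frames), frames[0].shape)  # e.g. 450 (height, width, 3)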
Example #22
def make_video_frames(datadir, outdir):
    # train video data
    for i in range(10000):
        if not os.path.exists(os.path.join(outdir, 'video%s' % i)):
            os.makedirs(os.path.join(outdir, 'video%s' % i))
        filename = ('video%s.mp4' % i)
        videopath = os.path.join(datadir, filename)
        cap = sk.vreader(videopath)

        metadata = sk.ffprobe(videopath)
        # print json.dumps(metadata["video"], indent=4)
        """
        fps : @r_frame_rate
        length : @duration
        frames : @nb_frames
        """
        length = float(metadata["video"]["@duration"])
        frames = float(metadata["video"]["@nb_frames"])
        fps = int(frames / length)

        print('%sth video' % i)
        print('length : %d / frames : %d / fps : %d' % (length, frames, fps))

        cent = np.linspace(0, length, 7)[1:-1]
        for x in range(len(cent)):
            cent[x] = int(cent[x])
        frames = cent * fps

        idx = 0
        filenum = 0
        for frame in cap:
            if idx in frames:
                frame = cv2.resize(frame, dsize=(224, 224))
                sk.vwrite(outdir + '/video%s/frame%s.png' % (i, filenum),
                          frame)
                filenum += 1
            idx += 1

        if i % 1000 == 0:
            print('%sth video processed...' % i)
Example #23
    def __getitem__(self, index):
        annotation = self.annotation[index]
        video_path = annotation['path']
        clazz = annotation['class']

        metadata = ffprobe(video_path)
        duration = float(metadata["video"]["@duration"])

        output_parameter = self.base_parameter

        if self.clip_duration > 0:
            sta = int(random() * max((duration - self.clip_duration), 0.))
            output_parameter.update({
                "-ss": "{}".format(sta),
                "-t": "{}".format(min(self.clip_duration, duration - sta))
            })
        video_data = vread(video_path, outputdict=output_parameter)

        if self.transform:
            video_data = self.transform(video_data)

        return video_data, clazz
Example #24
    def _shuffle(self):
        """
        Shuffles the video list
        by regenerating the sequence to sample sequentially
        """
        def _is_video_file(filename_):
            return any(
                filename_.endswith(extension)
                for extension in VIDEO_EXTENSIONS)

        root = self.root
        video_paths = self.video_paths
        class_to_idx = self.class_to_idx
        list_shuffle(video_paths)  # shuffle

        videos = list()
        frames_per_video = list()
        frames_counter = 0
        for filename in tqdm(video_paths, ncols=80):
            class_ = filename.split('/')[0]
            data_path = join(root, filename)
            if _is_video_file(data_path):
                video_meta = ffprobe(data_path)
                start_idx = frames_counter
                frames = int(video_meta['video'].get('@nb_frames'))
                frames_per_video.append(frames)
                frames_counter += frames
                item = ((frames_counter - 1, start_idx),
                        (filename, class_to_idx[class_]))
                videos.append(item)

        sleep(0.5)  # allows for progress bar completion
        # update the attributes with the altered sequence
        self.video_paths = video_paths
        self.videos = videos
        self.frames = frames_counter
        self.frames_per_video = frames_per_video
Example #25
    def __getitem__(self, index):
        annotation = self.annotation[index]
        video_path = annotation['path']
        clazz = annotation['class']

        metadata = ffprobe(video_path)
        duration = float(metadata["video"]["@duration"])

        output_parameter = self.base_parameter

        if self.clip_duration > 0:
            sta = int(random() * max((duration - self.clip_duration), 0.))
            output_parameter.update({
                "-ss":
                "{}".format(sta),
                "-t":
                "{}".format(min(self.clip_duration, duration - sta))
            })
        video_data = vread(video_path, outputdict=output_parameter)

        if self.transform:
            video_data = self.transform(video_data)

        return video_data, clazz
Example #26
def get_video_metadata(folder, file_name):
    return [int(skv.ffprobe(folder + file_name)['video']['@bit_rate']),
            os.path.getsize(folder + file_name)]
Example #27
def main():

	# parse the arguments
	parser = argparse.ArgumentParser(description='Retouch video.')
	parser.add_argument('--src_path', type=str, default='./trainS_input', help='source path')
	parser.add_argument('--dst_path', type=str, default='./trainS_output', help='destination path')
	parser.add_argument('--intensity', type=str, default='strong', help='strong or weak')
	args = parser.parse_args()

	# source directory validation check
	src_path 	= args.src_path
	dst_path 	= args.dst_path
	intensity  	= args.intensity

	# method validation check
	bitrates = ["500k", "600k", "700k", "800k"]
	methods = ["blur", "median", "noise"]
	# methods = ["original"]

	# set destination directory name
	for method in methods:
		for bitrate in bitrates:
			try:
				makedirs(join(dst_path, "retouch_"+intensity, method, bitrate))
				pass
			except FileExistsError:
				pass

	counter = 1
	fnames = glob.glob(join(src_path, "*k", "*.mp4"))
	print("%8s| file name" % "counter")

	# retouch video
	for fname in fnames:
		# video read
		meta = vio.ffprobe(fname)
		vid = np.array(vio.vread(fname))
		vid_retouched = np.zeros(vid.shape)
		fn, w, h, c = vid.shape
		if w != 256 or h != 256 or c != 3: 
			print("================ wrong size file: \"{}\"".format(fname))
			continue

		# parse bitrate from file name
		bitrate = fname.split("\\")[-2]

		for method in methods:
			# get manipulated frame 
			for i in range(fn):
			    vid_retouched[i,:,:,:] = manipulate(vid[i,:,:,:], method, intensity=intensity)  # see manipulate.py

			vid_retouched = vid_retouched.astype(np.uint8)

			# set output file name
			output_file = join(dst_path, "retouch_"+intensity, method, bitrate, basename(fname))
			print("%8d: %s" % (counter , output_file))
			counter += 1
			
			# load writer with parameter
			# "-vcodec = libx264" 	: h.264 codec
			# "-r = 30" 			: fps
			# "-g = 4"				: GOP size
			# "-bf = 0" 			: number of b frame
			# "-b:v = bitrate" 		: bitrate
			# "-pix_fmt = yuv420p"	: color space
			write_option = {'-vcodec': 'libx264', '-r': '30', '-g': '4', '-bf': '0', '-b:v': bitrate, '-pix_fmt': 'yuv420p'}
			writer = vio.FFmpegWriter(filename=output_file, inputdict={'-r': '30'}, outputdict=write_option)
			for i in range(fn):
				writer.writeFrame(vid_retouched[i, :, :, :])
			# writer.writeFrame(vid_retouched)
			writer.close()

	print("Process end on directory \"%s\"" % src_path)
Example #28
 def _video_longer_enough(filename):
     """Return true if video `filename` is longer than `min_video_len`"""
     path = os.path.join(root_dir, 'video', filename)
     metadata = ffprobe(path)['video']
     return eval(metadata['@nb_frames']) >= min_video_len
Example #29
def video_info(path):
    '''Returns number of frames and frame rate.'''
    video_info = ffprobe(str(path))
    n_frames = int(video_info['video']['@nb_frames'])
    a, b = L(video_info['video']['@r_frame_rate'].split('/')).map(int)
    return n_frames, a/b
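L here is presumably fastcore's list class; a plain-Python equivalent as a hedged sketch (the name video_info_plain is made up):

from skvideo.io import ffprobe


def video_info_plain(path):
    '''Plain-Python variant: returns number of frames and frame rate.'''
    video_info = ffprobe(str(path))
    n_frames = int(video_info['video']['@nb_frames'])
    a, b = (int(x) for x in video_info['video']['@r_frame_rate'].split('/'))
    return n_frames, a / b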
Example #30
def num_frames_in_vid(vid_path):
    metadata = ffprobe(vid_path)
    return int(metadata['video']['@nb_frames'])
Example #31
def get_metdata(path: str):
    a = ffprobe(path)['video']
    # print('FFPROBE :', path, pformat(a))
    return a
Example #32
def main():

    # parse the arguments
    parser = argparse.ArgumentParser(description='Retouch video.')
    parser.add_argument('--src_path',
                        type=str,
                        default='../27M',
                        help='source path')
    parser.add_argument('--gt_path',
                        type=str,
                        default='../gt',
                        help='ground truth path')
    parser.add_argument('--dst_path',
                        type=str,
                        default='../retouch_spatial',
                        help='destination path')
    parser.add_argument('--intensity',
                        type=str,
                        default='extreme',
                        help='strong or weak')
    args = parser.parse_args()

    # source directory validation check
    src_path = args.src_path
    gt_path = args.gt_path
    dst_path = args.dst_path
    intensity = args.intensity

    # method validation check
    bitrate = "27M"
    methods = ["blur", "median", "noise"]
    # methods = ["original"]

    # set destination directory name
    for method in methods:
        try:
            makedirs(join(dst_path, intensity, method))
            pass
        except FileExistsError:
            pass

    counter = 1
    fnames = glob.glob(join(src_path, "*.mp4"))
    gt_names = glob.glob(join(gt_path, "*.png"))
    print("%8s| file name" % "counter")

    # retouch video
    for fname in fnames:
        gt = None
        for gt_name in gt_names:
            if basename(fname).split('.')[0] in gt_name:
                gt = im2double(cv2.imread(gt_name, cv2.IMREAD_COLOR))
        # video read
        meta = vio.ffprobe(fname)
        vid = np.array(vio.vread(fname))
        vid_retouched = np.zeros(vid.shape)
        fn, w, h, c = vid.shape

        # parse bitrate from file name

        for method in methods:
            # get manipulated frame
            for i in range(fn):
                retouched = manipulate(vid[i, :, :, :],
                                       method,
                                       intensity=intensity)  # see manipulate.py
                original = vid[i, :, :, :]
                retouched_spatial = gt * retouched + (1 - gt) * original
                vid_retouched[i, :, :, :] = retouched_spatial

            vid_retouched = vid_retouched.astype(np.uint8)

            # load writer with parameter
            # "-vcodec = libx264"   : h.264 codec
            # "-r = 30"             : fps
            # "-g = 4"              : GOP size
            # "-bf = 0"             : number of b frame
            # "-b:v = bitrate"      : bitrate
            # "-pix_fmt = yuv420p"  : color space
            output_file = join(dst_path, intensity, method, basename(fname))
            write_option = {
                '-vcodec': 'libx264',
                '-r': '30',
                '-g': '4',
                '-bf': '0',
                '-b:v': bitrate,
                '-pix_fmt': 'yuv420p'
            }
            writer = vio.FFmpegWriter(filename=output_file,
                                      inputdict={'-r': '30'},
                                      outputdict=write_option)
            for i in range(fn):
                writer.writeFrame(vid_retouched[i, :, :, :])
            # writer.writeFrame(vid_retouched)
            writer.close()

            # set output file name

            print("%8d: %s" % (counter, output_file))
            counter += 1

    print("Process end on directory \"%s\"" % src_path)