Example #1
 def make_video(self, replay, ext=''):
     # replay is a list of (batch, channels, height, width) frame tensors
     n_frames = len(replay)
     b_s, n_channels, n_w, n_h = replay[0].shape
     writer = VideoWriter(self.filename + ext + '.mp4')
     for i in range(n_frames):
         # first sample of the batch, channels reordered, scaled to 0-255
         writer.writeFrame(replay[i][0][[1, 2, 0]] * 255)
     writer.close()
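
For orientation: every example on this page follows the same skvideo.io.FFmpegWriter pattern, sketched minimally below (the file name and random frames are placeholders; ffmpeg must be on the PATH).

import numpy as np
from skvideo.io import FFmpegWriter

writer = FFmpegWriter('demo.mp4',
                      inputdict={'-r': '30'},    # frame rate of the incoming frames
                      outputdict={'-r': '30',    # frame rate of the written file
                                  '-vcodec': 'libx264',
                                  '-pix_fmt': 'yuv420p'})
for _ in range(30):
    # writeFrame expects an (H, W, 3) RGB array or an (H, W) grayscale array
    writer.writeFrame(np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8))
writer.close()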
Example #2
    def _open_file(self, filename, frame=None):
        if frame is None:
            raise ValueError('[Recorder] Need to pass frame to open a file.')
        self.w = frame.shape[1]
        self.h = frame.shape[0]
        if self.frame_rate is None or self.frame_rate == 0:
            display('Using 30Hz frame rate for ffmpeg')
            self.frame_rate = 30

        self.doutputs['-r'] = str(self.frame_rate)
        self.dinputs = {'-r': str(self.frame_rate)}

        # check the datatype: 2-D uint16 frames are saved as compressed lossless
        if frame.dtype == np.uint16 and len(frame.shape) == 2:
            self.fd = FFmpegWriter(
                filename.replace(self.extension, '.mov'),
                inputdict={
                    '-pix_fmt': 'gray16le',
                    '-r': str(self.frame_rate)
                },  # this is important
                outputdict={
                    '-c:v': 'libopenjpeg',
                    '-pix_fmt': 'gray16le',
                    '-r': str(self.frame_rate)
                })
        else:
            self.fd = FFmpegWriter(filename,
                                   inputdict=self.dinputs,
                                   outputdict=self.doutputs)
Example #3
class VideoProcessor:
    def __init__(self, trained_model, use_gpu=False):

        self.use_gpu = use_gpu
        self.cap = None
        self.video_writer = FFmpegWriter(
            "output.mp4",
            inputdict={'-r': str(12)},
            outputdict={'-c:v': 'libx264', '-pix_fmt': 'yuv420p', '-c:a': 'libvo_aacenc'})

        self.body_detector = BodyDetector(speed='fast')
        self.body_detector.load_model()

        self.fashion_classifier = torch.load(trained_model, map_location="cpu")
        if self.use_gpu:
            self.fashion_classifier.cuda()

        self.fashion_classifier.eval()
        self.label_dict = {0: "t-shirt/top", 1: "trouser", 2: "pullover", 3: "dress", 4: "coat",
                           5: "sandal", 6: "shirt", 7: "sneaker", 8: "bag", 9: "ankle boot"}
        self.data_transforms = transforms.Compose([transforms.ToPILImage(), transforms.Grayscale(),
                                                   transforms.Resize((28, 28)), transforms.ToTensor()])

    def classify_region(self, region, frame):
        # run fashion classification model on each detected body region
        try:
            region_img = frame[region.top: region.bottom, region.left: region.right]
            region_img = cv2.cvtColor(region_img, cv2.COLOR_BGR2RGB)
            # apply the data transforms for the classification model
            region_img = self.data_transforms(region_img).expand(1, 1, 28, 28)
            output = self.fashion_classifier(region_img)
            predicted = output.argmax(dim=1, keepdim=True).item()
            return self.label_dict[predicted]

        except Exception as e:
            # classification failed for this region; leave it unlabelled
            print(e)
            return ""

    def process(self, video):
        self.cap = cv2.VideoCapture(video)
        while self.cap.isOpened():
            _, frame = self.cap.read()
            if frame is not None:
                # downsize the original frame to speed up processing
                frame = cv2.resize(frame, (640, 480))
                # run body detection first
                body_regions = self.body_detector.process(frame)
                output_image = frame
                for b_r in body_regions:
                    # write predicted fashion class name on the frame
                    output_image = label_region(output_image, text=self.classify_region(b_r, frame), region=b_r,
                                                show_at_bottom=True, inside=True)
                # draw bounding box on the frame
                output_image = draw_regions(output_image, regions=body_regions)
                # write into a video
                self.video_writer.writeFrame(cv2.cvtColor(output_image, cv2.COLOR_BGR2RGB))
            else:
                break
        # finalize the output file and release the capture
        self.cap.release()
        self.video_writer.close()
Example #4
 def __init__(self, nfile, rate=2):
     self._nfile = nfile
     self._size = None
     self.out = FFmpegWriter(nfile,
                             inputdict={
                                 '-r': str(rate),
                             },
                             outputdict={
                                 '-r': str(rate),
                             })
Example #5
def detect_video(yolo, video_path, output_path="", close_session=True):
	vid = cv2.VideoCapture(video_path)
	if not vid.isOpened():
		raise IOError("Couldn't open webcam or video")
	video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
	video_fps = vid.get(cv2.CAP_PROP_FPS)
	video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
	              int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
	isOutput = output_path != ""
	if isOutput:
		from skvideo.io import FFmpegWriter
#		print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
#		out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
		out = FFmpegWriter(output_path, inputdict={'-r': str(video_fps)}, outputdict={'-r': str(video_fps)})
		
	accum_time = 0
	curr_fps = 0
	fps = "FPS: ??"
	prev_time = timer()
	while True:
		return_value, frame = vid.read()
		
		if frame is None: 
			break
	
		frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
		image = Image.fromarray(frame)
		image = yolo.detect_image(image)
		result = np.asarray(image)
		result = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)
		
		curr_time = timer()
		exec_time = curr_time - prev_time
		prev_time = curr_time
		accum_time = accum_time + exec_time
		curr_fps = curr_fps + 1
		if accum_time > 1:
			accum_time = accum_time - 1
			fps = "FPS: " + str(curr_fps)
			curr_fps = 0
		cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
					fontScale=0.50, color=(255, 0, 0), thickness=2)
		cv2.namedWindow("result", cv2.WINDOW_NORMAL)
		cv2.imshow("result", result)
		if isOutput:
			# FFmpegWriter expects an RGB ndarray, so convert the PIL image
			out.writeFrame(np.asarray(image))
		if cv2.waitKey(1) & 0xFF == ord('q'):
			break
	
	if isOutput:
		out.close()
	if close_session:
		yolo.close_session()
Example #6
    def denoise_video(self, PATH):
        self.cap = cv2.VideoCapture(PATH)
        self.H = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.W = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        # round dimensions down to a multiple of the 50x50 patch size
        self.SCALE_H, self.SCALE_W = (self.H // 50 * 50), (self.W // 50 * 50)
        self.batch_size = (self.SCALE_H * self.SCALE_W) // (50**2)

        outputFile = './denoise.mp4'
        writer = FFmpegWriter(outputFile,
                              outputdict={
                                  '-vcodec': 'libx264',
                                  '-crf': '0',
                                  '-preset': 'veryslow'
                              })

        while True:
            success, img = self.cap.read()
            if not success:
                break
            resize_img = cv2.resize(img, (self.SCALE_W, self.SCALE_H),
                                    interpolation=cv2.INTER_CUBIC).astype(np.float32)

            noise_img = resize_img / 255.0
            patches = self.get_patches(noise_img).astype(np.float32)
            predictions = np.clip(self.model(patches), 0, 1)
            pred_img = (self.reconstruct_from_patches(
                predictions, self.SCALE_H, self.SCALE_W, self.H, self.W, 50) *
                        255.0)
            if self.merge_outputs:
                merge = np.vstack(
                    [img[:self.H // 2, :, :], pred_img[:self.H // 2, :, :]])
                writer.writeFrame(merge[:, :, ::-1])
            else:
                writer.writeFrame(pred_img[:, :, ::-1])
        writer.close()
Example #7
def denoise_overlapped_strides(strides=(3, 3)):  #1 2 4 11

    #print '=== OVERLAPPING PATCHES',strides,'STRIDES ==============================='
    vidcap = cv2.VideoCapture(te_noisy_video)
    fname = te_noisy_video.rsplit('/', 1)[-1][:-4]
    outfile = './outputs/uber_video_llnet.mp4'
    # option values passed to FFmpegWriter must be strings
    writer = FFmpegWriter(outfile, outputdict={'-r': '24.4'})
    i = 0
    while True:
        ret, image = vidcap.read()
        if not ret: break
        i += 1
        print("On Frame", i)
        te_noisy_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        test_set_x, te_h, te_w = load_data_overlapped_strides(
            te_dataset=te_noisy_image, patch_size=patch_size, strides=strides)
        im_ = test_set_x.get_value()
        im_noisy = im_.reshape(im_.shape[0], *patch_size)
        rec_n = im.reconstruct_from_patches_2d(im_noisy, (te_h, te_w))
        reconstructed = theano.function([],
                                        sda.logLayer.y_pred,
                                        givens={sda.x: test_set_x},
                                        on_unused_input='warn')
        result = reconstructed()
        im_recon = result.reshape(result.shape[0], *patch_size)
        rec_r = reconstruct_from_patches_with_strides_2d(im_recon,
                                                         (te_h, te_w),
                                                         strides=strides)
        writer.writeFrame(rec_r)
    writer.close()
Example #8
def write_video(INPUT_VIDEO, tracks_per_frame, OUTPUT_VIDEO):
    #Reading from images under the given directory
    output_video = FFmpegWriter(OUTPUT_VIDEO)

    if os.path.isdir(INPUT_VIDEO):
        img_paths = natsorted(glob.glob(INPUT_VIDEO + "/*.jpg"))

        for i, img_path in enumerate(img_paths, start=1):
            frame = cv2.imread(img_path)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            tracks = tracks_per_frame.get(i, {})
            output_frame = render_frame(frame, tracks)
            output_video.writeFrame(output_frame)
            print("Writen Frame: {}".format(i))

    #Reading from a video
    else:
        print("Reading Video {}".format(INPUT_VIDEO))
        input_video = skvideo.io.vread(INPUT_VIDEO)
        print("Reading Finished")
        for i, frame in enumerate(input_video, start=1):
            tracks = tracks_per_frame.get(i, {})
            output_frame = render_frame(frame, tracks)
            output_video.writeFrame(output_frame)
            print("Writen Frame: {}".format(i))

    output_video.close()
Example #9
    def process(self, artifacts):
        if 'full_vid_handle' not in artifacts:
            full_vid_handle = FFmpegWriter(artifacts['full_vid_filename'], outputdict={'-crf': '0' })
            artifacts['full_vid_handle'] = full_vid_handle
        if 'compressed_vid_handle' not in artifacts:
            compressed_vid_handle = FFmpegWriter(artifacts['compressed_vid_filename'], outputdict={'-crf': '25'})
            artifacts['compressed_vid_handle'] = compressed_vid_handle
        if 'seg_vid_handle' not in artifacts:
            seg_vid_handle = FFmpegWriter(artifacts['seg_vid_filename'], outputdict={'-crf': '0'})
            artifacts['seg_vid_handle'] = seg_vid_handle

        artifacts['full_vid_handle'].writeFrame(artifacts['current_frame'])
        artifacts['compressed_vid_handle'].writeFrame(artifacts['current_frame'])
        artifacts['seg_vid_handle'].writeFrame(artifacts['segmented_frame'])
        return artifacts
Example #10
class TensorFrameWriter:
    """Writes N*(F*C)*H*W tensor frames to a video file."""
    def __init__(self,
                 out_path,
                 fps=25,
                 config=None,
                 adjust_axis=True,
                 make_grid=True):
        self.out_path = out_path
        ffmpeg_out_config = {
            '-r': str(fps),
            '-vcodec': 'libx264',
            '-pix_fmt': 'yuv420p',
        }
        if config is not None:
            ffmpeg_out_config.update(config)

        self.writer = FFmpegWriter(out_path, outputdict=ffmpeg_out_config)
        self.adjust_axis = adjust_axis
        self.make_grid = make_grid

    def add_tensor(self, np_grid):
        """Add a tensor of shape [..., C, H, W] representing the frame stacks
        for a single time step. Call this repeatedly for each time step you
        want to add."""
        if self.writer is None:
            raise RuntimeError("Cannot run add_tensor() again after closing!")
        if self.adjust_axis:
            # convert to (H, W, 3) numpy array
            np_grid = np_grid.transpose((1, 2, 0))
        self.writer.writeFrame(np_grid)

    def __enter__(self):
        assert self.writer is not None, \
            "cannot __enter__ this again once it is closed"
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def close(self):
        if self.writer is None:
            return
        self.writer.close()
        self.writer = None

    def __del__(self):
        self.close()
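
A short usage sketch for the class above (file name and frame shapes are illustrative, not from the original project):

import numpy as np

with TensorFrameWriter('grid.mp4', fps=10) as tfw:
    for _ in range(20):
        # a placeholder (C, H, W) frame; adjust_axis transposes it to (H, W, C)
        tfw.add_tensor(np.random.randint(0, 255, (3, 64, 64), dtype=np.uint8))
# leaving the with-block calls close(), which finalizes the file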
Example #11
    def __init__(self,
                 out_path,
                 fps=25,
                 config=None,
                 adjust_axis=True,
                 make_grid=True):
        self.out_path = out_path
        ffmpeg_out_config = {
            '-r': str(fps),
            '-vcodec': 'libx264',
            '-pix_fmt': 'yuv420p',
        }
        if config is not None:
            ffmpeg_out_config.update(config)

        self.writer = FFmpegWriter(out_path, outputdict=ffmpeg_out_config)
        self.adjust_axis = adjust_axis
        self.make_grid = make_grid
Example #12
class MyVideoWriter:
    def __init__(self, file, fps=None, *args, **kwargs):
        if fps is not None:
            kwargs['inputdict'] = {'-r': str(fps)}
        self.video_writer = FFmpegWriter(file, *args, **kwargs)

    def writeFrame(self, im):
        if len(im.shape) == 3 and im.shape[0] == 3:
            transformed_image = im.transpose((1, 2, 0))
        elif len(im.shape) == 2:
            transformed_image = np.concatenate(
                (im[:, :, None], im[:, :, None], im[:, :, None]), axis=-1)
        else:
            transformed_image = im
        self.video_writer.writeFrame(transformed_image)

    def close(self):
        self.video_writer.close()
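
A usage sketch for MyVideoWriter (file name and frames are placeholders); it accepts CHW color, plain grayscale, or HWC frames:

import numpy as np

mvw = MyVideoWriter('clip.mp4', fps=24)
mvw.writeFrame(np.zeros((3, 64, 64), dtype=np.uint8))  # CHW, transposed to HWC
mvw.writeFrame(np.zeros((64, 64), dtype=np.uint8))     # grayscale, replicated to 3 channels
mvw.close()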
Example #13
class SaveVideo:
    def __init__(self, nfile, rate=2):
        self._nfile = nfile
        self._size = None
        self.out = FFmpegWriter(nfile,
                                inputdict={
                                    '-r': str(rate),
                                },
                                outputdict={
                                    '-r': str(rate),
                                })

    def save_frame(self, frame, last=False):
        #stacked_img = np.stack((frame,) * 3, -1)
        #im_color = cv2.applyColorMap(stacked_img, cv2.COLORMAP_OCEAN)
        #im_color = cv2.applyColorMap(stacked_img, cv2.COLORMAP_HOT)

        self.out.writeFrame(frame)

        if last is True:
            self.out.close()
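
A usage sketch for SaveVideo (frame contents are placeholders); passing last=True with the final frame closes the writer:

import numpy as np

sv = SaveVideo('steps.mp4', rate=2)
frames = [np.full((64, 64, 3), v, dtype=np.uint8) for v in (0, 128, 255)]
for k, frame in enumerate(frames):
    sv.save_frame(frame, last=(k == len(frames) - 1))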
Example #14
    def __init__(self, trained_model, use_gpu=False):

        self.use_gpu = use_gpu
        self.cap = None
        self.video_writer = FFmpegWriter(
            "output.mp4",
            inputdict={'-r': str(12)},
            outputdict={'-c:v': 'libx264', '-pix_fmt': 'yuv420p', '-c:a': 'libvo_aacenc'})

        self.body_detector = BodyDetector(speed='fast')
        self.body_detector.load_model()

        self.fashion_classifier = torch.load(trained_model, map_location="cpu")
        if self.use_gpu:
            self.fashion_classifier.cuda()

        self.fashion_classifier.eval()
        self.label_dict = {0: "t-shirt/top", 1: "trouser", 2: "pullover", 3: "dress", 4: "coat",
                           5: "sandal", 6: "shirt", 7: "sneaker", 8: "bag", 9: "ankle boot"}
        self.data_transforms = transforms.Compose([transforms.ToPILImage(), transforms.Grayscale(),
                                                   transforms.Resize((28, 28)), transforms.ToTensor()])
Example #15
    def init(self):
        self.out_put_file = self.params['video_output']
        self.fps = self.params['fps']
        self.bit_rate = self.params['bitrate']
        self.width = self.params['trans_width']
        #self.height = self.params['trans_height']

        print(self.params)

        self.srt = self.params['srt']
        self.audio = self.params['audio']

        # X264 MP4V avc1 XVID MJPG
        # fourcc = cv.VideoWriter_fourcc(*'XVID')
        # self.out = cv.VideoWriter('%s.avi'%self.out_put_file, fourcc, self.fps, (640, 480), True)

        self.out = FFmpegWriter(self.out_put_file,
                                inputdict={
                                    '-i': self.audio,
                                    '-r': str(self.fps),
                                    '-pix_fmt': 'rgba'
                                },
                                outputdict={
                                    '-vcodec': 'libx264',
                                    '-profile:v': 'main',
                                    '-preset': 'ultrafast',
                                    '-r': str(self.fps),
                                    '-b:v': str(self.bit_rate),
                                    '-vf': 'format=yuv420p,scale=%s:%s,subtitles=%s'
                                           % (self.width, '-4', self.srt)
                                },
                                verbosity=1)

        return True
Example #16
def generate_video_from_list(image_list, save_path, framerate=30, downsample=1, warning=True, debug=True):
	'''
	create a video from a list of images at a given framerate
	note: the height and width of the images should be a multiple of 2

	parameters:
		image_list:		a list of image paths
		save_path:		the path to save the video file
		framerate:		fps of the output video
		downsample:		resize factor applied to each image
	'''
	if debug:
		assert islistofstring(image_list), 'the input should be a list of image paths'
		assert ispositiveinteger(framerate), 'the framerate should be a positive integer'
	mkdir_if_missing(save_path)
	inputdict = {'-r': str(framerate)}
	outputdict = {'-r': str(framerate), '-crf': '18', '-vcodec': 'libx264', '-profile:V': 'high', '-pix_fmt': 'yuv420p'}
	video_writer = FFmpegWriter(save_path, inputdict=inputdict, outputdict=outputdict)
	count = 1
	num_images = len(image_list)
	for image_path in image_list:
		print('processing frame %d/%d' % (count, num_images))
		image = load_image(image_path, resize_factor=downsample, warning=warning, debug=debug)

		# make sure the height and width are multiple of 2
		height, width = image.shape[0], image.shape[1]
		if not (height % 2 == 0 and width % 2 == 0):
			height += height % 2
			width += width % 2
			image = image_resize(image, target_size=[height, width], warning=warning, debug=debug)

		video_writer.writeFrame(image)
		count += 1

	video_writer.close()
Example #17
def annotate_video(file_path, coordinates):
    """
    Annotates supplied video from predicted coordinates.
    
    Args:
        file_path: path
            System path of video to annotate
        coordinates: list
            Predicted body part coordinates for each frame in the video
    """

    # Load raw video
    from skvideo.io import vreader, ffprobe, FFmpegWriter
    videogen = vreader(file_path)
    video_metadata = ffprobe(file_path)['video']
    fps = video_metadata['@r_frame_rate']
    frame_height, frame_width = next(vreader(file_path)).shape[:2]
    frame_side = frame_width if frame_width >= frame_height else frame_height

    # Initialize annotated video
    vcodec = 'libvpx-vp9'  #'libx264'
    writer = FFmpegWriter(normpath(file_path.split('.')[0] + '_tracked.mp4'),
                          inputdict={'-r': fps},
                          outputdict={
                              '-r': fps,
                              '-bitrate': '-1',
                              '-vcodec': vcodec,
                              '-pix_fmt': 'yuv420p',
                              '-lossless': '1'
                          })  #'-lossless': '1'

    # Annotate video
    from PIL import Image, ImageDraw
    i = 0
    while True:
        try:
            frame = next(videogen)
            image = Image.fromarray(frame)
            image_draw = ImageDraw.Draw(image)
            image_coordinates = coordinates[i]
            image = helpers.display_body_parts(image,
                                               image_draw,
                                               image_coordinates,
                                               image_height=frame_height,
                                               image_width=frame_width,
                                               marker_radius=int(frame_side /
                                                                 150))
            image = helpers.display_segments(image,
                                             image_draw,
                                             image_coordinates,
                                             image_height=frame_height,
                                             image_width=frame_width,
                                             segment_width=int(frame_side /
                                                               100))
            writer.writeFrame(np.array(image))
            i += 1
        except (StopIteration, IndexError):
            # end of the video stream or of the predicted coordinates
            break

    writer.close()
Example #18
def main():
    dev = device("cuda")
    logging_dir = r"C:\Users\tonys\projects\python\comma\effdepth-models"
    checkpoint_path = join(logging_dir, r"manual-velocity\depth-epoch=16.ckpt")
    model = EffDepthTraining.load_from_checkpoint(checkpoint_path)
    model = model.to(dev)

    # frame_template = (
    #     r"C:\Users\tonys\projects\python\comma\2k19\2018-07-29--12-02-42"
    #     r"\30\frames-160x320\frame-{}.jpg"
    # )
    # output_path = (
    #     r"C:\Users\tonys\projects\python\comma\2k19\2018-07-29--12-02-42"
    #     r"\30\disparity.mp4"
    # )
    frame_template = (r"C:\Users\tonys\projects\python\comma\speedchallenge"
                      r"\test\frames-160x320\frame-{}.jpg")
    output_path = (r"C:\Users\tonys\projects\python\comma\speedchallenge"
                   r"\test\output.mp4")
    dataset = SequenceData.no_target_dataset(
        frame_template,
        961,
        model.hparams,
    )

    writer = FFmpegWriter(output_path)
    for i in tqdm(range(961)):
        image = dataset.load_images([i]).to(dev)
        disparity = model.depth_decoder(model.encoder(image))[0]
        disparity = disparity.detach().cpu().numpy()[0, 0]
        disparity = round_(disparity * 255).astype("uint8")
        writer.writeFrame(disparity)
    writer.close()
Example #19
def main(filename):
    cap = cv2.VideoCapture(filename)
    frame_count = np.min((int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 1000))

    output_filename = path.join(path.dirname(filename),
                                path.basename(filename)[0:-4] + "_flt.mp4")
    full_vid_handle = FFmpegWriter(output_filename)

    for i in range(frame_count):
        _, current_frame = cap.read()
        current_frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
        #labeled_frame, n = label(current_frame)
        labeled_frame = current_frame

        #area = measurements.sum(labeled_frame != 0, labeled_frame, range(n))
        #badAreas = set(np.where((area < 180) | (area > 850))[0])
        before = time()
        #labeled_frame[np.isin(labeled_frame, set(badAreas))] = 0
        #labeled_frame[labeled_frame == badAreas[0]] = 0
        #should_remove = [[i in badAreas for i in r] for r in labeled_frame]
        labeled_frame[labeled_frame != 0] = 1
        labeled_frame = ndimage.binary_opening(labeled_frame,
                                               structure=np.ones(
                                                   (5, 5))).astype(np.int32)
        after = time() - before
        print('Line time: %f' % (after, ))
        labeled_frame[labeled_frame > 0] = 255

        full_vid_handle.writeFrame(labeled_frame)

        print('Frame: %d' % (i, ))

    full_vid_handle.close()
Example #20
def create_segmentation_demonstration_movie(first_movie_path,
                                            second_movie_path):
    # Some constants
    bar_width_frac = 1 / 80
    frames_per_screen = 25

    output_filename = path.join(path.dirname(first_movie_path), "seg_demo.mp4")
    output_file = FFmpegWriter(output_filename, outputdict={'-crf': '20'})

    first_vid = cv2.VideoCapture(first_movie_path)
    second_vid = cv2.VideoCapture(second_movie_path)

    frame_num_1 = int(first_vid.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_num_2 = int(second_vid.get(cv2.CAP_PROP_FRAME_COUNT))

    frame_num = np.min((frame_num_1, frame_num_2))

    # Reading first frame for shape.
    _, first_frame = first_vid.read()
    _, second_frame = second_vid.read()

    # Aligning only the segmented version.
    second_frame_align = alignImage(second_frame)

    width = first_frame.shape[1]
    bar_width = bar_width_frac * width

    splits_num = int(1 / bar_width_frac)
    segments = [tuple(s) for s in np.split(np.array(range(width)), splits_num)]

    for i in range(frame_num - 2):
        #for i in range(750):
        print('Working on frame %d' % (i, ))

        _, first_frame = first_vid.read()
        _, second_frame = second_vid.read()

        first_frame = np.roll(first_frame, second_frame_align, axis=1)
        second_frame = np.roll(second_frame, second_frame_align, axis=1)

        bar_step = i % (splits_num * 2)
        if bar_step > splits_num:
            bar_step = splits_num - (bar_step - splits_num)

        #print(bar_step)
        if bar_step > 0 and bar_step < splits_num:
            first_part = first_frame[:, np.ravel(segments[bar_step:]), :]
            second_part = second_frame[:, np.ravel(segments[0:bar_step]), :]

            new_frame = np.concatenate((second_part, first_part), axis=1)

            output_file.writeFrame(new_frame)

    output_file.close()
    first_vid.release()
    second_vid.release()
Example #21
    def create(cls, hertz, filename):
        """
        Creates a new instance of a currently recording video

        hertz: input frame rate passed to ffmpeg
        filename: path of the video file to create
        """

        skvideo.setFFmpegPath('./skvideo/')
        #TODO: Side effect, should try/except or return a Result
        writer = FFmpegWriter(filename, inputdict={'-r': str(hertz)})

        print(f"Creating video file {filename} in directory {os.getcwd()}...")

        return Video(cls.__create_key, filename, writer)
Example #22
def generate_frame_renderer(output_path):
    input_dict = {"-vsync": "0", "-r": f"{args.read_fps}"}
    output_dict = {
        "-vsync": "0",
        "-r": f"{args.output_fps}",
        "-preset": args.preset
    }
    if args.audio != "":
        input_dict.update({"-i": "{}".format(args.audio)})
        output_dict.update({"-c:a": "copy"})
    if args.HDR:
        if not args.hwaccel:
            output_dict.update({
                "-c:v": "libx265",
                "-tune": "grain",
                "-profile:v": "main10",
                "-pix_fmt": "yuv420p10le",
                "-x265-params":
                "hdr-opt=1:repeat-headers=1:colorprim=bt2020:transfer=smpte2084:colormatrix=bt2020nc:master-display=G(13250,34500)B(7500,3000)R(34000,16000)WP(15635,16450)L(10000000,1):max-cll=0,0",
                "-crf": f"{args.crf}",
            })
        else:
            output_dict.update({
                "-c:v": "hevc_nvenc",
                "-rc:v": "vbr_hq",
                "-profile:v": "main10",
                "-pix_fmt": "p010le",
                "-cq:v": f"{args.crf}",
            })
    else:
        if not args.hwaccel:
            output_dict.update({
                "-c:v": "libx264",
                "-tune": "grain",
                "-pix_fmt": "yuv420p",
                "-crf": f"{args.crf}",
            })
        else:
            output_dict.update({
                "-c:v": "h264_nvenc",
                "-rc:v": "vbr_hq",
                "-pix_fmt": "yuv420p",
                "-cq:v": f"{args.crf}",
            })
    return FFmpegWriter(filename=output_path,
                        inputdict=input_dict,
                        outputdict=output_dict)
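
generate_frame_renderer reads its settings from a module-level args object; a call sketch that fakes that namespace with types.SimpleNamespace for illustration:

from types import SimpleNamespace

args = SimpleNamespace(read_fps=24, output_fps=60, preset='slow',
                       audio='', HDR=False, hwaccel=False, crf=16)
writer = generate_frame_renderer('interpolated.mp4')  # libx264, yuv420p, crf 16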
Example #23
def writeAnnotatedVideo(write_file, annotated_frames, fps):

    video = FFmpegWriter(write_file,
                         inputdict={'-r': str(fps)}, outputdict={'-r': str(fps)})

    frames = np.array(annotated_frames)

    for frame_num in tqdm(np.arange(frames.shape[0])):
        video.writeFrame(frames[frame_num,:,:])

    video.close()
Example #24
    def thread_run(image_list: list, time_str: str, save_dir: str, fps: int,
                   size: tuple):
        # print("test1-------------------------------")
        # print(id)
        # print(len(image_list))
        # fourcc = cv2.VideoWriter_fourcc(*'MPEG')
        save_path = save_dir + '/video_' + time_str + '.mp4'
        # out = cv2.VideoWriter(save_path, fourcc, fps, size)
        writer = FFmpegWriter(save_path)
        # writer.open()
        for image in image_list:
            # print(image)
            # im = cv2.resize(image, size)
            writer.writeFrame(image)
        # out.release()
        writer.close()
        print('save video: ', save_path)


# class VideoProcessThread(QThread):
#     def __init__(self):
#         super(VideoProcessThread, self).__init__()
#         self.obj = None
#         self.processing_images_list = []
#         self.id = -1

#     def receive_image(self, image):
#         self.processing_images_list.append(image)

#     def run(self):
#         # print("test1-------------------------------")
#         while not self.obj.isThreadStop:
#             # print("test2-------------------------------")
#             if len(self.processing_images_list) > 0:
#                 print("len of processing_images: {}".format(len(self.processing_images_list)))
#             lock.acquire(True)
#             if len(self.processing_images_list) > 0 and self.obj.out is not None:
#                 print("test3-------------------------------")
#                 im = self.processing_images_list.pop(0)
#                 im = cv2.resize(im, (self.obj.width, self.obj.height))
#                 # print("video write:", self.out.write(im))  # write the frame
#                 # print(type(im))
#                 # print(im.shape)
#                 try:
#                     print("{}|".format(self.obj.count), im.shape)
#                     self.obj.out.write(im)
#                 except:
#                     print("fail to save video")
#             lock.release()  # release the lock
#             time.sleep(0.02)
#         self.obj.out.release()
Example #25
 def process_video(self,
                   video_p: Path,
                   output_p: Path,
                   reduce_rate: int = 1):
     meta = ffprobe(video_p)
     nb_frames = int(meta["video"]["@nb_frames"])
     frames = vreader(str(video_p))
     writer = FFmpegWriter(str(output_p),
                           outputdict={"-r": str(int(30 / reduce_rate))})
     for i, frame in enumerate(tqdm(frames, total=nb_frames)):
         if i % reduce_rate == 0:
             frame = self.process_frame(frame)
             writer.writeFrame(frame)
     writer.close()
Example #26
def write_video(input_path, output_path, sess):
    i = 0
    print("Reading Video {}".format(input_path))
    input_video = skvideo.io.vread(input_path)
    print("Reading Finished")
    output_video = FFmpegWriter(output_path)
    for input_frame in input_video:
        print(input_frame.shape)
        dts = detect_img(sess, input_frame, NMS_THRESH=NMS_THRESH)
        output_frame = render_frame(input_frame, dts)
        output_video.writeFrame(output_frame)
        i += 1
        print("Writen Frame: {}".format(i))
    output_video.close()
Example #27
def write_video(input_path, output_path):
    i = 0
    print("Reading Video {}".format(input_path))
    input_video = skvideo.io.vread(input_path)
    print("Reading Finished")
    output_video = FFmpegWriter(output_path)
    for input_frame in input_video:
        print(input_frame.shape)
        dts = detect(input_frame)
        output_frame = render_frame(input_frame, dts)
        output_video.writeFrame(output_frame)
        i += 1
        print("Writen Frame: {}".format(i))
    output_video.close()
Example #28
def frames2vid_2(dir_path, output):
    images = []

    for f in glob.glob(dir_path + "/*.jpg"):
        images.append(f)
    images.sort()
    # Define the codec and create the writer (option values must be strings)
    writer = FFmpegWriter(output, outputdict={'-r': '60'})
    for i in range(len(images)):
        # Modify below based on file names
        frame = cv2.imread(dir_path + 'frame' + str(i) + '.jpg')
        # cv2.imread returns BGR, but FFmpegWriter expects RGB
        writer.writeFrame(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    # Release everything if job is finished
    writer.close()
    print("The output video is {}".format(output))
Example #29
def retarget_video(gan, input_tensor, scales, must_divide, output_dir_path):
    max_scale = np.max(np.array(scales))
    frame_shape = np.uint32(np.array(input_tensor.shape[2:]) * max_scale)
    frame_shape[0] += (frame_shape[0] % 2)
    frame_shape[1] += (frame_shape[1] % 2)
    frames = np.zeros([len(scales), frame_shape[0], frame_shape[1], 3])
    for i, (scale_h, scale_w) in enumerate(scales):
        output_image = test_one_scale(gan, input_tensor, [scale_h, scale_w], must_divide)
        frames[i, 0:output_image.shape[0], 0:output_image.shape[1], :] = output_image
    writer = FFmpegWriter(output_dir_path + '/vid.mp4', verbosity=1, outputdict={'-b': '30000000', '-r': '100.0'})

    for i, _ in enumerate(scales):
        # repeat each frame three times so every scale stays on screen longer
        for _ in range(3):
            writer.writeFrame(frames[i, :, :, :])
    writer.close()
Example #30
def save_animation():
    w = 1 << 8
    h = 1 << 8
    sl = SmoothLife(h, w)
    sl.add_speckles()
    from skvideo.io import FFmpegWriter
    from matplotlib import cm
    fps = 60
    frames = 100
    writer = FFmpegWriter("smoothlife.mp4", inputdict={"-r": str(fps)})
    for i in range(frames):
        frame = cm.gnuplot(sl.field)  # the colormap returns an RGBA float array
        frame *= 255
        frame = frame.astype("uint8")
        writer.writeFrame(frame)
        sl.step()
    writer.close()