Example #1
    def setUp(self):
        self.videofile_fr1 = os.path.join(
            settings['tmp_dir'], '~test_av_frame_source_test_case_fr_1.mp4')
        self.videofile_fr3 = os.path.join(
            settings['tmp_dir'], '~test_av_frame_source_test_case_fr_3.mp4')
        # make a 1-frame and a 3-frame video
        mk_sample_video(self.videofile_fr1, 1, 320, 240, fractions.Fraction(1))
        mk_sample_video(self.videofile_fr3, 1, 320, 240, fractions.Fraction(3))
        # path to the avutil-extracted frame to compare against
        self.imagefile = os.path.join(settings['tmp_dir'],
                                      '~test_av_frame_source_test_case.png')
        self.src_1 = FrameSource(self.videofile_fr1)
        self.src_3 = FrameSource(self.videofile_fr3)
        self.src_3.open()
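
The matching tearDown is not shown in this example. A minimal sketch of a cleanup counterpart, assuming the test case closes what it opened and removes its temporary files (FrameSource.close() appears in Example #2 below; everything else comes from setUp above):

    def tearDown(self):
        # close the source opened in setUp
        self.src_3.close()
        # remove the temporary files created for the test; the image may
        # not exist if the comparison never ran
        for f in (self.videofile_fr1, self.videofile_fr3, self.imagefile):
            if os.path.exists(f):
                os.remove(f)
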
Example #2
    def render(self):
        src_path = self.vid_info['path']
        src_name = os.path.splitext(os.path.basename(src_path))[0]

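        # build a temp output path; the render is muxed or moved to
        # dst_path at the end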
        tmp_name = '~{filename}.{ext}'.format(
            filename=src_name, ext=settings['v_container']).lower()
        tmp_path = os.path.join(settings['tmp_dir'], tmp_name)

        if self.show_preview:
            # to get OpenGL on OS X you have to build OpenCV --with-opengl
            # TODO: butterflow.rb and the wiki need to be updated for this
            self.window_title = '{} - Butterflow'.format(
                os.path.basename(src_path))
            flag = cv2.WINDOW_OPENGL
            cv2.namedWindow(self.window_title, flag)
            cv2.resizeWindow(self.window_title, self.w, self.h)

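        # open the output pipe for rendered frames and the input frame
        # source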
        self.make_pipe(tmp_path, self.playback_rate)
        self.source = FrameSource(src_path)
        self.source.open()
        renderable_seq = self.get_renderable_sequence()

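        # log each subregion's frame span, time span in seconds, and
        # relative position within the sequence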
        log.debug('Rendering sequence:')
        for s in renderable_seq.subregions:
            ra = renderable_seq.relative_position(s.ta)
            rb = renderable_seq.relative_position(s.tb)
            log.debug(
                'subregion: {},{},{} {:.3g},{:.3g},{:.3g} {:.3g},{:.3g},{:.3g}'
                .format(s.fa, s.fb, (s.fb - s.fa + 1), s.ta / 1000.0,
                        s.tb / 1000.0, (s.tb - s.ta) / 1000.0, ra, rb,
                        rb - ra))

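        # compare total pixel counts to decide whether scaling is needed
        # and in which direction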
        new_res = self.w * self.h
        src_res = self.vid_info['w'] * self.vid_info['h']
        if new_res == src_res:
            self.scaler = None
        elif new_res < src_res:
            self.scaler = settings['scaler_dn']
        else:
            self.scaler = settings['scaler_up']

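        # count the subregions that will actually be rendered; trimmed
        # regions are skipped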
        self.subs_to_render = 0
        for s in renderable_seq.subregions:
            if not s.trim:
                self.subs_to_render += 1

        self.curr_sub_idx = 0
        for s in renderable_seq.subregions:
            if s.trim:
                # the region is being trimmed and shouldn't be rendered
                continue
            self.curr_sub_idx += 1
            self.render_subregion(s)

        self.source.close()
        if self.show_preview:
            cv2.destroyAllWindows()
        self.close_pipe()

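        # when muxing, extract the audio for each rendered subregion (at
        # its playback speed), join the clips, then mux the result with
        # the rendered video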
        if self.mux:
            log.debug('muxing ...')
            aud_files = []
            for x, s in enumerate(renderable_seq.subregions):
                if s.trim:
                    continue
                tmp_name = '~{filename}.{sub}.{ext}'.format(
                    filename=src_name, sub=x,
                    ext=settings['a_container']).lower()
                aud_path = os.path.join(settings['tmp_dir'], tmp_name)
                extract_audio(src_path, aud_path, s.ta, s.tb, s.spd)
                aud_files.append(aud_path)
            merged_name = '~{filename}.merged.{ext}'.format(
                filename=src_name, ext=settings['a_container']).lower()
            merged_audio = os.path.join(settings['tmp_dir'], merged_name)
            concat_files(merged_audio, aud_files)
            mux(tmp_path, merged_audio, self.dst_path)
            for f in aud_files:
                os.remove(f)
            os.remove(merged_audio)
            os.remove(tmp_path)
        else:
            shutil.move(tmp_path, self.dst_path)
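
The scaler selection in render() compares total pixel counts rather than individual dimensions, so it only distinguishes smaller, equal, and larger pixel totals. A standalone sketch of the same decision (pick_scaler is a hypothetical name; the settings keys are the ones used in the example above):

def pick_scaler(src_w, src_h, dst_w, dst_h, settings):
    # return None when no scaling is needed, otherwise the configured
    # downscaling or upscaling filter from settings
    src_res = src_w * src_h
    dst_res = dst_w * dst_h
    if dst_res == src_res:
        return None
    if dst_res < src_res:
        return settings['scaler_dn']
    return settings['scaler_up']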