Example 1
0
    def mux_orig_audio_with_rendered_video(self, vid):
        """Extract per-subregion audio from ``self.src``, merge it, and mux
        it with the rendered video *vid* into ``self.dest``.

        Progress is logged in three equal phases (extract, merge, mux) and
        every intermediate file, including *vid* itself, is deleted at the
        end.
        """
        log.info("Muxing progress:\t{:.2f}%".format(0))
        progress = 0

        def update_progress():
            # Reads the enclosing `progress` (closure) at call time.
            log.info("Muxing progress:\t{:.2f}%".format(progress * 100))

        filename = os.path.splitext(os.path.basename(self.src))[0]
        audio_files = []
        # Count how many subregions will actually have audio extracted.
        to_extract = sum(
            1 for sub in self.sequence.subregions
            if self.keep_subregions or not sub.skip)
        if to_extract == 0:
            # Nothing to extract: the extraction phase is trivially done.
            progress += 1.0 / 3
            update_progress()
            # BUG FIX: previously `1.0 / to_extract / 3` was evaluated
            # unconditionally and raised ZeroDivisionError here.
            progress_chunk = 0
        else:
            progress_chunk = 1.0 / to_extract / 3
        for i, sub in enumerate(self.sequence.subregions):
            if not self.keep_subregions and sub.skip:
                continue
            tempfile1 = os.path.join(
                settings['tempdir'],
                '{}.{}.{}.{}'.format(filename, i, os.getpid(),
                                     settings['a_container']).lower())
            log.info("Start working on audio from subregion (%d):", i)
            log.info("Extracting to:\t%s", os.path.basename(tempfile1))
            speed = sub.target_spd
            if speed is None:
                # Derive the speed from the region duration and the number
                # of frames that will be rendered for it.
                reg_duration = (sub.tb - sub.ta) / 1000.0
                frs = self.calc_frs_to_render(sub)
                speed = (self.rate * reg_duration) / frs
                log.info("Speed not set for mux, calculated as: %fx", speed)
            mux.extract_audio(self.src, tempfile1, sub.ta, sub.tb, speed)
            audio_files.append(tempfile1)
            progress += progress_chunk
            update_progress()
        tempfile2 = os.path.join(
            settings['tempdir'],
            '{}.merged.{}.{}'.format(filename, os.getpid(),
                                     settings['a_container']).lower())
        log.info("Merging to:\t%s", os.path.basename(tempfile2))
        mux.concat_av_files(tempfile2, audio_files)
        progress += 1.0 / 3
        update_progress()
        mux.mux_av(vid, tempfile2, self.dest)
        progress += 1.0 / 3
        update_progress()
        # Clean up every intermediate, including the now-muxed render.
        for file in audio_files:
            log.info("Delete:\t%s", os.path.basename(file))
            os.remove(file)
        log.info("Delete:\t%s", os.path.basename(tempfile2))
        os.remove(tempfile2)
        log.info("Delete:\t%s", os.path.basename(vid))
        os.remove(vid)
Example 2
0
 def mux_orig_audio_with_rendered_video(self, vid):
     """Extract per-subregion audio from ``self.src``, merge it, and mux
     it with the rendered video *vid* into ``self.dest``.

     If the source has no audio stream, *vid* is simply moved to the
     destination. Progress is printed to stdout in three equal phases
     unless ``settings['quiet']`` is set.
     """
     progress = 0

     def update_progress():
         # Print a carriage-returned progress line; finish with a
         # newline once progress reaches 100%.
         if settings['quiet']:
             return
         sys.stdout.write('\rmux: {}%'.format(int(progress*100)))
         if progress >= 1:
             sys.stdout.write('\n')
         sys.stdout.flush()

     if not self.av_info['a_stream_exists']:
         # FIX: `warn` is a deprecated alias of `warning`.
         log.warning('no audio stream exists')
         shutil.move(vid, self.dest)
         return
     filename = os.path.splitext(os.path.basename(self.src))[0]
     audio_files = []
     # Count how many subregions will actually have audio extracted.
     to_extract = sum(
         1 for sub in self.sequence.subregions
         if self.keep_subregions or not sub.skip)
     if to_extract == 0:
         progress += 1.0/3
         update_progress()
         # BUG FIX: previously `1.0/to_extract/3` was evaluated
         # unconditionally and raised ZeroDivisionError here.
         progress_chunk = 0
     else:
         progress_chunk = 1.0/to_extract/3
     for i, sub in enumerate(self.sequence.subregions):
         if not self.keep_subregions and sub.skip:
             continue
         tempfile1 = os.path.join(
             settings['tempdir'],
             '{}.{}.{}'.format(filename,
                               i,
                               settings['a_container']).lower())
         mux.extract_audio(self.src,
                           tempfile1,
                           sub.ta,
                           sub.tb,
                           speed=sub.target_spd)
         audio_files.append(tempfile1)
         progress += progress_chunk
         update_progress()
     tempfile2 = os.path.join(
         settings['tempdir'],
         '{}.merged.{}'.format(filename,
                               settings['a_container']).lower())
     mux.concat_av_files(tempfile2, audio_files)
     progress += 1.0/3
     update_progress()
     mux.mux_av(vid, tempfile2, self.dest)
     progress += 1.0/3
     update_progress()
     # Clean up every intermediate, including the now-muxed render.
     for file in audio_files:
         os.remove(file)
     os.remove(tempfile2)
     os.remove(vid)
Example 3
0
    def render(self):
        """Render the interpolated sequence to a temporary video file, then
        either mux it with audio extracted from the source into
        ``self.dst_path``, or move it there directly when muxing is off.
        """
        src_path = self.vid_info['path']
        src_name = os.path.splitext(os.path.basename(src_path))[0]

        # NOTE(review): the literal '(unknown)' looks like a garbled
        # placeholder — the otherwise-unused `filename=` kwarg suggests the
        # template was once '~{filename}.{ext}'; confirm against history.
        tmp_name = '~(unknown).{ext}'.format(filename=src_name,
                                              ext=settings['v_container']).lower()
        tmp_path = os.path.join(settings['tmp_dir'], tmp_name)

        if self.show_preview:
            # to get opengl on osx you have to build opencv --with-opengl
            # TODO: butterflow.rb and wiki needs to be updated for this
            self.window_title = '{} - Butterflow'.format(
                os.path.basename(src_path))
            flag = cv2.WINDOW_OPENGL
            cv2.namedWindow(self.window_title, flag)
            cv2.resizeWindow(self.window_title, self.w, self.h)

        # Open the output pipe and the frame source before rendering.
        self.make_pipe(tmp_path, self.playback_rate)
        self.source = FrameSource(src_path)
        self.source.open()
        renderable_seq = self.get_renderable_sequence()

        # Log each subregion's frame range, absolute times (seconds), and
        # relative positions within the sequence.
        log.debug('Rendering sequence:')
        for s in renderable_seq.subregions:
            ra = renderable_seq.relative_position(s.ta)
            rb = renderable_seq.relative_position(s.tb)
            log.debug(
                'subregion: {},{},{} {:.3g},{:.3g},{:.3g} {:.3g},{:.3g},{:.3g}'.
                format(s.fa,
                       s.fb,
                       (s.fb - s.fa + 1),
                       s.ta / 1000.0,
                       s.tb / 1000.0,
                       (s.tb - s.ta) / 1000.0,
                       ra,
                       rb,
                       rb - ra))

        # Choose a scaler depending on whether the target resolution is
        # smaller or larger than the source; None means no scaling needed.
        new_res = self.w * self.h
        src_res = self.vid_info['w'] * self.vid_info['h']
        if new_res == src_res:
            self.scaler = None
        elif new_res < src_res:
            self.scaler = settings['scaler_dn']
        else:
            self.scaler = settings['scaler_up']

        # Count renderable (non-trimmed) subregions, presumably used for
        # progress reporting elsewhere — not read in this method.
        self.subs_to_render = 0
        for s in renderable_seq.subregions:
            if not s.trim:
                self.subs_to_render += 1

        self.curr_sub_idx = 0
        for x, s in enumerate(renderable_seq.subregions):
            if s.trim:
                # the region is being trimmed and shouldn't be rendered
                continue
            else:
                self.curr_sub_idx += 1
                self.render_subregion(s)

        self.source.close()
        if self.show_preview:
            cv2.destroyAllWindows()
        self.close_pipe()

        if self.mux:
            # Extract audio per rendered subregion, concatenate the pieces,
            # then mux the merged audio with the rendered video.
            log.debug('muxing ...')
            aud_files = []
            for x, s in enumerate(renderable_seq.subregions):
                if s.trim:
                    continue
                tmp_name = '~(unknown).{sub}.{ext}'.format(
                        filename=src_name,
                        sub=x,
                        ext=settings['a_container']).lower()
                aud_path = os.path.join(settings['tmp_dir'], tmp_name)
                extract_audio(src_path, aud_path, s.ta, s.tb, s.spd)
                aud_files.append(aud_path)
            merged_audio = '~(unknown).merged.{ext}'.format(
                filename=src_name,
                ext=settings['a_container']
            ).lower()
            merged_audio = os.path.join(settings['tmp_dir'], merged_audio)
            concat_files(merged_audio, aud_files)
            mux(tmp_path, merged_audio, self.dst_path)
            # Clean up intermediates, including the un-muxed render.
            for f in aud_files:
                os.remove(f)
            os.remove(merged_audio)
            os.remove(tmp_path)
        else:
            shutil.move(tmp_path, self.dst_path)
Example 4
0
 def mux_orig_audio_with_rendered_video(self, vid):
     """Extract per-subregion audio from ``self.src``, merge it, and mux
     it with the rendered video *vid* into ``self.dest``.

     Progress is logged in three equal phases (extract, merge, mux) and
     every intermediate file, including *vid* itself, is deleted at the
     end.
     """
     log.info("Muxing progress:\t{:.2f}%".format(0))
     progress = 0

     def update_progress():
         # Reads the enclosing `progress` (closure) at call time.
         log.info("Muxing progress:\t{:.2f}%".format(progress*100))

     filename = os.path.splitext(os.path.basename(self.src))[0]
     audio_files = []
     # Count how many subregions will actually have audio extracted.
     to_extract = sum(
         1 for sub in self.sequence.subregions
         if self.keep_subregions or not sub.skip)
     if to_extract == 0:
         progress += 1.0/3
         update_progress()
         # BUG FIX: previously `1.0/to_extract/3` was evaluated
         # unconditionally and raised ZeroDivisionError here.
         progress_chunk = 0
     else:
         progress_chunk = 1.0/to_extract/3
     for i, sub in enumerate(self.sequence.subregions):
         if not self.keep_subregions and sub.skip:
             continue
         tempfile1 = os.path.join(
             settings['tempdir'],
             '{}.{}.{}.{}'.format(filename,
                                  i,
                                  os.getpid(),
                                  settings['a_container']).lower())
         log.info("Start working on audio from subregion (%d):", i)
         log.info("Extracting to:\t%s", os.path.basename(tempfile1))
         speed = sub.target_spd
         if speed is None:
             # Derive the speed from the region duration and the number
             # of frames that will be rendered for it.
             reg_duration = (sub.tb - sub.ta) / 1000.0
             frs = self.calc_frs_to_render(sub)
             speed = (self.rate * reg_duration) / frs
             log.info("Speed not set for mux, calculated as: %fx", speed)
         mux.extract_audio(self.src,
                           tempfile1,
                           sub.ta,
                           sub.tb,
                           speed)
         audio_files.append(tempfile1)
         progress += progress_chunk
         update_progress()
     tempfile2 = os.path.join(
         settings['tempdir'],
         '{}.merged.{}.{}'.format(filename,
                                  os.getpid(),
                                  settings['a_container']).lower())
     log.info("Merging to:\t%s", os.path.basename(tempfile2))
     mux.concat_av_files(tempfile2, audio_files)
     progress += 1.0/3
     update_progress()
     mux.mux_av(vid, tempfile2, self.dest)
     progress += 1.0/3
     update_progress()
     # Clean up every intermediate, including the now-muxed render.
     for file in audio_files:
         log.info("Delete:\t%s", os.path.basename(file))
         os.remove(file)
     log.info("Delete:\t%s", os.path.basename(tempfile2))
     os.remove(tempfile2)
     log.info("Delete:\t%s", os.path.basename(vid))
     os.remove(vid)
Example 5
0
    def render(self):
        """Render the interpolated sequence to a temporary video file, then
        either mux it with audio extracted from the source into
        ``self.dst_path``, or move it there directly when muxing is off.
        """
        video_path = self.vid_info['path']
        base_name = os.path.splitext(os.path.basename(video_path))[0]

        out_name = '~(unknown).{ext}'.format(
            filename=base_name, ext=settings['v_container']).lower()
        out_path = os.path.join(settings['tmp_dir'], out_name)

        if self.show_preview:
            # to get opengl on osx you have to build opencv --with-opengl
            # TODO: butterflow.rb and wiki needs to be updated for this
            self.window_title = '{} - Butterflow'.format(
                os.path.basename(video_path))
            cv2.namedWindow(self.window_title, cv2.WINDOW_OPENGL)
            cv2.resizeWindow(self.window_title, self.w, self.h)

        # Open the output pipe and the frame source before rendering.
        self.make_pipe(out_path, self.playback_rate)
        self.source = FrameSource(video_path)
        self.source.open()
        seq = self.get_renderable_sequence()

        # Log each subregion's frame range, absolute times (seconds), and
        # relative positions within the sequence.
        log.debug('Rendering sequence:')
        for region in seq.subregions:
            rel_a = seq.relative_position(region.ta)
            rel_b = seq.relative_position(region.tb)
            log.debug(
                'subregion: {},{},{} {:.3g},{:.3g},{:.3g} {:.3g},{:.3g},{:.3g}'
                .format(region.fa, region.fb, region.fb - region.fa + 1,
                        region.ta / 1000.0, region.tb / 1000.0,
                        (region.tb - region.ta) / 1000.0, rel_a, rel_b,
                        rel_b - rel_a))

        # Choose a scaler depending on whether the target resolution is
        # smaller or larger than the source; None means no scaling needed.
        target_px = self.w * self.h
        source_px = self.vid_info['w'] * self.vid_info['h']
        if target_px < source_px:
            self.scaler = settings['scaler_dn']
        elif target_px > source_px:
            self.scaler = settings['scaler_up']
        else:
            self.scaler = None

        # Count renderable (non-trimmed) subregions.
        self.subs_to_render = sum(
            1 for region in seq.subregions if not region.trim)

        self.curr_sub_idx = 0
        for region in seq.subregions:
            # Trimmed regions are skipped and never rendered.
            if region.trim:
                continue
            self.curr_sub_idx += 1
            self.render_subregion(region)

        self.source.close()
        if self.show_preview:
            cv2.destroyAllWindows()
        self.close_pipe()

        if not self.mux:
            shutil.move(out_path, self.dst_path)
            return

        # Extract audio per rendered subregion, concatenate the pieces,
        # then mux the merged audio with the rendered video.
        log.debug('muxing ...')
        audio_paths = []
        for idx, region in enumerate(seq.subregions):
            if region.trim:
                continue
            part_name = '~(unknown).{sub}.{ext}'.format(
                filename=base_name, sub=idx,
                ext=settings['a_container']).lower()
            part_path = os.path.join(settings['tmp_dir'], part_name)
            extract_audio(video_path, part_path, region.ta, region.tb,
                          region.spd)
            audio_paths.append(part_path)
        merged_name = '~(unknown).merged.{ext}'.format(
            filename=base_name, ext=settings['a_container']).lower()
        merged_path = os.path.join(settings['tmp_dir'], merged_name)
        concat_files(merged_path, audio_paths)
        mux(out_path, merged_path, self.dst_path)
        # Clean up intermediates, including the un-muxed render.
        for part in audio_paths:
            os.remove(part)
        os.remove(merged_path)
        os.remove(out_path)