Code Example #1
def main():
    options = []
    option_names = []

    def add_argument(*names,
                     nargs=1,
                     type=str,
                     default=None,
                     action='default',
                     range=None,
                     choices=None,
                     help='',
                     extra=''):
        nonlocal options
        nonlocal option_names

        newDic = {}
        newDic['names'] = names
        newDic['nargs'] = nargs
        newDic['type'] = type
        newDic['default'] = default
        newDic['action'] = action
        newDic['help'] = help
        newDic['extra'] = extra
        newDic['range'] = range
        newDic['choices'] = choices
        options.append(newDic)
        option_names = option_names + list(names)

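    # Each add_argument() call below appends a dict like
    # {'names': ('--frame_margin', '-m'), 'nargs': 1, 'type': int, ...}
    # to `options`; the parse_options class further down walks this list to
    # interpret sys.argv.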
    add_argument('(input)',
                 nargs='*',
                 help='the path to a file, folder, or url you want edited.')
    add_argument('--help',
                 '-h',
                 action='store_true',
                 help='print this message and exit.')

    add_argument(
        '--frame_margin',
        '-m',
        type=int,
        default=6,
        range='0 to Infinity',
        help=
        'set how many "silent" frames on either side of "loud" sections should be included.'
    )
    add_argument(
        '--silent_threshold',
        '-t',
        type=float_type,
        default=0.04,
        range='0 to 1',
        help='set the volume that audio in a frame needs to surpass to be "loud".')
    add_argument(
        '--video_speed',
        '--sounded_speed',
        '-v',
        type=float_type,
        default=1.00,
        range='0 to 999999',
        help='set the speed that "loud" sections should be played at.')
    add_argument(
        '--silent_speed',
        '-s',
        type=float_type,
        default=99999,
        range='0 to 99999',
        help='set the speed that "silent" sections should be played at.')
    add_argument('--output_file',
                 '-o',
                 nargs='*',
                 help='set the name(s) of the new output.')

    add_argument('--no_open',
                 action='store_true',
                 help='do not open the file after editing is done.')
    add_argument(
        '--min_clip_length',
        '-mclip',
        type=int,
        default=3,
        range='0 to Infinity',
        help=
        'set the minimum length a clip can be. If a clip is too short, cut it.'
    )
    add_argument(
        '--min_cut_length',
        '-mcut',
        type=int,
        default=6,
        range='0 to Infinity',
        help=
        "set the minimum length a cut can be. If a cut is too short, don't cut"
    )
    add_argument('--combine_files',
                 action='store_true',
                 help='combine all input files into one before editing.')
    add_argument('--preview',
                 action='store_true',
                 help='show stats on how the input will be cut.')

    add_argument(
        '--cut_by_this_audio',
        '-ca',
        type=file_type,
        help="base cuts by this audio file instead of the video's audio.")
    add_argument('--cut_by_this_track',
                 '-ct',
                 type=int,
                 default=0,
                 range='0 to the number of audio tracks minus one',
                 help='base cuts by a different audio track in the video.')
    add_argument('--cut_by_all_tracks',
                 '-cat',
                 action='store_true',
                 help='combine all audio tracks into one before basing cuts.')
    add_argument('--keep_tracks_seperate',
                 action='store_true',
                 help="don't combine audio tracks when exporting.")

    add_argument(
        '--my_ffmpeg',
        action='store_true',
        help='use your ffmpeg and other binaries instead of the ones packaged.'
    )
    add_argument('--version',
                 action='store_true',
                 help='show which auto-editor you have.')
    add_argument('--debug',
                 '--verbose',
                 action='store_true',
                 help='show debugging messages and values.')
    add_argument('--show_ffmpeg_debug',
                 action='store_true',
                 help='show ffmpeg progress and output.')

    # TODO: add export_as_video
    add_argument('--export_as_audio',
                 '-exa',
                 action='store_true',
                 help='export as a WAV audio file.')
    add_argument(
        '--export_to_premiere',
        '-exp',
        action='store_true',
        help=
        'export as an XML file for Adobe Premiere Pro instead of outputting a media file.'
    )
    add_argument(
        '--export_to_resolve',
        '-exr',
        action='store_true',
        help=
        'export as an XML file for DaVinci Resolve instead of outputting a media file.'
    )

    add_argument('--video_bitrate',
                 '-vb',
                 help='set the number of bits per second for video.')
    add_argument('--audio_bitrate',
                 '-ab',
                 help='set the number of bits per second for audio.')
    add_argument('--sample_rate',
                 '-r',
                 type=sample_rate_type,
                 help='set the sample rate of the input and output videos.')
    add_argument('--video_codec',
                 '-vcodec',
                 default='uncompressed',
                 help='set the video codec for the output media file.')
    add_argument(
        '--preset',
        '-p',
        default='medium',
        choices=[
            'ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium',
            'slow', 'slower', 'veryslow'
        ],
        help=
        'set the preset for ffmpeg to help save file size or increase quality.'
    )
    add_argument('--tune',
                 default='none',
                 choices=[
                     'film', 'animation', 'grain', 'stillimage', 'fastdecode',
                     'zerolatency', 'none'
                 ],
                 help='set the tune for ffmpeg to compress video better.')

    add_argument('--ignore',
                 nargs='*',
                 help='the range that will be marked as "loud"')
    add_argument('--cut_out',
                 nargs='*',
                 help='the range that will be marked as "silent"')
    add_argument('--motion_threshold',
                 type=float_type,
                 default=0.02,
                 range='0 to 1',
                 help='how much motion is required to be considered "moving"')
    add_argument('--edit_based_on',
                 default='audio',
                 choices=[
                     'audio', 'motion', 'not_audio', 'not_motion',
                     'audio_or_motion', 'audio_and_motion', 'audio_xor_motion',
                     'audio_and_not_motion'
                 ],
                 help='decide which method to use when making edits.')

    dirPath = os.path.dirname(os.path.realpath(__file__))
    # Fixes pip not being able to find other included modules.
    sys.path.append(os.path.abspath(dirPath))

    from usefulFunctions import Log

    audioExtensions = [
        '.wav', '.mp3', '.m4a', '.aiff', '.flac', '.ogg', '.oga', '.aac',
        '.nfa', '.mka'
    ]

    # videoExtensions = ['.mp4', '.mkv', '.mov', '.webm', '.ogv']

    invalidExtensions = [
        '.txt', '.md', '.rtf', '.csv', '.cvs', '.html', '.htm', '.xml',
        '.json', '.yaml', '.png', '.jpeg', '.jpg', '.gif', '.exe', '.doc',
        '.docx', '.odt', '.pptx', '.xlsx', '.xls', '.ods', '.pdf', '.bat',
        '.dll', '.prproj', '.psd', '.aep', '.zip', '.rar', '.7z', '.java',
        '.class', '.js', '.c', '.cpp', '.csharp', '.py', '.app', '.git',
        '.github', '.gitignore', '.db', '.ini', '.BIN'
    ]

    class parse_options():
        def __init__(self, userArgs, log, *args):
            # Set the default options.
            for options in args:
                for option in options:
                    key = option['names'][0].replace('-', '')
                    if (option['action'] == 'store_true'):
                        value = False
                    elif (option['nargs'] != 1):
                        value = []
                    else:
                        value = option['default']
                    setattr(self, key, value)

            def get_option(item, the_args: list):
                for options in the_args:
                    for option in options:
                        if (item in option['names']):
                            return option
                return None

            # Figure out attributes changed by user.
            myList = []
            settingInputs = True
            optionList = 'input'
            i = 0
            while i < len(userArgs):
                item = userArgs[i]
                if (i == len(userArgs) - 1):
                    nextItem = None
                else:
                    nextItem = userArgs[i + 1]

                option = get_option(item, args)

                if (option is not None):
                    if (optionList is not None):
                        setattr(self, optionList, myList)
                    settingInputs = False
                    optionList = None
                    myList = []

                    key = option['names'][0].replace('-', '')

                    # Show help for specific option.
                    if (nextItem == '-h' or nextItem == '--help'):
                        print(' ', ', '.join(option['names']))
                        print('   ', option['help'])
                        print('   ', option['extra'])
                        if (option['action'] == 'default'):
                            print('    type:', option['type'].__name__)
                            print('    default:', option['default'])
                            if (option['range'] is not None):
                                print('    range:', option['range'])
                            if (option['choices'] is not None):
                                print('    choices:',
                                      ', '.join(option['choices']))
                        else:
                            print('    type: flag')
                        sys.exit()

                    if (option['nargs'] != 1):
                        settingInputs = True
                        optionList = key
                    elif (option['action'] == 'store_true'):
                        value = True
                    else:
                        try:
                            # Convert to correct type.
                            value = option['type'](nextItem)
                        except:
                            typeName = option['type'].__name__
                            log.error(
                                f'Couldn\'t convert "{nextItem}" to {typeName}'
                            )
                        if (option['choices'] is not None):
                            if (value not in option['choices']):
                                optionName = option['names'][0]
                                myChoices = ', '.join(option['choices'])
                                log.error(f'{value} is not a choice for {optionName}' \
                                    f'\nchoices are:\n  {myChoices}')
                        i += 1
                    setattr(self, key, value)
                else:
                    if (settingInputs and not item.startswith('-')):
                        # Input file names
                        myList.append(item)
                    else:
                        # Unknown Option!
                        hmm = difflib.get_close_matches(item, option_names)
                        potential_options = ', '.join(hmm)
                        append = ''
                        if (hmm != []):
                            append = f'\n\n    Did you mean:\n        {potential_options}'
                        log.error(f'Unknown option: {item}{append}')
                i += 1
            if (settingInputs):
                setattr(self, optionList, myList)

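    # Illustrative example (hypothetical arguments): with
    #   sys.argv[1:] == ['example.mp4', '--frame_margin', '8', '--preview']
    # parse_options sets args.input = ['example.mp4'], args.frame_margin = 8,
    # args.preview = True, and leaves every other attribute at its default.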
    args = parse_options(sys.argv[1:], Log(3), options)

    # Print the help screen for the entire program.
    if (args.help):
        print('')
        for option in options:
            print(' ', ', '.join(option['names']) + ':', option['help'])
        print('\nThe help command can also be used on a specific option.')
        print('example:')
        print('    auto-editor --frame_margin --help')
        print('\nHave an issue? Make an issue. '\
            'Visit https://github.com/wyattblue/auto-editor/issues')
        sys.exit()

    if (args.version):
        print('Auto-Editor version', version)
        sys.exit()

    from usefulFunctions import vidTracks, conwrite, getBinaries
    from wavfile import read

    if (not args.preview):
        if (args.export_to_premiere):
            conwrite('Exporting to Adobe Premiere Pro XML file.')
        elif (args.export_to_resolve):
            conwrite('Exporting to DaVinci Resolve XML file.')
        elif (args.export_as_audio):
            conwrite('Exporting as audio.')
        else:
            conwrite('Starting.')

    ffmpeg, ffprobe = getBinaries(platform.system(), dirPath, args.my_ffmpeg)
    makingDataFile = args.export_to_premiere or args.export_to_resolve
    is64bit = '64-bit' if sys.maxsize > 2**32 else '32-bit'

    if (args.debug and args.input == []):
        print('Python Version:', platform.python_version(), is64bit)
        print('Platform:', platform.system(), platform.release())
        # Platform can be 'Linux', 'Darwin' (macOS), 'Java', 'Windows'
        ffmpegVersion = pipeToConsole([ffmpeg, '-version']).split('\n')[0]
        ffmpegVersion = ffmpegVersion.replace('ffmpeg version', '').strip()
        ffmpegVersion = ffmpegVersion.split(' ')[0]
        print('FFmpeg path:', ffmpeg)
        print('FFmpeg version:', ffmpegVersion)
        print('Auto-Editor version', version)
        sys.exit()

    log = Log(args.debug, args.show_ffmpeg_debug)
    log.debug('')

    if (is64bit == '32-bit'):
        log.warning(
            'You have the 32-bit version of Python, which may lead to memory crashes.'
        )
    if (args.frame_margin < 0):
        log.error('Frame margin cannot be negative.')

    if (args.input == []):
        log.error(
            'You need the (input) argument so that auto-editor can do the work for you.'
        )

    try:
        from requests import get
        latestVersion = get(
            'https://raw.githubusercontent.com/wyattblue/auto-editor/master/resources/version.txt'
        )
        if (latestVersion.text != version):
            print('\nAuto-Editor is out of date. Run:\n')
            print('    pip3 install -U auto-editor')
            print('\nto upgrade to the latest version.\n')
        del latestVersion
    except Exception as err:
        log.debug('Check for update Error: ' + str(err))

    if (args.silent_speed <= 0 or args.silent_speed > 99999):
        args.silent_speed = 99999
    if (args.video_speed <= 0 or args.video_speed > 99999):
        args.video_speed = 99999

    inputList = []
    for myInput in args.input:
        if (os.path.isdir(myInput)):

            def validFiles(path: str, badExts: list):
                for f in os.listdir(path):
                    if (not f[f.rfind('.'):] in badExts):
                        yield os.path.join(path, f)

            inputList += sorted(validFiles(myInput, invalidExtensions))
        elif (os.path.isfile(myInput)):
            inputList.append(myInput)
        elif (myInput.startswith('http://') or myInput.startswith('https://')):
            basename = re.sub(r'\W+', '-', myInput)

            if (not os.path.isfile(basename + '.mp4')):
                print(
                    'URL detected, using youtube-dl to download from webpage.')
                cmd = [
                    'youtube-dl', '-f',
                    'bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4', myInput,
                    '--output', basename, '--no-check-certificate'
                ]
                if (ffmpeg != 'ffmpeg'):
                    cmd.extend(['--ffmpeg-location', ffmpeg])
                subprocess.call(cmd)

            inputList.append(basename + '.mp4')
        else:
            log.error('Could not find file: ' + myInput)

    startTime = time.time()

    if (args.output_file is None):
        args.output_file = []

    # Figure out the output file names.
    if (len(args.output_file) < len(inputList)):
        for i in range(len(inputList) - len(args.output_file)):
            oldFile = inputList[i]
            dotIndex = oldFile.rfind('.')
            if (args.export_to_premiere or args.export_to_resolve):
                args.output_file.append(oldFile[:dotIndex] + '.xml')
            else:
                ext = oldFile[dotIndex:]
                if (args.export_as_audio):
                    ext = '.wav'
                end = '_ALTERED' + ext
                args.output_file.append(oldFile[:dotIndex] + end)

    TEMP = tempfile.mkdtemp()
    log.debug(f'\n   - Temp Directory: {TEMP}')

    if (args.combine_files):
        # Combine video files, then set input to 'combined.mp4'.
        cmd = [ffmpeg, '-y']
        for fileref in inputList:
            cmd.extend(['-i', fileref])
        cmd.extend([
            '-filter_complex', f'[0:v]concat=n={len(inputList)}:v=1:a=1',
            '-codec:v', 'h264', '-pix_fmt', 'yuv420p', '-strict', '-2',
            f'{TEMP}/combined.mp4'
        ])
        if (log.is_ffmpeg):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '8'])

        subprocess.call(cmd)
        inputList = [f'{TEMP}/combined.mp4']

    speeds = [args.silent_speed, args.video_speed]
    numCuts = 0
    for i, INPUT_FILE in enumerate(inputList):
        log.debug(f'   - INPUT_FILE: {INPUT_FILE}')
        # Ignore folders
        if (os.path.isdir(INPUT_FILE)):
            continue

        # Throw error if file referenced doesn't exist.
        if (not os.path.isfile(INPUT_FILE)):
            log.error(f"{INPUT_FILE} doesn't exist!")

        # Check if the file format is valid.
        fileFormat = INPUT_FILE[INPUT_FILE.rfind('.'):]

        if (fileFormat in invalidExtensions):
            log.error(
                f'Invalid file extension "{fileFormat}" for {INPUT_FILE}')

        audioFile = fileFormat in audioExtensions

        # Get output file name.
        newOutput = args.output_file[i]
        log.debug(f'   - newOutput: {newOutput}')

        # Grab the sample rate from the input.
        sr = args.sample_rate
        if (sr is None):
            output = pipeToConsole([ffmpeg, '-i', INPUT_FILE, '-hide_banner'])
            try:
                matchDict = re.search(r'\s(?P<grp>\w+?)\sHz',
                                      output).groupdict()
                sr = matchDict['grp']
            except AttributeError:
                sr = 48000
        args.sample_rate = sr

        # Grab the audio bitrate from the input.
        abit = args.audio_bitrate
        if (abit is None):
            if (not INPUT_FILE.endswith('.mkv')):
                output = pipeToConsole([
                    ffprobe, '-v', 'error', '-select_streams', 'a:0',
                    '-show_entries', 'stream=bit_rate', '-of',
                    'compact=p=0:nk=1', INPUT_FILE
                ])
                try:
                    abit = int(output)
                except:
                    log.warning("Couldn't automatically detect audio bitrate.")
                    abit = '500k'
                    log.debug('Setting audio bitrate to ' + abit)
                else:
                    abit = str(round(abit / 1000)) + 'k'
        else:
            abit = str(abit)
        args.audio_bitrate = abit

        if (audioFile):
            fps = 30  # Audio files don't have frames, so give fps a dummy value.
            tracks = 1
            cmd = [
                ffmpeg, '-y', '-i', INPUT_FILE, '-b:a', args.audio_bitrate,
                '-ac', '2', '-ar',
                str(args.sample_rate), '-vn', f'{TEMP}/fastAud.wav'
            ]
            if (log.is_ffmpeg):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '8'])
            subprocess.call(cmd)

            sampleRate, audioData = read(f'{TEMP}/fastAud.wav')
        else:
            if (args.export_to_premiere):
                # This is the default fps value for Premiere Pro Projects.
                fps = 29.97
            else:
                # Grab fps to know what the output video's fps should be.
                # DaVinci Resolve doesn't need fps, but grab it anyway just in case.
                fps = ffmpegFPS(ffmpeg, INPUT_FILE, log)

            tracks = vidTracks(INPUT_FILE, ffprobe, log)
            if (args.cut_by_this_track >= tracks):
                log.error("You chose a track that doesn't exist.\n" \
                    f'There are only {tracks-1} tracks. (starting from 0)')

            vcodec = args.video_codec
            if (vcodec == 'copy'):
                output = pipeToConsole(
                    [ffmpeg, '-i', INPUT_FILE, '-hide_banner'])
                try:
                    matchDict = re.search(r'Video:\s(?P<video>\w+?)\s',
                                          output).groupdict()
                    vcodec = matchDict['video']
                    log.debug(vcodec)
                except AttributeError:
                    vcodec = 'uncompressed'
                    log.warning("Couldn't automatically detect video codec.")

            if (args.video_bitrate is not None and vcodec == 'uncompressed'):
                log.warning('Your bitrate will not be applied because' \
                        ' the video codec is "uncompressed".')

            if (vcodec == 'uncompressed'):
                # FFmpeg copies the uncompressed output that cv2 spits out.
                vcodec = 'copy'

            # Split audio tracks into: 0.wav, 1.wav, etc.
            for trackNum in range(tracks):
                cmd = [ffmpeg, '-y', '-i', INPUT_FILE]
                if (args.audio_bitrate is not None):
                    cmd.extend(['-ab', args.audio_bitrate])

                cmd.extend([
                    '-ac', '2', '-ar',
                    str(args.sample_rate), '-map', f'0:a:{trackNum}',
                    f'{TEMP}/{trackNum}.wav'
                ])

                if (log.is_ffmpeg):
                    cmd.extend(['-hide_banner'])
                else:
                    cmd.extend(['-nostats', '-loglevel', '8'])
                subprocess.call(cmd)

            # Check if the `--cut_by_all_tracks` flag has been set or not.
            if (args.cut_by_all_tracks):
                # Combine all audio tracks into one audio file, then read.
                cmd = [
                    ffmpeg, '-y', '-i', INPUT_FILE, '-filter_complex',
                    f'[0:a]amerge=inputs={tracks}', '-map', 'a', '-ar',
                    str(args.sample_rate), '-ac', '2', '-f', 'wav',
                    f'{TEMP}/combined.wav'
                ]
                if (log.is_ffmpeg):
                    cmd.extend(['-hide_banner'])
                else:
                    cmd.extend(['-nostats', '-loglevel', '8'])

                subprocess.call(cmd)

                sampleRate, audioData = read(f'{TEMP}/combined.wav')
            else:
                # Read only one audio file.
                if (os.path.isfile(f'{TEMP}/{args.cut_by_this_track}.wav')):
                    sampleRate, audioData = read(
                        f'{TEMP}/{args.cut_by_this_track}.wav')
                else:
                    log.error('Audio track not found!')

        from cutting import audioToHasLoud, motionDetection, applySpacingRules
        import numpy as np

        audioList = None
        motionList = None
        if ('audio' in args.edit_based_on):
            log.debug('Analyzing audio volume.')
            audioList = audioToHasLoud(audioData, sampleRate,
                                       args.silent_threshold, fps, log)

        if ('motion' in args.edit_based_on):
            log.debug('Analyzing video motion.')
            motionList = motionDetection(INPUT_FILE,
                                         ffprobe,
                                         args.motion_threshold,
                                         log,
                                         width=400,
                                         dilates=2,
                                         blur=21)

            if (audioList is not None):
                if (len(audioList) > len(motionList)):
                    log.debug(
                        'Reducing the size of audioList to match motionList')
                    log.debug(f'audioList Length:  {len(audioList)}')
                    log.debug(f'motionList Length: {len(motionList)}')
                    audioList = audioList[:len(motionList)]

        if (args.edit_based_on == 'audio'
                or args.edit_based_on == 'not_audio'):
            if (max(audioList) == 0):
                log.error(
                    'There was no place where audio exceeded the threshold.')
        if (args.edit_based_on == 'motion'
                or args.edit_based_on == 'not_motion'):
            if (max(motionList) == 0):
                log.error(
                    'There was no place where motion exceeded the threshold.')

        # Only raise a warning for other cases.
        if (audioList is not None and max(audioList) == 0):
            log.warning(
                'There was no place where audio exceeded the threshold.')
        if (motionList is not None and max(motionList) == 0):
            log.warning(
                'There was no place where motion exceeded the threshold.')

        if (args.edit_based_on == 'audio'):
            hasLoud = audioList

        if (args.edit_based_on == 'motion'):
            hasLoud = motionList

        if (args.edit_based_on == 'not_audio'):
            hasLoud = np.invert(audioList)

        if (args.edit_based_on == 'not_motion'):
            hasLoud = np.invert(motionList)

        if (args.edit_based_on == 'audio_and_motion'):
            log.debug('Applying "Python bitwise and" on arrays.')
            hasLoud = audioList & motionList

        if (args.edit_based_on == 'audio_or_motion'):
            log.debug('Applying "Python bitwise or" on arrays.')
            hasLoud = audioList | motionList

        if (args.edit_based_on == 'audio_xor_motion'):
            log.debug('Applying "numpy bitwise_xor" on arrays')
            hasLoud = np.bitwise_xor(audioList, motionList)

        if (args.edit_based_on == 'audio_and_not_motion'):
            log.debug(
                'Applying "Python bitwise and" with "numpy bitwise not" on arrays.'
            )
            hasLoud = audioList & np.invert(motionList)

        chunks, includeFrame = applySpacingRules(
            hasLoud, fps, args.frame_margin, args.min_clip_length,
            args.min_cut_length, args.ignore, args.cut_out, log)

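        # Each chunk is [startFrame, endFrame, speedIndex]; clips keeps only the
        # chunks that are not cut out entirely and stores their speed as a
        # percentage for the XML exporters.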
        clips = []
        for chunk in chunks:
            if (speeds[chunk[2]] == 99999):
                numCuts += 1
            else:
                clips.append([chunk[0], chunk[1], speeds[chunk[2]] * 100])

        if (fps is None and not audioFile):
            if (makingDataFile):
                dotIndex = INPUT_FILE.rfind('.')
                end = '_constantFPS' + INPUT_FILE[dotIndex:]
                constantLoc = INPUT_FILE[:dotIndex] + end
            else:
                constantLoc = f'{TEMP}/constantVid{fileFormat}'
            cmd = [
                ffmpeg, '-y', '-i', INPUT_FILE, '-filter:v', 'fps=fps=30',
                constantLoc
            ]
            if (log.is_ffmpeg):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '8'])
            subprocess.call(cmd)
            INPUT_FILE = constantLoc

        if (args.preview):
            args.no_open = True
            from preview import preview

            preview(INPUT_FILE, chunks, speeds, fps, audioFile, log)
            continue

        if (args.export_to_premiere):
            args.no_open = True
            from premiere import exportToPremiere

            exportToPremiere(INPUT_FILE, TEMP, newOutput, clips, tracks,
                             sampleRate, audioFile, log)
            continue
        if (args.export_to_resolve):
            args.no_open = True
            duration = chunks[len(chunks) - 1][1]
            from resolve import exportToResolve

            exportToResolve(INPUT_FILE, newOutput, clips, duration, sampleRate,
                            audioFile, log)
            continue
        if (audioFile and not makingDataFile):
            from fastAudio import fastAudio

            fastAudio(ffmpeg, INPUT_FILE, newOutput, chunks, speeds,
                      args.audio_bitrate, sampleRate, True, TEMP, log, fps)
            continue

        from fastVideo import fastVideo
        fastVideo(ffmpeg, INPUT_FILE, newOutput, chunks, includeFrame, speeds,
                  tracks, args.audio_bitrate, sampleRate, TEMP,
                  args.keep_tracks_seperate, vcodec, fps, args.export_as_audio,
                  args.video_bitrate, args.preset, args.tune, log)

    if (not os.path.isfile(newOutput)):
        log.error(f'The file {newOutput} was not created.')

    if (not args.preview and not makingDataFile):
        timeLength = round(time.time() - startTime, 2)
        minutes = timedelta(seconds=round(timeLength))
        print(f'Finished. took {timeLength} seconds ({minutes})')

    if (not args.preview and makingDataFile):
        timeSave = numCuts * 2  # assuming making each cut takes about 2 seconds.
        units = 'seconds'
        if (timeSave >= 3600):
            timeSave = round(timeSave / 3600, 1)
            if (timeSave % 1 == 0):
                timeSave = round(timeSave)
            units = 'hours'
        if (timeSave >= 60):
            timeSave = round(timeSave / 60, 1)
            if (timeSave >= 10 or timeSave % 1 == 0):
                timeSave = round(timeSave)
            units = 'minutes'

        plural = 's' if numCuts != 1 else ''

        print(f'Auto-Editor made {numCuts} cut{plural}', end='')
        if (numCuts > 4):
            print(f', which would have taken about {timeSave} {units} if' \
                ' edited manually.')
        else:
            print('.')

    if (not args.no_open):
        try:  # should work on Windows
            os.startfile(newOutput)
        except AttributeError:
            try:  # should work on MacOS and most Linux versions
                subprocess.call(['open', newOutput])
            except:
                try:  # should work on WSL2
                    subprocess.call(['cmd.exe', '/C', 'start', newOutput])
                except:
                    log.warning('Could not open output file.')
    rmtree(TEMP)
Code Example #2
def exportToPremiere(myInput, temp, output, clips, tracks, sampleRate, log):
    def makepath(filepath):
        return 'file://localhost' + os.path.abspath(filepath)

    def speedup(speed):
        r = '\t\t\t\t\t\t<filter>\n'
        r += '\t\t\t\t\t\t\t<effect>\n'
        r += '\t\t\t\t\t\t\t\t<name>Time Remap</name>\n'
        r += '\t\t\t\t\t\t\t\t<effectid>timeremap</effectid>\n'
        r += '\t\t\t\t\t\t\t\t<effectcategory>motion</effectcategory>\n'
        r += '\t\t\t\t\t\t\t\t<effecttype>motion</effecttype>\n'
        r += '\t\t\t\t\t\t\t\t<mediatype>video</mediatype>\n'
        r += '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n'
        r += '\t\t\t\t\t\t\t\t\t<parameterid>variablespeed</parameterid>\n'
        r += '\t\t\t\t\t\t\t\t\t<name>variablespeed</name>\n'
        r += '\t\t\t\t\t\t\t\t\t<valuemin>0</valuemin>\n'
        r += '\t\t\t\t\t\t\t\t\t<valuemax>1</valuemax>\n'
        r += '\t\t\t\t\t\t\t\t\t<value>0</value>\n'
        r += '\t\t\t\t\t\t\t\t</parameter>\n'
        r += '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n'
        r += '\t\t\t\t\t\t\t\t\t<parameterid>speed</parameterid>\n'
        r += '\t\t\t\t\t\t\t\t\t<name>speed</name>\n'
        r += '\t\t\t\t\t\t\t\t\t<valuemin>-100000</valuemin>\n'
        r += '\t\t\t\t\t\t\t\t\t<valuemax>100000</valuemax>\n'
        r += f'\t\t\t\t\t\t\t\t\t<value>{speed}</value>\n'
        r += '\t\t\t\t\t\t\t\t</parameter>\n'
        r += '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n'
        r += '\t\t\t\t\t\t\t\t\t<parameterid>reverse</parameterid>\n'
        r += '\t\t\t\t\t\t\t\t\t<name>reverse</name>\n'
        r += '\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n'
        r += '\t\t\t\t\t\t\t\t</parameter>\n'
        r += '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n'
        r += '\t\t\t\t\t\t\t\t\t<parameterid>frameblending</parameterid>\n'
        r += '\t\t\t\t\t\t\t\t\t<name>frameblending</name>\n'
        r += '\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n'
        r += '\t\t\t\t\t\t\t\t</parameter>\n'
        r += '\t\t\t\t\t\t\t</effect>\n'
        r += '\t\t\t\t\t\t</filter>\n'
        return r

    pathurl = makepath(myInput)

    name = os.path.basename(myInput)

    audioFile = isAudioFile(myInput)

    log.debug('tracks: ' + str(tracks))
    log.debug(os.path.dirname(os.path.abspath(myInput)))

    if (tracks > 1):
        # XML in Adobe Premiere doesn't support multiple audio tracks so
        # we need to do some stupid things to get it working.
        from shutil import rmtree

        inFolder = os.path.dirname(os.path.abspath(myInput))

        hmm = name[:name.rfind('.')]

        newFolderName = os.path.join(inFolder, hmm + '_tracks')
        try:
            os.mkdir(newFolderName)
        except OSError:
            rmtree(newFolderName)
            os.mkdir(newFolderName)

        trackurls = [pathurl]
        for i in range(1, tracks):
            newtrack = os.path.join(newFolderName, f'{i}.wav')
            os.rename(os.path.join(temp, f'{i}.wav'), newtrack)
            trackurls.append(newtrack)

    ntsc = 'FALSE'
    ana = 'FALSE'  # anamorphic
    alphatype = 'none'
    depth = '16'
    if (not audioFile):
        try:
            import cv2

            cap = cv2.VideoCapture(myInput)
            width = str(int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
            height = str(int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            cap.release()
            cv2.destroyAllWindows()
        except ImportError:
            width = '1280'
            height = '720'

    pixelar = 'square'  # pixel aspect ratio
    colordepth = '24'
    sr = sampleRate

    if (audioFile):
        groupName = 'Auto-Editor Audio Group'
        with open(output, 'w', encoding='utf-8') as outfile:
            outfile.write('<!-- Generated by Auto-Editor -->\n')
            outfile.write(
                '<!-- https://github.com/WyattBlue/auto-editor -->\n\n')
            outfile.write(
                '<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE xmeml>\n')
            outfile.write('<xmeml version="4">\n')
            outfile.write('\t<sequence>\n')
            outfile.write('\t<rate>\n')
            outfile.write('\t\t<timebase>30</timebase>\n')
            outfile.write('\t\t<ntsc>TRUE</ntsc>\n')
            outfile.write('\t</rate>\n')
            outfile.write(f'\t\t<name>{groupName}</name>\n')
            outfile.write('\t\t<media>\n')
            outfile.write('\t\t\t<audio>\n')
            outfile.write('\t\t\t\t<numOutputChannels>2</numOutputChannels>\n')
            outfile.write('\t\t\t\t<format>\n')
            outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
            outfile.write(f'\t\t\t\t\t\t<depth>{depth}</depth>\n')
            outfile.write(f'\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
            outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
            outfile.write('\t\t\t\t</format>\n')
            outfile.write(
                '\t\t\t\t<track currentExplodedTrackIndex="0" premiereTrackType="Stereo">\n'
            )

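            # Walk the clips and convert source frame ranges into sequence
            # positions; dividing the clip length by (speed / 100) stretches or
            # shrinks its duration on the timeline.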
            total = 0
            for j, clip in enumerate(clips):
                myStart = int(total)
                total += (clip[1] - clip[0]) / (clip[2] / 100)
                myEnd = int(total)

                outfile.write(f'\t\t\t\t\t<clipitem id="clipitem-{j+1}">\n')
                outfile.write(
                    '\t\t\t\t\t\t<masterclipid>masterclip-1</masterclipid>\n')
                outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')
                outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
                outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')
                outfile.write(
                    f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
                outfile.write(
                    f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n'
                )

                if (j == 0):
                    outfile.write('\t\t\t\t\t\t<file id="file-1">\n')
                    outfile.write(f'\t\t\t\t\t\t\t<name>{name}</name>\n')
                    outfile.write(
                        f'\t\t\t\t\t\t\t<pathurl>{pathurl}</pathurl>\n')
                    outfile.write('\t\t\t\t\t\t\t<rate>\n')
                    outfile.write('\t\t\t\t\t\t\t\t<timebase>30</timebase>\n')
                    outfile.write(f'\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                    outfile.write('\t\t\t\t\t\t\t</rate>\n')
                    outfile.write('\t\t\t\t\t\t\t<media>\n')
                    outfile.write('\t\t\t\t\t\t\t\t<audio>\n')
                    outfile.write(
                        '\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                    outfile.write(
                        f'\t\t\t\t\t\t\t\t\t\t<depth>{depth}</depth>\n')
                    outfile.write(
                        f'\t\t\t\t\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
                    outfile.write(
                        '\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                    outfile.write(
                        '\t\t\t\t\t\t\t\t\t<channelcount>2</channelcount>\n')
                    outfile.write('\t\t\t\t\t\t\t\t</audio>\n')
                    outfile.write('\t\t\t\t\t\t\t</media>\n')
                    outfile.write('\t\t\t\t\t\t</file>\n')
                else:
                    outfile.write(f'\t\t\t\t\t\t<file id="file-1"/>\n')
                outfile.write('\t\t\t\t\t</clipitem>\n')

            outfile.write('\t\t\t\t</track>\n')
            outfile.write('\t\t\t</audio>\n')
            outfile.write('\t\t</media>\n')
            outfile.write('\t</sequence>\n')
            outfile.write('</xmeml>')

            # Exit out of this function prematurely.
            return None

    groupName = 'Auto-Editor Video Group'

    with open(output, 'w', encoding='utf-8') as outfile:
        outfile.write('<!-- Generated by Auto-Editor -->\n')
        outfile.write('<!-- https://github.com/WyattBlue/auto-editor -->\n')
        outfile.write('\n')
        outfile.write(
            '<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE xmeml>\n')
        outfile.write('<xmeml version="4">\n')
        outfile.write('\t<sequence>\n')
        outfile.write(f'\t\t<name>{groupName}</name>\n')
        outfile.write('\t\t<media>\n')
        outfile.write('\t\t\t<video>\n')
        outfile.write('\t\t\t\t<format>\n')
        outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
        outfile.write('\t\t\t\t\t\t<rate>\n')
        outfile.write('\t\t\t\t\t\t\t<timebase>30</timebase>\n')
        outfile.write(f'\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
        outfile.write('\t\t\t\t\t\t</rate>\n')
        outfile.write(f'\t\t\t\t\t\t<width>{width}</width>\n')
        outfile.write(f'\t\t\t\t\t\t<height>{height}</height>\n')
        outfile.write(f'\t\t\t\t\t\t<anamorphic>{ana}</anamorphic>\n')
        outfile.write(
            f'\t\t\t\t\t\t<pixelaspectratio>{pixelar}</pixelaspectratio>\n')
        outfile.write('\t\t\t\t\t\t<fielddominance>none</fielddominance>\n')
        outfile.write(f'\t\t\t\t\t\t<colordepth>{colordepth}</colordepth>\n')
        outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
        outfile.write('\t\t\t\t</format>\n')
        outfile.write('\t\t\t\t<track>\n')

        # Handle clips.
        total = 0
        for j, clip in enumerate(clips):
            myStart = int(total)
            total += (clip[1] - clip[0]) / (clip[2] / 100)
            myEnd = int(total)

            outfile.write(f'\t\t\t\t\t<clipitem id="clipitem-{j+1}">\n')
            outfile.write(
                '\t\t\t\t\t\t<masterclipid>masterclip-2</masterclipid>\n')
            outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')
            outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
            outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')
            outfile.write(
                f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
            outfile.write(
                f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n')

            if (j == 0):
                outfile.write('\t\t\t\t\t\t<file id="file-1">\n')
                outfile.write(f'\t\t\t\t\t\t\t<name>{name}</name>\n')
                outfile.write(f'\t\t\t\t\t\t\t<pathurl>{pathurl}</pathurl>\n')
                outfile.write('\t\t\t\t\t\t\t<rate>\n')
                outfile.write('\t\t\t\t\t\t\t\t<timebase>30</timebase>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                outfile.write('\t\t\t\t\t\t\t</rate>\n')
                outfile.write('\t\t\t\t\t\t\t<media>\n')
                outfile.write('\t\t\t\t\t\t\t\t<video>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t\t<rate>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t\t\t<timebase>30</timebase>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t\t</rate>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<width>{width}</width>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t\t\t\t<height>{height}</height>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t\t\t\t<anamorphic>{ana}</anamorphic>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t\t\t\t<pixelaspectratio>{pixelar}</pixelaspectratio>\n'
                )
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t\t<fielddominance>none</fielddominance>\n'
                )
                outfile.write('\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                outfile.write('\t\t\t\t\t\t\t\t</video>\n')
                outfile.write('\t\t\t\t\t\t\t\t<audio>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<depth>{depth}</depth>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<channelcount>2</channelcount>\n')
                outfile.write('\t\t\t\t\t\t\t\t</audio>\n')
                outfile.write('\t\t\t\t\t\t\t</media>\n')
                outfile.write('\t\t\t\t\t\t</file>\n')
            else:
                outfile.write(f'\t\t\t\t\t\t<file id="file-1"/>\n')

            if (clip[2] != 100):
                outfile.write(speedup(clip[2]))

            # Linking for video blocks
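            # Clip item ids are laid out video-first, then one block of audio
            # clip items per track, so link i == 0 to the video clipitem and
            # i >= 1 to the matching audio clipitems.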
            for i in range(max(3, tracks + 1)):
                outfile.write('\t\t\t\t\t\t<link>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t<linkclipref>clipitem-{(i*(len(clips)))+j+1}</linkclipref>\n'
                )
                if (i == 0):
                    outfile.write(
                        '\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                else:
                    outfile.write(
                        '\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')
                if (i == 2):
                    outfile.write('\t\t\t\t\t\t\t<trackindex>2</trackindex>\n')
                else:
                    outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')
                outfile.write(f'\t\t\t\t\t\t\t<clipindex>{j+1}</clipindex>\n')
                if (i > 0):
                    outfile.write('\t\t\t\t\t\t\t<groupindex>1</groupindex>\n')
                outfile.write('\t\t\t\t\t\t</link>\n')
            outfile.write('\t\t\t\t\t</clipitem>\n')
        outfile.write('\t\t\t\t</track>\n')
        outfile.write('\t\t\t</video>\n')
        outfile.write('\t\t\t<audio>\n')
        outfile.write('\t\t\t\t<numOutputChannels>2</numOutputChannels>\n')
        outfile.write('\t\t\t\t<format>\n')
        outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
        outfile.write(f'\t\t\t\t\t\t<depth>{depth}</depth>\n')
        outfile.write(f'\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
        outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
        outfile.write('\t\t\t\t</format>\n')

        # Audio Clips
        for t in range(tracks):
            if (t == 0):
                print('')
            log.debug('t variable: ' + str(t))
            total = 0
            outfile.write(
                '\t\t\t\t<track currentExplodedTrackIndex="0" premiereTrackType="Stereo">\n'
            )

            for j, clip in enumerate(clips):

                clipItemNum = len(clips) + 1 + j + (t * len(clips))

                outfile.write(
                    f'\t\t\t\t\t<clipitem id="clipitem-{clipItemNum}" premiereChannelType="stereo">\n'
                )
                outfile.write(
                    f'\t\t\t\t\t\t<masterclipid>masterclip-2</masterclipid>\n')
                outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')

                myStart = int(total)
                total += (clip[1] - clip[0]) / (clip[2] / 100)
                myEnd = int(total)

                outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
                outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')

                outfile.write(
                    f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
                outfile.write(
                    f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n'
                )

                if (t > 0):
                    outfile.write(f'\t\t\t\t\t\t<file id="file-{t+1}">\n')
                    outfile.write(f'\t\t\t\t\t\t\t<name>{name}{t}</name>\n')
                    outfile.write(
                        f'\t\t\t\t\t\t\t<pathurl>{trackurls[t]}</pathurl>\n')
                    outfile.write('\t\t\t\t\t\t\t<rate>\n')
                    outfile.write('\t\t\t\t\t\t\t\t<timebase>30</timebase>\n')
                    outfile.write(f'\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                    outfile.write('\t\t\t\t\t\t\t</rate>\n')
                    outfile.write('\t\t\t\t\t\t\t<media>\n')
                    outfile.write('\t\t\t\t\t\t\t\t<audio>\n')
                    outfile.write(
                        '\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                    outfile.write(
                        f'\t\t\t\t\t\t\t\t\t\t<depth>{depth}</depth>\n')
                    outfile.write(
                        f'\t\t\t\t\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
                    outfile.write(
                        '\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                    outfile.write(
                        '\t\t\t\t\t\t\t\t\t<channelcount>2</channelcount>\n')
                    outfile.write('\t\t\t\t\t\t\t\t</audio>\n')
                    outfile.write('\t\t\t\t\t\t\t</media>\n')
                    outfile.write('\t\t\t\t\t\t</file>\n')
                else:
                    outfile.write(f'\t\t\t\t\t\t<file id="file-{t+1}"/>\n')
                outfile.write('\t\t\t\t\t\t<sourcetrack>\n')
                outfile.write('\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')
                outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')
                outfile.write('\t\t\t\t\t\t</sourcetrack>\n')
                outfile.write('\t\t\t\t\t\t<labels>\n')
                outfile.write('\t\t\t\t\t\t\t<label2>Iris</label2>\n')
                outfile.write('\t\t\t\t\t\t</labels>\n')

                # Add speed effect for audio blocks
                if (clip[2] != 100):
                    outfile.write(speedup(clip[2]))

                outfile.write('\t\t\t\t\t</clipitem>\n')
            outfile.write(
                '\t\t\t\t\t<outputchannelindex>1</outputchannelindex>\n')
            outfile.write('\t\t\t\t</track>\n')

        outfile.write('\t\t\t</audio>\n')
        outfile.write('\t\t</media>\n')
        outfile.write('\t</sequence>\n')
        outfile.write('</xmeml>')

    conwrite('')
Code Example #3
def originalMethod(ffmpeg, vidFile, outFile, frameMargin, silentT,
                   LOUD_THRESHOLD, SAMPLE_RATE, audioBit, SILENT_SPEED,
                   VIDEO_SPEED, KEEP_SEP, BACK_MUS, BACK_VOL, NEW_TRAC,
                   BASE_TRAC, COMBINE_TRAC, verbose, HWACCEL, CACHE):
    """
    This function takes in the path to the input file (and a bunch of other options)
    and outputs a new output file. This is both the safest and slowest of all methods.

    Safest in the sense that if a feature isn't supported here, like handling certain
    commands or supporting an obscure file type, it's not supported anywhere.
    """

    print('Running from originalMethod.py')

    speeds = [SILENT_SPEED, VIDEO_SPEED]
    TEMP = tempfile.mkdtemp()

    dotIndex = vidFile.rfind('.')
    extension = vidFile[dotIndex:]
    if (outFile == ''):
        outFile = vidFile[:dotIndex] + '_ALTERED' + extension
    else:
        outFile = outFile

    if (not os.path.isfile(vidFile)):
        print('Could not find file:', vidFile)
        sys.exit(1)

    fileSize = os.stat(vidFile).st_size

    try:
        frameRate = getFrameRate(ffmpeg, vidFile)
    except AttributeError:
        print('Warning! frame rate detection has failed, defaulting to 30.')
        frameRate = 30

    SKIP = False
    print(CACHE)
    try:
        os.mkdir(CACHE)
    except OSError:
        # There must be a cache already; check if it's usable.
        if (os.path.isfile(f'{CACHE}/cache.txt')):
            file = open(f'{CACHE}/cache.txt', 'r')
            x = file.read().splitlines()
            baseFile = os.path.basename(vidFile)
            if (x[:3] == [baseFile, str(frameRate),
                          str(fileSize)] and x[4] == str(COMBINE_TRAC)):
                print('Using cache.')
                SKIP = True
                tracks = int(x[3])
            file.close()
        if (not SKIP):
            print('Removing cache')
            rmtree(CACHE)
            os.mkdir(CACHE)

    if (not SKIP):
        # Videos can have more than one audio track, so we need to extract them all.
        tracks = vidTracks(vidFile, ffmpeg)

        if (BASE_TRAC >= tracks):
            print("Error! You chose a track that doesn't exist.")
            print(f'There are only {tracks-1} tracks. (starting from 0)')
            sys.exit(1)
        for trackNumber in range(tracks):
            cmd = [ffmpeg]
            if (HWACCEL is not None):
                cmd.extend(['-hwaccel', HWACCEL])
            cmd.extend([
                '-i', vidFile, '-map', f'0:a:{trackNumber}',
                f'{CACHE}/{trackNumber}.wav'
            ])
            if (not verbose):
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)

            if (COMBINE_TRAC):
                from pydub import AudioSegment

                for i in range(tracks):
                    if (not os.path.isfile(f'{CACHE}/{i}.wav')):
                        print('Error! Audio file(s) could not be found.')
                        sys.exit(1)
                    if (i == 0):
                        allAuds = AudioSegment.from_file(f'{CACHE}/{i}.wav')
                    else:
                        newTrack = AudioSegment.from_file(f'{CACHE}/{i}.wav')
                        allAuds = allAuds.overlay(newTrack)
                allAuds.export(f'{CACHE}/0.wav', format='wav')
                tracks = 1

            # Now deal with the video.
            conwrite('Splitting video into jpgs. (This can take a while)')
            cmd = [ffmpeg]
            if (HWACCEL is not None):
                cmd.extend(['-hwaccel', HWACCEL])
            cmd.extend(
                ['-i', vidFile, '-qscale:v', '1', f'{CACHE}/frame%06d.jpg'])
            if (verbose):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)

    if (NEW_TRAC is None):
        sampleRate, audioData = read(f'{CACHE}/{BASE_TRAC}.wav')
    else:
        cmd = [
            ffmpeg, '-i', NEW_TRAC, '-ac', '2', '-ar',
            str(SAMPLE_RATE), '-vn', f'{TEMP}/NEW_TRAC.wav'
        ]
        if (verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

        sampleRate, audioData = read(f'{TEMP}/NEW_TRAC.wav')
    audioSampleCount = audioData.shape[0]
    maxAudioVolume = getMaxVolume(audioData)

    samplesPerFrame = sampleRate / frameRate
    audioFrameCount = int(math.ceil(audioSampleCount / samplesPerFrame))
    hasLoudAudio = np.zeros((audioFrameCount), dtype=np.uint8)

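    # Classify each frame-sized window of audio by its peak volume relative to
    # the file's maximum: 2 means clearly loud, 1 means above the silent
    # threshold, 0 means silent.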
    for i in range(audioFrameCount):
        start = int(i * samplesPerFrame)
        end = min(int((i + 1) * samplesPerFrame), audioSampleCount)
        audiochunks = audioData[start:end]
        maxchunksVolume = getMaxVolume(audiochunks) / maxAudioVolume
        if (maxchunksVolume >= LOUD_THRESHOLD):
            hasLoudAudio[i] = 2
        elif (maxchunksVolume >= silentT):
            hasLoudAudio[i] = 1

    chunks = [[0, 0, 0]]
    shouldIncludeFrame = np.zeros((audioFrameCount), dtype=np.uint8)
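    # Dilate the mask: keep a frame if anything within frameMargin frames of it
    # has sound, and start a new chunk whenever the include flag flips.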
    for i in range(audioFrameCount):
        start = int(max(0, i - frameMargin))
        end = int(min(audioFrameCount, i + 1 + frameMargin))
        shouldIncludeFrame[i] = min(1, np.max(hasLoudAudio[start:end]))

        if (i >= 1 and shouldIncludeFrame[i] != shouldIncludeFrame[i - 1]):
            chunks.append([chunks[-1][1], i, shouldIncludeFrame[i - 1]])

    chunks.append([chunks[-1][1], audioFrameCount, shouldIncludeFrame[i - 1]])
    chunks = chunks[1:]

    zooms = getZooms(chunks, audioFrameCount, hasLoudAudio, frameMargin,
                     frameRate)

    handleAudio(ffmpeg, tracks, CACHE, TEMP, silentT, frameMargin, SAMPLE_RATE,
                audioBit, verbose, SILENT_SPEED, VIDEO_SPEED, chunks,
                frameRate)

    splitVideo(ffmpeg, chunks, speeds, frameRate, zooms, samplesPerFrame,
               SAMPLE_RATE, audioData, extension, verbose, TEMP, CACHE)

    if (BACK_MUS is not None):
        from pydub import AudioSegment

        cmd = [
            ffmpeg, '-i', f'{TEMP}/new{BASE_TRAC}.wav', '-vn', '-ar', '44100',
            '-ac', '2', '-ab', '192k', '-f', 'mp3', f'{TEMP}/output.mp3'
        ]
        subprocess.call(cmd)

        vidSound = AudioSegment.from_file(f'{TEMP}/output.mp3')

        back = AudioSegment.from_file(BACK_MUS)
        if (len(back) > len(vidSound)):
            back = back[:len(vidSound)]

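        # Adjust the background track so it sits BACK_VOL dB relative to the
        # video's audio level (pydub dBFS) before mixing it in.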
        def match_target_amplitude(back, vidSound, target):
            diff = back.dBFS - vidSound.dBFS
            change_in_dBFS = target - diff
            return back.apply_gain(change_in_dBFS)

        # Fade the background music out by 1 second.
        back = match_target_amplitude(back, vidSound, BACK_VOL).fade_out(1000)
        back.export(f'{TEMP}/new{tracks}.wav', format='wav')

        if (not os.path.isfile(f'{TEMP}/new{tracks}.wav')):
            print('Error! The new music audio file was not created.')
            sys.exit(1)
        tracks += 1

    if (KEEP_SEP):
        # Mux the video and audio so that there are still multiple audio tracks.
        cmd = [ffmpeg, '-y']
        if (HWACCEL is not None):
            cmd.extend(['-hwaccel', HWACCEL])
        for i in range(tracks):
            cmd.extend(['-i', f'{TEMP}/new{i}.wav'])
        cmd.extend(['-i', f'{TEMP}/output{extension}'])
        for i in range(tracks):
            cmd.extend(['-map', f'{i}:a:0'])
        cmd.extend([
            '-map', f'{tracks}:v:0', '-c:v', 'copy', '-movflags', '+faststart',
            outFile
        ])
        if (verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)
    else:
        if (tracks > 1):
            cmd = [ffmpeg]
            for i in range(tracks):
                cmd.extend(['-i', f'{TEMP}/new{i}.wav'])
            cmd.extend([
                '-filter_complex', f'amerge=inputs={tracks}', '-ac', '2',
                f'{TEMP}/newAudioFile.wav'
            ])
            if (verbose):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)
        else:
            os.rename(f'{TEMP}/new0.wav', f'{TEMP}/newAudioFile.wav')

        cmd = [ffmpeg, '-y']
        if (HWACCEL is not None):
            cmd.extend(['-hwaccel', HWACCEL])
        cmd.extend([
            '-i', f'{TEMP}/newAudioFile.wav', '-i',
            f'{TEMP}/output{extension}', '-c:v', 'copy', '-movflags',
            '+faststart', outFile
        ])
        if (verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

    with open(f'{TEMP}/Renames.txt', 'r') as f:
        renames = f.read().splitlines()
        for i in range(0, len(renames), 2):
            os.rename(renames[i + 1], renames[i])

    rmtree(TEMP)

    # Create cache.txt to see if the created cache is usable for next time.
    if (BACK_MUS is not None):
        tracks -= 1
    file = open(f'{CACHE}/cache.txt', 'w')
    baseFile = os.path.basename(vidFile)
    file.write(
        f'{baseFile}\n{frameRate}\n{fileSize}\n{tracks}\n{COMBINE_TRAC}\n')
    file.close()

    conwrite('')
    return outFile
Code Example #4
def fastVideo(ffmpeg, videoFile, outFile, silentT, frameMargin, SAMPLE_RATE,
    AUD_BITRATE, verbose, videoSpeed, silentSpeed, cutByThisTrack, keepTracksSep):

    print('Running from fastVideo.py')

    import cv2

    conwrite('Reading audio.')

    if(not os.path.isfile(videoFile)):
        print('Could not find file:', videoFile)
        sys.exit(1)

    TEMP = tempfile.mkdtemp()
    speeds = [silentSpeed, videoSpeed]

    cap = cv2.VideoCapture(videoFile)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    fps = cap.get(cv2.CAP_PROP_FPS)

    tracks = vidTracks(videoFile, ffmpeg)

    if(cutByThisTrack >= tracks):
        print("Error! You choose a track that doesn't exist.")
        print(f'There are only {tracks-1} tracks. (starting from 0)')
        sys.exit(1)

    for trackNumber in range(tracks):
        cmd = [ffmpeg, '-i', videoFile, '-ab', AUD_BITRATE, '-ac', '2', '-ar',
        str(SAMPLE_RATE),'-map', f'0:a:{trackNumber}', f'{TEMP}/{trackNumber}.wav']
        if(verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)


    sampleRate, audioData = read(f'{TEMP}/{cutByThisTrack}.wav')
    chunks = getAudioChunks(audioData, sampleRate, fps, silentT, 2, frameMargin)

    # Handle the Audio
    for trackNumber in range(tracks):
        fastAudio(ffmpeg, f'{TEMP}/{trackNumber}.wav', f'{TEMP}/new{trackNumber}.wav',
            silentT, frameMargin, SAMPLE_RATE, AUD_BITRATE, verbose, silentSpeed,
            videoSpeed, False, chunks=chunks, fps=fps)

        if(not os.path.isfile(f'{TEMP}/new{trackNumber}.wav')):
            print('Error! Audio file not created.')
            sys.exit(1)

    out = cv2.VideoWriter(f'{TEMP}/spedup.mp4', fourcc, fps, (width, height))
    totalFrames = chunks[len(chunks) - 1][1]
    beginTime = time.time()

    remander = 0
    framesWritten = 0

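    # Write each kept frame 1/speed times, carrying the fractional part in
    # 'remander' so the average frame rate matches the target speed. A speed
    # of 99999 drops the frame entirely.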
    while cap.isOpened():
        ret, frame = cap.read()
        if(not ret):
            break

        cframe = int(cap.get(cv2.CAP_PROP_POS_FRAMES)) # current frame
        state = None
        for chunk in chunks:
            if(cframe >= chunk[0] and cframe <= chunk[1]):
                state = chunk[2]
                break

        if(state is not None):
            mySpeed = speeds[state]

            if(mySpeed != 99999):
                doIt = (1 / mySpeed) + remander
                for __ in range(int(doIt)):
                    out.write(frame)
                    framesWritten += 1
                remander = doIt % 1

        progressBar(cframe, totalFrames, beginTime, title='Creating new video')

    conwrite('Writing the output file.')

    cap.release()
    out.release()
    cv2.destroyAllWindows()

    if(verbose):
        print('Frames written', framesWritten)

    first = videoFile[:videoFile.rfind('.')]
    extension = videoFile[videoFile.rfind('.'):]

    if(outFile == ''):
        outFile = f'{first}_ALTERED{extension}'

    # Now mix new audio(s) and the new video.
    if(keepTracksSep):
        cmd = [ffmpeg, '-y']
        for i in range(tracks):
            cmd.extend(['-i', f'{TEMP}/new{i}.wav'])
        cmd.extend(['-i', f'{TEMP}/spedup.mp4'])
        for i in range(tracks):
            cmd.extend(['-map', f'{i}:a:0'])
        cmd.extend(['-map', f'{tracks}:v:0','-c:v', 'copy', '-movflags', '+faststart',
            outFile])
        if(verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
    else:
        # Merge all the audio tracks into one.
        if(tracks > 1):
            cmd = [ffmpeg]
            for i in range(tracks):
                cmd.extend(['-i', f'{TEMP}/new{i}.wav'])
            cmd.extend(['-filter_complex', f'amerge=inputs={tracks}', '-ac', '2',
                f'{TEMP}/newAudioFile.wav'])
            if(verbose):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)
        else:
            os.rename(f'{TEMP}/new0.wav', f'{TEMP}/newAudioFile.wav')

        cmd = [ffmpeg, '-y', '-i', f'{TEMP}/newAudioFile.wav', '-i',
            f'{TEMP}/spedup.mp4', '-c:v', 'copy', '-movflags', '+faststart',
            outFile]
        if(verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

    rmtree(TEMP)
    conwrite('')

    return outFile
Code Example #5
def fastVideo(ffmpeg, vidFile, outFile, chunks, includeFrame, speeds, tracks,
              abitrate, samplerate, debug, temp, keepTracksSep, vcodec, fps,
              exportAsAudio, vbitrate, preset, tune, log):

    if (not os.path.isfile(vidFile)):
        log.error('fastVideo.py could not find file: ' + str(vidFile))

    cap = cv2.VideoCapture(vidFile)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')

    for trackNum in range(tracks):
        fastAudio(ffmpeg,
                  f'{temp}/{trackNum}.wav',
                  f'{temp}/new{trackNum}.wav',
                  chunks,
                  speeds,
                  abitrate,
                  samplerate,
                  debug,
                  False,
                  log,
                  fps=fps)

        if (not os.path.isfile(f'{temp}/new{trackNum}.wav')):
            log.error('Audio file not created.')

    if (exportAsAudio):
        if (keepTracksSep):
            log.warning("Audio files can't have multiple tracks.")
        else:
            pass
            # TODO: combine all the audio tracks
        move(f'{temp}/0.wav', outFile)
        return None

    out = cv2.VideoWriter(f'{temp}/spedup.mp4', fourcc, fps, (width, height))

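    # If one of the speeds is 99999 (drop those sections entirely), start at
    # the first frame that will actually be kept and stop after the last one
    # instead of scanning the whole video.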
    if (speeds[0] == 99999 and speeds[1] != 99999):
        totalFrames = int(np.where(includeFrame == 1)[0][-1])
        cframe = int(np.where(includeFrame == 1)[0][0])
    elif (speeds[0] != 99999 and speeds[1] == 99999):
        totalFrames = int(np.where(includeFrame == 0)[0][-1])
        cframe = int(np.where(includeFrame == 0)[0][0])
    else:
        totalFrames = chunks[len(chunks) - 1][1]
        cframe = 0

    beginTime = time.time()
    starting = cframe
    cap.set(cv2.CAP_PROP_POS_FRAMES, cframe)
    remander = 0
    framesWritten = 0

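    # Write each kept frame 1/speed times, carrying the fractional remainder
    # so the effective playback speed is preserved; 99999 means drop.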
    while cap.isOpened():
        ret, frame = cap.read()
        if (not ret or cframe > totalFrames):
            break

        cframe = int(cap.get(cv2.CAP_PROP_POS_FRAMES))  # current frame
        try:
            state = includeFrame[cframe]
        except IndexError:
            state = 0

        mySpeed = speeds[state]

        if (mySpeed != 99999):
            doIt = (1 / mySpeed) + remander
            for __ in range(int(doIt)):
                out.write(frame)
                framesWritten += 1
            remander = doIt % 1

        progressBar(cframe - starting,
                    totalFrames - starting,
                    beginTime,
                    title='Creating new video')

    conwrite('Writing the output file.')

    cap.release()
    out.release()
    cv2.destroyAllWindows()

    log.debug('Frames written ' + str(framesWritten))

    # Now mix new audio(s) and the new video.
    if (keepTracksSep):
        cmd = [ffmpeg, '-y']
        for i in range(tracks):
            cmd.extend(['-i', f'{temp}/new{i}.wav'])
        cmd.extend(['-i', f'{temp}/spedup.mp4'])
        for i in range(tracks):
            cmd.extend(['-map', f'{i}:a:0'])

        cmd.extend(['-map', f'{tracks}:v:0', '-c:v', vcodec])
        if (vbitrate is None):
            cmd.extend(['-crf', '15'])
        else:
            cmd.extend(['-b:v', vbitrate])
        if (tune != 'none'):
            cmd.extend(['-tune', tune])
        cmd.extend(['-preset', preset, '-movflags', '+faststart', outFile])
        if (debug):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
    else:
        # Merge all the audio tracks into one.
        if (tracks > 1):
            cmd = [ffmpeg]
            for i in range(tracks):
                cmd.extend(['-i', f'{temp}/new{i}.wav'])
            cmd.extend([
                '-filter_complex', f'amerge=inputs={tracks}', '-ac', '2',
                f'{temp}/newAudioFile.wav'
            ])
            if (debug):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)
        else:
            move(f'{temp}/new0.wav', f'{temp}/newAudioFile.wav')

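        # Capture ffmpeg's combined stdout/stderr so a failed conversion can
        # be detected and retried below with the 'copy' video codec.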
        def pipeToConsole(myCommands):
            process = subprocess.Popen(myCommands,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.STDOUT)
            stdout, __ = process.communicate()
            return stdout.decode()

        cmd = [
            ffmpeg, '-y', '-i', f'{temp}/newAudioFile.wav', '-i',
            f'{temp}/spedup.mp4', '-c:v', vcodec
        ]
        if (vbitrate is None):
            cmd.extend(['-crf', '15'])
        else:
            cmd.extend(['-b:v', vbitrate])
        if (tune != 'none'):
            cmd.extend(['-tune', tune])
        cmd.extend([
            '-preset', preset, '-movflags', '+faststart', outFile,
            '-hide_banner'
        ])

        log.debug(cmd)
        message = pipeToConsole(cmd)
        log.debug('')
        log.debug(message)

        if ('Conversion failed!' in message):
            log.warning('The muxing/compression failed. '\
                'This may be a problem with your ffmpeg, your codec, or your bitrate.'\
                '\nTrying again, but using the "copy" video codec.')
            cmd = [
                ffmpeg, '-y', '-i', f'{temp}/newAudioFile.wav', '-i',
                f'{temp}/spedup.mp4', '-c:v', 'copy', '-movflags',
                '+faststart', outFile, '-nostats', '-loglevel', '0'
            ]
            subprocess.call(cmd)
        log.debug(cmd)

    conwrite('')
Code Example #6
def exportToResolve(myInput, output, clips, duration, sampleRate, log):
    pathurl = 'file://localhost' + os.path.abspath(myInput)

    name = os.path.basename(myInput)
    audioFile = isAudioFile(myInput)

    ntsc = 'FALSE'
    ana = 'FALSE' # anamorphic
    depth = '16'
    if(not audioFile):
        try:
            import cv2
            conwrite('Grabbing video dimensions.')

            cap = cv2.VideoCapture(myInput)
            width = str(int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
            height = str(int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            cap.release()
            cv2.destroyAllWindows()
        except ImportError:
            width = '1920'
            height = '1080'
    else:
        width = '1920'
        height = '1080'

    pixelar = 'square' # pixel aspect ratio
    colordepth = '24'
    sr = sampleRate

    if(audioFile):
        with open(output, 'w', encoding='utf-8') as outfile:
            outfile.write('<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE xmeml>\n')
            outfile.write('<xmeml version="5">\n')
            outfile.write('\t<sequence>\n')
            outfile.write('\t\t<name>Auto-Editor Audio Group</name>\n')
            outfile.write(f'\t\t<duration>{duration}</duration>\n')
            outfile.write('\t\t<rate>\n')
            outfile.write('\t\t\t<timebase>30</timebase>\n')
            outfile.write(f'\t\t\t<ntsc>{ntsc}</ntsc>\n')
            outfile.write('\t\t</rate>\n')
            outfile.write('\t\t<in>-1</in>\n')
            outfile.write('\t\t<out>-1</out>\n')
            outfile.write('\t\t<media>\n')
            outfile.write('\t\t\t<video>\n')
            outfile.write('\t\t\t\t<format>\n')
            outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
            outfile.write(f'\t\t\t\t\t\t<width>{width}</width>\n')
            outfile.write(f'\t\t\t\t\t\t<height>{height}</height>\n')
            outfile.write(f'\t\t\t\t\t\t<pixelaspectratio>{pixelar}</pixelaspectratio>\n')
            outfile.write('\t\t\t\t\t\t<rate>\n')
            outfile.write('\t\t\t\t\t\t\t<timebase>30</timebase>\n')
            outfile.write(f'\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
            outfile.write('\t\t\t\t\t\t</rate>\n')
            outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
            outfile.write('\t\t\t\t</format>\n')
            outfile.write('\t\t\t</video>\n')
            outfile.write('\t\t\t<audio>\n')
            outfile.write('\t\t\t\t<track>\n')

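            # Timeline math: each clip advances <start>/<end> by its length
            # divided by its speed (clip[2] is a percentage), and <in>/<out>
            # are the source frames scaled by the same factor.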
            total = 0
            for j, clip in enumerate(clips):
                myStart = int(total)
                total += (clip[1] - clip[0]) / (clip[2] / 100)
                myEnd = int(total)

                outfile.write(f'\t\t\t\t\t<clipitem id="clipitem-{j+1}">\n')
                outfile.write('\t\t\t\t\t\t<masterclipid>masterclip-1</masterclipid>\n')
                outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')
                outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
                outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')
                outfile.write(f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
                outfile.write(f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n')

                if(j == 0):
                    outfile.write('\t\t\t\t\t\t<file id="file-1">\n')
                    outfile.write(f'\t\t\t\t\t\t\t<name>{name}</name>\n')
                    outfile.write(f'\t\t\t\t\t\t\t<pathurl>{pathurl}</pathurl>\n')
                    outfile.write('\t\t\t\t\t\t\t<rate>\n')
                    outfile.write('\t\t\t\t\t\t\t\t<timebase>30</timebase>\n')
                    outfile.write(f'\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                    outfile.write('\t\t\t\t\t\t\t</rate>\n')
                    outfile.write('\t\t\t\t\t\t\t<media>\n')
                    outfile.write('\t\t\t\t\t\t\t\t<audio>\n')
                    outfile.write('\t\t\t\t\t\t\t\t\t<channelcount>1</channelcount>\n')
                    outfile.write('\t\t\t\t\t\t\t\t</audio>\n')
                    outfile.write('\t\t\t\t\t\t\t</media>\n')
                    outfile.write('\t\t\t\t\t\t</file>\n')
                else:
                    outfile.write(f'\t\t\t\t\t\t<file id="file-1"/>\n')
                outfile.write('\t\t\t\t\t\t<sourcetrack>\n')
                outfile.write('\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')
                outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')
                outfile.write('\t\t\t\t\t\t</sourcetrack>\n')
                outfile.write('\t\t\t\t\t</clipitem>\n')

            outfile.write('\t\t\t\t</track>\n')
            outfile.write('\t\t\t</audio>\n')
            outfile.write('\t\t</media>\n')
            outfile.write('\t</sequence>\n')
            outfile.write('</xmeml>')

            # Return early; the rest of this function only applies to video inputs.
            return None

            # End of audio file code.

    with open(output, 'w', encoding='utf-8') as outfile:
        outfile.write('<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE xmeml>\n')
        outfile.write('<xmeml version="4">\n')
        outfile.write('\t<sequence id="sequence-1" TL.SQAudioVisibleBase="0" TL.SQVideoVisibleBase="0" TL.SQVisibleBaseTime="0" TL.SQAVDividerPosition="0.5" TL.SQHideShyTracks="0" TL.SQHeaderWidth="236" TL.SQTimePerPixel="0.013085939262623341" MZ.EditLine="0" MZ.Sequence.PreviewFrameSizeHeight="720" MZ.Sequence.AudioTimeDisplayFormat="200" MZ.Sequence.PreviewRenderingClassID="1297106761" MZ.Sequence.PreviewRenderingPresetCodec="1297107278" MZ.Sequence.PreviewRenderingPresetPath="EncoderPresets/SequencePreview/795454d9-d3c2-429d-9474-923ab13b7018/I-Frame Only MPEG.epr" MZ.Sequence.PreviewUseMaxRenderQuality="false" MZ.Sequence.PreviewUseMaxBitDepth="false" MZ.Sequence.EditingModeGUID="795454d9-d3c2-429d-9474-923ab13b7018" MZ.Sequence.VideoTimeDisplayFormat="104" MZ.WorkOutPoint="10770278400000" MZ.WorkInPoint="0" explodedTracks="true">\n')
        outfile.write('\t\t<rate>\n')
        outfile.write('\t\t\t<timebase>30</timebase>\n')
        outfile.write(f'\t\t\t<ntsc>{ntsc}</ntsc>\n')
        outfile.write('\t\t</rate>\n')
        outfile.write('\t\t<name>Auto-Editor Video Group</name>\n')
        outfile.write('\t\t<media>\n')
        outfile.write('\t\t\t<video>\n')
        outfile.write('\t\t\t\t<format>\n')
        outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
        outfile.write('\t\t\t\t\t\t<rate>\n')
        outfile.write('\t\t\t\t\t\t\t<timebase>30</timebase>\n')
        outfile.write(f'\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
        outfile.write('\t\t\t\t\t\t</rate>\n')
        outfile.write(f'\t\t\t\t\t\t<width>{width}</width>\n')
        outfile.write(f'\t\t\t\t\t\t<height>{height}</height>\n')
        outfile.write(f'\t\t\t\t\t\t<anamorphic>{ana}</anamorphic>\n')
        outfile.write(f'\t\t\t\t\t\t<pixelaspectratio>{pixelar}</pixelaspectratio>\n')
        outfile.write('\t\t\t\t\t\t<fielddominance>none</fielddominance>\n')
        outfile.write(f'\t\t\t\t\t\t<colordepth>{colordepth}</colordepth>\n')
        outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
        outfile.write('\t\t\t\t</format>\n')
        outfile.write('\t\t\t\t<track>\n')

        # Handle clips.
        total = 0
        for j, clip in enumerate(clips):
            myStart = int(total)
            total += (clip[1] - clip[0]) / (clip[2] / 100)
            myEnd = int(total)

            outfile.write(f'\t\t\t\t\t<clipitem id="clipitem-{j+7}">\n')
            outfile.write('\t\t\t\t\t\t<masterclipid>masterclip-2</masterclipid>\n')
            outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')
            outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
            outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')
            outfile.write(f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
            outfile.write(f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n')

            if(j == 0):
                outfile.write('\t\t\t\t\t\t<file id="file-2">\n')
                outfile.write(f'\t\t\t\t\t\t\t<name>{name}</name>\n')
                outfile.write(f'\t\t\t\t\t\t\t<pathurl>{pathurl}</pathurl>\n')
                outfile.write('\t\t\t\t\t\t\t<rate>\n')
                outfile.write('\t\t\t\t\t\t\t\t<timebase>30</timebase>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                outfile.write('\t\t\t\t\t\t\t</rate>\n')
                outfile.write(f'\t\t\t\t\t\t\t<duration>{duration}</duration>\n')
                outfile.write('\t\t\t\t\t\t\t<media>\n')
                outfile.write('\t\t\t\t\t\t\t\t<video>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t\t<rate>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t\t\t<timebase>30</timebase>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t\t</rate>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<width>{width}</width>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<height>{height}</height>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<anamorphic>{ana}</anamorphic>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<pixelaspectratio>{pixelar}</pixelaspectratio>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t\t<fielddominance>none</fielddominance>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                outfile.write('\t\t\t\t\t\t\t\t</video>\n')
                outfile.write('\t\t\t\t\t\t\t\t<audio>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<depth>{depth}</depth>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<channelcount>2</channelcount>\n')
                outfile.write('\t\t\t\t\t\t\t\t</audio>\n')
                outfile.write('\t\t\t\t\t\t\t</media>\n')
                outfile.write('\t\t\t\t\t\t</file>\n')
            else:
                outfile.write(f'\t\t\t\t\t\t<file id="file-2"/>\n')

            # Add the speed effect if necessary.
            if(clip[2] != 100):
                outfile.write('\t\t\t\t\t\t<filter>\n')
                outfile.write('\t\t\t\t\t\t\t<effect>\n')
                outfile.write('\t\t\t\t\t\t\t\t<name>Time Remap</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t<effectid>timeremap</effectid>\n')
                outfile.write('\t\t\t\t\t\t\t\t<effectcategory>motion</effectcategory>\n')
                outfile.write('\t\t\t\t\t\t\t\t<effecttype>motion</effecttype>\n')
                outfile.write('\t\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                outfile.write('\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<parameterid>variablespeed</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>variablespeed</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemin>0</valuemin>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemax>1</valuemax>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>0</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<parameterid>speed</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>speed</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemin>-100000</valuemin>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemax>100000</valuemax>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t<value>{clip[2]}</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<parameterid>reverse</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>reverse</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<parameterid>frameblending</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>frameblending</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t</effect>\n')
                outfile.write('\t\t\t\t\t\t</filter>\n')

            # Linking for video blocks
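            # clipitem ids are laid out in blocks of len(clips)+1 per track,
            # offset by 7: i=0 is the video clip, i=1 and i=2 are its linked
            # stereo audio channels.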
            for i in range(3):
                outfile.write('\t\t\t\t\t\t<link>\n')
                outfile.write(f'\t\t\t\t\t\t\t<linkclipref>clipitem-{(i*(len(clips)+1))+7+j}</linkclipref>\n')
                if(i == 0):
                    outfile.write('\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                else:
                    outfile.write('\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')
                if(i == 2):
                    outfile.write('\t\t\t\t\t\t\t<trackindex>2</trackindex>\n')
                else:
                    outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')
                outfile.write(f'\t\t\t\t\t\t\t<clipindex>{j+1}</clipindex>\n')
                if(i == 1 or i == 2):
                    outfile.write('\t\t\t\t\t\t\t<groupindex>1</groupindex>\n')
                outfile.write('\t\t\t\t\t\t</link>\n')
            outfile.write('\t\t\t\t\t</clipitem>\n')
        outfile.write('\t\t\t\t</track>\n')
        outfile.write('\t\t\t</video>\n')
        outfile.write('\t\t\t<audio>\n')
        outfile.write('\t\t\t\t<numOutputChannels>2</numOutputChannels>\n')
        outfile.write('\t\t\t\t<format>\n')
        outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
        outfile.write(f'\t\t\t\t\t\t<depth>{depth}</depth>\n')
        outfile.write(f'\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
        outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
        outfile.write('\t\t\t\t</format>\n')
        outfile.write('\t\t\t\t<track PannerIsInverted="true" PannerStartKeyframe="-91445760000000000,0.5,0,0,0,0,0,0" PannerName="Balance" currentExplodedTrackIndex="0" totalExplodedTrackCount="2" premiereTrackType="Stereo">\n')

        # Audio Clips
        total = 0
        for j, clip in enumerate(clips):
            outfile.write(f'\t\t\t\t\t<clipitem id="clipitem-{len(clips)+8+j}" premiereChannelType="stereo">\n')
            outfile.write(f'\t\t\t\t\t\t<masterclipid>masterclip-2</masterclipid>\n')
            outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')

            myStart = int(total)
            total += (clip[1] - clip[0]) / (clip[2] / 100)
            myEnd = int(total)

            outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
            outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')

            outfile.write(f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
            outfile.write(f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n')
            outfile.write('\t\t\t\t\t\t<file id="file-2"/>\n')
            outfile.write('\t\t\t\t\t\t<sourcetrack>\n')
            outfile.write('\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')
            outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')
            outfile.write('\t\t\t\t\t\t</sourcetrack>\n')

            # Add speed effect for audio blocks
            if(clip[2] != 100):
                outfile.write('\t\t\t\t\t\t<filter>\n')
                outfile.write('\t\t\t\t\t\t\t<effect>\n')
                outfile.write('\t\t\t\t\t\t\t\t<name>Time Remap</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t<effectid>timeremap</effectid>\n')
                outfile.write('\t\t\t\t\t\t\t\t<effectcategory>motion</effectcategory>\n')
                outfile.write('\t\t\t\t\t\t\t\t<effecttype>motion</effecttype>\n')
                outfile.write('\t\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                outfile.write('\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<parameterid>variablespeed</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>variablespeed</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemin>0</valuemin>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemax>1</valuemax>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>0</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<parameterid>speed</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>speed</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemin>-100000</valuemin>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemax>100000</valuemax>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t<value>{clip[2]}</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<parameterid>reverse</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>reverse</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<parameterid>frameblending</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>frameblending</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t</effect>\n')
                outfile.write('\t\t\t\t\t\t</filter>\n')

            if(audioFile):
                startOn = 1
            else:
                startOn = 0
            for i in range(startOn, 3):
                outfile.write('\t\t\t\t\t\t<link>\n')
                outfile.write(f'\t\t\t\t\t\t\t<linkclipref>clipitem-{(i*(len(clips)+1))+7+j}</linkclipref>\n')
                if(i == 0):
                    outfile.write('\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                else:
                    outfile.write('\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')

                if(i == 2):
                    outfile.write('\t\t\t\t\t\t\t<trackindex>2</trackindex>\n')
                else:
                    outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')

                outfile.write(f'\t\t\t\t\t\t\t<clipindex>{j+1}</clipindex>\n')

                if(i == 1 or i == 2):
                    outfile.write('\t\t\t\t\t\t\t<groupindex>1</groupindex>\n')
                outfile.write('\t\t\t\t\t\t</link>\n')
            outfile.write('\t\t\t\t\t</clipitem>\n')
        outfile.write('\t\t\t\t\t<outputchannelindex>1</outputchannelindex>\n')
        outfile.write('\t\t\t\t</track>\n')
        outfile.write('\t\t\t</audio>\n')
        outfile.write('\t\t</media>\n')
        outfile.write('\t</sequence>\n')
        outfile.write('</xmeml>')

    conwrite('')
Code Example #7
def fastVideo(ffmpeg, vidFile, outFile, chunks, speeds, tracks, abitrate,
              samplerate, debug, temp, keepTracksSep, vcodec, fps,
              exportAsAudio, vbitrate, log):

    if (not os.path.isfile(vidFile)):
        log.error('Could not find file ' + vidFile)

    cap = cv2.VideoCapture(vidFile)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')

    for trackNum in range(tracks):
        fastAudio(ffmpeg,
                  f'{temp}/{trackNum}.wav',
                  f'{temp}/new{trackNum}.wav',
                  chunks,
                  speeds,
                  abitrate,
                  samplerate,
                  debug,
                  False,
                  log,
                  fps=fps)

        if (not os.path.isfile(f'{temp}/new{trackNum}.wav')):
            log.error('Audio file not created.')

    if (exportAsAudio):
        # TODO: combine all the audio tracks
        # TODO: warn the user if they add keep_tracks_seperate
        os.rename(f'{temp}/0.wav', outFile)
        return None

    out = cv2.VideoWriter(f'{temp}/spedup.mp4', fourcc, fps, (width, height))
    totalFrames = chunks[len(chunks) - 1][1]
    beginTime = time.time()

    remander = 0
    framesWritten = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if (not ret):
            break

        cframe = int(cap.get(cv2.CAP_PROP_POS_FRAMES))  # current frame
        state = None
        for chunk in chunks:
            if (cframe >= chunk[0] and cframe <= chunk[1]):
                state = chunk[2]
                break

        if (state is not None):
            mySpeed = speeds[state]

            if (mySpeed != 99999):
                doIt = (1 / mySpeed) + remander
                for __ in range(int(doIt)):
                    out.write(frame)
                    framesWritten += 1
                remander = doIt % 1

        progressBar(cframe, totalFrames, beginTime, title='Creating new video')

    conwrite('Writing the output file.')

    cap.release()
    out.release()
    cv2.destroyAllWindows()

    log.debug('Frames written ' + str(framesWritten))

    # Now mix new audio(s) and the new video.
    if (keepTracksSep):
        cmd = [ffmpeg, '-y']
        for i in range(tracks):
            cmd.extend(['-i', f'{temp}/new{i}.wav'])
        cmd.extend(['-i', f'{temp}/spedup.mp4'])
        for i in range(tracks):
            cmd.extend(['-map', f'{i}:a:0'])
        cmd.extend([
            '-map', f'{tracks}:v:0', '-b:v', vbitrate, '-c:v', vcodec,
            '-movflags', '+faststart', outFile
        ])
        if (debug):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
    else:
        # Merge all the audio tracks into one.
        if (tracks > 1):
            cmd = [ffmpeg]
            for i in range(tracks):
                cmd.extend(['-i', f'{temp}/new{i}.wav'])
            cmd.extend([
                '-filter_complex', f'amerge=inputs={tracks}', '-ac', '2',
                f'{temp}/newAudioFile.wav'
            ])
            if (debug):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)
        else:
            os.rename(f'{temp}/new0.wav', f'{temp}/newAudioFile.wav')

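        # Run ffmpeg while capturing its output so the 'Conversion failed!'
        # message can be caught and the mux retried with '-c:v copy'.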
        def pipeToConsole(myCommands):
            process = subprocess.Popen(myCommands,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.STDOUT)
            stdout, __ = process.communicate()
            return stdout.decode()

        cmd = [
            ffmpeg, '-y', '-i', f'{temp}/newAudioFile.wav', '-i',
            f'{temp}/spedup.mp4', '-b:v', vbitrate, '-c:v', vcodec,
            '-movflags', '+faststart', outFile, '-hide_banner'
        ]

        message = pipeToConsole(cmd)
        log.debug('')
        log.debug(message)

        if ('Conversion failed!' in message):
            log.warning('The muxing/compression failed. '\
                'This may be a problem with your ffmpeg, your codec, or your bitrate.'\
                '\nTrying again, but using the "copy" video codec.')
            cmd = [
                ffmpeg, '-y', '-i', f'{temp}/newAudioFile.wav', '-i',
                f'{temp}/spedup.mp4', '-c:v', 'copy', '-movflags',
                '+faststart', outFile, '-nostats', '-loglevel', '0'
            ]
            subprocess.call(cmd)
        log.debug(cmd)

    conwrite('')
Code Example #8
def main():
    options = []
    option_names = []

    def add_argument(*names,
                     nargs=1,
                     type=str,
                     default=None,
                     action='default',
                     range=None,
                     choices=None,
                     help='',
                     extra=''):
        nonlocal options
        nonlocal option_names

        newDic = {}
        newDic['names'] = names
        newDic['nargs'] = nargs
        newDic['type'] = type
        newDic['default'] = default
        newDic['action'] = action
        newDic['help'] = help
        newDic['extra'] = extra
        newDic['range'] = range
        newDic['choices'] = choices
        options.append(newDic)
        option_names = option_names + list(names)

    add_argument('(input)',
                 nargs='*',
                 help='the path to a file, folder, or url you want edited.')
    add_argument('--help',
                 '-h',
                 action='store_true',
                 help='print this message and exit.')

    add_argument(
        '--frame_margin',
        '-m',
        type=int,
        default=6,
        range='0 to Infinity',
        help=
        'set how many "silent" frames of on either side of "loud" sections be included.'
    )
    add_argument(
        '--silent_threshold',
        '-t',
        type=float_type,
        default=0.04,
        range='0 to 1',
        help='set the volume that audio frames need to surpass to be "loud".')
    add_argument(
        '--video_speed',
        '--sounded_speed',
        '-v',
        type=float_type,
        default=1.00,
        range='0 to 999999',
        help='set the speed that "loud" sections should be played at.')
    add_argument(
        '--silent_speed',
        '-s',
        type=float_type,
        default=99999,
        range='0 to 99999',
        help='set the speed that "silent" sections should be played at.')
    add_argument('--output_file',
                 '-o',
                 nargs='*',
                 help='set the name(s) of the new output.')

    add_argument('--no_open',
                 action='store_true',
                 help='do not open the file after editing is done.')
    add_argument(
        '--min_clip_length',
        '-mclip',
        type=int,
        default=3,
        range='0 to Infinity',
        help=
        'set the minimum length a clip can be. If a clip is too short, cut it.'
    )
    add_argument(
        '--min_cut_length',
        '-mcut',
        type=int,
        default=6,
        range='0 to Infinity',
        help=
        "set the minimum length a cut can be. If a cut is too short, don't cut"
    )
    add_argument('--combine_files',
                 action='store_true',
                 help='combine all input files into one before editing.')
    add_argument('--preview',
                 action='store_true',
                 help='show stats on how the input will be cut.')

    add_argument(
        '--cut_by_this_audio',
        '-ca',
        type=file_type,
        help="base cuts by this audio file instead of the video's audio.")
    add_argument('--cut_by_this_track',
                 '-ct',
                 type=int,
                 default=0,
                 range='0 to the number of audio tracks',
                 help='base cuts by a different audio track in the video.')
    add_argument('--cut_by_all_tracks',
                 '-cat',
                 action='store_true',
                 help='combine all audio tracks into one before basing cuts.')
    add_argument('--keep_tracks_seperate',
                 action='store_true',
                 help="don't combine audio tracks when exporting.")

    add_argument(
        '--my_ffmpeg',
        action='store_true',
        help='use your ffmpeg and other binaries instead of the ones packaged.'
    )
    add_argument('--version',
                 action='store_true',
                 help='show which auto-editor you have.')
    add_argument('--debug',
                 '--verbose',
                 action='store_true',
                 help='show helpful debugging values.')

    # TODO: add export_as_video
    add_argument('--export_as_audio',
                 '-exa',
                 action='store_true',
                 help='export as a WAV audio file.')
    add_argument(
        '--export_to_premiere',
        '-exp',
        action='store_true',
        help=
        'export as an XML file for Adobe Premiere Pro instead of outputting a media file.'
    )
    add_argument(
        '--export_to_resolve',
        '-exr',
        action='store_true',
        help=
        'export as an XML file for DaVinci Resolve instead of outputting a media file.'
    )

    add_argument('--video_bitrate',
                 '-vb',
                 help='set the number of bits per second for video.')
    add_argument('--audio_bitrate',
                 '-ab',
                 help='set the number of bits per second for audio.')
    add_argument('--sample_rate',
                 '-r',
                 type=sample_rate_type,
                 help='set the sample rate of the input and output videos.')
    add_argument('--video_codec',
                 '-vcodec',
                 help='set the video codec for the output file.')
    add_argument(
        '--preset',
        '-p',
        default='medium',
        choices=[
            'ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium',
            'slow', 'slower', 'veryslow'
        ],
        help=
        'set the preset for ffmpeg to help save file size or increase quality.'
    )
    add_argument('--tune',
                 default='none',
                 choices=[
                     'film', 'animation', 'grain', 'stillimage', 'fastdecode',
                     'zerolatency', 'none'
                 ],
                 help='set the tune for ffmpeg to help compress video better.')

    add_argument(
        '--ignore',
        nargs='*',
        help=
        "the range (in seconds) that shouldn't be edited at all. (uses range syntax)"
    )
    add_argument('--cut_out', nargs='*',
        help='the range (in seconds) that should be cut out completely, '\
            'regardless of anything else. (uses range syntax)')

    dirPath = os.path.dirname(os.path.realpath(__file__))
    # Fixes pip not able to find other included modules.
    sys.path.append(os.path.abspath(dirPath))

    from usefulFunctions import Log

    class parse_options():
        def __init__(self, userArgs, log, *args):
            # Set the default options.
            for options in args:
                for option in options:
                    key = option['names'][0].replace('-', '')
                    if (option['action'] == 'store_true'):
                        value = False
                    elif (option['nargs'] != 1):
                        value = []
                    else:
                        value = option['default']
                    setattr(self, key, value)

            def get_option(item, the_args):
                for options in the_args:
                    for option in options:
                        if (item in option['names']):
                            return option
                return None

            # Figure out attributes changed by user.
            myList = []
            settingInputs = True
            optionList = 'input'
            i = 0
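            # Positional tokens are collected into myList and flushed onto the
            # current multi-value option (initially 'input') when the next
            # option, or the end of the arguments, is reached.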
            while i < len(userArgs):
                item = userArgs[i]
                if (i == len(userArgs) - 1):
                    nextItem = None
                else:
                    nextItem = userArgs[i + 1]

                option = get_option(item, args)

                if (option is not None):
                    if (optionList is not None):
                        setattr(self, optionList, myList)
                    settingInputs = False
                    optionList = None
                    myList = []

                    key = option['names'][0].replace('-', '')

                    # show help for specific option.
                    if (nextItem == '-h' or nextItem == '--help'):
                        print(' ', ', '.join(option['names']))
                        print('   ', option['help'])
                        print('   ', option['extra'])
                        if (option['action'] == 'default'):
                            print('    type:', option['type'].__name__)
                            print('    default:', option['default'])
                            if (option['range'] is not None):
                                print('    range:', option['range'])
                            if (option['choices'] is not None):
                                print('    choices:',
                                      ', '.join(option['choices']))
                        else:
                            print(f'    type: flag')
                        sys.exit()

                    if (option['nargs'] != 1):
                        settingInputs = True
                        optionList = key
                    elif (option['action'] == 'store_true'):
                        value = True
                    else:
                        try:
                            # Convert to correct type.
                            value = option['type'](nextItem)
                        except:
                            typeName = option['type'].__name__
                            log.error(
                                f'Couldn\'t convert "{nextItem}" to {typeName}'
                            )
                        if (option['choices'] is not None):
                            if (value not in option['choices']):
                                log.error(
                                    f'{value} is not a choice for {option}')
                        i += 1
                    setattr(self, key, value)
                else:
                    if (settingInputs and not item.startswith('-')):
                        # Input file names
                        myList.append(item)
                    else:
                        # Unknown Option!
                        hmm = difflib.get_close_matches(item, option_names)
                        potential_options = ', '.join(hmm)
                        append = ''
                        if (hmm != []):
                            append = f'\n\n    Did you mean:\n        {potential_options}'
                        log.error(f'Unknown option: {item}{append}')
                i += 1
            if (settingInputs):
                setattr(self, optionList, myList)

    args = parse_options(sys.argv[1:], Log(3), options)

    # Print help screen for entire program.
    if (args.help):
        for option in options:
            print(' ', ', '.join(option['names']) + ':', option['help'])
        print('\nHave an issue? Make an issue. '\
            'Visit https://github.com/wyattblue/auto-editor/issues')
        sys.exit()

    if (args.version):
        print('Auto-Editor version', version)
        sys.exit()

    from usefulFunctions import isAudioFile, vidTracks, conwrite, getAudioChunks
    from wavfile import read, write

    if (not args.preview):
        if (args.export_to_premiere):
            conwrite('Exporting to Adobe Premiere Pro XML file.')
        elif (args.export_to_resolve):
            conwrite('Exporting to DaVinci Resolve XML file.')
        elif (args.export_as_audio):
            conwrite('Exporting as audio.')
        else:
            conwrite('Starting.')

    newF = None
    newP = None
    if (platform.system() == 'Windows' and not args.my_ffmpeg):
        newF = os.path.join(dirPath, 'win-ffmpeg/bin/ffmpeg.exe')
        newP = os.path.join(dirPath, 'win-ffmpeg/bin/ffprobe.exe')
    if (platform.system() == 'Darwin' and not args.my_ffmpeg):
        newF = os.path.join(dirPath, 'mac-ffmpeg/bin/ffmpeg')
        newP = os.path.join(dirPath, 'mac-ffmpeg/bin/ffprobe')
    if (newF is not None and os.path.isfile(newF)):
        ffmpeg = newF
        ffprobe = newP
    else:
        ffmpeg = 'ffmpeg'
        ffprobe = 'ffprobe'

    makingDataFile = args.export_to_premiere or args.export_to_resolve

    is64bit = '64-bit' if sys.maxsize > 2**32 else '32-bit'

    if (args.debug):
        print('Python Version:', platform.python_version(), is64bit)
        print('Platform:', platform.system())
        # Platform can be 'Linux', 'Darwin' (macOS), 'Java', 'Windows'

        print('FFmpeg path:', ffmpeg)
        print('Auto-Editor version', version)
        if (args.input == []):
            sys.exit()

    log = Log(3 if args.debug else 2)

    if (is64bit == '32-bit'):
        # I should have put this warning a long time ago.
        log.warning("You have the 32-bit version of Python, which means you won't be " \
            'able to handle long videos.')

    if (args.frame_margin < 0):
        log.error('Frame margin cannot be negative.')

    if (args.input == []):
        log.error(
            'You need the (input) argument so that auto-editor can do the work for you.'
        )

    if (args.silent_speed <= 0 or args.silent_speed > 99999):
        args.silent_speed = 99999
    if (args.video_speed <= 0 or args.video_speed > 99999):
        args.video_speed = 99999

    inputList = []
    for myInput in args.input:
        if (os.path.isdir(myInput)):

            def validFiles(path):
                for f in os.listdir(path):
                    if (not f.startswith('.') and not f.endswith('.xml')
                            and not f.endswith('.png')
                            and not f.endswith('.md')
                            and not os.path.isdir(f)):
                        yield os.path.join(path, f)

            inputList += sorted(validFiles(myInput))
        elif (os.path.isfile(myInput)):
            inputList.append(myInput)
        elif (myInput.startswith('http://') or myInput.startswith('https://')):
            print('URL detected, using youtube-dl to download from webpage.')
            basename = re.sub(r'\W+', '-', myInput)
            cmd = [
                'youtube-dl', '-f',
                'bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4', myInput,
                '--output', basename, '--no-check-certificate'
            ]
            if (ffmpeg != 'ffmpeg'):
                cmd.extend(['--ffmpeg-location', ffmpeg])
            subprocess.call(cmd)
            inputList.append(basename + '.mp4')
        else:
            log.error('Could not find file: ' + myInput)

    if (args.output_file is None):
        args.output_file = []

    if (len(args.output_file) < len(inputList)):
        for i in range(len(inputList) - len(args.output_file)):
            oldFile = inputList[i]
            dotIndex = oldFile.rfind('.')
            if (args.export_to_premiere or args.export_to_resolve):
                args.output_file.append(oldFile[:dotIndex] + '.xml')
            else:
                ext = oldFile[dotIndex:]
                if (args.export_as_audio):
                    ext = '.wav'
                end = '_ALTERED' + ext
                args.output_file.append(oldFile[:dotIndex] + end)

    TEMP = tempfile.mkdtemp()

    if (args.combine_files):
        with open(f'{TEMP}/combines.txt', 'w') as outfile:
            for fileref in inputList:
                outfile.write(f"file '{fileref}'\n")

        cmd = [
            ffmpeg, '-f', 'concat', '-safe', '0', '-i', f'{TEMP}/combines.txt',
            '-c', 'copy', 'combined.mp4'
        ]
        subprocess.call(cmd)
        inputList = ['combined.mp4']

    speeds = [args.silent_speed, args.video_speed]

    startTime = time.time()

    numCuts = 0
    for i, INPUT_FILE in enumerate(inputList):
        newOutput = args.output_file[i]
        fileFormat = INPUT_FILE[INPUT_FILE.rfind('.'):]

        # Grab the sample rate from the input.
        sr = args.sample_rate
        if (sr is None):
            output = pipeToConsole([ffmpeg, '-i', INPUT_FILE, '-hide_banner'])
            try:
                matchDict = re.search(r'\s(?P<grp>\w+?)\sHz',
                                      output).groupdict()
                sr = matchDict['grp']
            except AttributeError:
                sr = 48000
        args.sample_rate = sr

        # Grab the audio bitrate from the input.
        abit = args.audio_bitrate
        if (abit is None):
            output = pipeToConsole([
                ffprobe, '-v', 'error', '-select_streams', 'a:0',
                '-show_entries', 'stream=bit_rate', '-of', 'compact=p=0:nk=1',
                INPUT_FILE
            ])
            try:
                abit = int(output)
            except:
                log.warning("Couldn't automatically detect audio bitrate.")
                abit = '500k'
                log.debug('Setting audio bitrate to ' + abit)
            else:
                abit = str(round(abit / 1000)) + 'k'
        else:
            abit = str(abit)
        args.audio_bitrate = abit

        if (isAudioFile(INPUT_FILE)):
            fps = 30
            tracks = 1
            cmd = [
                ffmpeg, '-y', '-i', INPUT_FILE, '-b:a', args.audio_bitrate,
                '-ac', '2', '-ar',
                str(args.sample_rate), '-vn', f'{TEMP}/fastAud.wav'
            ]
            if (args.debug):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)

            sampleRate, audioData = read(f'{TEMP}/fastAud.wav')
        else:
            if (args.export_to_premiere):
                fps = 29.97
            else:
                fps = ffmpegFPS(ffmpeg, INPUT_FILE, log)
            tracks = vidTracks(INPUT_FILE, ffprobe, log)
            if (args.cut_by_this_track >= tracks):
                log.error("You choose a track that doesn't exist.\n" \
                    f'There are only {tracks-1} tracks. (starting from 0)')

            vcodec = args.video_codec
            if (vcodec is None):
                output = pipeToConsole(
                    [ffmpeg, '-i', INPUT_FILE, '-hide_banner'])
                try:
                    matchDict = re.search(r'Video:\s(?P<video>\w+?)\s',
                                          output).groupdict()
                    vcodec = matchDict['video']
                    log.debug(vcodec)
                except AttributeError:
                    vcodec = 'copy'
                    log.warning(
                        "Couldn't automatically detect the video codec.")

            if (args.video_bitrate is not None and vcodec == 'copy'):
                log.warning('Your bitrate will not be applied because' \
                        ' the video codec is "copy".')

            for trackNum in range(tracks):
                cmd = [
                    ffmpeg, '-y', '-i', INPUT_FILE, '-ab', args.audio_bitrate,
                    '-ac', '2', '-ar',
                    str(args.sample_rate), '-map', f'0:a:{trackNum}',
                    f'{TEMP}/{trackNum}.wav'
                ]
                if (args.debug):
                    cmd.extend(['-hide_banner'])
                else:
                    cmd.extend(['-nostats', '-loglevel', '0'])
                subprocess.call(cmd)

            if (args.cut_by_all_tracks):
                cmd = [
                    ffmpeg, '-y', '-i', INPUT_FILE, '-filter_complex',
                    f'[0:a]amerge=inputs={tracks}', '-map', 'a', '-ar',
                    str(args.sample_rate), '-ac', '2', '-f', 'wav',
                    f'{TEMP}/combined.wav'
                ]
                if (args.debug):
                    cmd.extend(['-hide_banner'])
                else:
                    cmd.extend(['-nostats', '-loglevel', '0'])

                subprocess.call(cmd)

                sampleRate, audioData = read(f'{TEMP}/combined.wav')
            else:
                if (os.path.isfile(f'{TEMP}/{args.cut_by_this_track}.wav')):
                    sampleRate, audioData = read(
                        f'{TEMP}/{args.cut_by_this_track}.wav')
                else:
                    log.error('Audio track not found!')

        chunks = getAudioChunks(audioData, sampleRate, fps,
                                args.silent_threshold, args.frame_margin,
                                args.min_clip_length, args.min_cut_length,
                                args.ignore, args.cut_out, log)

        clips = []
        for chunk in chunks:
            if (speeds[chunk[2]] == 99999):
                numCuts += 1
            else:
                clips.append([chunk[0], chunk[1], speeds[chunk[2]] * 100])

        if (fps is None and not isAudioFile(INPUT_FILE)):
            if (makingDataFile):
                dotIndex = INPUT_FILE.rfind('.')
                end = '_constantFPS' + oldFile[dotIndex:]
                constantLoc = oldFile[:dotIndex] + end
            else:
                constantLoc = f'{TEMP}/constantVid{fileFormat}'
            cmd = [
                ffmpeg, '-y', '-i', INPUT_FILE, '-filter:v', 'fps=fps=30',
                constantLoc
            ]
            if (args.debug):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)
            INPUT_FILE = constantLoc

        if (args.preview):
            args.no_open = True
            from preview import preview

            preview(INPUT_FILE, chunks, speeds, args.debug)
            continue

        if (args.export_to_premiere):
            args.no_open = True
            from premiere import exportToPremiere

            exportToPremiere(INPUT_FILE, TEMP, newOutput, clips, tracks,
                             sampleRate, log)
            continue
        if (args.export_to_resolve):
            args.no_open = True
            duration = chunks[len(chunks) - 1][1]
            from resolve import exportToResolve

            exportToResolve(INPUT_FILE, newOutput, clips, duration, sampleRate,
                            log)
            continue
        if (isAudioFile(INPUT_FILE) and not makingDataFile):
            from fastAudio import fastAudio

            fastAudio(ffmpeg, INPUT_FILE, newOutput, chunks, speeds,
                      args.audio_bitrate, sampleRate, args.debug, True, log)
            continue

        from fastVideo import fastVideo
        fastVideo(ffmpeg, INPUT_FILE, newOutput, chunks, speeds, tracks,
                  args.audio_bitrate, sampleRate, args.debug, TEMP,
                  args.keep_tracks_seperate, vcodec, fps, args.export_as_audio,
                  args.video_bitrate, args.preset, args.tune, log)

    if (not os.path.isfile(newOutput)):
        log.error(f'The file {newOutput} was not created.')

    if (not args.preview and not makingDataFile):
        timeLength = round(time.time() - startTime, 2)
        minutes = timedelta(seconds=round(timeLength))
        print(f'Finished. Took {timeLength} seconds ({minutes})')

    if (not args.preview and makingDataFile):
        timeSave = numCuts * 2  # assuming making each cut takes about 2 seconds.
        units = 'seconds'
        if (timeSave >= 3600):
            timeSave = round(timeSave / 3600, 1)
            if (timeSave % 1 == 0):
                timeSave = round(timeSave)
            units = 'hours'
        elif (timeSave >= 60):
            timeSave = round(timeSave / 60, 1)
            if (timeSave >= 10 or timeSave % 1 == 0):
                timeSave = round(timeSave)
            units = 'minutes'

        print(f'Auto-Editor made {numCuts} cuts',
              end='')  # Don't add a newline.
        if (numCuts > 4):
            print(
                f', which would have taken about {timeSave} {units} if edited manually.'
            )
        else:
            print('.')

    if (not args.no_open):
        try:  # should work on Windows
            os.startfile(newOutput)
        except AttributeError:
            try:  # should work on macOS and some Linux distributions
                subprocess.call(['open', newOutput])
            except Exception:
                try:  # should work on WSL2
                    subprocess.call(['cmd.exe', '/C', 'start', newOutput])
                except Exception:
                    log.warning('Could not open output file.')
    rmtree(TEMP)
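
Note on the chunk handling above: a chunk is a triple [startFrame, endFrame, speedIndex], and a speed of 99999 marks a section that is cut entirely. The short sketch below restates that loop in isolation; the helper name chunksToClips is made up for illustration.

def chunksToClips(chunks, speeds):
    # Hypothetical helper restating the loop above: a chunk is
    # [startFrame, endFrame, speedIndex]; speed 99999 means "cut this out".
    clips = []
    numCuts = 0
    for chunk in chunks:
        if speeds[chunk[2]] == 99999:
            numCuts += 1
        else:
            # Clips keep the speed as a percentage, as the exporters expect.
            clips.append([chunk[0], chunk[1], speeds[chunk[2]] * 100])
    return clips, numCuts

# Example: frames 0-29 are "silent" (cut), frames 30-119 play at normal speed.
clips, numCuts = chunksToClips([[0, 30, 0], [30, 120, 1]], [99999, 1.0])
print(clips, numCuts)  # [[30, 120, 100.0]] 1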
Code Example #9
File: fastAudio.py Project: yhfy2006/auto-editor
def fastAudio(ffmpeg,
              theFile,
              outFile,
              chunks,
              speeds,
              audioBit,
              samplerate,
              debug,
              needConvert,
              log,
              fps=30):

    if (not os.path.isfile(theFile)):
        log.error('Could not find file ' + theFile)

    if (needConvert):
        # Only print this here so other scripts can use this function.
        print('Running from fastAudio.py')

        import tempfile
        from shutil import rmtree

        TEMP = tempfile.mkdtemp()

        cmd = [
            ffmpeg, '-i', theFile, '-b:a', audioBit, '-ac', '2', '-ar',
            str(samplerate), '-vn', f'{TEMP}/fastAud.wav'
        ]
        if (not debug):
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

        theFile = f'{TEMP}/fastAud.wav'

    samplerate, audioData = read(theFile)

    newL = getNewLength(chunks, speeds, fps)
    # Get the new length in samples with some extra leeway.
    estLeng = int(newL * samplerate * 1.5) + int(samplerate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0
    totalChunks = len(chunks)
    beginTime = time.time()

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * samplerate)
        audioSampleEnd = int(audioSampleStart + (samplerate / fps) *
                             (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]
        if (theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]

            if (theSpeed == 1):
                yPointerEnd = yPointer + spedChunk.shape[0]
                newAudio[yPointer:yPointerEnd] = spedChunk
            else:
                spedupAudio = np.zeros((0, 2), dtype=np.int16)
                with ArrReader(spedChunk, channels, samplerate, 2) as reader:
                    with ArrWriter(spedupAudio, channels, samplerate,
                                   2) as writer:
                        phasevocoder(reader.channels,
                                     speed=theSpeed).run(reader, writer)
                        spedupAudio = writer.output

                yPointerEnd = yPointer + spedupAudio.shape[0]
                newAudio[yPointer:yPointerEnd] = spedupAudio

            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * samplerate
            newSamples = int(mySamples / theSpeed)

            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        progressBar(chunkNum,
                    totalChunks,
                    beginTime,
                    title='Creating new audio')

    log.debug('yPointer: ' + str(yPointer))
    log.debug('samples per frame: ' + str(samplerate / fps))
    log.debug('Expected video length: ' + str(yPointer / (samplerate / fps)))
    newAudio = newAudio[:yPointer]
    write(outFile, samplerate, newAudio)

    if ('TEMP' in locals()):
        rmtree(TEMP)

    if (needConvert):
        conwrite('')
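
The pointer arithmetic in fastAudio converts each chunk's frame range into a sample range, then scales the number of samples written by the chunk's speed. A minimal, self-contained restatement of that math with illustrative numbers:

# Sketch of fastAudio's per-chunk sample math (values are illustrative).
fps = 30
samplerate = 48000
chunk = [60, 90, 1]      # frames 60-90, speed index 1
speeds = [99999, 2.0]    # index 1 -> play at 2x

audioSampleStart = int(chunk[0] / fps * samplerate)               # 96000
audioSampleEnd = int(audioSampleStart +
                     (samplerate / fps) * (chunk[1] - chunk[0]))  # 144000

theSpeed = speeds[chunk[2]]
mySamples = ((chunk[1] - chunk[0]) / fps) * samplerate  # samples read: 48000
newSamples = int(mySamples / theSpeed)                  # samples written: 24000
print(audioSampleStart, audioSampleEnd, newSamples)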
Code Example #10
def motionDetection(path: str, ffprobe: str, motionThreshold: float, log,
                    width: int, dilates: int, blur: int) -> np.ndarray:

    import cv2
    import subprocess
    from usefulFunctions import progressBar, conwrite

    cap = cv2.VideoCapture(path)

    # Find total frames
    if (path.endswith('.mp4') or path.endswith('.mov')):
        # Query Container
        cmd = [
            ffprobe, '-v', 'error', '-select_streams', 'v:0', '-show_entries',
            'stream=nb_frames', '-of', 'default=nokey=1:noprint_wrappers=1',
            path
        ]
    else:
        # Count the number of frames (slow)
        cmd = [
            ffprobe, '-v', 'error', '-count_frames', '-select_streams', 'v:0',
            '-show_entries', 'stream=nb_read_frames', '-of',
            'default=nokey=1:noprint_wrappers=1', path
        ]

    # Read what ffprobe piped in.
    process = subprocess.Popen(cmd,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    stdout, __ = process.communicate()
    output = stdout.decode()

    totalFrames = int(output) + 1
    log.debug(f'   - Cutting totalFrames: {totalFrames}')
    prevFrame = None
    gray = None
    hasMotion = np.zeros((totalFrames), dtype=np.bool_)
    total = None

    def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
        if (width is None and height is None):
            return image

        h, w = image.shape[:2]
        if (width is None):
            r = height / h
            dim = (int(w * r), height)
        else:
            r = width / w
            dim = (width, int(h * r))

        return cv2.resize(image, dim, interpolation=inter)

    import time
    beginTime = time.time()

    while cap.isOpened():
        if (gray is None):
            prevFrame = None
        else:
            prevFrame = gray

        ret, frame = cap.read()

        if (not ret):
            break

        cframe = int(cap.get(cv2.CAP_PROP_POS_FRAMES))  # current frame

        frame = resize(frame, width=width)
        gray = cv2.cvtColor(frame,
                            cv2.COLOR_BGR2GRAY)  # Convert frame to grayscale.
        if (blur > 0):
            gray = cv2.GaussianBlur(gray, (blur, blur), 0)

        if (prevFrame is not None):
            frameDelta = cv2.absdiff(prevFrame, gray)
            thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

            # Dilate the thresholded image to fill in holes.
            if (dilates > 0):
                thresh = cv2.dilate(thresh, None, iterations=dilates)

            if (total is None):
                total = thresh.shape[0] * thresh.shape[1]

            if (np.count_nonzero(thresh) / total >= motionThreshold):
                hasMotion[cframe] = True

        progressBar(cframe, totalFrames, beginTime, title='Detecting motion')

    cap.release()
    cv2.destroyAllWindows()

    conwrite('')

    return hasMotion
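
The motion test itself is just the fraction of pixels whose thresholded frame difference is non-zero, compared against motionThreshold. A rough NumPy-only sketch of that check on two synthetic frames; the threshold value 25 mirrors the cv2.threshold call above, and the motionThreshold value here is illustrative.

import numpy as np

# Two synthetic 8-bit grayscale frames; the second has a moved bright square.
prevFrame = np.zeros((120, 160), dtype=np.uint8)
gray = np.zeros((120, 160), dtype=np.uint8)
prevFrame[20:40, 20:40] = 200
gray[30:50, 30:50] = 200

# Same idea as cv2.absdiff + cv2.threshold(..., 25, 255, cv2.THRESH_BINARY).
frameDelta = np.abs(prevFrame.astype(np.int16) - gray.astype(np.int16))
thresh = frameDelta > 25

total = thresh.shape[0] * thresh.shape[1]
motionThreshold = 0.02  # illustrative value
hasMotion = np.count_nonzero(thresh) / total >= motionThreshold
print(np.count_nonzero(thresh) / total, hasMotion)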
Code Example #11
def exportToPremiere(myInput, output, clips, sampleRate, log):
    pathurl = 'file://localhost' + os.path.abspath(myInput)

    name = os.path.basename(myInput)
    audioFile = isAudioFile(myInput)

    ntsc = 'FALSE'
    ana = 'FALSE'  # anamorphic
    alphatype = 'none'
    depth = '16'
    if (not audioFile):
        try:
            import cv2
            conwrite('Grabbing video dimensions.')

            cap = cv2.VideoCapture(myInput)
            width = str(int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
            height = str(int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            cap.release()
            cv2.destroyAllWindows()
        except ImportError:
            width = '1280'
            height = '720'

    pixelar = 'square'  # pixel aspect ratio
    colordepth = '24'
    sr = sampleRate

    if (audioFile):
        with open(output, 'w', encoding='utf-8') as outfile:
            outfile.write('<!-- Generated by Auto-Editor -->\n')
            outfile.write(
                '<!-- https://github.com/WyattBlue/auto-editor -->\n')
            outfile.write('\n')
            outfile.write(
                '<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE xmeml>\n')
            outfile.write('<xmeml version="4">\n')
            outfile.write('\t<sequence>\n')
            outfile.write('\t<rate>\n')
            outfile.write('\t\t<timebase>30</timebase>\n')
            outfile.write('\t\t<ntsc>TRUE</ntsc>\n')
            outfile.write('\t</rate>\n')
            outfile.write('\t\t<name>Auto-Editor Audio Group</name>\n')
            outfile.write('\t\t<media>\n')
            outfile.write('\t\t\t<audio>\n')
            outfile.write('\t\t\t\t<numOutputChannels>2</numOutputChannels>\n')
            outfile.write('\t\t\t\t<format>\n')
            outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
            outfile.write(f'\t\t\t\t\t\t<depth>{depth}</depth>\n')
            outfile.write(f'\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
            outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
            outfile.write('\t\t\t\t</format>\n')
            outfile.write(
                '\t\t\t\t<track currentExplodedTrackIndex="0" totalExplodedTrackCount="2" premiereTrackType="Stereo">\n'
            )

            total = 0
            for j, clip in enumerate(clips):
                myStart = int(total)
                total += (clip[1] - clip[0]) / (clip[2] / 100)
                myEnd = int(total)

                outfile.write(f'\t\t\t\t\t<clipitem id="clipitem-{j+1}">\n')
                outfile.write(
                    '\t\t\t\t\t\t<masterclipid>masterclip-1</masterclipid>\n')
                outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')
                outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
                outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')
                outfile.write(
                    f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
                outfile.write(
                    f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n'
                )

                if (j == 0):
                    outfile.write('\t\t\t\t\t\t<file id="file-1">\n')
                    outfile.write(f'\t\t\t\t\t\t\t<name>{name}</name>\n')
                    outfile.write(
                        f'\t\t\t\t\t\t\t<pathurl>{pathurl}</pathurl>\n')
                    outfile.write('\t\t\t\t\t\t\t<rate>\n')
                    outfile.write('\t\t\t\t\t\t\t\t<timebase>30</timebase>\n')
                    outfile.write(f'\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                    outfile.write('\t\t\t\t\t\t\t</rate>\n')
                    outfile.write('\t\t\t\t\t\t\t<media>\n')
                    outfile.write('\t\t\t\t\t\t\t\t<audio>\n')
                    outfile.write(
                        '\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                    outfile.write(
                        f'\t\t\t\t\t\t\t\t\t\t<depth>{depth}</depth>\n')
                    outfile.write(
                        f'\t\t\t\t\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
                    outfile.write(
                        '\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                    outfile.write(
                        '\t\t\t\t\t\t\t\t\t<channelcount>2</channelcount>\n')
                    outfile.write('\t\t\t\t\t\t\t\t</audio>\n')
                    outfile.write('\t\t\t\t\t\t\t</media>\n')
                    outfile.write('\t\t\t\t\t\t</file>\n')
                else:
                    outfile.write(f'\t\t\t\t\t\t<file id="file-1"/>\n')
                outfile.write('\t\t\t\t\t</clipitem>\n')

            outfile.write('\t\t\t\t</track>\n')
            outfile.write('\t\t\t</audio>\n')
            outfile.write('\t\t</media>\n')
            outfile.write('\t</sequence>\n')
            outfile.write('</xmeml>')

            # Exit out of this function prematurely.
            return None

            # End of audio file code.

    with open(output, 'w', encoding='utf-8') as outfile:
        outfile.write('<!-- Generated by Auto-Editor -->\n')
        outfile.write('<!-- https://github.com/WyattBlue/auto-editor -->\n')
        outfile.write('\n')
        outfile.write(
            '<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE xmeml>\n')
        outfile.write('<xmeml version="4">\n')
        outfile.write('\t<sequence>\n')
        outfile.write('\t\t<name>Auto-Editor Video Group</name>\n')
        outfile.write('\t\t<media>\n')
        outfile.write('\t\t\t<video>\n')
        outfile.write('\t\t\t\t<format>\n')
        outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
        outfile.write('\t\t\t\t\t\t<rate>\n')
        outfile.write('\t\t\t\t\t\t\t<timebase>30</timebase>\n')
        outfile.write(f'\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
        outfile.write('\t\t\t\t\t\t</rate>\n')
        outfile.write(f'\t\t\t\t\t\t<width>{width}</width>\n')
        outfile.write(f'\t\t\t\t\t\t<height>{height}</height>\n')
        outfile.write(f'\t\t\t\t\t\t<anamorphic>{ana}</anamorphic>\n')
        outfile.write(
            f'\t\t\t\t\t\t<pixelaspectratio>{pixelar}</pixelaspectratio>\n')
        outfile.write('\t\t\t\t\t\t<fielddominance>none</fielddominance>\n')
        outfile.write(f'\t\t\t\t\t\t<colordepth>{colordepth}</colordepth>\n')
        outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
        outfile.write('\t\t\t\t</format>\n')
        outfile.write('\t\t\t\t<track>\n')

        # Handle clips.
        total = 0
        for j, clip in enumerate(clips):
            myStart = int(total)
            total += (clip[1] - clip[0]) / (clip[2] / 100)
            myEnd = int(total)

            outfile.write(f'\t\t\t\t\t<clipitem id="clipitem-{j+7}">\n')
            outfile.write(
                '\t\t\t\t\t\t<masterclipid>masterclip-2</masterclipid>\n')
            outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')
            outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
            outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')
            outfile.write(
                f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
            outfile.write(
                f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n')

            if (j == 0):
                outfile.write('\t\t\t\t\t\t<file id="file-2">\n')
                outfile.write(f'\t\t\t\t\t\t\t<name>{name}</name>\n')
                outfile.write(f'\t\t\t\t\t\t\t<pathurl>{pathurl}</pathurl>\n')
                outfile.write('\t\t\t\t\t\t\t<rate>\n')
                outfile.write('\t\t\t\t\t\t\t\t<timebase>30</timebase>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                outfile.write('\t\t\t\t\t\t\t</rate>\n')
                outfile.write('\t\t\t\t\t\t\t<media>\n')
                outfile.write('\t\t\t\t\t\t\t\t<video>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t\t<rate>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t\t\t<timebase>30</timebase>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t\t</rate>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<width>{width}</width>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t\t\t\t<height>{height}</height>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t\t\t\t<anamorphic>{ana}</anamorphic>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t\t\t\t<pixelaspectratio>{pixelar}</pixelaspectratio>\n'
                )
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t\t<fielddominance>none</fielddominance>\n'
                )
                outfile.write('\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                outfile.write('\t\t\t\t\t\t\t\t</video>\n')
                outfile.write('\t\t\t\t\t\t\t\t<audio>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<depth>{depth}</depth>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<channelcount>2</channelcount>\n')
                outfile.write('\t\t\t\t\t\t\t\t</audio>\n')
                outfile.write('\t\t\t\t\t\t\t</media>\n')
                outfile.write('\t\t\t\t\t\t</file>\n')
            else:
                outfile.write(f'\t\t\t\t\t\t<file id="file-2"/>\n')

            # Add the speed effect if necessary
            if (clip[2] != 100):
                outfile.write('\t\t\t\t\t\t<filter>\n')
                outfile.write('\t\t\t\t\t\t\t<effect>\n')
                outfile.write('\t\t\t\t\t\t\t\t<name>Time Remap</name>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effectid>timeremap</effectid>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effectcategory>motion</effectcategory>\n'
                )
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effecttype>motion</effecttype>\n')
                outfile.write('\t\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>variablespeed</parameterid>\n'
                )
                outfile.write('\t\t\t\t\t\t\t\t\t<name>variablespeed</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemin>0</valuemin>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemax>1</valuemax>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>0</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>speed</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>speed</name>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<valuemin>-100000</valuemin>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<valuemax>100000</valuemax>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t<value>{clip[2]}</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>reverse</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>reverse</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>frameblending</parameterid>\n'
                )
                outfile.write('\t\t\t\t\t\t\t\t\t<name>frameblending</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t</effect>\n')
                outfile.write('\t\t\t\t\t\t</filter>\n')

            # Linking for video blocks
            for i in range(3):
                outfile.write('\t\t\t\t\t\t<link>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t<linkclipref>clipitem-{(i*(len(clips)+1))+7+j}</linkclipref>\n'
                )
                if (i == 0):
                    outfile.write(
                        '\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                else:
                    outfile.write(
                        '\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')
                if (i == 2):
                    outfile.write('\t\t\t\t\t\t\t<trackindex>2</trackindex>\n')
                else:
                    outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')
                outfile.write(f'\t\t\t\t\t\t\t<clipindex>{j+1}</clipindex>\n')
                if (i == 1 or i == 2):
                    outfile.write('\t\t\t\t\t\t\t<groupindex>1</groupindex>\n')
                outfile.write('\t\t\t\t\t\t</link>\n')
            outfile.write('\t\t\t\t\t</clipitem>\n')
        outfile.write('\t\t\t\t</track>\n')
        outfile.write('\t\t\t</video>\n')
        outfile.write('\t\t\t<audio>\n')
        outfile.write('\t\t\t\t<numOutputChannels>2</numOutputChannels>\n')
        outfile.write('\t\t\t\t<format>\n')
        outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
        outfile.write(f'\t\t\t\t\t\t<depth>{depth}</depth>\n')
        outfile.write(f'\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
        outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
        outfile.write('\t\t\t\t</format>\n')
        outfile.write(
            '\t\t\t\t<track PannerIsInverted="true" PannerStartKeyframe="-91445760000000000,0.5,0,0,0,0,0,0" PannerName="Balance" currentExplodedTrackIndex="0" totalExplodedTrackCount="2" premiereTrackType="Stereo">\n'
        )

        # Audio Clips
        total = 0
        for j, clip in enumerate(clips):
            outfile.write(
                f'\t\t\t\t\t<clipitem id="clipitem-{len(clips)+8+j}" premiereChannelType="stereo">\n'
            )
            outfile.write(
                f'\t\t\t\t\t\t<masterclipid>masterclip-2</masterclipid>\n')
            outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')

            myStart = int(total)
            total += (clip[1] - clip[0]) / (clip[2] / 100)
            myEnd = int(total)

            outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
            outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')

            outfile.write(
                f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
            outfile.write(
                f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n')
            outfile.write('\t\t\t\t\t\t<file id="file-2"/>\n')
            outfile.write('\t\t\t\t\t\t<sourcetrack>\n')
            outfile.write('\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')
            outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')
            outfile.write('\t\t\t\t\t\t</sourcetrack>\n')

            # Add speed effect for audio blocks
            if (clip[2] != 100):
                outfile.write('\t\t\t\t\t\t<filter>\n')
                outfile.write('\t\t\t\t\t\t\t<effect>\n')
                outfile.write('\t\t\t\t\t\t\t\t<name>Time Remap</name>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effectid>timeremap</effectid>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effectcategory>motion</effectcategory>\n'
                )
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effecttype>motion</effecttype>\n')
                outfile.write('\t\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>variablespeed</parameterid>\n'
                )
                outfile.write('\t\t\t\t\t\t\t\t\t<name>variablespeed</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemin>0</valuemin>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemax>1</valuemax>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>0</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>speed</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>speed</name>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<valuemin>-100000</valuemin>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<valuemax>100000</valuemax>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t<value>{clip[2]}</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>reverse</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>reverse</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>frameblending</parameterid>\n'
                )
                outfile.write('\t\t\t\t\t\t\t\t\t<name>frameblending</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t</effect>\n')
                outfile.write('\t\t\t\t\t\t</filter>\n')

            if (audioFile):
                startOn = 1
            else:
                startOn = 0
            for i in range(startOn, 3):
                outfile.write('\t\t\t\t\t\t<link>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t<linkclipref>clipitem-{(i*(len(clips)+1))+7+j}</linkclipref>\n'
                )
                if (i == 0):
                    outfile.write(
                        '\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                else:
                    outfile.write(
                        '\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')

                if (i == 2):
                    outfile.write('\t\t\t\t\t\t\t<trackindex>2</trackindex>\n')
                else:
                    outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')

                outfile.write(f'\t\t\t\t\t\t\t<clipindex>{j+1}</clipindex>\n')

                if (i == 1 or i == 2):
                    outfile.write('\t\t\t\t\t\t\t<groupindex>1</groupindex>\n')
                outfile.write('\t\t\t\t\t\t</link>\n')
            outfile.write('\t\t\t\t\t</clipitem>\n')
        outfile.write('\t\t\t\t\t<outputchannelindex>1</outputchannelindex>\n')
        outfile.write('\t\t\t\t</track>\n')
        outfile.write('\t\t\t</audio>\n')
        outfile.write('\t\t</media>\n')
        outfile.write('\t</sequence>\n')
        outfile.write('</xmeml>')

    conwrite('')
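
All of the <start>, <end>, <in>, and <out> values written above come from a single running accumulator, advanced by each clip's source duration divided by its speed percentage. A small sketch of that arithmetic with two illustrative clips:

# Timeline math used for each <clipitem>: clip = [srcStart, srcEnd, speedPercent].
clips = [[0, 300, 100], [450, 750, 200]]  # illustrative frame ranges

total = 0
for j, clip in enumerate(clips):
    myStart = int(total)
    total += (clip[1] - clip[0]) / (clip[2] / 100)
    myEnd = int(total)
    inPoint = int(clip[0] / (clip[2] / 100))
    outPoint = int(clip[1] / (clip[2] / 100))
    print(j, myStart, myEnd, inPoint, outPoint)
# 0 0 300 0 300      (normal speed)
# 1 300 450 225 375  (200% speed halves the duration and the in/out points)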
Code Example #12
def fastAudio(ffmpeg: str, theFile: str, outFile: str, chunks: list,
              speeds: list, audioBit, samplerate, needConvert: bool, temp: str,
              log, fps: float):

    if (len(chunks) == 1 and chunks[0][2] == 0):
        log.error('Trying to create empty audio.')

    if (not os.path.isfile(theFile)):
        log.error('fastAudio.py could not find file: ' + theFile)

    if (needConvert):
        cmd = [ffmpeg, '-y', '-i', theFile]
        if (audioBit is not None):
            cmd.extend(['-b:a', str(audioBit)])
        cmd.extend(
            ['-ac', '2', '-ar',
             str(samplerate), '-vn', f'{temp}/faAudio.wav'])
        if (log.is_ffmpeg):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '8'])
        subprocess.call(cmd)

        theFile = f'{temp}/faAudio.wav'

    samplerate, audioData = read(theFile)

    newL = getNewLength(chunks, speeds, fps)
    # Get the new length in samples with some extra leeway.
    estLeng = int(newL * samplerate * 1.5) + int(samplerate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0
    totalChunks = len(chunks)
    beginTime = time.time()

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * samplerate)
        audioSampleEnd = int(audioSampleStart + (samplerate / fps) *
                             (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]
        if (theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]

            if (theSpeed == 1):
                yPointerEnd = yPointer + spedChunk.shape[0]
                newAudio[yPointer:yPointerEnd] = spedChunk
            else:
                spedupAudio = np.zeros((0, 2), dtype=np.int16)
                with ArrReader(spedChunk, channels, samplerate, 2) as reader:
                    with ArrWriter(spedupAudio, channels, samplerate,
                                   2) as writer:
                        phasevocoder(reader.channels,
                                     speed=theSpeed).run(reader, writer)
                        spedupAudio = writer.output

                yPointerEnd = yPointer + spedupAudio.shape[0]
                newAudio[yPointer:yPointerEnd] = spedupAudio

            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * samplerate
            newSamples = int(mySamples / theSpeed)

            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        progressBar(chunkNum,
                    totalChunks,
                    beginTime,
                    title='Creating new audio')

    log.debug('\n   - Total Samples: ' + str(yPointer))
    log.debug('   - Samples per Frame: ' + str(samplerate / fps))
    log.debug('   - Expected video length: ' + str(yPointer /
                                                   (samplerate / fps)))
    newAudio = newAudio[:yPointer]
    write(outFile, samplerate, newAudio)

    if (needConvert):
        conwrite('')
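
getNewLength is not included in these examples, but from how its result is used (newL * samplerate becomes an estimated output sample count) it should return the edited duration in seconds. The following is an inferred sketch of that calculation, not the project's actual implementation, assuming the same chunk format and the 99999 cut convention:

def getNewLengthSketch(chunks, speeds, fps):
    # Assumed behaviour: sum each kept chunk's frame count scaled by its speed,
    # skip chunks marked 99999, and return the total in seconds.
    timeInFrames = 0.0
    for chunk in chunks:
        speed = speeds[chunk[2]]
        if speed != 99999:
            timeInFrames += (chunk[1] - chunk[0]) / speed
    return timeInFrames / fps

# 90 frames at 1x plus 60 frames at 2x -> (90 + 30) frames -> 4.0 seconds at 30 fps.
print(getNewLengthSketch([[0, 90, 1], [90, 150, 2]], [99999, 1.0, 2.0], 30))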
Code Example #13
File: premiere.py Project: lijovklm/auto-editor
def exportToPremiere(ffmpeg, myInput, newOutput, silentT, zoomT, frameMargin,
                     sampleRate, videoSpeed, silentSpeed):
    print('Running from premiere.py')
    TEMP = tempfile.mkdtemp()

    fps = 29.97

    cmd = [
        ffmpeg, '-i', myInput, '-ab', '160k', '-ac', '2', '-ar',
        str(sampleRate), '-vn', f'{TEMP}/output.wav', '-nostats', '-loglevel',
        '0'
    ]
    subprocess.call(cmd)

    sampleRate, audioData = read(f'{TEMP}/output.wav')
    chunks = getAudioChunks(audioData, sampleRate, fps, silentT, zoomT,
                            frameMargin)
    rmtree(TEMP)

    clips = []
    newSpeed = [silentSpeed, videoSpeed]
    for chunk in chunks:
        if (newSpeed[chunk[2]] != 99999):
            clips.append([chunk[0], chunk[1], newSpeed[chunk[2]] * 100])

    if (len(clips) < 1):
        print('Error! No clips were created.')
        sys.exit(1)

    pathurl = 'file://localhost' + os.path.abspath(myInput)

    name = os.path.basename(myInput)

    extension = myInput[myInput.rfind('.'):]
    audioFile = extension in ['.wav', '.mp3', '.m4a']

    first = myInput[:myInput.rfind('.')]
    newFile = f'{first}.xml'

    ntsc = 'FALSE'
    ana = 'FALSE'  # anamorphic
    alphatype = 'none'
    depth = '16'
    if (not audioFile):
        try:
            import cv2
            conwrite('Grabbing video dimensions.')

            cap = cv2.VideoCapture(myInput)
            width = str(int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
            height = str(int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))

            cap.release()
            cv2.destroyAllWindows()

        except ImportError:
            width = '1280'
            height = '720'

    pixelar = 'square'  # pixel aspect ratio
    colordepth = '24'
    sr = sampleRate

    if (audioFile):
        with open(newFile, 'w', encoding='utf-8') as outfile:
            outfile.write('<!-- Generated by Auto-Editor -->\n')
            outfile.write(
                '<!-- https://github.com/WyattBlue/auto-editor -->\n')
            outfile.write('\n')
            outfile.write(
                '<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE xmeml>\n')
            outfile.write('<xmeml version="4">\n')
            outfile.write('\t<sequence>\n')
            outfile.write('\t<rate>\n')
            outfile.write('\t\t<timebase>30</timebase>\n')
            outfile.write('\t\t<ntsc>TRUE</ntsc>\n')
            outfile.write('\t</rate>\n')
            outfile.write('\t\t<name>Auto-Editor Audio Group</name>\n')
            outfile.write('\t\t<media>\n')
            outfile.write('\t\t\t<audio>\n')
            outfile.write('\t\t\t\t<numOutputChannels>2</numOutputChannels>\n')
            outfile.write('\t\t\t\t<format>\n')
            outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
            outfile.write(f'\t\t\t\t\t\t<depth>{depth}</depth>\n')
            outfile.write(f'\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
            outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
            outfile.write('\t\t\t\t</format>\n')
            outfile.write(
                '\t\t\t\t<track currentExplodedTrackIndex="0" totalExplodedTrackCount="2" premiereTrackType="Stereo">\n'
            )

            total = 0
            for j, clip in enumerate(clips):
                myStart = int(total)
                total += (clip[1] - clip[0]) / (clip[2] / 100)
                myEnd = int(total)

                outfile.write(f'\t\t\t\t\t<clipitem id="clipitem-{j+1}">\n')
                outfile.write(
                    '\t\t\t\t\t\t<masterclipid>masterclip-1</masterclipid>\n')
                outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')
                outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
                outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')
                outfile.write(
                    f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
                outfile.write(
                    f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n'
                )

                if (j == 0):
                    outfile.write('\t\t\t\t\t\t<file id="file-1">\n')
                    outfile.write(f'\t\t\t\t\t\t\t<name>{name}</name>\n')
                    outfile.write(
                        f'\t\t\t\t\t\t\t<pathurl>{pathurl}</pathurl>\n')
                    outfile.write('\t\t\t\t\t\t\t<rate>\n')
                    outfile.write('\t\t\t\t\t\t\t\t<timebase>30</timebase>\n')
                    outfile.write(f'\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                    outfile.write('\t\t\t\t\t\t\t</rate>\n')
                    outfile.write('\t\t\t\t\t\t\t<media>\n')
                    outfile.write('\t\t\t\t\t\t\t\t<audio>\n')
                    outfile.write(
                        '\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                    outfile.write(
                        f'\t\t\t\t\t\t\t\t\t\t<depth>{depth}</depth>\n')
                    outfile.write(
                        f'\t\t\t\t\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
                    outfile.write(
                        '\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                    outfile.write(
                        '\t\t\t\t\t\t\t\t\t<channelcount>2</channelcount>\n')
                    outfile.write('\t\t\t\t\t\t\t\t</audio>\n')
                    outfile.write('\t\t\t\t\t\t\t</media>\n')
                    outfile.write('\t\t\t\t\t\t</file>\n')
                else:
                    outfile.write(f'\t\t\t\t\t\t<file id="file-1"/>\n')
                outfile.write('\t\t\t\t\t</clipitem>\n')

            outfile.write('\t\t\t\t</track>\n')
            outfile.write('\t\t\t</audio>\n')
            outfile.write('\t\t</media>\n')
            outfile.write('\t</sequence>\n')
            outfile.write('</xmeml>')

            return newFile

            # End of audio file code.

    with open(newFile, 'w', encoding='utf-8') as outfile:
        outfile.write('<!-- Generated by Auto-Editor -->\n')
        outfile.write('<!-- https://github.com/WyattBlue/auto-editor -->\n')
        outfile.write('\n')
        outfile.write(
            '<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE xmeml>\n')
        outfile.write('<xmeml version="4">\n')
        outfile.write('\t<sequence>\n')
        outfile.write('\t\t<name>Auto-Editor Video Group</name>\n')
        outfile.write('\t\t<media>\n')
        outfile.write('\t\t\t<video>\n')
        outfile.write('\t\t\t\t<format>\n')
        outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
        outfile.write('\t\t\t\t\t\t<rate>\n')
        outfile.write('\t\t\t\t\t\t\t<timebase>30</timebase>\n')
        outfile.write(f'\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
        outfile.write('\t\t\t\t\t\t</rate>\n')
        outfile.write(f'\t\t\t\t\t\t<width>{width}</width>\n')
        outfile.write(f'\t\t\t\t\t\t<height>{height}</height>\n')
        outfile.write(f'\t\t\t\t\t\t<anamorphic>{ana}</anamorphic>\n')
        outfile.write(
            f'\t\t\t\t\t\t<pixelaspectratio>{pixelar}</pixelaspectratio>\n')
        outfile.write('\t\t\t\t\t\t<fielddominance>none</fielddominance>\n')
        outfile.write(f'\t\t\t\t\t\t<colordepth>{colordepth}</colordepth>\n')
        outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
        outfile.write('\t\t\t\t</format>\n')
        outfile.write('\t\t\t\t<track>\n')

        # Handle clips.
        total = 0
        for j, clip in enumerate(clips):
            myStart = int(total)
            total += (clip[1] - clip[0]) / (clip[2] / 100)
            myEnd = int(total)

            outfile.write(f'\t\t\t\t\t<clipitem id="clipitem-{j+7}">\n')
            outfile.write(
                '\t\t\t\t\t\t<masterclipid>masterclip-2</masterclipid>\n')
            outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')
            outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
            outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')
            outfile.write(
                f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
            outfile.write(
                f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n')

            if (j == 0):
                outfile.write('\t\t\t\t\t\t<file id="file-2">\n')
                outfile.write(f'\t\t\t\t\t\t\t<name>{name}</name>\n')
                outfile.write(f'\t\t\t\t\t\t\t<pathurl>{pathurl}</pathurl>\n')
                outfile.write('\t\t\t\t\t\t\t<rate>\n')
                outfile.write('\t\t\t\t\t\t\t\t<timebase>30</timebase>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                outfile.write('\t\t\t\t\t\t\t</rate>\n')
                outfile.write('\t\t\t\t\t\t\t<media>\n')
                outfile.write('\t\t\t\t\t\t\t\t<video>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t\t<rate>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t\t\t<timebase>30</timebase>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t\t</rate>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<width>{width}</width>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t\t\t\t<height>{height}</height>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t\t\t\t<anamorphic>{ana}</anamorphic>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t\t\t\t<pixelaspectratio>{pixelar}</pixelaspectratio>\n'
                )
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t\t<fielddominance>none</fielddominance>\n'
                )
                outfile.write('\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                outfile.write('\t\t\t\t\t\t\t\t</video>\n')
                outfile.write('\t\t\t\t\t\t\t\t<audio>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<depth>{depth}</depth>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<channelcount>2</channelcount>\n')
                outfile.write('\t\t\t\t\t\t\t\t</audio>\n')
                outfile.write('\t\t\t\t\t\t\t</media>\n')
                outfile.write('\t\t\t\t\t\t</file>\n')
            else:
                outfile.write(f'\t\t\t\t\t\t<file id="file-2"/>\n')

            # Add the speed effect if necessary
            if (clip[2] != 100):
                outfile.write('\t\t\t\t\t\t<filter>\n')
                outfile.write('\t\t\t\t\t\t\t<effect>\n')
                outfile.write('\t\t\t\t\t\t\t\t<name>Time Remap</name>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effectid>timeremap</effectid>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effectcategory>motion</effectcategory>\n'
                )
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effecttype>motion</effecttype>\n')
                outfile.write('\t\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>variablespeed</parameterid>\n'
                )
                outfile.write('\t\t\t\t\t\t\t\t\t<name>variablespeed</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemin>0</valuemin>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemax>1</valuemax>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>0</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>speed</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>speed</name>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<valuemin>-100000</valuemin>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<valuemax>100000</valuemax>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t<value>{clip[2]}</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>reverse</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>reverse</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>frameblending</parameterid>\n'
                )
                outfile.write('\t\t\t\t\t\t\t\t\t<name>frameblending</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t</effect>\n')
                outfile.write('\t\t\t\t\t\t</filter>\n')

            # Linking for video blocks
            for i in range(3):
                outfile.write('\t\t\t\t\t\t<link>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t<linkclipref>clipitem-{(i*(len(clips)+1))+7+j}</linkclipref>\n'
                )
                if (i == 0):
                    outfile.write(
                        '\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                else:
                    outfile.write(
                        '\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')
                if (i == 2):
                    outfile.write('\t\t\t\t\t\t\t<trackindex>2</trackindex>\n')
                else:
                    outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')
                outfile.write(f'\t\t\t\t\t\t\t<clipindex>{j+1}</clipindex>\n')
                if (i == 1 or i == 2):
                    outfile.write('\t\t\t\t\t\t\t<groupindex>1</groupindex>\n')
                outfile.write('\t\t\t\t\t\t</link>\n')
            outfile.write('\t\t\t\t\t</clipitem>\n')
        outfile.write('\t\t\t\t</track>\n')
        outfile.write('\t\t\t</video>\n')
        outfile.write('\t\t\t<audio>\n')
        outfile.write('\t\t\t\t<numOutputChannels>2</numOutputChannels>\n')
        outfile.write('\t\t\t\t<format>\n')
        outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
        outfile.write(f'\t\t\t\t\t\t<depth>{depth}</depth>\n')
        outfile.write(f'\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
        outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
        outfile.write('\t\t\t\t</format>\n')
        outfile.write(
            '\t\t\t\t<track PannerIsInverted="true" PannerStartKeyframe="-91445760000000000,0.5,0,0,0,0,0,0" PannerName="Balance" currentExplodedTrackIndex="0" totalExplodedTrackCount="2" premiereTrackType="Stereo">\n'
        )

        # Audio Clips
        total = 0
        for j, clip in enumerate(clips):
            outfile.write(
                f'\t\t\t\t\t<clipitem id="clipitem-{len(clips)+8+j}" premiereChannelType="stereo">\n'
            )
            outfile.write(
                f'\t\t\t\t\t\t<masterclipid>masterclip-2</masterclipid>\n')
            outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')

            myStart = int(total)
            total += (clip[1] - clip[0]) / (clip[2] / 100)
            myEnd = int(total)

            outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
            outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')

            outfile.write(
                f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
            outfile.write(
                f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n')
            outfile.write('\t\t\t\t\t\t<file id="file-2"/>\n')
            outfile.write('\t\t\t\t\t\t<sourcetrack>\n')
            outfile.write('\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')
            outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')
            outfile.write('\t\t\t\t\t\t</sourcetrack>\n')

            # Add speed effect for audio blocks
            if (clip[2] != 100):
                outfile.write('\t\t\t\t\t\t<filter>\n')
                outfile.write('\t\t\t\t\t\t\t<effect>\n')
                outfile.write('\t\t\t\t\t\t\t\t<name>Time Remap</name>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effectid>timeremap</effectid>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effectcategory>motion</effectcategory>\n'
                )
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effecttype>motion</effecttype>\n')
                outfile.write('\t\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>variablespeed</parameterid>\n'
                )
                outfile.write('\t\t\t\t\t\t\t\t\t<name>variablespeed</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemin>0</valuemin>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemax>1</valuemax>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>0</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>speed</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>speed</name>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<valuemin>-100000</valuemin>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<valuemax>100000</valuemax>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t<value>{clip[2]}</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>reverse</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>reverse</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>frameblending</parameterid>\n'
                )
                outfile.write('\t\t\t\t\t\t\t\t\t<name>frameblending</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t</effect>\n')
                outfile.write('\t\t\t\t\t\t</filter>\n')

            if (audioFile):
                startOn = 1
            else:
                startOn = 0
            for i in range(startOn, 3):
                outfile.write('\t\t\t\t\t\t<link>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t<linkclipref>clipitem-{(i*(len(clips)+1))+7+j}</linkclipref>\n'
                )
                if (i == 0):
                    outfile.write(
                        '\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                else:
                    outfile.write(
                        '\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')

                if (i == 2):
                    outfile.write('\t\t\t\t\t\t\t<trackindex>2</trackindex>\n')
                else:
                    outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')

                outfile.write(f'\t\t\t\t\t\t\t<clipindex>{j+1}</clipindex>\n')

                if (i == 1 or i == 2):
                    outfile.write('\t\t\t\t\t\t\t<groupindex>1</groupindex>\n')
                outfile.write('\t\t\t\t\t\t</link>\n')
            outfile.write('\t\t\t\t\t</clipitem>\n')
        outfile.write('\t\t\t\t\t<outputchannelindex>1</outputchannelindex>\n')
        outfile.write('\t\t\t\t</track>\n')
        outfile.write('\t\t\t</audio>\n')
        outfile.write('\t\t</media>\n')
        outfile.write('\t</sequence>\n')
        outfile.write('</xmeml>')

    conwrite('')
    return newFile
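
The <linkclipref> IDs come from the expression (i * (len(clips) + 1)) + 7 + j, which for i = 1 reduces to len(clips) + 8 + j, the same ID given to the audio clip items. A quick worked example with an illustrative clip count of 3:

# Which clipitem IDs get linked together for clip j, given N clips.
N = 3  # illustrative clip count
for j in range(N):
    ids = [(i * (N + 1)) + 7 + j for i in range(3)]
    print(f'clip {j}: video clipitem-{ids[0]}, '
          f'audio clipitem-{ids[1]} (track 1), clipitem-{ids[2]} (track 2)')
# clip 0: video clipitem-7, audio clipitem-11 (track 1), clipitem-15 (track 2)
# clip 1: video clipitem-8, audio clipitem-12 (track 1), clipitem-16 (track 2)
# clip 2: video clipitem-9, audio clipitem-13 (track 1), clipitem-17 (track 2)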