Beispiel #1
0
import moviepy.editor as mpe

# Load the source video and a separate background-music track.
my_clip = mpe.VideoFileClip('some_clip.mp4')
audio_background = mpe.AudioFileClip('some_background.mp3')
# Mix the clip's own audio with the background track, then attach the mix
# to produce the final clip.
final_audio = mpe.CompositeAudioClip([my_clip.audio, audio_background])
final_clip = my_clip.set_audio(final_audio)
Beispiel #2
0
# =============================================================================
# NOTE(review): `io`, `preproc`, `args`, `mpy` and `plt` are defined in the
# unseen earlier part of this script — confirm against the full file.
photometry_file_npy = io.convert_to_npy(
    photometry_file_csv, **args)  # Convert CSV file to NPY if needed

#%% ===========================================================================
# Preprocessing of the photometry data
# =============================================================================
photometry_data = preproc.load_photometry_data(
    photometry_file_npy, **args)  # Preprocesses the photometry data at once
args[
    "photometry_data"] = photometry_data  # Adds a new parameter containing all the photometry data

#%% ===========================================================================
# Reading the Video data
# =============================================================================
video_clip = mpy.VideoFileClip(video_file).subclip(
    t_start=args["video_start"], t_end=args["video_end"])  # Loads the video
plt.imshow(video_clip.get_frame(
    0))  # [OPTIONAL] Displays the first frame of the video

#%% ===========================================================================
# Cropping the video field of view (for future display)
# =============================================================================
# Crop coordinates are hard-coded for this particular recording setup.
video_clip_cropped = video_clip.crop(
    x1=80, y1=62, x2=844, y2=402)  # Crops the video file to the cage only
plt.imshow(video_clip_cropped.get_frame(
    0))  # [OPTIONAL] Displays the first frame of the cropped video

#%% ===========================================================================
# Importing behavioral data
# =============================================================================
args["behavior_to_segment"] = "Behavior1"  # Which annotated behavior to segment downstream
Beispiel #3
0
def compose(instruments, midipattern, width, height, source_dir,
            volume_file_name, num_threads, instrument_config_file):
    """Render each analysed MIDI track to its own video clip in a worker
    process, then composite all per-track clips into one video.

    Returns a moviepy ``CompositeVideoClip`` of size ``(width, height)``.
    Re-raises any exception a worker reports via ``MSG_FATAL_ERROR``.
    """
    _create_working_dir()
    volumes = _try_load_json_file(volume_file_name)
    instrument_config = _try_load_json_file(instrument_config_file)
    tempo = midiparse.get_tempo(midipattern)
    resolution = midiparse.get_resolution(midipattern)
    # Seconds per MIDI pulse (tick) at the given tempo and resolution.
    pulse_length = 60.0 / (tempo * resolution)

    # analysed_tracks :: {(name1, name2, ...): (notes, max_velocity)}
    analysed_tracks = _analyse_all_tracks(midipattern, resolution)

    # Build one worker process per track; each reports progress back to us
    # through its own multiprocessing.Queue.
    track_clip_file_names = []
    total_num_tracks = len(analysed_tracks)
    processes = []
    for instrument_names, (notes, max_velocity) in analysed_tracks.items():

        file_name = os.path.join(WORKING_DIR_NAME,
                                 '-'.join(instrument_names) + '.mp4')
        track_clip_file_names.append((len(notes), file_name))
        queue = multiprocessing.Queue()
        args = (instruments, instrument_names, source_dir, instrument_config,
                notes, pulse_length, width, height, max_velocity, queue,
                file_name, volumes, total_num_tracks)
        process = multiprocessing.Process(target=_process_track, args=args)
        processes.append((instrument_names, process, queue))

    running_processes = []

    # NOTE(review): the bar's max is the number of tracks, but next() is
    # called once per MSG_PROCESSED_SEGMENT — confirm those units agree.
    progress_bar = bar.ChargingBar('', max=total_num_tracks)
    done = False
    num_processed_tracks = 0
    while not done:
        time.sleep(0.1)  # avoid busy-waiting on the queues
        done_instruments = []
        for instrument_names, process, queue in running_processes:
            while not queue.empty():
                msg_type, contents = queue.get()
                if msg_type == MSG_PROCESSED_SEGMENT:
                    num_processed_tracks += 1
                    progress_bar.next()
                elif msg_type == MSG_DONE:
                    done_instruments.append(instrument_names)
                elif msg_type == MSG_FATAL_ERROR:
                    # A worker failed; propagate its exception here.
                    raise contents

        processes_changed = False

        # remove the instruments that are done
        for instrument_names in done_instruments:
            # index defaults to 0 when no match is found; safe only because
            # done_instruments always originate from running_processes.
            index = 0
            for i, (i_name, p, q) in enumerate(running_processes):
                if instrument_names == i_name:
                    index = i
                    break
            _, process, queue = running_processes.pop(index)
            process.join()
            processes_changed = True

        # Top up the pool so at most num_threads workers run at once.
        while len(running_processes) < num_threads and len(processes) > 0:
            p = processes.pop()
            p[1].start()
            running_processes.append(p)
            processes_changed = True

        if not running_processes:
            done = True

        if processes_changed and not done:
            progress_message = "Processing instruments: "
            for names, _, _ in running_processes:
                progress_message += '(' + ', '.join(names) + ')' + ', '
            progress_message = '\n' + progress_message[:-2]  # drop trailing ', '
            print(progress_message)

    progress_bar.finish()

    # Tracks with the most notes first — presumably so _partition assigns
    # them the earlier slots in the layout; confirm against _partition.
    track_clip_file_names.sort(key=lambda k: k[0], reverse=True)

    final_clips = []
    for i, (_, file_name) in enumerate(track_clip_file_names):
        clip = edit.VideoFileClip(file_name)
        x, y, w, h = _partition(width, height, len(track_clip_file_names), i)
        final_clips.append(
            fx.resize(clip, newsize=(w, h)).set_position((x, y)))
    return edit.CompositeVideoClip(size=(width, height), clips=final_clips)
Beispiel #4
0
            # Turn each frame image into a 1-second clip and concatenate them.
            clips = [ImageClip(m).set_duration(1)
                     for m in frame_array]
            # clip = ImageSequenceClip(clips, fps=30)
            # clip = ImageSequenceClip(np.array(frame_array), fps=30)
            concat_clip = concatenate_videoclips(clips, method="chain")
            # concat_clip.resize(width=1920, height=1080)
            # Render the silent intermediate video (note the '_v' suffix).
            concat_clip.write_videofile(
                pathVideoOut + "\\" + filename.split('.txt')[0].split("\\")[-1] + '_v.mp4',
                bitrate='50000k',
                fps=FPS, audio_bitrate='3000k',
                threads=4)
            time.sleep(2)  # NOTE(review): presumably waits for the file to be flushed — confirm

            print(pathVideoOut + "\\" + filename.split('.txt')[0].split("\\")[-1] + '.mp4')
            my_clip = mpe.VideoFileClip(
                filename=pathVideoOut + "\\" + filename.split('.txt')[0].split("\\")[-1] + '_v.mp4')
            # audio_background = mpe.AudioFileClip('welcome.mp3')
            # print("type audio_background", type(audio_background))
            # Mix narration and background music (both defined outside this view).
            final_audio = mpe.CompositeAudioClip([audio_background, bg_music])
            # final_clip = CompositeVideoClip([my_clip, subtitles.set_position(('center', 'bottom'))])
            final_clip = my_clip.set_audio(final_audio)
            path_f_video = pathVideoOut + "\\" + filename.split('.txt')[0].split("\\")[-1] + '.mp4'
            final_clip.write_videofile(path_f_video, bitrate='50000k',
                                       fps=FPS, audio_bitrate='3000k',
                                       threads=4)
            time.sleep(1)
            # Zero frames in the rendered file indicates a failed render:
            # count it and delete the file.
            v_len = video_to_frames(path_f_video)
            if v_len == 0:
                null_video_counter += 1
                # delete file forever (Windows `del` via the shell)
                os.system("del " + path_f_video)
 def __init__(self, path):
     """Load the video at *path*, resize it to 224x224, and pre-extract
     its frames (all but the last) into a numpy array."""
     self.path = path
     # File name without directory or extension (Windows backslash paths).
     self.name = self.path.rsplit("\\")[-1].split(".")[0]
     self.clip = mpy.VideoFileClip(self.path).resize((224, 224))
     # Frames for prediction — presumably the last frame is dropped on
     # purpose; confirm against chop_video_into_frames.
     self.predict_data = np.asarray(
         self.chop_video_into_frames(self.clip)[:-1])
Beispiel #6
0
import moviepy.editor as moviepy

# Re-encode the AVI render as MP4.
clip = moviepy.VideoFileClip("output/final_project.avi")
clip.write_videofile("output/final_project.mp4")
    def split(self, classlist, datadictionary):

        """
            Splits each source video into per-action clips and writes JSON
            metadata at three levels: per source video, per clip, per class.

            Arguments:
                self
                classlist      -- List containing the names of the classes(actions) in that particular sheet
                datadictionary -- Dictionary that encapsulates the entire information about Excel sheet
                              keys -- "video_des + str(index)", "timeline + str(index)",
                                      "actions + str(index)" , "subline" + str(index)
            Returns:
                Null


        """

        global videobaselink
        global default_dir_link

        # Per-class output directories/ids; extractjson restores previously
        # written per-class JSON so new clips are appended, not overwritten.
        dictionary, dictionary2 = self.create_directory(classlist=classlist)
        tempdictionary = self.extractjson(end=len(dictionary), dictionary2=dictionary2)
        templist = tempdictionary.keys()
        jsondictionary = {}

        for i in range(len(classlist)):
            jsondictionary[str(classlist[i])] = []

        for i in templist:
            jsondictionary[str(i)] = tempdictionary[str(i)]

        print(datadictionary.keys())
        # NOTE(review): the loop advances 4 keys per video but indexes with
        # int(j/3); for j = 0, 4, 8, 12 that yields 0, 1, 2, 4 — index 3 is
        # skipped. Step and divisor should probably match — confirm intent.
        for j in range(0, len(datadictionary), 4):

            video_des = datadictionary["video_des"+str(int(j/3))]
            annotations = datadictionary["timeline"+str(int(j/3))]
            actions = datadictionary["actions"+str(int(j/3))]
            subclass = datadictionary["subline" + str(int(j / 3))]
            video_link = os.path.join(videobaselink, video_des["Videoname"])
            video_des["path"] = str(os.path.join(video_link.split('/')[-2], video_link.split('/')[-1]))
            videojson = []

            looplength = len(actions)

            for i in range(looplength):

                try:
                    # Clip i spans annotations[i] .. annotations[i+1].
                    destination = str(os.path.join(default_dir_link, dictionary[str(actions[i])]))
                    # Each instance contributes a video plus a JSON file,
                    # hence the division by 2 to count existing instances.
                    existing_elements = int(len(os.listdir(destination))/2)
                    split_video_name = "instance" + str(existing_elements + 1) + ".mp4"
                    final_video_link = str(os.path.join(destination, split_video_name))
                    relative_path = str(os.path.join(destination.split('/')[-2],
                                                     destination.split('/')[-1], split_video_name))
                    diff = str(float(annotations[i+1]) - float(annotations[i]))+" Secs"
                    video = mvp.VideoFileClip(video_link).subclip(float(annotations[i]), float(annotations[i + 1]))
                    # to change the fps add eg: fps=50
                    video.write_videofile(final_video_link)
                    frames = int(video.fps * video.duration)

                    data = {"videoname": split_video_name,
                            "duration": diff,
                            "frames": frames,
                            "path": relative_path,
                            "class": str(actions[i]),
                            "classid": dictionary[str(actions[i])],
                            "subclass": str(subclass[i]),
                            "operatorno": video_des["Operatorno"],
                            "viewpoints": video_des["ViewPoints"],
                            "basevideodes": {
                                "videoname": video_des["Videoname"],
                                "duration": video_des["Videolength"],
                                "videospecs": video_des["Videospecs"],
                                "datetime": video_des["datetime"],
                                "path": video_des["path"]
                            }
                            }

                    # Per-video JSON: all clips cut from this source video.
                    try:
                        videojson.append(data)
                        dirname = os.path.join(default_dir_link, "Videojson")
                        if not os.path.exists(dirname):
                            os.mkdir(dirname)
                        jsonpath2 = os.path.join(dirname, str(video_des["Videoname"]).split(".")[0] + ".json")
                        self.create_json(data=videojson, path=jsonpath2)
                    except Exception as e:
                        message = "Exception : " + str(e) + " Comments:" + "Cannot create Video JSON File " + \
                                  annotations[i] + " " + annotations[i + 1] + " " + actions[i] + " "
                        print(message)
                        self.create_log_file(message)

                    # Per-clip JSON written next to the clip itself.
                    try:
                        textfilename = split_video_name.split(".")[0] + ".json"
                        jsonpath = os.path.join(destination, textfilename)
                        self.create_json(data=data, path=jsonpath)
                    except Exception as e:
                        message = "Exception : " + str(e) + " Comments:" + "Cannot create Clip JSON File " + \
                                  annotations[i] + " " + annotations[i + 1] + " " + actions[i] + " "
                        print(message)
                        self.create_log_file(message)

                    # Per-class JSON: every clip of this action across videos.
                    try:
                        jsondictionary[str(actions[i])].append(data)
                        path2 = os.path.join(default_dir_link, str(dictionary[str(actions[i])] + ".json"))
                        self.create_json(data=jsondictionary[str(actions[i])], path=path2)
                    except Exception as e:
                        message = "Exception : " + str(e) + " Comments:" + "Cannot create Class JSON File " + \
                                  annotations[i] + " " + annotations[i + 1] + " " + actions[i] + " "
                        print(message)
                        self.create_log_file(message)

                except Exception as e:
                    message = "Exception : " + str(e) + " Comments:" + "cannot be clipped " + annotations[i]+" " + \
                              annotations[i+1] + " " + actions[i]+" "
                    print(message)
                    self.create_log_file(message)
Beispiel #8
0
def create_clip(cliptype, options):
    '''
    Creates a moviepy clip.
    Output will either by a VideoFileClip, TextClip, or ImageClip
    (or None for an unknown cliptype).

    Fixed: the original used Python-2-only `print` statements, which are
    syntax errors under Python 3, and encoded the caption text to bytes
    (TextClip expects str on Python 3).
    '''

    if cliptype == 'sequencer':
        source_url = options['source']
        if isinstance(source_url, list):
            source_url = source_url[0]

        source = download_asset(source_url)
        clip = mpy.VideoFileClip(source)

        global_start = float(options['start'])
        global_end = float(options['end'])
        duration = global_end - global_start
        clip_length = float(options['duration'])
        subclip_start = float(options['from'])
        subclip_end = subclip_start + duration

        # hack to prevent moviepy error when trying to make a subclip that's too long
        # NOTE(review): clamping to `duration - .01` rather than
        # `clip_length - .01` looks suspicious — confirm intent.
        if subclip_end >= clip_length:
            subclip_end = duration - .01

        clip = clip.subclip(subclip_start, subclip_end).set_start(global_start)

        width, height, x, y = get_dimensions(options)
        clip = clip.resize((width, height)).set_pos((x, y))

        print(subclip_start, subclip_end, global_start, global_end)
        print(width, height, x, y)
        print('')

        return clip

    elif cliptype == 'image':
        source = download_file(options["src"])
        global_start = float(options['start'])
        global_end = float(options['end'])
        duration = global_end - global_start

        width, height, x, y = get_dimensions(options)
        # Guard against zero-sized dimensions, which cannot be resized to.
        if width == 0:
            width = 1
        if height == 0:
            height = 1

        # SVG sources cannot be rasterized here; substitute a placeholder.
        ext = source.split('.')[-1].lower()
        if ext in ['svg']:
            source = "http://www.msss.com/moc_gallery/broken_icon.jpg"

        clip = mpy.ImageClip(source).set_duration(duration).set_start(
            global_start)
        clip = clip.resize((width, height)).set_pos((x, y))

        return clip

    elif cliptype == 'text':
        global_start = float(options['start'])
        global_end = float(options['end'])
        duration = global_end - global_start
        font = options['fontFamily']
        alignment = {
            'left': 'NorthWest',
            'center': 'center',
            'right': 'NorthEast'
        }[options['alignment']]

        # Python 3 fix: keep the caption as str (no utf-8 encode to bytes).
        text = options['text']
        options['height'] = int(options['fontSize'])
        width, height, x, y = get_dimensions(options)
        if not alignment == 'center':
            height = HEIGHT - y

        # NOTE(review): under Python 2 this division was integer division
        # when HEIGHT is an int; Python 3 yields a float fontsize — confirm
        # that is acceptable for TextClip.
        clip = mpy.TextClip(txt=text,
                            size=(width, height),
                            method='caption',
                            color=options['fontColor'],
                            align=alignment,
                            font=font,
                            fontsize=HEIGHT * int(options['fontSize']) / 100)

        clip = clip.set_duration(duration).set_start(global_start).set_pos(
            (x, y))

        return clip

    return None
import imageio
import os
import moviepy.editor as mp
# Cut the first two seconds of cn.mp4 and save them as cnshort.mp4.
clip1 = mp.VideoFileClip("cn.mp4").subclip(0,2)
# NOTE(review): write_videofile's return value is bound but appears unused.
clips=clip1.write_videofile("cnshort.mp4")
clip=os.path.abspath('cnshort.mp4')  # absolute path handed to gifmaker() below
#print(clip)
def gifmaker(inputPath, targetFormat):
    """Convert the movie at *inputPath* to *targetFormat* (e.g. '.gif'),
    writing the result next to the input with the new extension."""
    outputPath = os.path.splitext(inputPath)[0] + targetFormat
    print(f'converting {inputPath} \n to {outputPath}')

    src = imageio.get_reader(inputPath)
    # Preserve the source frame rate in the output.
    dst = imageio.get_writer(outputPath, fps=src.get_meta_data()['fps'])
    for frame in src:
        dst.append_data(frame)
        print(f'Frame{frame}')
    print('Done!')
    dst.close()

# Convert the freshly written MP4 into an animated GIF alongside it.
gifmaker(clip,'.gif')
Beispiel #10
0
    def convertMediaFiles():
        """Watermark every entry in `files`: composite `logo_filepath` onto
        MP4 videos (bottom-right) and paste it onto still images, writing
        results into `dst_dir_path` and logging progress to the Tk widgets.

        Relies on closure variables: src_dir_path, dst_dir_path, files,
        logo_filepath, txt_log, pgr_bar, unit, lang, addLine.
        """
        try:
            # create the destination directory
            os.makedirs(dst_dir_path, exist_ok=True)

            logo_img = Image.open(logo_filepath)

            def scaleToImage(size_logo, size_img):
                """Rescale the logo relative to the target media size.

                NOTE(review): when the logo exceeds the image, `x` is
                negative and `y - y * x` ENLARGES the logo instead of
                shrinking it; the 20%-of-media cap below masks this in
                practice — confirm intent.
                """
                new_size = size_logo

                for i in range(2):
                    if size_logo[i] > size_img[i]:
                        x = (size_img[i] - size_logo[i]) / size_logo[i]
                        new_size = [math.floor(y - y * x) for y in new_size]

                return tuple(new_size)

            for file in files:
                src_filepath = src_dir_path + '/' + file
                dst_filepath = dst_dir_path + '/' + file
                txt_log.insert(tk.END, addLine(lang['msg2'] + src_filepath))

                if file.lower().endswith('.mp4'):
                    video = mp.VideoFileClip(src_filepath)
                    new_logo_size = scaleToImage(logo_img.size, video.size)
                    # Cap the logo at 20% of the video's smaller dimension.
                    limit_size = math.floor(min(video.size) * 0.2)
                    m = min(new_logo_size)

                    if m > limit_size:
                        x = (m - limit_size) / m
                        new_logo_size = tuple(
                            [math.floor(y - y * x) for y in new_logo_size])

                    print(video.size, new_logo_size)

                    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10
                    # (use Image.LANCZOS) — confirm the pinned Pillow version.
                    logo = mp.ImageClip(
                        np.array(
                            logo_img.resize(new_logo_size,
                                            Image.ANTIALIAS))).set_duration(
                                                video.duration).set_pos(
                                                    ('right', 'bottom'))
                    final = mp.CompositeVideoClip([video, logo],
                                                  size=video.size).subclip(0)
                    final.write_videofile(dst_filepath)
                    final.close()
                    logo.close()
                    video.close()
                    txt_log.insert(tk.END,
                                   addLine(lang['msg8'] + dst_filepath))
                else:
                    img = Image.open(src_filepath)
                    new_logo_size = scaleToImage(logo_img.size, img.size)
                    # Same 20% cap for still images.
                    limit_size = math.floor(min(img.size) * 0.2)
                    m = min(new_logo_size)

                    if m > limit_size:
                        x = (m - limit_size) / m
                        new_logo_size = tuple(
                            [math.floor(y - y * x) for y in new_logo_size])

                    print(img.size, new_logo_size)

                    scaled_logo_img = logo_img.resize(new_logo_size,
                                                      Image.ANTIALIAS)
                    # Paste at bottom-right; use the logo itself as the mask
                    # when it has an alpha channel (RGBA).
                    img.paste(scaled_logo_img,
                              [(a - new_logo_size[i])
                               for i, a in enumerate(img.size)],
                              (scaled_logo_img
                               if scaled_logo_img.mode == "RGBA" else None))
                    img.save(dst_filepath, quality=100)
                    scaled_logo_img.close()
                    img.close()
                    txt_log.insert(tk.END,
                                   addLine(lang['msg7'] + dst_filepath))

                pgr_bar['value'] += unit

            logo_img.close()
            txt_log.insert(tk.END, '\n')
            txt_log.insert(tk.END, addLine(lang['msg9'] + dst_dir_path))
            txt_log.insert(tk.END, '\n')
            pgr_bar['value'] = 100
        except Exception as e:
            traceback.print_exc()
            txt_log.insert(tk.END, addLine(lang['msg10']))
Beispiel #11
0
def call():
    """Extract the audio track of the video named in entry `e` and write
    it to the path named in entry `e1`."""
    import moviepy.editor as mp
    source_clip = mp.VideoFileClip(e.get())
    source_clip.audio.write_audiofile(e1.get())
Beispiel #12
0
    # Fragment of the shutil.rmtree onerror handler (see handleRemoveReadonly
    # used below): `exc` is the sys.exc_info() tuple shutil passes in.
    excvalue = exc[1]
    if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
        # Permission denied: make the path fully writable, then retry the
        # failed operation (rmdir/remove).
        os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
        func(path)
    else:
        # Anything other than EACCES is unexpected — re-raise it.
        raise


# NOTE(review): `input` here shadows the builtin — it must be a path defined
# earlier in the unseen part of this script; confirm upstream.
video = cv2.VideoCapture(input)
# The FPS property constant moved between OpenCV 2.x and 3.x.
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
if int(major_ver) < 3:
    fps = video.get(cv2.cv.CV_CAP_PROP_FPS)
else:
    fps = video.get(cv2.CAP_PROP_FPS)

# Keep the original soundtrack so it can be re-attached after processing.
videoclip = mpe.VideoFileClip(input)
background_music = videoclip.audio

# Recreate the frame-dump directory from scratch; the onerror handler clears
# read-only bits so rmtree can succeed on Windows.
path = "photos"
shutil.rmtree(path, ignore_errors=False, onerror=handleRemoveReadonly)
time.sleep(1)
os.makedirs(path)

# Dump every frame as a grayscale JPEG: photos/kang<i>.jpg
i = 0
while (video.isOpened()):
    ret, frame = video.read()
    if ret == False:
        break
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imwrite('photos/kang' + str(i) + '.jpg', frame)
    i += 1
def clip_maker(output_file="animation"):
    """Render `<output_file>.gif` as `<output_file>.mp4`, then delete all
    GIF files in the working directory (matching the original behavior).

    Bug fix: the original ran `os.system("rm *.gif")` BEFORE rendering,
    deleting the very file the clip was still streaming from — that only
    worked thanks to POSIX open-inode semantics and fails on Windows.
    Deletion now happens after the MP4 is written, via portable
    glob/os.remove instead of shelling out to `rm`.
    """
    import glob

    clip = mp.VideoFileClip(output_file + '.gif')
    clip.write_videofile(output_file + ".mp4")
    clip.close()
    for gif in glob.glob("*.gif"):
        os.remove(gif)
Beispiel #14
0
import io
import youtube_dl
from tkinter import *
from PIL import ImageTk, Image
from urllib.request import urlopen
import urllib.request
from time import gmtime
from time import strftime
import subprocess
import moviepy.editor as mpe

my_clip = mpe.VideoFileClip('resd.mp4')
# Bug fix: the original wrote to 'resd.mp4' — the very file being read.
# moviepy streams frames from the source while encoding, so overwriting the
# source in place truncates/corrupts it. Write to a separate file instead.
my_clip.write_videofile("resd_out.mp4", audio=True)
Beispiel #15
0
def video_metadata(filename):
    """Return the (size, duration) pair of the video at *filename*."""
    video = me.VideoFileClip(filename)
    return video.size, video.duration
def convert_mp4_to_wav(vid_file_path):
    """Extract the audio track of the video at *vid_file_path* and write
    it to TEMP_FILE_PATH (format inferred from that path's extension)."""
    video = mp.VideoFileClip(vid_file_path)
    video.audio.write_audiofile(TEMP_FILE_PATH)
Beispiel #17
0
def converttomp4(file, newname):
    """Re-encode the video at *file* and save it as '<newname>.mp4'."""
    source = moviepy.VideoFileClip(file)
    source.write_videofile(f"{newname}.mp4")
Beispiel #18
0
                                reverse=True)

# For every directory: overlay a timestamp on each pre-cutoff video that has
# not already been processed, write it under a 'TS_' prefix, then delete the
# original file.
for d in directories_to_convert:
    files_to_convert = [
        f for f in listdir(d)
        if isfile(join(d, f)) and splitext(f)[0][0] != "."
    ]
    files_to_convert = sorted(files_to_convert, key=str.lower, reverse=False)

    #be careful here, will permanently delete files
    for f in files_to_convert:
        # Parse a 17-char '2019-..-.._HHMMSS' stamp out of the file name.
        rawstring = re.search('(2019.{13})', splitext(f)[0])
        starttime = datetime.strptime(rawstring[1], "%Y-%m-%d_%H%M%S")
        # Skip files recorded after the cutoff or already prefixed 'TS'.
        if starttime < datetime(2019, 2, 19, 16, 00,
                                00) and splitext(f)[0][0:2] != "TS":
            path = '{}{}{}'.format(d, "/", f)
            new_path_name = '{}{}TS_{}'.format(d, "/", f)
            print(path)
            myclip = mp.VideoFileClip(path)
            newclip = myclip.fl(add_timestamp)
            newclip.write_videofile(new_path_name, fps=30)

            # NOTE(review): `"".join(delete_command)` is a no-op on a string;
            # also `CalledProcessError` is presumably imported from
            # subprocess in the unseen header — confirm.
            delete_command = 'rm -Rf {}{}{}'.format(d, "/", f)
            try:
                print(delete_command)
                output = subprocess.check_output("".join(delete_command),
                                                 stderr=subprocess.STDOUT,
                                                 shell=True)
            except CalledProcessError as e:
                print('FAIL:\ncmd:{}\noutput:{}'.format(e.cmd, e.output))
Beispiel #19
0
def generateSRT(filePath, progress_bar):
    """Generate an .srt subtitle file next to the video at *filePath* by
    running Google speech recognition over consecutive 8-second windows.

    Updates both the console progress bar and the GUI *progress_bar*.
    Returns True when the whole audio has been processed.

    Bug fixes versus the original:
    - the per-window work recursed one level per 8-second window, which can
      exhaust the recursion limit on long videos; now an iterative loop;
    - the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit, now
      narrowed to ``except Exception``;
    - the initial truncation left a file handle open; now closed promptly.
    """
    #Coverting .mp4 to .wav
    clip = mp.VideoFileClip(filePath)
    clip.audio.write_audiofile("audio.wav")

    #Extract end time (total audio duration as h/m/s)
    f = sf.SoundFile('audio.wav')
    seconds = len(f) / f.samplerate
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)

    #Converting Speech to Text
    global filename
    filename = os.path.basename(filePath)
    # NOTE(review): this split breaks for names containing extra dots.
    filename, _ = filename.split(".")
    filePath, _ = filePath.split(filename)
    filename = filename + ".srt"
    filePath = os.path.join(filePath, filename)
    # Truncate/create the subtitle file; blocks are appended below.
    open(filePath, "w").close()
    r = sr.Recognizer()
    delay = 8  # window length in seconds
    start_time = datetime.datetime(100, 1, 1, 0, 0, 0)
    max_time = datetime.datetime(100, 1, 1, int(h), int(m), int(s))
    block = 0
    audio_file = sr.AudioFile("audio.wav")
    current_second = 0
    bar = progressbar.ProgressBar(widgets=widget).start()

    current_time = start_time
    while current_time < max_time:
        with audio_file as source:
            r.adjust_for_ambient_noise(source)
            audio = r.record(source, offset=current_second, duration=delay)

        # Advance the window unconditionally, matching the original, which
        # incremented these in both the success and failure paths.
        block = block + 1
        current_second = current_second + delay
        end_time = current_time + datetime.timedelta(0, delay)

        try:
            sentence = r.recognize_google(audio)
            with open(filePath, "a") as fp:
                fp.write(str(block))
                fp.write("\n")
                fp.write(str(current_time.time()))
                fp.write(" --> ")
                fp.write(str(end_time.time()))
                fp.write("\n")
                fp.write(sentence)
                fp.write("\n")
                fp.write("\n")
        except Exception:
            # Recognition failed for this window; skip its subtitle block.
            pass

        bar.update(current_second, seconds)
        progress_bar.UpdateBar(current_second, seconds)
        #print("Finished... "+str(int((current_second/seconds)*100))+"%")
        current_time = end_time

    print("Speech Recognition is over...")
    return True


#generateSRT('C:/Users/HackMachine/Downloads/Video/test.mp4')
import cv2
import tqdm
import tiffile

# Ask the user for the input movie; derive all output paths from its stem.
path =  easygui.fileopenbox("Select the mp4 file")
pathout = path[:-4]+'_DB.tif'
indexout=path[:-4]+'_DB-index.csv'
DBout=path[:-4]+'_DB.csv'

print("=========================================")
print("ETEM movie preprocessor start!")

import moviepy.editor as mp
import moviepy.video.fx.all as vfx

video = mp.VideoFileClip(path)
fps = int(video.fps)
w = int(video.w)
h = int(video.h)
nFrames = int(fps*video.duration)
# Crop fixed margins off every frame (presumably a UI/border overlay in the
# recordings — confirm for other acquisition setups).
video =  (video.fx(vfx.crop, x1=0, y1=28, x2=w-2, y2=h-4))#crop movie

##Preview first frame
#fr = video.get_frame(mp.cvsecs(0/fps))
#fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
#cv2.imshow('Check croped Frame0',fr)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
print("------------------------------------------")
print("Detecting blank and duplicate frames~")
DB=np.zeros((nFrames,4)) #[i,is_blank,is_dup,is_blur]
Beispiel #21
0
def resize_video(fname):
    """Prepare a video for upload: crop to the allowed aspect-ratio limits,
    downscale to at most 1080 px on the long side, and trim to 30 seconds.

    Returns (new_fname, thumbnail_name, w, h, duration) on success, or
    False when moviepy is not installed.
    """
    from math import ceil
    try:
        import moviepy.editor as mp
    except ImportError as e:
        print(f"ERROR: {e}")
        print("Required module `moviepy` not installed\n"
              "Install with `pip install moviepy` and retry.\n\n"
              "You may need also:\n"
              "pip install --upgrade setuptools\n"
              "pip install numpy --upgrade --ignore-installed")
        return False

    print(f"Analizing `{fname}`")
    # Aspect-ratio bounds: ~1.91:1 for horizontal clips, 4:5 for vertical.
    h_lim = {'w': 90., 'h': 47.}
    v_lim = {'w': 4., 'h': 5.}
    d_lim = 30  # maximum duration, seconds

    clip = mp.VideoFileClip(fname)
    w, h = clip.size
    deg = clip.rotation
    ratio = w * 1. / h * 1.
    print(f"FOUND w:{w}, h:{h}, rotation={deg}, ratio={ratio}")

    if w > h:
        print("Horizontal video")
        if ratio > (h_lim['w'] / h_lim['h']):
            # Too wide: shave equal strips off the left and right edges.
            print("Cropping video")
            trim = int(ceil((w - h * h_lim['w'] / h_lim['h']) / 2))
            clip = clip.crop(x1=trim, y1=0, x2=w - trim, y2=h)
            w, h = clip.size
        if w > 1080:
            print("Resizing video")
            clip = clip.resize(width=1080)
    elif w < h:
        print("Vertical video")
        if ratio < (v_lim['w'] / v_lim['h']):
            # Too tall: shave equal strips off the top and bottom edges.
            print("Cropping video")
            trim = int(ceil((h - w * v_lim['h'] / v_lim['w']) / 2))
            clip = clip.crop(x1=0, y1=trim, x2=w, y2=h - trim)
            w, h = clip.size
        if h > 1080:
            print("Resizing video")
            clip = clip.resize(height=1080)
    else:
        print("Square video")
        if w > 1080:
            print("Resizing video")
            clip = clip.resize(width=1080)

    w, h = clip.size
    if clip.duration > d_lim:
        print(f"Cutting video to {d_lim} sec from start")
        clip = clip.subclip(0, d_lim)
    new_fname = f"{fname}.CONVERTED.mp4"
    print(f"Saving new video w:{w} h:{h} to `{new_fname}`")
    clip.write_videofile(new_fname, codec="libx264", audio_codec="aac")
    print("Generating thumbnail...")
    thumbnail_name = f"{fname}.jpg"
    clip.save_frame(thumbnail_name, t=(clip.duration / 2))
    return new_fname, thumbnail_name, w, h, clip.duration
Beispiel #22
0
#!/usr/bin/env python3
"""Pixelate video."""
import argparse
from skimage import io
from skimage import transform as tf
from skimage import exposure
from skimage import feature
from skimage import filters
import numpy as np
import sys
import moviepy.editor as mp
from main import pixel

try:
    in_video = sys.argv[1]
    out_video = sys.argv[2]
except IndexError:
    sys.stderr.write("Usage: {0} [in_video] [out_video]\n".format(sys.argv[0]))
    # Bug fix: exiting with status 0 reported success to the shell even
    # though the required arguments were missing; a usage error must exit
    # with a nonzero status.
    sys.exit(1)


# Pixelate seconds 5-8 of the input (audio dropped) and write the result.
vidos = mp.VideoFileClip(filename=in_video, audio=False).subclip(5, 8)
clip_pixel = vidos.fl_image(pixel)  # apply `pixel` to each frame
clip_pixel.write_videofile(out_video)
Beispiel #23
0
def convertVideo(video):
    """Transcode an S3-hosted video into an HLS quality ladder and re-upload it.

    Downloads ``videos/<video>`` from S3, renders one MP4 per quality the
    source height supports, converts each to an HLS playlist with ffmpeg,
    writes a master playlist referencing every variant, uploads everything
    under ``convertedVideos/<name>/``, cleans up local files, and notifies
    the activation endpoint.

    Args:
        video: S3 object file name, e.g. ``"clip.mp4"``.

    Returns:
        The string ``"Done!"`` on completion.
    """
    # Bug fix: rsplit keeps dots inside the base name ("a.b.mp4" -> "a.b"),
    # where split('.')[1] would have picked the wrong segment as extension.
    base_name, extension = video.rsplit('.', 1)
    input_file = 'input.' + extension
    bucket_Name = 'streaming-video-storage'
    input_dir = 'input'
    output_dir = base_name
    output_file = 'output.m3u8'
    # FIXME(security): AWS credentials are hard-coded and committed; they are
    # leaked and must be rotated. Use the default boto3 credential chain
    # (environment / instance role) instead of literals.
    s3 = boto3.client(
        's3',
        aws_access_key_id='AKIA5M3NQV6LPKYNTAWB',
        aws_secret_access_key='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')
    response = s3.list_buckets()

    # Output the bucket names
    print('Existing buckets:')
    for bucket in response['Buckets']:
        print(f'  {bucket["Name"]}')

    s3.download_file(bucket_Name, 'videos/' + video, input_file)
    video_capture = cv2.VideoCapture(input_file)
    height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
    width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    # 144p is always produced; higher rungs only when the source is at least
    # that tall (never upscale).
    qualities = [144] + [
        q for q in (240, 360, 480, 720, 1080, 1440, 2160) if height >= q
    ]

    print(height)
    print(width)

    video_capture.release()
    cv2.destroyAllWindows()

    for quality in qualities:
        print(quality)

    if not os.path.exists(input_dir):
        os.makedirs(input_dir)

    for quality in qualities:
        clip = mp.VideoFileClip(input_file)
        # moviepy computes the width so the aspect ratio is preserved.
        clip_resized = clip.resize(height=quality)
        clip_resized.write_videofile(input_dir + '/' + str(quality) + '.mp4')

    files = os.listdir(input_dir)

    for file in files:
        print(file)

    # Convert each rendered MP4 into an HLS playlist + segments.
    for file in files:
        fileName = file.split('.')
        input_stream = ffmpeg.input(input_dir + '/' + file)
        if not os.path.exists(output_dir + '/' + fileName[0]):
            os.makedirs(output_dir + '/' + fileName[0])
        output_stream = ffmpeg.output(input_stream,
                                      output_dir + '/' + fileName[0] + '/' +
                                      output_file,
                                      format='hls',
                                      start_number=0,
                                      hls_time=5,
                                      hls_list_size=0)
        ffmpeg.run(output_stream)

    # Advertised (BANDWIDTH, RESOLUTION) for each rung of the ladder; this
    # table replaces the original copy-pasted per-quality if-chain.
    variants = {
        144: ('100000', '256x144'),
        240: ('200000', '426x240'),
        360: ('300000', '640x360'),
        480: ('400000', '854x480'),
        720: ('500000', '1280x720'),
        1080: ('600000', '1920x1080'),
        1440: ('700000', '2560x1440'),
        2160: ('800000', '3840x2160'),
    }
    static_text = '#EXT-X-STREAM-INF:BANDWIDTH={0},CODECS=\"avc1.42c00d,mp4a.40.2\",RESOLUTION={1}\n{2}/output.m3u8\n\n'
    with open(output_dir + '/' + output_file, 'a') as f:
        f.write('#EXTM3U\n#EXT-X-VERSION:5\n\n')
        for quality in qualities:
            bandwidth, resolution = variants[quality]
            label = str(quality)
            f.write(static_text.format(bandwidth, resolution, label))
            # Upload this variant's playlist and all its segments.
            for file in os.listdir(output_dir + '/' + label + '/'):
                s3.upload_file(
                    output_dir + '/' + label + '/' + file, bucket_Name,
                    'convertedVideos/' + base_name + '/' + label + '/' + file)

    s3.upload_file(
        output_dir + '/' + output_file, bucket_Name,
        'convertedVideos/' + base_name + '/' + output_file)

    # Local workspace cleanup.
    shutil.rmtree(input_dir)
    shutil.rmtree(output_dir)
    os.remove(input_file)

    # Tell the backend the converted video is ready to serve.
    headers = {'Content-Type': 'application/json'}
    api_url = 'http://172.19.140.115:8081/video/activate/' + output_dir
    response = requests.put(api_url, headers=headers)

    return "Done!"
Beispiel #24
0
    def anonymize_video(self, video_path, target_path,
                        start_frame=None,
                        end_frame=None,
                        with_keypoints=False,
                        anonymize_source=False,
                        max_face_size=1.0,
                        without_source=True):
        """Anonymize faces in ``video_path`` and write the result to ``target_path``.

        Frames in [start_frame, end_frame) are anonymized; any lead-in/out
        frames are copied untouched from the original clip. When
        ``without_source`` is False the output shows the original frame (with
        detections drawn, optionally blacked out) side by side with the
        anonymized frame for comparison.
        """
        # Read original video
        original_video = mp.VideoFileClip(video_path)
        fps = original_video.fps
        total_frames = int(original_video.duration * original_video.fps)
        start_frame = 0 if start_frame is None else start_frame
        end_frame = total_frames if end_frame is None else end_frame
        assert start_frame <= end_frame, f"Start frame{start_frame} has to be smaller than end frame {end_frame}"
        assert end_frame <= total_frames, f"End frame ({end_frame}) is larger than number of frames {total_frames}"
        subclip = original_video.subclip(start_frame/fps, end_frame/fps)
        print("="*80)
        print("Anonymizing video.")
        print(
            f"Duration: {original_video.duration}. Total frames: {total_frames}, FPS: {fps}")
        print(
            f"Anonymizing from: {start_frame}({start_frame/fps}), to: {end_frame}({end_frame/fps})")

        frames = list(tqdm.tqdm(subclip.iter_frames(), desc="Reading frames",
                                total=end_frame - start_frame))
        if with_keypoints:
            im_bboxes, im_keypoints = detection_api.batch_detect_faces_with_keypoints(
                frames)
            im_bboxes, im_keypoints = inference_utils.filter_image_bboxes(
                im_bboxes, im_keypoints,
                [im.shape for im in frames],
                max_face_size,
                filter_type="width"
            )
            anonymized_frames = self.anonymize_images(frames,
                                                      im_keypoints,
                                                      im_bboxes)
        else:
            im_bboxes = detection_api.batch_detect_faces(frames,
                                                         self.face_threshold)
            im_keypoints = None
            anonymized_frames = self.anonymize_images(frames, im_bboxes=im_bboxes)

        def make_frame(t):
            # Clamp the index: float rounding at the clip end could otherwise
            # index one past the last buffered frame.
            frame_idx = min(int(round(t * original_video.fps)), len(frames) - 1)
            anonymized_frame = anonymized_frames[frame_idx]
            if without_source:
                # Bug fix: the original condition was inverted — it returned
                # the side-by-side comparison exactly when the caller asked
                # for output WITHOUT the source.
                return anonymized_frame
            orig_frame = frames[frame_idx]
            # Bug fix: im_keypoints is None when with_keypoints=False, so
            # indexing it per frame crashed; pass None for that frame instead.
            # NOTE(review): assumes draw_faces_with_keypoints accepts
            # keypoints=None — confirm against vis_utils.
            keypoints = im_keypoints[frame_idx] if im_keypoints is not None else None
            orig_frame = vis_utils.draw_faces_with_keypoints(
                orig_frame, im_bboxes[frame_idx], keypoints,
                radius=None,
                black_out_face=anonymize_source)
            return np.concatenate((orig_frame, anonymized_frame), axis=1)

        anonymized_video = mp.VideoClip(make_frame)
        anonymized_video.duration = (end_frame - start_frame) / fps
        anonymized_video.fps = fps
        # Stitch the untouched lead-in / lead-out segments around the
        # anonymized middle section.
        to_concatenate = []
        if start_frame != 0:
            to_concatenate.append(original_video.subclip(0, start_frame/fps))
        to_concatenate.append(anonymized_video)
        if end_frame != total_frames:
            to_concatenate.append(original_video.subclip(end_frame/fps, total_frames/fps))
        anonymized_video = mp.concatenate(to_concatenate)

        anonymized_video.audio = original_video.audio
        print("Anonymized video stats.")
        total_frames = int(anonymized_video.duration * anonymized_video.fps)
        print(f"Duration: {anonymized_video.duration}. Total frames: {total_frames}, FPS: {fps}")
        print(f"Anonymizing from: {start_frame}({start_frame/fps}), to: {end_frame}({end_frame/fps})")

        anonymized_video.write_videofile(target_path, fps=original_video.fps,
                                         audio_codec='aac')
Beispiel #25
0
def transcribe_file():
    """Flask view: transcribe an uploaded audio file or a remote mp3/mp4 URL.

    Expects a POST with either a ``file`` upload or a ``url`` form field.
    Optional form fields: ``model`` (1-3, picks the acoustic model) and
    ``targetString`` (reference text used to compute WER/CER).
    Returns a payload with the punctuated transcription and timing stats.
    """
    if request.method == 'POST':
        res = {}
        res['total'] = 0
        res['seconds'] = 0
        t0 = time.time()
        transTxt = ""
        if 'file' not in request.files and 'url' not in request.form:
            res['code'] = 403
            res['data'] = "Missed audio files or url of mp3 file."
            return jsonify(res)
        try:
            # At this point at least one of 'file'/'url' is present (possibly
            # both); an uploaded 'file' takes precedence over 'url'.

            # Case 1: the request carries an uploaded 'file'.
            file_extension = ""
            path = ""  # server-side path of the audio file to transcribe
            if ('file' in request.files):
                file = request.files['file']
                filename = file.filename
                _, file_extension = os.path.splitext(filename)
                if file_extension.lower() not in ALLOWED_EXTENSIONS:
                    res['code'] = 403
                    res['data'] = "{} is not supported format.".format(
                        file_extension)
                    return jsonify(res)
                with NamedTemporaryFile(prefix="product_",
                                        suffix=file_extension,
                                        dir='/work/dataset_product/wav',
                                        delete=False) as temp_audio:
                    # Persist the upload under the unique temporary name.
                    file.save(temp_audio.name)
                    path = temp_audio.name
            elif ('url'
                  in request.form):  # Case 2: no 'file', only a 'url'.
                url = request.form['url']  # link to an mp3 or a video

                # Download it first.
                folder = 'work/dataset_recording/wav'
                absolute_path = download_file(url, folder)

                # Already an mp3: use it as-is.
                if (absolute_path).endswith('.mp3'):
                    path = absolute_path

                # mp4: extract the audio track from the video.
                elif (absolute_path).endswith('.mp4'):
                    my_clip = mp.VideoFileClip(absolute_path)
                    filename = os.path.splitext(absolute_path)[0]
                    path = os.path.join(folder, filename + '.mp3')
                    my_clip.audio.write_audiofile(path)

                # Anything else is rejected.
                else:
                    res['code'] = 403
                    res['data'] = "Extension is not supported."
                    return jsonify(res)

            # Normalize the audio to the expected WAV format; the converted
            # file keeps the original base name.
            # NOTE(review): the .replace("wav", "txt") below rewrites EVERY
            # occurrence of "wav", including directory names — confirm the
            # transcript is meant to land in a sibling "txt" directory.
            path = ConvertAudioToWav(path)

            print("File name : " + str(path))
            # Optional model selection; defaults to model 1.
            choose = 1
            try:
                choose = int(request.form['model'])
            except (KeyError, ValueError):
                # Missing or non-numeric 'model' field: keep the default.
                pass

            global model, model2, model3
            runingModel = model
            if (choose == 2):
                runingModel = model2
                print("Using model 2")
            if (choose == 3):
                runingModel = model3
                print("Using model 3")
            transcription, transcriptionGreedy, _, _ = run_transcribe(
                audio_path=path,
                spect_parser=spect_parser,
                model=runingModel,
                decoder=decoder,
                device=device,
                use_half=True)
            res['status'] = 200
            res_text = ""
            if (len(transcription) > 0):
                res_text = transcription[0][0]
                res['total'] = len(transcription[0])
            else:
                res_text = transcription
                res['total'] = len(transcription)

            # Re-punctuate both the beam-search and the greedy outputs.
            res['data'] = transcribe_comma.runTranscribe(
                commo_model, dict_data, word_dict, char_dict, res_text)
            res['path'] = path
            res['greedy'] = transcribe_comma.runTranscribe(
                commo_model, dict_data, word_dict, char_dict,
                transcriptionGreedy[0][0])
            # Save the transcript next to the wav file (see NOTE above).
            transTxt = path.replace("wav", "txt")
            with open(transTxt, "w") as textFile:
                textFile.write(res['data'])
            logging.info('Success transcript')
            logging.debug(res)
        except Exception as exx:
            res['status'] = 403
            res['data'] = "Không thể nhận dạng\n" + str(exx)
        t1 = time.time()
        total = t1 - t0
        targetString = ""
        wer = 100
        cer = 0
        try:
            # WER/CER are only computed when a reference transcript was sent.
            targetString = request.form['targetString']
            wer = werPecentage(targetString, res_text)
            cer = cerPecentage(targetString, res_text)
        except Exception:
            wer = 0
            cer = 0  # bug fix: was 'er = 0', leaving cer at its init value by accident
        res['seconds'] = total
        res['wer'] = round(wer, 3)
        res['cer'] = round(cer, 3)
        return res
Beispiel #26
0
def VideoLoader(directory, in_folder=False, **kwargs):
    '''Function that loads all the videos from a directory and returns
    a test frame for ROI selection, and the captures.

    Params :
        directory (str) : directory where the videos to load are
        in_folder (bool) : if True, look for videos directly in `directory`;
            otherwise scan its sub-folders (matched against workingDir)
        kwargs (dict) : dictionary holding parameters for the VideoLoader
        function (see videoParameters in Kinect.py file for more info)

    Returns :
        RGBCaptures (list) : captures of all RGB videos found
        DEPTHCaptures (list) : captures of all DEPTH videos found
        RGBTestFrame (array) : first RGB test frame (for the croppingROI function)
        DEPTHTestFrame (array) : first DEPTH test frame
    '''

    RGBCaptures = []
    DEPTHCaptures = []
    RGBTestFrame = None
    DEPTHTestFrame = None

    print("\n")

    def _notify(file):
        # Success feedback; the sound is best-effort only.
        utils.PrintColoredMessage(
            "[INFO] {0} loaded successfully".format(file), "darkgreen")
        if kwargs["main"]["playSound"]:
            try:
                utils.PlaySound(1, params.sounds['Purr'])
            except Exception:
                pass

    def _handle(folder_path, file):
        # Load one matching video into the RGB or DEPTH bucket, grabbing the
        # first clip's test frame for each stream. Deduplicates the four
        # copy-pasted branches of the original implementation.
        nonlocal RGBTestFrame, DEPTHTestFrame
        prefix = file.split("_")[0]
        if prefix == kwargs["main"]["rgbVideoName"]:
            cap = mpy.VideoFileClip(os.path.join(folder_path, file))
            RGBCaptures.append(cap)
            if RGBTestFrame is None:
                frame = cap.get_frame(kwargs["main"]["testFramePos"])
                # Only the RGB stream is color-converted, as in the original.
                RGBTestFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            _notify(file)
        elif prefix == kwargs["main"]["depthVideoName"]:
            cap = mpy.VideoFileClip(os.path.join(folder_path, file))
            DEPTHCaptures.append(cap)
            if DEPTHTestFrame is None:
                DEPTHTestFrame = cap.get_frame(kwargs["main"]["testFramePos"])
            _notify(file)

    if not in_folder:
        # Scan sub-folders whose path matches the configured working dir.
        for folder in natsorted(os.listdir(directory)):
            dirPath = os.path.join(directory, folder)
            if os.path.isdir(
                    dirPath) and dirPath in kwargs["main"]["workingDir"]:
                for file in natsorted(os.listdir(dirPath)):
                    if file.split('.')[-1] == kwargs["main"]["extensionLoad"]:
                        _handle(dirPath, file)
    else:
        # Videos live directly in the given directory.
        for file in natsorted(os.listdir(directory)):
            if file.split('.')[-1] == kwargs["main"]["extensionLoad"]:
                _handle(directory, file)

    if RGBTestFrame is None and DEPTHTestFrame is None:

        utils.PrintColoredMessage(
            "[WARNING] Sorry, no video file in the right format was found",
            "darkred")

    return RGBCaptures, DEPTHCaptures, RGBTestFrame, DEPTHTestFrame
def generate_qr_code_from_animecoin_id_privkey_func(
        animecoin_id_private_key_b16_encoded):
    """Render the private key as an animated video of QR codes.

    The 2084-character base16 key is split into 25 indexed chunks; each chunk
    is drawn as a QR-code frame with warning text, the frames are written to
    an animated GIF, and the GIF is converted to an MP4 that is then opened
    with the platform viewer.
    """
    assert (isinstance(animecoin_id_private_key_b16_encoded, str))
    assert (len(animecoin_id_private_key_b16_encoded) == 2084)
    key_character_set = 'ABCDEF1234567890'
    # Bug fix: the original asserted a non-empty list comprehension, which is
    # always truthy and never failed; all() actually validates every character.
    assert all(x in key_character_set
               for x in animecoin_id_private_key_b16_encoded)
    print('Generating Video of QR Codes now...')
    number_of_key_chunks = 25
    key_chunk_size = int(math.ceil(2084 / number_of_key_chunks))
    # Each chunk is prefixed with "<index> <total>" so the scanner can
    # reassemble the key in order.
    key_chunks = [
        str(int(math.ceil(ii / key_chunk_size) + 1)) + ' ' +
        str(number_of_key_chunks) + ' ' +
        animecoin_id_private_key_b16_encoded[ii:ii + key_chunk_size]
        for ii in range(0, len(animecoin_id_private_key_b16_encoded),
                        key_chunk_size)
    ]
    # Hold the first chunk for 10 extra frames, then repeat the sequence 5x.
    key_chunks_duplicated = [key_chunks[0]] * 10 + key_chunks * 5
    qr_error_correcting_level = 'L'  # L, M, Q, H
    qr_encoding_type = 'alphanumeric'
    qr_scale_factor = 12
    large_font_size = 18
    start_x = 50
    # Perf: the fonts are loop-invariant; load them once instead of per frame.
    large_font = ImageFont.truetype('FreeSans.ttf', large_font_size)
    larger_font = ImageFont.truetype('FreeSans.ttf', large_font_size * 2)
    list_of_qr_code_pil_objects = list()
    for cnt, current_key_chunk in enumerate(key_chunks_duplicated):
        current_key_chunk_qr_code = pyqrcode.create(
            current_key_chunk,
            error=qr_error_correcting_level,
            version=4,
            mode=qr_encoding_type)
        current_key_chunk_qr_code_png_string = current_key_chunk_qr_code.png_as_base64_str(
            scale=qr_scale_factor)
        current_key_chunk_qr_code_png_data = base64.b64decode(
            current_key_chunk_qr_code_png_string)
        pil_qr_code_image = Image.open(
            io.BytesIO(current_key_chunk_qr_code_png_data))
        img_width, img_height = pil_qr_code_image.size  # getting the base image's size
        if pil_qr_code_image.mode != 'RGB':
            pil_qr_code_image = pil_qr_code_image.convert("RGB")
        # Pad with margins so the warning text fits above the code.
        pil_qr_code_image = ImageOps.expand(pil_qr_code_image,
                                            border=(500, 150, 500, 0))
        drawing_context = ImageDraw.Draw(pil_qr_code_image)
        warning_message_1 = "Warning! Once you close this window,  these QR codes will be lost! The video of QR codes below represents your Animecoin ID Private Key-- make sure it stays secret!"
        warning_message_2 = "Since your smartphone is likely more secure than your computer, we suggest saving the video to your phone using Google Drive, Dropbox, or iCloud."
        warning_message_3 = "This assumes you have secured your phone using 2-factor authentication on iCloud, Dropbox, Google Photos, etc. If not, do this first!"
        warning_message_4 = "Then, you will be able to unlock your Animecoin wallet by holding up your phone to your computer's web cam while the video plays on your phone, which is convenient and secure."
        current_chunk_number = int(current_key_chunk.split(' ')[0])
        label_message = "QR CODE " + str(current_chunk_number) + ' of ' + str(
            number_of_key_chunks)
        drawing_context.text((start_x, 1 * large_font_size * 1.5),
                             warning_message_1, (255, 255, 255),
                             font=large_font)
        drawing_context.text((start_x, 2 * large_font_size * 1.5),
                             warning_message_2, (255, 255, 255),
                             font=large_font)
        drawing_context.text((start_x, 3 * large_font_size * 1.5),
                             warning_message_3, (255, 255, 255),
                             font=large_font)
        drawing_context.text((start_x, 4 * large_font_size * 1.5),
                             warning_message_4, (255, 255, 255),
                             font=large_font)
        drawing_context.text((start_x, 5 * large_font_size * 3),
                             label_message, (255, 255, 255),
                             font=larger_font)
        output = io.BytesIO()
        pil_qr_code_image.save(output, format='GIF')
        hex_data = output.getvalue()
        list_of_qr_code_pil_objects.append(imageio.imread(hex_data))
        if cnt % 3 == 0:
            print(
                str(round(100 * cnt / len(key_chunks_duplicated), 1)) +
                '% complete')
    output_filename_string = 'animated_qr_code_privkey'
    print('Generating output video...')
    imageio.mimsave(output_filename_string + '.gif',
                    list_of_qr_code_pil_objects,
                    duration=0.13)
    clip = mp.VideoFileClip(output_filename_string + '.gif')
    clip.write_videofile(output_filename_string + '.mp4')
    print('Done!')
    time.sleep(2)
    # NOTE(review): 'open' is macOS-only; use 'xdg-open'/'start' for
    # Linux/Windows if this must be cross-platform.
    subprocess.call(['open', output_filename_string + '.mp4'])
Beispiel #28
0
 def __init__(self, filename, audioname):
     """Load the main clip, its soundtrack, and a semi-transparent overlay.

     Args:
         filename: path of the main video file.
         audioname: path of the audio file to combine with the clip.
     """
     self.total_duration = 0  # presumably a running total of clip durations (seconds) — confirm against other methods
     self.clip_list = []  # presumably sub-clips accumulated for later concatenation
     self.clip = mpe.VideoFileClip(filename)
     self.audio = mpe.AudioFileClip(audioname)
     # Overlay resized to the main clip's dimensions at 55% opacity;
     # subclip() with no arguments keeps the full overlay duration.
     self.overlay = mpe.VideoFileClip('assets/overlay2.mp4').subclip().resize(self.clip.size).set_opacity(0.55)
def make_video(gifname, datepath):
    """Re-encode the given GIF as an MP4 under the videos folder."""
    source_clip = mp.VideoFileClip(f"{gifname}")
    output_path = f"{videos_folder}vid_{datepath}.mp4"
    source_clip.write_videofile(output_path)
import moviepy.editor as mp

# Pull the audio track out of the video.
clip = mp.VideoFileClip("video.mp4")
clip.audio.write_audiofile("theaudio.mp3")

import pydub

# Convert to mono, 16-bit samples, 16 kHz (each setter returns a new
# AudioSegment, so the calls chain cleanly).
sound = pydub.AudioSegment.from_mp3("theaudio.mp3")
sound = sound.set_channels(1).set_sample_width(2).set_frame_rate(16000)
sound.export("english.wav", format="wav")