Exemplo n.º 1
0
 async def vdownscale(self, ctx):
     """Crunch a video's quality and post the result to the channel.

     Fetches a video URL via AV.mhwnt, downloads it, degrades the audio by
     round-tripping it through a 16 kbps / 24 kHz mp3, shrinks the frame to
     1/16 of each dimension and scales it back up (destroying detail while
     keeping the original frame size), then sends the result.
     """
     import moviepy.video.fx.all as vfx
     os.chdir(path+"\\sounds")
     url = await AV.mhwnt(ctx)
     # Assumes the URL ends in a 3-letter extension (e.g. mp4) -- TODO confirm
     form = url[-3:]
     AV.dwn(url, "base."+form)
     video = VideoFileClip("base."+form)
     # Degrade the audio: extract -> low-bitrate mp3 -> reload.
     audio = video.audio
     audio.write_audiofile("temp.mp3")
     clip = audi.from_mp3("temp.mp3")  # `audi` is presumably a pydub AudioSegment alias -- verify
     clip = clip.set_frame_rate(24000)
     clip.export("temp.mp3", bitrate="16k", format="mp3")
     audio = AudioFileClip("temp.mp3")
     video = video.set_audio(audio)
     # Downscale to 1/16 per axis, then upscale back to (approximately)
     # the original size.
     w, h = video.size
     w = int(w/16)
     h = int(h/16)
     video = vfx.resize(video, (w, h))
     w = int(w*16)
     h = int(h*16)
     video = vfx.resize(video, (w, h))
     video.write_videofile("res.mp4")
     try:
         # Discord rejects uploads over the size limit; report instead of crashing.
         await ctx.send(file=discord.File('res.mp4'))
     except Exception:
         # Fix: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
         # message typo fixed ("to" -> "too").
         await ctx.send("File too large")
Exemplo n.º 2
0
    async def pvdownscale(self, ctx):
        """Progressively downscale a video and post the result.

        Splits the video into six segments whose video resolution and audio
        bitrate get worse as the clip plays (bitrate halves, resolution
        divisor grows), concatenates them and sends the file.
        """
        import moviepy.video.fx.all as vfx
        os.chdir(path+"\\sounds")
        url = await AV.mhwnt(ctx)
        # Assumes the URL ends in a 3-letter extension (e.g. mp4) -- TODO confirm
        form = url[-3:]
        AV.dwn(url, "base."+form)
        video = VideoFileClip("base."+form)
        audio = video.audio
        audio.write_audiofile("temp.mp3")
        clip = audi.from_mp3("temp.mp3")  # `audi` is presumably a pydub AudioSegment alias -- verify
        clip = clip.set_frame_rate(24000)
        bit = 32  # starting audio bitrate in kbps; halved each segment
        seg = int(video.duration/6)
        aclips = []
        for x in range(1, 7):
            # NOTE(review): temp.mp3 is overwritten every iteration while
            # earlier AudioFileClips may still read it lazily -- consider
            # per-segment filenames.
            clip.export("temp.mp3", bitrate=str(bit)+'k', format="mp3")
            audio = AudioFileClip("temp.mp3")
            if x == 6:
                # Bug fix: the last segment started at x*seg (= 6*seg),
                # skipping [5*seg, 6*seg]; it must start where segment 5 ended.
                taudio = audio.subclip((x-1)*seg, video.duration)
            else:
                taudio = audio.subclip((x-1)*seg, seg*x)
            bit /= 2
            aclips.append(taudio)
        clips = []
        for x in range(1, 7):
            if x == 6:
                # Bug fix: same off-by-one as the audio loop above.
                tvideo = video.subclip((x-1)*seg, video.duration)
            else:
                tvideo = video.subclip((x-1)*seg, seg*x)
            # Bug fix: moviepy's Clip.size is (width, height), not (h, w).
            w, h = video.size
            # Shrink harder for later segments, then blow the frame back up
            # to (approximately) the original size; int() because resize
            # needs whole pixels.
            w = int(w/(2*x))
            h = int(h/(2*x))
            tvideo = vfx.resize(tvideo, (w, h))
            tvideo = vfx.resize(tvideo, (int(w*2*x), int(h*2*x)))
            tvideo = tvideo.set_audio(aclips[x-1])
            clips.append(tvideo)

        fclip = concatenate_videoclips(clips)
        fclip.write_videofile("res.mp4")
        try:
            await ctx.send(file=discord.File('res.mp4'))
        except Exception:
            # Fix: narrowed bare except; message typo fixed ("to" -> "too").
            await ctx.send("File too large")
Exemplo n.º 3
0
def _process_track(instruments, instrument_names, source_dir,
                   instrument_config, notes, pulse_length, width, height,
                   max_velocity, queue, file_name, volumes, num_sim_tracks):
    """
    Composes one midi track into a stop motion video clip.
    Writes a file of this with the given file name.

    Runs in a worker process: progress and errors are reported to the
    parent through `queue` as (MSG_*, payload) tuples instead of being
    raised across the process boundary.
    """
    try:
        # One (clips, min_volume) entry per instrument used by this track.
        instrument_clips = {
            name: _load_instrument_clips(name, instruments[name], source_dir,
                                         instrument_config)
            for name in instrument_names
        }
        parsed_clips = []
        # Log2-based shrink factor derived from how many tracks will be
        # composited simultaneously.
        scale_factor = int(math.floor(math.log(num_sim_tracks, 2) + 1))
        # Cheap resume: skip tracks already rendered by a previous run,
        # but still report progress so the parent's accounting stays right.
        if os.path.isfile(file_name):
            queue.put((MSG_PROCESSED_SEGMENT, 0))
            queue.put((MSG_DONE, 1))
            return
        for note in notes:
            note_number = note.note_number
            clips, min_vol = instrument_clips[note.instrument_name]
            # Per-instrument volume override from the volumes file; 0.5 default.
            vol = 0.5
            if volumes is not None:
                vol = volumes.get(note.instrument_name, 0.5)

            c, offset, max_vol = clips[note_number]
            clip = c.copy()
            num_sim_notes = note.get_num_sim_notes()

            # Tile position/size of this note inside the track's frame.
            x, y, w, h = _partition(width, height, num_sim_notes,
                                    note.video_position)

            # Scale MIDI velocity into a clip volume, normalising by the
            # source clip's own loudness range (min_vol / max_vol).
            volume = (float(note.velocity) / float(max_velocity)) * (min_vol /
                                                                     max_vol)

            clip = clip.subclip(offset)
            clip = clip.set_start((note.start) * pulse_length)
            clip = clip.volumex(volume * vol)
            # Cut the note off at its MIDI duration, but never extend the clip.
            d = clip.duration
            clip = clip.set_duration(min(note.duration * pulse_length, d))
            clip = clip.set_position((x // scale_factor, y // scale_factor))
            clip = fx.resize(clip,
                             newsize=(w // scale_factor, h // scale_factor))
            parsed_clips.append(clip)
        track_clip = edit.CompositeVideoClip(size=(width // scale_factor,
                                                   height // scale_factor),
                                             clips=parsed_clips)
        track_clip.write_videofile(file_name,
                                   fps=30,
                                   verbose=False,
                                   progress_bar=False)

        queue.put((MSG_PROCESSED_SEGMENT, 0))
        queue.put((MSG_DONE, 1))

    except Exception as e:
        # Forward the failure to the parent, which re-raises it.
        queue.put((MSG_FATAL_ERROR, e))
        traceback.print_exc(file=sys.stdout)
Exemplo n.º 4
0
def mix_video_ink(t):
    """
    Insert ink from src to dest.

    Frame filter: for time ``t``, multiplies the destination frame by the
    (rescaled) source "ink" frame inside the configured rectangle, except
    where the destination is strongly saturated (treated as a hand in
    shot, which is left unmodified).  Reads the module-level globals
    ink_src, ink_dst, do_square and do_monochrome_ink.
    """
    src = ink_src
    dst = ink_dst
    vsrc = src['vid']
    vdst = dst['vid']
    # Normalise the 0-255 frame to 0..1 floats for blending.
    fdst = vdst.get_frame(t) * (1.0 / 255.0)

    h_offset = 0
    if do_square:
        # Crop the destination to a centered square (minus a hard-coded nudge).
        fw = fdst.shape[1]
        fh = fdst.shape[0]
        h_offset = (fw - fh) >> 1
        h_offset -= 100  # temp hack
        fdst = fdst[:, h_offset:h_offset+fh]

    # Bug fix: total_time was unbound when neither t2 was set; default to 0
    # so the blend window degenerates to the single instant t1.
    total_time = 0.0
    if dst['t2'] is not None:
        total_time = dst['t2'] - dst['t1']
    if src['t2'] is not None:
        total_time = src['t2'] - src['t1']

    if t >= dst['t1'] and t <= dst['t1'] + total_time:
        # Bug fix: scale was unbound below when src and dst widths matched.
        scale = 1.0
        if src['w'] != dst['w']:
            scale = float(dst['w'])/float(src['w'])
            vsrc = resize(vsrc, scale, 'bilinear')
        if do_monochrome_ink:
            vsrc = blackwhite(vsrc)
        fsrc = vsrc.get_frame(t + src['t1'] - dst['t1']) * (1.0 / 255.0)
        sx = int(scale*src['x'])
        sy = int(scale*src['y'])
        dx = int(dst['x'] - h_offset)
        dy = int(dst['y'])
        w = int(scale*src['w'])
        h = int(scale*src['h'])

        # Boolean "hand" mask from the destination's saturation channel,
        # broadcast to 3 channels (dstack of (h, w) arrays yields (h, w, 3),
        # so the old flatten/reshape round-trip was redundant).
        fdst_hsv = rgb_to_hsv(fdst[dy:dy+h, dx:dx+w])
        saturation = fdst_hsv[:, :, 1]
        hand = saturation > 0.25
        hand = np.dstack([hand, hand, hand])

        # Multiply ink into the destination everywhere except the hand mask.
        fdst[dy:dy+h, dx:dx+w] = hand * fdst[dy:dy+h, dx:dx+w] + (1 - hand) * fdst[dy:dy+h, dx:dx+w] * fsrc[sy:sy+h, sx:sx+w]

    return fdst * 255
Exemplo n.º 5
0
 async def vshrink(self, ctx, ds=5):
     """Halve a video's resolution and post the result to the channel.

     :param ds: currently unused -- presumably intended as the downscale
         divisor instead of the hard-coded 2. Kept for interface
         compatibility; TODO: wire it up or drop it.
     """
     import moviepy.video.fx.all as vfx
     os.chdir(path+"\\sounds")
     url = await AV.mhwnt(ctx)
     # Assumes the URL ends in a 3-letter extension (e.g. mp4) -- TODO confirm
     form = url[-3:]
     AV.dwn(url, "base."+form)
     video = VideoFileClip("base."+form)
     # Halve each dimension.
     w, h = video.size
     w = int(w/2)
     h = int(h/2)
     video = vfx.resize(video, (w, h))
     video.write_videofile("res.mp4")
     try:
         await ctx.send(file=discord.File('res.mp4'))
     except Exception:
         # Fix: narrowed bare except; message typo fixed ("to" -> "too").
         await ctx.send("File too large")
Exemplo n.º 6
0
def home():
    """Serve the QR-append page.

    GET renders the upload form; POST takes an uploaded video plus a text
    payload, builds a QR-code slideshow of the payload and appends it to
    the video, returning the merged file as a download.
    """
    if request.method == "GET":
        return render_template("home.html")

    payload = request.form.get("data")
    uploaded = request.files.get("video")
    uploaded.save("video.mp4")

    # One 5-second frame per generated QR image.
    qr_codes = generate_QR(payload)
    qr_clip = ImageSequenceClip(qr_codes,
                                durations=[5] * len(qr_codes),
                                with_mask=False,
                                fps=1)

    base_clip = resize(VideoFileClip("video.mp4"), (1290, 1290))
    final_clip = concatenate_videoclips([base_clip, qr_clip], method="compose")
    final_clip.write_videofile("result.mp4")

    # Clean up the temporary QR images (set() dedupes repeated paths).
    for qr_path in set(qr_codes):
        remove(qr_path)
    return send_file("result.mp4", mimetype="video/*", as_attachment=True)
Exemplo n.º 7
0
def writeClips(selectedSubtitles, outputBase):
	"""Cut each selected subtitle's time span out of its source movie,
	normalise loudness and height, and write the clips out in batches of
	NUM_SEGMENTS_MAX as numbered .mp4 files with matching .srt subtitle
	files.  (Python 2 code: note the print statements.)
	"""
	clips = []
	count = 0
	superclips = 0  # index of the next batch file to write
	# RMS volume of an audio sample array.
	volumeFn = lambda array: np.sqrt(((1.0*array)**2).mean())
	desiredVolume = 0.03
	desiredHeight = 720
	# create subtitle file to go along with it
	newSubs = pysrt.srtfile.SubRipFile()
	for CurrentSubtitle in selectedSubtitles:
		print CurrentSubtitle
		appendSubtitle(newSubs, CurrentSubtitle)
		movieFilename = getFilename(CurrentSubtitle.filename)
		clip = mpy.VideoFileClip(movieFilename).subclip(CurrentSubtitle.timeStart,CurrentSubtitle.timeEnd)
		# Scale the clip's audio so its RMS volume matches desiredVolume.
		volume = volumeFn(clip.audio.to_soundarray())
		clip.audio = clip.audio.fx(mpy.afx.volumex, desiredVolume/volume)
		# Resize to a fixed height, preserving aspect ratio.
		clip = mpyfx.resize(clip, width=clip.w * desiredHeight / clip.h , height=desiredHeight)
		clips.append(clip)
		count += 1
		if (count == NUM_SEGMENTS_MAX):
			# Batch full: write video + subtitles, then reset for the next batch.
			superClip = mpy.concatenate_videoclips(clips, method="compose")
			superClip.write_videofile(outputBase + str(superclips) + ".mp4")
			superClip = None  # drop the reference so gc can reclaim the frames
			clips = []
			newSubs.save(outputBase + str(superclips) + ".srt")
			gc.collect()
			count = 0
			superclips += 1
			newSubs = pysrt.srtfile.SubRipFile()

	print "Concatenating clips"
	# NOTE(review): if the input size is an exact multiple of
	# NUM_SEGMENTS_MAX, `clips` is empty here and concatenate_videoclips
	# will fail -- confirm expected input sizes.
	superClip = mpy.concatenate_videoclips(clips, method="compose")
	print "Writing final clip"
	if (superclips > 0):
		superClip.write_videofile(outputBase + str(superclips) + ".mp4")
		newSubs.save(outputBase + str(superclips) + ".srt")
	else:
		# No full batch was ever written: single un-numbered output file.
		superClip.write_videofile(outputBase + ".mp4")
		newSubs.save(outputBase + ".srt")
	superClip = None
	clips = []
	newSubs = None
	gc.collect()
def prepare_video(vid, thumbnail_frame_ts=0.0,
                  max_size=(1080, 1350),
                  aspect_ratios=(4.0 / 5.0, 90.0 / 47.0),
                  max_duration=60.0,
                  save_path=None,
                  skip_reencoding=False,
                  **kwargs):
    """
    Prepares a video file for posting.
    Defaults for size and aspect ratio from https://help.instagram.com/1469029763400082

    :param vid: file path
    :param thumbnail_frame_ts: the frame of clip corresponding to time t (in seconds) to be used as the thumbnail
    :param max_size: tuple of (max_width,  max_height)
    :param aspect_ratios: single float value or tuple of (min_ratio, max_ratio)
    :param max_duration: maximum video duration in seconds
    :param save_path: optional output video file path
    :param skip_reencoding: if set to True, the file will not be re-encoded
        if there are no modifications required. Default: False.
    :param kwargs:
         - **min_size**: tuple of (min_width,  min_height)
         - **progress_bar**: bool flag to show/hide progress bar
         - **save_only**: bool flag to return only the path to the saved video file. Requires save_path be set.
         - **preset**: Sets the time that FFMPEG will spend optimizing the compression.
         Choices are: ultrafast, superfast, veryfast, faster, fast, medium,
         slow, slower, veryslow, placebo. Note that this does not impact
         the quality of the video, only the size of the video file. So
         choose ultrafast when you are in a hurry and file size does not matter.
    :return: tuple of (video content or path, (width, height), duration, thumbnail bytes)
    """
    from moviepy.video.io.VideoFileClip import VideoFileClip
    from moviepy.video.fx.all import resize, crop

    min_size = kwargs.pop('min_size', (612, 320))
    # moviepy's write_videofile logger: 'bar' shows a progress bar, None is silent.
    logger = 'bar' if kwargs.pop('progress_bar', None) else None
    save_only = kwargs.pop('save_only', False)
    preset = kwargs.pop('preset', 'medium')
    if save_only and not save_path:
        raise ValueError('"save_path" cannot be empty.')
    if save_path:
        if not save_path.lower().endswith('.mp4'):
            raise ValueError('You must specify a .mp4 save path')

    vid_is_modified = False     # flag to track if re-encoding can be skipped

    # NOTE(review): the delete=False temp files created below are never
    # removed by this function; they linger in the temp dir.
    temp_video_file = tempfile.NamedTemporaryFile(prefix='ipae_', suffix='.mp4', delete=False)

    if is_remote(vid):
        # Download remote file
        res = requests.get(vid)
        temp_video_file.write(res.content)
        video_src_filename = temp_video_file.name
    else:
        # Local file: work on a copy so the original is never touched.
        shutil.copyfile(vid, temp_video_file.name)
        video_src_filename = vid

    # Ref: https://github.com/Zulko/moviepy/issues/833#issuecomment-537885162
    with VideoFileClip(temp_video_file.name) as vidclip:

        if vidclip.duration < 3 * 1.0:
            raise ValueError('Duration is too short')

        # Too-long clips are trimmed, not rejected.
        if vidclip.duration > max_duration * 1.0:
            vidclip = vidclip.subclip(0, max_duration)
            vid_is_modified = True

        if thumbnail_frame_ts > vidclip.duration:
            raise ValueError('Invalid thumbnail frame')

        # Crop to an allowed aspect ratio, if one is required.
        if aspect_ratios:
            crop_box = calc_crop(aspect_ratios, vidclip.size)
            if crop_box:
                vidclip = crop(vidclip, x1=crop_box[0], y1=crop_box[1], x2=crop_box[2], y2=crop_box[3])
                vid_is_modified = True

        # Scale into the [min_size, max_size] bounds, if needed.
        if max_size or min_size:
            new_size = calc_resize(max_size, vidclip.size, min_size=min_size)
            if new_size:
                vidclip = resize(vidclip, newsize=new_size)
                vid_is_modified = True

        temp_vid_output_file = tempfile.NamedTemporaryFile(prefix='ipae_', suffix='.mp4', delete=False)
        if vid_is_modified or not skip_reencoding:
            # write out
            vidclip.write_videofile(
                temp_vid_output_file.name, codec='libx264', audio=True, audio_codec='aac',
                verbose=False, logger=logger, preset=preset, remove_temp=True)
        else:
            # no reencoding
            shutil.copyfile(video_src_filename, temp_vid_output_file.name)

        if save_path:
            shutil.copyfile(temp_vid_output_file.name, save_path)

        # Temp thumbnail img filename
        temp_thumbnail_file = tempfile.NamedTemporaryFile(prefix='ipae_', suffix='.jpg', delete=False)
        vidclip.save_frame(temp_thumbnail_file.name, t=thumbnail_frame_ts)

        video_duration = vidclip.duration
        video_size = vidclip.size

        # save_frame wrote via the path; the still-open handle is at
        # position 0, so read() returns the whole image.
        video_thumbnail_content = temp_thumbnail_file.read()

        if not save_only:
            video_content_len = os.path.getsize(temp_vid_output_file.name)
            video_content = temp_vid_output_file.read()
        else:
            video_content_len = os.path.getsize(save_path)
            video_content = save_path    # return the file path instead

        # Hard size cap -- presumably the upload service's limit; verify.
        if video_content_len > 50 * 1024 * 1000:
            raise ValueError('Video file is too big.')

        return video_content, video_size, video_duration, video_thumbnail_content
Exemplo n.º 9
0
def randomizeClip(clip, x):
    """Apply the moviepy vfx effect named by *x* to *clip* with randomized
    parameters and return the resulting clip.

    Unsupported names ("crop", "scroll", "headblur", anything unrecognised)
    and any effect that raises are handled by returning the clip unchanged,
    so this is always safe to call.
    """
    try:
        if x == "accel_decel":
            # int() around np.floor: random.randint rejects float bounds.
            dur = random.randint(0, int(np.floor(clip.duration)) * 2)
            if dur == 0:
                # Bug fix: was `dur == None` (a no-op comparison), so a zero
                # duration was passed through instead of "keep duration".
                dur = None
            a = random.uniform(-1, 1)
            s = random.uniform(0, 100)
            return vfx.accel_decel(clip, new_duration=dur, abruptness=a, soonness=s)
        elif x == "blackwhite":
            return vfx.blackwhite(clip)
        elif x == "blink":
            do = random.randint(0, 10)
            doff = random.randint(0, 10)
            return vfx.blink(clip, d_on=do, d_off=doff)
        elif x == "colorx":
            factor = random.randint(1, 1000)
            return vfx.colorx(clip, factor=factor)
        elif x == "crop":
            return clip
        elif x == "even_size":
            return vfx.even_size(clip)
        elif x == "fadein":
            d = random.randint(0, int(np.floor(clip.duration)))
            i = random.random()
            return vfx.fadein(clip, d, i)
        elif x == "fadeout":
            d = random.randint(0, int(np.floor(clip.duration)))
            i = random.random()
            return vfx.fadeout(clip, d, i)
        elif x == "freeze":
            t = random.randint(0, int(np.floor(clip.duration)))
            td = random.randint(0, int(np.floor(clip.duration)))
            return vfx.freeze(clip, t=t, total_duration=td)
        elif x == "freeze_region":
            # NOTE(review): clip.size is (w, h) but frame arrays are (h, w);
            # the random mask orientation may be transposed -- confirm.
            return vfx.freeze_region(clip, mask=ImageClip(np.random.rand(clip.size[0], clip.size[1]), ismask=True))
        elif x == "gamma_corr":
            g = random.randint(0, 10)
            return vfx.gamma_corr(clip, g)
        elif x == "headblur":
            # Bug fix: this branch fell through and returned None; return
            # the clip unchanged like the other unsupported effects.
            return clip
        elif x == "invert_colors":
            return vfx.invert_colors(clip)
        elif x == "loop":
            ls = random.randint(0, 10)
            return vfx.loop(clip, n=ls)
        elif x == "lum_contrast":
            return vfx.lum_contrast(clip)
        elif x == "make_loopable":
            ls = random.randint(0, int(np.floor(clip.duration)))
            return vfx.make_loopable(clip, ls)
        elif x == "margin":
            # Bug fix: clip.size is a tuple; it was called like a function
            # (clip.size(0)).
            s = clip.size[0] / random.randint(2, 10)
            o = random.random()
            return vfx.margin(clip, left=s, right=s, top=s, bottom=s, opacity=o)
        elif x == "mask_and":
            return vfx.mask_and(clip, ImageClip(np.random.rand(clip.size[0], clip.size[1]), ismask=True))
        elif x == "mask_color":
            thr = random.random()
            return vfx.mask_color(clip, thr=thr)
        elif x == "mask_or":
            return vfx.mask_or(clip, ImageClip(np.random.rand(clip.size[0], clip.size[1]), ismask=True))
        elif x == "mirror_x":
            return vfx.mirror_x(clip)
        elif x == "mirror_y":
            return vfx.mirror_y(clip)
        elif x == "painting":
            s = random.uniform(0, np.floor(clip.duration))
            b = random.randint(0, 100)/1000
            return vfx.painting(clip, saturation=s, black=b)
        elif x == "resize":
            u = random.random()
            return vfx.resize(clip, u)
        elif x == "rotate":
            u = random.uniform(0, 360)
            return vfx.rotate(clip, u)
        elif x == "scroll":
            return clip
        elif x == "speedx":
            u = random.uniform(0, 100)
            return vfx.speedx(clip, u)
        elif x == "supersample":
            g = random.randint(0, 10)
            # Bug fix: attribute typo `duriation` -> `duration`.
            d = int(clip.duration/2)
            return vfx.supersample(clip, d, g)
        elif x == "time_mirror":
            return vfx.time_mirror(clip)
        elif x == "time_symmetrize":
            return vfx.time_symmetrize(clip)
        else:
            return clip
    except Exception:
        # Best-effort: any failing effect leaves the clip untouched.
        # (Narrowed from a bare `except:`.)
        return clip
Exemplo n.º 10
0

# Iterate over all files in files folder
for file in os.listdir(folder):
    print("")  # visual separator between files in the console output

    if "mp4" not in file:  # check if file is not a valid video file
        if file != ".input" and file != ".output":
            print(f"File '{file}' is not a mp4! Skipping...")
        continue

    # Get a few values to be able to calculate the bitrate we would like to reach
    origsize = os.path.getsize('./files/' + file) / 1000000  # in MB
    origclip = VideoFileClip('./files/' + file)
    origduration = origclip.duration

    # Resize if the user wishes (expects "-res WIDTHxHEIGHT" in arguments)
    if "-res" in arguments:
        newres = arguments[arguments.index("-res") + 1].split("x")
        print(
            f"Resizing the clip from {origclip.size[0]}x{origclip.size[1]} to {newres[0]}x{newres[1]}"
        )

        # RESIZE!
        # Bug fix: split() yields strings; resize needs numeric dimensions.
        import moviepy.video.fx.all as vfx
        origclip = vfx.resize(origclip, (int(newres[0]), int(newres[1])))  # pylint: disable=no-member

    # Calculate goalbitrate and export
    goalbitrate = 8 * targetsize / origduration  # estimated bitrate we need to have to reach the provided filesize
    goalbitrate = goalbitrate - 0.15  # subtract a little bit for good measures to maybe avoid unnecessary retries
    exportvideo(goalbitrate, 1, 0)
Exemplo n.º 11
0
def compose(instruments, midipattern, width, height, source_dir,
            volume_file_name, num_threads, instrument_config_file):
    """Render a midi pattern into one composite video clip.

    Analyses each midi track, renders every track to its own clip file in
    parallel worker processes (at most ``num_threads`` running at once),
    then lays the per-track clips out via _partition and returns the
    composite ``width`` x ``height`` clip.
    """
    _create_working_dir()
    volumes = _try_load_json_file(volume_file_name)
    instrument_config = _try_load_json_file(instrument_config_file)
    tempo = midiparse.get_tempo(midipattern)
    resolution = midiparse.get_resolution(midipattern)
    # Seconds per midi pulse.
    pulse_length = 60.0 / (tempo * resolution)

    # analysed_tracks :: {(name1, name2, ...): (notes, max_velocity)}
    analysed_tracks = _analyse_all_tracks(midipattern, resolution)

    track_clip_file_names = []
    total_num_tracks = len(analysed_tracks)
    processes = []
    # Build (but do not start) one worker process per track; each gets its
    # own queue for progress/error messages.
    for instrument_names, (notes, max_velocity) in analysed_tracks.items():

        file_name = os.path.join(WORKING_DIR_NAME,
                                 '-'.join(instrument_names) + '.mp4')
        track_clip_file_names.append((len(notes), file_name))
        queue = multiprocessing.Queue()
        args = (instruments, instrument_names, source_dir, instrument_config,
                notes, pulse_length, width, height, max_velocity, queue,
                file_name, volumes, total_num_tracks)
        process = multiprocessing.Process(target=_process_track, args=args)
        processes.append((instrument_names, process, queue))

    running_processes = []

    progress_bar = bar.ChargingBar('', max=total_num_tracks)
    done = False
    num_processed_tracks = 0
    # Scheduler loop: poll worker queues, reap finished workers, and keep
    # up to num_threads workers running until every track is rendered.
    while not done:
        time.sleep(0.1)  # avoid busy-waiting on the queues
        done_instruments = []
        for instrument_names, process, queue in running_processes:
            while not queue.empty():
                msg_type, contents = queue.get()
                if msg_type == MSG_PROCESSED_SEGMENT:
                    num_processed_tracks += 1
                    progress_bar.next()
                elif msg_type == MSG_DONE:
                    done_instruments.append(instrument_names)
                elif msg_type == MSG_FATAL_ERROR:
                    # The worker forwarded an exception object; re-raise it here.
                    raise contents

        processes_changed = False

        # remove the instruments that are done
        for instrument_names in done_instruments:
            index = 0
            for i, (i_name, p, q) in enumerate(running_processes):
                if instrument_names == i_name:
                    index = i
                    break
            _, process, queue = running_processes.pop(index)
            process.join()
            processes_changed = True

        # Top up the pool with queued (not yet started) workers.
        while len(running_processes) < num_threads and len(processes) > 0:
            p = processes.pop()
            p[1].start()
            running_processes.append(p)
            processes_changed = True

        if not running_processes:
            done = True

        if processes_changed and not done:
            # Print which instruments are currently being processed.
            progress_message = "Processing instruments: "
            for names, _, _ in running_processes:
                progress_message += '(' + ', '.join(names) + ')' + ', '
            progress_message = '\n' + progress_message[:-2]
            print(progress_message)

    progress_bar.finish()

    # Sort tracks by note count, descending, before laying them out.
    track_clip_file_names.sort(key=lambda k: k[0], reverse=True)

    final_clips = []
    for i, (_, file_name) in enumerate(track_clip_file_names):
        clip = edit.VideoFileClip(file_name)
        # Grid slot for track i among all rendered tracks.
        x, y, w, h = _partition(width, height, len(track_clip_file_names), i)
        final_clips.append(
            fx.resize(clip, newsize=(w, h)).set_position((x, y)))
    return edit.CompositeVideoClip(size=(width, height), clips=final_clips)
Exemplo n.º 12
0
    clip: editor.ImageClip = clip.set_duration(5)

    # -------------------------------------------------------------
    # CLIP TRANSFORMATIONS
    # Applying different video effects and styling to all Clips.
    # -------------------------------------------------------------
    clip: editor.ImageClip = vfx.fadein(clip, duration=1)
    clip: editor.ImageClip = vfx.fadeout(clip, duration=1)
    clip: editor.ImageClip = vfx.lum_contrast(clip, contrast=0.2, lum=3)

    # -------------------------------------------------------------
    # RESIZING VIDEO
    # https://zulko.github.io/moviepy/ref/videofx/moviepy.video.fx.all.resize.html
    # -------------------------------------------------------------
    if clip.size[0] > max_width:
        clip: editor.ImageClip = vfx.resize(clip, width=max_width)
    if clip.size[1] > max_height:
        clip: editor.ImageClip = vfx.resize(clip, height=max_height)

    # -------------------------------------------------------------
    # NEXT CLIP
    # Appending each Clip to the list of Clips.
    # -------------------------------------------------------------
    clips.insert(i, clip)
    names.insert(i, filename)

# -------------------------------------------------------------
# CLIPS MERGE
# Conctenating all clips into a single long video.
# -------------------------------------------------------------
final: editor.VideoFileClip = editor.concatenate_videoclips(clips,