Example #1
def repeat_frames(videofile, segment_length, repeat):
    original_video = mp.VideoFileClip(videofile)
    duration = original_video.duration

    clips = []

    clip_start = 0
    while clip_start < duration:
        clip_end = clip_start + segment_length

        if clip_end > duration:
            clip_end = duration

        clip = original_video.subclip(clip_start, clip_end)

        for _ in range(repeat):
            clips.append(clip)

        clip_start = clip_end

    final_video = mp.concatenate(clips)

    final_video.write_videofile('repeated.mp4',
                                codec="libx264",
                                temp_audiofile='temp-audio.m4a',
                                remove_temp=True,
                                audio_codec='aac')
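
# A hedged usage sketch, not part of the original example: repeats every
# 2-second segment of "input.mp4" (an assumed local file) three times.
repeat_frames("input.mp4", segment_length=2, repeat=3)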
Example #2
def create_supercut_in_batches(composition, outputfile, padding):
    """Create & concatenate video clips in groups of size BATCH_SIZE and output
    finished video file to output directory.
    """
    total_clips = len(composition)
    start_index = 0
    end_index = BATCH_SIZE
    batch_comp = []
    while start_index < total_clips:
        filename = outputfile + '.tmp' + str(start_index) + '.mp4'
        try:
            create_supercut(composition[start_index:end_index], filename, padding)
            batch_comp.append(filename)
            gc.collect()
            start_index += BATCH_SIZE
            end_index += BATCH_SIZE
        except Exception:
            start_index += BATCH_SIZE
            end_index += BATCH_SIZE
            continue

    clips = [VideoFileClip(filename) for filename in batch_comp]
    video = concatenate(clips)
    video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')


    # remove partial video files
    for filename in batch_comp:
        os.remove(filename)

    cleanup_log_files(outputfile)
Example #3
def hebin(mp4_path, target_path):
    # list to collect the clips
    L = []

    # walk the video folder (assuming all the segments are in there)
    for root, dirs, files in os.walk(mp4_path):
        # sort files by name (natural order)
        files = natsorted(files)
        # iterate over all files
        print(files)
        for file in files:
            # only keep .ts segments
            if file.endswith('.ts'):
                # build the full path
                filePath = os.path.join(root, file)
                print(filePath)
                # load the clip
                video = VideoFileClip(filePath)
                print(video)
                # add it to the list
                L.append(video)

    # concatenate the clips
    final_clip = concatenate(L)

    # write the target video file
    final_clip.to_videofile(target_path + '.mp4', fps=24, remove_temp=False)
Example #4
def convert_to_video(imgs):
    fps = 30
    clips = [ImageClip(m).set_duration(1 / fps) for m in imgs]
    folder = "/".join(imgs[0].split("/")[:-1])
    video = concatenate(clips, method="compose")
    filename = '%s/video.mp4' % folder
    video.write_videofile(filename, fps=fps)
    return filename
Example #5
def video_render(txt_file,image_file,sound_file,save_file):
        from moviepy.editor import ImageClip
        from moviepy.editor import CompositeVideoClip
        from moviepy.editor import CompositeAudioClip
        from moviepy.editor import TextClip
        from moviepy.editor import AudioFileClip
        from moviepy.editor import concatenate
        from moviepy.config import change_settings
        import re
        change_settings({"IMAGEMAGICK_BINARY": "/usr/local/bin/convert"})
        text=[]
        
        with open(txt_file,'r') as file:
            for lines in file:
                if lines!="\n":
                    text.append(lines.rstrip('\n'))
        durs=[]
        for i in text:            
            res = len(re.findall(r'\w+', i)) 
            if res/2>3:
                durs.append(res/2)
            else:
                durs.append(3)
        total_duration=sum(durs)
        
        a_clip = AudioFileClip(sound_file)
        if a_clip.duration<total_duration:
            new_audioclip = CompositeAudioClip([a_clip, a_clip.set_start(a_clip.duration-1)]).set_duration(total_duration+3)
        else:
            new_audioclip=a_clip.set_duration(total_duration+3)
        
        screen=(1920,1080)
        clip_list = []
        i=0
        for string in text:
            duration=durs[i]
            i+=1
            try:
                txt_clip = TextClip(string, fontsize = 70, color = 'white', method='caption',size=screen ).set_duration(duration).set_pos('center')
                clip_list.append(txt_clip)
            except UnicodeEncodeError:
                txt_clip = TextClip("Issue with text", fontsize = 70, color = 'white').set_duration(2) 
                clip_list.append(txt_clip)
        
        final_text_clip = concatenate(clip_list, method = "compose").set_start(3)  
            
        v_clip = ImageClip(image_file).set_duration(total_duration+3)
        video=CompositeVideoClip([v_clip, final_text_clip])
        # video = video.set_audio(AudioFileClip('sound/Serenity (1).mp3'))
        video = video.set_audio(new_audioclip)
        video.write_videofile(save_file, 
                              codec='libx264',
                              fps=10, 
                              threads=4,
                              audio_codec='aac', 
                              temp_audiofile='temp-audio.m4a', 
                              remove_temp=True
                              )
Example #6
def process_video(filename, overwrite=False, max_width=1600, max_height=1600, max_file_size=5*1024**2, gifdir='gifs/'):

    gif_name = gifdir + filename + '.gif'

    if isfile(gif_name) and not overwrite:
        print("Skipping " + gif_name + " as it already exists.")
        return 
    
    video_file = VideoFileClip(filename)

    try:
        assert_approx_equal(float(video_file.w)/float(video_file.h), 16.0/9.0)
        video_file = video_file.crop(x1=video_file.w/8, x2=7*video_file.w/8)
    except AssertionError:
        print("Not resizing video.")

    if video_file.h > max_height:
        video_file = video_file.resize(height=max_height)

    if video_file.w > max_width:
        video_file = video_file.resize(width=max_width)

    end_image = video_file.to_ImageClip(video_file.end-(1/video_file.fps)).set_duration(0.7)
    
    video_file = concatenate([video_file, end_image])
    fadein_video_file = CompositeVideoClip(
        [video_file,
         (video_file.to_ImageClip()
          .set_duration(0.7)
          .crossfadein(0.4)
          .set_start(video_file.duration-0.7)),
     ]
    )
    
    logo_size = video_file.h/6
    text = ImageClip(
        expanduser("~/dropbox/bslparlour/twitter_logo2.png")).set_duration(
            video_file.duration).resize(width=logo_size).set_pos(
                (video_file.w-logo_size,video_file.h-logo_size))


    composite_video_file = CompositeVideoClip([fadein_video_file, text])
    composite_video_file.write_gif(gif_name,fps=20)

    fuzz_amt = 5
    commands = 'gifsicle "'+gif_name+'" -O3 | convert -fuzz '+str(fuzz_amt)+'% - -ordered-dither o8x8,16 -layers optimize-transparency "'+gif_name+'"'

    process = call(commands, shell=True)

    if getsize(gif_name) > max_file_size:
        process_video(filename,
                      max_height=video_file.h*0.95,
                      overwrite=True,
                      gifdir=gifdir,
                      max_file_size=max_file_size)
Example #7
def make_gif_with_count(images,
                        counts,
                        fname,
                        duration=2,
                        true_image=False,
                        salience=False,
                        salIMGS=None):
    import moviepy.editor as mpy

    def make_frame(t):
        try:
            x = images[int(len(images) / duration * t)]
        except IndexError:
            x = images[-1]

        if true_image:
            return x.astype(np.uint8)
        else:
            return ((x + 1) / 2 * 255).astype(np.uint8)

    def make_mask(t):
        try:
            x = salIMGS[int(len(salIMGS) / duration * t)]
        except IndexError:
            x = salIMGS[-1]
        return x

    clips = []
    num_frame = len(images)
    for f in range(num_frame):
        txtClip = mpy.TextClip(str(counts[f]),
                               color='white',
                               font="Amiri-Bold",
                               kerning=5,
                               fontsize=10)
        _clip = mpy.ImageClip(images[f])
        _clip = mpy.CompositeVideoClip([_clip, txtClip])
        _clip.duration = duration / num_frame
        clips.append(_clip)
    clip = mpy.concatenate(clips)
    if salience:
        mask = mpy.VideoClip(make_mask, ismask=True, duration=duration)
        clipB = clip.set_mask(mask)
        clipB = clipB.set_opacity(0)
        mask = mask.set_opacity(0.1)
        mask.write_gif(fname, fps=len(images) / duration, verbose=False)
        # clipB.write_gif(fname, fps = len(images) / duration,verbose=False)
    else:
        clip.write_gif(fname, fps=len(images) / duration, verbose=False)
Example #8
def write_HL_video(mp4_saveName, HL_list):
    full_video = os.listdir('videos/')[0]
    video = mp.VideoFileClip('videos/' + full_video)

    clips = []
    for HL in HL_list:
        clip = video.subclip(HL[0], HL[1])
        clips.append(clip)

    faded_clips = [clip.crossfadein(1) for clip in clips]
    final_clip = concatenate(faded_clips, padding=-1, method="compose")
    final_clip.write_videofile('output/' + mp4_saveName + '.mp4',
                               threads=12,
                               fps=23.976)
    video.close()
Example #9
def assemble_cuts(inputfile, cuts, outputfile):
    """ Concatenate cuts and generate a video file. """
    '''
    input :::
    inputfile (str) : 작업할 비디오 파일 dir.
    cuts (list) : 클립할 영상의 시작점과 끝점을 넣는다. [[시작점1, 끝점1],[시작점2,끝점2],...]
    outputfile (str) : 저장할 파일명
    
    return ::: 편집된 영상파일이 저장된다.
    (None) 
    '''
    video = VideoFileClip(inputfile)
    final = concatenate([video.subclip(start, end) for (start, end) in cuts])
    # use this if you want to set the directory the file is saved to.
    # os.chdir('/home/pirl/PycharmProjects/NAVER_hack/save_video')
    final.to_videofile(outputfile)
Example #10
def render_video(sentences, output_path, audio_path):
    print("Rendering video...")
    image_slides = []
    for key, sentence in enumerate(sentences):
        image_slide = ImageClip("{}{}".format(
            key, CONVERTED_IMAGE_SUFFIX)).set_duration(10)
        text_slide = ImageClip("{}{}{}".format(
            key, SENTENCE_IMAGE_TAG, CONVERTED_IMAGE_SUFFIX)).set_duration(10)
        slided_slide = text_slide.fx(transfx.slide_in, 1,
                                     get_slide_position_by_sentence_key(key))
        slides_video = CompositeVideoClip([image_slide, slided_slide])
        image_slides.append(slides_video)

    final_video = concatenate(image_slides)
    final_video.write_videofile(output_path,
                                audio=audio_path,
                                fps=DEFAULT_VIDEO_FPS)
Example #11
def create_supercut(composition, outputfile, padding):
    """Concatenate video clips together and output finished video file to the
    output directory.
    """
    print("[+] Creating clips.")
    demo_supercut(composition, padding)

    # add padding when necessary
    for (clip, nextclip) in zip(composition, composition[1:]):
        if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
            nextclip['start'] += padding

    # put all clips together:
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]

    print("[+] Concatenating clips.")
    final_clip = concatenate(cut_clips)

    print("[+] Writing ouput file.")
    final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
Example #12
def process_video(filename, video_height=480, overwrite=False):

    gif_name = 'gifs/' + filename + '.gif'

    if isfile(gif_name) and not overwrite:
        print("Skipping " + gif_name + " as it already exists.")
        return 
    
    video_file = VideoFileClip(filename)

    try:
        assert_approx_equal(float(video_file.w)/float(video_file.h), 16.0/9.0)
        video_file = video_file.crop(x1=video_file.w/8, x2=7*video_file.w/8)
    except AssertionError:
        print("Not resizing video.")


    video_file = video_file.resize(height=video_height)

    end_image = video_file.to_ImageClip(0).set_duration(0.7)
    
    video_file = concatenate([video_file, end_image])

    logo_size = video_height/6
    text = ImageClip(expanduser("~/dropbox/bslparlour/twitter_logo2.png")).set_duration(video_file.duration).resize(width=logo_size).set_pos((video_file.w-logo_size,video_file.h-logo_size))


    composite_video_file = CompositeVideoClip([video_file, text])
    composite_video_file.write_gif(gif_name,fps=20)

    fuzz_amt = 5
    commands = 'gifsicle "'+gif_name+'" -O3 | convert -fuzz '+str(fuzz_amt)+'% - -ordered-dither o8x8,16 -layers optimize-transparency "'+gif_name+'"'

    process = call(commands, shell=True)

    if getsize(gif_name) > 5*1024**2:
        process_video(filename, video_height=int(video_height*0.75), overwrite=True)
Example #13
def similar_srt(seed, srtfiles, total=20):
    t = AnnoyIndex(384, metric='angular')
    sentences = []
    alllines = []
    random.shuffle(srtfiles)
    for srt in srtfiles[0:10]:
        lines = clean_srt(srt)
        sentences += [lines[k] for k in lines]
        alllines += [(k, lines[k], srt) for k in lines]

    out = []
    seedline = seed
    while len(out) < total:
        try:
            sentences.remove(seedline)
        except ValueError:
            pass
        nextline = nearest_neighbor(sentences, seedline, t)
        options = [l for l in alllines if l[1].lower().strip() == nextline.lower().strip()]
        print(nextline)
        out.append(options)
        seedline = nextline

    clips = []
    vids = {}

    for o in out:
        ts, text, srt = random.choice(o)
        vid = srt.replace('.srt', '.mp4')
        if vid not in vids:
            vids[vid] = VideoFileClip(vid)
        start, end = convert_timespan(ts)
        clip = vids[vid].subclip(start, end + 0.5)
        clips.append(clip)

    comp = concatenate(clips)
    comp.write_videofile('ok.mp4')
Example #14
from moviepy.editor import ImageClip, concatenate
from os.path import isfile

start = 459
end = 706
height = 640
width = 480

image_paths = ['images/DSC_0{}.jpg'.format(i) for i in range(start, end+1)]

clips = []
for path in image_paths:
    if not isfile(path):
        continue
    clip = ImageClip(path).set_duration(0.04)
    clips.append(clip)

video = concatenate(clips).resize((height, width))
video.write_videofile('movie.mp4')
Example #15
def delete_file_if_exists(filename):
    if os.path.exists(filename):
        os.remove(filename)


def get_video(path, with_audio):
    video = VideoFileClip(path).subclip(start_sec, end_sec)
    if not with_audio:
        video = video.without_audio()
    if do_downscale:
        video = video.resize(0.25)
    return video


preds = np.load(preds_path)[start_frame:end_frame]
video1 = get_video(video1_path, with_audio=True)
video4 = get_video(video4_path, with_audio=False)

stacked_video = clips_array([[video1, video4]])

text_clip = concatenate([pred_to_text_clip(pred) for pred in preds],
                        method="compose")

print("creating compositevideoclip")
result = CompositeVideoClip([stacked_video, text_clip])

# so we don't get permission issues when writing
delete_file_if_exists(out_filename)

print("writing video out to: {}".format(out_filename))
result.write_videofile(out_filename, fps=fps, codec='mpeg4')
Example #16
def slice_video(video, timeRanges):
    """
    We use Zulko's excellent moviepy, and a line from
    http://zulko.github.io/blog/2014/06/21/some-more-videogreping-with-python/
    """
    return concatenate([video.subclip(start, end) for (start, end) in timeRanges])
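
# A hedged usage sketch, not part of the original example: "match.mp4" and the
# (start, end) ranges in seconds are assumptions for illustration, and
# VideoFileClip is assumed to be imported from moviepy.editor.
video = VideoFileClip("match.mp4")
highlights = slice_video(video, [(5.0, 12.0), (40.0, 48.0)])
highlights.write_videofile("sliced.mp4")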
Example #17
w, h = d.textsize(msg, fnt)
bounding_box = [0, 0, W, H]
x1, y1, x2, y2 = bounding_box

x = (x2 - x1 - w) / 2 + x1
y = (y2 - y1 - h) / 2 + y1

d.multiline_text((x, y),
                 msg,
                 font=fnt,
                 fill=(255, 255, 255),
                 align='center',
                 spacing=20)

img.save("./temp_titlecard.jpg")

inputVideoClip = mpy.VideoFileClip("./input.mp4")

imgClip = mpy.ImageClip("./temp_titlecard.jpg")
audio = mpy.AudioFileClip("./always-sunny-theme.mp3").set_duration(5)

imgClip = imgClip.set_duration(audio.duration)
imgClip = imgClip.set_audio(audio)

clips = [inputVideoClip, imgClip]
vid = mpy.concatenate(clips)
vid.write_videofile("meme.mp4", fps=60)

if os.path.exists("temp_titlecard.jpg"):
    os.remove("temp_titlecard.jpg")
Example #18
def createHighlights(inDir, inFile, inExt, compression_rate):
	inDir = inDir + inFile + inExt

	cap = cv2.VideoCapture(inDir)
	clip = VideoFileClip(inDir)

	fps = int(cap.get(cv2.CAP_PROP_FPS))
	num_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)

	audiopeaks = getAudioPeaks(inDir)

	num_seconds = (num_frames/fps)*compression_rate  # seconds = frames / fps, scaled by the compression rate

	max_interval = ( num_seconds/(2*len(audiopeaks)) )*fps
	audiopeaks[:] = [fps*x for x in audiopeaks]
	
	print("FPS: " + str(fps) + " \n#Frames: " + str(num_frames))
	print('Found audio peaks at frames:\n\t' + str(audiopeaks))

	best_cuts = []

	for peak in audiopeaks:
		print("\tFinding before and after cuts for audio peak: " + str(peak))
		currt = current_milli_time()
		start = max(0, peak - max_interval)
		end = min(int(num_frames), peak + max_interval)
		
		print("\t\tbefore:")
		before_cuts = getCutScenes(inDir, start, peak, 5, True)
		
		print("\t\t\t" + str(before_cuts))
		print("\t\tafter:")
		after_cuts = getCutScenes(inDir, peak, end, 5, True)

		print("\t\t\t" + str(after_cuts))

		best_cut = [before_cuts[0] if len(before_cuts) > 0 else start, after_cuts[0] if len(after_cuts) > 0 else end]
		extra_interval_allowance = max_interval*0.667

		print("\t\t\Initial bestcuts for audiopeak:" + str(best_cut))

		curr = peak
		for cut in before_cuts:
			if(curr - cut < extra_interval_allowance and curr - cut > 1):
				best_cut[0] = cut
				curr = cut
				print("\t\t\tUpdated beforecut to frame:" + str(curr))

		curr = peak
		for cut in after_cuts:
			if(cut - curr < extra_interval_allowance and cut - curr > 1):
				best_cut[1] = cut
				curr = cut
				print("\t\t\tUpdated aftercut to frame:" + str(curr))

		print("\t\tBestcuts for audiopeak:") 
		print("\t\t\t" + str(best_cut))
		best_cuts.append(best_cut)

		print("\tFinished finding cuts for " + str(peak) + " in " + str((current_milli_time() - currt) / 1000) + " secs")

	print("\n\nBest cuts:")
	print("\t" + str(best_cuts))

	final = concatenate([clip.subclip(s*1.0/fps,e*1.0/fps) for s,e in best_cuts])

	directory = 'results/' + inFile
	if not os.path.exists(directory):
		os.makedirs(directory)

	final.to_videofile(directory + '/hl.mp4') # low quality is the default
Example #19
############################## audio processing ##################################
print("Analyzing Audio...")
peak_times = get_peak_times(VIDEO_PATH)
print(peak_times)

############################## removing duplicate cuts ##################################

final_times = []
for peak in peak_times:
    index = find_gt(cuts, peak)
    if cuts[index] < peak:
        final_times.append((cuts[index], cuts[index + 1]))
    else:
        final_times.append((cuts[index - 1], cuts[index + 1]))

final_times = [t for t in (set(tuple(i) for i in final_times))]
print(final_times)

################################## rendering video  ######################################
print("rendering video...")
blockPrint()
clip = VideoFileClip(VIDEO_PATH)
final = concatenate([
    clip.subclip(max(int(t[0]), 0), min(int(t[1]), clip.duration))
    for t in final_times
])

enablePrint()
final.to_videofile('soccer_cuts.mp4', fps=24)  # low quality is the default
Example #20
def combine_clips(clip_list):
    video = mp.concatenate(clip_list, method="compose")
    return video
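
# A hedged usage sketch, not part of the original example: assumes two local
# files and the same "mp" alias for moviepy.editor used above.
clips = [mp.VideoFileClip("a.mp4"), mp.VideoFileClip("b.mp4")]
combine_clips(clips).write_videofile("combined.mp4")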
Example #21
def assemble_cuts(cuts, outputfile):
    """ Concatenate cuts and generate a video file. """
    final = concatenate([video.subclip(start, end)
                         for (start,end) in cuts])
    final.to_videofile(outputfile)
Example #22
"""
increases = np.diff(smooth_volumes)[:-1] >= 0
decreases = np.diff(smooth_volumes)[1:] <= 0
peak_times = (increases * decreases).nonzero()[0]
peak_volumes = smooth_volumes[peak_times]
peak_times = peak_times[peak_volumes > np.percentile(peak_volumes,90)]

"""
For at least sporting events we can refine the peak times to 
group those that are less than one minute apart. The assumption
is that these times most likely correspond to the same event
"""
highlight_times = [peak_times[0]]
for time in peak_times:
	if(time - highlight_times[-1]) < 60:
		if smooth_volumes[time] > smooth_volumes[highlight_times[-1]]:
			highlight_times[-1] = time #use the time with the highest volume in chunks of 60 sec
	else:
		highlight_times.append(time)

"""
Final times contains the times in seconds of the most important
events based on this naive sound model. For each event, we can now
cut the original video 5 seconds before its time and stop 5 seconds
after its time to get 11 second clips for each event. 
TODO: play around with this span
"""
final_highlights = concatenate([clip.subclip(max(time-5,0),min(time+5,clip.duration))
	for time in highlight_times])
final_highlights.to_videofile('barca_madrid_highlights.mp4',fps=60)
print "Reely is done generating highlight for the video"
Example #23
def assemble_cuts(cuts, outputfile):
    final = concatenate([video.subclip(start, end) for (start, end) in cuts])
    final.to_videofile(outputfile)
Example #24
    return video


def add_sound(local_sound_path, video):
    audioclip = mp.AudioFileClip(local_sound_path)
    video = video.set_audio(audioclip)
    return video


def random_rgb():
    r = np.random.randint(0, 255)
    g = np.random.randint(0, 255)
    b = np.random.randint(0, 255)
    return [r, g, b]


if __name__ == "__main__":
    # Random color
    rgb = random_rgb()
    audioclip = mp.AudioFileClip("Recording3.wav")
    clip_list = []
    start_clip = generate_blank_video((960, 540), rgb, 1)
    clip_list.append(start_clip)
    # Insert array of words for the song
    clip_list.extend(generate_text_clips(["Peter's", "New", "Song"]))
    clip_list.append(start_clip)
    video = mp.concatenate(clip_list, method="compose")
    video = video.set_audio(audioclip)
    video.write_videofile("lyric_video.mp4", fps=25)
    print("Mike is very hot")
Example #25
import numpy as np
from moviepy.editor import VideoFileClip, concatenate

clip = VideoFileClip("match.mp4")
cut = lambda i: clip.audio.subclip(i, i + 1).to_soundarray(fps=22000)
volume = lambda array: np.sqrt(((1.0 * array)**2).mean())
volumes = [volume(cut(i)) for i in range(0, int(clip.audio.duration - 2))]
averaged_volumes = np.array(
    [sum(volumes[i:i + 10]) / 10 for i in range(len(volumes) - 10)])

increases = np.diff(averaged_volumes)[:-1] >= 0
decreases = np.diff(averaged_volumes)[1:] <= 0
peaks_times = (increases * decreases).nonzero()[0]
peaks_vols = averaged_volumes[peaks_times]
peaks_times = peaks_times[peaks_vols > np.percentile(peaks_vols, 90)]

final_times = [peaks_times[0]]
for t in peaks_times:
    if (t - final_times[-1]) < 60:
        if averaged_volumes[t] > averaged_volumes[final_times[-1]]:
            final_times[-1] = t
    else:
        final_times.append(t)

final = concatenate([
    clip.subclip(max(t - 10, 0), min(t + 10, clip.duration))
    for t in final_times
])
final.to_videofile('highlights.mp4')
Example #26
                             frames[frame_index_2 + 1])):
                shot_end = frame_times[frame_index_2]
                break
            frame_index_2 += 1

        if (shot_end == shot_start):
            continue

        Final_Video.append((shot_start, shot_end))
        included_times = [
            x for x in included_times
            if not (x >= shot_start and x <= shot_end)
        ]
        print(cut_time, shot_start, shot_end)

    patch += 1

    if out:
        break

t2 = time.time()
print(t2 - t1)
################################## rendering video  ######################################
print("rendering video...")
clip = VideoFileClip(VIDEO_PATH)
final = concatenate([
    clip.subclip(max(int(t[0]), 0), min(int(t[1]), clip.duration))
    for t in Final_Video
])
final.to_videofile('soccer_cuts.mp4', fps=FPS)  # low quality is the default
Example #27
time = (artis * azalma).nonzero()[0]
vol = ortalama_volume[time]


time = time[vol>np.percentile(vol,80)] # this value sets which percentile of the volume values to keep; entered by the user.
finaltime=[time[0]]


for zaman in time:
    if (zaman - finaltime[-1]) < 60:
        if ortalama_volume[zaman] > ortalama_volume[finaltime[-1]]:
            finaltime[-1] = zaman
    else:
        finaltime.append(zaman)

# how many seconds before and after each cut moment to take is set here.
final = concatenate([clip.subclip(max(zaman-10,0), min(zaman+8, clip.duration))
                     for zaman in finaltime])
  

while success:
	# convert the image to the HSV color space
	hsv = cv2.cvtColor(image,cv2.COLOR_BGR2HSV)
	# white
	lower_beyaz = np.array([0,0,0])
	upper_beyaz = np.array([0,0,255])
	
	# green
	lower_yesil = np.array([40,40, 40])
	upper_yesil = np.array([70, 255, 255])
	
	# red
	lower_kirmizi = np.array([0,100,255])
Example #28
# clip3 = VideoFileClip("video/"+VIDEO_SUB_PATH+"/slide3.mp4")
clip4 = VideoFileClip("video/" + VIDEO_SUB_PATH + "/slide4.mp4")
clip5 = VideoFileClip("video/" + VIDEO_SUB_PATH + "/slide5.mp4")
clip6 = VideoFileClip("video/" + VIDEO_SUB_PATH + "/slide6.mp4")
clip7 = VideoFileClip("video/" + VIDEO_SUB_PATH + "/slide7.mp4")
# clip8 = VideoFileClip("video/"+VIDEO_SUB_PATH+"/slide6_1.mp4")
# clip9 = VideoFileClip("video/"+VIDEO_SUB_PATH+"/slide7_1.mp4")
# clip10 = VideoFileClip("video/"+VIDEO_SUB_PATH+"/slide8.mp4")
clip11 = VideoFileClip("video/" + VIDEO_SUB_PATH + "/slide10.mp4")

audio_background = AudioFileClip(BG_AUDIO)
final_audio = CompositeAudioClip([audio_background])

# slide1 = CompositeVideoClip([clip1.fx( transfx.crossfadein, delay)])
slide2 = CompositeVideoClip([clip2.fx(transfx.slide_in, delay, 'bottom')])
slide3 = CompositeVideoClip([clip11.fx(transfx.crossfadeout, 2)])
slide4 = CompositeVideoClip([clip4.fx(transfx.slide_out, delay, 'left')])
slide5 = CompositeVideoClip([clip5.fx(transfx.crossfadein, delay)])
slide6 = CompositeVideoClip([clip6.fx(transfx.crossfadein, delay)])
slide7 = CompositeVideoClip([clip7.fx(transfx.crossfadein, delay)])
# slide8 = CompositeVideoClip([clip8.fx( transfx.slide_in, delay, 'right')])
# slide9 = CompositeVideoClip([clip9.fx( transfx.crossfadein, delay)])
# slide10 = CompositeVideoClip([clip10.fx( transfx.crossfadein, delay)])
# slided_clips = concatenate([clip0, slide1, slide2, slide3, slide4, slide6, slide7, slide8, slide9, slide5], padding=-delay, method="compose")
# slided_clips = concatenate([clip0, slide1, slide2, slide6, slide7, slide8, slide9, slide10, slide4, slide3, slide5], padding=-delay, method="compose").set_audio(final_audio)
slided_clips = concatenate(
    [clip0, slide2, slide6, slide7, slide4, slide3, slide5],
    padding=-delay,
    method="compose").set_audio(final_audio)

slided_clips.write_videofile(output)
Example #29
def createVideo(outFile, final_times):
	#concatenate the highlights
	final = concatenate([clip.subclip(max(t-5,0),min(t+5, clip.duration))
	                     for t in final_times])
	#output the videofiles
	final.to_videofile('hl/hl_' + inFile) # low quality is the default
Example #30
increases = np.diff(averaged_volumes)[:-1] >= 0
decreases = np.diff(averaged_volumes)[1:] <= 0
peaks_times = (increases * decreases).nonzero()[0]
peaks_vols = averaged_volumes[peaks_times]
peaks_times = peaks_times[peaks_vols > np.percentile(peaks_vols, 80)]

final_times = [peaks_times[0]]
for t in peaks_times:
    if (t - final_times[-1]) < 20:
        if averaged_volumes[t] > averaged_volumes[final_times[-1]]:
            final_times[-1] = t
    else:
        final_times.append(t)

final = mpy.concatenate([
    clip.subclip(max(t - 2.5, 0), min(t + 2.5, clip.duration))
    for t in final_times
])

ui2 = input("Enter location and name of final clip, eg: d:/finalclip.mp4 ")
final.write_videofile(ui2)
dispclip = mpy.VideoFileClip(ui2)
#dispclip.preview()

print("Converting .mp4 to a .gif file: ")
ui3 = input("Enter gif file location and name to store, eg:d:/test.gif ")
dispclip.write_gif(ui3,
                   fps=None,
                   program='ffmpeg',
                   verbose=True,
                   loop=0,
                   dispose=False,
Example #31
    draw = ImageDraw.Draw(img)
    font = ImageFont.truetype(fontname, 32)
    draw.text((20, windowheight - 54), "Key Signature: {0}, Time Signature: {1}, Tempo: {2}".format(keysignature,  timesignature, round(tempolist[tempoidx-1][1], 3)),(255,255,255),font=font)
    elapsed = timedelta(seconds=min(curr/speed/fps, maxtime/speed/fps))
    endtime = timedelta(seconds=maxtime/speed/fps)
    draw.text((20, 22), "{0} / {1}".format(
        '%02d:%02d:%02d.%06d' % (elapsed.seconds // 3600, (elapsed.seconds // 60) % 60, elapsed.seconds % 60, elapsed.microseconds),
        '%02d:%02d:%02d.%06d' % (endtime.seconds // 3600, (endtime.seconds // 60) % 60, endtime.seconds % 60, endtime.microseconds)
    ),(255,255,255),font=font)
    frameimage = np.array(img)

    if saveframes: img.save('out/%06d.png' % (int(curr/speed)))

    clips.append(ImageClip(frameimage).set_duration(1/fps))
    if frameidx % recyclerate == recyclerate - 1: # write accumulated frames out as an intermediate chunk file
        video = concatenate(clips, method="compose")
        video.write_videofile('mem/%03d.mp4' % (frameidx//recyclerate), fps=fps, verbose=False, logger=None)
        clips = None
        del clips
        clips = []
        video = None
        del video


    if frameidx <= int(maxtime/speed):
        frameimage = None
        del frameimage

    bar.next()
    frameidx += 1
Example #32
    def anonymize_video(self,
                        video_path,
                        target_path,
                        start_frame=None,
                        end_frame=None,
                        with_keypoints=False,
                        anonymize_source=False,
                        max_face_size=1.0,
                        without_source=True):
        # Read original video
        original_video = mp.VideoFileClip(video_path)
        fps = original_video.fps
        total_frames = int(original_video.duration * original_video.fps)
        start_frame = 0 if start_frame is None else start_frame
        end_frame = total_frames if end_frame is None else end_frame
        assert start_frame <= end_frame, f"Start frame {start_frame} has to be smaller than end frame {end_frame}"
        assert end_frame <= total_frames, f"End frame ({end_frame}) is larger than number of frames {total_frames}"
        subclip = original_video.subclip(start_frame / fps, end_frame / fps)
        print("=" * 80)
        print("Anonymizing video.")
        print(
            f"Duration: {original_video.duration}. Total frames: {total_frames}, FPS: {fps}"
        )
        print(
            f"Anonymizing from: {start_frame}({start_frame/fps}), to: {end_frame}({end_frame/fps})"
        )

        frames = list(
            tqdm.tqdm(subclip.iter_frames(),
                      desc="Reading frames",
                      total=end_frame - start_frame))
        if with_keypoints:
            im_bboxes, im_keypoints = detection_api.batch_detect_faces_with_keypoints(
                frames)
            im_bboxes, im_keypoints = inference_utils.filter_image_bboxes(
                im_bboxes,
                im_keypoints, [im.shape for im in frames],
                max_face_size,
                filter_type="width")
            anonymized_frames = self.anonymize_images(frames, im_keypoints,
                                                      im_bboxes)
        else:
            im_bboxes = detection_api.batch_detect_faces(
                frames, self.face_threshold)
            im_keypoints = None
            anonymized_frames = self.anonymize_images(frames, im_bboxes)

        def make_frame(t):
            frame_idx = int(round(t * original_video.fps))
            anonymized_frame = anonymized_frames[frame_idx]
            orig_frame = frames[frame_idx]
            orig_frame = vis_utils.draw_faces_with_keypoints(
                orig_frame,
                im_bboxes[frame_idx],
                im_keypoints[frame_idx],
                radius=None,
                black_out_face=anonymize_source)
            if without_source:
                return np.concatenate((orig_frame, anonymized_frame), axis=1)
            return anonymized_frame

        anonymized_video = mp.VideoClip(make_frame)
        anonymized_video.duration = (end_frame - start_frame) / fps
        anonymized_video.fps = fps
        to_concatenate = []
        if start_frame != 0:
            to_concatenate.append(original_video.subclip(0, start_frame / fps))
        to_concatenate.append(anonymized_video)
        if end_frame != total_frames:
            to_concatenate.append(
                original_video.subclip(end_frame / fps, total_frames / fps))
        anonymized_video = mp.concatenate(to_concatenate)

        anonymized_video.audio = original_video.audio
        print("Anonymized video stats.")
        total_frames = int(anonymized_video.duration * anonymized_video.fps)
        print(
            f"Duration: {anonymized_video.duration}. Total frames: {total_frames}, FPS: {fps}"
        )
        print(
            f"Anonymizing from: {start_frame}({start_frame/fps}), to: {end_frame}({end_frame/fps})"
        )

        anonymized_video.write_videofile(target_path,
                                         fps=original_video.fps,
                                         audio_codec='aac')
Example #33
def time_symetrize(video_clip):
    return mp.concatenate([video_clip, video_clip.fx(mp.vfx.time_mirror)])
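
# A hedged usage sketch, not part of the original example: assumes "clip.mp4"
# exists; the result plays the clip forward, then in reverse.
palindrome = time_symetrize(mp.VideoFileClip("clip.mp4"))
palindrome.write_videofile("palindrome.mp4")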
Example #34
def make_scene(output_dir, input_data, seconds, transform_video,
               transform_data, api):
    '''
    Make a scene with the given parameters.
    '''
    t = AnnoyIndex(300, metric='angular')

    with open(input_data) as infile:
        input_data = json.load(infile)

    with open(transform_data) as infile:
        transform_data = json.load(infile)

    # get best description for the given input seconds
    seconds = seconds.split(',')
    START = int(seconds[0])
    END = int(seconds[1])
    DURATION = END - START
    seconds_range = [i for i in range(START, END)]

    # get the original descriptions for the input video in the given seconds
    best_match = {}
    for description in input_data:
        for frame in input_data[description]:
            if (frame in seconds_range):
                if (description in best_match):
                    best_match[description] = best_match[description] + 1
                else:
                    best_match[description] = 1

    best_description_for_scene = ''
    times = 0
    for n in best_match:
        if (best_match[n] > times):
            times = best_match[n]
            best_description_for_scene = n

    # using the best description, get the closest semantic meaning in the transform data
    closest_meaning = nearest_neighbor(transform_data,
                                       best_description_for_scene, t)
    print('Original scene is {}. Closest meaning found: {}'.format(
        best_description_for_scene, closest_meaning))
    # get the largest sequence of frames for that description
    closet_meaning_frames = transform_data[closest_meaning]

    # from that description, group frames in scenes
    scenes = []
    for i in range(len(closet_meaning_frames)):
        if (i == 0):
            scenes.append([closet_meaning_frames[i]])
        else:
            if (closet_meaning_frames[i] - scenes[-1][-1] < 2):
                scenes[-1].append(closet_meaning_frames[i])
            else:
                scenes.append([closet_meaning_frames[i]])

    # get the largest continuous scene
    largest_continuous_scene = []
    for scene in scenes:
        if (len(scene) > len(largest_continuous_scene)):
            largest_continuous_scene = scene

    start_frame = largest_continuous_scene[0]
    end_frame = largest_continuous_scene[-1]
    frames_duration = end_frame - start_frame
    if (frames_duration == 0):
        start_frame = start_frame - 1
        end_frame = end_frame + 1

    # create the video
    clip = mp.VideoFileClip(transform_video).subclip(start_frame, end_frame)
    composition = mp.concatenate([clip])
    video_name = "/{}.mp4".format(str(time()))
    composition.write_videofile(output_dir + video_name)
    if (api == True):
        return {"name": video_name, "scene_closest_meaning": closest_meaning}
Example #35
def time_symetrize(clip):
    return mpy.concatenate([clip, clip.fx(mpy.vfx.time_mirror)])