Example #1
import uuid

import ffmpeg


def concat(head, tail):
    """Concatenate two videos and return the path of the merged file."""
    tmp_file = "/tmp/{salt}.mp4".format(salt=uuid.uuid4())
    head_file = ffmpeg.input(head)
    tail_file = ffmpeg.input(tail)
    (ffmpeg
        .concat(
            head_file,
            tail_file
        )
        .output(tmp_file)
        # https://github.com/kkroening/ffmpeg-python/blob/7669492575141c13b63fd89dde8b44ecf6bf31cb/ffmpeg/_ffmpeg.py#L28
        .overwrite_output() # -y option
        .run()
    )
    return tmp_file
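
A hypothetical call (filenames assumed), showing roughly what command the chain compiles to:

merged = concat("head.mp4", "tail.mp4")
# ~ ffmpeg -i head.mp4 -i tail.mp4 -filter_complex '...concat...' -y /tmp/<uuid>.mp4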
Example #2
import tempfile

import cv2
import ffmpeg
from moviepy.editor import AudioFileClip  # assumed import for AudioFileClip

# read_fn, get_audio_obj, bytes2video and video_path are assumed to be
# defined elsewhere in the original script.

# print(len([frame for frame in video.iter_frames()]))

# Equivalent CLI: ffmpeg -i current.webm -c copy -fflags +genpts new.webm

video_bytes = read_fn(video_path)
print(get_audio_obj(video_bytes))
exit(0)  # debug exit left in the original; the code below never runs

with tempfile.TemporaryDirectory() as temp_dir:
    print(temp_dir)
    tmp_video = temp_dir + "/sisa.webm"
    tmp_video2 = temp_dir + "/sisa2.webm"
    with open(tmp_video, "wb") as f:
        f.write(video_bytes)

    in_stream = ffmpeg.input(tmp_video)  # renamed from `input` to avoid shadowing the builtin
    out = ffmpeg.output(in_stream,
                        tmp_video2,
                        vcodec="copy",
                        acodec="copy",
                        fflags="+genpts")
    ffmpeg.run(out)

    audio = AudioFileClip(tmp_video2)
print(audio)
exit()

frames = list(bytes2video(video_bytes))

for i, frame in enumerate(frames):
    cv2.putText(frame, str(i), (10, 50), 0, 2, (255, 255, 255))
Example #3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 21 22:43:09 2018

@author: CARE-DESK-04
"""

import ffmpeg

if __name__ == "__main__":
    name_folder = "D:/TV/Encode"
    name_file = "input.ts"
    file = name_folder + "/" + name_file

    stream = ffmpeg.input(file)
    # ffmpeg does not create directories, so the Images folder must already exist.
    stream = ffmpeg.output(stream, name_folder + "/Images/output%05d.png")
    ffmpeg.run(stream)
Example #4
def decode_audio(filename, target):
    ffmpeg.input(filename).output(target, map_metadata=0).global_args(
        '-loglevel', 'error').overwrite_output().run()
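
A hypothetical call (paths assumed); map_metadata=0 copies the input's global metadata to the output:

decode_audio("input.m4a", "output.wav")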
Example #5
import os

import numpy as np
import time
import cv2
import ray
import ffmpeg
from vujade import vujade_videocv as videocv_
from vujade import vujade_imgcv as imgcv_

num_cpus = os.cpu_count()

is_process_single = False
is_process_multi = True
is_ray = False

process1 = (ffmpeg.input(os.path.join(
    os.getcwd(), 'test_input',
    'test_1.mp4')).output('pipe:', format='rawvideo',
                          pix_fmt='bgr24').run_async(pipe_stdout=True))

if is_ray is True:
    ray.init(num_cpus=num_cpus)

    @ray.remote(num_cpus=num_cpus)
    def worker(_frame, _path_img):
        cv2.imwrite(filename=_path_img, img=_frame)


if __name__ == "__main__":
    eps_val = 1e-9

    name_video_src = 'test_1.mp4'
    path_video_src = os.path.join(os.getcwd(), 'test_input', name_video_src)
Example #6
 def load(
     _,
     path: Union[Path, str],
     offset: Optional[float] = None,
     duration: Optional[float] = None,
     sample_rate: Optional[float] = None,
     dtype: np.dtype = np.float32,
 ) -> Signal:
     """
     Loads the audio file denoted by the given path
     and returns it data as a waveform.
     Parameters:
         path (Union[Path, str]:
             Path of the audio file to load data from.
         offset (Optional[float]):
             Start offset to load from in seconds.
         duration (Optional[float]):
             Duration to load in seconds.
         sample_rate (Optional[float]):
             Sample rate to load audio with.
         dtype (numpy.dtype):
             (Optional) Numpy data type to use, default to `float32`.
     Returns:
         Signal:
             Loaded data a (waveform, sample_rate) tuple.
     Raises:
         SpleeterError:
             If any error occurs while loading audio.
     """
     if isinstance(path, Path):
         path = str(path)
     if not isinstance(path, str):
         path = path.decode()
     try:
         probe = ffmpeg.probe(path)
     except ffmpeg._run.Error as e:
         raise SpleeterError(
             "An error occurred with ffprobe (see ffprobe output below)\n\n{}".format(
                 e.stderr.decode()
             )
         )
     if "streams" not in probe or len(probe["streams"]) == 0:
         raise SpleeterError("No stream was found with ffprobe")
     metadata = next(
         stream for stream in probe["streams"] if stream["codec_type"] == "audio"
     )
     n_channels = metadata["channels"]
     if sample_rate is None:
         sample_rate = metadata["sample_rate"]
     output_kwargs = {"format": "f32le", "ar": sample_rate}
     if duration is not None:
         output_kwargs["t"] = str(dt.timedelta(seconds=duration))
     if offset is not None:
         output_kwargs["ss"] = str(dt.timedelta(seconds=offset))
     process = (
         ffmpeg.input(path)
         .output("pipe:", **output_kwargs)
         .run_async(pipe_stdout=True, pipe_stderr=True)
     )
     buffer, _ = process.communicate()
     waveform = np.frombuffer(buffer, dtype="<f4").reshape(-1, n_channels)
     if not waveform.dtype == np.dtype(dtype):
         waveform = waveform.astype(dtype)
     return (waveform, sample_rate)
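
For reference, a minimal standalone sketch of the same raw-PCM decode pattern (the input path and sample rate here are assumptions):

import ffmpeg
import numpy as np

out, _ = (
    ffmpeg.input("audio.wav")
    .output("pipe:", format="f32le", ar=44100)
    .run(capture_stdout=True, capture_stderr=True)
)
waveform = np.frombuffer(out, dtype="<f4")  # f32le = little-endian 32-bit floats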
Example #7
import os
import ffmpeg
from pathlib import Path

inputdir = Path('D:/Lab/anime60/data/720/ani_like')
outputdir = Path('D:/Lab/anime60/data/1080/ani_like')
kwargs = {
    'vcodec': 'h264_nvenc',
    'rc:v': 'vbr_hq',
    'cq:v': '18',
    'video_bitrate': '30M',
    'profile:v': 'high',
    'preset': 'slow',
    's': '1920x1080'
}
for f in os.listdir(inputdir):
    # h264_nvenc needs an NVIDIA GPU and an ffmpeg build with NVENC support.
    (ffmpeg.input(str(inputdir / f)).video.output(
        str(outputdir / f'{Path(f).stem}_1080.mp4'), **kwargs).run())
Example #8
def main():
    # Assumes module-level imports (librosa, numpy as np, cv2 as cv, ffmpeg, os,
    # cv2's VideoWriter/VideoWriter_fourcc) and constants (FPS, screen_width,
    # screen_height, left_space, no_of_bars, division_number, the bar class)
    # defined elsewhere in the original script.

    # Make sense of the audio

    filename = "./HBFS.mp3"
    VidName = f'{filename}_TEMP.avi'

    no_of_circles = 4

    ts, sr = librosa.load(filename)
    stft = np.abs(librosa.stft(ts, hop_length=512, n_fft=2048 * 4))
    spectrogram = librosa.amplitude_to_db(stft, ref=np.max)
    # Restored from comments in the original: max_freq is needed below.
    frequencies = librosa.core.fft_frequencies(n_fft=2048 * 4)
    freq_index_ratio = len(frequencies) / frequencies[-1]
    max_freq = int(frequencies[-1])
    freq_step = int(max_freq / no_of_bars)
    music_duration = librosa.get_duration(y=ts, sr=sr)
    print("duration", music_duration)
    seconds = int(music_duration)

    #Video things
    fourcc = VideoWriter_fourcc(*'MP42')
    video = VideoWriter(VidName, fourcc, float(FPS),
                        (screen_width, screen_height))

    bar_width = int((screen_width - left_space) / no_of_bars)
    bar_max_height = int(screen_height * 8 / 14)
    bars = []
    for number in range(no_of_bars):
        bars.append(
            bar(left_space * number, 20, (255, 0, 0), bar_max_height, 30,
                bar_width))

    for video_frame_no in range(FPS * seconds):
        time_frame = librosa.core.time_to_frames(video_frame_no / FPS, sr=sr)

        video_frame = np.empty((screen_height, screen_width, 3), np.uint8)
        # video_frame.fill(255)
        video_frame[:][:] = [52, 42, 37]  # [37, 42, 52]

        bar_count = 0
        # for each in bars:

        #     barLeft = each.x + (bar_width)*bar_count + left_space
        #     barBottom = screen_height - each.min_height
        #     barRight = each.x + (bar_width)*(bar_count+1) + left_space
        #     barTop = screen_height - int(each.max_height*video_frame_no/(FPS*seconds))-each.min_height

        #     cv.rectangle(video_frame, (barLeft, barBottom), (barRight, barTop), (255, 0, 0), -1)
        #     bar_count += 1
        bar_heights = []
        # for i in range(10, max_freq, freq_step):
        #     x = np.mean(spectrogram[int(i*freq_index_ratio):int((i+freq_step)*freq_index_ratio), time_frame])
        #     bar_heights.append(
        #         clamp(bar_max_height, bar_max_height*(1.1**(80+x))/1.1**65)
        #         )
        len_of_freq = len(spectrogram.T[time_frame])
        no_of_els_to_add = (freq_step - len_of_freq % freq_step)
        x = np.pad(spectrogram.T[time_frame], (0, no_of_els_to_add))
        mean = np.mean(x.reshape(-1, freq_step), axis=1)
        # bar_heights = np.power(1.1, np.mean(x.reshape(-1, freq_step), axis = 1)+ 80)*bar_max_height/1.1**65
        # bar_heights = (np.mean(x.reshape(-1, freq_step), axis = 1)+ 80)*bar_max_height/80*1.2
        bar_heights = bar_max_height * (
            (np.power(1.1, (mean + 80)) - 1) / division_number)
        bar_heights[bar_heights > bar_max_height] = bar_max_height
        w = 3
        bar_heights_convolved = np.convolve(bar_heights, np.ones(w),
                                            'valid') / w

        circleradi = ((mean + 80) / 80 * 300).astype(int)
        # cv.circle(video_frame, (int(screen_width/2), int(screen_height/3)), 7, (30,174,152)[::-1], -1)
        cv.circle(video_frame, (int(screen_width / 2), int(screen_height / 3)),
                  50 * int(circleradi[3] / 300), (30, 174, 152)[::-1], -1)
        colors = [(8, 217, 214), (255, 46, 99), (234, 234, 234),
                  (31, 171, 137)]
        for j in range(no_of_circles):
            cv.circle(video_frame, (int(640 * (j)), int(screen_height / 3)),
                      circleradi[100 * j + no_of_bars], colors[j][::-1], 5)

        no_of_available_divisions = len(bar_heights)
        for each in bars:

            barLeft = each.x + (bar_width) * bar_count + left_space
            barBottom = screen_height - each.min_height
            barRight = each.x + (bar_width) * (bar_count + 1) + left_space
            if (bar_count < no_of_available_divisions):
                barTop = screen_height - int(
                    bar_heights_convolved[bar_count]) - each.min_height
            else:
                barTop = screen_height - each.min_height

            cv.rectangle(video_frame, (barLeft, barBottom), (barRight, barTop),
                         (bar_count / no_of_bars * 244,
                          (no_of_bars - bar_count) / no_of_bars * 244, 234),
                         -1)
            bar_count += 1

        video.write(video_frame)
    video.release()

    input_video = ffmpeg.input(VidName)
    input_audio = ffmpeg.input(filename)
    try:
        ffmpeg.concat(input_video, input_audio, v=1,
                      a=1).output(f'{filename}_finished.mp4').run()
    except ffmpeg.Error as e:
        print(e.stderr)
    if os.path.exists(VidName):
        os.remove(VidName)
    print(filename)
Example #9
def test__compile():
    out_file = ffmpeg.input('dummy.mp4').output('dummy2.mp4')
    assert out_file.compile() == ['ffmpeg', '-i', 'dummy.mp4', 'dummy2.mp4']
    assert out_file.compile(cmd='ffmpeg.old') == [
        'ffmpeg.old', '-i', 'dummy.mp4', 'dummy2.mp4'
    ]
Example #10
def test__output__video_size(video_size):
    args = (ffmpeg.input('in').output('out', video_size=video_size).get_args())
    assert args == ['-i', 'in', '-video_size', '320x240', 'out']
Example #11
def test__output__bitrate():
    args = (ffmpeg.input('in').output('out',
                                      video_bitrate=1000,
                                      audio_bitrate=200).get_args())
    assert args == ['-i', 'in', '-b:v', '1000', '-b:a', '200', 'out']
Example #12
def _get_simple_example():
    return ffmpeg.input(TEST_INPUT_FILE1).output(TEST_OUTPUT_FILE1)
Example #13
def test_global_args():
    out_file = ffmpeg.input('dummy.mp4').output('dummy2.mp4').global_args(
        '-progress', 'someurl')
    assert out_file.get_args() == [
        '-i', 'dummy.mp4', 'dummy2.mp4', '-progress', 'someurl'
    ]
Example #14
# import uuid

# #%%
# %matplotlib inline
# from IPython.display import display, HTML
# display(HTML("<style>.container {width: 100%; !important}</style>"))

#%%
import ffmpeg
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import interact  # assumed source of the @interact decorator

# Preview the selected video
#%%
file_full_name = '/somepath/0000000A.XI1'
out, err = (ffmpeg.input(file_full_name, f='H264').filter(
    'fps', fps=1,
    round='down').filter('scale', 256,
                         144).output('pipe:',
                                     format='rawvideo',
                                     pix_fmt='rgb24').run(capture_stdout=True))
video = (np.frombuffer(out, np.uint8).reshape([-1, 144, 256, 3]))

vv = np.array(video)


# Display the frames
@interact(frame=(0, vv.shape[0] - 1))
def show_frame(frame=0):
    plt.rcParams['figure.figsize'] = (20, 25)
    plt.imshow(vv[frame, :, :, :])
Example #15
def test__run__multi_output():
    in_ = ffmpeg.input(TEST_INPUT_FILE1)
    out1 = in_.output(TEST_OUTPUT_FILE1)
    out2 = in_.output(TEST_OUTPUT_FILE2)
    ffmpeg.run([out1, out2], overwrite_output=True)
Example #16
# # visualize the results in a new window
# # show_result(img, result, model.CLASSES)
# # or save the visualization results to image files
# out_path='/home/ld/RepPoints/out/0000/'
# out_name=out_path+img_name.split('/')[-1]
# show_result(img, result, model.CLASSES, out_file=out_name,show=False)
# print(out_name)

# test a video and show the results
# video = mmcv.VideoReader('video.mp4')
# Assumes os, time, mmcv and ffmpeg are imported, and that `model`,
# inference_detector and show_result come from the mmdetection setup above.
input_dir = '/home/ld/RepPoints/kitti/0000/'
sequence = os.listdir(input_dir)
sequence.sort()
out_path = '/home/ld/RepPoints/out/retinanet/0000/'
compute_time = 0
for frame in sequence:
    start = time.time()
    img_name = input_dir + frame
    print(img_name)
    img = mmcv.imread(img_name)
    result = inference_detector(model, img)
    print(time.time() - start)
    compute_time += time.time() - start
    out_name = out_path + frame
    show_result(img, result, model.CLASSES, show=True, out_file=out_name)
print('compute_time:', compute_time / len(sequence))

out_path = '/home/ld/RepPoints/out/retinanet/0000'
(ffmpeg.input(out_path + '/*.png',
              pattern_type='glob',
              framerate=10).output(out_path + '.mp4').run())
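
Note: pattern_type='glob' relies on the image2 demuxer's glob support, which is not available in every ffmpeg build (notably most Windows builds).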
Example #17
def test_fluent_output():
    (ffmpeg.input('dummy.mp4').trim(start_frame=10,
                                    end_frame=20).output('dummy2.mp4'))
Example #18
# ffmpeg-python
import ffmpeg
stream = ffmpeg.input('in.mp4')
stream = ffmpeg.hflip(stream)
stream = ffmpeg.output(stream, 'output.mp4')
ffmpeg.run(stream)


in_file = ffmpeg.input('in.mp4')
overlay_file = ffmpeg.input('overlay.png')
(
    ffmpeg
    .concat(
        in_file.trim(start_frame=10, end_frame=20),
        in_file.trim(start_frame=30, end_frame=40),
    )
    .overlay(overlay_file.hflip())
    .drawbox(50, 50, 120, 120, color='red', thickness=5)
    .output('out.mp4')
    .run()
)
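
For reference, a minimal sketch of inspecting such a chain without running it (filenames assumed); ffmpeg-python compiles fluent filter chains into a single -filter_complex graph:

import ffmpeg

in_file = ffmpeg.input('in.mp4')
stream = in_file.hflip().output('out.mp4')
print(stream.get_args())  # full argument list, including the -filter_complex graph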
Example #19
def test_fluent_complex_filter():
    in_file = ffmpeg.input('dummy.mp4')
    return (ffmpeg.concat(in_file.trim(start_frame=10, end_frame=20),
                          in_file.trim(start_frame=30, end_frame=40),
                          in_file.trim(start_frame=50,
                                       end_frame=60)).output('dummy2.mp4'))
Example #20

import ffmpeg
import numpy as np
from keras.models import Model, load_model
from matplotlib import pyplot as plt

cnn_model = load_model("model_cnn_run7.h5")
cnn_encoder = Model(cnn_model.inputs, cnn_model.layers[-5].output)

out, _ = (ffmpeg.input("calving/958/0192.mp4").output(
    'pipe:', format='rawvideo', pix_fmt='gray').run(quiet=True))
video = (np.frombuffer(out, np.uint8).reshape([-1, 400, 400, 1]))

frame = video[0] / 255

frame = np.reshape(frame, (1, 400, 400, 1))
show_frame = np.reshape(frame, (400, 400))
#plt.imshow(show_frame, cmap="gray")
#plt.show()

predicted = cnn_encoder.predict(frame)
emb_size = predicted.size
emb_length = emb_size**.5

predicted = np.reshape(predicted, (int(emb_length), int(emb_length)))

plt.imshow(predicted, cmap="gray")
plt.show()
Example #21
def transcode(filename):
    ffmpeg.input(filename).output("input.raw", format='s16le', acodec='pcm_s16le', ac=2, ar='48k').overwrite_output().run() 
    os.remove(filename)
Example #22

# Assumes pa, N, start_time, end_time, dt, output_dir and the classes
# rock_paper_scissors, InteractionSimulator and MicrobePlotter are defined
# earlier, and that os, glob and ffmpeg are imported.
# Advect the particles and save all the data to NetCDF.
pa.time_step(start_time, end_time, dt)
pa.create_netcdf_file(start_time, end_time, dt)

# Create an interaction simulator that uses the rock-paper-scissors pair interaction.
rps_interaction = rock_paper_scissors(N_microbes=N, pRS=0.5, pPR=0.5, pSP=0.5)
isim = InteractionSimulator(pair_interaction=rps_interaction,
                            interaction_radius=0.05,
                            output_dir=output_dir)

# Simulate the interactions.
isim.time_step(start_time, end_time, dt)

# Create a microbe plotter that will produce a plot of all the microbes at a single iteration.
mp = MicrobePlotter(N_procs=-1,
                    dark_theme=True,
                    input_dir=output_dir,
                    output_dir=output_dir)

# Plot the first 100 frames and save them to disk.
mp.plot_frames(start_time, end_time, dt)

# Make movie!
(ffmpeg.input(os.path.join(output_dir, "lagrangian_microbes_%05d.png"),
              framerate=30).output(os.path.join(output_dir, "movie.mp4"),
                                   crf=15,
                                   pix_fmt='yuv420p').run())

for fl in glob.glob(os.path.join(output_dir, "*.png")):
    os.remove(fl)
Example #23
def addAudio():
    input_video = ffmpeg.input("output.avi")
    input_audio = ffmpeg.input("test_images/test_video.mp4").audio
    ffmpeg.concat(input_video, input_audio, v=1, a=1).output('output_with_audio.mp4').run()
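
Note: ffmpeg.concat with v=1, a=1 treats the pair as one segment with one video and one audio stream, so here it effectively muxes the silent video together with the extracted audio track.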
Example #24
def compress_audio(filename, target, quality="128k"):
    ffmpeg.input(filename).output(target, ab=quality).global_args(
        '-loglevel', 'error').overwrite_output().run()
Example #25
def convert(name_vid, name_sound):
    audio = ffmpeg.input(name_vid).audio
    ffmpeg.output(audio, name_sound).run()
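
A hypothetical call (filenames assumed); the extension of name_sound determines the audio format ffmpeg writes:

convert("clip.mp4", "clip.mp3")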
Example #26
 def _extract_raw_frames(self, source_path:Path):
     bwframes_folder = self.bwframes_root/(source_path.stem)
     bwframe_path_template = str(bwframes_folder/'%5d.jpg')
     bwframes_folder.mkdir(parents=True, exist_ok=True)
     self._purge_images(bwframes_folder)
     ffmpeg.input(str(source_path)).output(
         str(bwframe_path_template), format='image2', vcodec='mjpeg',
         qscale=0).run(capture_stdout=True)
Example #27
"""Script for pulling images from a video stream via URL (like a Surfline cam rewind)"""

import os
import time
import requests
import ffmpeg

TIMESTAMP = int(time.time())
DATA_DIR = os.path.dirname(__file__)
URL = "https://camrewinds.cdn-surfline.com/live/wc-southoceanbeach.stream.20191125T201826428.mp4"
SPOT_ID = "20191125T201826428"

ffmpeg_fileformat = os.path.join(DATA_DIR,
                                 f"images/{TIMESTAMP}_{SPOT_ID}_%02d.jpg")

stream = ffmpeg.input(URL, t=30)
stream = ffmpeg.filter(stream, 'fps', fps=1, round='down')
stream = ffmpeg.output(stream, ffmpeg_fileformat)
stream.run()

print("Complete")
Example #28
def video_object_recognition(video, outpt_fldr, temp_fldr_base, video_creation,
                             sampling, sampling_rate):
    try:
        # Sleep random amount to start due to multithreading
        time.sleep(random.random() * 5)

        # Get file names and extensions
        file_name, extension = os.path.splitext(video)
        file_name = os.path.basename(file_name)

        # Create a temp folder for the split video files (1) and processed images (2)
        # Check for existing temp_folder with the same name
        temp_folder_name_used = True
        while temp_folder_name_used:
            rand = random.randrange(0, 1000000000000000, 1)
            temp_1_fldr = os.path.join(temp_fldr_base, "temp_1 - " + str(rand))
            temp_2_fldr = os.path.join(temp_fldr_base, "temp_2 - " + str(rand))
            if not os.path.exists(temp_1_fldr):
                temp_folder_name_used = False
        # If folder exists, delete it.
        if os.path.exists(temp_1_fldr):
            shutil.rmtree(temp_1_fldr)
        if os.path.exists(temp_2_fldr):
            shutil.rmtree(temp_2_fldr)
        # Create new temp folder
        os.mkdir(temp_1_fldr)
        os.mkdir(temp_2_fldr)

        # Create a list of the frames in order
        frame_list = list()
        frame_list_processed = list()

        print("Splitting video file [" + video + "] into individual images...")

        # Capture video
        vidcap = cv2.VideoCapture(video)

        # Read the first frame
        success, image = vidcap.read()
        frame = 0

        # While continuing to get new frames, read each one
        while success:
            # print(str(rand_int) + " : " + str(sampling_rate))
            image_out_name = os.path.join(
                temp_1_fldr, file_name + "_" + str(frame) + extension + ".jpg")

            # If sampling, write only at sample rate, else always write.
            if sampling:
                if random.random() <= sampling_rate:
                    cv2.imwrite(image_out_name, image)
                    frame_list.append(image_out_name)
            else:
                cv2.imwrite(image_out_name, image)
                frame_list.append(image_out_name)
            success, image = vidcap.read()
            frame += 1
        print("Video file [" + video + "] into split into individual images!")

        # If sampling selected, remove frames from frame list based on sampling amount
        # if sampling:
        #     frame_list_keep = list()
        #     for i in frame_list:
        #         if random.randint(1, 100) <= sampling_rate:
        #             frame_list_keep.append(i)
        #     frame_list = frame_list_keep

        # Process images and save to the processed image temp folder
        num_frames = len(frame_list)
        start_time = time.time()
        label_list = list()
        print("Processing individual images for " + video + "...")
        for index, image_file in enumerate(frame_list):
            frame_tuple = object_recognition_image(image_file)
            processed_frame = frame_tuple[0]
            label_list.append(frame_tuple[1])
            processed_image_name = os.path.basename(image_file)
            processed_image_out = os.path.join(temp_2_fldr,
                                               processed_image_name)
            cv2.imwrite(processed_image_out, processed_frame)
            frame_list_processed.append(processed_image_out)
            # Print for frames processed and est time
            if index % 100 == 0 and index > 0:
                fps_process = index / (time.time() - start_time)
                remaining_frames = num_frames - index
                est_time = chop_microseconds(
                    datetime.timedelta(seconds=remaining_frames / fps_process))
                statement = "{}: Completed {}th frame out of {}. FPS: {}. Estimated Time Remaining: {}"
                print(
                    statement.format(file_name, str(index), str(num_frames),
                                     str(round(fps_process, 2)),
                                     str(est_time)))
        print("Individual images processed for " + video + "!")

        # If video_creation specified, combine the video
        if video_creation:
            # Get image frame size
            print("Creating video for " + video + "...")
            img = cv2.imread(frame_list_processed[0])
            height, width, layers = img.shape
            size = (width, height)

            # Get FPS
            fps = frames_per_second(video)

            # Convert images to intermediate video
            intermediate_video_file = os.path.join(temp_2_fldr,
                                                   os.path.basename(video))
            video_out = cv2.VideoWriter(intermediate_video_file,
                                        cv2.VideoWriter_fourcc(*'DIVX'), fps,
                                        size)
            for j in frame_list_processed:
                img = cv2.imread(j)
                video_out.write(img)
            video_out.release()

            # Create video with sound
            # video_clip = mpe.VideoFileClip(intermediate_video_file)
            # audio_clip = mpe.AudioFileClip(video)
            # video_clip.write_videofile(os.path.join(outpt_fldr, os.path.basename(video)), audio=audio_clip, fps=fps)
            try:
                # Delete file if it exists
                delete_file(os.path.join(outpt_fldr, os.path.basename(video)))

                # Combine audio and video together
                input_video = ffmpeg.input(intermediate_video_file)
                input_audio = ffmpeg.input(video)
                ffmpeg.output(input_video,
                              input_audio,
                              os.path.join(outpt_fldr,
                                           os.path.basename(video)),
                              vcodec='copy').run()
            except FileNotFoundError:
                print(
                    "Could not open ffmpeg. Please install ffmpeg and try "
                    "again (e.g. run [brew install ffmpeg])."
                )

            print("Video created for " + video + " and saved as " +
                  os.path.join(outpt_fldr, os.path.basename(video)) + "!")

        # Convert the label list into a DF
        # *Columns = number of objects in video
        # *Each row is a frame
        frame_label_dict_list = list()
        for i in label_list:
            frame_label_dict = dict()
            for j in i:
                if j not in frame_label_dict.keys():
                    frame_label_dict[j] = 1
                else:
                    frame_label_dict[j] += 1

            # Append frame dictionary to list
            frame_label_dict_list.append(frame_label_dict)

        # Convert into dataframe
        label_df_out = pd.DataFrame(frame_label_dict_list)

        # Append name of video to dataframe
        label_df_out['video'] = video

        # Create frame_list from index
        label_df_out['frame'] = label_df_out.index

        # Try to delete the temp folder
        try:
            if os.path.exists(temp_1_fldr):
                shutil.rmtree(temp_1_fldr)
            if os.path.exists(temp_2_fldr):
                shutil.rmtree(temp_2_fldr)
        except Exception:
            print("Couldn't delete temp folder...")

        # Return the frame list to original call
        return label_df_out

    except Exception:
        print('Cannot perform object recognition.')
Example #29
import ffmpeg

# fade-out / fade-in
file_path = "C:/Users/masho/Desktop/work/python/Python/lib/movie/Café_22728.mp4"  # path of the video to edit
save_path = "C:/Users/masho/Desktop/work/python/Python/lib/movie/sample.mp4"  # path to save the edited video

# Get the total duration of the video
video_info = ffmpeg.probe(file_path)
duration = float(video_info['streams'][0]['duration'])

# Extract the seconds to keep (the original passed t=0, which would produce an
# empty output, so the full duration is used here)
divide_sec = duration
stream = ffmpeg.input(file_path, ss=0, t=divide_sec)

# Extract the audio stream
audio_stream = stream.audio

# Fade in over the first 2 seconds and fade out over the last 2
stream = stream.filter('fade', type='in', start_time=0, duration=2)
stream = stream.filter('fade', type='out', start_time=duration - 2, duration=2)
audio_stream = audio_stream.filter('afade',
                                   type='in',
                                   start_time=0,
                                   duration=2)
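# (only a fade-in is applied to the audio here; a matching fade-out could be
# chained with a second afade filter of type='out')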
stream = ffmpeg.output(stream, audio_stream, save_path)

ffmpeg.run(stream)
Example #30
def test_get_args_simple():
    out_file = ffmpeg.input('dummy.mp4').output('dummy2.mp4')
    assert out_file.get_args() == ['-i', 'dummy.mp4', 'dummy2.mp4']
Example #31
# The start of this excerpt was truncated; the `events` name and the list()
# wrapper are assumed, and `data` is defined elsewhere.
events = list(
    filter(
        lambda x: ('a' in x) and (x.get('a') in ('start', 'move', 'end')),
        data))

PAD = {
    'width': '768',
    'height': '600',
    'x': '(ow-iw)/2',
    'y': '(oh-ih)/2',
    'color': 'black'
}

stream = ffmpeg.concat(
    ffmpeg.input('video_test/2.jpeg', loop=1,
                 t=4).filter('scale',
                             size='768:600',
                             force_original_aspect_ratio='decrease').filter_(
                                 "pad", **PAD),
    ffmpeg.input('video_test/3.jpeg', loop=1,
                 t=4).filter('scale',
                             size='768:600',
                             force_original_aspect_ratio='decrease').filter_(
                                 "pad", **PAD),
    ffmpeg.input('video_test/4.jpeg', loop=1,
                 t=6).filter('scale',
                             size='768:600',
                             force_original_aspect_ratio='decrease').filter_(
                                 "pad", **PAD),
)

time = 0
Example #32
 def upload(self):
     context = self.context
     request = self.request
     schema = SongUploadSchema().bind(request=request, context=context)
     form = deform.Form(schema, buttons=('Save',))
     rendered = None
     if 'Save' in request.POST:
         controls = request.POST.items()
         try:
             appstruct = form.validate(controls)
         except deform.ValidationFailure as e:
             rendered = e.render()
         else:
             audio_file = appstruct['audio_file']
             tmpdir = request.registry.settings['substanced.uploads_tempdir']
             job = uuid.uuid4().hex
             jobdir = os.path.join(tmpdir, job)
             try:
                 try:
                     os.makedirs(jobdir)
                 except FileExistsError:
                     pass
                 inputfn = os.path.join(jobdir, 'inputfile')
                 fp = audio_file['fp']
                 fp.seek(0)
                 with open(inputfn, 'wb') as inputfile:
                     # closing flushes the copy before it is hashed and transcoded
                     shutil.copyfileobj(fp, inputfile)
                 CHUNK = 1024 * 1024 * 1024  # hash the upload in 1 GiB chunks
                 md5 = hashlib.md5()
                 with open(inputfn, 'rb') as f:
                     while True:
                         data = f.read(CHUNK)
                         if not data:
                             break
                         md5.update(data)
                 opus_filename = os.path.join(jobdir, 'output.opus')
                 ffmpeg.input(inputfn).output(
                     opus_filename, ar=48000).run()
                 song = request.registry.content.create(
                     'Song',
                     appstruct['title'],
                     appstruct['artist'],
                     appstruct['lyrics'],
                     timings='',
                     audio_stream=open(opus_filename, 'rb'),
                     audio_mimetype='audio/opus',
                     language=appstruct['language'],
                 )
             finally:
                 shutil.rmtree(jobdir, ignore_errors=True)
             request.session.flash(
                 'Song uploaded. Now voice lyrics like William Shatner in '
                 'rhythm with the song in order to time the karaoke '
                 'display text.',
                 'info')
             song.language = appstruct['language']
             song.genre = appstruct['genre']
             song.year = appstruct['year']
             songname = slug.slug(appstruct['title'])
             hashval = md5.hexdigest()
             songname = f'{songname}-{hashval}'
             if songname in self.context:
                 request.session.flash('this song has already been uploaded')
                 raise HTTPFound(request.resource_url(self.context))
             self.context[songname] = song
             song.uploader = request.performer # NB must be after seating
             set_acl(song,
                     [
                         (Allow, request.user.__oid__, ['yss.edit']),
                         (Deny, Everyone, ['yss.indexed']),
                     ]
             )
             event = ObjectModified(song)
             self.request.registry.subscribers((event, song), None)
             return HTTPFound(request.resource_url(song, '@@retime'))
     else:
         appstruct = {
             'title':colander.null,
             'artist':colander.null,
             'audio_file':colander.null,
             'genre':colander.null,
             'language':colander.null,
             'lyrics':colander.null,
             'year':colander.null,
             }
     if rendered is None:
         rendered = form.render(appstruct, readonly=False)
     return {'form':rendered, 'page_title':'Upload Song'}
Example #33
    def _prep_data_worker(self, start, end, filenames):
        session = self.DBSession()
        i = start
        total_writes = 0

        while i < end:
            print('{}: {}/{}'.format(filenames[i], i - start, end - start))
            extract_path = os.path.join(self.extract_root, filenames[i].replace(" ", "_"))
            os.makedirs(extract_path, exist_ok=True)
            try:
                with zipfile.ZipFile(os.path.join(self.beatmaps_root, filenames[i]), "r") as zip_ref:
                    zip_info = zip_ref.infolist()
                    beatmap_list = []

                    # search for all beatmap files
                    for info in zip_info:
                        if '.osu' in info.filename:
                            # extract beatmap
                            data = zip_ref.read(info)
                            bmfile_path = os.path.join(extract_path, os.path.basename(info.filename))
                            bmfile = open(bmfile_path, 'wb')
                            bmfile.write(data)
                            bmfile.close()
                            # read the beatmap to find related audio file
                            try:
                                file = open(bmfile_path)
                                p = pyttanko.parser()
                                bmap = p.map(file)

                                audio_file_name = bmap.audio_filename.strip().lower()
                                audio_path = os.path.join(extract_path, audio_file_name)
                                wave_filepath = audio_path + '.wav'

                                for jifo in zip_info:
                                    if jifo.filename.lower() == audio_file_name and not os.path.isfile(
                                            os.path.join(extract_path, audio_file_name)):
                                        # extract audio for beatmap
                                        data = zip_ref.read(jifo)
                                        audio = open(audio_path, 'wb')
                                        audio.write(data)
                                        audio.close()
                                        # convert to wav
                                        stream = ffmpeg.input(audio_path)
                                        stream = ffmpeg.output(stream, wave_filepath, ac=1)
                                        ffmpeg.run(stream, quiet=True, overwrite_output=True)
                                # calculate difficulty (std mode only; in the
                                # original, diff was left undefined for other
                                # modes, which would raise a NameError below)
                                diff = None
                                if bmap.mode == 0:
                                    file = open(bmfile_path)
                                    p = pyttanko.parser()
                                    bmap = p.map(file)
                                    diff = pyttanko.diff_calc().calc(bmap)
                                    file.close()
                                # save metadata to mysql db
                                date = datetime.datetime(*info.date_time[0:6])
                                new_bm_metadata = BeatmapMetadata(
                                    bmFilePath=bmfile_path,
                                    audioFilePath=wave_filepath,
                                    gamemodeType=bmap.mode,
                                    difficulty=diff.total if diff else None,
                                    dateCreated=date)
                                session.add(new_bm_metadata)
                            except Exception as e:
                                print("error parsing beatmap or audiofile, deleting beatmap: ", e)
                                os.remove(bmfile_path)
            except zipfile.BadZipFile:
                print("Bad zipfile: ", filenames[i])
            i += 1
        session.commit()