Esempio n. 1
0
 def watermark():
     # Overlay a random selection of watermark images onto the nonlocal
     # `video` stream, growing `height` when banners pad the frame taller.
     # NOTE(review): `audio` is declared nonlocal but never used in this body.
     nonlocal video, audio, height
     height = int(height)  # stored as a string in the enclosing scope
     # Clamp the requested watermark strength to 1..100, then rescale it
     # down to the number of images to apply (~1..23).
     d['watermark'] = ceil(constrain(d['watermark'], 1, 100) / 4.5)
     j = f"{parentPath}/images/watermark"
     watermarks = [f"{j}/{i}" for i in listdir(j)]
     # Pick d['watermark'] images at random; duplicates are possible.
     # NOTE(review): if `r` can return len(watermarks) inclusive, this
     # indexes out of range -- confirm `r`'s bounds.
     t = [watermarks[int(r(0, len(watermarks)))] for i in range(d['watermark'])]
     # One-shot flags so the bandicam/hypercam effects run at most once each.
     cb, ch = True, True
     for i in t:
         if getName(i) in ["9gag", "memebase", "ifunny", "laugh"]:
             # Banner-style watermark: scale it to full frame width keeping
             # its aspect ratio, pad the frame taller by that amount, and
             # pin the banner to the bottom edge.
             w, h = getImageRes(i)
             height += h
             nn = ffmpeg.filter_multi_output([ffmpeg.input(i), video], "scale2ref", w='iw',h=f"iw * {(h / w)}")
             video = nn[1].filter("pad", "iw", f"ih+(iw * {h / w})").overlay(nn[0], x = 0, y = f"main_h - (main_w * {h / w})")
         if getName(i) == "mematic":
             # Corner watermark scaled to one third of the frame width,
             # overlaid near the bottom-left (no padding added, though
             # `height` is still incremented).
             w, h = getImageRes(i)
             height += h
             nn = ffmpeg.filter_multi_output([ffmpeg.input(i), video], "scale2ref", w='iw / 3',h=f"iw / 3 * {(h / w)}")
             video = nn[1].overlay(nn[0], x = "main_w * 0.05", y = "main_h * 0.95")
         if getName(i) == "reddit":
             # Full-frame overlay scaled to exactly match the video.
             nn = ffmpeg.filter_multi_output([ffmpeg.input(i), video], "scale2ref", w = "iw", h = "ih")
             video = nn[1].overlay(nn[0])
         if cb and getName(i) == "bandicam":
             bandicam()
             cb = False
         if ch and getName(i) == "hypercam":
             hypercam()
             ch = False
     height = str(height)  # restore the string form the enclosing scope expects
Esempio n. 2
0
def main():
    """Assemble a service video: mass parts followed by a closing screen."""
    parts = mass_part_inputs(os.path.join(MASS_VIDEOS_DIR, '2020-05-23'))

    # TODO Read from mass video
    FRAME_RATE = 25

    # Intro / outro sources.
    intro_logo = input('stalbans_logo_5.mp4')
    outro_screen = input('after-message.png', loop_frames=FRAME_RATE * 30)

    # Superimposed bits.
    announcements = input('announcements-background.png')
    offertory = input('offertory-background.png')

    print(intro_logo)
    print(outro_screen)

    # Split the final mass part so one branch can later drive a fade.
    tail_split = ffmpeg.filter_multi_output(parts[-1], 'split')
    parts[-1] = tail_split.stream(0)
    tail_fade = tail_split.stream(1)

    print(parts[-1])
    parts[-1] = ffmpeg.trim(parts[-1], end=10)
    print(parts[-1])

    pipeline = ffmpeg.concat(
        parts[-1],
        outro_screen,
    ).output('out.mp4')
    print(' '.join(ffmpeg.get_args(pipeline)))
    pipeline.run()
Esempio n. 3
0
def process(item) -> Result:
    """Render the clip described by `item` into ./gifs/<uuid>.gif.

    Uses the classic two-pass palettegen/paletteuse pipeline for good GIF
    colors.  Skips work when the GIF already exists.  Returns a Result
    carrying ffmpeg's stdout/stderr, or the raising exception on failure.
    """
    if os.path.isfile(f"./gifs/{item.uuid}.gif"):
        return Result(stdout=b"File exists")
    try:
        # First file matching the item's uuid prefix; an empty match raises
        # IndexError, which the handler below turns into a Result.
        fname = list(glob.glob(f"./videos/{item.uuid}*"))[0]
        s = ffmpeg.input(fname)
        # Trim to the requested window; the end bound is optional.
        if item.end:
            s = ffmpeg.trim(s, start=item.start, end=item.end)
        else:
            s = ffmpeg.trim(s, start=item.start)

        s = ffmpeg.filter(s, 'fps', fps=item.fps)
        # Height -1 preserves the aspect ratio.
        s = ffmpeg.filter(s, 'scale', item.size, -1)
        # Two-pass GIF: one branch generates the palette, the other uses it.
        split = ffmpeg.filter_multi_output(s, "split")
        p = ffmpeg.filter(split[0], "palettegen")
        s = ffmpeg.filter([split[1], p], filter_name="paletteuse")

        o = ffmpeg.output(s, f"./gifs/{item.uuid}.gif")
        out, err = o.run(quiet=True)
        return Result(stderr=err, stdout=out)

    except Exception as e:
        # ffmpeg.Error carries stderr/stdout, but other exceptions (e.g. the
        # IndexError above) may have stderr without stdout -- read both
        # defensively instead of probing only stderr and assuming stdout.
        if hasattr(e, "stderr"):
            return Result(exception=e, stderr=e.stderr,
                          stdout=getattr(e, "stdout", None))
        else:
            return Result(exception=e)
Esempio n. 4
0
File: dl.py Progetto: leegggg/ogmlib
def test(orgasmos):
    """Smoke-test the asplit/concat mixing pipeline on one fixed mp3.

    Loudness-normalizes a hard-coded input, asplits it five times so one
    branch stays cached while the other feeds the concat chain, then
    compiles (and visualizes) the command without running it.  The
    `orgasmos` argument is currently unused; kept for call-site
    compatibility.
    """
    import ffmpeg
    from datetime import datetime
    stream = ffmpeg.input(
        "data\\upload\\orgasmos\\1-x-ti\\pqaY1OJ9eWOzWgVZDn-L1TAnkcdnJrUR.mp3")
    stream = ffmpeg.filter(stream, 'loudnorm')
    concatList = []
    selected = {1: stream}
    for _ in range(5):
        # `stream_id` avoids shadowing the builtin `id`.
        stream_id = 1
        stream = selected.get(stream_id)
        # asplit: branch 0 is cached for reuse, branch 1 is consumed here.
        streams = ffmpeg.filter_multi_output(stream, 'asplit')
        selected[stream_id] = streams.stream(0)
        stream = streams.stream(1)
        concatList.append(stream)

    fullStream = ffmpeg.concat(*concatList, v=0, a=1)
    outputFilename = "mix-test-{:d}.m4a".format(int(
        datetime.now().timestamp()))
    outputPath = basePath.joinpath("mixed").joinpath(outputFilename)
    fullStream = fullStream.output(str(outputPath))
    cmd = ffmpeg.compile(fullStream)
    print(cmd)
    ffmpeg.view(fullStream, filename=str(outputPath) + ".png")
    # Deliberately not executed -- this only exercises graph construction.
    # ffmpeg.run(fullStream)
Esempio n. 5
0
def test__multi_output_edge_label_order():
    """Outputs of a multi-output filter must be labelled in stream-index
    order, even when the indices are sparse (1 and 10000 here)."""
    multi = ffmpeg.filter_multi_output(
        [ffmpeg.input('x'), ffmpeg.input('y')], 'scale2ref')
    merged = ffmpeg.merge_outputs(
        multi[1].filter('scale').output('a'),
        multi[10000].filter('hflip').output('b'),
    )

    cli_args = merged.get_args()
    graph = cli_args[cli_args.index('-filter_complex') + 1]
    first_out, second_out = get_filter_complex_outputs(graph, 'scale2ref')
    assert first_out == get_filter_complex_input(graph, 'scale')
    assert second_out == get_filter_complex_input(graph, 'hflip')
Esempio n. 6
0
File: dl.py Progetto: leegggg/ogmlib
def ffmpegConcat(orgs):
    """Mix a random, tag-filtered selection of audio clips into one m4a.

    Randomly draws clips from `orgs` (up to `hard_limit` attempts),
    keeping only those whose tags pass the not/must/may filters.  Accepted
    clips are chained with concat into layers of roughly `dur_limit`
    seconds; up to `max_nb_layers` layers are then amixed together,
    trimmed to `dur_limit`, and written as AAC with the selection recorded
    in the comment metadata.  Repeated picks of the same clip reuse a
    cached, loudness-normalized stream via asplit.
    """
    import ffmpeg
    import random
    import json
    from datetime import datetime
    max_nb_layers = 20
    hard_limit = 30000  # maximum random draws before giving up
    dur_limit = 600     # target seconds per layer and for the final mix
    tags_not = {
        "Couple", "Electric", 'Threesome', 'Public transport', 'Male stranger'
    }
    tags_must = {"Alone"}
    tags_may = {"Powerful", "Finger"}

    contactList = []
    amixList = []
    length = 0

    selected = dict()
    selectedOrgs = []
    for _ in range(hard_limit):
        # `org_id` avoids shadowing the builtin `id`.
        org_id = random.randint(0, len(orgs) - 1)
        org = orgs[org_id]
        tags = set(org.get("tags"))
        # Reject: any forbidden tag, any missing required tag, or no
        # overlap with the "may" set.
        if tags_not & tags:
            continue
        if tags_must - tags:
            continue
        if not tags_may & tags:
            continue

        stream = selected.get(org_id)
        # `is None`, not truthiness: ffmpeg stream nodes should not be
        # judged by __bool__.
        if stream is None:
            stream = ffmpeg.input(str(getAudioFilePath(org)))
            stream = ffmpeg.filter(stream, 'loudnorm')

        # Split so branch 0 stays cached for future picks of the same clip.
        streams = ffmpeg.filter_multi_output(stream, 'asplit')
        selected[org_id] = streams.stream(0)
        stream = streams.stream(1)
        contactList.append(stream)

        audioLength = float(org.get("duration", "30"))
        length += audioLength
        if length >= dur_limit:
            # Current chain is long enough: freeze it as one amix layer.
            contactStream = ffmpeg.concat(*contactList, v=0, a=1)
            amixList.append(contactStream)
            contactList = []
            length = 0

        selectedOrgs.append(org)
        if len(amixList) >= max_nb_layers:
            break
    else:
        # Draw budget exhausted without filling all layers: keep the
        # partial chain, if any, as a final layer.
        if contactList:
            contactStream = ffmpeg.concat(*contactList, v=0, a=1)
            amixList.append(contactStream)

    fullStream = ffmpeg.filter(amixList,
                               'amix',
                               inputs=len(amixList),
                               duration='longest')
    fullStream = ffmpeg.filter(fullStream, 'atrim', duration=dur_limit)

    outputPath = basePath.joinpath("mixed").joinpath("mix-{:d}.m4a".format(
        int(datetime.now().timestamp())))
    # Record how the mix was assembled in the file's comment metadata.
    metadata = {
        'length': dur_limit,
        'nb': len(selectedOrgs),
        'nb_dist': len(selected),
        'mix_layers': len(amixList),
        'tags_not': list(tags_not),
        'tags_must': list(tags_must),
        'tags_may': list(tags_may),
        'tags': list(getTags(selectedOrgs))
    }
    metadataJson = json.dumps(metadata, ensure_ascii=False, sort_keys=False)
    outputParam = {
        'metadata': 'comment="{}"'.format(metadataJson),
        'acodec': 'aac'
    }

    fullStream = fullStream.output(str(outputPath), **outputParam)
    cmd = ffmpeg.compile(fullStream)
    print(cmd)
    ffmpeg.view(fullStream, filename=str(outputPath) + ".png")
    ffmpeg.run(fullStream, overwrite_output=True)
    printTags(selectedOrgs)