Example #1
# These examples assume MoviePy 1.x imports, e.g.:
# from moviepy.editor import (ImageClip, TextClip, CompositeVideoClip,
#                             VideoFileClip, concatenate_videoclips)
def main():
    clips = []
    with open("names.txt") as f:
        name = f.readlines()
        print(name)
        for i in name:
            i = i.split('\n')[0]
            clips.append(make(i))
    print(clips)
    concatenate_videoclips(clips).set_fps(30).write_videofile("飞跃起点理.mp4")
    exit()
    # Everything below exit() never runs; it is leftover test code kept for reference.
    clip1 = ImageClip("./images/2.jpg")
    txt = TextClip("吼哇!123ASDasd".encode("utf-8"),
                   font="SimSun",
                   color='white',
                   fontsize=48)
    txt_col = txt.on_color(size=(clip1.w, txt.h + 10),
                           color=(0, 0, 0),
                           pos=(6, 'center'),
                           col_opacity=0.6).set_pos(lambda t: ((200), (800)))
    w, h = moviesize = clip1.size
    txt_mov = txt_col.set_pos(lambda t: (max(w / 30, int(w - 1 * w * t)),
                                         max(5 * h / 6, int(100 * t))))

    CompositeVideoClip([
        clip1, txt_mov
    ]).set_duration(1).set_fps(30).write_videofile("my_concatenation.mp4")
    CompositeVideoClip([clip1, txt_mov
                        ]).set_duration(1).set_fps(30).save_frame("test.png",
                                                                  t="00:00:01")
Example #2
def description_helper(self, description, currloc, position, clip):
    # Build a caption that sits over `clip`, starting at `currloc` seconds.
    txt = TextClip(description, font='Arial', color="MediumSpringGreen",
                   fontsize=44)
    txt_col = (txt.on_color(col_opacity=0)
               .set_duration(clip.duration)
               .set_start(currloc))

    txt_mov = txt_col.set_position(('center', position))

    return txt_mov
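
A minimal usage sketch for the helper above. The `editor` instance, the input.mp4 file, and the 900-pixel y-position are assumptions for illustration, not part of the original example:

from moviepy.editor import VideoFileClip, CompositeVideoClip

# `editor`: hypothetical instance of the class that defines description_helper
base = VideoFileClip("input.mp4")        # hypothetical source clip
caption = editor.description_helper("A short caption", 0, 900, base)
CompositeVideoClip([base, caption]).write_videofile("captioned.mp4")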
Example #3
def sub_config(txt):
    # Render subtitle text on a semi-transparent box about 5% larger than the text.
    text_clip = TextClip(txt,
                         font='Helvetica Neue',
                         fontsize=40,
                         color='white')
    text_clip = text_clip.on_color(size=(int(text_clip.w * 1.05),
                                         int(text_clip.h * 1.05)),
                                   col_opacity=0.5)
    return text_clip
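
A minimal sketch of how a subtitle built with sub_config might be placed over a video. The file video.mp4, the bottom-center position, and the 1s-3s window are assumptions:

from moviepy.editor import VideoFileClip, CompositeVideoClip

video = VideoFileClip("video.mp4")       # hypothetical source file
sub = (sub_config("Hello there")
       .set_position(('center', 'bottom'))
       .set_start(1)
       .set_duration(2))                 # visible from t=1s to t=3s
CompositeVideoClip([video, sub]).write_videofile("subtitled.mp4")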
Example #4
def make(name):
    clip1 = ImageClip("./images/{}.jpg".format(name))
    txt = TextClip(name,  # pass the str directly; .encode("utf-8") is unnecessary on Python 3
                   font="SimSun",
                   color='white',
                   fontsize=96)
    txt = txt.on_color(size=(clip1.w, txt.h + 10),
                       color=(0, 0, 0),
                       pos=(6, "center"),
                       col_opacity=0.6)
    txt = txt.set_pos(lambda t: (max(clip1.w / 7, int(clip1.w - 1 * clip1.w * t)),
                                 max(3 * clip1.h / 4, int(100 * t))))
    return CompositeVideoClip([clip1, txt]).set_duration(3)
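
This make() is the helper that Example #1 calls once per line of names.txt. A standalone sketch, assuming an image named ./images/2.jpg exists (the name "2" is only an illustration):

clip = make("2")                         # expects ./images/2.jpg
clip.set_fps(30).write_videofile("single_card.mp4")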
Example #5
def construct_subclip(index, my_text, video, id):
    ukulele = video
    w, h = moviesize = ukulele.size
    txt = TextClip(my_text, font='DejaVu-Serif', color='white', fontsize=24)
    txt_col = txt.on_color(size=(ukulele.w + txt.w, txt.h + 10),
                           color=(0, 0, 0),
                           pos=(6, 'center'),
                           col_opacity=0.6)
    txt_mov = txt_col.set_pos(lambda t: (max(w / 30, int(w - 0.5 * w * t)),
                                         max(5 * h / 6, int(100 * t))))
    result = CompositeVideoClip([video, txt_mov])  # overlay the moving text on the video
    os.makedirs(store_path + "/session" + str(id), exist_ok=True)
    url = store_path + "/session" + str(id) + "/sample_edited" + str(
        index) + ".mp4"
    result.subclip(0, 10).write_videofile(url)  # write only the first 10 seconds
    view_url = request_url + "/videos/" + "/session" + str(
        id) + "/sample_edited" + str(index) + ".mp4"
    return view_url
Example #6
def main():
    '''
    Main method.
    Depends on global variables: lang, full, cache, subtitles

    :returns: Nothing.
    '''

    global lang, full, cache, subtitles

    getargs()

    checkdir('Data_TxtToSL/')

    checkdir('Data_TxtToSL/phrases/')
    checkdir('Data_TxtToSL/autoskip/')

    checkdir('Data_TxtToSL/cache/')

    print()

    if lang is None:
        while True:
            print(
                "Select Sign Language:\n1) BSL (British Sign Language)\n2) ASL (American Sign Language)\n3) DGS (German Sign Language / Deutsche Gebärdensprache)\n"
            )
            innum = input("Number: ")

            if innum == "1":
                lang = "BSL"
                break
            elif innum == "2":
                lang = "ASL"
                break
            elif innum == "3":
                lang = "DGS"
                break

    print()

    checklang(lang.lower())

    print()

    loadautoskip(lang.lower())

    print()

    loadphrases(lang.lower())

    print()

    if full is None:
        full = input("\nInput: ")

    words = interpret(full)

    print(words)

    for word in copy.deepcopy(words):  # iterate over a copy; `words` is mutated below
        if lang == "BSL" or lang == "ASL":
            content = signorg_getvid(word)
        elif lang == "DGS":
            content = dgs_getvid(word)

        if content != "cache" and content is not False and not isinstance(
                content, list):
            savevid(content, word)
        elif isinstance(content, list):
            # Replace the word in-place with the list of words returned.
            index = words.index(word)
            words.remove(word)
            words[index:index] = content
        elif content is False:  # Skip word
            words.remove(word)

        print()

    print(words)
    print()

    clips = []

    for word in words:
        replaced = word.replace('[', '').replace(']', '')

        if not os.path.isfile("Data_TxtToSL/cache/{}/words/{}.mp4".format(
                lang.lower(), replaced.replace(' ', '-'))):
            continue

        originalClip = VideoFileClip(
            "Data_TxtToSL/cache/{}/words/{}.mp4".format(
                lang.lower(), replaced.replace(' ', '-')))

        if subtitles:
            with yaspin(
                    text="Generating subtitles for '{}'".format(word)) as sp:
                txt = TextClip(replaced,
                               font='Arial',
                               color='white',
                               fontsize=24)

                txt_col = txt.on_color(size=(originalClip.w, txt.h + 30),
                                       color=(0, 0, 0),
                                       pos=('center', 'center'),
                                       col_opacity=0.2)

                txt_mov = txt_col.set_pos(('center', 0.7), relative=True)

                composite = CompositeVideoClip([originalClip, txt_mov])
                composite.duration = originalClip.duration

                clips.append(composite)

                sp.ok(ansi.GREEN + "✓" + ansi.END)
        else:
            clips.append(originalClip)

    print()
    print(clips)
    print()

    final = concatenate_videoclips(clips, method="compose")

    final.write_videofile("finished.mp4", fps=30, audio=False, threads=4)

    if not cache:
        with yaspin(text="Deleting video files (because caching is disabled)"
                    ) as sp2:
            for word in words:
                # Remove the same cached file that was loaded above.
                replaced = word.replace('[', '').replace(']', '')
                os.remove("Data_TxtToSL/cache/{}/words/{}.mp4".format(
                    lang.lower(), replaced.replace(' ', '-')))

            sp2.ok(ansi.GREEN + "✓" + ansi.END)
Example #7
    def generate_vid_list(self, option, currloc):
        tmpclips = []
        totalcliptime = 0

        for fldr_date in self.FLDR_DATES:
            folderloc = os.path.join(SECTIONS_PATH, option, 'videos', \
                                                                str(fldr_date))
            datafolderloc = os.path.join(SECTIONS_PATH, option, 'data', \
                                                                str(fldr_date))
         
            for root, _, filenames in os.walk(folderloc):
                for filename in fnmatch.filter(filenames, '*.mp4'):
                    datavar = filename.split('.')[0]
                    dname = os.path.join(datafolderloc, datavar + ".json")
                    if not os.path.isfile(dname):
                        sys.stderr.write("ERROR: no data found for: %s\n" % \
                                                                        dname)
                        continue
                    else:
                        with open(dname) as json_file:
                            json_data = json.load(json_file)
         
                    if self.check_blacklisted_users(json_data["username"]):
                        continue
                    
                    fname = os.path.join(root, filename)
                    if not os.path.isfile(fname):
                        sys.stderr.write("ERROR: no video found for: %s\n" % \
                                                                        fname)
                        continue 
                    else:
                        sys.stdout.write("Adding file name: %s\n" % \
                                                fname[len(SECTIONS_PATH)+1:])
         
                    try:
                        clip = (VideoFileClip(fname).resize((1157, 830))
                                .set_start(currloc).crossfadein(1)
                                .set_position("center")
                                .set_position((383, 88)))  # the second set_position overrides the first
                    except Exception:
                        sys.stderr.write("ERROR: cannot open video: %s\n" %
                                         fname)
         
                        sys.stderr.write("DELETING: %s\n" % fname)
                        os.remove(fname)
                        sys.stderr.write("DELETING: %s\n" % dname)
                        os.remove(dname)
                        continue
         
                    tmpclips.append(clip)
                     
                    # add creator image
                    self.make_creator_icon(json_data["avatarUrl"], \
                                                            datavar + ".jpg")
                    creatorclip = (ImageClip(datavar + ".jpg", \
                                         transparent=True).set_start(currloc).\
                                         set_duration(clip.duration).\
                                         set_position((383, 10)))
         
                    tmpclips.append(creatorclip)
                    time.sleep(1)
                    # Safe to remove: ImageClip loads the image into memory at construction.
                    os.remove(datavar + ".jpg")
                     
                    # add creator name
                    try:
                        creatortxt = TextClip(json_data["username"]
                                              .encode('ascii', 'ignore')
                                              .decode('ascii'),  # drop non-ASCII characters
                                              font='Arial-Black',
                                              color="MediumSpringGreen",
                                              fontsize=30)
                    except Exception:
                        sys.stderr.write("\nERROR: using default username.\n")
                        creatortxt = TextClip("Default UserName",
                                              font='Arial-Black',
                                              color="MediumSpringGreen",
                                              fontsize=30)

                    creatortxt_col = creatortxt.on_color(col_opacity=0).\
                                                set_duration(clip.duration).\
                                                set_start(currloc)
         
                    creatortxt_mov = creatortxt_col.set_position((465, 23))
                    tmpclips.append(creatortxt_mov)
         
                    # add the description
                    desc_clip = self.create_description(\
                                        json_data["description"], currloc, clip)
                    for item in desc_clip:
                        tmpclips.append(item)
         
                    currloc += clip.duration
                    totalcliptime += clip.duration
     
        return (tmpclips, currloc, totalcliptime)
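
A minimal sketch of how the returned tuple might be rendered. The `builder` instance, the 'music' section name, and the 1920x1080 canvas are assumptions for illustration:

from moviepy.editor import CompositeVideoClip

# `builder`: hypothetical instance of the class that defines generate_vid_list
clips, end_time, total = builder.generate_vid_list("music", 0)
(CompositeVideoClip(clips, size=(1920, 1080))
    .set_duration(total)
    .write_videofile("section.mp4", fps=30))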