def createSlide(i, status, handle, hash):
    """Render tweet number *i* from *status* as a short MP4 slide.

    The slide shows the handle, the author's bio, a "Tweet: N" counter and
    the tweet text wrapped to 30 columns, drawn over the glob of images in
    ``imgIn/``.  Output is written to ``img/<hash>_<i>.mp4``.

    NOTE(review): the ``hash`` parameter shadows the builtin of the same
    name; kept as-is because it is part of the public signature.
    """
    # drawtext options shared by every caption on the slide.
    caption_style = dict(box=1, boxborderw=10, escape_text=True,
                         fontsize=30, font="OpenSansEmoji")
    tweet_text = status[i].text
    stream = ffmpeg.input('imgIn/*.jpg', pattern_type='glob', framerate=1)
    stream = ffmpeg.drawtext(stream, text="@" + handle,
                             x=450, y=100, **caption_style)
    stream = ffmpeg.drawtext(stream,
                             text=status[i]._json["user"]["description"],
                             x=450, y=150, **caption_style)
    stream = ffmpeg.drawtext(stream, text="Tweet: " + str(i + 1),
                             x=450, y=300, **caption_style)
    # One drawtext per wrapped line, stacked 50 px apart.
    for line_no, line in enumerate(wrap(tweet_text, 30)):
        stream = ffmpeg.drawtext(stream, text=line,
                                 x=500, y=350 + (50 * line_no),
                                 **caption_style)
    out_name = 'img/' + str(hash) + "_" + str(i) + '.mp4'
    ffmpeg.run(ffmpeg.output(stream, out_name))
def start(self):
    """Launch the ffmpeg server process and return its Popen handle.

    Reads raw A/V from stdin (``pipe:``), stamps the current local time
    (``%{localtime:%R}``) onto the video, and pushes the result to
    ``self.output`` as h264-in-FLV.  The handle is also kept on
    ``self.process``, and the compiled argv on ``self.cmd``.
    """
    Logger.LOGGER.log(Logger.TYPE_INFO,
                      'Starting Server, output to: {}'.format(self.output))
    src = ffmpeg.input('pipe:')
    clock = ffmpeg.drawtext(src['v'],
                            '%{localtime:%R}',
                            x=c.SERV_DRAWTEXT_X,
                            y=c.SERV_DRAWTEXT_Y,
                            escape_text=False,
                            shadowcolor=c.SERV_DRAWTEXT_SHADOW_COLOR,
                            shadowx=c.SERV_DRAWTEXT_SHADOW_X,
                            shadowy=c.SERV_DRAWTEXT_SHADOW_Y,
                            fontsize=c.SERV_DRAWTEXT_FONT_SIZE,
                            fontfile=c.SERV_DRAWTEXT_FONT_FILE,
                            fontcolor=c.SERV_DRAWTEXT_FONT_COLOR)
    joined = ffmpeg.concat(clock, src['a'], v=1, a=1)
    self.ff = ffmpeg.output(joined, self.output,
                            vcodec='h264',
                            aspect=c.SERV_OUTPUT_ASPECT,
                            acodec=c.SERV_OUTPUT_ACODEC,
                            crf=c.SERV_OUTPUT_CRF,
                            preset=c.SERV_OUTPUT_PRESET,
                            format='flv',
                            pix_fmt='yuv444p')
    # '-re' tells ffmpeg to consume the input at its native frame rate.
    self.cmd = ['ffmpeg', '-re'] + ffmpeg.get_args(self.ff)
    self.process = subprocess.Popen(self.cmd, stdin=subprocess.PIPE,
                                    stdout=devnull, stderr=devnull)
    return self.process
def combine_videos(videos, scores):
    """Concatenate every non-"meta" video under *videos* into one MP4.

    Each clip is captioned bottom-centre with "<iteration> - <score>"
    (iteration taken from the filename via ``get_iteration``, score from
    the parallel *scores* iterable).  Returns the path of the combined
    file written under /tmp with a uuid1 name.
    """
    clips = [p for p in sorted(Path(videos).iterdir())
             if "meta" not in str(p)]
    iterations = (get_iteration(p) for p in clips)
    streams = [ffmpeg.input(str(p)) for p in clips]
    captioned = [
        ffmpeg.drawtext(s,
                        text=f"{it} - {sc}",
                        x='(w-tw)/2',
                        y='(h-th)',
                        fontcolor='[email protected]',
                        box=1,
                        boxcolor='[email protected]',
                        fontfile='ttf/Hack-Bold.ttf')
        for it, s, sc in zip(iterations, streams, scores)
    ]
    out_file = f"/tmp/{uuid.uuid1()}.mp4"
    sink = ffmpeg.output(ffmpeg.concat(*captioned), out_file)
    ffmpeg.overwrite_output(sink).run()
    return out_file
def add_text(stream, text):
    """Overlay *text* near the top-left corner of the video *stream*.

    Returns a new stream with a white, 36 pt, 4 px-bordered Arial caption
    drawn at (50, 50); the input stream is not modified in place.
    """
    style = {
        'fontfile': r'C:/Windows/Fonts/arial.ttf',  # Windows system font path
        'fontsize': 36,
        'fontcolor': 'white',
        'borderw': 4,
    }
    return ffmpeg.drawtext(stream, text=text, x=50, y=50, **style)
def buildpreroll(stream, filelocation):
    """Compose a Plex preroll clip for one movie and rotate the preroll library.

    Overlays the scaled source video, the movie poster, the title, the
    ratings and a wrapped plot summary onto a sidebar template, muxes in
    the preroll audio, writes ``<container_folder>prerolls/<name> Preroll.mp4``,
    trims the preroll folder to at most 26 entries, pushes the updated
    preroll list to the Plex server settings and deletes the temp files.

    Relies on module globals: name, summary, CriticRating, AudienceRating,
    container_folder, Preroll_length, folder, plexsetting, plex.
    NOTE(review): the ``stream`` parameter is unused in this body — confirm
    against callers before removing it.
    """
    # ~33 px per glyph at fontsize 76; offset centres the title around x=1106.
    titleoffset = ((len(name) * 33) / 2) - 7
    if titleoffset > 716:
        # Title too wide for one line: wrap and centre on the first line only.
        title = textwrap.fill(name, width=40, break_long_words=False)
        titlenl = title.find("\n")
        titleoffset = ((titlenl * 33) / 2) - 7
    description = textwrap.fill(summary, width=22, break_long_words=False)
    num_of_lines = description.count("\n")
    # Shrink the summary font when it would overflow the 580 px panel.
    if num_of_lines > 22:
        descriptionSize = 580 / num_of_lines
    else:
        descriptionSize = 26
    sidebar = ffmpeg.input(
        "{}overlays/prerolloverlay.mov".format(container_folder))
    poster = ffmpeg.input("{}poster.jpg".format(container_folder))
    fadeout = ffmpeg.input("{}overlays/fadeout.mov".format(container_folder))
    titlefont = "{}fonts/Bebas-Regular.ttf".format(container_folder)
    descriptionfont = "{}fonts/Roboto-Light.ttf".format(container_folder)
    poster = ffmpeg.filter(poster, "scale", 200, -1)
    preroll = ffmpeg.input("{}".format(filelocation), ss=10, t=Preroll_length)
    preroll = ffmpeg.filter(preroll, "scale", 1600, -1)
    prerollaudio = ffmpeg.input("{}prerollaudio.mp3".format(container_folder))
    preroll = ffmpeg.overlay(sidebar, preroll, x=300, y=125)
    preroll = ffmpeg.overlay(preroll, poster, x=40, y=195, enable="gte(t,1)")
    # BUG FIX: the "both ratings missing" case must be tested first; it
    # previously sat after the single-rating tests and was unreachable.
    if CriticRating == "" and AudienceRating == "":
        print("we have no ratings available")
    elif CriticRating == "":
        # Only the audience score is available.
        preroll = ffmpeg.drawtext(
            preroll,
            text="Audiance Rating: {}%".format(AudienceRating),
            fontfile=titlefont,
            x=3,
            y=150,
            escape_text=True,
            fontcolor="0xFFFFFF@0xff",
            fontsize=36,
            enable="gte(t,1)",
        )
    elif AudienceRating == "":
        # Only the critic score is available.
        # BUG FIX: this branch draws the critic score but used to label it
        # "Audiance Rating"; it now uses the same label as the branch below.
        preroll = ffmpeg.drawtext(
            preroll,
            text="Critic Rating: {}%".format(CriticRating),
            fontfile=titlefont,
            x=3,
            y=150,
            escape_text=True,
            fontcolor="0xFFFFFF@0xff",
            fontsize=36,
            enable="gte(t,1)",
        )
    else:
        # Both scores available: stack them (audience at y=165, critic at y=135).
        preroll = ffmpeg.drawtext(
            preroll,
            text="Audiance Rating: {}%".format(AudienceRating),
            fontfile=titlefont,
            x=3,
            y=165,
            escape_text=True,
            fontcolor="0xFFFFFF@0xff",
            fontsize=32,
            enable="gte(t,1)",
        )
        preroll = ffmpeg.drawtext(
            preroll,
            text="Critic Rating: {}%".format(CriticRating),
            fontfile=titlefont,
            x=3,
            y=135,
            escape_text=True,
            fontcolor="0xFFFFFF@0xff",
            fontsize=32,
            enable="gte(t,1)",
        )
    preroll = ffmpeg.drawtext(
        preroll,
        text=name,
        fontfile=titlefont,
        x=(1106 - titleoffset),
        y=20,
        escape_text=True,
        fontcolor="0xFFFFFF@0xff",
        fontsize=76,
        enable="gte(t,1)",
    )
    preroll = ffmpeg.drawtext(
        preroll,
        text=description,
        fontfile=descriptionfont,
        x=3,
        y=500,
        escape_text=True,
        fontcolor="0xFFFFFF@0xff",
        fontsize=descriptionSize,
        enable="gte(t,1)",
    )
    preroll = ffmpeg.overlay(preroll, fadeout)
    preroll = ffmpeg.output(
        prerollaudio, preroll,
        ("{}prerolls/{} Preroll.mp4".format(container_folder, name)))
    ffmpeg.run(preroll)
    # Rotate the library: keep at most 26 prerolls, dropping the oldest.
    dirListing = os.listdir("{}prerolls/".format(container_folder))
    full_path = ["Prerolls/{0}".format(x) for x in dirListing]
    if len(dirListing) > 26:
        oldest_file = min(full_path, key=os.path.getctime)
        os.remove(oldest_file)
        # NOTE(review): assigning `plexsetting` makes it function-local, so
        # reading it on the right-hand side raises UnboundLocalError when
        # this branch runs — probably needs a `global plexsetting`
        # declaration; confirm intent before relying on this path.
        plexsetting = re.sub("{}{}".format(container_folder, oldest_file), "",
                             plexsetting)
    preroll_list = (';{}prerolls/'.format(folder)).join(
        os.listdir("{}prerolls/".format(container_folder)))
    preroll_list = (";{}{}".format(folder, preroll_list))
    print(preroll_list)
    plex.settings.get("cinemaTrailersPrerollID").set(preroll_list)
    plex.settings.save()
    os.remove("{}poster.jpg".format(container_folder))
    os.remove("{}prerollaudio.mp3".format(container_folder))
    os.remove("{}".format(filelocation))
    print("done!")
def ffmpeg_process(file_, ffmpegPath, ffprobePath, basePath):
    """Burn a random 4-digit code and a watermark into the video *file_*.

    Probes the file with ffprobe, picks a random code, colour and start
    time (5–74% through the clip), draws the code for CODE_DUR seconds
    with a synchronized beep mixed into the audio, adds a permanent
    "LegalTechnicality.com" watermark, and writes ``<name>_post<ext>``.
    """
    log.info("starting processing: {}".format(file_))
    filename, ext = os.path.splitext(file_)
    outputFile = filename + "_post" + ext
    log.info("output path: {}".format(outputFile))
    # Probe the container and keep the first video stream's metadata.
    jProbe = ffmpeg.probe(file_, cmd=ffprobePath)["streams"]
    vProbe = [s for s in jProbe if s["codec_type"] == "video"][0]
    streamInfo = {
        "width": int(vProbe["width"]),
        "height": int(vProbe["height"]),
        "start": float(vProbe["start_time"]),
        "duration": float(vProbe["duration"]),
        "frames": int(vProbe["nb_frames"]),
        "fps": int(vProbe["nb_frames"]) / float(vProbe["duration"]),
    }
    log.debug("streamInfo: {}".format(streamInfo))
    CODE_DUR = 30  # seconds the code stays on screen
    rCode = str(random.randint(0, 9999)).zfill(4)  # zero pad to 4 places
    colors = ["red", "orange", "yellow", "green", "blue", "purple",
              "black", "white"]
    rColor = random.choice(colors)
    rPer = random.randrange(5, 75) / 100  # fraction of the clip to skip
    rStFrame = math.floor(streamInfo["frames"] * rPer)
    rStTime = math.floor(rStFrame * 1 / streamInfo["fps"])
    log.info("Random choices: color: {}, code: {}, stTime {}".format(
        rColor, rCode, rStTime))
    src = ffmpeg.input(file_)  # renamed from `input` (shadowed the builtin)
    soundBite = ffmpeg.input(
        os.path.join(basePath, "resources", "audio", "beep.mp3"))
    # Delay the beep to the code's start (adelay wants ms), pad to full length.
    soundBite = soundBite.filter("adelay", rStTime * 1000).filter("apad")
    audio = ffmpeg.filter([src.audio, soundBite], "amerge")
    fontPath = os.path.join(basePath, "resources", "fonts",
                            "OpenSans-Regular.ttf")
    video = ffmpeg.drawtext(src.video,
                            text=rCode,
                            x=40,
                            y=40,
                            fontfile=fontPath,
                            fontsize=80,
                            fontcolor=rColor,
                            box=True,
                            boxcolor="gray",
                            boxborderw=10,
                            enable="between(t,{},{})".format(
                                rStTime, rStTime + CODE_DUR))
    video = ffmpeg.drawtext(video,
                            text="LegalTechnicality.com",
                            x=40,
                            y="h-th-40",
                            fontfile=fontPath,
                            fontsize=60,
                            alpha=0.75,
                            fontcolor="white")
    output = ffmpeg.output(video, audio, outputFile)  # **{"ac": 1}
    fullCmd = ffmpeg.compile(output, cmd=ffmpegPath, overwrite_output=True)
    log.debug("ffmpeg command to be called: {}".format(fullCmd))
    try:
        stdout, stderr = ffmpeg.run(output, cmd=ffmpegPath, quiet=False,
                                    capture_stdout=False,
                                    capture_stderr=False,
                                    overwrite_output=True)
        # strings are falsy; False if empty
        if stdout:
            log.debug("ffmpeg output: {}".format(stdout.decode("utf-8")))
        if stderr:
            # actually a false error -- just the ffmpeg output
            log.debug("ffmpeg output: {}".format(stderr.decode("utf-8")))
        log.info("sucessfully processed file")
    except ffmpeg.Error as e:
        log.exception("failed to process file")
        log.exception(e.stderr)
def play(self):
    """Encode and play the current media item, piping output to the server.

    "upnext" items get a text overlay and a second audio track mixed in;
    everything else is just scaled and re-muxed.  Waits for the encoder up
    to the item's duration plus ``c.CLIENT_FLEX`` seconds; on timeout the
    process is killed and its returncode is forced to 0.

    Returns the process return code (0 if it exited without problems,
    1 for a general error).
    """
    if self.media_type == "upnext":
        Logger.LOGGER.log(
            Logger.TYPE_INFO,
            'Playing upnext v:{} a:{} (Duration: {})'.format(
                self.media_item.video_path, self.media_item.audio_path,
                self.media_item.duration_readable))
        vid_in = ffmpeg.input(self.media_item.video_path)
        aud_in = ffmpeg.input(self.media_item.audio_path)
        video = ffmpeg.filter(vid_in['v'], 'scale', c.CLIENT_VIDEO_SCALE)
        video = ffmpeg.drawtext(video,
                                '{}'.format(self.media_item.overlay_text),
                                x=c.CLIENT_DRAWTEXT_X,
                                y=c.CLIENT_DRAWTEXT_Y,
                                escape_text=False,
                                shadowcolor=c.CLIENT_DRAWTEXT_SHADOW_COLOR,
                                shadowx=c.CLIENT_DRAWTEXT_SHADOW_X,
                                shadowy=c.CLIENT_DRAWTEXT_SHADOW_Y,
                                fontsize=c.CLIENT_DRAWTEXT_FONT_SIZE,
                                fontfile=c.CLIENT_DRAWTEXT_FONT_FILE,
                                fontcolor=c.CLIENT_DRAWTEXT_FONT_COLOR)
        # Mix both audio tracks; output length follows the first input.
        mixed = ffmpeg.filter([vid_in['a'], aud_in['a']], 'amix',
                              duration="first")
        output_stream = ffmpeg.concat(video, mixed, v=1, a=1)
    else:
        Logger.LOGGER.log(
            Logger.TYPE_INFO,
            'Playing v:{} (Duration: {})'.format(
                self.media_item, self.media_item.duration_readable))
        src = ffmpeg.input(self.media_item.video_path)
        video = ffmpeg.filter(src['v'], 'scale', c.CLIENT_VIDEO_SCALE)
        output_stream = ffmpeg.concat(video, src['a'], v=1, a=1)
    self.ff = ffmpeg.output(output_stream, 'pipe:',
                            vcodec=c.CLIENT_VCODEC,
                            aspect=c.CLIENT_ASPECT,
                            flags=c.CLIENT_FLAGS,
                            g=c.CLIENT_G,
                            acodec=c.CLIENT_ACODEC,
                            strict=c.CLIENT_STRICT,
                            ab=c.CLIENT_AUDIO_BITRATE,
                            ar=c.CLIENT_AUDIO_RATE,
                            preset=c.CLIENT_PRESET,
                            hls_allow_cache=c.CLIENT_HLS_ALLOW_CACHE,
                            hls_list_size=c.CLIENT_HLS_LIST_SIZE,
                            hls_time=c.CLIENT_HLS_TIME,
                            format=c.CLIENT_FORMAT,
                            pix_fmt=c.CLIENT_PIX_FMT)
    self.cmd = ['ffmpeg'] + ffmpeg.get_args(self.ff)
    self.process = subprocess.Popen(
        self.cmd,
        stdout=self.server.stdin,
        stderr=(None if CLIENT_DEBUG else devnull))
    try:
        flex = c.CLIENT_FLEX  # number of seconds of extra time before timeout
        timeout = (self.media_item.duration / 1000)  # content length, ms -> s
        self.process.wait(timeout=timeout + flex)
    except subprocess.TimeoutExpired:
        Logger.LOGGER.log(
            Logger.TYPE_ERROR,
            'Taking longer to play than expected, killing current item')
        kill(self.process.pid)
        self.process.returncode = 0
    # returncode 0 if process exited without problems, 1 for general error
    return self.process.returncode
def create_single_tweet(pos, tweet, unique_code):
    """Render one tweet (text, header and up to four images) as an MP4 slide.

    Draws the author's name, username and timestamp over a white background
    with the profile picture, wraps the tweet body to 90 columns, lays the
    attached images out in a 2-per-row grid below the final text line, and
    writes the result to ``{parpath}/videos/{unique_code}-{pos}.mp4``.
    """
    # BUG FIX: the header drawtext calls passed the .ttf path via `font=`,
    # which FFmpeg's drawtext treats as a fontconfig *family name*; the
    # body-text loop below already (correctly) uses `fontfile=` for the
    # same path, so the header calls now do too.
    header_style = dict(fontfile=f"{parpath}/fonts/OpenSansEmoji.ttf",
                        fontsize=25, box=1, boxborderw=15, escape_text=True)
    stream = ffmpeg.input(f'{parpath}/img/white.jpg', pattern_type='glob',
                          framerate=1)
    stream = ffmpeg.overlay(stream, ffmpeg.input(tweet.profile_pic),
                            x=100, y=75)
    stream = ffmpeg.drawtext(stream, text=tweet.name, x=200, y=50,
                             **header_style)
    stream = ffmpeg.drawtext(stream, text=tweet.username, x=200, y=100,
                             **header_style)
    stream = ffmpeg.drawtext(stream, text=tweet.time_stamp, x=1200, y=50,
                             **header_style)
    wrapped_tweet = wrap(tweet.text, 90)
    # The y value where the text begins; advanced 50 px per wrapped line so
    # the image grid starts below the last line of text.
    vertical_y = 200
    for i, line in enumerate(wrapped_tweet):
        stream = ffmpeg.drawtext(stream, text=line,
                                 fontfile=f"{parpath}/fonts/OpenSansEmoji.ttf",
                                 fontsize=28, box=1, boxborderw=15,
                                 escape_text=True, x=200, y=200 + (50 * i))
        vertical_y += 50
    for position in range(len(tweet.images)):
        # Resize the image and get its location; layout depends on position.
        url = resize_image(tweet.images[position], unique_code, position,
                           "link")
        if position < 2:
            # First row: two images side by side, 400 px apart.
            stream = ffmpeg.overlay(stream, ffmpeg.input(url),
                                    x=200 + (position * 400),
                                    y=vertical_y)
        else:
            # Second row, 300 px further down.
            stream = ffmpeg.overlay(stream, ffmpeg.input(url),
                                    x=200 + ((position - 2) * 400),
                                    y=vertical_y + 300)
    stream = ffmpeg.output(stream,
                           f'{parpath}/videos/{unique_code}-{pos}.mp4',
                           loglevel='panic')
    ffmpeg.run(stream)
stream2 = ffmpeg.setpts(stream2, 'PTS+%s/TB' % (start_offset - 1)) for i in xrange(0, len(dive_profile)): string = annotator.next() if (i + 1) < len(dive_profile): enable_str = 'between(t,%s,%s)' % (i + start_offset, i + 1 + start_offset) else: enable_str = 'gte(t,%s)' % (i + start_offset) stream = ffmpeg.drawtext(stream, string, x=50, y=50, fontfile=fontfile, fontsize=70, escape_text=False, shadowcolor='Black', shadowx=3, shadowy=3, start_number=100, enable=enable_str, fontcolor='WhiteSmoke') stream = ffmpeg.overlay(stream, stream2, x=50, y=500, enable='gte(t,%s)' % (start_offset - 1)) stream = ffmpeg.output(stream, audio, output) stream = ffmpeg.overwrite_output(stream) ffmpeg.run(stream)