def handler(event, context):
    common.cleanup()
    project_id = event["project_id"]

    # Project info
    response = TABLE.query(
        KeyConditionExpression=Key('id').eq(project_id) & Key('item').eq('info'))
    project_data = response['Items'][0]['data']
    print(project_data)

    # Chunks
    chunks = TABLE.query(
        KeyConditionExpression=Key('id').eq(project_id) & Key('item').begins_with('chunk_'))

    paths = []
    for i in chunks['Items']:
        print(i)
        # Download chunk files to disk
        local_path = common.downloadFile(i['chunkData']["url"], "chunk", "mp4")
        paths.append("file '%s'" % local_path)

    # Write the ffmpeg concat demuxer list file
    with open('/tmp/concat.txt', 'w') as the_file:
        the_file.write("\n".join(paths))

    # Concatenate segments
    video_file = "/tmp/concat.mp4"
    cmd = "%s -f concat -safe 0 -i /tmp/concat.txt -c copy -y %s" % (
        common.FFMPEG_BIN, video_file)
    common.executeCmd(cmd)

    # Get the final video duration in whole seconds
    cmd = common.FFMPEG_BIN + " -i /tmp/concat.mp4 2>&1 | grep \"Duration\" | cut -d ' ' -f 4 | sed s/,// | sed 's@\..*@@g' | awk '{ split($1, A, \":\"); split(A[3], B, \".\"); print 3600*A[1] + 60*A[2] + B[1] }'"
    res = common.executeCmd(cmd)
    duration = res["result"].replace("\n", "")
    # duration = check_output(cmd, shell=True, stderr=subprocess.STDOUT).replace("\n", "")
    # print("duration = %s" % duration)

    # Add the audio track, with a 3 second audio fade out at the end
    final_file = "/tmp/%s" % project_data['fileName']
    cmd = [
        common.FFMPEG_BIN,
        "-i %s -i %s" % (video_file, project_data['audioUrl']),
        "-filter_complex \"[1]afade=t=out:st=%.2f:d=3[a]\" -map 0:0 -map \"[a]\"" % (int(duration) - 3),
        "-c:v copy -c:a aac -t %s -y %s" % (duration, final_file)
    ]
    common.executeCmd(" ".join(cmd))

    # Upload the final file to S3
    video_url = common.uploadS3(
        final_file,
        "%s/%s" % (project_data['folderName'], project_data['fileName']))

    # Post the result to the webhook
    common.notifyWebhook(project_id, video_url, "ready")
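# --- Not part of the original source ---
# The shared "common" module (executeCmd, downloadFile, uploadS3, notifyWebhook,
# FFMPEG_BIN) is defined elsewhere in the repo and not shown here. A minimal
# sketch of what executeCmd might look like, inferred from the commented-out
# check_output call above and from how callers use res["error"], res["result"]
# and res["body"]:
import subprocess

def executeCmd(cmd):
    """Run a shell command, capturing combined stdout/stderr (sketch only)."""
    try:
        output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        return {"error": False, "result": output, "body": None}
    except subprocess.CalledProcessError as e:
        # Callers inspect e.cmd, e.returncode and e.output on failure
        return {"error": True, "result": None, "body": e}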
def render(self):
    # FACE DETECTION
    if 'IS_LOCAL' in os.environ:
        start_y_percent = 0.50
    else:
        roi, dimensions, focus = FaceDetect.detect(self.path)
        start_y_percent = 0.00
        if dimensions[1] > dimensions[0]:
            start_y_percent = roi[1] / float(dimensions[1])

    # Animate photo
    self.vf = []
    if self.data["animation"] == "panup":
        f = filters.photoPanUp(start_y_percent, self.data["duration"])
        self.vf.append("[0:v]%s[animated]" % f)
    elif self.data["animation"] == "pandown":
        f = filters.photoPanDown(start_y_percent, self.data["duration"])
        self.vf.append("[0:v]%s[animated]" % f)

    self.addEffects()

    video_path = '/tmp/video-' + common.randomString(10) + '.mp4'
    cmd = [
        common.FFMPEG_BIN,
        "-framerate 25 -i %s -t %.2f" % (self.path, self.data["duration"]),
        "-filter_complex \"%s\"" % ";".join(self.vf),
        "-map \"[final]\"",
        "-pix_fmt yuv420p -s 1280x720 -y %s" % video_path
    ]
    res = common.executeCmd(" ".join(cmd))
    if res["error"] is True:
        return {'statusCode': 400, 'error': res["body"]}
    else:
        print(res['result'])
        return {'statusCode': 200, 'video_path': video_path}
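# --- Not part of the original source ---
# addEffects() is defined elsewhere in this class and is not shown here. The
# -map "[final]" option above implies that, whatever effects it appends, it
# must leave the filter graph's last output labelled [final]. A hypothetical
# minimal version, assuming the label names used in render():
#
#     def addEffects(self):
#         src = "[animated]" if self.vf else "[0:v]"
#         self.vf.append("%snull[final]" % src)  # 'null' is ffmpeg's pass-through video filter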
def create(self):
    # DOWNLOAD FONT
    f = Font(self.data['resourceUrl'])

    # RENDER TEXT PNG WITH IMAGEMAGICK
    self.path = '/tmp/text-' + common.randomString(10) + '.png'
    label_cmd = "./render_label.sh \"%s\" \"%s\" \"%s\" \"%s\" %i \"%s\"" % (
        self.data['text'], f.ttf, self.data['color'],
        self.data['fontSize'] * self.comp.scale_factor,
        self.data['kerning'], self.path)
    res = common.executeCmd(label_cmd)
    if res["error"] is True:
        print('Error running ImageMagick')
        print(
            json.dumps({
                'command': res["body"].cmd,
                "code": res["body"].returncode,
                "error_output": res["body"].output
            }))
        # return {
        #     'statusCode': 400,
        #     'error': res["body"]
        # }
    else:
        if self.data['transitionIn'] == 'wipeLeftToRight':
            self.new_path = '/tmp/resource-' + common.randomString(10) + '.mp4'
            resource_cmd = "./render_wipe.sh \"%s\" \"%.2f\" \"%.2f\" \"%s\"" % (
                self.path, self.data['transitionInDuration'],
                self.comp.duration, self.new_path)
            common.executeCmd(resource_cmd)
            self.path = self.new_path
            self.data["transitionIn"] = "immediate"
            self.data["transitionInDuration"] = 0
            self.shouldLoop = False
def render(self):
    self.addEffects()

    video_path = '/tmp/video-' + common.randomString(10) + '.mp4'
    # cmd = FFMPEG_BIN + " %s -filter_complex \"%s\" -map \"[v%i]\" -pix_fmt yuv420p -s 1280x720 -y %s" % (" ".join(self.inputs), ";".join(self.filters), len(self.inputs) - 1, video_path)
    cmd = [
        common.FFMPEG_BIN,
        " ".join(self.inputs),
        "-filter_complex \"%s\"" % ";".join(self.filters),
        "-map \"[comp]\"",
        "-pix_fmt yuv420p -s 1280x720 -y %s" % video_path
    ]
    res = common.executeCmd(" ".join(cmd))
    if res["error"] is True:
        return {'statusCode': 400, 'error': res["body"]}
    else:
        return {'statusCode': 200, 'video_path': video_path}
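# --- Not part of the original source ---
# self.inputs and self.filters are built up elsewhere in the class. A
# standalone illustration of the shapes this render() expects, inferred only
# from how the command is assembled above (the file names and the single
# overlay filter are made up):
inputs = ["-i /tmp/video-abc.mp4", "-i /tmp/text-def.png"]
filters = ["[0:v][1:v]overlay=x=0:y=0[comp]"]  # the graph must end with the [comp] label
cmd = [
    "ffmpeg",
    " ".join(inputs),
    "-filter_complex \"%s\"" % ";".join(filters),
    "-map \"[comp]\"",
    "-pix_fmt yuv420p -s 1280x720 -y /tmp/out.mp4",
]
print(" ".join(cmd))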
def create(self):
    # DOWNLOAD IMAGE
    self.path = '/tmp/photo-' + common.randomString(10)
    urllib.urlretrieve(self.data["resourceUrl"], self.path)

    # Trying another way to download - does not seem to make a difference
    # print("downloading photo file")
    # f = urllib2.urlopen(self.data["resourceUrl"])
    # data = f.read()
    # with open(self.path, "wb") as code:
    #     code.write(data)
    # print("photo saved to disk")

    # Convert to jpg - this step is needed to make sure the images are encoded correctly.
    # Without it the rendered videos contain glitches.
    # convert = "%s -i %s -pix_fmt yuvj420p -y %s" % (common.FFMPEG_BIN, self.path, self.path + '.jpg')
    convert = "./tojpeg.sh %s" % self.path
    res = common.executeCmd(convert)
    self.path = self.path + '.jpg'
def PhotoPanDown(img, d):
    # FACE DETECTION
    roi, dimensions, focus = FaceDetect.detect(img)
    start_y_percent = 0.00
    if dimensions[1] > dimensions[0]:
        start_y_percent = roi[1] / float(dimensions[1])

    video_path = '/tmp/video-' + common.randomString(10) + '.mp4'
    cmd = common.FFMPEG_BIN + " -y -loop 1 -loglevel panic -i %s \
        -c:v libx264 -pix_fmt yuv420p \
        -filter_complex \
        \"[0:v]crop=h=ih:w='if(gt(a,16/9),ih*16/9,iw)':y=0:x='if(gt(a,16/9),(ow-iw)/2,0)'[v01]; \
        [v01]scale=-1:4000,crop=w=iw:h='min(iw*9/16,ih)':x=0:y='max((ih-oh)/6,%.2f*ih-((ih-oh)/6))+((t/%.2f)*(ih-oh)/6)',trim=duration=%.2f[v02]; \
        [v02]zoompan=z='min(pzoom+0.0005,1.5)':x='iw/2-(iw/zoom/2)':y='ih/2-(ih/zoom/2)':d=1,setsar=sar=1:1[v]\" \
        -map \"[v]\" -s \"1280x720\" %s" % (img, start_y_percent, d, d, video_path)
    res = common.executeCmd(cmd)
    if res["error"] is True:
        return {'statusCode': 400, 'error': res["body"]}
    else:
        return {'statusCode': 200, 'video_path': video_path}
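# --- Not part of the original source ---
# Example usage of PhotoPanDown (the image path and the 5 second duration are
# made up). On success the returned dict carries the path of the rendered clip:
res = PhotoPanDown("/tmp/photo-abc.jpg", 5.0)
if res["statusCode"] == 200:
    print(res["video_path"])
else:
    print(res["error"])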
def handler(event, context):
    common.cleanup()
    # log.debug("Received event {}".format(json.dumps(event)))

    project_id = event["project_id"]
    slide_from = event["slide_from"]
    slide_to = event["slide_to"]
    chunk_idx = event["chunk_idx"]
    chunk_sub_idx = 0

    slide_from_data = getSlideData(project_id, slide_from)
    slide_to_data = getSlideData(project_id, slide_to)

    from_path = common.downloadFile(slide_from_data["renderedUrl"], "slide", "mp4")
    to_path = common.downloadFile(slide_to_data["renderedUrl"], "slide", "mp4")

    filters = []
    outs = []
    chunk_ids = []

    #
    # If there is no overlapping transition between the slides:
    #
    #                  [from]
    # slide_from |********************|             [to]
    # slide_to                        |****************************]
    #
    if 'transitionOut' not in slide_from_data or slide_from_data["transitionOut"] != "fadeOutOverNext":

        if chunk_idx == 0:
            #                *[from]*
            # slide_from |********************|            [to]
            # slide_to                        |**************************]
            #
            chunk_id = getChunkId(chunk_idx, chunk_sub_idx)
            saveChunkData(project_id, chunk_id, slide_from_data["renderedUrl"])
            chunk_sub_idx += 1

        if 'transitionOut' in slide_to_data and slide_to_data["transitionOut"] == "fadeOutOverNext":
            #                  [from]
            # slide_from |********************|  *[to-start]*  [fadeOutOverNext]
            # slide_to                        |**********************|*******]
            #
            slide_to_start_t = 0.00
            slide_to_end_t = slide_to_data["transitionOutStart"]

            chunk_id = getChunkId(chunk_idx, chunk_sub_idx)
            cmd = [
                common.FFMPEG_BIN,
                "-i %s" % to_path,
                "-vf trim=%.2f:%.2f" % (slide_to_start_t, slide_to_end_t),
                "-y /tmp/%s.mp4" % chunk_id
            ]
            common.executeCmd(" ".join(cmd))

            chunk_url = uploadChunk(project_id, chunk_id, "/tmp/%s.mp4" % chunk_id)
            saveChunkData(project_id, chunk_id, chunk_url)
            chunk_sub_idx += 1
        else:
            #                  [from]
            # slide_from |********************|      *[to]*
            # slide_to                        |********************|
            #
            chunk_id = getChunkId(chunk_idx, chunk_sub_idx)
            saveChunkData(project_id, chunk_id, slide_to_data["renderedUrl"])
            chunk_sub_idx += 1

        # Decrement counter in DB
        updateCounter(project_id, context)
        sys.exit()

    #
    # Overlapping transition between the slides:
    #
    #               [from]      [transition]
    # slide_from |******************|******|       [to-middle]     [transition]
    # slide_to             |******|*********************|*******]
    #
    if chunk_idx == 0:
        #
        #            *[v0-start]*    [v0-end]
        # slide_from |******************|******|
        # slide_to             |******|*********************|*******]
        #
        filters.append(
            "[0:v]split[v0-start][v0-end];[v0-start]trim=0:%.2f[v0-start];[v0-end]trim=%.2f:%.2f,setpts=PTS-STARTPTS[v0-end]"
            % (slide_from_data["transitionOutStart"],
               slide_from_data["transitionOutStart"],
               slide_from_data["transitionOutStart"] + slide_from_data["transitionOutDuration"]))

        chunk_id = getChunkId(chunk_idx, chunk_sub_idx)
        outs.append("-map \"[v0-start]\" -y /tmp/%s.mp4" % chunk_id)
        chunk_ids.append(chunk_id)
        chunk_sub_idx += 1
    else:
        #
        #                               *[v0-end]*
        # slide_from |******************|******|
        # slide_to             |******|*********************|*******]
        #
        filters.append("[0:v]trim=%.2f:%.2f,setpts=PTS-STARTPTS[v0-end]" %
                       (slide_from_data["transitionOutStart"],
                        slide_from_data["duration"]))

    #
    # slide_from |******************|******|                    [fadeOutOverNext?]
    # slide_to             |******|************************|*******]
    #                  *[v1-transition]*     *[v1-middle]*
    #
    slide_to_end_t = slide_to_data["duration"]
    if 'transitionOut' in slide_to_data and slide_to_data["transitionOut"] == "fadeOutOverNext":
        slide_to_end_t = slide_to_data["transitionOutStart"]

    # filters.append(
    #     "[1:v]split[v1-transition][v1-middle];\
    #     [v1-transition]trim=0:%.2f[v1-transition];\
    #     [v1-middle]trim=%.2f:%.2f,setpts=PTS-STARTPTS[v1-middle]"
    #     % (slide_from_data["transitionOutDuration"],
    #        slide_from_data["transitionOutDuration"],
    #        slide_to_end_t
    #        )
    # )
    filters.append("[1:v]split[v1-transition][v1-middle];\
        [v1-transition]trim=0:%.2f[v1-transition];\
        [v1-middle]trim=%.2f:%.2f,setpts=PTS-STARTPTS[v1-middle]"
                   % (slide_from_data["transitionOutDuration"],
                      slide_from_data["transitionOutDuration"],
                      slide_to_end_t))

    #
    #                      *[from-to-transition]*
    # combined   |***************|***********|********************|
    #                 [from]                        [to]
    #
    # if 'transitionOut' in slide_from_data and slide_from_data["transitionOut"] == "fadeOutOverNext":
    filters.append("[v0-end]fade=out:st=0:d=%.2f:alpha=1[v0-transition]" %
                   slide_from_data["transitionOutDuration"])
    filters.append(
        "[v1-transition][v0-transition]overlay=x=0:y=0:eof_action=pass[from-to-transition]"
    )

    # Save chunks
    chunk_id = getChunkId(chunk_idx, chunk_sub_idx)
    outs.append("-map \"[from-to-transition]\" -y /tmp/%s.mp4" % chunk_id)
    chunk_ids.append(chunk_id)
    chunk_sub_idx += 1

    chunk_id = getChunkId(chunk_idx, chunk_sub_idx)
    outs.append("-map \"[v1-middle]\" -y /tmp/%s.mp4" % chunk_id)
    chunk_ids.append(chunk_id)
    chunk_sub_idx += 1

    cmd = [
        common.FFMPEG_BIN,
        "-i %s -i %s" % (from_path, to_path),
        "-filter_complex \"%s\"" % ";".join(filters),
        " ".join(outs)
    ]
    common.executeCmd(" ".join(cmd))

    # Upload and save chunk data
    for cid in chunk_ids:
        chunk_url = uploadChunk(project_id, cid, "/tmp/%s.mp4" % cid)
        saveChunkData(project_id, cid, chunk_url)

    # Update counter_chunks; when all transition chunks are done, kick off render_video
    numLeft = common.decrementCounter(project_id, 'counter_chunks')
    if numLeft == 0:
        render_video_function_name = context.function_name.replace(
            "render_transition", "render_video")
        event = {'project_id': project_id}
        common.invokeLambda(render_video_function_name, event)
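# --- Not part of the original source ---
# An example of the event this Lambda handler expects, based only on the fields
# read at the top of handler(). The values are made up, and whether the slide
# references are indices or item ids depends on getSlideData(), which is not
# shown here:
example_event = {
    "project_id": "abc123",   # hypothetical project id
    "slide_from": "slide_0",  # whatever key getSlideData() expects
    "slide_to": "slide_1",
    "chunk_idx": 0,
}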