def create_video_thumbnails(repo, file_id, path, size, thumbnail_file, file_size):
    """Create a thumbnail image for a video stored in a seafile repo.

    Grabs one frame (at THUMBNAIL_VIDEO_FRAME_TIME) from the video via the
    fileserver inner URL, writes it to a temp PNG, then hands it to
    _create_thumbnail_common to produce the final thumbnail file.

    :param repo: seafile repo object (only repo.id is used)
    :param file_id: object id of the video file; first 8 chars name the temp file
    :param path: repo-relative path of the video (basename used for the URL)
    :param size: thumbnail size, forwarded to _create_thumbnail_common
    :param thumbnail_file: destination path for the generated thumbnail
    :param file_size: size of the video, used only for the debug log line
    :returns: whatever _create_thumbnail_common returns, or (False, 500) on error
    """
    t1 = timeit.default_timer()
    token = seafile_api.get_fileserver_access_token(
        repo.id, file_id, 'view', '', use_onetime=False)
    if not token:
        return (False, 500)

    inner_path = gen_inner_file_get_url(token, os.path.basename(path))
    tmp_path = str(os.path.join(tempfile.gettempdir(), '%s.png' % file_id[:8]))

    clip = VideoFileClip(inner_path)
    try:
        clip.save_frame(tmp_path, t=THUMBNAIL_VIDEO_FRAME_TIME)
    finally:
        # Release the ffmpeg reader; the original leaked it.
        clip.close()

    t2 = timeit.default_timer()
    logger.debug('Create thumbnail of [%s](size: %s) takes: %s' %
                 (path, file_size, (t2 - t1)))

    try:
        return _create_thumbnail_common(tmp_path, thumbnail_file, size)
    except Exception as e:
        logger.error(e)
        return (False, 500)
    finally:
        # Single cleanup point instead of unlinking on both branches.
        os.unlink(tmp_path)
def decrypt_video(filename, t0=56):
    """Extract a steganographically hidden message from a video frame.

    Saves the frame at roughly t0 seconds (offset +0.05s to land inside the
    frame) to "frame.png" in the current directory, then decodes the message
    that was embedded in its pixels with stepic.

    :param filename: path of the video file to read
    :param t0: timestamp (seconds) of the frame carrying the message
    :returns: the decoded message string
    """
    vid = VideoFileClip(filename)
    try:
        vid.save_frame("frame.png", t=t0 + 0.05)
    finally:
        # Release the ffmpeg reader; the original leaked it.
        vid.close()
    # Context manager closes the PNG file handle; convert() returns a new image.
    with Image.open("frame.png") as frame:
        img = frame.convert(mode='RGB')
    return stepic.decode(img)
def video(filename, username, t0):
    """Embed *username* into a video by replacing one second of footage.

    Splits the source video at t0, encodes the username into the frame at t0
    with stepic, shows that frame for one second between the two halves, and
    writes the result to static/<username>_<filename>.avi (lossless 'png'
    codec so the hidden bits survive).

    :param filename: base name (no extension) of the video under static/videos/
    :param username: message to hide in the frame
    :param t0: timestamp (seconds) at which to insert the encoded frame
    """
    src = "static/videos/" + filename + ".mp4"
    # Open the source once; the original opened the same file three times,
    # leaking two extra ffmpeg readers. Subclips share this clip's reader,
    # so keep `original` open until the output has been written.
    original = VideoFileClip(src)
    first_half = original.subclip(0, t0)
    second_half = original.subclip(t0 + 1, original.duration)

    original.save_frame("static/videos/frame.png", t=t0)
    img = Image.open("static/videos/frame.png").convert(mode='RGB')
    stepic.encode_inplace(img, username)
    msg = stepic.decode(img)
    print(msg)  # sanity check: decode what we just encoded
    img.save("static/videos/frame.png")

    encoded_clip = ImageClip('static/videos/frame.png', duration=1)
    new_mov = CompositeVideoClip([
        first_half.set_start(0),
        encoded_clip.set_start(t0),
        second_half.set_start(t0 + 1),
    ])
    # Write the result to a file (many options available !)
    new_mov.write_videofile("static/" + username + "_" + filename + ".avi",
                            codec='png')
    original.close()
async def media_to_pic(event, reply, noedits=False):  # sourcery no-metrics
    """Download the media from *reply* and convert it to ./temp/meme.png.

    Handles photos, stickers (.tgs via lottie_convert.py, .webm via a video
    frame grab, .webp via Pillow), videos/gifs (first frame), audio/voice
    (embedded thumbnail), and image documents.

    :param event: the triggering telethon event; edited to show progress
                  unless noedits is True
    :param reply: the message whose media is converted
    :param noedits: when True, do not edit/reply on *event*
    :returns: (catevent, catfile, mediatype) on success,
              (event, None) or (catevent, None) on failure.
              NOTE(review): return arity differs between success (3-tuple)
              and failure (2-tuple) — presumably callers unpack defensively;
              verify before changing.
    """
    mediatype = media_type(reply)
    # Bail out early for media kinds we cannot rasterize.
    if mediatype not in [
        "Photo",
        "Round Video",
        "Gif",
        "Sticker",
        "Video",
        "Voice",
        "Audio",
        "Document",
    ]:
        return event, None
    if not noedits:
        catevent = await edit_or_reply(
            event, "`Transfiguration Time! Converting to ....`"
        )
    else:
        catevent = event
    catmedia = None
    # Fixed output path; remove any stale result from a previous run.
    catfile = os.path.join("./temp/", "meme.png")
    if os.path.exists(catfile):
        os.remove(catfile)
    if mediatype == "Photo":
        catmedia = await reply.download_media(file="./temp")
        im = Image.open(catmedia)
        im.save(catfile)
    elif mediatype in ["Audio", "Voice"]:
        # thumb=-1 downloads the largest embedded thumbnail (e.g. album art).
        await event.client.download_media(reply, catfile, thumb=-1)
    elif mediatype == "Sticker":
        catmedia = await reply.download_media(file="./temp")
        if catmedia.endswith(".tgs"):
            # Animated sticker: render frame 0 to PNG via lottie_convert.py.
            catcmd = f"lottie_convert.py --frame 0 -if lottie -of png '{catmedia}' '{catfile}'"
            stdout, stderr = (await runcmd(catcmd))[:2]
            if stderr:
                LOGS.info(stdout + stderr)
        elif catmedia.endswith(".webm"):
            # Video sticker: grab a frame; fall back to t=0 if t=0.1 fails.
            clip = VideoFileClip(catmedia)
            try:
                clip = clip.save_frame(catfile, 0.1)
            except Exception:
                clip = clip.save_frame(catfile, 0)
        elif catmedia.endswith(".webp"):
            im = Image.open(catmedia)
            im.save(catfile)
    elif mediatype in ["Round Video", "Video", "Gif"]:
        # Prefer the embedded thumbnail; if none exists, grab a frame
        # from the downloaded video itself.
        await event.client.download_media(reply, catfile, thumb=-1)
        if not os.path.exists(catfile):
            catmedia = await reply.download_media(file="./temp")
            clip = VideoFileClip(catmedia)
            try:
                clip = clip.save_frame(catfile, 0.1)
            except Exception:
                clip = clip.save_frame(catfile, 0)
    elif mediatype == "Document":
        # Only image/* documents are converted; other mime types fall through
        # with no catfile, so the function returns (catevent, None).
        mimetype = reply.document.mime_type
        mtype = mimetype.split("/")
        if mtype[0].lower() == "image":
            catmedia = await reply.download_media(file="./temp")
            im = Image.open(catmedia)
            im.save(catfile)
    # Clean up the intermediate download, keeping only the PNG result.
    if catmedia and os.path.lexists(catmedia):
        os.remove(catmedia)
    if os.path.lexists(catfile):
        return catevent, catfile, mediatype
    return catevent, None
# Load pre-trained per-column scaler
X_scaler = joblib.load(scaler_model_path)
print('Load SVM and Scaler')

# Run on video file if true else run on test images
TEST_ON_VIDEO = True

if TEST_ON_VIDEO:
    # Video is at 25 FPS
    clip = VideoFileClip(video_input)  # .subclip(40,50)
    # NOTE: this function expects color images!!
    clip_output = clip.fl_image(track_vehicles)
    clip_output.write_videofile(video_output, audio=False)
else:
    # Extract 25 evenly spaced frames from the first second of video,
    # but only if the frame directory is still empty.
    if not os.listdir(video_img_dir):
        v_start = 0
        v_end = 1
        video_times = np.linspace(v_start, v_end, 25)
        print(video_times)
        clip = VideoFileClip(video_input)
        for vt in video_times:
            video_img_file = video_img_dir + 'video{:3.3}.jpg'.format(vt)
            clip.save_frame(video_img_file, vt)

    # Read camera frames from disk and run the tracker on each, visualising.
    img_files = glob.glob(video_img_dir + 'video*.jpg')
    for img_file in img_files:
        img = cv2.imread(img_file)
        # cv2 loads BGR; the pipeline works in RGB.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        track_vehicles(img, visualise=True)