def saveVideos():
    """Download every v.redd.it video among the authenticated user's
    submissions into <location>/video_uploads.

    Relies on module-level globals: ``location`` (base dir), ``reddit``
    (praw client), ``bcolors`` (ANSI color constants) and redvid's
    ``Downloader``. Errors for a single submission are reported and the
    loop continues with the next one.
    """
    import shutil  # local import: needed for cross-partition moves

    path = os.path.join(location, "video_uploads")
    redvid = Downloader(max_q=True)
    submissions = reddit.user.me().submissions.new(limit=None)
    for submission in submissions:
        # Only reddit-hosted videos (v.redd.it) are handled.
        if "v.redd" not in submission.url:
            continue
        video_url = "https://www.reddit.com" + submission.permalink
        print(f"{bcolors.WARNING}\nVIDEO URL: " + video_url)
        print(f"{bcolors.ENDC}The following video will be downloaded: " + submission.title)
        try:
            # exist_ok avoids the check-then-create race of exists()+mkdir()
            os.makedirs(path, exist_ok=True)
            redvid.download(video_url)
            for file in os.listdir(os.getcwd()):
                if file.endswith(".mp4"):
                    # Keep at most 14 title characters and make them filesystem-safe.
                    filestring = '%.14s' % submission.title + ".mp4"
                    filestring = filestring.replace(" ", "_")
                    filestring = filestring.replace("?", "q")
                    print("FILESTRING: " + filestring)
                    print("THIS WILL BE RENAMED: " + file + " to: " + path + "/" + filestring)
                    # shutil.move works across partitions/drives,
                    # unlike os.rename, which raised the error the
                    # advisory message below apologizes for.
                    shutil.move(file, os.path.join(path, filestring))
        except Exception as err:
            print(f"{bcolors.FAIL}Exception: " + str(err))
            print(
                "\nIt is advised that save the videos to the same partition/drive that the script is running on."
            )
            print(f"{bcolors.ENDC} ")
    print(f"{bcolors.OKBLUE} COMPLETE. ")
    print(f"{bcolors.ENDC} ")
def download_video(self, media_path, media, url):
    """Download the reddit-hosted video for ``url`` into ``media_path``.

    ``media`` is a media_metadata-style mapping of media_type -> item
    dict. Entries whose transcoding is not finished are skipped;
    non-reddit_video entries are logged and ignored.
    """
    for media_type, item in media.items():
        if media_type == "reddit_video":
            # Reddit serves broken/partial streams until transcoding is done.
            if item.get("transcoding_status") != "completed":
                continue
            Downloader(url=url, path=os.path.abspath(media_path), max_q=True).download()
        else:
            # Logger.warn has been deprecated since Python 3.3 -- use warning().
            logger.warning(f"unhandled media type in media_metadata: {media_type}")
# NOTE(review): this chunk starts mid-call -- the opening
# optional.add_argument( of the previous option sits above this span.
type=int, default=1e1000)
optional.add_argument(
    '-am', '--automax',
    help=
    'Automatically download video with maximum size (Helps for old reddit videos with unknown qualities)',
    action='store_true')
optional.add_argument(
    '-px', '--proxies',
    help='Download videos through proxies for blocked regions',
    type=dict,
    default={})
args = parser.parse_args()
# Link arguments with Redvid
reddit = Downloader()
# NOTE(review): debug print of the parsed namespace -- consider removing.
print(args)
# NOTE(review): only args.url is consumed; every other parsed argument
# (path, overwrite, quality, duration, size, automax, proxies) is
# discarded and replaced with a hard-coded default below. Confirm this
# is intentional (compare with the variant that forwards all args).
reddit.url = args.url
reddit.path = ''
reddit.overwrite = False
reddit.max_q = True
reddit.min_q = True
reddit.max_d = 1e1000
reddit.max_s = 1e1000
reddit.auto_max = False
reddit.proxies = {}
reddit.download()
from redvid import Downloader

# Download a reddit video, but only if its size stays under 5 MB.
downloader = Downloader(
    url='https://www.reddit.com/r/HeavyFuckingWind/comments/hiuauw/its_been_blowing_like_this_all_day_without_stop/'
)
downloader.max_s = 5 * (1 << 20)  # 5 MB
downloader.download()
from redvid import Downloader

# Simplest usage: point redvid at a reddit post and fetch its video.
downloader = Downloader(
    url='https://www.reddit.com/r/pythonforengineers/comments/hfmo98/what_kind_of_hawk_is_this/'
)
downloader.download()
import requests
import glob
import os.path
import os
import fnmatch

# Telegram credentials (fill in before running).
bot_token = ""
bot_chatID = ''

reddit = praw.Reddit(client_id="", client_secret="",
                     user_agent="Centos:test_bot:v0.1.0")

an_hour_ago = datetime.utcnow() - timedelta(hours=1)

# Forward every video posted to the subreddit within the last hour
# to the configured Telegram chat.
for submission in reddit.subreddit("tiktokthots").hot():
    if datetime.utcfromtimestamp(submission.created_utc) >= an_hour_ago:
        print(submission.url)
        # Distinct name: the original rebound `reddit`, shadowing the
        # praw client with a redvid Downloader.
        downloader = Downloader(max_q=True)
        downloader.url = submission.url
        downloader.overwrite = True
        downloader.download()

        url = "https://api.telegram.org/bot" + bot_token + "/sendVideo"
        data = {"chat_id": bot_chatID}
        os.chdir("/root/bot_python/")
        try:
            for video in glob.glob("*.mp4"):
                # Skip the low-quality 240p variants redvid leaves behind.
                if not fnmatch.fnmatch(video, "*240.mp4"):
                    # `with` guarantees the handle is closed after upload
                    # (the original leaked one descriptor per video).
                    with open(video, "rb") as video_file:
                        r = requests.post(url, files={"video": video_file}, data=data)
                    print(r.status_code, r.reason, r.content)
        except Exception as err:
            # Best-effort upload, but report the failure instead of the
            # original bare `except: pass`, which also ate KeyboardInterrupt.
            print("upload failed:", err)
from redvid import Downloader

# A share link (with utm_* query parameters) is accepted as-is.
downloader = Downloader(
    url='https://www.reddit.com/r/funny/comments/d502c4/the_power_of_static_electricity/?utm_source=share&utm_medium=web2x'
)
downloader.download()
from redvid import Downloader

# Save the clip into a specific directory instead of the working dir.
downloader = Downloader(
    url='https://www.reddit.com/r/HeavyFuckingWind/comments/hiuauw/its_been_blowing_like_this_all_day_without_stop/',
    path='C:\\Users\\JohnDoe\\Desktop\\',
)
downloader.download()
from redvid import Downloader

reddit = Downloader()
# BUG FIX: redvid's best-quality flag is `max_q`, not `max` (see every
# other usage: Downloader(max_q=True), reddit.max_q = ...). Assigning
# `reddit.max` only created an unused attribute and had no effect.
reddit.max_q = True
reddit.url = 'https://www.reddit.com/r/N_N_N/comments/hj4qxb/explosion_at_sina_athar_hospital_17_reported_dead/'
reddit.download()
def save_media(post, location):
    """Try to download the image/video associated with ``post`` into
    <location>/media and return the saved filename, or None.

    Handles, in order: direct image/video links, v.redd.it (via redvid),
    gfycat redirects, imgur direct images, and youtube_dl-supported
    platforms. Best-effort throughout: failures yield None.
    """
    url = post.url
    stripped_url = url.split("?")[0]
    # A self/text post points at its own permalink -- nothing to fetch.
    if url.endswith(post.permalink):
        return None

    # What is the key information?
    extension = stripped_url.split(".")[-1].lower()
    domain = ".".join(post.url.split("/")[2].split(".")[-2:])
    readable_name = list(filter(bool, post.permalink.split("/")))[-1]

    # If it's an imgur gallery, forget it
    if domain == "imgur.com" and "gallery" in url:
        return None

    # Can the media be obtained directly?
    if extension in IMAGE_EXTENSIONS + VIDEO_EXTENSIONS:
        filename = f"{readable_name}_{post.id}.{extension}"
        response = requests.get(post.url)
        media_type = response.headers.get("Content-Type", "")
        if media_type.startswith("image") or media_type.startswith("video"):
            with open(os.path.join(location, "media", filename), "wb") as f:
                f.write(response.content)
            return filename

    # Is this a v.redd.it link?
    if domain == "redd.it":
        downloader = Downloader(max_q=True, log=False)
        downloader.url = url
        current = os.getcwd()
        try:
            name = downloader.download()
            extension = name.split(".")[-1]
            filename = f"{readable_name}_{post.id}.{extension}"
            os.rename(name, os.path.join(location, "media", filename))
            return filename
        except Exception:
            # Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Restore the CWD that the
            # downloader may have changed, then give up on this post.
            os.chdir(current)
            return None

    # Is it a gfycat link that redirects? Update the URL if possible
    if domain == "gfycat.com":
        html = requests.get(post.url).content
        # A small response is the redirect stub, not the real player page.
        if len(html) < 50000:
            match = re.search(r"http([\dA-Za-z\+\:\/\.]+)\.mp4", html.decode())
            if match:
                url = match.group()
            else:
                return None

    # Is this an imgur image?
    if domain == "imgur.com" and extension != "gifv":
        # Probe each known image extension against the i.imgur direct URL.
        # (Loop variable renamed so it no longer shadows the outer
        # `extension` computed from the original URL.)
        for candidate_ext in IMAGE_EXTENSIONS:
            direct_url = f'https://i.{url[url.find("//") + 2:]}.{candidate_ext}'
            direct_url = direct_url.replace("i.imgur.com", "imgur.com")
            direct_url = direct_url.replace("m.imgur.com", "imgur.com")
            response = requests.get(direct_url)
            if response.status_code == 200:
                filename = f"{readable_name}_{post.id}.{candidate_ext}"
                with open(os.path.join(location, "media", filename), "wb") as f:
                    f.write(response.content)
                return filename

    # Try to use youtube_dl if it's one of the possible domains
    if domain in PLATFORMS:
        options = {
            "nocheckcertificate": True,
            "quiet": True,
            "no_warnings": True,
            "ignoreerrors": True,
            "outtmpl": os.path.join(location, "media",
                                    f"{readable_name}_{post.id}" + ".%(ext)s")
        }
        with youtube_dl.YoutubeDL(options) as ydl:
            try:
                ydl.download([url])
            except Exception:
                # Deliberate best-effort, but no longer a bare `except:`.
                pass
        for f in os.listdir(os.path.join(location, "media")):
            if f.startswith(f"{readable_name}_{post.id}"):
                return f
from redvid import Downloader

# Re-downloading the same post replaces the existing file rather than
# prompting or skipping.
downloader = Downloader(
    url='https://www.reddit.com/r/HeavyFuckingWind/comments/hiuauw/its_been_blowing_like_this_all_day_without_stop/'
)
downloader.overwrite = True
downloader.download()
# NOTE(review): this chunk starts mid-call -- the opening
# optional.add_argument( of the previous option sits above this span.
type=int, default=1e1000)
optional.add_argument(
    '-am', '--automax',
    help=
    'Automatically download video with maximum size (Helps for old reddit videos with unknown qualities)',
    action='store_true')
optional.add_argument(
    '-px', '--proxies',
    help='Download videos through proxies for blocked regions',
    type=dict,
    default={})
args = parser.parse_args()
# Link arguments with Redvid
# Every parsed CLI flag is forwarded one-to-one onto the Downloader.
reddit = Downloader()
reddit.url = args.url
reddit.path = args.path
reddit.overwrite = args.overwrite
reddit.max_q = args.maxquality
reddit.min_q = args.minquality
reddit.max_d = args.maxduration
reddit.max_s = args.maxsize
reddit.auto_max = args.automax
reddit.proxies = args.proxies
reddit.download()
def echo(bot, update):
    """Telegram echo handler: treat the incoming message text as a reddit
    post URL, download its video with redvid, and send it back to the chat.

    Replies "Failed" on any error instead of crashing the bot.
    """
    try:
        link = update.message.text
        reddit = Downloader()
        reddit.url = link
        reddit.path = ''
        reddit.overwrite = False
        reddit.max_q = True
        reddit.min_q = True
        reddit.max_d = 1e1000
        reddit.max_s = 1e1000
        reddit.auto_max = False
        reddit.proxies = {}
        path = reddit.download()
        # `with` closes the handle after the upload; the original leaked
        # one file descriptor per request via a bare open().
        with open(path, 'rb') as video_file:
            bot.send_video(chat_id=update.message.chat_id,
                           video=video_file,
                           supports_streaming=True)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt.
        bot.send_message(chat_id=update.message.chat_id, text="Failed")
from redvid import Downloader

downloader = Downloader()
# auto_max: pick the highest quality whose file size still fits
# under max_s (3 MB here).
downloader.auto_max = True
downloader.max_s = 3 * (1 << 20)
downloader.url = 'https://www.reddit.com/r/Unexpected/comments/9n8mmz/_/'
downloader.download()