Example #1
0
def saveVideos():
    """Download every v.redd.it video among the authenticated user's
    submissions into ``<location>/video_uploads``.

    Relies on module-level names defined elsewhere in the file:
    ``os``, ``location`` (base directory), ``reddit`` (praw instance),
    ``Downloader`` (redvid) and ``bcolors`` (ANSI color constants).
    Prints progress/errors; returns nothing.
    """
    import shutil  # local import: cross-device move (see below)

    path = os.path.join(location, "video_uploads")
    redvid = Downloader(max_q=True)
    submissions = reddit.user.me().submissions.new(limit=None)
    for submission in submissions:
        if "v.redd" not in submission.url:
            continue
        video_url = "https://www.reddit.com" + submission.permalink
        print(f"{bcolors.WARNING}\nVIDEO URL: " + video_url)
        print(f"{bcolors.ENDC}The following video will be downloaded: " +
              submission.title)
        try:
            # exist_ok avoids the check-then-create race of the old
            # `if not exists: mkdir` pair.
            os.makedirs(path, exist_ok=True)
            redvid.download(video_url)
            for file in os.listdir(os.getcwd()):
                if not file.endswith(".mp4"):
                    continue
                # Filesystem-safe name from the first 14 chars of the title.
                filestring = '%.14s' % submission.title + ".mp4"
                filestring = filestring.replace(" ", "_")
                filestring = filestring.replace("?", "q")
                print("FILESTRING: " + filestring)
                print("THIS WILL BE RENAMED: " + file + " to: " +
                      path + "/" + filestring)
                # shutil.move works across partitions/drives, unlike
                # os.rename (which raised the error the message below
                # used to apologize for).
                shutil.move(file, os.path.join(path, filestring))
        except Exception as err:
            print(f"{bcolors.FAIL}Exception: " + str(err))
            print(
                "\nIt is advised that save the videos to the same partition/drive that the script is running on."
            )
            print(f"{bcolors.ENDC} ")
    print(f"{bcolors.OKBLUE} COMPLETE. ")
    print(f"{bcolors.ENDC} ")
def echo(bot, update):
    """Telegram handler: treat the incoming message text as a reddit
    video URL, download it with redvid, and send it back as a video.

    Args:
        bot: telegram Bot instance used to send the reply.
        update: telegram Update carrying the message with the link.

    On any failure a plain "Failed" message is sent instead.
    """
    try:
        link = update.message.text
        reddit = Downloader()
        reddit.url = link
        reddit.path = ''
        reddit.overwrite = False
        reddit.max_q = True
        reddit.min_q = True
        reddit.max_d = 1e1000  # 1e1000 overflows to float inf: no duration cap
        reddit.max_s = 1e1000  # no size cap
        reddit.auto_max = False
        reddit.proxies = {}
        path = reddit.download()
        # Context manager closes the handle even if send_video raises;
        # the original leaked the open file object.
        with open(path, 'rb') as video_file:
            bot.send_video(chat_id=update.message.chat_id,
                           video=video_file,
                           supports_streaming=True)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        bot.send_message(chat_id=update.message.chat_id, text="Failed")
Example #3
0
                      default=1e1000)
# Remaining optional CLI flags (`parser` and `optional` are created
# earlier in the file).
optional.add_argument(
    '-am',
    '--automax',
    help=
    'Automatically download video with maximum size (Helps for old reddit videos with unknown qualities)',
    action='store_true')
# NOTE: argparse's ``type`` must be a callable applied to the raw
# string.  The old ``type=dict`` raised for every non-empty value, so
# --proxies could never actually be used.  json.loads accepts e.g.
# '{"https": "host:port"}' and the default remains {}.
import json
optional.add_argument(
    '-px',
    '--proxies',
    help='Download videos through proxies for blocked regions',
    type=json.loads,
    default={})

args = parser.parse_args()

# Link arguments with Redvid
reddit = Downloader()
print(args)
reddit.url = args.url
reddit.path = ''
reddit.overwrite = False
reddit.max_q = True
reddit.min_q = True
reddit.max_d = 1e1000  # overflows to float inf: no duration limit
reddit.max_s = 1e1000  # no size limit
# These two were parsed but silently discarded before; wire them
# through as the "Link arguments" comment intends.
reddit.auto_max = args.automax
reddit.proxies = args.proxies

reddit.download()
Example #4
0
def save_media(post, location):
    """Takes a post object and tries to download any image/video it might be
    associated with. If it can, it will return the filename.

    Args:
        post: a praw-style submission with .url, .permalink and .id.
        location: base directory; media is written under ``location/media``.

    Returns:
        The saved filename (relative to ``location/media``), or None when
        nothing downloadable was found.

    Relies on module-level: requests, Downloader (redvid), youtube_dl,
    IMAGE_EXTENSIONS, VIDEO_EXTENSIONS, PLATFORMS.
    """

    url = post.url
    stripped_url = url.split("?")[0]
    # Self-posts link back to their own permalink — nothing to download.
    if url.endswith(post.permalink): return None

    # What is the key information?
    extension = stripped_url.split(".")[-1].lower()
    # Last two dot-components of the host, e.g. "v.redd.it" -> "redd.it".
    domain = ".".join(post.url.split("/")[2].split(".")[-2:])
    # Last non-empty permalink segment: the post's slug.
    readable_name = list(filter(bool, post.permalink.split("/")))[-1]

    # If it's an imgur gallery, forget it
    if domain == "imgur.com" and "gallery" in url: return None

    # Can the media be obtained directly?
    if extension in IMAGE_EXTENSIONS + VIDEO_EXTENSIONS:
        filename = f"{readable_name}_{post.id}.{extension}"
        response = requests.get(post.url)
        media_type = response.headers.get("Content-Type", "")
        # Only save if the server actually returned media, not an HTML
        # error/redirect page.
        if media_type.startswith("image") or media_type.startswith("video"):
            with open(os.path.join(location, "media", filename), "wb") as f:
                f.write(response.content)
                return filename

    # Is this a v.redd.it link?
    if domain == "redd.it":
        downloader = Downloader(max_q=True, log=False)
        downloader.url = url
        current = os.getcwd()
        try:
            name = downloader.download()
            extension = name.split(".")[-1]
            filename = f"{readable_name}_{post.id}.{extension}"
            os.rename(name, os.path.join(location, "media", filename))
            return filename
        except Exception:
            # Narrowed from bare `except:` so Ctrl-C still propagates.
            # NOTE(review): cwd is only restored on the failure path —
            # presumably the downloader chdirs; confirm whether the
            # success path needs the same restore.
            os.chdir(current)
            return None

    # Is it a gfycat link that redirects? Update the URL if possible
    if domain == "gfycat.com":
        html = requests.get(post.url).content
        # Size guard: only scan pages small enough to be a redirect stub.
        if len(html) < 50000:
            match = re.search(r"http([\dA-Za-z\+\:\/\.]+)\.mp4", html.decode())
            if match:
                url = match.group()
            else:
                return None

    # Is this an imgur image?
    if domain == "imgur.com" and extension != "gifv":
        # Probe each known image extension against the direct i.imgur host.
        for extension in IMAGE_EXTENSIONS:
            direct_url = f'https://i.{url[url.find("//") + 2:]}.{extension}'
            direct_url = direct_url.replace("i.imgur.com", "imgur.com")
            direct_url = direct_url.replace("m.imgur.com", "imgur.com")
            response = requests.get(direct_url)
            if response.status_code == 200:
                filename = f"{readable_name}_{post.id}.{extension}"
                with open(os.path.join(location, "media", filename),
                          "wb") as f:
                    f.write(response.content)
                    return filename

    # Try to use youtube_dl if it's one of the possible domains
    if domain in PLATFORMS:
        options = {
            "nocheckcertificate":
            True,
            "quiet":
            True,
            "no_warnings":
            True,
            "ignoreerrors":
            True,
            "outtmpl":
            os.path.join(location, "media",
                         f"{readable_name}_{post.id}" + ".%(ext)s")
        }
        with youtube_dl.YoutubeDL(options) as ydl:
            try:
                ydl.download([url])
            except Exception:
                # Best-effort: ignoreerrors is set; fall through to the
                # directory scan to see if anything was produced.
                pass
        # Whatever youtube_dl wrote starts with our template prefix.
        for f in os.listdir(os.path.join(location, "media")):
            if f.startswith(f"{readable_name}_{post.id}"):
                return f