Code example #1
File: robot_mixins.py Project: inkleby/inklebyrobots
 def _upload_gif(self,file_url):
     client = GfycatClient()
     print "uploading file to Gfycat"
     result = client.upload_from_file(file_url)
     name = result['gfyName']
     url = "https://gfycat.com/{0}".format(name)
     return name, url
Code example #3
File: reversegif.py Project: aldilaff/reversegifsbot
def reverseGif(gifUrl):
    imageURL = None
    client = GfycatClient()
    try:
        imageURL = client.upload_from_url(gifUrl)
    except GfycatClientError as e:
        print(e.error_message)
        print(e.status_code)
        return None
    print("Done")
    print()
    print("You can find it here: {0}".format(imageURL))
    return imageURL["gfyName"]
Code example #4
File: collect.py Project: shenglinchen/Twetter
    def _get_gfycat_secrets(self,
                            gfycat_secrets: str) -> configparser.ConfigParser:
        """
        _get_gfycat_secrets checks if the Gfycat api secrets file exists.
        - If the file exists, this methods reads the the files and returns the secrets in as a dict.
        - If the file doesn't exist it asks the user over stdin to supply these values and then
          saves them into the gfycat_secrets file

        Arguments:
            gfycat_secrets (string): file name of secrets file for API credentials

        Returns:
            imgur_config (dict): Dictionary containing the client id and client secret needed to
            login to Gfycat
        """

        if not os.path.exists(gfycat_secrets):
            self.logger.warning(
                'Gfycat API keys not found. (See wiki if you need help).')

            # Whitespaces are stripped from input: https://stackoverflow.com/a/3739939
            gfycat_client_id = ''.join(
                input("[ .. ] Enter Gfycat client ID: ").split())
            gfycat_client_secret = ''.join(
                input("[ .. ] Enter Gfycat client secret: ").split())
            # Make sure authentication is working
            try:
                gfycat_client = GfycatClient(gfycat_client_id,
                                             gfycat_client_secret)

                # If this call doesn't work, it'll throw a GfycatClientError
                gfycat_client.query_gfy('oddyearlyhorsefly')
                # It worked, so save the keys to a file
                gfycat_config = configparser.ConfigParser()
                gfycat_config['Gfycat'] = {
                    'ClientID': gfycat_client_id,
                    'ClientSecret': gfycat_client_secret,
                }
                with open(gfycat_secrets, 'w') as file:
                    gfycat_config.write(file)
            except GfycatClientError as gfycat_error:
                self.logger.error('Error while logging into Gfycat: %s',
                                  gfycat_error)
                self.logger.error(FATAL_TOOTBOT_ERROR)
                sys.exit(1)
        else:
            # Read API keys from secret file
            gfycat_config = configparser.ConfigParser()
            gfycat_config.read(gfycat_secrets)

        return gfycat_config
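
The docstring above describes a read-or-create flow for an INI-style secrets file with a [Gfycat] section holding ClientID and ClientSecret. Below is a rough, self-contained sketch of just that file format and the read path; the standalone helper and its defaults are illustrative assumptions, not code from the project above.

# Hedged sketch of the secrets-file layout assumed by _get_gfycat_secrets above.
# The helper below is hypothetical; it only illustrates the configparser round trip.
import configparser
import os

TEMPLATE = """\
[Gfycat]
ClientID = your-client-id
ClientSecret = your-client-secret
"""

def read_gfycat_secrets(path='gfycat.secret'):
    if not os.path.exists(path):
        # Write a template the user can fill in by hand instead of prompting on stdin.
        with open(path, 'w') as fp:
            fp.write(TEMPLATE)
    config = configparser.ConfigParser()
    config.read(path)
    return config

secrets = read_gfycat_secrets()
print(secrets['Gfycat']['ClientID'])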
Code example #5
def convertGfycatUrlToWebM(submission, url):
    global gfycatClient
    # Change this:
    #   https://gfycat.com/IndolentScalyIncatern
    #   https://gfycat.com/IndolentScalyIncatern/
    # Into this:
    #   https://zippy.gfycat.com/IndolentScalyIncatern.webm
    # Or maybe this:
    #   https://giant.gfycat.com/IndolentScalyIncatern.webm

    # Lazy initialize client
    if not gfycatClient and settings.settings['Gfycat_Client_id']:
        gfycatClient = GfycatClient(settings.settings['Gfycat_Client_id'],
                                    settings.settings['Gfycat_Client_secret'])

    # Still don't have a client?
    if not gfycatClient:
        logger.log(
            "Warning: no Gfycat client; gifs will likely fail to download")
        newUrl = gfycatToRedGifsWorkaround(url)
        if newUrl:
            return newUrl
        # Hacky solution while Gfycat API isn't set up. This breaks if case is wrong
        return "https://giant.gfycat.com/{}.webm".format(url[url.rfind("/") +
                                                             1:])
    else:
        # Get the gfyname from the url
        matches = re.findall(r'gfycat\.com.*/([a-zA-Z]+)', url)
        if not matches:
            errorMessage = "Gfycat URL {} doesn't seem to match expected URL format".format(
                url)
            logger.log(errorMessage)
            LikedSavedDatabase.db.addUnsupportedSubmission(
                submission, errorMessage)
        else:
            try:
                gfycatUrlInfo = gfycatClient.query_gfy(matches[0])
            except Exception as e:
                errorMessage = '[ERROR] Exception: Url {0} raised exception:\n\t {1}'.format(
                    url, e)
                logger.log(errorMessage)
                logger.log("Gfycat client was used to make this query")
                # Gfycat sucks. They created RedGifs, but broke Gfycat API by making it not actually
                # support that transition, and you can't get a RedGifs API token unless you email
                # them for one. Great engineering, folks
                newUrl = gfycatToRedGifsWorkaround(url)
                if newUrl:
                    return newUrl
                LikedSavedDatabase.db.addUnsupportedSubmission(
                    submission, errorMessage)
                return None
            return gfycatUrlInfo['gfyItem']['mp4Url']
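
The comments at the top of convertGfycatUrlToWebM describe rewriting a gfycat.com page URL into a direct .webm link by pulling out the gfy name. Here is a minimal sketch of that no-API fallback in isolation; the sample URL is hypothetical, and, as the comment above notes, the result breaks when the name's capitalization is wrong.

# Hedged sketch of the regex-plus-fallback path used above; the sample URL is hypothetical.
import re

def gfycat_page_url_to_webm(url):
    match = re.search(r'gfycat\.com.*/([a-zA-Z]+)', url)
    if not match:
        return None
    # Same direct-file host as the fallback above; case-sensitive, so it can 404.
    return 'https://giant.gfycat.com/{}.webm'.format(match.group(1))

print(gfycat_page_url_to_webm('https://gfycat.com/IndolentScalyIncatern/'))
# e.g. https://giant.gfycat.com/IndolentScalyIncatern.webm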
Code example #6
def save_to_gif():
    images = []
    for i in range(num_images):
        images.append(imageio.imread('images/' + str(i) + '.png'))
    imageio.mimsave('sim.gif', images)

    # Save to gfycat
    client = GfycatClient()
    try:
        print(client.upload_from_file('sim.gif'))
    except GfycatClientError as e:
        print(e.error_message)
        print(e.status_code)
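
save_to_gif above stitches numbered PNG frames into a GIF with imageio.mimsave before uploading. mimsave also accepts in-memory arrays, which makes the stitching step easy to try without files on disk; a small sketch with synthetic frames (the shapes and pixel values are arbitrary):

# Hedged sketch: imageio.mimsave with synthetic in-memory frames instead of PNG files.
import imageio
import numpy as np

frames = [np.full((64, 64, 3), i * 25, dtype=np.uint8) for i in range(10)]
imageio.mimsave('sim.gif', frames)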
Code example #7
File: collect.py Project: shenglinchen/Twetter
    def __init__(
        self,
        config: Configuration,
        imgur_secrets: str = 'imgur.secret',
        gfycat_secrets: str = 'gfycat.secret',
    ):
        self.logger = config.bot.logger
        self.save_dir = config.media.folder

        try:
            imgur_config = self._get_imgur_secrets(imgur_secrets)
            self.imgur_client = ImgurClient(
                imgur_config['Imgur']['ClientID'],
                imgur_config['Imgur']['ClientSecret'],
            )

            gfycat_config = self._get_gfycat_secrets(gfycat_secrets)
            self.gfycat_client = GfycatClient(
                gfycat_config['Gfycat']['ClientID'],
                gfycat_config['Gfycat']['ClientSecret'],
            )

        except ImgurClientError as imgur_error:
            self.logger.error('Error on creating ImgurClient: %s', imgur_error)
            self.logger.error(FATAL_TOOTBOT_ERROR)
            sys.exit(1)
        except GfycatClientError as gfycat_error:
            self.logger.error('Error on creating GfycatClient: %s',
                              gfycat_error)
            self.logger.error(FATAL_TOOTBOT_ERROR)
            sys.exit(1)
Code example #9
File: gfycat.py Project: Brandawg93/Gifendore
 def get_details(self, url):
     """Get details from gfycat url."""
     self.vid_url = self.get_preview()
     try:
         self.name = self.regex.findall(url)[0]
     except IndexError:
         raise InvalidURLError('gfycat url not found')
     if self.name is None:
         raise InvalidURLError('gfycat url not found')
     try:
         if self.vid_url is None:
             client = GfycatClient(constants.GFYCAT_CLIENT_ID,
                                   constants.GFYCAT_CLIENT_SECRET)
             query = client.query_gfy(self.name)
             if 'mp4Url' in query['gfyItem']:
                 self.vid_url = query['gfyItem']['mp4Url']
             elif 'gifUrl' in query['gfyItem']:
                 self.gif_url = query['gfyItem']['gifUrl']
             else:
                 raise InvalidURLError('gfycat url not found')
         return self.get_info()
     except Exception:
         raise InvalidURLError('gfycat url not found')
Code example #10
File: memebot.py Project: unixproject/memebot
def get_media(img_url, post_id):
	if any(s in img_url for s in ('i.imgur.com', 'i.redd.it', 'i.reddituploads.com')):
		# This adds support for all imgur links (including galleries), but I need to make a new regex
		#if ('i.imgur.com' not in img_url) and ('imgur.com' in img_url):
			#print('[bot] Attempting to retrieve image URL for', img_url, 'from imgur...')
			#regex = r"(https?:\/\/imgur\.com\/a\/(.*?)(?:\/.*|$))"
			#m = re.search(regex, img_url, flags=0)
			#print(m.group(0))
			#img_url = imgur.get_image(img_url)
		file_name = os.path.basename(urllib.parse.urlsplit(img_url).path)
		file_extension = os.path.splitext(img_url)[-1].lower();
		# Fix for issue with i.reddituploads.com links not having a file extension in the URL
		if not file_extension:
			file_extension += '.jpg'
			file_name += '.jpg'
			img_url += '.jpg'
		file_path = IMAGE_DIR + '/' + file_name
		print('[BOT] Downloading file at URL ' + img_url + ' to ' + file_path + ', file type identified as ' + file_extension)
		if ('gifv' not in img_url): # Can't process GIFV links until Imgur API integration is working
			img = save_file(img_url, file_path)
			return img
		else:
			print('[BOT] GIFV files are not supported yet')
			return ''
	elif ('gfycat.com' in img_url): # Gfycat
		# Twitter supports uploading videos, but Tweepy hasn't updated to support it yet.
		gfycat_name = os.path.basename(urllib.parse.urlsplit(img_url).path)
		client = GfycatClient()
		gfycat_info = client.query_gfy(gfycat_name)
		gfycat_url = gfycat_info['gfyItem']['mp4Url']
		file_path = IMAGE_DIR + '/' + gfycat_name + '.mp4'
		print('[BOT] Downloading Gfycat at URL ' + gfycat_url + ' to ' + file_path)
		gfycat_file = save_file(gfycat_url, file_path)
		return gfycat_file
	else:
		print('[BOT] Post', post_id, 'doesn\'t point to an image/video:', img_url)
		return ''
Code example #11
class Bot:
    def __init__(self, videobot):
        self.bot = GfycatClient()
        self.video_bot = videobot

    def analyze_gfy(self, link):

        gfyName = link[link.find('.com/') + 5:]

        if gfyName.find('.') != -1:  #name.mp4
            gfyName = gfyName[:gfyName.find('.')]

        if gfyName.find('-') != -1:  #name-someformat.mp4
            gfyName = gfyName[:gfyName.find('-')]

        status = {}
        message = None
        try:
            gfycat_response = self.bot.query_gfy(gfyName)
            mp4_URL = gfycat_response['gfyItem']['mp4Url']

            if gfycat_response['gfyItem']['nsfw'] == 1:
                message = 'Gfycat - marked NSFW.'
                message = '**[Hover to reveal](#s "' + message + ' ")**'  # reddit spoiler tag added.
            else:
                filename = mp4_URL[mp4_URL.find('.com/') + 5:]
                urllib.request.urlretrieve(mp4_URL, filename)
                status[link] = self.video_bot.make_prediction(filename)

                labels = sorted(status[link].items(),
                                key=operator.itemgetter(1),
                                reverse=True)
                tag, confidence = labels[0]
                message = tag + ". I'm {0:.2f}% confident.".format(confidence)
                message = '**[Hover to reveal](#s "' + message + ' ")**'  # reddit spoiler tag added.

                if os.path.exists(filename):
                    os.remove(filename)

        except GfycatClientError as e:
            print(e.error_message)
            print(e.status_code)

        return status, message
Code example #12
max_sleep = int(config['SETTINGS']['max_sleep'])
allow_nsfw = config['SETTINGS'].getboolean('allow_nsfw')
hashtags = config['SETTINGS'].getboolean('hashtags')
test_mode = config['SETTINGS'].getboolean('test_mode')

reddit = praw.Reddit(username=reddit_user,
                     password=reddit_pass,
                     client_id=reddit_client_id,
                     client_secret=reddit_client_secret,
                     user_agent='RedTwit (by u/impshum)')

auth = tweepy.OAuthHandler(twitter_consumer_key, twitter_consumer_secret)
auth.set_access_token(twitter_access_token, twitter_access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)

gfycat_client = GfycatClient(gfycat_client_id, gfycat_client_secret)


class C:
    W, G, R, P, Y, C = '\033[0m', '\033[92m', '\033[91m', '\033[95m', '\033[93m', '\033[36m'


class TargetFormat(object):
    GIF = ".gif"
    MP4 = ".mp4"
    AVI = ".avi"


def convertFile(inputpath, targetFormat):
    outputpath = os.path.splitext(inputpath)[0] + targetFormat
    reader = imageio.get_reader(inputpath)
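
The listing above stops inside convertFile right after imageio.get_reader. As a hedged sketch only of how such a conversion is commonly finished with imageio (this is not the original continuation, and the fps fallback is an assumption):

# Hypothetical completion pattern for an imageio-based conversion; not the original code.
import os
import imageio

def convert_file_sketch(inputpath, target_format='.mp4'):
    outputpath = os.path.splitext(inputpath)[0] + target_format
    reader = imageio.get_reader(inputpath)
    fps = reader.get_meta_data().get('fps', 30)  # assume 30 fps when metadata lacks it
    writer = imageio.get_writer(outputpath, fps=fps)
    for frame in reader:
        writer.append_data(frame)
    writer.close()
    return outputpath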
Code example #13
from gfycat.client import GfycatClient
from gfycat.error import GfycatClientError

config = configparser.ConfigParser()
config.read(
    'config.ini')  # File isn't available on GitHub, for obvious reasons.

username = config.get('reddit', 'username')
password = config.get('reddit', 'password')

reddit = praw.Reddit(client_id=config.get('reddit', 'client_id'),
                     client_secret=config.get('reddit', 'client_secret'),
                     username=username,
                     password=password,
                     user_agent='vredditmirrorbot by /u/blinkroot')
gfycat = GfycatClient()

log_semaphore = threading.Semaphore()  # For .log file synchronization.


def reply_to_submission(submission, gif_json, root, is_gif):
    def gfy_field(prop):
        return gif_json['gfyItem'][prop]

    def strmbl_field(extension, prop):
        return gif_json['files'][extension][prop]

    reply = ""
    space = ' '

    if is_gif:
Code example #14
File: botz.py Project: Unity123/botz
import time as tim
import urllib
import os
import praw
import youtube_dl
from PyDictionary import PyDictionary
from gfycat.client import GfycatClient
from discord.ext import commands  # provides commands.Bot used below
import random
import asyncio

scounter = 1
dictionary = PyDictionary()
reddit = praw.Reddit(client_id="SgpnTCwJm3Hgag",
                     client_secret="insert",
                     user_agent="discord.bot.botz:v1.0.0 by DimBulb567")
gfycat = GfycatClient("2_cRBLwz", "insert")
bot = commands.Bot(command_prefix="b!")
submissions = []
chatpeeps = []
chatchannels = []
ischatting = False
msg = ""
newmsg = False

ydl_opts = {
    'format': 'bestaudio/best',
    'default_search': 'auto',
    'postprocessors': [{
        'key': 'FFmpegExtractAudio',
        'preferredcodec': 'mp3',
    }],
Code example #15
File: t2utils.py Project: DM2602/titletoimagebot
def process_gif(submission):
    sub = submission.subreddit.display_name
    url = submission.url
    title = submission.title
    author = submission.author.name

    # If it's a gifv and hosted on imgur, we're OK; anywhere else I can't verify it works
    if 'imgur' in url and url.endswith("gifv"):
        # imgur will give us a (however large) gif if we ask for it
        # thanks imgur <3
        url = url.rstrip('v')
    # Reddit Hosted gifs are going to be absolute hell, served via DASH which
    #       Can be checked through a fallback url :)
    try:
        response = requests.get(url)
    # except OSError as error:
    #     logging.warning('Converting to image failed, trying with <url>.jpg | %s', error)
    #     try:
    #         response = requests.get(url + '.jpg')
    #         img = Image.open(BytesIO(response.content))
    #     except OSError as error:
    #         logging.error('Converting to image failed, skipping submission | %s', error)
    #return
    except IOError as error:
        print('Pillow couldn\'t process image, marking as parsed and skipping')
        return None
    except Exception as error:
        print(error)
        print('Exception on image conversion lines.')
        logging.error("Could not get image from url")
        return None

    img = Image.open(BytesIO(response.content))
    frames = []

    # Process Gif

    # Loop over each frame in the animated image
    for frame in ImageSequence.Iterator(img):
        # Draw the text on the frame

        # We'll create a custom RedditImage for each frame to avoid
        #      redundant code

        # TODO: Consolidate this entire method into RedditImage. I want to make
        #       Sure this works before I integrate.

        rFrame = RedditImage(frame)
        rFrame.add_title(title, False)

        frame = rFrame._image
        # However, 'frame' is still the animated image with many frames
        # It has simply been seeked to a later frame
        # For our list of frames, we only want the current frame

        # Saving the image without 'save_all' will turn it into a single frame image, and we can then re-open it
        # To be efficient, we will save it to a stream, rather than to file
        b = BytesIO()
        frame.save(b, format="GIF")
        frame = Image.open(b)

        # The first successful image generation was 150MB, so lets see what all
        #       Can be done to not have that happen

        # Then append the single frame image to a list of frames
        frames.append(frame)
    # Save the frames as a new image
    path_gif = 'temp.gif'
    path_mp4 = 'temp.mp4'
    frames[0].save(path_gif, save_all=True, append_images=frames[1:])
    # ff = ffmpy.FFmpeg(inputs={path_gif: None},outputs={path_mp4: None})
    # ff.run()

    imgur = catutils.get_imgur_client_config()
    # try:
    client = GfycatClient()

    response = client.upload_from_file(path_gif)
    # except:
    #     logging.error('Gif Upload Failed, Returning')
    #     return None
    remove(path_gif)
    return response.get("gifUrl")
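
The comment block inside the loop above explains why each frame is written to a BytesIO and reopened: ImageSequence.Iterator yields the whole animation seeked to a frame, not an independent image. A minimal sketch of just that detach-a-frame pattern (the helper name is hypothetical):

# Hedged sketch of the single-frame detach trick used above.
from io import BytesIO
from PIL import Image, ImageSequence

def split_gif_frames(path):
    frames = []
    with Image.open(path) as img:
        for frame in ImageSequence.Iterator(img):
            buf = BytesIO()
            frame.save(buf, format='GIF')  # saving without save_all yields a single frame
            frames.append(Image.open(buf))
    return frames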
Code example #16
def get_media(img_url, post_id):
    if any(s in img_url for s in ('i.redd.it', 'i.reddituploads.com')):
        file_name = os.path.basename(urllib.parse.urlsplit(img_url).path)
        file_extension = os.path.splitext(img_url)[-1].lower()
        # Fix for issue with i.reddituploads.com links not having a file extension in the URL
        if not file_extension:
            file_extension += '.jpg'
            file_name += '.jpg'
            img_url += '.jpg'
        # Grab the GIF versions of .GIFV links
        # When Tweepy adds support for video uploads, we can use grab the MP4 versions
        if (file_extension == '.gifv'):
            file_extension = file_extension.replace('.gifv', '.gif')
            file_name = file_name.replace('.gifv', '.gif')
            img_url = img_url.replace('.gifv', '.gif')
        # Download the file
        file_path = IMAGE_DIR + '/' + file_name
        print('[ OK ] Downloading file at URL ' + img_url + ' to ' +
              file_path + ', file type identified as ' + file_extension)
        img = save_file(img_url, file_path)
        return img
    elif ('imgur.com' in img_url):  # Imgur
        try:
            client = ImgurClient(IMGUR_CLIENT, IMGUR_CLIENT_SECRET)
        except BaseException as e:
            print('[EROR] Error while authenticating with Imgur:', str(e))
            return
        # Working demo of regex: https://regex101.com/r/G29uGl/2
        regex = r"(?:.*)imgur\.com(?:\/gallery\/|\/a\/|\/)(.*?)(?:\/.*|\.|$)"
        m = re.search(regex, img_url, flags=0)
        if m:
            # Get the Imgur image/gallery ID
            id = m.group(1)
            if any(s in img_url
                   for s in ('/a/', '/gallery/')):  # Gallery links
                images = client.get_album_images(id)
                # Only the first image in a gallery is used
                imgur_url = images[0].link
            else:  # Single image
                imgur_url = client.get_image(id).link
            # If the URL is a GIFV link, change it to a GIF
            file_extension = os.path.splitext(imgur_url)[-1].lower()
            if (file_extension == '.gifv'):
                file_extension = file_extension.replace('.gifv', '.gif')
                img_url = imgur_url.replace('.gifv', '.gif')
            # Download the image
            file_path = IMAGE_DIR + '/' + id + file_extension
            print('[ OK ] Downloading Imgur image at URL ' + imgur_url +
                  ' to ' + file_path)
            imgur_file = save_file(imgur_url, file_path)
            # Imgur will sometimes return a single-frame thumbnail instead of a GIF, so we need to check for this
            if (file_extension == '.gif'):
                # Open the file using the Pillow library
                img = Image.open(imgur_file)
                # Get the MIME type
                mime = Image.MIME[img.format]
                if (mime == 'image/gif'):
                    # Image is indeed a GIF, so it can be posted
                    img.close()
                    return imgur_file
                else:
                    # Image is not actually a GIF, so don't post it
                    print(
                        '[EROR] Imgur has not processed a GIF version of this link, so it can not be posted'
                    )
                    img.close()
                    # Delete the image
                    try:
                        os.remove(imgur_file)
                    except BaseException as e:
                        print('[EROR] Error while deleting media file:',
                              str(e))
                    return
            else:
                return imgur_file
        else:
            print(
                '[EROR] Could not identify Imgur image/gallery ID in this URL:',
                img_url)
            return
    elif ('gfycat.com' in img_url):  # Gfycat
        gfycat_name = os.path.basename(urllib.parse.urlsplit(img_url).path)
        client = GfycatClient()
        gfycat_info = client.query_gfy(gfycat_name)
        # Download the 2MB version because Tweepy has a 3MB upload limit for GIFs
        gfycat_url = gfycat_info['gfyItem']['max2mbGif']
        file_path = IMAGE_DIR + '/' + gfycat_name + '.gif'
        print('[ OK ] Downloading Gfycat at URL ' + gfycat_url + ' to ' +
              file_path)
        gfycat_file = save_file(gfycat_url, file_path)
        return gfycat_file
    elif ('giphy.com' in img_url):  # Giphy
        # Working demo of regex: https://regex101.com/r/o8m1kA/2
        regex = r"https?://((?:.*)giphy\.com/media/|giphy.com/gifs/|i.giphy.com/)(.*-)?(\w+)(/|\n)"
        m = re.search(regex, img_url, flags=0)
        if m:
            # Get the Giphy ID
            id = m.group(3)
            # Download the 2MB version because Tweepy has a 3MB upload limit for GIFs
            giphy_url = 'https://media.giphy.com/media/' + id + '/giphy-downsized.gif'
            file_path = IMAGE_DIR + '/' + id + '-downsized.gif'
            print('[ OK ] Downloading Giphy at URL ' + giphy_url + ' to ' +
                  file_path)
            giphy_file = save_file(giphy_url, file_path)
            return giphy_file
        else:
            print('[EROR] Could not identify Giphy ID in this URL:', img_url)
            return
    else:
        print('[WARN] Post', post_id, 'doesn\'t point to an image/GIF:',
              img_url)
        return
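
The Giphy branch above leans on a single regex (see the linked regex101 demo) to pull the Giphy ID out of several URL shapes. A small hedged check of that extraction; the sample URLs are hypothetical stand-ins for the formats the pattern targets:

# Sketch of the Giphy-ID extraction used above; the sample URLs are made up.
import re

GIPHY_REGEX = r"https?://((?:.*)giphy\.com/media/|giphy.com/gifs/|i.giphy.com/)(.*-)?(\w+)(/|\n)"

samples = [
    'https://media.giphy.com/media/abc123XYZ/giphy.gif',
    'https://giphy.com/gifs/some-slug-abc123XYZ/',
    'https://i.giphy.com/abc123XYZ/',
]
for url in samples:
    m = re.search(GIPHY_REGEX, url)
    if m:
        print(m.group(3))  # prints abc123XYZ for each sample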
Code example #17
def get_media(submission, IMGUR_CLIENT, IMGUR_CLIENT_SECRET):
    img_url = submission.url
    # Make sure config file exists
    try:
        config = configparser.ConfigParser()
        config.read('config.ini')
    except BaseException as e:
        print('[EROR] Error while reading config file:', str(e))
        sys.exit()
    # Make sure media folder exists
    IMAGE_DIR = config['MediaSettings']['MediaFolder']
    if not os.path.exists(IMAGE_DIR):
        os.makedirs(IMAGE_DIR)
        print('[ OK ] Media folder not found, created a new one')
    # Download and save the linked image
    if any(s in img_url for s in ('i.redd.it', 'i.reddituploads.com')):  # Reddit-hosted images
        file_name = os.path.basename(urllib.parse.urlsplit(img_url).path)
        file_extension = os.path.splitext(img_url)[-1].lower()
        # Fix for issue with i.reddituploads.com links not having a file extension in the URL
        if not file_extension:
            file_extension += '.jpg'
            file_name += '.jpg'
            img_url += '.jpg'
        # Download the file
        file_path = IMAGE_DIR + '/' + file_name
        print('[ OK ] Downloading file at URL ' + img_url + ' to ' +
              file_path + ', file type identified as ' + file_extension)
        img = save_file(img_url, file_path)
        return [img]
    elif ('v.redd.it' in img_url):  # Reddit video
        fileouts = []
        class ytdlLogger(object):
            def debug(self, msg):
                if msg.startswith(IMAGE_DIR):
                    fileouts.append(msg)
            def warning(self, msg):
                print("[WARN] " + msg)
            def error(self, msg):
                print("[EROR] " + msg)
        ytdl_opts = {
            'outtmpl': IMAGE_DIR + '/%(id)s.%(ext)s',
            'noplaylist': True,
            'forcefilename': True,
            'logger': ytdlLogger()
        }
        print("[ OK ] Downloading video at url " + img_url + " via youtube-dl...")
        with youtube_dl.YoutubeDL(ytdl_opts) as ytdl:
            ytdl.download([img_url])
            print("[ OK ] File downloaded to " + fileouts[0])
            return [fileouts[0]]
    elif ('reddit.com/gallery/' in img_url):  # Reddit galleries (multiple images)
        try:
            galleryitems = submission.gallery_data['items']
            mediadata = submission.media_metadata
        except BaseException as e:
            print('[EROR] Post seems to be a gallery but there was an error trying to get the gallery data:', str(e))
            return
        if len(galleryitems) > 4:
            print('[WARN] Post is a gallery with more than 4 images. Skipping as it is too many for Twitter.')
            return
        img_url_list = []
        for item in galleryitems:
            if mediadata[item['media_id']]['m'] == 'image/jpg':
                img_url_list.append(f"https://i.redd.it/{item['media_id']}.jpg")
            elif mediadata[item['media_id']]['m'] == 'image/png':
                img_url_list.append(f"https://i.redd.it/{item['media_id']}.png")
            elif mediadata[item['media_id']]['m'] == 'image/webp':
                img_url_list.append(f"https://i.redd.it/{item['media_id']}.webp")
            else:
                print('[WARN] An item in the gallery is not a JPG, PNG, or WEBP. Skipping this post as it is likely unable to be posted to Twitter.')
                return
        downloaded_imgs = []
        for url in img_url_list:
            file_name = os.path.basename(urllib.parse.urlsplit(url).path)
            # Save into the media folder, like the other download branches
            saved = save_file(url, IMAGE_DIR + '/' + file_name)
            downloaded_imgs.append(saved)
        return downloaded_imgs
    elif ('imgur.com' in img_url):  # Imgur
        try:
            client = ImgurClient(IMGUR_CLIENT, IMGUR_CLIENT_SECRET)
        except BaseException as e:
            print('[EROR] Error while authenticating with Imgur:', str(e))
            return
        # Working demo of regex: https://regex101.com/r/G29uGl/2
        regex = r"(?:.*)imgur\.com(?:\/gallery\/|\/a\/|\/)(.*?)(?:\/.*|\.|$)"
        m = re.search(regex, img_url, flags=0)
        if m:
            # Get the Imgur image/gallery ID
            id = m.group(1)
            if any(s in img_url for s in ('/a/', '/gallery/')):  # Gallery links
                images = client.get_album_images(id)
                # Only the first image in a gallery is used
                imgur_url = images[0].link
            else:  # Single image
                imgur_url = client.get_image(id).link
            # If the URL is a GIFV or MP4 link, change it to the GIF version
            file_extension = os.path.splitext(imgur_url)[-1].lower()
            if (file_extension == '.gifv'):
                file_extension = file_extension.replace('.gifv', '.gif')
                imgur_url = imgur_url.replace('.gifv', '.gif')
            elif (file_extension == '.mp4'):
                file_extension = file_extension.replace('.mp4', '.gif')
                imgur_url = imgur_url.replace('.mp4', '.gif')
            # Download the image
            file_path = IMAGE_DIR + '/' + id + file_extension
            print('[ OK ] Downloading Imgur image at URL ' +
                  imgur_url + ' to ' + file_path)
            imgur_file = save_file(imgur_url, file_path)
            # Imgur will sometimes return a single-frame thumbnail instead of a GIF, so we need to check for this
            if (file_extension == '.gif'):
                # Open the file using the Pillow library
                img = Image.open(imgur_file)
                # Get the MIME type
                mime = Image.MIME[img.format]
                if (mime == 'image/gif'):
                    # Image is indeed a GIF, so it can be posted
                    img.close()
                    return [imgur_file]
                else:
                    # Image is not actually a GIF, so don't post it
                    print(
                        '[WARN] Imgur has not processed a GIF version of this link, so it can not be posted to Twitter')
                    img.close()
                    # Delete the image
                    try:
                        os.remove(imgur_file)
                    except BaseException as e:
                        print('[EROR] Error while deleting media file:', str(e))
                    return
            else:
                return [imgur_file]
        else:
            print(
                '[EROR] Could not identify Imgur image/gallery ID in this URL:', img_url)
            return
    elif ('gfycat.com' in img_url):  # Gfycat
        try:
            gfycat_name = os.path.basename(urllib.parse.urlsplit(img_url).path)
            client = GfycatClient()
            gfycat_info = client.query_gfy(gfycat_name)
        except BaseException as e:
            print('[EROR] Error downloading Gfycat link:', str(e))
            return
        # Download the 2MB version because Tweepy has a 3MB upload limit for GIFs
        gfycat_url = gfycat_info['gfyItem']['max2mbGif']
        file_path = IMAGE_DIR + '/' + gfycat_name + '.gif'
        print('[ OK ] Downloading Gfycat at URL ' +
              gfycat_url + ' to ' + file_path)
        gfycat_file = save_file(gfycat_url, file_path)
        return [gfycat_file]
    elif ('giphy.com' in img_url):  # Giphy
        # Working demo of regex: https://regex101.com/r/o8m1kA/2
        regex = r"https?://((?:.*)giphy\.com/media/|giphy.com/gifs/|i.giphy.com/)(.*-)?(\w+)(/|\n)"
        m = re.search(regex, img_url, flags=0)
        if m:
            # Get the Giphy ID
            id = m.group(3)
            # Download the 2MB version because Tweepy has a 3MB upload limit for GIFs
            giphy_url = 'https://media.giphy.com/media/' + id + '/giphy-downsized.gif'
            file_path = IMAGE_DIR + '/' + id + '-downsized.gif'
            print('[ OK ] Downloading Giphy at URL ' +
                  giphy_url + ' to ' + file_path)
            giphy_file = save_file(giphy_url, file_path)
            # Check the hash to make sure it's not a GIF saying "This content is not available"
            # More info: https://github.com/corbindavenport/tootbot/issues/8
            hash = hashlib.md5(file_as_bytes(
                open(giphy_file, 'rb'))).hexdigest()
            if (hash == '59a41d58693283c72d9da8ae0561e4e5'):
                print(
                    '[WARN] Giphy has not processed a 2MB GIF version of this link, so it can not be posted to Twitter')
                return
            else:
                return [giphy_file]
        else:
            print('[EROR] Could not identify Giphy ID in this URL:', img_url)
            return
    else:
        # Check if URL is an image, based on the MIME type
        image_formats = ('image/png', 'image/jpeg', 'image/gif', 'image/webp')
        img_site = urlopen(img_url)
        meta = img_site.info()
        if meta["content-type"] in image_formats:
            # URL appears to be an image, so download it
            file_name = os.path.basename(urllib.parse.urlsplit(img_url).path)
            file_path = IMAGE_DIR + '/' + file_name
            print('[ OK ] Downloading file at URL ' +
                  img_url + ' to ' + file_path)
            try:
                img = save_file(img_url, file_path)
                return [img]
            except BaseException as e:
                print('[EROR] Error while downloading image:', str(e))
                return
        else:
            print('[EROR] URL does not point to a valid image file')
            return
Code example #18
File: getmedia.py Project: webstrapd/tootbot
def get_media(img_url, IMGUR_CLIENT, IMGUR_CLIENT_SECRET):
    # Make sure config file exists
    try:
        config = configparser.ConfigParser()
        config.read('config.ini')
    except BaseException as e:
        print('[EROR] Error while reading config file:', str(e))
        sys.exit()
    # Make sure media folder exists
    IMAGE_DIR = config['MediaSettings']['MediaFolder']
    if not os.path.exists(IMAGE_DIR):
        os.makedirs(IMAGE_DIR)
        print('[ OK ] Media folder not found, created a new one')
    # Download and save the linked image
    if any(s in img_url
           for s in ('i.redd.it',
                     'i.reddituploads.com')):  # Reddit-hosted images
        file_name = os.path.basename(urllib.parse.urlsplit(img_url).path)
        file_extension = os.path.splitext(img_url)[-1].lower()
        # Fix for issue with i.reddituploads.com links not having a file extension in the URL
        if not file_extension:
            file_extension += '.jpg'
            file_name += '.jpg'
            img_url += '.jpg'
        # Download the file
        file_path = IMAGE_DIR + '/' + file_name
        print('[ OK ] Downloading file at URL ' + img_url + ' to ' +
              file_path + ', file type identified as ' + file_extension)
        img = save_file(img_url, file_path)
        return img
    elif ('v.redd.it' in img_url):  # Reddit video
        print(
            '[WARN] Reddit videos can not be uploaded to Twitter, due to API limitations'
        )
        return
    elif ('imgur.com' in img_url):  # Imgur
        try:
            client = ImgurClient(IMGUR_CLIENT, IMGUR_CLIENT_SECRET)
        except BaseException as e:
            print('[EROR] Error while authenticating with Imgur:', str(e))
            return
        # Working demo of regex: https://regex101.com/r/G29uGl/2
        regex = r"(?:.*)imgur\.com(?:\/gallery\/|\/a\/|\/)(.*?)(?:\/.*|\.|$)"
        m = re.search(regex, img_url, flags=0)
        if m:
            # Get the Imgur image/gallery ID
            id = m.group(1)
            if any(s in img_url
                   for s in ('/a/', '/gallery/')):  # Gallery links
                images = client.get_album_images(id)
                # Only the first image in a gallery is used
                imgur_url = images[0].link
            else:  # Single image
                imgur_url = client.get_image(id).link
            # If the URL is a GIFV or MP4 link, change it to the GIF version
            file_extension = os.path.splitext(imgur_url)[-1].lower()
            if (file_extension == '.gifv'):
                file_extension = file_extension.replace('.gifv', '.gif')
                imgur_url = imgur_url.replace('.gifv', '.gif')
            elif (file_extension == '.mp4'):
                file_extension = file_extension.replace('.mp4', '.gif')
                imgur_url = imgur_url.replace('.mp4', '.gif')
            # Download the image
            file_path = IMAGE_DIR + '/' + id + file_extension
            print('[ OK ] Downloading Imgur image at URL ' + imgur_url +
                  ' to ' + file_path)
            imgur_file = save_file(imgur_url, file_path)
            # Imgur will sometimes return a single-frame thumbnail instead of a GIF, so we need to check for this
            if (file_extension == '.gif'):
                # Open the file using the Pillow library
                img = Image.open(imgur_file)
                # Get the MIME type
                mime = Image.MIME[img.format]
                if (mime == 'image/gif'):
                    # Image is indeed a GIF, so it can be posted
                    img.close()
                    return imgur_file
                else:
                    # Image is not actually a GIF, so don't post it
                    print(
                        '[WARN] Imgur has not processed a GIF version of this link, so it can not be posted to Twitter'
                    )
                    img.close()
                    # Delete the image
                    try:
                        os.remove(imgur_file)
                    except BaseException as e:
                        print('[EROR] Error while deleting media file:',
                              str(e))
                    return
            else:
                return imgur_file
        else:
            print(
                '[EROR] Could not identify Imgur image/gallery ID in this URL:',
                img_url)
            return
    elif ('gfycat.com' in img_url):  # Gfycat
        try:
            gfycat_name = os.path.basename(urllib.parse.urlsplit(img_url).path)
            client = GfycatClient()
            gfycat_info = client.query_gfy(gfycat_name)
        except BaseException as e:
            print('[EROR] Error downloading Gfycat link:', str(e))
            return
        # Download the 2MB version because Tweepy has a 3MB upload limit for GIFs
        gfycat_url = gfycat_info['gfyItem']['max2mbGif']
        file_path = IMAGE_DIR + '/' + gfycat_name + '.gif'
        print('[ OK ] Downloading Gfycat at URL ' + gfycat_url + ' to ' +
              file_path)
        gfycat_file = save_file(gfycat_url, file_path)
        return gfycat_file
    elif ('giphy.com' in img_url):  # Giphy
        # Working demo of regex: https://regex101.com/r/o8m1kA/2
        regex = r"https?://((?:.*)giphy\.com/media/|giphy.com/gifs/|i.giphy.com/)(.*-)?(\w+)(/|\n)"
        m = re.search(regex, img_url, flags=0)
        if m:
            # Get the Giphy ID
            id = m.group(3)
            # Download the 2MB version because Tweepy has a 3MB upload limit for GIFs
            giphy_url = 'https://media.giphy.com/media/' + id + '/giphy-downsized.gif'
            file_path = IMAGE_DIR + '/' + id + '-downsized.gif'
            print('[ OK ] Downloading Giphy at URL ' + giphy_url + ' to ' +
                  file_path)
            giphy_file = save_file(giphy_url, file_path)
            # Check the hash to make sure it's not a GIF saying "This content is not available"
            # More info: https://github.com/corbindavenport/tootbot/issues/8
            hash = hashlib.md5(file_as_bytes(open(giphy_file,
                                                  'rb'))).hexdigest()
            if (hash == '59a41d58693283c72d9da8ae0561e4e5'):
                print(
                    '[WARN] Giphy has not processed a 2MB GIF version of this link, so it can not be posted to Twitter'
                )
                return
            else:
                return giphy_file
        else:
            print('[EROR] Could not identify Giphy ID in this URL:', img_url)
            return
    else:
        # Check if URL is an image, based on the MIME type
        image_formats = ('image/png', 'image/jpeg', 'image/gif', 'image/webp')
        img_site = urlopen(img_url)
        meta = img_site.info()
        if meta["content-type"] in image_formats:
            # URL appears to be an image, so download it
            file_name = os.path.basename(urllib.parse.urlsplit(img_url).path)
            file_path = IMAGE_DIR + '/' + file_name
            print('[ OK ] Downloading file at URL ' + img_url + ' to ' +
                  file_path)
            try:
                img = save_file(img_url, file_path)
                return img
            except BaseException as e:
                print('[EROR] Error while downloading image:', str(e))
                return
        else:
            print('[EROR] URL does not point to a valid image file')
            return
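
After downloading the 2MB Giphy rendition, the code above compares an MD5 of the file against a known fingerprint of Giphy's "content not available" placeholder (see the linked issue). A hedged, self-contained sketch of that check; the chunked-read helper is an assumption, since the original's file_as_bytes() helper is not shown in this snippet:

# Hedged sketch of the placeholder-GIF check above; file_as_bytes() is not shown
# in the original snippet, so a chunked MD5 helper is assumed here instead.
import hashlib

KNOWN_PLACEHOLDER_MD5 = '59a41d58693283c72d9da8ae0561e4e5'  # fingerprint used above

def md5_of_file(path, chunk_size=8192):
    digest = hashlib.md5()
    with open(path, 'rb') as fp:
        for chunk in iter(lambda: fp.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()

def is_giphy_placeholder(path):
    return md5_of_file(path) == KNOWN_PLACEHOLDER_MD5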
Code example #19
 def __init__(self, videobot):
     self.bot = GfycatClient()
     self.video_bot = videobot
Code example #20
File: getmedia.py Project: webstrapd/tootbot
def get_hd_media(submission, IMGUR_CLIENT, IMGUR_CLIENT_SECRET):
    media_url = submission.url
    # Make sure config file exists
    try:
        config = configparser.ConfigParser()
        config.read('config.ini')
    except BaseException as e:
        print('[EROR] Error while reading config file:', str(e))
        sys.exit()
    # Make sure media folder exists
    IMAGE_DIR = config['MediaSettings']['MediaFolder']
    if not os.path.exists(IMAGE_DIR):
        os.makedirs(IMAGE_DIR)
        print('[ OK ] Media folder not found, created a new one')
    # Download and save the linked image
    if any(s in media_url
           for s in ('i.redd.it',
                     'i.reddituploads.com')):  # Reddit-hosted images
        file_name = os.path.basename(urllib.parse.urlsplit(media_url).path)
        file_extension = os.path.splitext(media_url)[-1].lower()
        # Fix for issue with i.reddituploads.com links not having a file extension in the URL
        if not file_extension:
            file_extension += '.jpg'
            file_name += '.jpg'
            media_url += '.jpg'
        # Download the file
        file_path = IMAGE_DIR + '/' + file_name
        print('[ OK ] Downloading file at URL ' + media_url + ' to ' +
              file_path + ', file type identified as ' + file_extension)
        img = save_file(media_url, file_path)
        return img
    elif ('v.redd.it' in media_url):  # Reddit video
        if submission.media:
            # Get URL for MP4 version of reddit video
            video_url = submission.media['reddit_video']['fallback_url']
            # Download the file
            file_path = IMAGE_DIR + '/' + submission.id + '.mp4'
            print('[ OK ] Downloading Reddit video at URL ' + video_url +
                  ' to ' + file_path)
            video = save_file(video_url, file_path)
            return video
        else:
            print('[EROR] Reddit API returned no media for this URL:',
                  media_url)
            return
    elif ('imgur.com' in media_url):  # Imgur
        try:
            client = ImgurClient(IMGUR_CLIENT, IMGUR_CLIENT_SECRET)
        except BaseException as e:
            print('[EROR] Error while authenticating with Imgur:', str(e))
            return
        # Working demo of regex: https://regex101.com/r/G29uGl/2
        regex = r"(?:.*)imgur\.com(?:\/gallery\/|\/a\/|\/)(.*?)(?:\/.*|\.|$)"
        m = re.search(regex, media_url, flags=0)
        if m:
            # Get the Imgur image/gallery ID
            id = m.group(1)
            if any(s in media_url
                   for s in ('/a/', '/gallery/')):  # Gallery links
                images = client.get_album_images(id)
                # Only the first image in a gallery is used
                imgur_url = images[0].link
                print(images[0])
            else:  # Single image/GIF
                if client.get_image(id).type == 'image/gif':
                    # If the image is a GIF, use the MP4 version
                    imgur_url = client.get_image(id).mp4
                else:
                    imgur_url = client.get_image(id).link
            file_extension = os.path.splitext(imgur_url)[-1].lower()
            # Download the image
            file_path = IMAGE_DIR + '/' + id + file_extension
            print('[ OK ] Downloading Imgur image at URL ' + imgur_url +
                  ' to ' + file_path)
            imgur_file = save_file(imgur_url, file_path)
            return imgur_file
        else:
            print(
                '[EROR] Could not identify Imgur image/gallery ID in this URL:',
                media_url)
            return
    elif ('gfycat.com' in media_url):  # Gfycat
        try:
            gfycat_name = os.path.basename(
                urllib.parse.urlsplit(media_url).path)
            client = GfycatClient()
            gfycat_info = client.query_gfy(gfycat_name)
        except BaseException as e:
            print('[EROR] Error downloading Gfycat link:', str(e))
            return
        # Download the Mp4 version
        gfycat_url = gfycat_info['gfyItem']['mp4Url']
        file_path = IMAGE_DIR + '/' + gfycat_name + '.mp4'
        print('[ OK ] Downloading Gfycat at URL ' + gfycat_url + ' to ' +
              file_path)
        gfycat_file = save_file(gfycat_url, file_path)
        return gfycat_file
    elif ('giphy.com' in media_url):  # Giphy
        # Working demo of regex: https://regex101.com/r/o8m1kA/2
        regex = r"https?://((?:.*)giphy\.com/media/|giphy.com/gifs/|i.giphy.com/)(.*-)?(\w+)(/|\n)"
        m = re.search(regex, media_url, flags=0)
        if m:
            # Get the Giphy ID
            id = m.group(3)
            # Download the MP4 version of the GIF
            giphy_url = 'https://media.giphy.com/media/' + id + '/giphy.mp4'
            file_path = IMAGE_DIR + '/' + id + 'giphy.mp4'
            print('[ OK ] Downloading Giphy at URL ' + giphy_url + ' to ' +
                  file_path)
            giphy_file = save_file(giphy_url, file_path)
            return giphy_file
        else:
            print('[EROR] Could not identify Giphy ID in this URL:', media_url)
            return
    else:
        # Check if URL is an image or MP4 file, based on the MIME type
        image_formats = ('image/png', 'image/jpeg', 'image/gif', 'image/webp',
                         'video/mp4')
        img_site = urlopen(media_url)
        meta = img_site.info()
        if meta["content-type"] in image_formats:
            # URL appears to be an image, so download it
            file_name = os.path.basename(urllib.parse.urlsplit(media_url).path)
            file_path = IMAGE_DIR + '/' + file_name
            print('[ OK ] Downloading file at URL ' + media_url + ' to ' +
                  file_path)
            try:
                img = save_file(media_url, file_path)
                return img
            except BaseException as e:
                print('[EROR] Error while downloading image:', str(e))
                return
        else:
            print('[EROR] URL does not point to a valid image file.')
            return
Code example #21
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--force", help="Force", action='store_true')
parser.add_argument("-g", "--gallery", help="Get the galleries again", action='store_true')
parser.add_argument("-v", "--video", help="Get the video again", action='store_true')
args, unknown = parser.parse_known_args()

reddit = praw.Reddit(
    client_id=secrets.reddit['pull']['id'], 
    client_secret=secrets.reddit['pull']['secret'], 
    password=secrets.reddit['pull']['password'], user_agent='test', 
    username=secrets.reddit['pull']['username']
)

gfycat = GfycatClient(
    secrets.gfycat['id'],
    secrets.gfycat['secret']
)

imgur = ImgurClient(
    secrets.imgur['id'],
    secrets.imgur['secret']
)

subredMap = {}


def lf(path, kind = 'set'):
    if os.path.exists(path):
        with open(path) as fp:
            if kind == 'json':
                try:
Code example #22
def get_url(submission, mp4_instead_gif=True):
    '''
    Return (TYPE, URL, EXTENSION).
    E.g.: return 'img', 'http://example.com/pic.png', 'png'
    '''
    
    def what_is_inside(url):
        header = requests.head(url).headers
        if 'Content-Type' in header:
            return header['Content-Type']
        else:
            return ''

    url = submission.url
    url_content = what_is_inside(url)

    if (CONTENT_JPEG == url_content or CONTENT_PNG == url_content):
        return TYPE_IMG, url, url_content.split('/')[1]

    if CONTENT_GIF in url_content:
        if url.endswith('.gif') and mp4_instead_gif:
            # Let's try to find .mp4 file.
            url_mp4 = url[:-4] + '.mp4'
            if CONTENT_MP4 == what_is_inside(url_mp4):
                return TYPE_GIF, url_mp4, 'mp4'
        return TYPE_GIF, url, 'gif'
    
    if url.endswith('.gifv'):
        if mp4_instead_gif:
            url_mp4 = url[:-5] + '.mp4'
            if CONTENT_MP4 == what_is_inside(url_mp4):
                return TYPE_GIF, url_mp4, 'mp4'
        if CONTENT_GIF in what_is_inside(url[0:-1]):
            return TYPE_GIF, url[0:-1], 'gif'

    if submission.is_self is True:
        # Self submission with text
        return TYPE_TEXT, None, None

    if urlparse(url).netloc == 'imgur.com':
        # Imgur
        imgur_config = yaml.safe_load(open(os.path.join('configs', 'imgur.yml')).read())
        imgur_client = ImgurClient(imgur_config['client_id'], imgur_config['client_secret'])
        path_parts = urlparse(url).path.split('/')
        if path_parts[1] == 'gallery':
            # TODO: gallery handling
            return TYPE_OTHER, url, None
        elif path_parts[1] == 'topic':
            # TODO: topic handling
            return TYPE_OTHER, url, None
        elif path_parts[1] == 'a':
            # An imgur album
            album = imgur_client.get_album(path_parts[2])
            story = dict()
            for num, img in enumerate(album.images):
                number = num + 1
                what = TYPE_IMG
                link = img['link']
                ext = img['type'].split('/')[1]
                if img['animated']:
                    what = TYPE_GIF
                    link = img['mp4'] if mp4_instead_gif else img['gifv'][:-1]
                    ext = 'mp4' if mp4_instead_gif else 'gif'
                story[number] = {
                    'url': link,
                    'what': what,
                    'ext': ext
                }
            if len(story) == 1:
                return story[1]['what'], story[1]['url'], story[1]['ext']
            return TYPE_ALBUM, story, None
        else:
            # Just imgur img
            img = imgur_client.get_image(path_parts[1].split('.')[0])
            if not img.animated:
                return TYPE_IMG, img.link, img.type.split('/')[1]
            else:
                if mp4_instead_gif:
                    return TYPE_GIF, img.mp4, 'mp4'
                else:
                    # return 'gif', img.link, 'gif'
                    return TYPE_GIF, img.gifv[:-1], 'gif'
    elif 'gfycat.com' in urlparse(url).netloc:
        client = GfycatClient()
        rname = re.findall(r'gfycat.com\/(?:detail\/)?(\w*)', url)[0]
        try:
            urls = client.query_gfy(rname)['gfyItem']
            if mp4_instead_gif:
                return TYPE_GIF, urls['mp4Url'], 'mp4'
            else:
                return TYPE_GIF, urls['max5mbGif'], 'gif'
        except KeyError:
            logging.info('Gfy fail prevented!')
            return TYPE_OTHER, url, None
    else:
        return TYPE_OTHER, url, None
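
get_url above returns a (type, url, extension) tuple, with the TYPE_* and CONTENT_* constants defined elsewhere in that module. A hedged sketch of how a caller might dispatch on that convention; the constant values shown are assumptions, since the snippet above does not define them:

# Hedged usage sketch for get_url's (type, url, extension) convention.
# The constant values are assumptions; the original module defines them elsewhere.
TYPE_IMG, TYPE_GIF, TYPE_ALBUM, TYPE_TEXT, TYPE_OTHER = 'img', 'gif', 'album', 'text', 'other'

def handle_submission(submission, mp4_instead_gif=True):
    what, url, ext = get_url(submission, mp4_instead_gif)
    if what == TYPE_IMG or what == TYPE_GIF:
        print('download {} from {} (.{})'.format(what, url, ext))
    elif what == TYPE_ALBUM:
        # For albums, url is a dict of {index: {'url': ..., 'what': ..., 'ext': ...}}
        for num in sorted(url):
            print('album item', num, url[num]['url'])
    elif what == TYPE_TEXT:
        print('self post, no media to download')
    else:
        print('unsupported link:', url)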
Code example #23
File: download.py Project: dkmiller/tidbits
def get_gfycat_client(args) -> GfycatClient:
    rv = GfycatClient(args.gfycat_id, args.gfycat_secret)
    return rv