Beispiel #1
0
def get_session(username=None, password=None):
    """Creates a requests session which is authenticated to trakt.

    If both *username* and *password* are supplied, log in against the trakt
    ``auth/login`` endpoint and attach the returned user token to the
    session's headers.

    :param username: trakt username (optional)
    :param password: trakt password (optional)
    :raises plugin.PluginError: on timeout, failed login or malformed response
    """
    session = Session()
    session.headers = {
        'Content-Type': 'application/json',
        'trakt-api-version': 2,
        'trakt-api-key': API_KEY
    }
    if username:
        session.headers['trakt-user-login'] = username
    if username and password:
        auth = {'login': username, 'password': password}
        try:
            r = session.post(urljoin(API_URL, 'auth/login'), data=json.dumps(auth))
        except Timeout:  # requests.exceptions.Timeout
            raise plugin.PluginError('Authentication timed out to trakt')
        except RequestException as e:
            # RequestException always *has* a ``response`` attribute, but it is
            # None for connection-level failures -- so ``hasattr`` alone is not
            # a sufficient guard before reading ``status_code``.
            if e.response is not None and e.response.status_code in [401, 403]:
                raise plugin.PluginError('Authentication to trakt failed, check your username/password: %s' % e.args[0])
            else:
                raise plugin.PluginError('Authentication to trakt failed: %s' % e.args[0])
        try:
            session.headers['trakt-user-token'] = r.json()['token']
        except (ValueError, KeyError):
            raise plugin.PluginError('Got unexpected response content while authorizing to trakt: %s' % r.text)
    return session
Beispiel #2
0
    def on_task_input(self, task, config):
        """Search What.cd and return the matching torrent entries."""
        self.session = Session()

        # API docs: "Refrain from making more than five (5) requests every
        # ten (10) seconds" -- a 2-token bucket per 2 seconds stays under that.
        self.session.add_domain_limiter(TokenBucketLimiter('ssl.what.cd', 2, '2 seconds'))

        # Honour a custom user agent when one was configured.
        custom_agent = config.pop('user_agent', None)
        if custom_agent:
            self.session.headers["User-Agent"] = custom_agent

        self._login(config.pop('username'), config.pop('password'))

        # Authentication succeeded, so an empty result set is acceptable.
        task.no_entries_ok = True

        # NOTE: anything still left in config MUST be valid search parameters.
        return list(self._get_entries(self._search_results(config)))
Beispiel #3
0
    def on_task_input(self, task, config):
        """Search on What.cd.

        Logs in, pages through the ``browse`` API results for the configured
        search parameters, and builds one entry per downloadable torrent of
        each release.

        :param task: the running task; ``no_entries_ok`` is set once a request
            has succeeded
        :param dict config: plugin configuration, forwarded as search
            parameters to the ``browse`` call
        :return: list of Entry objects
        """

        self.session = Session()

        # From the API docs: "Refrain from making more than five (5) requests every ten (10) seconds"
        self.session.set_domain_delay('ssl.what.cd', '2 seconds')

        # Login
        self._login(config)

        # Perform the query, accumulating all pages of results.
        results = []
        page = 1
        while True:
            result = self._request("browse", page=page, **config)
            if not result['results']:
                break
            results.extend(result["results"])
            pages = result['pages']
            # Trust the page counter reported by the server over our own.
            page = result['currentPage']
            log.info("Got {0} of {1} pages".format(page, pages))
            if page >= pages:
                break
            page += 1

        # Logged in and made a request successfully, it's ok if nothing matches
        task.no_entries_ok = True

        # Parse the needed information out of the response
        entries = []
        for result in results:
            # Get basic information on the release
            info = dict(
                (k, result[k]) for k in ('artist', 'groupName', 'groupYear'))

            # Releases can have multiple download options
            for tor in result['torrents']:
                temp = info.copy()
                temp.update(
                    dict(
                        (k, tor[k])
                        for k in ('media', 'encoding', 'format', 'torrentId')))

                entries.append(
                    Entry(
                        title="{artist} - {groupName} - {groupYear} "
                        "({media} - {format} - {encoding})-{torrentId}.torrent"
                        .format(**temp),
                        url="https://what.cd/torrents.php?action=download&"
                        "id={0}&authkey={1}&torrent_pass={2}".format(
                            temp['torrentId'], self.authkey, self.passkey),
                        torrent_seeds=tor['seeders'],
                        torrent_leeches=tor['leechers'],
                        # Size is given in bytes, convert it
                        content_size=int(tor['size'] / (1024**2) * 100) / 100))

        return entries
Beispiel #4
0
 def __init__(self, config):
     """Store config and prepare a rate-limited session for imdb.com.

     :param dict config: plugin configuration; ``force_language`` selects the
         Accept-Language header sent to imdb (default ``en-us``)
     """
     self.config = config
     self._session = Session()
     self._session.add_domain_limiter(TimedLimiter('imdb.com', '5 seconds'))
     # Merge into the session's default headers instead of replacing them:
     # assigning a brand-new dict would drop defaults such as User-Agent.
     self._session.headers.update({'Accept-Language': config.get('force_language', 'en-us')})
     self.user_id = None
     self.list_id = None
     self._items = None
     self._authenticated = False
Beispiel #5
0
 def session(self):
     """Return a lazily-created, rate-limited session for descargas2020.com."""
     # TODO: This is not used for all requests even ..
     if self.requests is None:
         self.requests = Session()
         # BUG FIX: the original configured the module-level ``requests``
         # object instead of the newly created session, so neither the
         # User-Agent header nor the domain limiter ever applied to
         # ``self.requests``.
         self.requests.headers.update({
             'User-Agent':
             'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
         })
         self.requests.add_domain_limiter(
             TimedLimiter('descargas2020.com', '2 seconds'))
     return self.requests
Beispiel #6
0
 def session(self):
     """Build the shared descargas2020.com session on first access."""
     # TODO: This is not used for all requests even ..
     if self._session is None:
         sess = Session()
         sess.headers.update(
             {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
         )
         sess.add_domain_limiter(TimedLimiter('descargas2020.com', '2 seconds'))
         self._session = sess
     return self._session
Beispiel #7
0
    def on_task_input(self, task, config):
        """Search on What.cd"""
        self.session = Session()

        agent = config.get('user_agent')
        if agent:
            # Using a custom user agent
            self.session.headers["User-Agent"] = agent

        # From the API docs: "Refrain from making more than five (5) requests every ten (10) seconds"
        self.session.set_domain_delay('ssl.what.cd', '2 seconds')

        # Login
        self._login(config)

        # Page through the browse results until the server runs out of pages.
        results = []
        page_no = 1
        while True:
            response = self._request("browse", page=page_no, **config)
            if not response['results']:
                break
            results.extend(response["results"])
            total_pages = response['pages']
            page_no = response['currentPage']
            log.info("Got {0} of {1} pages".format(page_no, total_pages))
            if page_no >= total_pages:
                break
            page_no += 1

        # Logged in and made a request successfully, it's ok if nothing matches
        task.no_entries_ok = True

        # Build one entry per torrent option of every release returned.
        entries = []
        for release in results:
            base = {k: release[k] for k in ('artist', 'groupName', 'groupYear')}

            # Releases can have multiple download options
            for tor in release['torrents']:
                fields = dict(base)
                fields.update({k: tor[k] for k in ('media', 'encoding', 'format', 'torrentId')})

                entries.append(Entry(
                    title="{artist} - {groupName} - {groupYear} "
                          "({media} - {format} - {encoding})-{torrentId}.torrent".format(**fields),
                    url="https://what.cd/torrents.php?action=download&"
                        "id={0}&authkey={1}&torrent_pass={2}".format(fields['torrentId'], self.authkey, self.passkey),
                    torrent_seeds=tor['seeders'],
                    torrent_leeches=tor['leechers'],
                    # Size is given in bytes, convert it
                    content_size=int(tor['size'] / (1024**2) * 100) / 100
                ))

        return entries
Beispiel #8
0
 def __init__(self, config):
     """Remember the config and set up a throttled session for imdb.com."""
     self.config = config
     session = RequestSession()
     session.add_domain_limiter(TimedLimiter('imdb.com', '5 seconds'))
     session.headers.update({'Accept-Language': config.get('force_language', 'en-us')})
     self._session = session
     self.user_id = None
     self.list_id = None
     self.cookies = self.parse_cookies(config.get('cookies', None))
     self.hidden_value = None
     self._items = None
     self._authenticated = False
Beispiel #9
0
def get_session(username=None, password=None):
    """Creates a requests session which is authenticated to trakt."""
    session = Session()
    session.headers = {
        'Content-Type': 'application/json',
        'trakt-api-version': 2,
        'trakt-api-key': API_KEY
    }
    if username:
        session.headers['trakt-user-login'] = username
    # Without full credentials there is nothing to authenticate.
    if not (username and password):
        return session
    auth = {'login': username, 'password': password}
    try:
        r = session.post(urljoin(API_URL, 'auth/login'),
                         data=json.dumps(auth))
    except RequestException as e:
        if e.response and e.response.status_code in [401, 403]:
            raise plugin.PluginError(
                'Authentication to trakt failed, check your username/password: %s'
                % e.args[0])
        raise plugin.PluginError('Authentication to trakt failed: %s' %
                                 e.args[0])
    try:
        session.headers['trakt-user-token'] = r.json()['token']
    except (ValueError, KeyError):
        raise plugin.PluginError(
            'Got unexpected response content while authorizing to trakt: %s'
            % r.text)
    return session
    def search(self, entry, config):
        """Search elitetorrent.net for each of the entry's search strings.

        :param entry: entry providing ``search_strings`` (falls back to its
            ``title``)
        :param config: plugin configuration (not used by this search)
        :return: set of new entries, one per result link found
        """
        session = Session()
        entries = set()
        for search_string in entry.get('search_strings', [entry['title']]):
            # Normalise the query and replace spaces *before* encoding --
            # calling str.replace on the utf-8 encoded bytes fails on Python 3.
            query = normalize_unicode(clean_title(search_string)).replace(' ', '+')
            url = 'http://www.elitetorrent.net/busqueda/' + query.encode('utf8')

            log.debug('Fetching URL for `%s`: %s' % (search_string, url))

            page = session.get(url).content
            soup = get_soup(page)

            for result in soup.findAll('a', 'nombre'):
                # Use a fresh name instead of shadowing the ``entry`` parameter.
                new_entry = Entry()
                new_entry['title'] = result['title']
                new_entry['url'] = 'http://www.elitetorrent.net/get-torrent/' + result['href'].split('/')[2]
                log.debug('Adding entry `%s`: %s' % (new_entry['title'], new_entry['url']))
                entries.add(new_entry)

        return entries
Beispiel #11
0
def get_session(username=None, password=None):
    """Creates a requests session which is authenticated to trakt.

    :param username: trakt username (optional)
    :param password: trakt password (optional)
    :raises plugin.PluginError: on timeout, failed login or malformed response
    """
    session = Session()
    session.headers = {"Content-Type": "application/json", "trakt-api-version": 2, "trakt-api-key": API_KEY}
    if username:
        session.headers["trakt-user-login"] = username
    if username and password:
        auth = {"login": username, "password": password}
        try:
            r = session.post(urljoin(API_URL, "auth/login"), data=json.dumps(auth))
        except Timeout:  # requests.exceptions.Timeout
            raise plugin.PluginError("Authentication timed out to trakt")
        except RequestException as e:
            # ``e.response`` exists on every RequestException but is None for
            # connection-level failures, so hasattr() alone is not a safe
            # guard before reading ``status_code``.
            if e.response is not None and e.response.status_code in [401, 403]:
                raise plugin.PluginError("Authentication to trakt failed, check your username/password: %s" % e.args[0])
            else:
                raise plugin.PluginError("Authentication to trakt failed: %s" % e.args[0])
        try:
            session.headers["trakt-user-token"] = r.json()["token"]
        except (ValueError, KeyError):
            raise plugin.PluginError("Got unexpected response content while authorizing to trakt: %s" % r.text)
    return session
Beispiel #12
0
    def on_task_input(self, task, config):
        """Run a What.cd search and return the resulting entries."""
        self.session = Session()

        # From the API docs: "Refrain from making more than five (5) requests every ten (10) seconds"
        self.session.add_domain_limiter(TokenBucketLimiter('ssl.what.cd', 2, '2 seconds'))

        # Custom user agent
        ua = config.pop('user_agent', None)
        if ua:
            self.session.headers.update({"User-Agent": ua})

        # Login
        self._login(config.pop('username'), config.pop('password'))

        # Logged in successfully, it's ok if nothing matches
        task.no_entries_ok = True

        # NOTE: Any values still in config at this point MUST be valid search parameters
        search_hits = self._search_results(config)
        return list(self._get_entries(search_hits))
Beispiel #13
0
import base64
import datetime
# BUG FIX: ``logging`` is used below (logging.getLogger) but was never
# imported in this snippet.
import logging

from flexget import plugin
from flexget.event import event
from flexget.config_schema import one_or_more
from flexget.plugin import PluginWarning
from flexget.utils.requests import Session as RequestSession, TimedLimiter
from requests.exceptions import RequestException

__name__ = 'pushbullet'
log = logging.getLogger(__name__)

PUSHBULLET_URL = 'https://api.pushbullet.com/v2/pushes'

# Shared, retrying session rate-limited for the pushbullet API.
requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('pushbullet.com', '5 seconds'))


class PushbulletNotifier(object):
    """
    Example::

      pushbullet:
        apikey: <API_KEY>
        [device: <DEVICE_IDEN> (can also be a list of device idens, or don't specify any idens to send to all devices)]
        [email: <EMAIL_ADDRESS> (can also be a list of user email addresses)]
        [channel: <CHANNEL_TAG> (you can only specify device / email or channel tag. cannot use both.)]
        [title: <MESSAGE_TITLE>] (default: "{{task}} - Download started" -- accepts Jinja2)
        [body: <MESSAGE_BODY>] (default: "{{series_name}} {{series_id}}" -- accepts Jinja2)
Beispiel #14
0
# BUG FIX: ``logging`` is used below (logging.getLogger) but was never
# imported in this snippet.
import logging

from sqlalchemy import Column, Unicode, DateTime
from dateutil.parser import parse as dateutil_parse

from flexget import plugin, db_schema
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.manager import Session
from flexget.utils.database import json_synonym
from flexget.utils.requests import Session as RequestSession, TimedLimiter, RequestException
from flexget.utils.tools import parse_filesize

log = logging.getLogger('passthepopcorn')
Base = db_schema.versioned_base('passthepopcorn', 1)

# Shared session rate-limited for the passthepopcorn API.
requests = RequestSession()
requests.add_domain_limiter(TimedLimiter('passthepopcorn.me', '5 seconds'))

TAGS = [
    'action',
    'adventure',
    'animation',
    'arthouse',
    'asian',
    'biography',
    'camp',
    'comedy',
    'crime',
    'cult',
    'documentary',
    'drama',
Beispiel #15
0
import logging

from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
from flexget.utils.requests import RequestException, Session, TimedLimiter
from flexget.utils.soup import get_soup

log = logging.getLogger('letterboxd')

# Shared retrying session, throttled to one request per second for letterboxd.
requests = Session(max_retries=5)
requests.add_domain_limiter(TimedLimiter('letterboxd.com', '1 seconds'))
base_url = 'http://letterboxd.com'

# Per list-type URL path template (p_slug) and the HTML attribute that holds
# the film link/slug in that page's markup (f_slug).
SLUGS = {
    'default': {'p_slug': '/%(user)s/list/%(list)s/', 'f_slug': 'data-film-slug'},
    'diary': {'p_slug': '/%(user)s/films/diary/', 'f_slug': 'data-film-slug'},
    'likes': {'p_slug': '/%(user)s/likes/films/', 'f_slug': 'data-film-link'},
    'rated': {'p_slug': '/%(user)s/films/ratings/', 'f_slug': 'data-film-slug'},
    'watched': {'p_slug': '/%(user)s/films/', 'f_slug': 'data-film-slug'},
    'watchlist': {'p_slug': '/%(user)s/watchlist/', 'f_slug': 'data-film-slug'},
}

SORT_BY = {
    'default': '',
    'added': 'by/added/',
    'length-ascending': 'by/shortest/',
    'length-descending': 'by/longest/',
    'name': 'by/name/',
    'popularity': 'by/popular/',
Beispiel #16
0
import logging
import hashlib

from flexget import plugin
from flexget.event import event
from flexget.plugin import PluginWarning
from flexget.utils.requests import Session as RequestSession, TimedLimiter
from requests.exceptions import RequestException

# NOTE(review): reassigning ``__name__`` is unusual; it makes the logger name
# 'sms_ru' regardless of how the module is imported.
__name__ = 'sms_ru'
log = logging.getLogger(__name__)

SMS_SEND_URL = 'http://sms.ru/sms/send'
SMS_TOKEN_URL = 'http://sms.ru/auth/get_token'

# Shared, retrying session rate-limited for the sms.ru API.
requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('sms.ru', '5 seconds'))


class SMSRuNotifier(object):
    """
    Sends SMS notification through sms.ru http api sms/send.
    Phone number is a login assigned to sms.ru account.

    Example:

      sms_ru:
        phone_number: <PHONE_NUMBER> (accepted format example: '79997776655')
        password: <PASSWORD>

    """
Beispiel #17
0
from loguru import logger
from requests.exceptions import RequestException

from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.plugin import PluginWarning
from flexget.utils.requests import Session as RequestSession
from flexget.utils.requests import TimedLimiter

plugin_name = 'join'
logger = logger.bind(name=plugin_name)

# Shared, retrying session rate-limited for the Join (appspot) API.
requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('appspot.com', '5 seconds'))

JOIN_URL = 'https://joinjoaomgcd.appspot.com/_ah/api/messaging/v1/sendPush'


class JoinNotifier:
    """
    Example::

      notify:
        entries:
          via:
            - join:
                [api_key: <API_KEY> (your join api key. Only required for 'group' notifications)]
                [group: <GROUP_NAME> (name of group of join devices to notify. 'all', 'android', etc.)
                [device: <DEVICE_ID> (can also be a list of device ids)]
                [url: <NOTIFICATION_URL>]
Beispiel #18
0
from flexget import db_schema, plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.manager import Session
from flexget.utils.database import json_synonym
from flexget.utils.requests import RequestException
from flexget.utils.requests import Session as RequestSession
from flexget.utils.requests import TimedLimiter
from flexget.utils.soup import get_soup
from flexget.utils.tools import parse_filesize
# BUG FIX: ``logger`` is used below (logger.bind) but loguru was never
# imported in this snippet.
from loguru import logger

logger = logger.bind(name='alpharatio')
Base = db_schema.versioned_base('alpharatio', 0)

requests = RequestSession()
requests.add_domain_limiter(TimedLimiter('alpharatio.cc', '5 seconds'))
# ElementZero confirmed with AlphaRato sysop 'jasonmaster' that they do want a 5 second limiter

CATEGORIES = {
    'tvsd': 'filter_cat[1]',
    'tvhd': 'filter_cat[2]',
    'tvuhd': 'filter_cat[3]',
    'tvdvdrip': 'filter_cat[4]',
    'tvpacksd': 'filter_cat[5]',
    'tvpackhd': 'filter_cat[6]',
    'tvpackuhd': 'filter_cat[7]',
    'moviesd': 'filter_cat[8]',
    'moviehd': 'filter_cat[9]',
    'movieuhd': 'filter_cat[10]',
    'moviepacksd': 'filter_cat[11]',
Beispiel #19
0
class IFTTTNotifier(object):
    """
    Push the notification to an IFTTT webhook.

    Configuration options

    ===============  ===================================================================
    Option           Description
    ===============  ===================================================================
    event            The event endpoint to trigger (required)
    keys             List of auth  keys to send the notification to. (required)
    ===============  ===================================================================

    Config basic example::

      notify:
        task:
          via:
            - ifttt:
                event: download_added
                keys:
                    - deadebeef123
    """

    schema = {
        'type': 'object',
        'properties': {'event': {'type': 'string'}, 'keys': one_or_more({'type': 'string'})},
        'required': ['event', 'keys'],
        'additionalProperties': False,
    }

    def __init__(self):
        self.session = Session()
        self.url_template = 'https://maker.ifttt.com/trigger/{}/with/key/{}'

    def prepare_config(self, config):
        """Normalise the config so that ``keys`` is always a list."""
        if not isinstance(config['keys'], list):
            config['keys'] = [config['keys']]
        return config

    def notify(self, title, message, config):
        """
        Send notification to ifttt webhook.

        The notification will be sent to https://maker.ifttt.com/trigger/{event}/with/key/{key}'
        with the values for the config, with a json body setting 'value1' to the message title,
        and 'value2' to the message body.

        If multiple keys are provided the event will be triggered for all of them.

        :param str message: message body
        :param str title: message subject
        :param dict config: plugin config
        """
        config = self.prepare_config(config)
        payload = {'value1': title, 'value2': message}
        failed = False
        for api_key in config['keys']:
            endpoint = self.url_template.format(config['event'], api_key)
            try:
                self.session.post(endpoint, json=payload)
                log.info("Sent notification to key: %s", api_key)
            except RequestException as e:
                log.error("Error sending notification to key %s: %s", api_key, e)
                failed = True
        if failed:
            raise PluginWarning("Failed to send notifications")
Beispiel #20
0
import datetime
import logging

from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.plugin import PluginWarning
from flexget.utils.requests import Session as RequestSession, TimedLimiter
from requests.exceptions import RequestException

plugin_name = 'pushover'
log = logging.getLogger(plugin_name)

PUSHOVER_URL = 'https://api.pushover.net/1/messages.json'

# Shared, retrying session rate-limited for the pushover API.
requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('pushover.net', '5 seconds'))


class PushoverNotifier(object):
    """
    Example::

      notify:
        entries:
          via:
            - pushover:
                user_key: <USER_KEY> (can also be a list of userkeys)
                token: <TOKEN>
                [device: <DEVICE_STRING>]
                [priority: <PRIORITY>]
Beispiel #21
0
from __future__ import unicode_literals, division, absolute_import
from builtins import *  # noqa pylint: disable=unused-import, redefined-builtin

import logging

from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.config_schema import one_or_more
from flexget.utils.requests import Session, TimedLimiter, RequestException
from flexget.utils.search import normalize_scene
from flexget.plugin import PluginError

log = logging.getLogger('rarbg')

# Shared session; the 3-second limiter keeps us safely under the API cap.
requests = Session()
requests.add_domain_limiter(TimedLimiter('torrentapi.org', '3 seconds'))  # they only allow 1 request per 2 seconds

CATEGORIES = {
    'all': 0,

    # Movies
    'x264': 17,
    'x264 720p': 45,
    'x264 1080p': 44,
    'x264 3D': 47,
    'XviD': 14,
    'XviD 720p': 48,
    'Full BD': 42,

    # TV
Beispiel #22
0
from __future__ import unicode_literals, division, absolute_import
from builtins import *  # noqa pylint: disable=unused-import, redefined-builtin

import logging

from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
from flexget.utils.requests import RequestException, Session, TimedLimiter
from flexget.utils.soup import get_soup

log = logging.getLogger('letterboxd')

# Shared retrying session, throttled to one request per second for letterboxd.
requests = Session(max_retries=5)
requests.add_domain_limiter(TimedLimiter('letterboxd.com', '1 seconds'))
base_url = 'http://letterboxd.com'

# Per list-type URL path template (p_slug) and the HTML attribute that holds
# the film link/slug in that page's markup (f_slug).
SLUGS = {
    'default': {'p_slug': '/%(user)s/list/%(list)s/', 'f_slug': 'data-film-slug'},
    'diary': {'p_slug': '/%(user)s/films/diary/', 'f_slug': 'data-film-slug'},
    'likes': {'p_slug': '/%(user)s/likes/films/', 'f_slug': 'data-film-link'},
    'rated': {'p_slug': '/%(user)s/films/ratings/', 'f_slug': 'data-film-slug'},
    'watched': {'p_slug': '/%(user)s/films/', 'f_slug': 'data-film-slug'},
    'watchlist': {'p_slug': '/%(user)s/watchlist/', 'f_slug': 'data-film-slug'},
}

SORT_BY = {
    'default': '',
    'added': 'by/added/',
    'length-ascending': 'by/shortest/',
Beispiel #23
0
class ImdbEntrySet(MutableSet):
    """Mutable set view of an IMDb user list, backed by imdb.com web endpoints."""

    # JSON schema for the plugin configuration.
    schema = {
        'type': 'object',
        'properties': {
            'login': {'type': 'string'},
            'password': {'type': 'string'},
            'list': {'type': 'string'},
            'force_language': {'type': 'string', 'default': 'en-us'}
        },
        'additionalProperties': False,
        'required': ['login', 'password', 'list']
    }

    def __init__(self, config):
        """Set up a rate-limited session; authentication happens lazily on first use."""
        self.config = config
        self._session = Session()
        self._session.add_domain_limiter(TimedLimiter('imdb.com', '5 seconds'))
        # NOTE(review): assigning a new dict replaces the session's default
        # headers (e.g. User-Agent) -- presumably intentional here, but
        # .update() would preserve them.
        self._session.headers = {'Accept-Language': config.get('force_language', 'en-us')}
        self.user_id = None
        self.list_id = None
        self._items = None
        self._authenticated = False

    @property
    def session(self):
        """Session that is guaranteed to be authenticated before use."""
        if not self._authenticated:
            self.authenticate()
        return self._session

    def authenticate(self):
        """Authenticates a session with imdb, and grabs any IDs needed for getting/modifying list."""
        try:
            r = self._session.get(
                'https://www.imdb.com/ap/signin?openid.return_to=https%3A%2F%2Fwww.imdb.com%2Fap-signin-'
                'handler&openid.identity=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&'
                'openid.assoc_handle=imdb_mobile_us&openid.mode=checkid_setup&openid.claimed_id=http%3A%'
                '2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.ns=http%3A%2F%2Fspecs.ope'
                'nid.net%2Fauth%2F2.0')
        except ConnectionError as e:
            raise PluginError(e.args[0])
        soup = get_soup(r.content)
        # Collect the sign-in form's hidden fields, then add the credentials.
        inputs = soup.select('form#ap_signin_form input')
        data = dict((i['name'], i.get('value')) for i in inputs if i.get('name'))
        data['email'] = self.config['login']
        data['password'] = self.config['password']
        # NOTE(review): the response of this POST (``d``) is never inspected;
        # login success is verified via the profile redirect below instead.
        d = self._session.post('https://www.imdb.com/ap/signin', data=data)
        # Get user id by extracting from redirect url
        r = self._session.head('http://www.imdb.com/profile', allow_redirects=False)
        if not r.headers.get('location') or 'login' in r.headers['location']:
            raise plugin.PluginError('Login to imdb failed. Check your credentials.')
        # NOTE(review): pattern is not a raw string; '\d' happens to survive
        # as-is but r'ur\d+(?!\d)' would be the safe spelling.
        self.user_id = re.search('ur\d+(?!\d)', r.headers['location']).group()
        # Get list ID
        if self.config['list'] == 'watchlist':
            # tt0133093 is used only as a probe title to make the endpoint
            # return the watchlist's ID.
            data = {'consts[]': 'tt0133093', 'tracking_tag': 'watchlistRibbon'}
            wl_data = self._session.post('http://www.imdb.com/list/_ajax/watchlist_has', data=data).json()
            try:
                self.list_id = wl_data['list_id']
            except KeyError:
                raise PluginError('No list ID could be received. Please initialize list by '
                                  'manually adding an item to it and try again')
        elif self.config['list'] in IMMUTABLE_LISTS or self.config['list'].startswith('ls'):
            # Already an ID (or an immutable built-in list name) -- use as-is.
            self.list_id = self.config['list']
        else:
            # Resolve a named list to its ID via the list dropdown endpoint.
            data = {'tconst': 'tt0133093'}
            list_data = self._session.post('http://www.imdb.com/list/_ajax/wlb_dropdown', data=data).json()
            for li in list_data['items']:
                if li['wlb_text'] == self.config['list']:
                    self.list_id = li['data_list_id']
                    break
            else:
                raise plugin.PluginError('Could not find list %s' % self.config['list'])

        self._authenticated = True

    def invalidate_cache(self):
        """Drop the cached list items so the next access re-fetches them."""
        self._items = None

    @property
    def items(self):
        """Entries of the remote list, fetched via CSV export and cached."""
        if self._items is None:
            try:
                r = self.session.get('http://www.imdb.com/list/export?list_id=%s&author_id=%s' %
                                 (self.list_id, self.user_id))
            except HTTPError as e:
                raise PluginError(e.args[0])
            lines = r.iter_lines()
            # Throw away first line with headers
            next(lines)
            self._items = []
            for row in csv.reader(lines):
                # NOTE(review): ``unicode`` is Python-2-only; this block
                # assumes a py2 runtime.
                row = [unicode(cell, 'utf-8') for cell in row]
                log.debug('parsing line from csv: %s', ', '.join(row))
                # The export format has exactly 16 columns per movie row.
                if not len(row) == 16:
                    log.debug('no movie row detected, skipping. %s', ', '.join(row))
                    continue
                # '????' marks an unknown year in the export.
                entry = Entry({
                    'title': '%s (%s)' % (row[5], row[11]) if row[11] != '????' else '%s' % row[5],
                    'url': row[15],
                    'imdb_id': row[1],
                    'imdb_url': row[15],
                    'imdb_list_position': int(row[0]),
                    'imdb_list_created': datetime.strptime(row[2], '%a %b %d %H:%M:%S %Y') if row[2] else None,
                    'imdb_list_modified': datetime.strptime(row[3], '%a %b %d %H:%M:%S %Y') if row[3] else None,
                    'imdb_list_description': row[4],
                    'imdb_name': row[5],
                    'movie_name': row[5],
                    'imdb_year': int(row[11]) if row[11] != '????' else None,
                    'movie_year': int(row[11]) if row[11] != '????' else None,
                    'imdb_score': float(row[9]) if row[9] else None,
                    'imdb_user_score': float(row[8]) if row[8] else None,
                    'imdb_votes': int(row[13]) if row[13] else None,
                    'imdb_genres': [genre.strip() for genre in row[12].split(',')]
                })
                self._items.append(entry)
        return self._items

    @property
    def immutable(self):
        """Reason string when the configured list cannot be modified, else None."""
        if self.config['list'] in IMMUTABLE_LISTS:
            return '%s list is not modifiable' % self.config['list']

    # NOTE(review): MutableSet declares _from_iterable as a classmethod; this
    # override lacks the @classmethod decorator and relies on being called
    # through an instance -- confirm before calling it via the class.
    def _from_iterable(cls, it):
        # TODO: is this the right answer? the returned object won't have our custom __contains__ logic
        return set(it)

    def __contains__(self, entry):
        """Membership is decided solely by matching imdb_id."""
        if not entry.get('imdb_id'):
            log.debug('entry %s does not have imdb_id, skipping', entry)
            return False
        return any(e['imdb_id'] == entry['imdb_id'] for e in self.items)

    def __iter__(self):
        return iter(self.items)

    def discard(self, entry):
        """Remove *entry* from the remote list (no-op when it is absent)."""
        if self.config['list'] in IMMUTABLE_LISTS:
            raise plugin.PluginError('%s lists are not modifiable' % ' and '.join(IMMUTABLE_LISTS))
        if 'imdb_id' not in entry:
            log.warning('Cannot remove %s from imdb_list because it does not have an imdb_id', entry['title'])
            return
        # Get the list item id
        item_ids = None
        if self.config['list'] == 'watchlist':
            data = {'consts[]': entry['imdb_id'], 'tracking_tag': 'watchlistRibbon'}
            status = self.session.post('http://www.imdb.com/list/_ajax/watchlist_has', data=data).json()
            item_ids = status.get('has', {}).get(entry['imdb_id'])
        else:
            data = {'tconst': entry['imdb_id']}
            status = self.session.post('http://www.imdb.com/list/_ajax/wlb_dropdown', data=data).json()
            for a_list in status['items']:
                if a_list['data_list_id'] == self.list_id:
                    item_ids = a_list['data_list_item_ids']
                    break
        if not item_ids:
            log.warning('%s is not in list %s, cannot be removed', entry['imdb_id'], self.list_id)
            return
        data = {
            'action': 'delete',
            'list_id': self.list_id,
            'ref_tag': 'title'
        }
        # One delete request per list item id associated with this title.
        for item_id in item_ids:
            self.session.post('http://www.imdb.com/list/_ajax/edit', data=dict(data, list_item_id=item_id))
        # We don't need to invalidate our cache if we remove the item
        self._items = [i for i in self._items if i['imdb_id'] != entry['imdb_id']] if self._items else None

    def add(self, entry):
        """Add *entry* (by imdb_id) to the remote list and drop the item cache."""
        if self.config['list'] in IMMUTABLE_LISTS:
            raise plugin.PluginError('%s lists are not modifiable' % ' and '.join(IMMUTABLE_LISTS))
        if 'imdb_id' not in entry:
            log.warning('Cannot add %s to imdb_list because it does not have an imdb_id', entry['title'])
            return
        data = {
            'const': entry['imdb_id'],
            'list_id': self.list_id,
            'ref_tag': 'title'
        }
        self.session.post('http://www.imdb.com/list/_ajax/edit', data=data)
        # Invalidate cache so that new movie info will be grabbed
        self.invalidate_cache()

    def __len__(self):
        return len(self.items)

    @property
    def online(self):
        """ Set the online status of the plugin, online plugin should be treated differently in certain situations,
        like test mode"""
        return True
Beispiel #24
0
from __future__ import unicode_literals, division, absolute_import
import logging
import re

from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.plugin import get_plugin_by_name
from flexget.utils.cached_input import cached
from flexget.utils.requests import RequestException, Session
from flexget.utils.soup import get_soup

log = logging.getLogger('letterboxd')
logging.getLogger('api_tmdb').setLevel(logging.CRITICAL)

requests = Session(max_retries=5)
requests.set_domain_delay('letterboxd.com', '1 seconds')
base_url = 'http://letterboxd.com'

SLUGS = {
    'default': {
        'p_slug': '/%(user)s/list/%(list)s/',
        'f_slug': 'data-film-slug'
    },
    'diary': {
        'p_slug': '/%(user)s/films/diary/',
        'f_slug': 'data-film-slug'
    },
    'likes': {
        'p_slug': '/%(user)s/likes/films/',
        'f_slug': 'data-film-link'
Beispiel #25
0
from __future__ import unicode_literals, division, absolute_import
from builtins import *  # noqa pylint: disable=unused-import, redefined-builtin

import logging
import re

from flexget import plugin
from flexget.event import event
from flexget.plugins.internal.urlrewriting import UrlRewritingError
from flexget.utils.requests import Session, TimedLimiter
from flexget.utils.soup import get_soup

log = logging.getLogger("newpct")

requests = Session()
requests.headers.update({"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"})
requests.add_domain_limiter(TimedLimiter("imdb.com", "2 seconds"))


class UrlRewriteNewPCT(object):
    """NewPCT urlrewriter."""

    # urlrewriter API
    def url_rewritable(self, task, entry):
        """Return a truthy value for newpct/newpct1 page URLs that are
        not already direct .torrent links."""
        url = entry["url"]
        # Raw string avoids invalid-escape warnings on modern Python, and
        # the dots are escaped so e.g. 'newpctXcom' no longer matches.
        rewritable_regex = r"^http://(www\.)?newpct1?\.com/.*"
        return re.match(rewritable_regex, url) and not url.endswith(".torrent")

    # urlrewriter API
    def url_rewrite(self, task, entry):
        # Replace the page URL with the resolved torrent URL.
        entry["url"] = self.parse_download_page(entry["url"])
Beispiel #26
0
import logging

from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.plugin import PluginWarning
from flexget.utils.requests import Session as RequestSession, TimedLimiter
from requests.exceptions import RequestException

plugin_name = 'pushsafer'
log = logging.getLogger(plugin_name)

PUSHSAFER_URL = 'https://www.pushsafer.com/api'

requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('pushsafer.com', '5 seconds'))


class PushsaferNotifier(object):
    """
    Example::

      pushsafer:
        private_key: <string> your private key (can also be a alias key) - Required
        title: <string> (default: task name)
        body: <string> (default: '{{series_name}} {{series_id}}' )
        url: <string> (default: '{{imdb_url}}')
        url_title: <string> (default: (none))
        device: <string> your device or device group id (default: (none))
        icon: <integer> (default is 1)
Beispiel #27
0
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.requests import Session as RequestSession, TimedLimiter
from flexget.utils.soup import get_soup
from requests.exceptions import HTTPError, RequestException

from datetime import date, timedelta

import unicodedata
import re

log = logging.getLogger('search_npo')

requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('npo.nl', '5 seconds'))

fragment_regex = re.compile('[A-Z][^/]+/')
date_regex = re.compile('([1-3]?[0-9]) ([a-z]{3})(?: ([0-9]{4})|\W)')
days_ago_regex = re.compile('([0-9]+) dagen geleden')
months = ['jan', 'feb', 'mrt', 'apr', 'mei', 'jun', 'jul', 'aug', 'sep', 'okt', 'nov', 'dec']


class NPOWatchlist(object):
    """
        Produces entries for every episode on the user's npo.nl watchlist (Dutch public television).
        Entries can be downloaded using http://arp242.net/code/download-npo

        If 'remove_accepted' is set to 'yes', the plugin will delete accepted entries from the watchlist after download
            is complete.
Beispiel #28
0
import logging
import hashlib

from flexget import plugin
from flexget.event import event
from flexget.plugin import PluginWarning
from flexget.utils.requests import Session as RequestSession, TimedLimiter
from requests.exceptions import RequestException

__name__ = 'sms_ru'
log = logging.getLogger(__name__)

SMS_SEND_URL = 'http://sms.ru/sms/send'
SMS_TOKEN_URL = 'http://sms.ru/auth/get_token'

requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('sms.ru', '5 seconds'))


class SMSRuNotifier(object):
    """
    Sends SMS notification through sms.ru http api sms/send.
    Phone number is a login assigned to sms.ru account.

    Example:

      sms_ru:
        phone_number: <PHONE_NUMBER> (accepted format example: '79997776655')
        password: <PASSWORD>

    """
Beispiel #29
0
from __future__ import unicode_literals, division, absolute_import
from builtins import *  # pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import parse_qs, urlparse

import re
import logging

from flexget import plugin
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
from flexget.utils.requests import Session, TimedLimiter
from flexget.utils.soup import get_soup

log = logging.getLogger('google')

requests = Session()
requests.headers.update({'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'})
requests.add_domain_limiter(TimedLimiter('imdb.com', '2 seconds'))


class UrlRewriteGoogleCse(object):
    """Google custom query urlrewriter."""

    # urlrewriter API
    def url_rewritable(self, task, entry):
        """Return True for Google CSE / custom-search result URLs."""
        prefixes = (
            'http://www.google.com/cse?',
            'http://www.google.com/custom?',
        )
        return entry['url'].startswith(prefixes)
Beispiel #30
0
class T411RestClient(object):
    """A REST client for the T411 API.

    Keeps a single web session; after auth() succeeds the API token is
    attached to every subsequent request via the Authorization header.
    """
    @staticmethod
    def template_url(url_scheme='https'):
        """Return the API URL template, e.g. 'https://<domain>%s'."""
        return url_scheme + '://' + T411API_DOMAIN_URL + '%s'

    @staticmethod
    def download_url(torrent_id, url_scheme='https'):
        """Return the direct download URL for *torrent_id*."""
        return (T411RestClient.template_url(url_scheme) %
                T411API_DOWNLOAD_PATH) + str(torrent_id)

    def __init__(self, username=None, password=None, url_scheme='https'):
        """Create an unauthenticated client; call auth() before API use.

        :param username: account name sent to the auth endpoint
        :param password: matching password
        :param url_scheme: 'http' or 'https' for all API calls
        """
        self.credentials = {'username': username, 'password': password}
        # Set only after a successful auth() call.
        self.api_token = None
        self.api_template_url = url_scheme + '://' + T411API_DOMAIN_URL + '%s'
        self.web_session = Session()

    def auth(self):
        """
        Request the server for an api token. The obtained token is kept
        on the client instance for future requests; on failure only an
        error is logged.
        :return:
        """
        auth_url = self.api_template_url % T411API_AUTH_PATH
        response = self.web_session.post(auth_url, self.credentials)
        json_response = response.json()
        error_description = json_response.get('error', None)
        if error_description:
            log.error('%d - %s', json_response.get('code'), error_description)
        else:
            self.set_api_token(json_response.get('token'))

    def set_api_token(self, api_token):
        """
        Store the api token and send it with every subsequent request.
        :param api_token: token string returned by the auth endpoint
        :return:
        """
        self.api_token = api_token
        self.web_session.headers.update({'Authorization': self.api_token})

    def is_authenticated(self):
        """
        :return: True if an api token is set. Note that the client
        doesn't check if the token is valid (expired or wrong).
        """
        return self.api_token is not None

    @staticmethod
    def raise_on_fail_response(json_response):
        """
        Raise ApiError if the server response carries an error message.
        A None response is accepted silently.
        :param json_response: decoded JSON response, or None
        :raises ApiError: when the response contains an 'error' entry
        """
        if json_response is None:
            # Fix: previously this was `pass`, which fell through and
            # crashed on the .get() calls below with an AttributeError.
            return

        error_name = json_response.get('error', None)
        error_code = json_response.get('code', None)
        if error_name is not None:
            raise ApiError(error_code, error_name)

    def get_json(self, path, params=None):
        """
        Common method for requesting a JSON response.

        If the body is not valid JSON, fall back to decoding only its
        last line before giving up.
        :param path: API path appended to the template URL
        :param params: optional query parameters
        :return: decoded JSON payload
        :raises ApiError: when the server reports an error
        """
        url = self.api_template_url % path

        request = self.web_session.get(url, params=params)
        try:
            result = request.json()
        except ValueError:
            log.debug(
                "Response from %s was not JSON encoded. Attempting deep inspection...",
                path)
            try:
                last_line = request.text.splitlines()[-1]
                result = json.loads(last_line)
            except (ValueError, IndexError):
                log.warning(
                    "Server response doesn't contains any JSON encoded response."
                )
                raise

        T411RestClient.raise_on_fail_response(result)
        return result

    @auth_required
    def retrieve_category_tree(self):
        """
        Request the T411 API for categories and their subcategories.
        :return: decoded JSON category tree
        """
        return self.get_json(T411API_CATEGORY_TREE_PATH)

    @auth_required
    def retrieve_terms_tree(self):
        """
        Request the T411 API for term types and terms.
        :return: decoded JSON terms tree
        """
        return self.get_json(T411API_TERMS_PATH)

    @auth_required
    def search(self, query):
        """
        Search torrents.
        :param query: dict
        :param query['category_id']: Int optional
        :param query['result_per_page']: Int optional
        :param query['page_index']: Int optional
        :param query['terms']: iterable of (term type id, term id) pairs
        :return: decoded JSON search result
        """
        url = T411API_SEARCH_PATH
        if query.get('expression') is not None:
            url += query['expression']

        url_params = {}
        if query.get('category_id') is not None:
            # using cat or cid will do the same result
            # but using cid without query expression will not broke
            # results
            url_params['cid'] = query['category_id']
        if query.get('result_per_page') is not None:
            url_params['limit'] = query['result_per_page']
        if query.get('page_index') is not None:
            url_params['offset'] = query['page_index']
        if query.get('terms') is not None:
            # Several terms of the same type are sent as a list under the
            # same 'term[<type>][]' key.
            for (term_type_id, term_id) in query['terms']:
                term_type_key_param = 'term[%s][]' % term_type_id

                if url_params.get(term_type_key_param) is None:
                    url_params[term_type_key_param] = []

                url_params[term_type_key_param].append(term_id)
        return self.get_json(url, params=url_params)

    @auth_required
    def details(self, torrent_id):
        """Return torrent details for *torrent_id*."""
        url = T411API_DETAILS_PATH + str(torrent_id)
        return self.get_json(url)
Beispiel #31
0
class T411RestClient(object):
    """A REST client for the T411 API.

    Keeps a single web session; after auth() succeeds the API token is
    attached to every subsequent request via the Authorization header.
    """

    @staticmethod
    def template_url(url_scheme='http'):
        """Return the API URL template, e.g. 'http://<domain>%s'."""
        return url_scheme + '://' + T411API_DOMAIN_URL + '%s'

    @staticmethod
    def download_url(torrent_id, url_scheme='http'):
        """Return the direct download URL for *torrent_id*."""
        return (T411RestClient.template_url(url_scheme) % T411API_DOWNLOAD_PATH) + str(torrent_id)

    def __init__(self, username=None, password=None, url_scheme='http'):
        """Create an unauthenticated client; call auth() before API use.

        :param username: account name sent to the auth endpoint
        :param password: matching password
        :param url_scheme: 'http' or 'https' for all API calls
        """
        self.credentials = {'username': username, 'password': password}
        # Set only after a successful auth() call.
        self.api_token = None
        self.api_template_url = url_scheme + '://' + T411API_DOMAIN_URL + '%s'
        self.web_session = Session()

    def auth(self):
        """
        Request the server for an api token. The obtained token is kept
        on the client instance for future requests; on failure only an
        error is logged.
        :return:
        """
        auth_url = self.api_template_url % T411API_AUTH_PATH
        response = self.web_session.post(auth_url, self.credentials)
        json_response = response.json()
        error_description = json_response.get('error', None)
        if error_description:
            log.error('%d - %s', json_response.get('code'), error_description)
        else:
            self.set_api_token(json_response.get('token'))

    def set_api_token(self, api_token):
        """
        Store the api token and send it with every subsequent request.
        :param api_token: token string returned by the auth endpoint
        :return:
        """
        self.api_token = api_token
        self.web_session.headers.update({'Authorization': self.api_token})

    def is_authenticated(self):
        """
        :return: True if an api token is set. Note that the client
        doesn't check if the token is valid (expired or wrong).
        """
        return self.api_token is not None

    @staticmethod
    def raise_on_fail_response(json_response):
        """
        Raise ApiError if the server response carries an error message.
        A None response is accepted silently.
        :param json_response: decoded JSON response, or None
        :raises ApiError: when the response contains an 'error' entry
        """
        if json_response is None:
            # Fix: previously this was `pass`, which fell through and
            # crashed on the .get() calls below with an AttributeError.
            return

        error_name = json_response.get('error', None)
        error_code = json_response.get('code', None)
        if error_name is not None:
            raise ApiError(error_code, error_name)

    def get_json(self, path, params=None):
        """
        Common method for requesting a JSON response.

        If the body is not valid JSON, fall back to decoding only its
        last line before giving up.
        :param path: API path appended to the template URL
        :param params: optional query parameters
        :return: decoded JSON payload
        :raises ApiError: when the server reports an error
        """
        url = self.api_template_url % path

        request = self.web_session.get(url, params=params)
        try:
            result = request.json()
        except ValueError:
            log.debug("Response from %s was not JSON encoded. Attempting deep inspection...", path)
            try:
                last_line = request.text.splitlines()[-1]
                result = json.loads(last_line)
            except (ValueError, IndexError):
                log.warning("Server response doesn't contains any JSON encoded response.")
                raise

        T411RestClient.raise_on_fail_response(result)
        return result

    @auth_required
    def retrieve_category_tree(self):
        """
        Request the T411 API for categories and their subcategories.
        :return: decoded JSON category tree
        """
        return self.get_json(T411API_CATEGORY_TREE_PATH)

    @auth_required
    def retrieve_terms_tree(self):
        """
        Request the T411 API for term types and terms.
        :return: decoded JSON terms tree
        """
        return self.get_json(T411API_TERMS_PATH)

    @auth_required
    def search(self, query):
        """
        Search torrents.
        :param query: dict
        :param query['category_id']: Int optional
        :param query['result_per_page']: Int optional
        :param query['page_index']: Int optional
        :param query['terms']: iterable of (term type id, term id) pairs
        :return: decoded JSON search result
        """
        url = T411API_SEARCH_PATH
        if query.get('expression') is not None:
            url += query['expression']

        url_params = {}
        if query.get('category_id') is not None:
            # using cat or cid will do the same result
            # but using cid without query expression will not broke
            # results
            url_params['cid'] = query['category_id']
        if query.get('result_per_page') is not None:
            url_params['limit'] = query['result_per_page']
        if query.get('page_index') is not None:
            url_params['offset'] = query['page_index']
        if query.get('terms') is not None:
            # Several terms of the same type are sent as a list under the
            # same 'term[<type>][]' key.
            for (term_type_id, term_id) in query['terms']:
                term_type_key_param = 'term[%s][]' % term_type_id

                if url_params.get(term_type_key_param) is None:
                    url_params[term_type_key_param] = []

                url_params[term_type_key_param].append(term_id)
        return self.get_json(url, params=url_params)

    @auth_required
    def details(self, torrent_id):
        """Return torrent details for *torrent_id*."""
        url = T411API_DETAILS_PATH + str(torrent_id)
        return self.get_json(url)
Beispiel #32
0
 def __init__(self, username=None, password=None, url_scheme='https'):
     """Create an unauthenticated T411 client.

     :param username: account name used later by auth()
     :param password: matching password
     :param url_scheme: 'http' or 'https' for all API calls
     """
     # No token until an auth call succeeds.
     self.api_token = None
     self.credentials = {'username': username, 'password': password}
     self.web_session = Session()
     self.api_template_url = url_scheme + '://' + T411API_DOMAIN_URL + '%s'
Beispiel #33
0
from __future__ import unicode_literals, division, absolute_import
from builtins import *  # pylint: disable=unused-import, redefined-builtin

import logging

from flexget import plugin
from flexget.event import event
from flexget.plugin import PluginWarning
from flexget.config_schema import one_or_more
from flexget.utils.requests import Session as RequestSession, TimedLimiter
from requests.exceptions import RequestException

plugin_name = 'join'
log = logging.getLogger(plugin_name)

requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('appspot.com', '5 seconds'))

JOIN_URL = 'https://joinjoaomgcd.appspot.com/_ah/api/messaging/v1/sendPush'


class JoinNotifier(object):
    """
    Example::

      notify:
        entries:
          via:
            - join:
                [api_key: <API_KEY> (your join api key. Only required for 'group' notifications)]
                [group: <GROUP_NAME> (name of group of join devices to notify. 'all', 'android', etc.)
Beispiel #34
0
from sqlalchemy import Column, Unicode, DateTime

from flexget import plugin, db_schema
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.requests import TimedLimiter, RequestException
from flexget.manager import Session
from flexget.utils.database import json_synonym
from flexget.utils.requests import Session as RequestSession
from flexget.utils.soup import get_soup
from flexget.utils.tools import parse_filesize

log = logging.getLogger('filelist')
Base = db_schema.versioned_base('filelist', 0)

requests = RequestSession()
requests.add_domain_limiter(TimedLimiter('filelist.ro', '2 seconds'))

BASE_URL = 'https://filelist.ro/'

CATEGORIES = {
    'all': 0,
    'anime': 24,
    'audio': 11,
    'cartoons': 15,
    'docs': 16,
    'games console': 10,
    'games pc': 9,
    'linux': 17,
    'misc': 18,
    'mobile': 22,
Beispiel #35
0
from flexget import plugin, db_schema
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.requests import TimedLimiter, RequestException
from flexget.manager import Session
from flexget.utils.database import json_synonym
from flexget.utils.requests import Session as RequestSession
from flexget.utils.soup import get_soup
from flexget.config_schema import one_or_more
from flexget.utils.tools import parse_filesize

log = logging.getLogger('morethantv')
Base = db_schema.versioned_base('morethantv', 0)

requests = RequestSession()
requests.add_domain_limiter(TimedLimiter(
    'morethan.tv', '5 seconds'))  # TODO find out if they want a delay

CATEGORIES = {
    'Movies': 'filter_cat[1]',
    'TV': 'filter_cat[2]',
    'Other': 'filter_cat[3]'
}

TAGS = [
    'action', 'adventure', 'animation', 'anime', 'art', 'asian', 'biography',
    'celebrities', 'comedy', 'cooking', 'crime', 'cult', 'documentary',
    'drama', 'educational', 'elclasico', 'family', 'fantasy', 'film.noir',
    'filmromanesc', 'food', 'football', 'formula.e', 'formula1', 'gameshow',
    'highlights', 'history', 'horror', 'investigation', 'lifestyle', 'liga1',
Beispiel #36
0
class UrlRewriteDescargas2020(object):
    """Descargas2020 urlrewriter and search."""

    schema = {'type': 'boolean', 'default': False}

    def __init__(self):
        # Session is created lazily by session().
        self.requests = None

    # urlrewriter API
    def url_rewritable(self, task, entry):
        """Return a truthy value for descargas2020-family page URLs that
        are not already direct .torrent links."""
        url = entry['url']
        rewritable_regex = r'^http:\/\/(www.)?(descargas2020|tvsinpagar|tumejortorrent|torrentlocura|torrentrapid).com\/.*'
        return re.match(rewritable_regex, url) and not url.endswith('.torrent')

    def session(self):
        """Return the lazily-created session, configured on first use."""
        # TODO: This is not used for all requests even ..
        if self.requests is None:
            self.requests = Session()
            # Fix: configure the instance session. The original mutated the
            # module-level `requests` object instead, leaving this session
            # without the user agent and rate limiter.
            self.requests.headers.update({
                'User-Agent':
                'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
            })
            self.requests.add_domain_limiter(
                TimedLimiter('descargas2020.com', '2 seconds'))
        return self.requests

    # urlrewriter API
    def url_rewrite(self, task, entry):
        entry['url'] = self.parse_download_page(entry['url'], task)

    @plugin.internet(log)
    def parse_download_page(self, url, task):
        """Resolve a descargas2020 page URL to its .torrent download URL.

        :raises UrlRewritingError: on request/parse failure or when no
            torrent ID can be located in the page.
        """
        log.verbose('Descargas2020 URL: %s', url)

        try:
            # Fix: go through session() so the session exists and is
            # configured even if no one called it before.
            page = self.session().get(url)
        except requests.RequestException as e:
            raise UrlRewritingError(e)
        try:
            soup = get_soup(page.text)
        except Exception as e:
            raise UrlRewritingError(e)

        torrent_id = None
        url_format = DESCARGAS2020_TORRENT_FORMAT

        # Raw strings throughout; the second fragment previously used
        # bare '\s'/'\d' escapes in a plain string literal.
        torrent_id_prog = re.compile(
            r"(?:parametros\s*=\s*\n?)\s*{\s*\n(?:\s*'\w+'\s*:.*\n)+\s*'(?:torrentID|id)"
            r"'\s*:\s*'(\d+)'")
        torrent_ids = soup.findAll(text=torrent_id_prog)
        if torrent_ids:
            match = torrent_id_prog.search(torrent_ids[0])
            if match:
                torrent_id = match.group(1)
        if not torrent_id:
            log.debug('torrent ID not found, searching openTorrent script')
            match = re.search(
                r'function openTorrent.*\n.*\{.*(\n.*)+window\.location\.href =\s*\".*\/(\d+.*)\";',
                page.text,
                re.MULTILINE,
            )
            if match:
                torrent_id = match.group(2).rstrip('/')

        if not torrent_id:
            raise UrlRewritingError('Unable to locate torrent ID from url %s' %
                                    url)

        return url_format.format(torrent_id)

    def search(self, task, entry, config=None):
        """Search descargas2020.com and return a set of Entry results."""
        if not config:
            log.debug('Descargas2020 disabled')
            return set()
        log.debug('Search Descargas2020')
        url_search = 'http://descargas2020.com/buscar'
        results = set()
        for search_string in entry.get('search_strings', [entry['title']]):
            query = normalize_unicode(search_string)
            # Strip a trailing year, e.g. ' (2016)'.
            query = re.sub(r' \(\d\d\d\d\)$', '', query)
            log.debug('Searching Descargas2020 %s', query)
            # ASCII-fold accented characters for the site's search form.
            query = unicodedata.normalize('NFD',
                                          query).encode('ascii', 'ignore')
            data = {'q': query}
            try:
                response = task.requests.post(url_search, data=data)
            except requests.RequestException as e:
                log.error('Error searching Descargas2020: %s', e)
                return results
            content = response.content
            soup = get_soup(content)
            soup2 = soup.find('ul', attrs={'class': 'buscar-list'})
            children = soup2.findAll('a', href=True)
            for child in children:
                # Fix: use a fresh name; the original rebound `entry` and
                # shadowed the method parameter.
                result = Entry()
                result['url'] = child['href']
                entry_title = child.find('h2')
                if entry_title is None:
                    log.debug('Ignore empty entry')
                    continue
                entry_title = entry_title.text
                if not entry_title:
                    continue
                try:
                    entry_quality_lan = re.search(
                        r'.+ \[([^\]]+)\](\[[^\]]+\])+$', entry_title).group(1)
                except AttributeError:
                    log.debug('Quality not found')
                    continue
                entry_title = re.sub(r' \[.+]$', '', entry_title)
                result['title'] = entry_title + ' ' + entry_quality_lan
                results.add(result)
        log.debug('Finish search Descargas2020 with %d entries', len(results))
        return results
Beispiel #37
0
from __future__ import unicode_literals, division, absolute_import
import logging
import urllib

from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.config_schema import one_or_more
from flexget.utils.requests import Session, get
from flexget.utils.search import normalize_unicode

log = logging.getLogger('rarbg')

requests = Session()
requests.set_domain_delay('torrentapi.org', '10.3 seconds')  # they only allow 1 request per 10 seconds

CATEGORIES = {
    'all': 0,

    # Movies
    'x264 720p': 45,
    'x264 1080p': 44,
    'XviD': 14,
    'Full BD': 42,

    # TV
    'HDTV': 41,
    'SDTV': 18
}

Beispiel #38
0
 def __init__(self):
     """Prepare an HTTP session and the IFTTT maker webhook URL template."""
     # Filled in per-event with (event_name, api_key).
     self.url_template = 'https://maker.ifttt.com/trigger/{}/with/key/{}'
     self.session = Session()
Beispiel #39
0
import logging

from flexget import plugin
from flexget.event import event
from flexget.config_schema import one_or_more
from flexget.plugin import PluginWarning
from flexget.utils.requests import Session as RequestSession, TimedLimiter
from requests.exceptions import RequestException

__name__ = 'rapidpush'
log = logging.getLogger(__name__)

RAPIDPUSH_URL = 'https://rapidpush.net/api'

requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('rapidpush.net', '5 seconds'))


class RapidpushNotifier(object):
    """
    Example::

      rapidpush:
        apikey: xxxxxxx (can also be a list of api keys)
        [category: category, default FlexGet]
        [group: device group, default no group]
        [channel: the broadcast notification channel, if provided it will be send to the channel subscribers instead of
            your devices, default no channel]
        [priority: 0 - 6 (6 = highest), default 2 (normal)]
    """
Beispiel #40
0
from sqlalchemy import Column, Unicode, DateTime
from dateutil.parser import parse as dateutil_parse

from flexget import plugin, db_schema
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.manager import Session
from flexget.utils.database import json_synonym
from flexget.utils.requests import Session as RequestSession, TimedLimiter, RequestException
from flexget.utils.tools import parse_filesize

log = logging.getLogger('passthepopcorn')
Base = db_schema.versioned_base('passthepopcorn', 1)

requests = RequestSession()
requests.add_domain_limiter(TimedLimiter('passthepopcorn.me', '5 seconds'))

TAGS = [
    'action', 'adventure', 'animation', 'arthouse', 'asian', 'biography',
    'camp', 'comedy', 'crime', 'cult', 'documentary', 'drama', 'experimental',
    'exploitation', 'family', 'fantasy', 'film.noir', 'history', 'horror',
    'martial.arts', 'musical', 'mystery', 'performance', 'philosophy',
    'politics', 'romance', 'sci.fi', 'short', 'silent', 'sport', 'thriller',
    'video.art', 'war', 'western'
]

ORDERING = {
    'Relevance': 'relevance',
    'Time added': 'timeadded',
    'Time w/o reseed': 'timenoreseed',
Beispiel #41
0
from requests.exceptions import RequestException

from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.plugin import PluginWarning
from flexget.utils.requests import Session as RequestSession
from flexget.utils.requests import TimedLimiter

plugin_name = 'prowl'
log = logging.getLogger(plugin_name)

PROWL_URL = 'https://api.prowlapp.com/publicapi/add'

requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('prowlapp.com', '5 seconds'))


class ProwlNotifier:
    """
    Send prowl notifications

    Example::

      notify:
        entries:
          via:
            - prowl:
                api_key: xxxxxxx
                [application: application name, default FlexGet]
Beispiel #42
0
from __future__ import unicode_literals, division, absolute_import
import difflib
import logging
import re

from BeautifulSoup import Tag

from flexget.utils.soup import get_soup
from flexget.utils.requests import Session
from flexget.utils.tools import str_to_int

log = logging.getLogger('utils.imdb')
# IMDb delivers a version of the page which is unparsable to unknown (and some known) user agents, such as requests'
# Spoof the old urllib user agent to keep results consistent
requests = Session()
requests.headers.update({'User-Agent': 'Python-urllib/2.6'})
#requests.headers.update({'User-Agent': random.choice(USERAGENTS)})

# this makes most of the titles to be returned in english translation, but not all of them
requests.headers.update({'Accept-Language': 'en-US,en;q=0.8'})

# give imdb a little break between requests (see: http://flexget.com/ticket/129#comment:1)
requests.set_domain_delay('imdb.com', '3 seconds')


def is_imdb_url(url):
    """Check whether *url* points at imdb.com; None for non-strings."""
    if isinstance(url, basestring):
        # Probably should use urlparse.
        return re.match(r'https?://[^/]*imdb\.com/', url)
    return None
Beispiel #43
0
from __future__ import unicode_literals, division, absolute_import
from builtins import *  # noqa pylint: disable=unused-import, redefined-builtin

import logging

from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.config_schema import one_or_more
from flexget.utils.requests import Session, TimedLimiter, RequestException
from flexget.components.sites.utils import normalize_scene
from flexget.plugin import PluginError

log = logging.getLogger('rarbg')

requests = Session()
requests.add_domain_limiter(
    TimedLimiter('torrentapi.org', '3 seconds')
)  # torrentapi.org allows 1 request per 2 seconds; 3s leaves a safety margin

CATEGORIES = {
    'all': 0,
    # Movies
    'x264': 17,
    'x264 720p': 45,
    'x264 1080p': 44,
    'x264 3D': 47,
    'XviD': 14,
    'XviD 720p': 48,
    'Full BD': 42,
    # TV
Beispiel #44
0
from __future__ import unicode_literals, division, absolute_import
from builtins import *  # pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import parse_qs, urlparse

import re
import logging

from flexget import plugin
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
from flexget.utils.requests import Session, TimedLimiter
from flexget.utils.soup import get_soup

log = logging.getLogger('google')

requests = Session()
requests.headers.update({'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'})
requests.add_domain_limiter(TimedLimiter('imdb.com', '2 seconds'))


class UrlRewriteGoogleCse(object):
    """Google custom query urlrewriter."""

    # urlrewriter API
    def url_rewritable(self, task, entry):
        """Return True for Google CSE / custom-search result URLs."""
        prefixes = (
            'http://www.google.com/cse?',
            'http://www.google.com/custom?',
        )
        return entry['url'].startswith(prefixes)
Beispiel #45
0
from flexget import plugin, db_schema
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.requests import TimedLimiter, RequestException
from flexget.manager import Session
from flexget.utils.database import json_synonym
from flexget.utils.requests import Session as RequestSession
from flexget.utils.soup import get_soup
from flexget.config_schema import one_or_more
from flexget.utils.tools import parse_filesize

log = logging.getLogger('morethantv')
Base = db_schema.versioned_base('morethantv', 0)

requests = RequestSession()
requests.add_domain_limiter(TimedLimiter('morethan.tv', '5 seconds'))  # TODO find out if they want a delay

CATEGORIES = {
    'Movies': 'filter_cat[1]',
    'TV': 'filter_cat[2]',
    'Other': 'filter_cat[3]'
}

TAGS = [
    'action',
    'adventure',
    'animation',
    'anime',
    'art',
    'asian',
Beispiel #46
0
import re

from flexget import plugin
from flexget.event import event
from flexget.plugins.internal.urlrewriting import UrlRewritingError
from flexget.utils.requests import Session, TimedLimiter
from flexget.utils.soup import get_soup

from flexget.entry import Entry
from flexget.utils.search import normalize_unicode

import unicodedata

log = logging.getLogger('newpct')

# Module-level HTTP session shared by the rewriter below.
requests = Session()
# Spoofs an old IE user agent — presumably for consistent/parseable results; confirm.
requests.headers.update(
    {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'})
# Throttle both site domains to one request every 2 seconds.
requests.add_domain_limiter(TimedLimiter('newpct1.com', '2 seconds'))
requests.add_domain_limiter(TimedLimiter('newpct.com', '2 seconds'))

# Templates for building .torrent download urls from a release id.
NEWPCT_TORRENT_FORMAT = 'http://www.newpct.com/torrents/{:0>6}.torrent'
NEWPCT1_TORRENT_FORMAT = 'http://www.newpct1.com/download/{}.torrent'


class UrlRewriteNewPCT(object):
    """NewPCT urlrewriter and search."""

    schema = {'type': 'boolean', 'default': False}

    # urlrewriter API
Beispiel #47
0
import logging
import xml.etree.ElementTree as ET

from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.plugin import PluginWarning
from flexget.utils.requests import Session as RequestSession, TimedLimiter
from requests.exceptions import RequestException

plugin_name = 'prowl'
log = logging.getLogger(plugin_name)

# Prowl public API endpoint for submitting notifications.
PROWL_URL = 'https://api.prowlapp.com/publicapi/add'

# Shared session: retry failed requests up to 3 times, one request per 5 seconds.
requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('prowlapp.com', '5 seconds'))


class ProwlNotifier(object):
    """
    Send prowl notifications

    Example::

      notify:
        entries:
          via:
            - prowl:
                api_key: xxxxxxx
                [application: application name, default FlexGet]
Beispiel #48
0
from __future__ import unicode_literals, division, absolute_import
import logging
import re

from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.plugin import get_plugin_by_name
from flexget.utils.cached_input import cached
from flexget.utils.requests import RequestException, Session
from flexget.utils.soup import get_soup

log = logging.getLogger('letterboxd')
logging.getLogger('api_tmdb').setLevel(logging.CRITICAL)

# Shared session with retries; letterboxd.com throttled to one request per second.
requests = Session(max_retries=5)
requests.set_domain_delay('letterboxd.com', '1 seconds')
# Root url all list/film paths are joined onto.
base_url = 'http://letterboxd.com'

SLUGS = {
    'default': {
        'p_slug': '/%(user)s/list/%(list)s/',
        'f_slug': 'data-film-slug'},
    'diary': {
        'p_slug': '/%(user)s/films/diary/',
        'f_slug': 'data-film-slug'},
    'likes': {
        'p_slug': '/%(user)s/likes/films/',
        'f_slug': 'data-film-link'},
    'rated': {
        'p_slug': '/%(user)s/films/ratings/',
Beispiel #49
0
import logging
import re

from urllib import quote

from flexget import plugin
from flexget import validator
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.soup import get_soup
from flexget.utils.search import torrent_availability, normalize_unicode, clean_title
from flexget.utils.requests import Session

log = logging.getLogger('search_torrentshack')

# Module-level HTTP session shared by the search plugin below.
session = Session()

CATEGORIES = {
    'Apps/PC': 100,
    'Apps/misc': 150,
    'eBooks': 180,
    'Games/PC': 200,
    'Games/PS3': 240,
    'Games/Xbox360': 260,
    'HandHeld': 280,
    'Movies/x264': 300,
    'REMUX': 320,
    'Movies/DVD-R': 350,
    'Movies/XviD': 400,
    'Music/MP3': 450,
    'Music/FLAC': 480,
Beispiel #50
0
class InputWhatCD(object):
    """A plugin that searches what.cd

    == Usage:

    All parameters except `username` and `password` are optional.

    whatcd:
        username:
        password:

        user_agent: (A custom user-agent for the client to report.
                     It is NOT A GOOD IDEA to spoof a browser with
                     this. You are responsible for your account.)

        search: (general search filter)

        artist: (artist name)
        album: (album name)
        year: (album year)

        encoding: (encoding specifics - 192, 320, lossless, etc.)
        format: (MP3, FLAC, AAC, etc.)
        media: (CD, DVD, vinyl, Blu-ray, etc.)
        release_type: (album, soundtrack, EP, etc.)

        log: (log specification - true, false, '100%', or '<100%')
        hascue: (has a cue file - true or false)
        scene: (is a scene release - true or false)
        vanityhouse: (is a vanity house release - true or false)
        leech_type: ('freeleech', 'neutral', 'either', or 'normal')

        tags: (a list of tags to match - drum.and.bass, new.age, blues, etc.)
        tag_type: (match 'any' or 'all' of the items in `tags`)
    """

    # Aliases for config -> api params
    ALIASES = {
        "artist": "artistname",
        "album": "groupname",
        "leech_type": "freetorrent",
        "release_type": "releaseType",
        "tags": "taglist",
        "tag_type": "tags_type",
        "search": "searchstr",
        "log": "haslog",
    }

    # API parameters
    # None means a raw value entry (no validation)
    # A dict means a choice with a mapping for the API
    # A list is just a choice with no mapping
    PARAMS = {
        "searchstr": None,
        "taglist": None,
        "artistname": None,
        "groupname": None,
        "year": None,
        "tags_type": {
            "any": 0,
            "all": 1,
        },
        "encoding": [
            "192", "APS (VBR)", "V2 (VBR)", "V1 (VBR)", "256", "APX (VBR)",
            "V0 (VBR)", "320", "lossless", "24bit lossless", "V8 (VBR)"
        ],
        "format": [
            "MP3", "FLAC", "AAC", "AC3", "DTS"
        ],
        "media": [
            "CD", "DVD", "vinyl", "soundboard", "SACD", "DAT", "cassette",
            "WEB", "Blu-ray"
        ],
        "releaseType": {
            "album": 1,
            "soundtrack": 3,
            "EP": 5,
            "anthology": 6,
            "compilation": 7,
            "DJ mix": 8,
            "single": 9,
            "live album": 11,
            "remix": 13,
            "bootleg": 14,
            "interview": 15,
            "mixtape": 16,
            "unknown": 21,
            "concert recording": 22,
            "demo": 23
        },
        "haslog": {
            "False": 0,
            "True": 1,
            "100%": 100,
            "<100%": -1
        },
        "freetorrent": {
            "freeleech": 1,
            "neutral": 2,
            "either": 3,
            "normal": 0,
        },
        "hascue": {
            "False": 0,
            "True": 1,
        },
        "scene": {
            "False": 0,
            "True": 1,
        },
        "vanityhouse": {
            "False": 0,
            "True": 1,
        }
    }

    def _key(self, key):
        """Gets the API parameter name for a config key, or None if unknown."""
        # Plain membership tests; the previous try/except KeyError was unreachable.
        if key in self.ALIASES:
            return self.ALIASES[key]
        if key in self.PARAMS:
            return key
        return None

    def _opts(self, key):
        """Gets the options for the specified key (None means free-form value)."""
        try:
            return self.PARAMS[self._key(key)]
        except KeyError:
            return None

    def _getval(self, key, val):
        """Translates a config value into the API value for the given key."""
        # No alias or param by that name
        if self._key(key) is None:
            return None

        opts = self._opts(key)
        if opts is None:
            # Free-form value; lists are sent comma-separated
            if isinstance(val, list):
                return ",".join(val)
            return val
        elif isinstance(opts, dict):
            # Options, translate the input to output
            # The str cast converts bools to 'True'/'False' for use as keys
            return opts[str(val)]
        else:
            # List of options, check it's in the list
            if val not in opts:
                return None
            return val

    def __init__(self):
        """Set up the config schema."""
        # list(...) around .keys() keeps the enums plain lists on python 3,
        # where dict.keys() returns a non-serializable view object.
        self.schema = {
            'type': 'object',
            'properties': {
                'username': {'type': 'string'},
                'password': {'type': 'string'},
                'user_agent': {'type': 'string'},
                'search': {'type': 'string'},
                'artist': {'type': 'string'},
                'album': {'type': 'string'},
                'year': {'type': ['string', 'integer']},
                'tags': one_or_more({'type': 'string'}),
                'tag_type': {'type': 'string', 'enum': list(self._opts('tag_type').keys())},
                'encoding': {'type': 'string', 'enum': self._opts('encoding')},
                'format': {'type': 'string', 'enum': self._opts('format')},
                'media': {'type': 'string', 'enum': self._opts('media')},
                'release_type': {'type': 'string', 'enum': list(self._opts('release_type').keys())},
                'log': {'oneOf': [{'type': 'string', 'enum': list(self._opts('log').keys())}, {'type': 'boolean'}]},
                'leech_type': {'type': 'string', 'enum': list(self._opts('leech_type').keys())},
                'hascue': {'type': 'boolean'},
                'scene': {'type': 'boolean'},
                'vanityhouse': {'type': 'boolean'},
            },
            'required': ['username', 'password'],
            'additionalProperties': False
        }

    def _login(self, config):
        """
        Log in and store auth data from the server
        Adapted from https://github.com/isaaczafuta/whatapi
        """
        data = {
            'username': config['username'],
            'password': config['password'],
            'keeplogged': 1,
        }

        r = self.session.post("https://ssl.what.cd/login.php", data=data,
                              allow_redirects=False)
        # A successful login redirects straight to index.php
        if r.status_code != 302 or r.headers.get('location') != "index.php":
            raise PluginError("Failed to log in to What.cd")

        accountinfo = self._request("index")

        self.authkey = accountinfo["authkey"]
        self.passkey = accountinfo["passkey"]
        log.info("Logged in to What.cd")

    def _request(self, action, **kwargs):
        """
        Make an AJAX request to a given action page
        Adapted from https://github.com/isaaczafuta/whatapi
        """
        ajaxpage = 'https://ssl.what.cd/ajax.php'

        params = {}

        # Filter params and map config values -> api values
        # (.items() instead of the python-2-only .iteritems())
        for k, v in kwargs.items():
            key = self._key(k)
            if key is not None:
                params[key] = self._getval(k, v)

        # Params other than the searching ones
        params['action'] = action
        if 'page' in kwargs:
            params['page'] = kwargs['page']

        r = self.session.get(ajaxpage, params=params, allow_redirects=False)
        if r.status_code != 200:
            raise PluginError("What.cd returned a non-200 status code")

        try:
            json_response = r.json()
            if json_response['status'] != "success":

                # Try to deal with errors returned by the API
                error = json_response.get('error', json_response.get('status'))
                if not error or error == "failure":
                    error = json_response.get('response')
                if not error:
                    error = str(json_response)

                raise PluginError("What.cd gave a failure response: "
                                  "'{0}'".format(error))
            return json_response['response']
        except (ValueError, TypeError, KeyError):
            raise PluginError("What.cd returned an invalid response")

    @cached('whatcd')
    @plugin.internet(log)
    def on_task_input(self, task, config):
        """Search on What.cd"""

        self.session = Session()
        user_agent = config.get('user_agent')
        if user_agent:
            # Using a custom user agent
            self.session.headers.update({"User-Agent": user_agent})

        # From the API docs: "Refrain from making more than five (5) requests every ten (10) seconds"
        self.session.set_domain_delay('ssl.what.cd', '2 seconds')

        # Login
        self._login(config)

        # Perform the query, collecting every page of results
        results = []
        page = 1
        while True:
            result = self._request("browse", page=page, **config)
            if not result['results']:
                break
            results.extend(result["results"])
            pages = result['pages']
            page = result['currentPage']
            log.info("Got {0} of {1} pages".format(page, pages))
            if page >= pages:
                break
            page += 1

        # Logged in and made a request successfully, it's ok if nothing matches
        task.no_entries_ok = True

        # Parse the needed information out of the response
        entries = []
        for result in results:
            # Get basic information on the release
            info = dict((k, result[k]) for k in ('artist', 'groupName', 'groupYear'))

            # Releases can have multiple download options
            for tor in result['torrents']:
                temp = info.copy()
                temp.update(dict((k, tor[k]) for k in ('media', 'encoding', 'format', 'torrentId')))

                entries.append(Entry(
                    title="{artist} - {groupName} - {groupYear} "
                          "({media} - {format} - {encoding})-{torrentId}.torrent".format(**temp),
                    url="https://what.cd/torrents.php?action=download&"
                        "id={0}&authkey={1}&torrent_pass={2}".format(temp['torrentId'], self.authkey, self.passkey),
                    torrent_seeds=tor['seeders'],
                    torrent_leeches=tor['leechers'],
                    # Size is given in bytes, convert it
                    content_size=int(tor['size'] / (1024**2) * 100) / 100
                ))

        return entries
Beispiel #51
0
class ImdbEntrySet(MutableSet):
    """A mutable set of entries backed by a user's list on imdb.com."""

    schema = {
        'type': 'object',
        'properties': {
            'login': {'type': 'string'},
            'password': {'type': 'string'},
            'list': {'type': 'string'},
            'force_language': {'type': 'string', 'default': 'en-us'},
        },
        'additionalProperties': False,
        'required': ['login', 'password', 'list'],
    }

    def __init__(self, config):
        self.config = config
        # HTTP session, throttled to one request per 5 seconds against imdb.com
        self._session = RequestSession()
        self._session.add_domain_limiter(TimedLimiter('imdb.com', '5 seconds'))
        self._session.headers.update(
            {'Accept-Language': config.get('force_language', 'en-us')})
        self.user_id = None
        self.list_id = None
        self.cookies = None
        # hidden form value imdb requires on add/remove posts
        self.hidden_value = None
        # lazily-fetched cache of list entries
        self._items = None
        self._authenticated = False

    @property
    def session(self):
        """The HTTP session, authenticating on first use."""
        if not self._authenticated:
            self.authenticate()
        return self._session

    def get_user_id_and_hidden_value(self, cookies=None):
        """Resolve the logged-in user's id ('ur...') from the profile redirect.

        Also stores the hidden form value needed for later add/remove posts.
        Returns the user id, or None if it could not be determined (e.g. the
        supplied cookies are no longer valid).
        """
        try:
            if cookies:
                self._session.cookies = cookiejar_from_dict(cookies)
            # We need to allow for redirects here as it performs 1-2 redirects before reaching the real profile url
            response = self._session.get('https://www.imdb.com/profile',
                                         allow_redirects=True)
        except RequestException as e:
            raise PluginError(str(e))

        user_id_match = re.search(r'ur\d+(?!\d)', response.url)
        if user_id_match:
            # extract the hidden form value that we need to do post requests later on
            try:
                soup = get_soup(response.text)
                self.hidden_value = soup.find('input',
                                              attrs={'id': '49e6c'})['value']
            except Exception as e:
                log.warning(
                    'Unable to locate the hidden form value '
                    '49e6c'
                    '. Without it, you might not be able to '
                    'add or remove items. %s',
                    e,
                )
        return user_id_match.group() if user_id_match else None

    def authenticate(self):
        """Authenticates a session with IMDB, and grabs any IDs needed for getting/modifying list."""
        cached_credentials = False
        # NOTE: Session here is the database session (flexget.manager.Session),
        # not an HTTP session.
        with Session() as session:
            user = (session.query(IMDBListUser)
                    .filter(IMDBListUser.user_name == self.config.get('login'))
                    .one_or_none())
            if user and user.cookies and user.user_id:
                log.debug('login credentials found in cache, testing')
                self.user_id = user.user_id
                if not self.get_user_id_and_hidden_value(cookies=user.cookies):
                    log.debug('cache credentials expired')
                    user.cookies = None
                    self._session.cookies.clear()
                else:
                    self.cookies = user.cookies
                    cached_credentials = True
            if not cached_credentials:
                log.debug(
                    'user credentials not found in cache or outdated, fetching from IMDB'
                )
                url_credentials = (
                    'https://www.imdb.com/ap/signin?openid.return_to=https%3A%2F%2Fwww.imdb.com%2Fap-signin-'
                    'handler&openid.identity=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&'
                    'openid.assoc_handle=imdb_mobile_us&openid.mode=checkid_setup&openid.claimed_id=http%3A%'
                    '2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.ns=http%3A%2F%2Fspecs.ope'
                    'nid.net%2Fauth%2F2.0')
                try:
                    # we need to get some cookies first
                    self._session.get('https://www.imdb.com')
                    r = self._session.get(url_credentials)
                except RequestException as e:
                    raise PluginError(e.args[0])
                soup = get_soup(r.content)
                form = soup.find('form', attrs={'name': 'signIn'})
                inputs = form.select('input')
                data = dict((i['name'], i.get('value')) for i in inputs
                            if i.get('name'))
                data['email'] = self.config['login']
                data['password'] = self.config['password']
                action = form.get('action')
                # SECURITY: do not log the plaintext password (even at debug level)
                log.debug('email=%s', data['email'])
                self._session.headers.update({'Referer': url_credentials})
                self._session.post(action, data=data)
                self._session.headers.update(
                    {'Referer': 'https://www.imdb.com/'})

                self.user_id = self.get_user_id_and_hidden_value()
                if not self.user_id:
                    raise plugin.PluginError(
                        'Login to IMDB failed. Check your credentials.')
                self.cookies = self._session.cookies.get_dict(
                    domain='.imdb.com')
            # Get list ID, preferring the cached one
            if user:
                for user_list in user.lists:
                    if self.config['list'] == user_list.list_name:
                        log.debug(
                            'found list ID %s matching list name %s in cache',
                            user_list.list_id,
                            user_list.list_name,
                        )
                        self.list_id = user_list.list_id
            if not self.list_id:
                log.debug(
                    'could not find list ID in cache, fetching from IMDB')
                if self.config['list'] == 'watchlist':
                    # Probe the watchlist with a known title to learn its list id
                    data = {
                        'consts[]': 'tt0133093',
                        'tracking_tag': 'watchlistRibbon'
                    }
                    wl_data = self._session.post(
                        'https://www.imdb.com/list/_ajax/watchlist_has',
                        data=data,
                        cookies=self.cookies,
                    ).json()
                    try:
                        self.list_id = wl_data['list_id']
                    except KeyError:
                        raise PluginError(
                            'No list ID could be received. Please initialize list by '
                            'manually adding an item to it and try again')
                elif self.config['list'] in IMMUTABLE_LISTS or self.config[
                        'list'].startswith('ls'):
                    # Already a literal list id (or an immutable pseudo-list)
                    self.list_id = self.config['list']
                else:
                    # Look the named list up in the user's list dropdown
                    data = {'tconst': 'tt0133093'}
                    list_data = self._session.post(
                        'https://www.imdb.com/list/_ajax/wlb_dropdown',
                        data=data,
                        cookies=self.cookies,
                    ).json()
                    for li in list_data['items']:
                        if li['wlb_text'] == self.config['list']:
                            self.list_id = li['data_list_id']
                            break
                    else:
                        raise plugin.PluginError('Could not find list %s' %
                                                 self.config['list'])

            # Persist credentials and the resolved list id for next run
            user = IMDBListUser(self.config['login'], self.user_id,
                                self.cookies)
            imdb_list = IMDBListList(self.list_id, self.config['list'],
                                     self.user_id)
            user.lists.append(imdb_list)
            session.merge(user)

        self._authenticated = True

    def invalidate_cache(self):
        """Drop the cached items so the next access re-fetches the list."""
        self._items = None

    @property
    def items(self):
        """Entries in the remote list, fetched once and cached until invalidated."""
        if self._items is None:
            log.debug('fetching items from IMDB')
            try:
                r = self.session.get(
                    'https://www.imdb.com/list/export?list_id=%s&author_id=%s'
                    % (self.list_id, self.user_id),
                    cookies=self.cookies,
                )
                lines = list(r.iter_lines(decode_unicode=True))
            except RequestException as e:
                raise PluginError(e.args[0])
            # Normalize headers to lowercase
            lines[0] = lines[0].lower()
            self._items = []
            for row in csv.DictReader(lines):
                log.debug('parsing line from csv: %s', row)

                try:
                    item_type = row['title type'].lower()
                    name = row['title']
                    # '????' marks an unknown year in the csv export
                    year = int(row['year']) if row['year'] != '????' else None
                    created = (datetime.strptime(row['created'], '%Y-%m-%d')
                               if row.get('created') else None)
                    modified = (datetime.strptime(row['modified'], '%Y-%m-%d')
                                if row.get('modified') else None)
                    entry = Entry({
                        # year is already int-or-None here; the old comparison
                        # against '????' was always true and produced titles
                        # like 'Name (None)' for unknown years
                        'title': '%s (%s)' % (name, year) if year is not None else name,
                        'url': row['url'],
                        'imdb_id': row['const'],
                        'imdb_url': row['url'],
                        'imdb_list_position': int(row['position']) if 'position' in row else None,
                        'imdb_list_created': created,
                        'imdb_list_modified': modified,
                        'imdb_list_description': row.get('description'),
                        'imdb_name': name,
                        'imdb_year': year,
                        'imdb_user_score': float(row['imdb rating']) if row['imdb rating'] else None,
                        'imdb_votes': int(row['num votes']) if row['num votes'] else None,
                        'imdb_genres': [genre.strip() for genre in row['genres'].split(',')],
                    })

                except ValueError as e:
                    log.debug(
                        'no movie row detected, skipping. %s. Exception: %s',
                        row, e)
                    continue

                if item_type in MOVIE_TYPES:
                    entry['movie_name'] = name
                    entry['movie_year'] = year
                elif item_type in SERIES_TYPES:
                    entry['series_name'] = name
                    entry['series_year'] = year
                elif item_type in OTHER_TYPES:
                    entry['title'] = name
                else:
                    log.verbose(
                        'Unknown IMDB type entry received: %s. Skipping',
                        item_type)
                    continue
                self._items.append(entry)
        return self._items

    @property
    def immutable(self):
        """Reason string when the configured list cannot be modified, else None."""
        if self.config['list'] in IMMUTABLE_LISTS:
            return '%s list is not modifiable' % self.config['list']

    @classmethod
    def _from_iterable(cls, it):
        # MutableSet hook; declared as a classmethod per the collections.abc contract.
        # TODO: is this the right answer? the returned object won't have our custom __contains__ logic
        return set(it)

    def __contains__(self, entry):
        return self.get(entry) is not None

    def __iter__(self):
        return iter(self.items)

    def discard(self, entry):
        """Remove the entry's movie from the remote list (warns if not present)."""
        if self.config['list'] in IMMUTABLE_LISTS:
            raise plugin.PluginError('%s lists are not modifiable' %
                                     ' and '.join(IMMUTABLE_LISTS))
        if 'imdb_id' not in entry:
            log.warning(
                'Cannot remove %s from imdb_list because it does not have an imdb_id',
                entry['title'],
            )
            return
        # Get the list item id
        item_ids = None
        urls = []
        if self.config['list'] == 'watchlist':
            method = 'delete'
            data = {
                'consts[]': entry['imdb_id'],
                'tracking_tag': 'watchlistRibbon'
            }
            status = self.session.post(
                'https://www.imdb.com/list/_ajax/watchlist_has',
                data=data,
                cookies=self.cookies).json()
            item_ids = status.get('has', {}).get(entry['imdb_id'])
            urls = ['https://www.imdb.com/watchlist/%s' % entry['imdb_id']]
        else:
            method = 'post'
            data = {'tconst': entry['imdb_id']}
            status = self.session.post(
                'https://www.imdb.com/list/_ajax/wlb_dropdown',
                data=data,
                cookies=self.cookies).json()
            for a_list in status['items']:
                if a_list['data_list_id'] == self.list_id:
                    item_ids = a_list['data_list_item_ids']
                    break

            # item_ids stays None when the movie is not in the list; guard the
            # iteration (previously this raised TypeError before the check below)
            for item_id in item_ids or []:
                urls.append('https://www.imdb.com/list/%s/li%s/delete' %
                            (self.list_id, item_id))
        if not item_ids:
            log.warning('%s is not in list %s, cannot be removed',
                        entry['imdb_id'], self.list_id)
            return

        for url in urls:
            log.debug(
                'found movie %s with ID %s in list %s, removing',
                entry['title'],
                entry['imdb_id'],
                self.list_id,
            )
            self.session.request(method,
                                 url,
                                 data={'49e6c': self.hidden_value},
                                 cookies=self.cookies)
        # We don't need to invalidate our cache if we remove the item ourselves
        # (hoisted out of the loop above; the filter is idempotent)
        self._items = (
            [i for i in self._items
             if i['imdb_id'] != entry['imdb_id']] if self._items else None)

    def _add(self, entry):
        """Submit a new movie to imdb. (does not update cache)"""
        if self.config['list'] in IMMUTABLE_LISTS:
            raise plugin.PluginError('%s lists are not modifiable' %
                                     ' and '.join(IMMUTABLE_LISTS))
        if 'imdb_id' not in entry:
            log.warning(
                'Cannot add %s to imdb_list because it does not have an imdb_id',
                entry['title'])
            return
        # Manually calling authenticate to fetch list_id and cookies and hidden form value
        self.authenticate()
        if self.config['list'] == 'watchlist':
            method = 'put'
            url = 'https://www.imdb.com/watchlist/%s' % entry['imdb_id']
        else:
            method = 'post'
            url = 'https://www.imdb.com/list/%s/%s/add' % (self.list_id,
                                                           entry['imdb_id'])

        log.debug('adding title %s with ID %s to imdb %s', entry['title'],
                  entry['imdb_id'], self.list_id)
        self.session.request(method,
                             url,
                             cookies=self.cookies,
                             data={'49e6c': self.hidden_value})

    def add(self, entry):
        self._add(entry)
        # Invalidate the cache so that we get the canonical entry from the imdb list
        self.invalidate_cache()

    def __ior__(self, entries):
        for entry in entries:
            self._add(entry)
        self.invalidate_cache()
        return self

    def __len__(self):
        return len(self.items)

    @property
    def online(self):
        """ Set the online status of the plugin, online plugin should be treated differently in certain situations,
        like test mode"""
        return True

    def get(self, entry):
        """Return the cached list entry matching `entry`'s imdb_id, or None."""
        if not entry.get('imdb_id'):
            log.debug(
                'entry %s does not have imdb_id, cannot compare to imdb list items',
                entry)
            return None
        log.debug('finding %s in imdb list', entry['imdb_id'])
        for e in self.items:
            if e['imdb_id'] == entry['imdb_id']:
                return e
        log.debug('could not find %s in imdb list items', entry['imdb_id'])
        return None
Beispiel #52
0
from requests.exceptions import RequestException

from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.plugin import PluginWarning
from flexget.utils.requests import Session as RequestSession
from flexget.utils.requests import TimedLimiter

plugin_name = 'pushsafer'
log = logging.getLogger(plugin_name)

# Pushsafer API endpoint for submitting notifications.
PUSHSAFER_URL = 'https://www.pushsafer.com/api'

# Shared session: retry failed requests up to 3 times, one request per 5 seconds.
requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('pushsafer.com', '5 seconds'))


class PushsaferNotifier:
    """
    Example::

      notify:
        entries:
          via:
            - pushsafer:
                private_key: <string> your private key (can also be a alias key) - Required
                url: <string> (default: '{{imdb_url}}')
                url_title: <string> (default: (none))
                device: <string> ypur device or device group id (default: (none))
Beispiel #53
0
from sqlalchemy import Column, Unicode, DateTime

from flexget import plugin, db_schema
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.requests import TimedLimiter, RequestException
from flexget.manager import Session
from flexget.utils.database import json_synonym
from flexget.utils.requests import Session as RequestSession
from flexget.utils.soup import get_soup
from flexget.utils.tools import parse_filesize

log = logging.getLogger('filelist')
Base = db_schema.versioned_base('filelist', 0)

# Shared session, throttled to one request per 2 seconds against filelist.ro.
requests = RequestSession()
requests.add_domain_limiter(TimedLimiter('filelist.ro', '2 seconds'))

# Root url all site paths are joined onto.
BASE_URL = 'https://filelist.ro/'

CATEGORIES = {
    'all': 0,
    'anime': 24,
    'audio': 11,
    'cartoons': 15,
    'docs': 16,
    'games console': 10,
    'games pc': 9,
    'linux': 17,
    'misc': 18,
    'mobile': 22,
Beispiel #54
0
from __future__ import unicode_literals, division, absolute_import
import difflib
import logging
import re

from BeautifulSoup import Tag

from flexget.utils.soup import get_soup
from flexget.utils.requests import Session
from flexget.utils.tools import str_to_int


log = logging.getLogger('utils.imdb')
# IMDb delivers a version of the page which is unparsable to unknown (and some
# known) user agents, such as requests' default one.
# Spoof the old urllib user agent to keep results consistent.
requests = Session()
requests.headers.update({'User-Agent': 'Python-urllib/2.6'})

# This makes most of the titles to be returned in english translation,
# but not all of them.
requests.headers.update({'Accept-Language': 'en-US,en;q=0.8'})

# Give imdb a little break between requests (see: http://flexget.com/ticket/129#comment:1)
requests.set_domain_delay('imdb.com', '3 seconds')


def is_imdb_url(url):
    """Tests the url to see if it's for imdb.com."""
    if not isinstance(url, basestring):
        return
    # Probably should use urlparse.
Beispiel #55
0
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.requests import Session as RequestSession, TimedLimiter
from flexget.utils.soup import get_soup
from requests.exceptions import HTTPError, RequestException

from datetime import datetime, date, timedelta

import unicodedata
import re

log = logging.getLogger('search_npo')

# Shared module-level session: retry transient failures up to 3 times and
# throttle requests to npostart.nl to one every 8 seconds.
requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('npostart.nl', '8 seconds'))


class NPOWatchlist(object):
    """
        Produces entries for every episode on the user's npostart.nl watchlist (Dutch public television).
        Entries can be downloaded using http://arp242.net/code/download-npo

        If 'remove_accepted' is set to 'yes', the plugin will delete accepted entries from the watchlist after download
            is complete.
        If 'max_episode_age_days' is set (and not 0), entries will only be generated for episodes broadcast in the last
            x days.  This only applies to episodes related to series the user is following.

        For example:
            npo_watchlist:
Beispiel #56
0
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.requests import Session as RequestSession, TimedLimiter
from flexget.utils.soup import get_soup
from requests.exceptions import HTTPError, RequestException

from datetime import datetime, date, timedelta

import unicodedata
import re

log = logging.getLogger('search_npo')

# Shared module-level session: retry transient failures up to 3 times and
# throttle requests to npostart.nl to one every 8 seconds.
requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('npostart.nl', '8 seconds'))


class NPOWatchlist(object):
    """
        Produces entries for every episode on the user's npostart.nl watchlist (Dutch public television).
        Entries can be downloaded using http://arp242.net/code/download-npo

        If 'remove_accepted' is set to 'yes', the plugin will delete accepted entries from the watchlist after download
            is complete.
        If 'max_episode_age_days' is set (and not 0), entries will only be generated for episodes broadcast in the last
            x days.  This only applies to episodes related to series the user is following.

        For example:
            npo_watchlist:
Beispiel #57
0
 def __init__(self, username=None, password=None, url_scheme='http'):
     """Set up a T411 API client.

     :param username: account login name, if authentication is wanted
     :param password: account password matching ``username``
     :param url_scheme: protocol used to reach the API ('http' by default)
     """
     # No API token until a login has been performed.
     self.api_token = None
     # Raw credentials are kept for the later authentication request.
     self.credentials = dict(username=username, password=password)
     # The '%s' placeholder is substituted with an endpoint path per call.
     self.api_template_url = '{0}://{1}%s'.format(url_scheme, T411API_DOMAIN_URL)
     self.web_session = Session()
Beispiel #58
0
class InputWhatCD(object):
    """A plugin that searches what.cd

    == Usage:

    All parameters except `username` and `password` are optional.

    whatcd:
        username:
        password:

        user_agent: (A custom user-agent for the client to report.
                     It is NOT A GOOD IDEA to spoof a browser with
                     this. You are responsible for your account.)

        search: (general search filter)

        artist: (artist name)
        album: (album name)
        year: (album year)

        encoding: (encoding specifics - 192, 320, lossless, etc.)
        format: (MP3, FLAC, AAC, etc.)
        media: (CD, DVD, vinyl, Blu-ray, etc.)
        release_type: (album, soundtrack, EP, etc.)

        log: (log specification - true, false, '100%', or '<100%')
        hascue: (has a cue file - true or false)
        scene: (is a scene release - true or false)
        vanityhouse: (is a vanity house release - true or false)
        leech_type: ('freeleech', 'neutral', 'either', or 'normal')

        tags: (a list of tags to match - drum.and.bass, new.age, blues, etc.)
        tag_type: (match 'any' or 'all' of the items in `tags`)
    """

    # Aliases for config -> api params
    ALIASES = {
        "artist": "artistname",
        "album": "groupname",
        "leech_type": "freetorrent",
        "release_type": "releaseType",
        "tags": "taglist",
        "tag_type": "tags_type",
        "search": "searchstr",
        "log": "haslog",
    }

    # API parameters
    # None means a raw value entry (no validation)
    # A dict means a choice with a mapping for the API
    # A list is just a choice with no mapping
    PARAMS = {
        "searchstr":
        None,
        "taglist":
        None,
        "artistname":
        None,
        "groupname":
        None,
        "year":
        None,
        "tags_type": {
            "any": 0,
            "all": 1,
        },
        "encoding": [
            "192", "APS (VBR)", "V2 (VBR)", "V1 (VBR)", "256", "APX (VBR)",
            "V0 (VBR)", "320", "lossless", "24bit lossless", "V8 (VBR)"
        ],
        "format": ["MP3", "FLAC", "AAC", "AC3", "DTS"],
        "media": [
            "CD", "DVD", "vinyl", "soundboard", "SACD", "DAT", "cassette",
            "WEB", "Blu-ray"
        ],
        "releaseType": {
            "album": 1,
            "soundtrack": 3,
            "EP": 5,
            "anthology": 6,
            "compilation": 7,
            "DJ mix": 8,
            "single": 9,
            "live album": 11,
            "remix": 13,
            "bootleg": 14,
            "interview": 15,
            "mixtape": 16,
            "unknown": 21,
            "concert recording": 22,
            "demo": 23
        },
        "haslog": {
            "False": 0,
            "True": 1,
            "100%": 100,
            "<100%": -1
        },
        "freetorrent": {
            "freeleech": 1,
            "neutral": 2,
            "either": 3,
            "normal": 0,
        },
        "hascue": {
            "False": 0,
            "True": 1,
        },
        "scene": {
            "False": 0,
            "True": 1,
        },
        "vanityhouse": {
            "False": 0,
            "True": 1,
        }
    }

    def _key(self, key):
        """Gets the API key name from the entered key.

        Config-facing names (e.g. 'artist') map to API names (e.g.
        'artistname') via ALIASES; unaliased keys pass through unchanged.
        """
        if key in self.ALIASES:
            return self.ALIASES[key]
        return key

    def _opts(self, key):
        """Gets the options for the specified key.

        Returns the PARAMS entry (None, dict mapping, or list of choices)
        for the API name corresponding to `key`.
        """
        return self.PARAMS[self._key(key)]

    def _getval(self, key, val):
        """Gets the value for the specified key based on a config option"""

        opts = self._opts(key)
        if isinstance(opts, dict):
            # Translate the input value to the What.CD API value
            # The str cast converts bools to 'True'/'False' for use as keys
            # This allows for options that have True/False/Other values
            return opts[str(val)]
        elif isinstance(val, list):
            # Fix yaml parser making a list out of a string
            return ",".join(val)

        return val

    def __init__(self):
        """Set up the schema"""

        # JSON schema is built from PARAMS/ALIASES so the accepted config
        # values stay in sync with the API mappings above.
        self.schema = {
            'type': 'object',
            'properties': {
                'username': {
                    'type': 'string'
                },
                'password': {
                    'type': 'string'
                },
                'user_agent': {
                    'type': 'string'
                },
                'search': {
                    'type': 'string'
                },
                'artist': {
                    'type': 'string'
                },
                'album': {
                    'type': 'string'
                },
                'year': {
                    'type': ['string', 'integer']
                },
                'tags': one_or_more({'type': 'string'}),
                'tag_type': {
                    'type': 'string',
                    'enum': list(self._opts('tag_type').keys())
                },
                'encoding': {
                    'type': 'string',
                    'enum': self._opts('encoding')
                },
                'format': {
                    'type': 'string',
                    'enum': self._opts('format')
                },
                'media': {
                    'type': 'string',
                    'enum': self._opts('media')
                },
                'release_type': {
                    'type': 'string',
                    'enum': list(self._opts('release_type').keys())
                },
                'log': {
                    'oneOf': [{
                        'type': 'string',
                        'enum': list(self._opts('log').keys())
                    }, {
                        'type': 'boolean'
                    }]
                },
                'leech_type': {
                    'type': 'string',
                    'enum': list(self._opts('leech_type').keys())
                },
                'hascue': {
                    'type': 'boolean'
                },
                'scene': {
                    'type': 'boolean'
                },
                'vanityhouse': {
                    'type': 'boolean'
                },
            },
            'required': ['username', 'password'],
            'additionalProperties': False
        }

    def _login(self, user, passwd):
        """
        Log in and store auth data from the server
        Adapted from https://github.com/isaaczafuta/whatapi

        Sets self.authkey and self.passkey on success; raises PluginError
        on login failure.
        """

        data = {
            'username': user,
            'password': passwd,
            'keeplogged': 1,
        }

        # A successful login redirects (302) to index.php; anything else
        # (including a 200 re-render of the login page) means failure.
        r = self.session.post("https://ssl.what.cd/login.php",
                              data=data,
                              allow_redirects=False)
        if r.status_code != 302 or r.headers.get('location') != "index.php":
            raise PluginError("Failed to log in to What.cd")

        # 'index' returns account info including the auth/pass keys needed
        # to build download URLs later.
        accountinfo = self._request('index')

        self.authkey = accountinfo['authkey']
        self.passkey = accountinfo['passkey']
        log.info("Logged in to What.cd")

    def _request(self, action, page=None, **kwargs):
        """
        Make an AJAX request to a given action page
        Adapted from https://github.com/isaaczafuta/whatapi

        Returns the 'response' payload of the API's JSON reply; raises
        PluginError on non-200 status or a non-success API response.
        """

        ajaxpage = "https://ssl.what.cd/ajax.php"

        params = {}

        # Filter params and map config values -> api values
        for k, v in list(kwargs.items()):
            params[self._key(k)] = self._getval(k, v)

        # Params other than the searching ones
        params['action'] = action
        if page:
            params['page'] = page

        r = self.session.get(ajaxpage, params=params, allow_redirects=False)
        if r.status_code != 200:
            raise PluginError("What.cd returned a non-200 status code")

        try:
            json_response = r.json()
            if json_response['status'] != "success":

                # Try to deal with errors returned by the API
                error = json_response.get('error', json_response.get('status'))
                if not error or error == "failure":
                    error = json_response.get('response', str(json_response))

                raise PluginError("What.cd gave a failure response: "
                                  "'{}'".format(error))
            return json_response['response']
        # NOTE(review): 'e' is never used, and the original exception is not
        # chained ('raise ... from e' would preserve the traceback context).
        except (ValueError, TypeError, KeyError) as e:
            raise PluginError("What.cd returned an invalid response")

    def _search_results(self, config):
        """Generator that yields search results"""
        page = 1
        pages = None
        while True:
            # NOTE(review): if 'pages' is the total page count, 'page >= pages'
            # breaks before the final page is fetched (page == pages is never
            # requested). Looks like an off-by-one — confirm against the API
            # before changing, since the empty-results break below may mask it.
            if pages and page >= pages:
                break

            log.debug(
                "Attempting to get page {} of search results".format(page))
            result = self._request('browse', page=page, **config)
            if not result['results']:
                break
            for x in result['results']:
                yield x

            pages = result.get('pages', pages)
            page += 1

    def _get_entries(self, search_results):
        """Generator that yields Entry objects from search results"""
        for result in search_results:
            # Get basic information on the release
            info = dict(
                (k, result[k]) for k in ('artist', 'groupName', 'groupYear'))

            # Releases can have multiple download options
            for tor in result['torrents']:
                temp = info.copy()
                temp.update(
                    dict(
                        (k, tor[k])
                        for k in ('media', 'encoding', 'format', 'torrentId')))

                yield Entry(
                    title="{artist} - {groupName} - {groupYear} "
                    "({media} - {format} - {encoding})-{torrentId}.torrent".
                    format(**temp),
                    url="https://what.cd/torrents.php?action=download&"
                    "id={}&authkey={}&torrent_pass={}".format(
                        temp['torrentId'], self.authkey, self.passkey),
                    torrent_seeds=tor['seeders'],
                    torrent_leeches=tor['leechers'],
                    # Size is returned in bytes, convert to MB for compat with the content_size plugin
                    content_size=math.floor(tor['size'] / (1024**2)))

    @cached('whatcd')
    @plugin.internet(log)
    def on_task_input(self, task, config):
        """Search on What.cd

        Logs in with the configured credentials, then treats every
        remaining config key as a search parameter and returns the
        matching torrents as entries.
        """

        self.session = Session()

        # From the API docs: "Refrain from making more than five (5) requests every ten (10) seconds"
        self.session.add_domain_limiter(
            TokenBucketLimiter('ssl.what.cd', 2, '2 seconds'))

        # Custom user agent
        user_agent = config.pop('user_agent', None)
        if user_agent:
            self.session.headers.update({"User-Agent": user_agent})

        # Login
        self._login(config.pop('username'), config.pop('password'))

        # Logged in successfully, it's ok if nothing matches
        task.no_entries_ok = True

        # NOTE: Any values still in config at this point MUST be valid search parameters

        # Perform the search and parse the needed information out of the response
        results = self._search_results(config)
        return list(self._get_entries(results))