Example #1
def get_stream_url(hn, path):
    with session.Session() as sess:
        video_url = config.API_BASE_URL.format(path='/v2{0}'.format(path))
        utils.log("Fetching stream URL: {0}".format(video_url))
        video_data = sess.get(video_url).text
        video_json = json.loads(video_data)
        if video_json.get('playable') is False:
            return {
                'msg': video_json.get('playableMessage'),
                'availability': video_json.get('availability')
            }
        sess.headers = {'User-Agent': config.USER_AGENT}
        # Initialise so a playlist with no usable HLS stream cannot raise
        # NameError at the 'if stream_url_base' check or the final return.
        stream_url_base = None
        captions_url = None
        for playlist in video_json['_embedded']['playlist']:
            if playlist.get('type') not in ['program', 'livestream']:
                continue
            if 'hls' in playlist.get('streams'):
                hls_streams = playlist['streams'].get('hls')
                stream_url_base = hls_streams.get(
                    '720', hls_streams.get('sd', hls_streams.get('sd-low')))
            if stream_url_base:
                captions_url = playlist.get('captions', {}).get('src-vtt')
                break
        akamai_auth = get_auth(hn, sess)
        request = sess.get(stream_url_base, params={'hdnea': akamai_auth})
        cookies = cookies_to_string(request.cookies)
        stream_url = '{0}|User-Agent={1}&Cookie={2}'.format(
            request.url, quote_plus(config.USER_AGENT), quote_plus(cookies))

    return {'stream_url': stream_url, 'captions_url': captions_url}
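
Here quote_plus escapes the User-Agent and cookie values so they survive inside the header suffix that Kodi accepts after a '|' separator in a stream URL. A minimal sketch of that final formatting step, using placeholder values:

from urllib.parse import quote_plus

# Kodi accepts "url|Header1=value1&Header2=value2"; header values must be
# URL-encoded so literal '&', '=' and spaces inside them do not break parsing.
stream_url = 'https://example.com/master.m3u8'  # placeholder URL
user_agent = 'Mozilla/5.0 (compatible; Kodi)'   # placeholder UA string
cookies = 'hdntl=exp=123~acl=/*'                # placeholder cookie string
kodi_url = '{0}|User-Agent={1}&Cookie={2}'.format(
    stream_url, quote_plus(user_agent), quote_plus(cookies))
print(kodi_url)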
Example #2
    def test_get_stream_url(self, mock_auth):
        path = '/video/ZW1939A025S00'
        video_url = config.API_BASE_URL.format(path='/v2{0}'.format(path))

        req = requests.Request('GET',
                               fakes.EXPECTED_HLS_URL,
                               params={'hdnea': self.AUTH_RESP_TEXT})
        prepared = req.prepare()
        url = prepared.url

        responses.add(responses.GET, video_url, body=self.VIDEO_JSON)
        responses.add(responses.GET,
                      url,
                      body='#EXTM3U',
                      headers={'Set-Cookie': fakes.AUTH_COOKIE},
                      status=200)
        mock_auth.return_value = self.AUTH_RESP_TEXT
        observed = comm.get_stream_url(fakes.HN, path)

        expected = {
            'stream_url':
            fakes.RESOLVED_URL.format(
                quote_plus(self.AUTH_RESP_TEXT, safe='~'),
                quote_plus(config.USER_AGENT)),
            'captions_url':
            fakes.EXPECTED_SUB_URL
        }
        # responses doesn't handle cookie domain/paths correctly atm, so
        # the expected stream_url omits the domain value.
        self.assertEqual(expected, observed)
Example #3
 def test_default_play_livestream(self, mock_listitem, mock_version,
                                  mock_time):
     mock_listitem.side_effect = fakes.FakeListItem
     mock_version.return_value = 15
     mock_time.return_value = 1565672000
     path = '/video/NS1413V001S00'
     video_url = config.API_BASE_URL.format(path='/v2{0}'.format(path))
     req = requests.Request('GET',
                            fakes.EXPECTED_LIVE_HLS_URL,
                            params={'hdnea': self.AUTH_RESP_TEXT})
     prepared = req.prepare()
     url = prepared.url
     responses.add(responses.GET, video_url, body=self.LIVE_VIDEO_JSON)
     responses.add(responses.GET,
                   url,
                   body='#EXTM3U',
                   headers={'Set-Cookie': fakes.AUTH_COOKIE},
                   status=200)
     responses.add(responses.GET,
                   fakes.AUTH_URL_DEFAULT_LIVE_TEST,
                   body=self.AUTH_RESP_TEXT,
                   status=200)
     default.main()
     observed = self.mock_plugin.resolved[2].getPath()
     expected = fakes.RESOLVED_LIVE_URL.format(
         quote_plus(self.AUTH_RESP_TEXT, safe='~'),
         quote_plus(config.USER_AGENT))
     self.assertEqual(expected, observed)
Example #4
def make_url(d):
    """Build a URL suitable for a Kodi add-on from a dict"""
    pairs = []
    for k, v in iteritems(d):
        k = quote_plus(k)
        v = ensure_ascii(v)
        v = quote_plus(v)
        pairs.append("%s=%s" % (k, v))
    return "&".join(pairs)
Example #5
    def search(self, task, entry, config=None):
        """
        Search for name from fuzer.
        """
        self.rss_key = config['rss_key']
        username = config['username']
        password = hashlib.md5(config['password'].encode('utf-8')).hexdigest()

        # build the form request:
        data = {'cookieuser': '******',
                'do': 'login',
                's': '',
                'securitytoken': 'guest',
                'vb_login_username': username,
                'vb_login_password': '',
                'vb_login_md5password': password,
                'vb_login_md5password_utf': password
                }
        # POST the login form:
        try:
            login = requests.post('https://www.fuzer.me/login.php?do=login', data=data)
        except RequestException as e:
            raise PluginError('Could not connect to fuzer: %s' % str(e))

        login_check_phrases = ['ההתחברות נכשלה', 'banned']
        if any(phrase in login.text for phrase in login_check_phrases):
            raise PluginError('Login to Fuzer failed, check credentials')

        # Read the user id from the login response's cookies; the requests
        # module itself has no usable top-level 'cookies' jar.
        self.user_id = login.cookies.get('fzr2userid')
        category = config.get('category', [0])
        # Make sure categories is a list
        if not isinstance(category, list):
            category = [category]

        # If there are any text categories, turn them into their id number
        categories = [c if isinstance(c, int) else CATEGORIES[c] for c in category]

        c_list = []
        for c in categories:
            c_list.append('c{}={}'.format(quote_plus('[]'), c))

        entries = []
        if entry.get('imdb_id'):
            log.debug('imdb_id {} detected, using in search.'.format(entry['imdb_id']))
            soup = self.get_fuzer_soup(entry['imdb_id'], c_list)
            entries = self.extract_entry_from_soup(soup)
            if entries:
                for e in list(entries):
                    e['imdb_id'] = entry.get('imdb_id')
        else:
            for search_string in entry.get('search_strings', [entry['title']]):
                query = normalize_unicode(search_string).replace(":", "")
                text = quote_plus(query.encode('windows-1255'))
                soup = self.get_fuzer_soup(text, c_list)
                entries += self.extract_entry_from_soup(soup)
        return sorted(entries, reverse=True, key=lambda x: x.get('search_sort')) if entries else []
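
The search string is encoded to windows-1255 before quoting because the site expects Hebrew queries percent-encoded in that codec rather than UTF-8. A small sketch of the difference:

from urllib.parse import quote_plus

query = 'שלום'
# windows-1255 yields one byte per Hebrew letter...
assert quote_plus(query.encode('windows-1255')) == '%F9%EC%E5%ED'
# ...whereas the UTF-8 default would produce two bytes per letter
assert quote_plus(query) == '%D7%A9%D7%9C%D7%95%D7%9D'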
Example #6
 def program_info_url(self, program_id):
     if self.is_elava_arkisto_media(program_id):
         did = program_id.split('-')[-1]
         return ('https://yle.fi/elavaarkisto/embed/%s.jsonp'
                 '?callback=yleEmbed.eaJsonpCallback'
                 '&instance=1&id=%s&lang=fi' %
                 (quote_plus(did), quote_plus(did)))
     else:
         return (super(ElavaArkistoExtractor, self)
                 .program_info_url(program_id))
Example #8
    def yle_media_descriptor(self, program_id, media_id, protocol):
        media_jsonp_url = 'https://player.yle.fi/api/v1/media.jsonp?' \
                          'id=%s&callback=yleEmbed.startPlayerCallback&' \
                          'mediaId=%s&protocol=%s&client=areena-flash-player' \
                          '&instance=1' % \
            (quote_plus(media_id), quote_plus(program_id),
             quote_plus(protocol))
        media = JSONP.load_jsonp(media_jsonp_url, self.httpclient)

        if media:
            logger.debug('media:\n' + json.dumps(media, indent=2))

        return media
Example #10
def list_categories():
    """
    Make initial list
    """
    try:
        _url = sys.argv[0]
        _handle = int(sys.argv[1])
        listing = []
        categories = config.CATEGORIES
        for category in categories:
            li = create_listitem(category)
            url_string = '{0}?action=listcategories&category={1}'
            url = url_string.format(_url, category)
            is_folder = True
            listing.append((url, li, is_folder))

        genres = comm.list_genres()
        for g in genres:
            li = create_listitem(g.title)
            li.setArt({'fanart': g.fanart, 'icon': g.thumb, 'thumb': g.thumb})
            url_string = '{0}?action=listcategories&category=genre&genre={1}'
            url = url_string.format(_url, quote_plus(g.genre_slug))
            is_folder = True
            listing.append((url, li, is_folder))
        li = create_listitem('Settings')
        listing.append(('{0}?action=settings'.format(_url), li, is_folder))
        xbmcplugin.addDirectoryItems(_handle, listing, len(listing))
        xbmcplugin.endOfDirectory(_handle)
    except Exception:
        utils.handle_error('Unable to list categories')
Example #11
    def search(self, task, entry, config=None):
        """
        Search for name from iptorrents
        """

        categories = config.get('category', 'All')
        # Make sure categories is a list
        if not isinstance(categories, list):
            categories = [categories]

        # If there are any text categories, turn them into their id number
        categories = [c if isinstance(c, int) else CATEGORIES[c]
                      for c in categories]
        filter_url = '&'.join((str(c) + '=') for c in categories)

        entries = set()

        for search_string in entry.get('search_strings', [entry['title']]):
            query = normalize_unicode(search_string)
            query = quote_plus(query.encode('utf8'))

            url = "{base_url}/t?{filter}&q={query}&qf=".format(base_url=BASE_URL, filter=filter_url, query=query)
            log.debug('searching with url: %s' % url)
            req = requests.get(url, cookies={'uid': str(config['uid']), 'pass': config['password']})

            if '/u/' + str(config['uid']) not in req.text:
                raise plugin.PluginError("Invalid cookies (user not logged in)...")

            soup = get_soup(req.content, parser="html.parser")
            torrents = soup.find('table', {'id': 'torrents'})

            results = torrents.findAll('tr')
            for torrent in results:
                if torrent.th and 'ac' in torrent.th.get('class'):
                    # Header column
                    continue
                if torrent.find('td', {'colspan': '99'}):
                    log.debug('No results found for search %s', search_string)
                    return
                entry = Entry()
                link = torrent.find('a', href=re.compile('download'))['href']
                entry['url'] = "{base}{link}?torrent_pass={key}".format(
                    base=BASE_URL, link=link, key=config.get('rss_key'))
                entry['title'] = torrent.find('a', href=re.compile('details')).text

                seeders = torrent.findNext('td', {'class': 'ac t_seeders'}).text
                leechers = torrent.findNext('td', {'class': 'ac t_leechers'}).text
                entry['torrent_seeds'] = int(seeders)
                entry['torrent_leeches'] = int(leechers)
                entry['search_sort'] = torrent_availability(entry['torrent_seeds'],
                                                            entry['torrent_leeches'])

                size = torrent.findNext(text=re.compile(r'^([\.\d]+) ([GMK]?)B$'))
                size = re.search(r'^([\.\d]+) ([GMK]?)B$', size)

                entry['content_size'] = parse_filesize(size.group(0))
                log.debug('Found entry %s', entry)
                entries.add(entry)

        return entries
Example #12
    def continue_core(self, action_name, events, sender_id):
        # type: (Text, List[Event], Text) -> Optional[Dict[Text, Any]]
        """Send a continue request to rasa core to get next action
        prediction."""

        url = "{}/conversations/{}/continue?token={}".format(
                self.host, sender_id, quote_plus(self.token))
        dumped_events = []
        for e in events:
            dumped_events.append(e.as_dict())
        data = json.dumps(
                {"executed_action": action_name, "events": dumped_events},
                ensure_ascii=False)
        response = requests.post(url, data=data.encode('utf-8'),
                                 headers={
                                     'Content-type': 'text/plain; '
                                                     'charset=utf-8'})

        if response.status_code == 200:
            return response.json()
        else:
            logger.warn("Got a bad response from rasa core :( Status: {} "
                        "Response: {}".format(response.status_code,
                                              response.text))
            return None
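
The token is quoted because auth tokens often contain characters such as '+', '/' or '=' that are significant inside a query string. A minimal sketch with a made-up token and placeholder host and sender id:

from urllib.parse import quote_plus

token = 'abc+def/ghi='  # made-up token value
url = '{}/conversations/{}/continue?token={}'.format(
    'http://localhost:5005', 'default', quote_plus(token))
# '+' -> %2B, '/' -> %2F, '=' -> %3D, so the token arrives intact
assert url.endswith('token=abc%2Bdef%2Fghi%3D')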
Example #13
    def upload_model(self, model_dir, max_retries=1):
        # type: (Text, int) -> Optional[Dict[Text, Any]]
        """Upload a Rasa core model to the remote instance."""

        url = "{}/load?token={}".format(self.host, quote_plus(self.token))
        logger.debug("Uploading model to rasa core server.")

        model_zip = utils.zip_folder(model_dir)

        response = None
        while max_retries > 0:
            max_retries -= 1

            try:
                with io.open(model_zip, "rb") as f:
                    response = requests.post(url, files={"model": f})

                if response.status_code == 200:
                    logger.debug("Finished uploading")
                    return response.json()
            except RequestException as e:
                logger.warn("Failed to send model upload request. "
                            "{}".format(e))

            if max_retries > 0:
                # some resting time before we try again - e.g. server
                # might be unavailable / not started yet
                time.sleep(2)

        if response:
            logger.warn("Got a bad response from rasa core while uploading "
                        "the model (Status: {} "
                        "Response: {})".format(response.status_code,
                                               response.text))
        return None
Example #14
 def test_url_with_30x_follows_redirect(self, client):
     redirect_url = client + u'/?status=200&content=test&content-type=text/csv'
     url = client + u'/?status=301&location=%s' % quote_plus(redirect_url)
     context = json.dumps({})
     data = json.dumps({'url': url})
     result = json.loads(link_checker(context, data))
     assert result
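
The redirect target has to be fully quoted before being embedded in the outer URL's location parameter, otherwise its own '?' and '&' would be parsed as part of the outer query string. A self-contained sketch:

from urllib.parse import quote_plus, urlparse, parse_qs

redirect_url = 'http://host/?status=200&content=test'
outer = 'http://host/?status=301&location=%s' % quote_plus(redirect_url)
# the inner URL survives as a single parameter value
assert parse_qs(urlparse(outer).query)['location'] == [redirect_url]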
Example #15
    def test_play_video_live(self, mock_listitem, mock_ticket):
        params = dict(parse_qsl(sys.argv[2][1:]))
        escaped_bc_url = re.escape(config.BC_URL).replace('\\{', '{').replace(
            '\\}', '}')
        bc_url = re.compile(escaped_bc_url.format('.*', '.*'))
        responses.add(responses.GET,
                      bc_url,
                      body=self.BC_EDGE_JSON,
                      status=200)
        responses.add(responses.GET,
                      config.MEDIA_AUTH_URL.format(video_id='6112170884001'),
                      body=self.VIDEO_TOKEN_JSON,
                      status=200)
        responses.add(responses.GET,
                      config.SIGN_URL.format(
                          quote_plus('https://foo.bar/video.m3u8')),
                      body=self.SIGN_JSON,
                      status=200)
        mock_ticket.return_value = json.dumps({
            'pai': fakes.FAKE_UUID[0],
            'bearer': 'abc123'
        })
        mock_listitem.side_effect = fakes.FakeListItem

        mock_plugin = fakes.FakePlugin()
        with mock.patch.dict('sys.modules', xbmcplugin=mock_plugin):
            import resources.lib.play as play
            play.play_video(params)
            self.assertEqual('https://foo.bar/index.m3u8?signed',
                             mock_plugin.resolved[2].getPath())
Example #16
    def search(self, task, entry, config=None):
        """
        Search for name from iptorrents
        """

        categories = config.get('category', 'All')
        # Make sure categories is a list
        if not isinstance(categories, list):
            categories = [categories]

        # If there are any text categories, turn them into their id number
        categories = [c if isinstance(c, int) else CATEGORIES[c]
                      for c in categories]
        filter_url = '&'.join((str(c) + '=') for c in categories)

        entries = set()

        for search_string in entry.get('search_strings', [entry['title']]):
            query = normalize_unicode(search_string)
            query = quote_plus(query.encode('utf8'))

            url = "{base_url}/t?{filter}&q={query}&qf=".format(base_url=BASE_URL, filter=filter_url, query=query)
            log.debug('searching with url: %s' % url)
            req = requests.get(url, cookies={'uid': str(config['uid']), 'pass': config['password']})

            if '/u/' + str(config['uid']) not in req.text:
                raise plugin.PluginError("Invalid cookies (user not logged in)...")

            soup = get_soup(req.content, parser="html.parser")
            torrents = soup.find('table', {'id': 'torrents'})

            results = torrents.findAll('tr')
            for torrent in results:
                if torrent.th and 'ac' in torrent.th.get('class'):
                    # Header column
                    continue
                if torrent.find('td', {'colspan': '99'}):
                    log.debug('No results found for search %s', search_string)
                    break
                entry = Entry()
                link = torrent.find('a', href=re.compile('download'))['href']
                entry['url'] = "{base}{link}?torrent_pass={key}".format(
                    base=BASE_URL, link=link, key=config.get('rss_key'))
                entry['title'] = torrent.find('a', href=re.compile('details')).text

                seeders = torrent.findNext('td', {'class': 'ac t_seeders'}).text
                leechers = torrent.findNext('td', {'class': 'ac t_leechers'}).text
                entry['torrent_seeds'] = int(seeders)
                entry['torrent_leeches'] = int(leechers)
                entry['search_sort'] = torrent_availability(entry['torrent_seeds'],
                                                            entry['torrent_leeches'])

                size = torrent.findNext(text=re.compile(r'^([\.\d]+) ([GMK]?)B$'))
                size = re.search(r'^([\.\d]+) ([GMK]?)B$', size)

                entry['content_size'] = parse_filesize(size.group(0))
                log.debug('Found entry %s', entry)
                entries.add(entry)

        return entries
Example #17
def make_search_history_list():
    try:
        listing = search.get_search_history_listing()
        ok = True
        for item in listing:
            listitem = comm.create_listitem(label=item)
            listitem.setInfo('video', {'plot': ''})
            listitem.addContextMenuItems([
                ('Remove from search history',
                 ('RunPlugin(plugin://plugin.video.sbs/?action=remove'
                  'search&name={0})'.format(item)))
            ])
            url = "{0}?action=searchhistory&name={1}".format(
                sys.argv[0], quote_plus(item))

            ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
                                             url=url,
                                             listitem=listitem,
                                             isFolder=True)
        xbmcplugin.endOfDirectory(handle=int(sys.argv[1]),
                                  succeeded=ok,
                                  cacheToDisc=False)
        xbmcplugin.setContent(handle=int(sys.argv[1]), content='tvshows')
    except Exception:
        utils.handle_error('Unable to fetch search history list')
Example #18
  def _construct_cmd_args(jars, common_args, global_excludes,
                          pinned_coords, coursier_workdir, json_output_path):

    # Make a copy, so there is no side effect or others using `common_args`
    cmd_args = list(common_args)

    cmd_args.extend(['--json-output-file', json_output_path])

    # Dealing with intransitivity and forced versions.
    for j in jars:
      if not j.rev:
        raise TaskError('Undefined revs for jars unsupported by Coursier. "{}"'.format(repr(j.coordinate).replace('M2Coordinate', 'jar')))

      module = j.coordinate.simple_coord
      if j.coordinate.classifier:
        module += ',classifier={}'.format(j.coordinate.classifier)

      if j.get_url():
        jar_url = j.get_url()
        module += ',url={}'.format(parse.quote_plus(jar_url))
        
      if j.intransitive:
        cmd_args.append('--intransitive')

      cmd_args.append(module)

      # Force requires specifying the coord again with -V
      if j.force:
        cmd_args.append('-V')
        cmd_args.append(j.coordinate.simple_coord)

    # Force pinned coordinates
    for m2coord in pinned_coords:
      cmd_args.append('-V')
      cmd_args.append(m2coord.simple_coord)

    # Local exclusions
    local_exclude_args = []
    for jar in jars:
      for ex in jar.excludes:
        # `--` means exclude. See --local-exclude-file in `coursier fetch --help`
        # If ex.name does not exist, that means the whole org needs to be excluded.
        ex_arg = "{}:{}--{}:{}".format(jar.org, jar.name, ex.org, ex.name or '*')
        local_exclude_args.append(ex_arg)

    if local_exclude_args:
      with temporary_file(coursier_workdir, cleanup=False) as f:
        exclude_file = f.name
        with open(exclude_file, 'w') as ex_f:
          ex_f.write('\n'.join(local_exclude_args))

        cmd_args.append('--local-exclude-file')
        cmd_args.append(exclude_file)

    for ex in global_excludes:
      cmd_args.append('-E')
      cmd_args.append('{}:{}'.format(ex.org, ex.name or '*'))

    return cmd_args
Example #20
 def program_info_url(self, program_id):
     if self.is_elava_arkisto_media(program_id):
         plain_id = program_id.split('-')[-1]
         return 'https://player.yle.fi/api/v1/arkivet.jsonp?' \
             'id=%s&callback=yleEmbed.eaJsonpCallback&instance=1&lang=sv' % \
             (quote_plus(plain_id))
     else:
         return super(ArkivetExtractor, self).program_info_url(program_id)
Example #22
 def test_url_with_30x_follows_and_records_redirect(self, client):
     url = client + '/'
     redirect_url = url + u'?status=200&content=test&content-type=text/csv'
     url += u'?status=301&location=%s' % quote_plus(redirect_url)
     res_id = self._test_resource(url)['id']
     result = json.loads(update_resource(res_id))
     assert result
     assert result['url_redirected_to'] == redirect_url
Example #23
def sign_url(url, media_auth_token):
    headers = {'authorization': 'JWT {0}'.format(media_auth_token)}
    data = json.loads(
        fetch_url(config.SIGN_URL.format(quote_plus(url)), headers=headers))
    if data.get('message') == 'SUCCESS':
        return str(data.get('url'))
    else:
        raise Exception('error in signing url')
Example #24
    def search(self, task, entry, config=None):
        """
        Search for name from iptorrents
        """

        categories = config.get('category', 'all')
        # Make sure categories is a list
        if not isinstance(categories, list):
            categories = [categories]

        # If there are any text categories, turn them into their id number
        categories = [c if isinstance(c, int) else CATEGORIES[c]
                      for c in categories]
        filter_url = '&'.join((str(c) + '=') for c in categories)

        entries = set()

        for search_string in entry.get('search_strings', [entry['title']]):
            query = normalize_unicode(search_string)
            query = quote_plus(query.encode('utf8'))

            url = "{base_url}/t?{filter}&q={query}&qf=".format(base_url=BASE_URL, filter=filter_url, query=query)
            log.debug('searching with url: %s' % url)
            req = requests.get(url, cookies={'uid': str(config['uid']), 'pass': config['password']})

            # compare against req.text (str), not req.content (bytes on Python 3)
            if '/u/' + str(config.get('uid')) not in req.text:
                raise plugin.PluginError("Invalid cookies (user not logged in)...")

            soup = get_soup(req.content, parser="html.parser")
            torrents = soup.find('table', {'class': 'torrents'})

            for torrent in torrents.findAll('a', href=re.compile(r'\.torrent$')):
                entry = Entry()
                entry['url'] = "{base}{link}?torrent_pass={key}".format(
                    base=BASE_URL, link=torrent['href'], key=config.get('rss_key'))
                entry['title'] = torrent.findPrevious("a", attrs={'class': 't_title'}).text

                seeders = torrent.findNext('td', {'class': 'ac t_seeders'}).text
                leechers = torrent.findNext('td', {'class': 'ac t_leechers'}).text
                entry['torrent_seeds'] = int(seeders)
                entry['torrent_leeches'] = int(leechers)
                entry['search_sort'] = torrent_availability(entry['torrent_seeds'],
                                                            entry['torrent_leeches'])

                size = torrent.findNext(text=re.compile(r'^([\.\d]+) ([GMK]?)B$'))
                size = re.search(r'^([\.\d]+) ([GMK]?)B$', size)
                if size:
                    if size.group(2) == 'G':
                        entry['content_size'] = int(float(size.group(1)) * 1000 ** 3 / 1024 ** 2)
                    elif size.group(2) == 'M':
                        entry['content_size'] = int(float(size.group(1)) * 1000 ** 2 / 1024 ** 2)
                    elif size.group(2) == 'K':
                        entry['content_size'] = int(float(size.group(1)) * 1000 / 1024 ** 2)
                    else:
                        entry['content_size'] = int(float(size.group(1)) / 1024 ** 2)
                entries.add(entry)

        return entries
Example #25
    def search(self, task, entry, config=None):
        """
        Search for name from fuzer.
        """
        self.rss_key = config['rss_key']
        self.user_id = config['user_id']

        self.cookies = {
            'fzr2lastactivity': '0',
            'fzr2lastvisit': '',
            'fzr2password': config['cookie_password'],
            'fzr2sessionhash': '',
            'fzr2userid': str(self.user_id)
        }

        category = config.get('category', [0])
        # Make sure categories is a list
        if not isinstance(category, list):
            category = [category]

        # If there are any text categories, turn them into their id number
        categories = [
            c if isinstance(c, int) else CATEGORIES[c] for c in category
        ]
        c_list = ['c{}={}'.format(quote_plus('[]'), c) for c in categories]

        entries = []
        if entry.get('imdb_id'):
            log.debug("imdb_id '%s' detected, using in search.",
                      entry['imdb_id'])
            soup = self.get_fuzer_soup(entry['imdb_id'], c_list)
            entries = self.extract_entry_from_soup(soup)
            if entries:
                for e in list(entries):
                    e['imdb_id'] = entry.get('imdb_id')
        else:
            for search_string in entry.get('search_strings', [entry['title']]):
                query = normalize_scene(search_string)
                text = quote_plus(query.encode('windows-1255'))
                soup = self.get_fuzer_soup(text, c_list)
                entries += self.extract_entry_from_soup(soup)
        return sorted(entries,
                      reverse=True,
                      key=lambda x: x.get('search_sort')) if entries else []
Example #26
    def search(self, task, entry, config=None):
        """
        Search for name from fuzer.
        """
        self.rss_key = config['rss_key']
        self.user_id = config['user_id']

        self.cookies = {
            'fzr2lastactivity': '0',
            'fzr2lastvisit': '',
            'fzr2password': config['cookie_password'],
            'fzr2sessionhash': '',
            'fzr2userid': str(self.user_id),
        }

        category = config.get('category', [0])
        # Make sure categories is a list
        if not isinstance(category, list):
            category = [category]

        # If there are any text categories, turn them into their id number
        categories = [c if isinstance(c, int) else CATEGORIES[c] for c in category]
        c_list = ['c{}={}'.format(quote_plus('[]'), c) for c in categories]

        entries = []
        if entry.get('imdb_id'):
            log.debug("imdb_id '%s' detected, using in search.", entry['imdb_id'])
            soup = self.get_fuzer_soup(entry['imdb_id'], c_list)
            entries = self.extract_entry_from_soup(soup)
            if entries:
                for e in list(entries):
                    e['imdb_id'] = entry.get('imdb_id')
        else:
            for search_string in entry.get('search_strings', [entry['title']]):
                query = normalize_scene(search_string)
                text = quote_plus(query.encode('windows-1255'))
                soup = self.get_fuzer_soup(text, c_list)
                entries += self.extract_entry_from_soup(soup)
        return (
            sorted(entries, reverse=True, key=lambda x: x.get('torrent_availability'))
            if entries
            else []
        )
Example #27
    def __init__(self, filename, data, content_type='application/octet-stream', disposition='attachment', headers=None):
        self.filename = filename
        self.content_type = content_type
        self.data = data
        self.disposition = disposition
        self.headers = headers or {}

        # Inlining
        if self.disposition == 'inline':
            self.headers.setdefault('Content-ID', '<{}>'.format(quote_plus(filename)))
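
For inline attachments the filename is quoted so the Content-ID stays a single clean token that 'cid:' references in an HTML body can point at. A sketch of the resulting header value, with a placeholder filename:

from urllib.parse import quote_plus

filename = 'chart 1.png'  # placeholder filename containing a space
content_id = '<{}>'.format(quote_plus(filename))
assert content_id == '<chart+1.png>'
# an HTML body would then reference the attachment as:
img_tag = '<img src="cid:{}">'.format(quote_plus(filename))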
Example #28
 def make_kodi_url(self):
     d_original = OrderedDict(
         sorted(self.__dict__.items(), key=lambda x: x[0]))
     d = d_original.copy()
     for key, value in d_original.items():
         if not value:
             d.pop(key)
             continue
         if isinstance(value, str):
             d[key] = unicodedata.normalize('NFKD', value).encode(
                 'ascii', 'ignore').decode('utf-8')
     url = ''
     for key in d.keys():
         if isinstance(d[key], (str, bytes)):
             val = quote_plus(d[key])
         elif isinstance(d[key], datetime.datetime):
             val = quote_plus(Program.format_date(d[key]))
         else:
             val = d[key]
         url += '&{0}={1}'.format(key, val)
     return url.lstrip('&')
Example #29
 def test_get_m3u8_playlist(self, mock_ticket):
     mock_ticket.return_value = 'foobar123456'
     auth_url = config.AUTH_URL.format(
         config.PCODE, fakes.VIDEO_ID,
         quote_plus('http://foobar.com/video'))
     responses.add(responses.GET, auth_url,
                   body=self.AUTH_JSON, status=200)
     responses.add(responses.GET,
                   config.EMBED_TOKEN_URL.format(fakes.VIDEO_ID),
                   body=self.EMBED_TOKEN_XML, status=200)
     observed = stream_auth.get_m3u8_playlist(fakes.VIDEO_ID, '')
     self.assertEqual(fakes.M3U8_URL, observed)
Example #30
    def test_play_captions(self, mock_listitem, mock_translate_path,
                           mock_version, mock_time, mock_file, mock_isdir,
                           mock_isfile):
        mock_listitem.side_effect = fakes.FakeListItem
        mock_translate_path.return_value = '/foo/bar/'
        mock_version.return_value = 15
        mock_time.return_value = 1565672000
        mock_isdir.return_value = True
        mock_isfile.return_value = False

        path = '/video/ZW1939A025S00'
        video_url = config.API_BASE_URL.format(path='/v2{0}'.format(path))
        url = '{0}&hdnea={1}'.format(fakes.EXPECTED_HLS_URL,
                                     self.AUTH_RESP_TEXT.decode('utf-8'))
        responses.add(responses.GET, video_url, body=self.VIDEO_JSON)
        responses.add(responses.GET,
                      url,
                      body='#EXTM3U',
                      headers={'Set-Cookie': fakes.AUTH_COOKIE},
                      status=200)
        responses.add(responses.GET,
                      fakes.AUTH_URL_DEFAULT_TEST,
                      body=self.AUTH_RESP_TEXT,
                      status=200)
        responses.add(responses.GET,
                      fakes.EXPECTED_CAPTIONS_URL,
                      body=self.VTT_TEXT,
                      status=200)
        play.play(sys.argv[2][1:])
        observed = self.mock_plugin.resolved[2].getPath()
        expected = fakes.RESOLVED_URL.format(
            quote_plus(self.AUTH_RESP_TEXT, safe='~'),
            quote_plus(config.USER_AGENT))
        self.assertEqual(expected, observed)
        mock_file.assert_called()
        handle = mock_file()
        handle.write.assert_called_once_with(fakes.EXPECTED_SRT_TEXT)
        observed = self.mock_plugin.resolved[2].subtitles
        expected = ['/foo/bar/subtitles.eng.srt']
        self.assertEqual(expected, observed)
Example #31
def get_m3u8_playlist(video_id, pcode):
    """
    Main function to call other functions that will return us our m3u8 HLS
    playlist as a string, which we can then write to a file for Kodi
    to use
    """
    if pcode == '':
        pcode = config.PCODE
    login_ticket = get_user_ticket()
    embed_token = get_embed_token(login_ticket, video_id)
    authorize_url = config.AUTH_URL.format(pcode, video_id,
                                           quote_plus(embed_token))
    secure_token_url = get_secure_token(authorize_url, video_id)
    return secure_token_url
Example #32
    def playlist_url(self, series_id, page_size=100, offset=0):
        if offset:
            offset_param = '&offset={offset}'.format(offset=str(offset))
        else:
            offset_param = ''

        return ('https://areena.yle.fi/api/programs/v1/items.json?'
                'series={series_id}&type=program&availability=ondemand&'
                'order=episode.hash%3Adesc%2C'
                'publication.starttime%3Adesc%2Ctitle.fi%3Aasc&'
                'app_id=89868a18&app_key=54bb4ea4d92854a2a45e98f961f0d7da&'
                'limit={limit}{offset_param}'.format(
                    series_id=quote_plus(series_id),
                    limit=str(page_size),
                    offset_param=offset_param))
Example #33
    def _build_uri(self, stanza_name=None, query=None):
        """
        Create URI for CONF file CRUD operations

        :param stanza_name: stanza to perform operations on
        :param query: query params that will be written as key-value pairs to the conf file
        """
        base_uri = self.base_uri()
        query_params = dict(output_mode='json')
        if query:
            query_params.update(query)
        base_url = "%s/configs/conf-%s" % (base_uri, quote(self.conf_file))
        query_string = '?%s' % (urlencode(query_params))
        return '%s/%s' % (base_url, query_string) if not stanza_name else \
            '%s/%s%s' % (base_url, quote_plus(stanza_name), query_string)
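
This example relies on the difference between quote and quote_plus: quote keeps '/' unescaped and renders spaces as %20 (suiting the conf file name), while quote_plus also escapes '/' and renders spaces as '+' (keeping the stanza name a single path segment). For reference:

from urllib.parse import quote, quote_plus

assert quote('search head') == 'search%20head'
assert quote_plus('search head') == 'search+head'
assert quote('a/b') == 'a/b'         # quote leaves '/' alone by default
assert quote_plus('a/b') == 'a%2Fb'  # quote_plus escapes it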
Example #34
 def test_play_resolved(self, mock_listitem, mock_version, mock_time):
     mock_listitem.side_effect = fakes.FakeListItem
     mock_version.return_value = 15
     mock_time.return_value = 1565672000
     path = '/video/ZW1939A025S00'
     video_url = config.API_BASE_URL.format(path='/v2{0}'.format(path))
     url = '{0}&hdnea={1}'.format(fakes.EXPECTED_HLS_URL,
                                  self.AUTH_RESP_TEXT.decode('utf-8'))
     responses.add(responses.GET, video_url, body=self.VIDEO_JSON)
     responses.add(responses.GET,
                   url,
                   body='#EXTM3U',
                   headers={'Set-Cookie': fakes.AUTH_COOKIE},
                   status=200)
     responses.add(responses.GET,
                   fakes.AUTH_URL_DEFAULT_TEST,
                   body=self.AUTH_RESP_TEXT,
                   status=200)
     play.play(sys.argv[2][1:])
     observed = self.mock_plugin.resolved[2].getPath()
     expected = fakes.RESOLVED_URL.format(
         quote_plus(self.AUTH_RESP_TEXT, safe='~'),
         quote_plus(config.USER_AGENT))
     self.assertEqual(expected, observed)
Example #35
    def __init__(self,
                 filename,
                 data,
                 content_type='application/octet-stream',
                 disposition='attachment',
                 headers=None):
        self.filename = filename
        self.content_type = content_type
        self.data = data
        self.disposition = disposition
        self.headers = headers or {}

        # Inlining
        if self.disposition == 'inline':
            self.headers.setdefault('Content-ID',
                                    '<{}>'.format(quote_plus(filename)))
Example #36
    def playlist_url(self, series_id, page_size=100, offset=0):
        if offset:
            offset_param = '&offset={offset}'.format(offset=str(offset))
        else:
            offset_param = ''

        return ('https://areena.yle.fi/api/programs/v1/items.json?'
                'series={series_id}&type=program&availability=ondemand&'
                'order=publication.starttime%3Adesc%2C'
                'episode.hash%3Aasc%2Ctitle.fi%3Aasc&'
                'app_id=areena_web_frontend_prod&'
                'app_key=4622a8f8505bb056c956832a70c105d4&'
                'limit={limit}{offset_param}'.format(
                    series_id=quote_plus(series_id),
                    limit=str(page_size),
                    offset_param=offset_param))
Example #38
def on_basic_auth(env, url_config, needs_auth_info=True):
    """ Visit _RequestApp._on_basic_auth method's docstring.
    """
    username = url_config['basic-auth-username']
    result = _on_basic_auth(env.get('HTTP_AUTHORIZATION', ''), username, url_config['basic-auth-password'])
    is_success = result is True # Yes, need to check for True

    auth_result = AuthResult(is_success)

    if is_success:
        if needs_auth_info:
            auth_result.auth_info = {b'basic-auth-username': quote_plus(username).encode('utf-8')}
    else:
        auth_result.code = result

    return auth_result
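
quote_plus returns ASCII-only text, so encoding its result afterwards cannot fail even when the username itself is not ASCII. A small sketch with a placeholder username:

from urllib.parse import quote_plus

username = 'jürgen'  # placeholder non-ASCII username
# quote_plus applies UTF-8 percent-encoding first, leaving pure ASCII
assert quote_plus(username).encode('utf-8') == b'j%C3%BCrgen'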
Example #39
def movies_search(q, page_limit=None, page=None, api_key=None):
    if isinstance(q, basestring):
        q = quote_plus(q.encode('latin-1', errors='ignore'))

    if not api_key:
        api_key = API_KEY

    url = '%s/%s/movies.json?q=%s&apikey=%s' % (SERVER, API_VER, q, api_key)
    if page_limit:
        url += '&page_limit=%i' % page_limit
    if page:
        url += '&page=%i' % page

    results = get_json(url)
    if isinstance(results, dict) and results.get('total') and len(results.get('movies')):
        return results
Example #40
 def get_url(server_url, wiki_space, page_title):
   """ return the url for a confluence page in a given space and with a given
   title. """
   return '%s/display/%s/%s' % (server_url, wiki_space, quote_plus(page_title))
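
Confluence display URLs render spaces in page titles as '+', which is exactly quote_plus behaviour, so the helper can build the link directly. A usage sketch with placeholder server and space keys:

from urllib.parse import quote_plus

def get_url(server_url, wiki_space, page_title):
    """Return the URL for a Confluence page in a given space with a given title."""
    return '%s/display/%s/%s' % (server_url, wiki_space, quote_plus(page_title))

assert (get_url('https://wiki.example.com', 'ENG', 'Release Notes 1.0')
        == 'https://wiki.example.com/display/ENG/Release+Notes+1.0')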
Example #41
 def program_info_url(self, program_id):
     return 'https://player.yle.fi/api/v1/programs.jsonp?' \
         'id=%s&callback=yleEmbed.programJsonpCallback' % \
         (quote_plus(program_id))
Example #42
 def program_info_url(self, program_id):
     quoted_pid = quote_plus(program_id)
     return 'https://player.yle.fi/api/v1/services.jsonp?' \
         'id=%s&callback=yleEmbed.simulcastJsonpCallback&' \
         'region=fi&instance=1&dataId=%s' % \
         (quoted_pid, quoted_pid)
Example #43
    def search(self, task, entry, config):
        """CPASBIEN search plugin

        Config example:

        tv_search_cpasbien:
          discover:
            what:
              - trakt_list:
                  username: xxxxxxx
                  api_key: xxxxxxx
                  series: watchlist
            from:
              - cpasbien:
                  category: "series-vostfr"
            interval: 1 day
            ignore_estimations: yes

        Category is ONE of:
            all
            films
            series
            musique
            films-french
            1080p
            720p
            series-francaise
            films-dvdrip
            films-vostfr
            series-vostfr
            ebook
        """

        base_url = 'http://www.cpasbien.io'
        entries = set()
        for search_string in entry.get('search_strings', [entry['title']]):
            search_string = search_string.replace(' ', '-').lower()
            search_string = search_string.replace('(', '')
            search_string = search_string.replace(')', '')
            query = normalize_unicode(search_string)
            query_url_fragment = quote_plus(query.encode('utf-8'))
            # http://www.cpasbien.pe/recherche/ncis.html
            if config['category'] == 'all':
                str_url = (base_url, 'recherche', query_url_fragment)
                url = '/'.join(str_url)
            else:
                category_url_fragment = '%s' % config['category']
                str_url = (base_url, 'recherche', category_url_fragment, query_url_fragment)
                url = '/'.join(str_url)
            log.debug('search url: %s' % url + '.html')
            # GET URL
            f = task.requests.get(url + '.html').content
            soup = get_soup(f)
            if soup.findAll(text=re.compile(' 0 torrents')):
                log.debug('search returned no results')
            else:
                nextpage = 0
                while (nextpage >= 0):
                    if (nextpage > 0):
                        newurl = url + '/page-' + str(nextpage)
                        log.debug('-----> NEXT PAGE : %s' % newurl)
                        f1 = task.requests.get(newurl).content
                        soup = get_soup(f1)
                    for result in soup.findAll('div', attrs={'class': re.compile('ligne')}):
                        entry = Entry()
                        link = result.find('a', attrs={'href': re.compile('dl-torrent')})
                        entry['title'] = link.contents[0]
                        # REWRITE URL
                        page_link = link.get('href')
                        link_rewrite = page_link.split('/')
                        # get last value in array remove .html and replace by .torrent
                        endlink = link_rewrite[-1]
                        str_url = (base_url, '/telechargement/', endlink[:-5], '.torrent')
                        entry['url'] = ''.join(str_url)

                        log.debug('Title: %s | DL LINK: %s' % (entry['title'], entry['url']))

                        entry['torrent_seeds'] = (int(result.find('span', attrs={'class': re.compile('seed')}).text))
                        entry['torrent_leeches'] = (int(result.find('div', attrs={'class': re.compile('down')}).text))
                        size = result.find('div', attrs={'class': re.compile('poid')}).text

                        entry['content_size'] = parse_filesize(size, si=False)

                        if (entry['torrent_seeds'] > 0):
                            entries.add(entry)
                        else:
                            log.debug('0 SEED, not adding entry')
                    if soup.find(text=re.compile('Suiv')):
                        nextpage += 1
                    else:
                        nextpage = -1
        return entries