Example #1
def get_twitter_share_url(context, chapter):
    try:
        anchor = get_chapter_anchor(context, chapter)
    except ArticlePage.DoesNotExist:
        twitter_share_url = ''
    else:
        stream_field_value = chapter.value
        tweet_text = stream_field_value['tweet_text']
        twitter_share_params = []
        text = []
        _hashtags = set()
        tweet_text_parts = tweet_text.split(' ')
        for word in tweet_text_parts:
            if word.startswith('#'):
                _hashtags.add(word[1:])
            text.append(word)
        if text:
            text = ' '.join(text)
            twitter_share_params.append('text={0}'.format(quote_plus(text)))
        if _hashtags:
            # Hashtags could also be passed as a separate 'hashtags' param, but Twitter
            # appends those to the end of the tweet; the hashtags we want are already
            # embedded in tweet_text, so this is normally unnecessary.
            _hashtags = ','.join(_hashtags)
            twitter_share_params.append('hashtags={0}'.format(quote_plus(_hashtags)))
        twitter_share_params.append('url={0}'.format(quote_plus(anchor)))
        twitter_share_params = '&'.join(twitter_share_params)
        twitter_share_url = 'https://twitter.com/share?{0}'.format(twitter_share_params)
    return twitter_share_url
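For reference, quote_plus percent-encodes everything that is unsafe inside a query-string value and turns spaces into '+', which is what lets the tweet text and hashtags above survive as single parameters. A minimal standalone sketch (inputs are made up):

from urllib.parse import quote_plus

print(quote_plus('Check this out #python #wagtail'))
# -> Check+this+out+%23python+%23wagtail
print(quote_plus('https://example.com/article#chapter-1'))
# -> https%3A%2F%2Fexample.com%2Farticle%23chapter-1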
Example #2
    def get_results(self, query="", selected_facets=None, sortby="",
                    repo_slug=None):
        """Helper method to get search results."""
        if repo_slug is None:
            repo_slug = self.repo.slug

        if selected_facets is None:
            selected_facets = []

        selected_facets_arg = ""
        for facet in selected_facets:
            selected_facets_arg += "&selected_facets={facet}".format(
                facet=urllib_parse.quote_plus(facet)
            )
        resp = self.client.get(
            "{repo_base}{repo_slug}/search/?q={query}{facets}"
            "&sortby={sortby}".format(
                repo_base=REPO_BASE,
                repo_slug=repo_slug,
                query=urllib_parse.quote_plus(query),
                facets=selected_facets_arg,
                sortby=sortby
            )
        )
        self.assertEqual(HTTP_200_OK, resp.status_code)
        return as_json(resp)
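The helper above leans on quote_plus so that facet values containing spaces or ':' arrive intact as single selected_facets parameters. A rough sketch of the same escaping (the facet values are illustrative):

from urllib.parse import quote_plus

facets = ['course:Intro to Python', 'run:2015']
arg = ''.join('&selected_facets={}'.format(quote_plus(f)) for f in facets)
print(arg)
# -> &selected_facets=course%3AIntro+to+Python&selected_facets=run%3A2015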
Example #3
 def test_md_query_single(self):
     q = quote_plus('https://idp.nordu.net/idp/shibboleth')
     r = requests.get("http://127.0.0.1:%s/entities/%s" % (self.port, q))
     assert (r.status_code == 200)
     assert ('application/xml' in r.headers['Content-Type'])
     t = parse_xml(six.BytesIO(r.content))
     assert (t is not None)
     e = root(t)
     assert (e.get('entityID') == 'https://idp.nordu.net/idp/shibboleth')
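Here an entire URL (the entity ID) is embedded as a path segment, so every reserved character must be escaped before the request is made; roughly (the port is illustrative):

from urllib.parse import quote_plus

q = quote_plus('https://idp.nordu.net/idp/shibboleth')
print(q)  # -> https%3A%2F%2Fidp.nordu.net%2Fidp%2Fshibboleth
print('http://127.0.0.1:8080/entities/%s' % q)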
Example #4
    def download_invoice(self, request, pk=None):
        """
        Download Invoice Endpoint
        ---
        omit_serializer: True
        omit_parameters:
            - query
        """
        current_url = '{}?{}'.format(
            reverse(request.resolver_match.url_name, kwargs={'pk': pk}),
            urlencode(request.query_params)
        )
        login_url = '/signin?next=%s' % quote_plus(current_url)
        if not request.user.is_authenticated():
            return redirect(login_url)

        invoice = get_object_or_404(self.get_queryset(), pk=pk)
        if invoice:
            try:
                self.check_object_permissions(request, invoice)
            except NotAuthenticated:
                return redirect(login_url)
            except PermissionDenied:
                return HttpResponse("You do not have permission to access this invoice")

            if not (
                invoice.is_due or request.user.is_admin or request.user.is_project_manager or request.user.is_developer):
                return HttpResponse("You do not have permission to access this invoice")

        if request.accepted_renderer.format == 'html':
            if invoice.type == INVOICE_TYPE_CREDIT_NOTA:
                return HttpResponse(invoice.credit_note_html)
            return HttpResponse(invoice.html)
        else:
            # Only the PDF source differs between credit notes and regular
            # invoices; the response headers are built the same way.
            if invoice.type == INVOICE_TYPE_CREDIT_NOTA:
                pdf = invoice.credit_note_pdf
            else:
                pdf = invoice.pdf
            http_response = HttpResponse(pdf, content_type='application/pdf')
            http_response['Content-Disposition'] = 'filename="Invoice_{}_{}_{}.pdf"'.format(
                invoice and invoice.number or pk,
                invoice and invoice.project and invoice.project.title or pk,
                invoice and invoice.title or pk
            )
            return http_response
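The '?next=' pattern above depends on quote_plus: the current URL, including its own query string, has to survive as one parameter value, and the login flow can later recover it with unquote_plus. A minimal round trip (the path is hypothetical):

from urllib.parse import quote_plus, unquote_plus

current_url = '/api/invoices/42/download/?format=pdf'
login_url = '/signin?next=%s' % quote_plus(current_url)
print(login_url)
# -> /signin?next=%2Fapi%2Finvoices%2F42%2Fdownload%2F%3Fformat%3Dpdf
print(unquote_plus(login_url.split('next=', 1)[1]))  # the original URL again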
Example #5
def openaddresses_download_configured_files(out_dir):
    for path in openaddresses_config.sources:

        source = six.b('/').join([safe_encode(p) for p in path])
        filename = safe_encode(path[-1]) + six.b('.zip')
        zip_url_path = six.b('/').join([safe_encode(p) for p in path[:-1]] + [quote_plus(filename)])

        url = urljoin(OPENADDRESSES_LATEST_DIR, zip_url_path)

        download_pre_release_downloads(out_dir)

        print(six.u('doing {}').format(safe_decode(source)))
        success = download_and_unzip_file(url, out_dir)
        if not success:
            print(six.u('ERR: could not download {}').format(source))
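One caveat with this example: quote_plus encodes spaces as '+', which is a query-string convention; for a path segment like the filename here, quote (which emits %20) is usually the safer choice, although many servers accept both. For comparison (the filename is made up):

from urllib.parse import quote, quote_plus

print(quote_plus('us/ca/san francisco.zip'))  # -> us%2Fca%2Fsan+francisco.zip
print(quote('us/ca/san francisco.zip'))       # -> us/ca/san%20francisco.zip ('/' is safe by default)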
Example #6
    def test_login(self):
        # Test redirect
        resp = self.client.get(url_for("oauthclient.login", remote_app='test'))
        self.assertStatus(resp, 302)
        self.assertEqual(
            resp.location,
            "https://foo.bar/oauth/authorize?response_type=code&"
            "client_id=testid&redirect_uri=%s" % quote_plus(url_for(
                "oauthclient.authorized", remote_app='test', _external=True
            ))
        )

        # Invalid remote
        resp = self.client.get(
            url_for("oauthclient.login", remote_app='invalid')
        )
        self.assertStatus(resp, 404)
Example #7
    def test_s3_event_simple(self, mock_stdout):
        self.patch(s3, 'S3_AUGMENT_TABLE', [])
        session_factory = self.replay_flight_data('test_s3_encrypt')
        client = session_factory().client('s3')
        self.patch(s3crypt, 's3', client)

        event = {'Records': [{
            's3': {
                'bucket': {
                    'name': 'test-bucket'
                },
                'object': {
                    'key': quote_plus('test-key'),
                    'size': 42
                }
            }
        }]}
        s3crypt.process_event(event, {})
Example #8
    def test_s3_event_unsafe_key(self, mock_stdout):
        self.patch(s3, 'S3_AUGMENT_TABLE', [])
        session_factory = self.replay_flight_data('test_s3_encrypt')
        client = session_factory().client('s3')
        self.patch(s3crypt, 's3', client)

        event = {'Records': [{
            's3': {
                'bucket': {
                    'name': 'test-bucket'
                },
                'object': {
                    'key': quote_plus('/test000/!-_.*\'()/&@:,$=+%2b?;/ /whatever'),
                    'size': 42
                }
            }
        }]}
        s3crypt.process_event(event, {})
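Both tests quote the key because S3 event notifications deliver object keys URL-encoded (a space in a key arrives as '+'); a consumer normally reverses this with unquote_plus, and the round trip is lossless:

from urllib.parse import quote_plus, unquote_plus

key = "/test000/!-_.*'()/&@:,$=+?;/ /whatever"
assert unquote_plus(quote_plus(key)) == key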
Example #9
    def _extract_asset_urls(self, asset_ids):
        """
        Extract asset URLs along with asset ids.

        @param asset_ids: List of ids to get URLs for.
        @type asset_ids: [str]

        @return: List of dictionaries with asset URLs and ids.
        @rtype: [{
            'id': '<id>',
            'url': '<url>'
        }]
        """
        dom = get_page_json(self._session, OPENCOURSE_ASSET_URL,
                            ids=quote_plus(','.join(asset_ids)))

        return [{'id': element['id'],
                 'url': element['url'].strip()}
                for element in dom['elements']]
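Joining the ids first and then quoting encodes the separators too (',' becomes '%2C'); whether the commas should be escaped depends on how the endpoint splits the parameter. For instance:

from urllib.parse import quote_plus

asset_ids = ['a1 b', 'c2']
print(quote_plus(','.join(asset_ids)))  # -> a1+b%2Cc2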
Example #10
    def download_profile(self, request, user_id=None):
        """
        Download User Profile Endpoint
        ---
        omit_serializer: True
        omit_parameters:
            - query
        """
        current_url = '%s?%s' % (
            reverse(request.resolver_match.url_name, kwargs={'user_id': user_id}),
            urlencode(request.query_params)
        )
        login_url = '/signin?next=%s' % quote_plus(current_url)
        if not request.user.is_authenticated():
            return redirect(login_url)

        user = get_object_or_404(self.get_queryset(), pk=user_id)

        try:
            self.check_object_permissions(request, user)
        except NotAuthenticated:
            return redirect(login_url)
        except PermissionDenied:
            return HttpResponse("You do not have permission to access this profile")

        ctx = {
            'user': user,
            'profile': user.profile,
            'work': user.work_set.all(),
            'education': user.education_set.all()
        }

        rendered_html = render_to_string("tunga/pdf/profile.html", context=ctx).encode(encoding="UTF-8")

        if request.accepted_renderer.format == 'html':
            return HttpResponse(rendered_html)

        pdf_file = HTML(string=rendered_html, encoding='utf-8').write_pdf()
        http_response = HttpResponse(pdf_file, content_type='application/pdf')
        http_response['Content-Disposition'] = 'filename="developer_profile.pdf"'
        return http_response
Example #11
    def _extract_asset_urls(self, asset_ids):
        """
        Extract asset URLs along with asset ids.

        @param asset_ids: List of ids to get URLs for.
        @type asset_ids: [str]

        @return: List of dictionaries with asset URLs and ids.
        @rtype: [{
            'id': '<id>',
            'url': '<url>'
        }]
        """
        ids = quote_plus(','.join(asset_ids))
        url = OPENCOURSE_ASSET_URL.format(ids=ids)
        page = get_page(self._session, url)
        dom = json.loads(page)

        return [{'id': element['id'],
                 'url': element['url']}
                for element in dom['elements']]
Example #12
 def __init__(self, method_name, **kwargs):
     self.scrubber = GeneralNameReplacer()
     super(VirtualMachineExampleTest, self).__init__(
         method_name,
         config_file=TEST_CONFIG,
         recording_processors=[
             self.scrubber,
             SubscriptionRecordingProcessor(DUMMY_UUID),
             AccessTokenReplacer(),
         ],
         replay_patches=[
             patch_long_run_operation_delay,
         ]
     )
     if self.in_recording:
         constants_to_scrub = [
             (os.environ['AZURE_CLIENT_ID'], DUMMY_UUID),
             (os.environ['AZURE_CLIENT_SECRET'], DUMMY_SECRET),
             (os.environ['AZURE_TENANT_ID'], DUMMY_UUID),
             (STORAGE_ACCOUNT_NAME, DUMMY_STORAGE_NAME)
         ]
         for key, replacement in constants_to_scrub:
             self.scrubber.register_name_pair(key, replacement)
             self.scrubber.register_name_pair(quote_plus(key), replacement)
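Registering both the raw secret and its quote_plus form lets the scrubber also catch occurrences that were URL-encoded into recorded request URLs, since secrets often change shape under encoding (the secret below is made up):

from urllib.parse import quote_plus

secret = 'abc/def+g=='
print(quote_plus(secret))  # -> abc%2Fdef%2Bg%3D%3D  (a plain-text match would miss this form)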
Example #13
    def __check_cache(self, media_id):
        url = '{0}/magnet/instant?agent={1}&apikey={2}&magnets[]={3}'.format(api_url, urllib_parse.quote_plus(AGENT), self.get_setting('token'), media_id.lower())
        result = self.net.http_GET(url, headers=self.headers).content
        result = json.loads(result)
        if result.get('status', False) == "success":
            magnets = result.get('data').get('magnets')
            for magnet in magnets:
                if media_id == magnet.get('magnet') or media_id == magnet.get('hash'):
                    response = magnet.get('instant', False)
                    return response

        return False
Example #14
def addDownLink(name, url, mode, iconimage, desc='', stream=None, fav='add', noDownload=False, contextm=None, fanart=None, duration=None, quality=None):
    contextMenuItems = []
    favtext = "Remove from" if fav == 'del' else "Add to"  # fav == 'add' or 'del'
    u = (sys.argv[0]
         + "?url=" + urllib_parse.quote_plus(url)
         + "&mode=" + str(mode)
         + "&name=" + urllib_parse.quote_plus(name))
    dwnld = (sys.argv[0]
             + "?url=" + urllib_parse.quote_plus(url)
             + "&mode=" + str(mode)
             + "&download=" + str(1)
             + "&name=" + urllib_parse.quote_plus(name))
    favorite = (sys.argv[0]
                + "?url=" + urllib_parse.quote_plus(url)
                + "&fav=" + fav
                + "&favmode=" + str(mode)
                + "&mode=" + str('favorites.Favorites')
                + "&img=" + urllib_parse.quote_plus(iconimage)
                + "&name=" + urllib_parse.quote_plus(name))
    ok = True
    if not iconimage:
        iconimage = cuminationicon
    if duration:
        if addon.getSetting('duration_in_name') == 'true':
            duration = " [COLOR deeppink]" + duration + "[/COLOR]"
            name = name + duration if six.PY3 else (name.decode('utf-8') + duration).encode('utf-8')
        else:
            secs = None
            try:
                duration = duration.upper().replace('H', ':').replace('M', ':').replace('S', '').replace(' ', '').replace('IN', '0').replace('::', ':').strip()
                if ':' in duration:
                    if duration.endswith(':'):
                        duration += '0'
                    secs = sum(int(x) * 60 ** i for i, x in enumerate(reversed(duration.split(':'))))
                elif duration.isdigit():
                    secs = int(duration)
                if secs is None and len(duration) > 0:
                    xbmc.log("@@@@Cumination: Duration format error: " + str(duration), xbmc.LOGERROR)
            except:
                xbmc.log("@@@@Cumination: Duration format error: " + str(duration), xbmc.LOGERROR)
    width = None
    if quality:
        if addon.getSetting('quality_in_name') == 'true':
            quality = " [COLOR orange]" + quality + "[/COLOR]"
            name = name + quality if six.PY3 else (name.decode('utf-8') + quality).encode('utf-8')
        else:
            width, height = resolution(quality)
    liz = xbmcgui.ListItem(name)
    if duration and addon.getSetting('duration_in_name') != 'true':
        liz.setInfo(type="Video", infoLabels={"Duration": secs})
    liz.setArt({'thumb': iconimage, 'icon': "DefaultVideo.png", 'poster': iconimage})
    if not fanart:
        fanart = os.path.join(rootDir, 'fanart.jpg')
        if addon.getSetting('posterfanart') == 'true':
            fanart = iconimage
    liz.setArt({'fanart': fanart})
    if stream:
        liz.setProperty('IsPlayable', 'true')
    if desc:
        liz.setInfo(type="Video", infoLabels={"Title": name, "plot": desc, "plotoutline": desc})
    else:
        liz.setInfo(type="Video", infoLabels={"Title": name})
    if width:
        video_streaminfo = {'codec': 'h264', 'width': width, 'height': height}
    else:
        video_streaminfo = {'codec': 'h264'}
    liz.addStreamInfo('video', video_streaminfo)
    if contextm:
        if isinstance(contextm, list):
            for i in contextm:
                if isinstance(i, tuple):
                    contextMenuItems.append(i)
        else:
            if isinstance(contextm, tuple):
                contextMenuItems.append(contextm)
    contextMenuItems.append(('[COLOR hotpink]' + favtext + ' favorites[/COLOR]', 'RunPlugin(' + favorite + ')'))
    if fav == 'del':
        favorite_move_to_end = (sys.argv[0]
                                + "?url=" + urllib_parse.quote_plus(url)
                                + "&fav=" + 'move_to_end'
                                + "&favmode=" + str(mode)
                                + "&mode=" + str('favorites.Favorites')
                                + "&img=" + urllib_parse.quote_plus(iconimage)
                                + "&name=" + urllib_parse.quote_plus(name))
        contextMenuItems.append(('[COLOR hotpink]Move favorite to Top[/COLOR]', 'RunPlugin(' + favorite_move_to_end + ')'))
        favorite_move_up = (sys.argv[0]
                            + "?url=" + urllib_parse.quote_plus(url)
                            + "&fav=" + 'move_up'
                            + "&favmode=" + str(mode)
                            + "&mode=" + str('favorites.Favorites')
                            + "&img=" + urllib_parse.quote_plus(iconimage)
                            + "&name=" + urllib_parse.quote_plus(name))
        contextMenuItems.append(('[COLOR hotpink]Move favorite Up[/COLOR]', 'RunPlugin(' + favorite_move_up + ')'))
        favorite_move_down = (sys.argv[0]
                              + "?url=" + urllib_parse.quote_plus(url)
                              + "&fav=" + 'move_down'
                              + "&favmode=" + str(mode)
                              + "&mode=" + str('favorites.Favorites')
                              + "&img=" + urllib_parse.quote_plus(iconimage)
                              + "&name=" + urllib_parse.quote_plus(name))
        contextMenuItems.append(('[COLOR hotpink]Move favorite Down[/COLOR]', 'RunPlugin(' + favorite_move_down + ')'))

    if not noDownload:
        contextMenuItems.append(('[COLOR hotpink]Download Video[/COLOR]', 'RunPlugin(' + dwnld + ')'))
    settings_url = (sys.argv[0]
                    + "?mode=" + str('utils.openSettings'))
    contextMenuItems.append(
        ('[COLOR hotpink]Addon settings[/COLOR]', 'RunPlugin(' + settings_url + ')'))
    setview = (sys.argv[0]
               + "?mode=" + str('utils.setview'))
    contextMenuItems.append(
        ('[COLOR hotpink]Set this view as default[/COLOR]', 'RunPlugin(' + setview + ')'))
    liz.addContextMenuItems(contextMenuItems, replaceItems=False)
    ok = xbmcplugin.addDirectoryItem(handle=addon_handle, url=u, listitem=liz, isFolder=False)
    return ok
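Each quote_plus call in these hand-built plugin URLs keeps '&', '=' and spaces inside a value from being misread as parameter separators; receiving code can recover the originals with parse_qs. A compact sketch of the round trip (the plugin URL is illustrative):

from urllib.parse import quote_plus, parse_qs

u = ('plugin://plugin.video.example/'
     '?url=' + quote_plus('https://host/video?id=1&lang=en')
     + '&mode=' + str(5)
     + '&name=' + quote_plus('Some Title (2020)'))
params = parse_qs(u.split('?', 1)[1])
print(params['url'][0])   # -> https://host/video?id=1&lang=en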
Example #15
def addDir(name, url, mode, iconimage=None, page=None, channel=None, section=None, keyword='', Folder=True, about=None,
           custom=False, list_avail=True, listitem_id=None, custom_list=False, contextm=None):
    u = (sys.argv[0]
         + "?url=" + urllib_parse.quote_plus(url)
         + "&mode=" + str(mode)
         + "&page=" + str(page)
         + "&channel=" + str(channel)
         + "&section=" + str(section)
         + "&keyword=" + urllib_parse.quote_plus(keyword)
         + "&name=" + urllib_parse.quote_plus(name))
    ok = True
    if not iconimage:
        iconimage = cuminationicon
    liz = xbmcgui.ListItem(name)
    fanart = os.path.join(rootDir, 'fanart.jpg')
    art = {'thumb': iconimage, 'icon': "DefaultFolder.png", 'fanart': fanart}
    if addon.getSetting('posterfanart') == 'true':
        fanart = iconimage
        art.update({'poster': iconimage})
    liz.setArt(art)
    liz.setInfo(type="Video", infoLabels={"Title": name})

    contextMenuItems = []
    if contextm:
        if isinstance(contextm, list):
            for i in contextm:
                if isinstance(i, tuple):
                    contextMenuItems.append(i)
        else:
            if isinstance(contextm, tuple):
                contextMenuItems.append(contextm)
    if about:
        about_url = (sys.argv[0]
                     + "?mode=" + str('main.about_site')
                     + "&img=" + urllib_parse.quote_plus(iconimage)
                     + "&name=" + urllib_parse.quote_plus(name)
                     + "&about=" + str(about)
                     + "&custom=" + str(custom))
        contextMenuItems.append(
            ('[COLOR hotpink]About site[/COLOR]', 'RunPlugin(' + about_url + ')'))
    if len(keyword) >= 1:
        keyw = (sys.argv[0]
                + "?mode=" + str('utils.delKeyword')
                + "&keyword=" + urllib_parse.quote_plus(keyword))
        contextMenuItems.append(('[COLOR hotpink]Remove keyword[/COLOR]', 'RunPlugin(' + keyw + ')'))
    if list_avail:
        list_item_name = 'Add item to ...'
        list_url = (sys.argv[0]
                    + "?url=" + urllib_parse.quote_plus(url)
                    + "&favmode=" + str(mode)
                    + "&mode=" + str('favorites.add_listitem')
                    + "&img=" + urllib_parse.quote_plus(iconimage)
                    + "&name=" + urllib_parse.quote_plus(name))
        contextMenuItems.append(('[COLOR hotpink]%s[/COLOR]' % list_item_name, 'RunPlugin(' + list_url + ')'))
    if listitem_id:
        move_listitem_url = (sys.argv[0]
                             + "?mode=" + str('favorites.move_listitem')
                             + "&listitem_id=" + str(listitem_id))
        contextMenuItems.append(('[COLOR hotpink]Move item to ...[/COLOR]', 'RunPlugin(' + move_listitem_url + ')'))
        listitem_url = (sys.argv[0]
                        + "?mode=" + str('favorites.remove_listitem')
                        + "&listitem_id=" + str(listitem_id))
        contextMenuItems.append(('[COLOR hotpink]Remove from list[/COLOR]', 'RunPlugin(' + listitem_url + ')'))
        moveupitem_url = (sys.argv[0]
                          + "?mode=" + str('favorites.moveup_listitem')
                          + "&listitem_id=" + str(listitem_id))
        contextMenuItems.append(('[COLOR hotpink]Move item Up[/COLOR]', 'RunPlugin(' + moveupitem_url + ')'))
        movedownitem_url = (sys.argv[0]
                            + "?mode=" + str('favorites.movedown_listitem')
                            + "&listitem_id=" + str(listitem_id))
        contextMenuItems.append(('[COLOR hotpink]Move item Down[/COLOR]', 'RunPlugin(' + movedownitem_url + ')'))

    if custom_list:
        editlist_url = (sys.argv[0]
                        + "?mode=" + str('favorites.edit_list')
                        + "&rowid=" + str(url))
        contextMenuItems.append(('[COLOR hotpink]Edit name[/COLOR]', 'RunPlugin(' + editlist_url + ')'))
        dellist_url = (sys.argv[0]
                       + "?mode=" + str('favorites.remove_list')
                       + "&rowid=" + str(url))
        contextMenuItems.append(('[COLOR hotpink]Remove list[/COLOR]', 'RunPlugin(' + dellist_url + ')'))
        moveuplist_url = (sys.argv[0]
                          + "?mode=" + str('favorites.moveup_list')
                          + "&rowid=" + str(url))
        contextMenuItems.append(('[COLOR hotpink]Move list Up[/COLOR]', 'RunPlugin(' + moveuplist_url + ')'))
        movedownlist_url = (sys.argv[0]
                            + "?mode=" + str('favorites.movedown_list')
                            + "&rowid=" + str(url))
        contextMenuItems.append(('[COLOR hotpink]Move list Down[/COLOR]', 'RunPlugin(' + movedownlist_url + ')'))

    settings_url = (sys.argv[0]
                    + "?mode=" + str('utils.openSettings'))
    contextMenuItems.append(
        ('[COLOR hotpink]Addon settings[/COLOR]', 'RunPlugin(' + settings_url + ')'))
    setview = (sys.argv[0]
               + "?mode=" + str('utils.setview'))
    contextMenuItems.append(
        ('[COLOR hotpink]Set this view as default[/COLOR]', 'RunPlugin(' + setview + ')'))
    liz.addContextMenuItems(contextMenuItems, replaceItems=False)
    ok = xbmcplugin.addDirectoryItem(handle=addon_handle, url=u, listitem=liz, isFolder=Folder)
    return ok
Example #16
def resolve(regex):
    try:
        vanilla = re.compile('(<regex>.+)',
                             re.MULTILINE | re.DOTALL).findall(regex)[0]
        cddata = re.compile(r'<!\[CDATA\[(.+?)\]\]>',
                            re.MULTILINE | re.DOTALL).findall(regex)
        for i in cddata:
            regex = regex.replace('<![CDATA[' + i + ']]>',
                                  urllib_parse.quote_plus(i))

        regexs = re.compile('(<regex>.+)',
                            re.MULTILINE | re.DOTALL).findall(regex)[0]
        regexs = re.compile('<regex>(.+?)</regex>',
                            re.MULTILINE | re.DOTALL).findall(regexs)
        regexs = [
            re.compile('<(.+?)>(.*?)</.+?>',
                       re.MULTILINE | re.DOTALL).findall(i) for i in regexs
        ]

        regexs = [
            dict([(client.replaceHTMLCodes(x[0]),
                   client.replaceHTMLCodes(urllib_parse.unquote_plus(x[1])))
                  for x in i]) for i in regexs
        ]
        regexs = [(i['name'], i) for i in regexs]
        regexs = dict(regexs)

        url = regex.split('<regex>', 1)[0].strip()
        url = client.replaceHTMLCodes(url)
        url = control.six_encode(url)

        r = getRegexParsed(regexs, url)

        try:
            ln = ''
            ret = r[1]
            listrepeat = r[2]['listrepeat']
            regexname = r[2]['name']

            for obj in ret:
                try:
                    item = listrepeat
                    for i in list(range(len(obj) + 1)):
                        item = item.replace(
                            '[%s.param%s]' % (regexname, str(i)), obj[i - 1])

                    item2 = vanilla
                    for i in list(range(len(obj) + 1)):
                        item2 = item2.replace(
                            '[%s.param%s]' % (regexname, str(i)), obj[i - 1])

                    item2 = re.compile('(<regex>.+?</regex>)',
                                       re.MULTILINE | re.DOTALL).findall(item2)
                    item2 = [
                        x for x in item2
                        if not '<name>%s</name>' % regexname in x
                    ]
                    item2 = ''.join(item2)

                    ln += '\n<item>%s\n%s</item>\n' % (item, item2)
                except:
                    pass

            return ln
        except:
            pass

        if r[1] == True:
            return r[0]
    except:
        return
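The quote_plus/unquote_plus pair acts as temporary armor here: CDATA bodies are escaped before the blob is regex-parsed so their '<' and '>' cannot confuse the <regex> extraction, then unescaped when the per-regex dicts are built. The round trip loses nothing:

from urllib.parse import quote_plus, unquote_plus

body = '<expres>http://host/stream?id=$doregex[x]</expres>'
assert unquote_plus(quote_plus(body)) == body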
Example #17
def getRegexParsed(regexs, url, cookieJar=None, forCookieJarOnly=False, recursiveCall=False, cachedPages={}, rawPost=False, cookie_jar_file=None):  # 0,1,2 = URL, regexOnly, CookieJarOnly
        #cachedPages = {}
        #print 'url',url
        doRegexs = re.compile(r'\$doregex\[([^\]]*)\]').findall(url)
#        print 'doRegexs',doRegexs,regexs
        setresolved=True
        for k in doRegexs:
            if k in regexs:
                #print 'processing ' ,k
                m = regexs[k]
                #print m
                cookieJarParam=False
                if  'cookiejar' in m: # so either create or reuse existing jar
                    #print 'cookiejar exists',m['cookiejar']
                    cookieJarParam=m['cookiejar']
                    if  '$doregex' in cookieJarParam:
                        cookieJar=getRegexParsed(regexs, m['cookiejar'],cookieJar,True, True,cachedPages)
                        cookieJarParam=True
                    else:
                        cookieJarParam=True
                #print 'm[cookiejar]',m['cookiejar'],cookieJar
                if cookieJarParam:
                    if cookieJar is None:
                        #print 'create cookie jar'
                        cookie_jar_file=None
                        if 'open[' in m['cookiejar']:
                            cookie_jar_file=m['cookiejar'].split('open[')[1].split(']')[0]
#                            print 'cookieJar from file name',cookie_jar_file

                        cookieJar=getCookieJar(cookie_jar_file)
#                        print 'cookieJar from file',cookieJar
                        if cookie_jar_file:
                            saveCookieJar(cookieJar,cookie_jar_file)
                        #cookieJar = http_cookiejar.LWPCookieJar()
                        #print 'cookieJar new',cookieJar
                    elif 'save[' in m['cookiejar']:
                        cookie_jar_file=m['cookiejar'].split('save[')[1].split(']')[0]
                        complete_path=os.path.join(profile,cookie_jar_file)
#                        print 'complete_path',complete_path
                        saveCookieJar(cookieJar,cookie_jar_file)

                if  m['page'] and '$doregex' in m['page']:
                    pg=getRegexParsed(regexs, m['page'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
                    if len(pg)==0:
                        pg='http://regexfailed'
                    m['page']=pg

                if 'setcookie' in m and m['setcookie'] and '$doregex' in m['setcookie']:
                    m['setcookie']=getRegexParsed(regexs, m['setcookie'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
                if 'appendcookie' in m and m['appendcookie'] and '$doregex' in m['appendcookie']:
                    m['appendcookie']=getRegexParsed(regexs, m['appendcookie'],cookieJar,recursiveCall=True,cachedPages=cachedPages)


                if  'post' in m and '$doregex' in m['post']:
                    m['post']=getRegexParsed(regexs, m['post'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
#                    print 'post is now',m['post']

                if  'rawpost' in m and '$doregex' in m['rawpost']:
                    m['rawpost']=getRegexParsed(regexs, m['rawpost'],cookieJar,recursiveCall=True,cachedPages=cachedPages,rawPost=True)
                    #print 'rawpost is now',m['rawpost']

                if 'rawpost' in m and '$epoctime$' in m['rawpost']:
                    m['rawpost']=m['rawpost'].replace('$epoctime$',getEpocTime())

                if 'rawpost' in m and '$epoctime2$' in m['rawpost']:
                    m['rawpost']=m['rawpost'].replace('$epoctime2$',getEpocTime2())


                link=''
                if m['page'] and m['page'] in cachedPages and not 'ignorecache' in m and forCookieJarOnly==False :
                    #print 'using cache page',m['page']
                    link = cachedPages[m['page']]
                else:
                    if m['page'] and  not m['page']=='' and  m['page'].startswith('http'):
                        if '$epoctime$' in m['page']:
                            m['page']=m['page'].replace('$epoctime$',getEpocTime())
                        if '$epoctime2$' in m['page']:
                            m['page']=m['page'].replace('$epoctime2$',getEpocTime2())

                        #print 'Ingoring Cache',m['page']
                        page_split=m['page'].split('|')
                        pageUrl=page_split[0]
                        header_in_page=None
                        if len(page_split)>1:
                            header_in_page=page_split[1]

#                        print 'urllib_request.getproxies',urllib_request.getproxies()
                        current_proxies=urllib_request.ProxyHandler(urllib_request.getproxies())
        
        
                        #print 'getting pageUrl',pageUrl
                        req = urllib_request.Request(pageUrl)
                        if 'proxy' in m:
                            proxytouse= m['proxy']
#                            print 'proxytouse',proxytouse
#                            urllib_request.getproxies= lambda: {}
                            if pageUrl[:5]=="https":
                                proxy = urllib_request.ProxyHandler({ 'https' : proxytouse})
                                #req.set_proxy(proxytouse, 'https')
                            else:
                                proxy = urllib_request.ProxyHandler({ 'http'  : proxytouse})
                                #req.set_proxy(proxytouse, 'http')
                            opener = urllib_request.build_opener(proxy)
                            urllib_request.install_opener(opener)
                            
                        
                        req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1')
                        proxytouse=None

                        if 'referer' in m:
                            req.add_header('Referer', m['referer'])
                        if 'accept' in m:
                            req.add_header('Accept', m['accept'])
                        if 'agent' in m:
                            req.add_header('User-agent', m['agent'])
                        if 'x-req' in m:
                            req.add_header('X-Requested-With', m['x-req'])
                        if 'x-addr' in m:
                            req.add_header('x-addr', m['x-addr'])
                        if 'x-forward' in m:
                            req.add_header('X-Forwarded-For', m['x-forward'])
                        if 'setcookie' in m:
#                            print 'adding cookie',m['setcookie']
                            req.add_header('Cookie', m['setcookie'])
                        if 'appendcookie' in m:
#                            print 'appending cookie to cookiejar',m['appendcookie']
                            cookiestoApend=m['appendcookie']
                            cookiestoApend=cookiestoApend.split(';')
                            for h in cookiestoApend:
                                n,v=h.split('=')
                                w,n= n.split(':')
                                ck = http_cookiejar.Cookie(version=0, name=n, value=v, port=None, port_specified=False, domain=w, domain_specified=False, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False)
                                cookieJar.set_cookie(ck)
                        if 'origin' in m:
                            req.add_header('Origin', m['origin'])
                        if header_in_page:
                            header_in_page=header_in_page.split('&')
                            for h in header_in_page:
                                n,v=h.split('=')
                                req.add_header(n,v)
                        
                        if cookieJar is not None:
#                            print 'cookieJarVal',cookieJar
                            cookie_handler = urllib_request.HTTPCookieProcessor(cookieJar)
                            opener = urllib_request.build_opener(cookie_handler, urllib_request.HTTPBasicAuthHandler(), urllib_request.HTTPHandler())
                            opener = urllib_request.install_opener(opener)
#                            print 'noredirect','noredirect' in m
                            
                            if 'noredirect' in m:
                                opener = urllib_request.build_opener(cookie_handler,NoRedirection, urllib_request.HTTPBasicAuthHandler(), urllib_request.HTTPHandler())
                                opener = urllib_request.install_opener(opener)
                        elif 'noredirect' in m:
                            opener = urllib_request.build_opener(NoRedirection, urllib_request.HTTPBasicAuthHandler(), urllib_request.HTTPHandler())
                            opener = urllib_request.install_opener(opener)
                            

                        if 'connection' in m:
#                            print '..........................connection//////.',m['connection']
                            from keepalive import HTTPHandler
                            keepalive_handler = HTTPHandler()
                            opener = urllib_request.build_opener(keepalive_handler)
                            urllib_request.install_opener(opener)


                        #print 'after cookie jar'
                        post=None

                        if 'post' in m:
                            postData=m['post']
                            #if '$LiveStreamRecaptcha' in postData:
                            #    (captcha_challenge,catpcha_word,idfield)=processRecaptcha(m['page'],cookieJar)
                            #    if captcha_challenge:
                            #        postData=postData.replace('$LiveStreamRecaptcha','manual_recaptcha_challenge_field:'+captcha_challenge+',recaptcha_response_field:'+catpcha_word+',id:'+idfield)
                            splitpost=postData.split(',');
                            post={}
                            for p in splitpost:
                                n=p.split(':')[0];
                                v=p.split(':')[1];
                                post[n]=v
                            post = urllib_parse.urlencode(post)

                        if 'rawpost' in m:
                            post=m['rawpost']
                            #if '$LiveStreamRecaptcha' in post:
                            #    (captcha_challenge,catpcha_word,idfield)=processRecaptcha(m['page'],cookieJar)
                            #    if captcha_challenge:
                            #       post=post.replace('$LiveStreamRecaptcha','&manual_recaptcha_challenge_field='+captcha_challenge+'&recaptcha_response_field='+catpcha_word+'&id='+idfield)
                        link=''
                        try:
                            
                            if post:
                                response = urllib_request.urlopen(req,post)
                            else:
                                response = urllib_request.urlopen(req)
                            if response.info().get('Content-Encoding') == 'gzip':
                                import gzip
                                buf = six.BytesIO( response.read())
                                f = gzip.GzipFile(fileobj=buf)
                                link = f.read()
                            else:
                                link=response.read()
                                link = control.six_decode(link)
                        
                        
                            if 'proxy' in m and not current_proxies is None:
                                urllib_request.install_opener(urllib_request.build_opener(current_proxies))
                            
                            link=javascriptUnEscape(link)
                            #print repr(link)
                            #print link This just print whole webpage in LOG
                            if 'includeheaders' in m:
                                #link+=str(response.headers.get('Set-Cookie'))
                                link+='$$HEADERS_START$$:'
                                for b in response.headers:
                                    link+= b+':'+response.headers.get(b)+'\n'
                                link+='$$HEADERS_END$$:'
    #                        print link

                            response.close()
                        except: 
                            pass
                        cachedPages[m['page']] = link
                        #print link
                        #print 'store link for',m['page'],forCookieJarOnly

                        if forCookieJarOnly:
                            return cookieJar# do nothing
                    elif m['page'] and  not m['page'].startswith('http'):
                        if m['page'].startswith('$pyFunction:'):
                            val=doEval(m['page'].split('$pyFunction:')[1],'',cookieJar,m )
                            if forCookieJarOnly:
                                return cookieJar# do nothing
                            link=val
                            link=javascriptUnEscape(link)
                        else:
                            link=m['page']

                if  '$doregex' in m['expres']:
                    m['expres']=getRegexParsed(regexs, m['expres'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
                  
                if not m['expres']=='':
                    #print 'doing it ',m['expres']
                    if '$LiveStreamCaptcha' in m['expres']:
                        val=askCaptcha(m,link,cookieJar)
                        #print 'url and val',url,val
                        url = url.replace("$doregex[" + k + "]", val)

                    elif m['expres'].startswith('$pyFunction:') or '#$pyFunction' in m['expres']:
                        #print 'expeeeeeeeeeeeeeeeeeee',m['expres']
                        val=''
                        if m['expres'].startswith('$pyFunction:'):
                            val=doEval(m['expres'].split('$pyFunction:')[1],link,cookieJar,m)
                        else:
                            val=doEvalFunction(m['expres'],link,cookieJar,m)
                        if 'ActivateWindow' in m['expres']: return
                        if forCookieJarOnly:
                            return cookieJar# do nothing
                        if 'listrepeat' in m:
                            listrepeat=m['listrepeat']
                            return listrepeat,eval(val), m,regexs,cookieJar

                        try:
                            url = url.replace(u"$doregex[" + k + "]", val)
                        except: url = url.replace("$doregex[" + k + "]", control.six_decode(val))
                    else:
                        if 'listrepeat' in m:
                            listrepeat=m['listrepeat']
                            ret=re.findall(m['expres'],link)
                            return listrepeat,ret, m,regexs
                             
                        val=''
                        if not link=='':
                            #print 'link',link
                            reg = re.compile(m['expres']).search(link)                            
                            try:
                                val=reg.group(1).strip()
                            except: traceback.print_exc()
                        elif m['page']=='' or m['page']==None:
                            val=m['expres']
                            
                        if rawPost:
#                            print 'rawpost'
                            val=urllib_parse.quote_plus(val)
                        if 'htmlunescape' in m:
                            #val=urllib_parse.unquote_plus(val)
                            val=html_parser.HTMLParser().unescape(val)
                        try:
                            url = url.replace("$doregex[" + k + "]", val)
                        except: url = url.replace("$doregex[" + k + "]", control.six_decode(val))
                        #print 'ur',url
                        #return val
                else:
                    url = url.replace("$doregex[" + k + "]",'')
        if '$epoctime$' in url:
            url=url.replace('$epoctime$',getEpocTime())
        if '$epoctime2$' in url:
            url=url.replace('$epoctime2$',getEpocTime2())

        if '$GUID$' in url:
            import uuid
            url=url.replace('$GUID$',str(uuid.uuid1()).upper())
        if '$get_cookies$' in url:
            url=url.replace('$get_cookies$',getCookiesString(cookieJar))

        if recursiveCall: return url
        #print 'final url',repr(url)
        if url=="":
            return
        else:
            return url,setresolved
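Two encoding paths meet in this function: the 'post' branch builds a dict and hands it to urllib_parse.urlencode, which applies quote_plus to every key and value itself, while the rawPost flag quote_plus-es an extracted value directly. The standard-library behavior, for reference:

from urllib.parse import urlencode

post = {'user': 'a b', 'token': 'x&y=z'}
print(urlencode(post))  # -> user=a+b&token=x%26y%3Dz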
Example #18
    def channelDirectory(self, items):
        if not items: return

        sysaddon = sys.argv[0]

        syshandle = int(sys.argv[1])

        addonPoster, addonBanner = control.addonPoster(), control.addonBanner()

        addonFanart = control.addonFanart()

        traktCredentials = trakt.getTraktCredentialsInfo()

        kodiVersion = control.getKodiVersion()

        isPlayable = 'plugin' not in control.infoLabel('Container.PluginName')

        indicators = playcount.getMovieIndicators(refresh=True) if action == 'movies' else playcount.getMovieIndicators()

        if self.trailer_source == '0': trailerAction = 'tmdb_trailer'
        elif self.trailer_source == '1': trailerAction = 'yt_trailer'
        else: trailerAction = 'imdb_trailer'


        playbackMenu = control.lang(32063) if control.setting('hosts.mode') == '2' else control.lang(32064)

        watchedMenu = control.lang(32068) if trakt.getTraktIndicatorsInfo() == True else control.lang(32066)

        unwatchedMenu = control.lang(32069) if trakt.getTraktIndicatorsInfo() == True else control.lang(32067)

        queueMenu = control.lang(32065)

        traktManagerMenu = control.lang(32070)

        nextMenu = control.lang(32053)

        addToLibrary = control.lang(32551)

        clearProviders = control.lang(32081)

        findSimilar = control.lang(32100)

        infoMenu = control.lang(32101)

        for i in items:
            try:
                imdb, tmdb, title, year = i['imdb'], i['tmdb'], i['originaltitle'], i['year']
                label = i['label'] if 'label' in i and not i['label'] == '0' else title
                label = '%s (%s)' % (label, year)
                if 'channel' in i: label = '[B]%s[/B] : %s' % (i['channel'].upper(), label)

                status = i['status'] if 'status' in i else '0'

                sysname = urllib_parse.quote_plus('%s (%s)' % (title, year))
                systitle = urllib_parse.quote_plus(title)

                meta = dict((k,v) for k, v in six.iteritems(i) if not v == '0')
                meta.update({'imdbnumber': imdb, 'code': tmdb})
                meta.update({'mediatype': 'movie'})
                meta.update({'trailer': '%s?action=%s&name=%s&tmdb=%s&imdb=%s' % (sysaddon, trailerAction, systitle, tmdb, imdb)})
                if not 'duration' in i: meta.update({'duration': '120'})
                elif i['duration'] == '0': meta.update({'duration': '120'})
                try: meta.update({'duration': str(int(meta['duration']) * 60)})
                except: pass
                try: meta.update({'genre': cleangenre.lang(meta['genre'], self.lang)})
                except: pass
                if 'castwiththumb' in i and not i['castwiththumb'] == '0': meta.pop('cast', '0')

                poster = i['poster'] if 'poster' in i and not i['poster'] == '0' else addonPoster
                meta.update({'poster': poster})

                sysmeta = urllib_parse.quote_plus(json.dumps(meta))

                url = '%s?action=play&title=%s&year=%s&imdb=%s&meta=%s&t=%s' % (sysaddon, systitle, year, imdb, sysmeta, self.systime)
                sysurl = urllib_parse.quote_plus(url)

                #path = '%s?action=play&title=%s&year=%s&imdb=%s' % (sysaddon, systitle, year, imdb)

                cm = []

                cm.append((findSimilar, 'Container.Update(%s?action=movies&url=%s)' % (sysaddon, urllib_parse.quote_plus(self.related_link % tmdb))))

                cm.append(('[I]Cast[/I]', 'RunPlugin(%s?action=moviecredits&tmdb=%s&status=%s)' % (sysaddon, tmdb, status)))

                cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))

                try:
                    overlay = int(playcount.getMovieOverlay(indicators, imdb))
                    if overlay == 7:
                        cm.append((unwatchedMenu, 'RunPlugin(%s?action=moviePlaycount&imdb=%s&query=6)' % (sysaddon, imdb)))
                        meta.update({'playcount': 1, 'overlay': 7})
                    else:
                        cm.append((watchedMenu, 'RunPlugin(%s?action=moviePlaycount&imdb=%s&query=7)' % (sysaddon, imdb)))
                        meta.update({'playcount': 0, 'overlay': 6})
                except:
                    pass

                if traktCredentials == True:
                    cm.append((traktManagerMenu, 'RunPlugin(%s?action=traktManager&name=%s&imdb=%s&content=movie)' % (sysaddon, sysname, imdb)))

                cm.append((playbackMenu, 'RunPlugin(%s?action=alterSources&url=%s&meta=%s)' % (sysaddon, sysurl, sysmeta)))

                if kodiVersion < 17:
                    cm.append((infoMenu, 'Action(Info)'))

                cm.append((addToLibrary, 'RunPlugin(%s?action=movieToLibrary&name=%s&title=%s&year=%s&imdb=%s&tmdb=%s)' % (sysaddon, sysname, systitle, year, imdb, tmdb)))

                cm.append(('[I]Scrape Filterless[/I]', 'RunPlugin(%s?action=playUnfiltered&title=%s&year=%s&imdb=%s&meta=%s&t=%s)' % (sysaddon, systitle, year, imdb, sysmeta, self.systime)))

                cm.append((clearProviders, 'RunPlugin(%s?action=clearCacheProviders)' % sysaddon))

                try: item = control.item(label=label, offscreen=True)
                except: item = control.item(label=label)

                art = {}
                art.update({'icon': poster, 'thumb': poster, 'poster': poster})

                fanart = i['fanart'] if 'fanart' in i and not i['fanart'] == '0' else addonFanart

                if self.settingFanart == 'true':
                    art.update({'fanart': fanart})
                else:
                    art.update({'fanart': addonFanart})

                if 'banner' in i and not i['banner'] == '0':
                    art.update({'banner': i['banner']})
                else:
                    art.update({'banner': addonBanner})

                if 'clearlogo' in i and not i['clearlogo'] == '0':
                    art.update({'clearlogo': i['clearlogo']})

                if 'clearart' in i and not i['clearart'] == '0':
                    art.update({'clearart': i['clearart']})

                if 'landscape' in i and not i['landscape'] == '0':
                    landscape = i['landscape']
                else:
                    landscape = fanart
                art.update({'landscape': landscape})

                if 'discart' in i and not i['discart'] == '0':
                    art.update({'discart': i['discart']})

                item.setArt(art)
                item.addContextMenuItems(cm)
                if isPlayable:
                    item.setProperty('IsPlayable', 'true')

                castwiththumb = i.get('castwiththumb')
                if castwiththumb and not castwiththumb == '0':
                    if kodiVersion >= 18:
                        item.setCast(castwiththumb)
                    else:
                        cast = [(p['name'], p['role']) for p in castwiththumb]
                        meta.update({'cast': cast})

                offset = bookmarks.get('movie', imdb, '', '', True)
                if float(offset) > 120:
                    percentPlayed = int(float(offset) / float(meta['duration']) * 100)
                    item.setProperty('resumetime', str(offset))
                    item.setProperty('percentplayed', str(percentPlayed))

                item.setProperty('imdb_id', imdb)
                item.setProperty('tmdb_id', tmdb)
                try: item.setUniqueIDs({'imdb': imdb, 'tmdb': tmdb})
                except: pass

                item.setInfo(type='Video', infoLabels = control.metadataClean(meta))

                video_streaminfo = {'codec': 'h264'}
                item.addStreamInfo('video', video_streaminfo)

                control.addItem(handle=syshandle, url=url, listitem=item, isFolder=False)
            except:
                log_utils.log('channels_dir', 1)
                pass

        control.content(syshandle, 'files')
        control.directory(syshandle, cacheToDisc=True)
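Serializing the whole metadata dict with json.dumps and then quote_plus (sysmeta above) lets arbitrary nested data travel inside a single URL parameter; the receiving action can rebuild it exactly:

import json
from urllib.parse import quote_plus, unquote_plus

meta = {'mediatype': 'movie', 'title': 'Heat (1995)', 'duration': '10200'}
sysmeta = quote_plus(json.dumps(meta))
assert json.loads(unquote_plus(sysmeta)) == meta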
Example #19
def List(url):
    url = update_url(url)
    hdr = dict(utils.base_hdrs)
    hdr['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0'
    try:
        listhtml = utils.getHtml(url, headers=hdr)
    except:
        return None

    cm_sortby = (utils.addon_sys + "?mode=" +
                 str('xvideos.ContextSortbyFilter'))
    cm_date = (utils.addon_sys + "?mode=" + str('xvideos.ContextDateFilter'))
    cm_length = (utils.addon_sys + "?mode=" +
                 str('xvideos.ContextLengthFilter'))
    cm_quality = (utils.addon_sys + "?mode=" +
                  str('xvideos.ContextQualityFilter'))
    cm_filter = [
        ('[COLOR violet]SortBy[/COLOR] [COLOR orange]{}[/COLOR]'.format(
            get_setting('sortby')), 'RunPlugin(' + cm_sortby + ')'),
        ('[COLOR violet]Date[/COLOR] [COLOR orange]{}[/COLOR]'.format(
            get_setting('date')), 'RunPlugin(' + cm_date + ')'),
        ('[COLOR violet]Length[/COLOR] [COLOR orange]{}[/COLOR]'.format(
            get_setting('length')), 'RunPlugin(' + cm_length + ')'),
        ('[COLOR violet]Quality[/COLOR] [COLOR orange]{}[/COLOR]'.format(
            get_setting('quality')), 'RunPlugin(' + cm_quality + ')')
    ]

    match = re.compile(
        r'div id="video.+?href="([^"]+)".+?data-src="([^"]+)"(.+?)title="([^"]+)">.+?duration">([^<]+)<',
        re.DOTALL | re.IGNORECASE).findall(listhtml)
    for videopage, img, res, name, duration in match:
        match = re.search(r'mark">(.+?)<', res)
        res = match.group(1) if match else ''
        name = utils.cleantext(name)
        img = img.replace('THUMBNUM', '5')

        cm_related = (utils.addon_sys + "?mode=" +
                      str('xvideos.ContextRelated') + "&url=" +
                      urllib_parse.quote_plus(videopage))
        cm = [('[COLOR violet]Related videos[/COLOR]',
               'RunPlugin(' + cm_related + ')')]
        if 'k=' in url or '/tags/' in url or '/c/' in url:
            cm += cm_filter

        site.add_download_link(name,
                               site.url[:-1] + videopage,
                               'Playvid',
                               img,
                               name,
                               contextm=cm,
                               duration=duration,
                               quality=res)
    npage = re.compile(r'href="([^"]+)" class="no-page next-page',
                       re.DOTALL | re.IGNORECASE).findall(listhtml)
    if npage:
        npage = npage[0].replace('&amp;', '&')
        np = re.findall(r'\d+', npage)[-1]
        if url.split(site.url)[-1] in ('', 'gay/', 'shemale/'):
            npage = npage.replace('/2', '/1')
        else:
            np = str(int(np) + 1)
        if npage == '#1':
            npage = url + '/1'
        elif npage.startswith('#'):
            new = npage.split('#')[-1]
            old = str(int(new) - 1)
            npage = url.replace('/{}'.format(old), '/{}'.format(new))
        if not npage.startswith('http'):
            npage = site.url[:-1] + npage
        lp = re.compile(r'>(\d+)<', re.DOTALL | re.IGNORECASE).findall(
            listhtml.split('next-page')[0])
        if lp:
            lp = '/' + lp[-1]
        else:
            lp = ''
        site.add_dir('Next Page ({}{})'.format(np, lp), npage, 'List',
                     site.img_next)
    if 'No video match with this search.' in listhtml:
        site.add_dir(
            'No videos found. [COLOR hotpink]Clear all filters.[/COLOR]',
            '',
            'ResetFilters',
            Folder=False,
            contextm=cm_filter)
    utils.eod()
Example #20
 def quote_plus(self):
     """Return the object's URL quote_plus representation."""
     return urllib.quote_plus(self.context)
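This last snippet uses the Python 2 location of the function (urllib.quote_plus); in Python 3 it lives in urllib.parse. Code that must run on both usually imports it like this (or goes through six.moves.urllib.parse, as several of the examples above do):

try:
    from urllib.parse import quote_plus  # Python 3
except ImportError:
    from urllib import quote_plus  # Python 2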
Example #21
    def __direct_dl(self, media_id, torrent=False):
        try:
            if torrent:
                response = self.net.http_GET(torrents_files.format(media_id),
                                             headers=self.headers).content
            else:
                response = self.net.http_GET(linkgen.format(
                    urllib_parse.quote_plus('{"link":"%s"}' % media_id)),
                    headers=self.headers).content

            result = json.loads(response)

            if torrent:
                if result.get('status') == 'OK':
                    _videos = []

                    def _search_tree(d):
                        # Walk the nested result dict, collecting entries flagged as videos.
                        for v in d.values():
                            if isinstance(v, dict) and v.get('isVideo') != 'y':
                                _search_tree(v)
                            elif isinstance(v, dict):
                                _videos.append(v)

                    _search_tree(result)

                    try:
                        link = max(_videos,
                                   key=lambda x: int(x.get('size'))).get('downloadLink', None)
                        stream = self.net.http_GET(link, headers=self.headers).get_url()
                        return stream
                    except Exception:
                        raise ResolverError('Failed to locate largest video file')
                else:
                    raise ResolverError('Unexpected Response Received')
            else:
                stream = result.get('links')[0]

                if stream['status'] != 'OK':
                    raise ResolverError('Link Not Found: {0}'.format(stream.get('error')))
                elif stream['type'] != 'video':
                    raise ResolverError(
                        'Generated link "{0}" does not contain a playable file'.format(
                            stream.get('generated')))
                elif any(item in media_id for item in self.get_hosts()[1]):
                    transfer_info = self.__check_dl_status(stream.get('hash'))

                    if transfer_info.get('percent') != 100:
                        line1 = stream.get('filename')
                        line2 = stream.get('filehost')

                        with common.kodi.ProgressDialog(
                                'ResolveURL Linksnappy transfer', line1, line2) as pd:
                            while self.__check_dl_status(
                                    stream.get('hash')).get('percent') != 100:
                                common.kodi.sleep(2000)
                                transfer_info = self.__check_dl_status(stream.get('hash'))

                                try:
                                    logger.log_debug(
                                        'Transfer with id "{0}" is still in progress, '
                                        'caching... active connections {1}, '
                                        'download speed {2}'.format(
                                            stream.get('hash'),
                                            transfer_info.get('connections'),
                                            transfer_info.get('downloadSpeed')))
                                except ValueError:
                                    pass

                                try:
                                    line1 = stream.get('filename')
                                    line2 = stream.get('filehost')
                                    try:
                                        line3 = ''.join([i18n('download_rate'), ' ',
                                                         transfer_info.get('downloadSpeed')])
                                        pd.update(int(transfer_info.get('percent')),
                                                  line1=line1, line2=line2, line3=line3)
                                    except ValueError:
                                        pd.update(int(transfer_info.get('percent')),
                                                  line1=line1, line2=line2)
                                except ValueError:
                                    pass

                                if pd.is_canceled():
                                    raise ResolverError(
                                        'Transfer ID "{0}" canceled by user'.format(
                                            stream.get('hash')))

                            else:

                                logger.log_debug(
                                    'Transfer with id "{0}" completed'.format(
                                        stream.get('hash')))
                                pd.update(percent=100)
                                return stream.get('generated')

                    else:

                        # transfer already complete; fall through to the
                        # common return below
                        pass

                return stream.get('generated')

        except Exception as e:

            # _, __, tb = sys.exc_info()
            #
            # print traceback.print_tb(tb)

            logger.log_debug(
                'Linksnappy, error at __direct_dl function: {0}'.format(e))

        return None
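The caching branch above is a poll-until-complete loop: query the transfer status, sleep, refresh the dialog, bail out on cancel. A minimal Kodi-free sketch of the same pattern, where check() and cancelled() are hypothetical stand-ins for __check_dl_status() and pd.is_canceled():

import time

def wait_for_transfer(check, cancelled, interval=2.0):
    # Poll check() until it reports 100 percent, aborting on cancel,
    # just as __direct_dl does while the transfer is still caching.
    while check() != 100:
        if cancelled():
            raise RuntimeError('transfer cancelled by user')
        time.sleep(interval)
    return True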
Example #22
0
def BGList(url, page=1):
    listjson = utils.getHtml(url, site.url)
    jdata = json.loads(listjson)

    for video in jdata:
        tag = ''
        slug = ''
        fc_facts = video["fc_facts"]
        for t in video["tags"]:
            if t["is_owner"]:
                tag = t["tg_name"]
                slug = t["tg_slug"]
        tag = tag if utils.PY3 else tag.encode('utf8')
        name = video["file"]["stuff"]["sf_name"] if "sf_name" in video["file"][
            "stuff"] else tag
        name = name if utils.PY3 else name.encode('utf8')
        name = '{} - {}'.format(tag, name)
        story = video["file"]["stuff"]["sf_story"] if "sf_story" in video[
            "file"]["stuff"] else ''
        story = story if utils.PY3 else story.encode('utf8')
        if "fl_duration" in video["file"]:
            m, s = divmod(video["file"]["fl_duration"], 60)
            duration = '{:d}:{:02d}'.format(m, s)
        else:
            duration = ''

        h = video["file"]["fl_height"]
        w = video["file"]["fl_width"]
        quality = str(h) + 'p' if "fl_height" in video["file"] else ''
        th_size = '480x' + str((480 * h) // w)
        plot = tag + ' - ' + name + '[CR]' + story

        thumb = str(random.choice(fc_facts[0]["fc_thumbs"]))
        videodump = json.dumps(video)
        videopage = base64.b64encode(videodump.encode())
        # videopage = 'https://store.externulls.com/facts/file/' + str(video["fc_file_id"])
        if "set_id" in video["file"]:
            img = 'https://thumbs-015.externulls.com/sets/{0}/thumbs/{0}-{1}.jpg?size={2}'.format(
                str(video["file"]["set_id"]).zfill(5), thumb.zfill(4), th_size)
        else:
            img = 'https://thumbs-015.externulls.com/videos/{0}/{1}.jpg?size={2}'.format(
                str(video["fc_file_id"]), thumb, th_size)
        parts = ''
        if len(fc_facts) > 1:
            parts = '[COLOR blue] ({} parts)[/COLOR]'.format(len(fc_facts))
            for fc_fact in fc_facts:
                if "fc_start" not in fc_fact:
                    parts = ''

        if len(fc_facts) == 1 and "fc_start" in fc_facts[
                0] and "fc_end" in fc_facts[0]:
            min_start, sec_start = divmod(fc_facts[0]["fc_start"], 60)
            min_end, sec_end = divmod(fc_facts[0]["fc_end"], 60)
            parts = '[COLOR blue] ({:d}:{:02d} - {:d}:{:02d})[/COLOR]'.format(
                min_start, sec_start, min_end, sec_end)

        name += parts

        cm_related = (utils.addon_sys + "?mode=" + str('beeg.ContextRelated') +
                      "&slug=" + urllib_parse.quote_plus(slug))
        if tag:
            cm = [('[COLOR violet]Tag [COLOR orange][{}][/COLOR]'.format(tag),
                   'RunPlugin(' + cm_related + ')')]
        else:
            cm = ''

        site.add_download_link(name,
                               videopage,
                               'BGPlayvid',
                               img,
                               plot,
                               contextm=cm,
                               duration=duration,
                               quality=quality)
    if len(jdata) == 48:
        if not page:
            page = 1
        npage = url.split('offset=')[0] + 'offset=' + str(page * 48)
        cm_page = (utils.addon_sys + "?mode=beeg.GotoPage" + "&url=" +
                   urllib_parse.quote_plus(npage) + "&np=" + str(page))
        cm = [('[COLOR violet]Goto Page #[/COLOR]',
               'RunPlugin(' + cm_page + ')')]
        site.add_dir('Next Page ({})'.format(str(page + 1)),
                     npage,
                     'BGList',
                     site.img_next,
                     page=page + 1,
                     contextm=cm)
    utils.eod()
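Note how BGList smuggles the whole video record to the player: instead of a page URL, videopage is the JSON-serialized record, base64-encoded. A sketch of the round trip with a made-up record:

import base64
import json

video = {'fc_file_id': 123, 'file': {'stuff': {'sf_name': 'clip'}}}  # hypothetical record
videopage = base64.b64encode(json.dumps(video).encode())  # what add_download_link receives
restored = json.loads(base64.b64decode(videopage).decode())
assert restored == video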
Example #23
0
File: mdx.py Project: leifj/pyFF
    def webfinger(self, resource=None, rel=None):
        """An implementation the webfinger protocol (http://tools.ietf.org/html/draft-ietf-appsawg-webfinger-12)
        in order to provide information about up and downstream metadata available at this pyFF instance.

Example:

.. code-block:: bash

        # curl http://localhost:8080/.well-known/webfinger?resource=http://localhost:8080

This should result in a JSON structure that looks something like this:

.. code-block:: json

        {"expires": "2013-04-13T17:40:42.188549",
         "links": [
            {"href": "http://reep.refeds.org:8080/role/sp.xml", "rel": "urn:oasis:names:tc:SAML:2.0:metadata"},
            {"href": "http://reep.refeds.org:8080/role/sp.json", "rel": "disco-json"}],
         "subject": "http://reep.refeds.org:8080"}

Depending on which version of pyFF you're running and the configuration you may also see downstream metadata
listed using the 'role' attribute to the link elements.
        """
        if resource is None:
            resource = cherrypy.request.base

        jrd = dict()
        dt = datetime.now() + duration2timedelta("PT1H")
        jrd['expires'] = dt.isoformat()
        jrd['subject'] = cherrypy.request.base
        links = list()
        jrd['links'] = links

        _dflt_rels = {
            'urn:oasis:names:tc:SAML:2.0:metadata': '.xml',
            'disco-json': '.json'
        }

        if rel is None:
            rel = list(_dflt_rels.keys())
        else:
            rel = [rel]

        def _links(url):
            if url.startswith('/'):
                url = url.lstrip('/')
            for r in rel:
                suffix = ""
                if not url.endswith('/'):
                    suffix = _dflt_rels[r]
                links.append(dict(rel=r,
                                  href='%s/%s%s' % (cherrypy.request.base, url, suffix)))

        _links('/entities/')
        for a in self.server.md.store.collections():
            if a is not None and '://' not in a:
                _links(a)

        for entity_id in self.server.md.store.entity_ids():
            _links("/metadata/%s" % hash_id(entity_id))

        for a in list(self.server.aliases.keys()):
            for v in self.server.md.store.attribute(self.server.aliases[a]):
                _links('%s/%s' % (a, quote_plus(v)))

        cherrypy.response.headers['Content-Type'] = 'application/json'
        cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
        return dumps(jrd)
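From the client side the endpoint can be exercised just as the docstring's curl line suggests; a minimal sketch, assuming a pyFF instance is listening on localhost:8080:

import json
from urllib.parse import quote_plus
from urllib.request import urlopen

base = 'http://localhost:8080'
url = '{0}/.well-known/webfinger?resource={1}'.format(base, quote_plus(base))
jrd = json.loads(urlopen(url).read())
for link in jrd.get('links', []):
    print(link.get('rel'), link.get('href'))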
Example #24
0
def ContextRelated(slug):
    url = 'https://store.externulls.com/facts/tag?slug={}&get_original=true&limit=48&offset=0'.format(
        slug)
    contexturl = (utils.addon_sys + "?mode=" + str('beeg.BGList') + "&url=" +
                  urllib_parse.quote_plus(url))
    xbmc.executebuiltin('Container.Update(' + contexturl + ')')
Example #25
0
 def filter_url(attrs, new=False):
     if not attrs.get((None, 'href'), '').startswith('http://bouncer'):
         quoted = quote_plus(attrs[(None, 'href')])
         attrs[(None, 'href')] = 'http://bouncer/?u={0!s}'.format(quoted)
     return attrs
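The (None, 'href') keys match the attrs mapping that bleach's linkify callbacks receive. Running the rewrite by hand shows the effect (the sample href is made up):

from urllib.parse import quote_plus

attrs = {(None, 'href'): 'https://example.com/?q=a b'}
quoted = quote_plus(attrs[(None, 'href')])
attrs[(None, 'href')] = 'http://bouncer/?u={0!s}'.format(quoted)
print(attrs[(None, 'href')])
# http://bouncer/?u=https%3A%2F%2Fexample.com%2F%3Fq%3Da+b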
Example #26
0
    def __init__(self, *args, **kwargs):
        self.keyword = quote_plus(kwargs.get("k", "投票"))

        super(WechatVoteSpider, self).__init__(*args, **kwargs)
Example #27
0
def PTList(url, page=1):
    hdr = dict(utils.base_hdrs)
    hdr['Cookie'] = get_cookies()
    listhtml = utils.getHtml(url, site.url, headers=hdr)
    if ptlogged and ('>Log in<' in listhtml):
        if PTLogin(False):
            hdr['Cookie'] = get_cookies()
            listhtml = utils.getHtml(url, site.url, headers=hdr)
        else:
            return None

    match = re.compile(
        r'class="video-.+?data-src="([^"]+)".+?/ul>(.+?)title.+?class="quality">([^<]+).+?clock-o"></i>\s*([^<]+).+?href="([^"]+).+?>([^<]+)',
        re.DOTALL | re.IGNORECASE).findall(listhtml)
    for img, private, hd, duration, videopage, name in match:
        name = utils.cleantext(name)
        if 'private' in private.lower():
            if not ptlogged:
                continue
            private = "[COLOR blue][PV][/COLOR] "
        else:
            private = ""
        if any(x in hd for x in ['720', '1080']):
            hd = "[COLOR orange]HD[/COLOR] "
        elif any(x in hd for x in ['1440', '2160']):
            hd = "[COLOR yellow]4K[/COLOR] "
        else:
            hd = ""
        name = "{0}{1}".format(private, name)  # , hd, duration)
        if img.startswith('//'):
            img = 'https:' + img
        elif img.startswith('/'):
            img = site.url[:-1] + img
        img = re.sub(r"http:", "https:", img)
        imgint = randint(1, 10)
        newimg = str(imgint) + '.jpg'
        img = img.replace('1.jpg', newimg)
        img = img.replace(' ', '%20')
        img = img + '|Referer=' + url
        contextmenu = []
        if ptlogged:
            contexturl = (utils.addon_sys + "?mode=" +
                          str('porntrex.PTCheck_pornstars') + "&url=" +
                          urllib_parse.quote_plus(videopage))
            contextmenu.append(
                ('[COLOR deeppink]Add pornstar to subscriptions[/COLOR]',
                 'RunPlugin(' + contexturl + ')'))
            if 'my_favourite_videos' in url:
                contextdel = (utils.addon_sys + "?mode=" +
                              str('porntrex.ContextMenu') + "&url=" +
                              urllib_parse.quote_plus(videopage) + "&fav=del")
                contextmenu.append(
                    ('[COLOR violet]Delete from PT favorites[/COLOR]',
                     'RunPlugin(' + contextdel + ')'))
            else:
                contextadd = (utils.addon_sys + "?mode=" +
                              str('porntrex.ContextMenu') + "&url=" +
                              urllib_parse.quote_plus(videopage) + "&fav=add")
                contextmenu.append(
                    ('[COLOR violet]Add to PT favorites[/COLOR]',
                     'RunPlugin(' + contextadd + ')'))

        contexturl = (utils.addon_sys + "?mode=" +
                      str('porntrex.PTCheck_tags') + "&url=" +
                      urllib_parse.quote_plus(videopage))
        contextmenu.append(('[COLOR deeppink]Lookup tags[/COLOR]',
                            'RunPlugin(' + contexturl + ')'))
        site.add_download_link(name,
                               videopage,
                               'PTPlayvid',
                               img,
                               name,
                               contextm=contextmenu,
                               duration=duration,
                               quality=hd)
    if re.search('<li class="next">', listhtml, re.DOTALL | re.IGNORECASE):
        search = False
        if not page:
            page = 1
        npage = page + 1

        if url.endswith('/latest-updates/'):
            url += '{}/'.format(str(npage))
            search = True
        elif url.endswith('/{}/'.format(str(page))):
            url = url.replace('/{}/'.format(str(page)),
                              '/{}/'.format(str(npage)))
            search = True
        elif 'list_videos_latest_videos_list' in url:
            url = url.replace('from=' + str(page), 'from=' + str(npage))
            search = True
        elif '/categories/' in url:
            url = url.replace('from=' + str(page), 'from=' + str(npage))
            search = True
        elif 'list_videos_common_videos_list_norm' in url:
            if len(match) == 120:
                url = url.replace('from4=' + str(page), 'from4=' + str(npage))
                search = True
        elif '/search/' in url:
            url = url.replace('from_videos=' + str(page),
                              'from_videos=' + str(npage)).replace(
                                  'from_albums=' + str(page),
                                  'from_albums=' + str(npage))
            search = True
        elif 'from_my_subscriptions_videos' in url:
            if len(match) == 10:
                url = url.replace('from_my_subscriptions_videos=' + str(page),
                                  'from_my_subscriptions_videos=' + str(npage))
                search = True
        elif '/favourites/' in url:
            if 'from_my_fav_videos={0:02d}'.format(page) in url:
                url = url.replace('from_my_fav_videos={0:02d}'.format(page),
                                  'from_my_fav_videos={0:02d}'.format(npage))
                search = True
            else:
                utils.kodilog(' favorites pagination error')
        else:
            url = url.replace('/' + str(page) + '/', '/' + str(npage) + '/')
            search = True

        lastp = re.compile(r'class="pagination".+data-max="(\d+)"',
                           re.DOTALL | re.IGNORECASE).findall(listhtml)
        if lastp:
            lastp = '/{}'.format(lastp[0])
            if npage > int(lastp[1:]):
                search = False
        else:
            lastp = ''

        if search:
            site.add_dir('Next Page (' + str(npage) + lastp + ')', url,
                         'PTList', site.img_next, npage)
    utils.eod()
    return True
Example #28
0
    def get_media_url(self, host, media_id, cached_only=False):
        try:
            if media_id.lower().startswith('magnet:'):
                r = re.search(
                    '''magnet:.+?urn:([a-zA-Z0-9]+):([a-zA-Z0-9]+)''',
                    media_id, re.I)
                if r:
                    _hash = r.group(2)
                    if self.__check_cache(_hash):
                        logger.log_debug(
                            'AllDebrid: BTIH {0} is readily available to stream'
                            .format(_hash))
                        transfer_id = self.__create_transfer(_hash)
                    else:
                        if self.get_setting(
                                'cached_only') == 'true' or cached_only:
                            raise ResolverError(
                                'AllDebrid: Cached torrents only allowed to be initiated'
                            )
                        else:
                            transfer_id = self.__create_transfer(_hash)
                            self.__initiate_transfer(transfer_id)

                    transfer_info = self.__list_transfer(transfer_id)
                    sources = [(link.get('size'), link.get('link'))
                               for link in transfer_info.get('links') if any(
                                   link.get('filename').lower().endswith(x)
                                   for x in FORMATS)]
                    media_id = max(sources)[1]
                    self.__delete_transfer(transfer_id)

            url = '{0}/link/unlock?agent={1}&apikey={2}&link={3}'.format(
                api_url, urllib_parse.quote_plus(AGENT),
                self.get_setting('token'), urllib_parse.quote_plus(media_id))
            result = self.net.http_GET(url, headers=self.headers).content
        except urllib_error.HTTPError as e:
            try:
                js_result = json.loads(e.read())
                if 'error' in js_result:
                    msg = '{0} ({1})'.format(js_result.get('error'),
                                             js_result.get('errorCode'))
                else:
                    msg = 'Unknown Error (1)'
            except:
                msg = 'Unknown Error (2)'
            raise ResolverError('AllDebrid Error: {0} ({1})'.format(
                msg, e.code))
        else:
            js_result = json.loads(result)
            logger.log_debug('AllDebrid resolve: [{0}]'.format(js_result))
            if 'error' in js_result:
                e = js_result.get('error')
                raise ResolverError('AllDebrid Error: {0} ({1})'.format(
                    e.get('message'), e.get('code')))
            elif js_result.get('status', False) == "success":
                if js_result.get('data').get('link'):
                    return js_result.get('data').get('link')
                elif js_result.get('data').get('host') == "stream":
                    sources = js_result.get('data').get('streams')
                    fid = js_result.get('data').get('id')
                    sources = [(str(source.get("quality")), source.get("id"))
                               for source in sources
                               if '+' not in source.get("id")]
                    sid = helpers.pick_source(
                        helpers.sort_sources_list(sources))
                    url = '{0}/link/streaming?agent={1}&apikey={2}&id={3}&stream={4}' \
                          .format(api_url, urllib_parse.quote_plus(AGENT), self.get_setting('token'), fid, sid)
                    result = self.net.http_GET(url,
                                               headers=self.headers).content
                    js_data = json.loads(result)
                    if js_data.get('data').get('link'):
                        return js_data.get('data').get('link')

        raise ResolverError('AllDebrid: no stream returned')
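The magnet handling above hinges on one regular expression that splits the URN into scheme and info-hash; a standalone sketch with a dummy magnet link:

import re

magnet = 'magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567'  # hypothetical
r = re.search(r'magnet:.+?urn:([a-zA-Z0-9]+):([a-zA-Z0-9]+)', magnet, re.I)
if r:
    print(r.group(1), r.group(2))  # btih 0123456789abcdef0123456789abcdef01234567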
Example #29
0
    def __create_transfer(self, media_id):

        try:

            if media_id.startswith('magnet:'):
                response = self.net.http_GET(torrents_addmagnet.format(
                    urllib_parse.quote_plus(media_id)),
                                             headers=self.headers).content
            else:
                response = self.net.http_GET(torrents_addurl.format(
                    urllib_parse.quote_plus(media_id)),
                                             headers=self.headers).content

            result = json.loads(response)

            if media_id.startswith('magnet:'):

                if result.get(
                        'status') == 'OK' and result.get('error') is False:

                    torrent = result.get('return')[0]

                    error = torrent.get('error')

                    torrent_id = torrent.get('torrentid')

                    if error:

                        logger.log_debug('Linksnappy error at line 332: ' +
                                         error)

                else:

                    raise ResolverError(
                        'Unexpected response received when attempting to add a torrent'
                    )

            else:

                if list(result.keys())[0].endswith('.torrent'):

                    torrent_id = list(result.values())[0].get('torrentid')

                    error = list(result.values())[0].get('error')

                    if error:

                        logger.log_debug('Linksnappy error at line 348:' +
                                         error)

                else:

                    raise ResolverError(
                        'Unexpected response received when attempting to add a torrent'
                    )

            if torrent_id:

                logger.log_debug(
                    'Linksnappy.com: Added the following url for transfer {0}'.
                    format(media_id))

            folder_id = self.__create_folder()

            result = self.__start_transfer(torrent_id, folder_id)

            if result.get('error') is False:

                logger.log_debug(
                    'Linksnappy transfer with torrent id: "{0}" successfully started'
                    .format(torrent_id))

            else:

                logger.log_debug(
                    'Linksnappy transfer with torrent id "{0}" has the following error: {1}'
                    .format(torrent_id, result.get('error')))

                if result.get(
                        'error'
                ) == 'Magnet URI processing in progress. Please wait.':

                    count = 1
                    while self.__start_transfer(
                            torrent_id, folder_id).get('error') is not False:

                        logger.log_debug(
                            'Waiting for Linksnappy transfer due to the following status: "{0}"'
                            .format(result.get('error')))

                        common.kodi.sleep(3000)
                        count += 1
                        if count == 8:
                            raise ResolverError(
                                'Linksnappy torrents: Waited too long for transfer to start'
                            )

            return str(torrent_id)

        except Exception as e:

            logger.log_debug(
                'Linksnappy error at __create_transfer: {0}'.format(e))

        return ''
Example #30
0
def Tags(url):
    url = site.url + 'php/model_tags.php?get_tags=1&tag_sort=&word_source=tags&display_style=list&member_mode=0'

    page = utils._getHtml(url)
    res = re.compile(r"g_oTags.SelectTag\('selected_field','(.+?)'.+?10px.+?>(.+?)<", re.IGNORECASE | re.MULTILINE | re.DOTALL).findall(page)

    for item, models in res:
        url = site.url + 'php/model_tags.php?get_users=1&selected_field={0}&display_style=list'.format(urllib_parse.quote_plus(item)) \
            + '&word_source=tags&member_mode=0&page=1&stand_alone=true'
        site.add_dir('{0} [COLOR hotpink]{1}[/COLOR]'.format(item, models), url, 'TagsList', '', '')
    utils.eod()
Example #31
0
    def addDirectory(self, items, content):
        if items is None or len(items) == 0:
            return  #control.idle() ; sys.exit()

        sysaddon = sys.argv[0]

        syshandle = int(sys.argv[1])

        addonFanart, addonThumb, artPath = control.addonFanart(
        ), control.addonThumb(), control.artPath()

        playRandom = control.lang(32535)

        nextMenu = control.lang(32053)

        for i in items:
            try:
                name = i['name']

                plot = i['plot'] or '[CR]'

                if i['image'].startswith('http'):
                    thumb = i['image']
                elif artPath is not None:
                    thumb = os.path.join(artPath, i['image'])
                else:
                    thumb = addonThumb

                try:
                    item = control.item(label=name, offscreen=True)
                except:
                    item = control.item(label=name)

                item.setArt({
                    'icon': thumb,
                    'thumb': thumb,
                    'poster': thumb,
                    'fanart': addonFanart
                })
                item.setInfo(type='video', infoLabels={'plot': plot})

                cm = []

                if content == 'movies':
                    link = urllib_parse.quote_plus(self.person_movie_link %
                                                   i['id'])
                    cm.append(
                        (playRandom,
                         'RunPlugin(%s?action=random&rtype=movie&url=%s)' %
                         (sysaddon, link)))
                    url = '%s?action=movies&url=%s' % (sysaddon, link)
                elif content == 'tvshows':
                    link = urllib_parse.quote_plus(self.person_tv_link %
                                                   i['id'])
                    cm.append(
                        (playRandom,
                         'RunPlugin(%s?action=random&rtype=show&url=%s)' %
                         (sysaddon, link)))
                    url = '%s?action=tvshows&url=%s' % (sysaddon, link)
                else:
                    url = '%s?action=personsSelect&name=%s&url=%s' % (
                        sysaddon, urllib_parse.quote_plus(name),
                        urllib_parse.quote_plus(i['id']))

                if cm:
                    item.addContextMenuItems(cm)

                control.addItem(handle=syshandle,
                                url=url,
                                listitem=item,
                                isFolder=True)
            except:
                log_utils.log('people_dir', 1)
                pass

        try:
            next = items[0]['next']
            if next == '': raise Exception()

            icon = control.addonNext()
            url = '%s?action=persons&url=%s&content=%s' % (
                sysaddon, urllib_parse.quote_plus(next), content)

            try:
                item = control.item(label=nextMenu, offscreen=True)
            except:
                item = control.item(label=nextMenu)

            item.setArt({
                'icon': icon,
                'thumb': icon,
                'poster': icon,
                'banner': icon,
                'fanart': addonFanart
            })
            item.setProperty('SpecialSort', 'bottom')

            control.addItem(handle=syshandle,
                            url=url,
                            listitem=item,
                            isFolder=True)
        except:
            pass

        control.content(syshandle, '')
        control.directory(syshandle, cacheToDisc=True)
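Every url built above is a callback into the addon itself: sys.argv[0] plus quote_plus-escaped query arguments. A sketch of the composition with a made-up addon id and API link:

from urllib.parse import quote_plus

sysaddon = 'plugin://plugin.video.example/'  # normally sys.argv[0]
link = quote_plus('https://api.example.org/person/123/movies')  # hypothetical
url = '%s?action=movies&url=%s' % (sysaddon, link)
print(url)
# plugin://plugin.video.example/?action=movies&url=https%3A%2F%2Fapi.example.org%2Fperson%2F123%2Fmovies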
Example #32
0
def download(name, image, url):

    if url is None: return

    from resources.lib.modules import control

    try:
        headers = dict(urllib_parse.parse_qsl(url.rsplit('|', 1)[1]))
    except:
        headers = {}

    url = url.split('|')[0]

    content = re.compile('(.+?)\sS(\d*)E\d*$').findall(name)
    try:
        transname = name.translate(None, '\/:*?"<>|').strip('.')
    except:
        transname = name.translate(str.maketrans('', '',
                                                 '\/:*?"<>|')).strip('.')
    levels = ['../../../..', '../../..', '../..', '..']

    if len(content) == 0:
        dest = control.setting('movie.download.path')
        dest = control.transPath(dest)
        for level in levels:
            try:
                control.makeFile(os.path.abspath(os.path.join(dest, level)))
            except:
                pass
        control.makeFile(dest)
        dest = os.path.join(dest, transname)
        control.makeFile(dest)
    else:
        dest = control.setting('tv.download.path')
        dest = control.transPath(dest)
        for level in levels:
            try:
                control.makeFile(os.path.abspath(os.path.join(dest, level)))
            except:
                pass
        control.makeFile(dest)
        try:
            transtvshowtitle = content[0][0].translate(None,
                                                       '\/:*?"<>|').strip('.')
        except:
            transtvshowtitle = content[0][0].translate(
                str.maketrans('', '', '\/:*?"<>|')).strip('.')
        dest = os.path.join(dest, transtvshowtitle)
        control.makeFile(dest)
        dest = os.path.join(dest, 'Season %01d' % int(content[0][1]))
        control.makeFile(dest)

    ext = os.path.splitext(urllib_parse.urlparse(url).path)[1][1:]
    if ext not in ['mp4', 'mkv', 'flv', 'avi', 'mpg']: ext = 'mp4'
    dest = os.path.join(dest, transname + '.' + ext)

    sysheaders = urllib_parse.quote_plus(json.dumps(headers))

    sysurl = urllib_parse.quote_plus(url)

    systitle = urllib_parse.quote_plus(name)

    sysimage = urllib_parse.quote_plus(image)

    sysdest = urllib_parse.quote_plus(dest)

    script = inspect.getfile(inspect.currentframe())
    cmd = 'RunScript(%s, %s, %s, %s, %s, %s)' % (
        script, sysurl, sysdest, systitle, sysimage, sysheaders)

    xbmc.executebuiltin(cmd)
Example #33
0
def ContextRelated(url):
    contexturl = (utils.addon_sys + "?mode=" + str('xvideos.ListRelated') +
                  "&url=" + urllib_parse.quote_plus(url))
    xbmc.executebuiltin('Container.Update(' + contexturl + ')')
Example #34
0
    def after_request(self, response):
        """
        The heavy lifter. This method collects the majority of data
        and passes it off for storage.

        :Parameters:
           - `response`: The response on its way to the client.
        """
        ctx = _request_ctx_stack.top
        view_func = self.app.view_functions.get(ctx.request.endpoint)
        if self._type == 'exclude':
            if view_func in self._exclude_views:
                return response
        elif self._type == 'include':
            if view_func not in self._include_views:
                return response
        else:
            raise NotImplementedError('You must set include or exclude type.')

        now = datetime.datetime.utcnow()
        speed = None
        try:
            speed = (now - g.start_time).total_seconds()
        except:
            # Older python versions don't have total_seconds()
            speed_result = (now - g.start_time)
            speed = float("%s.%s" %
                          (speed_result.seconds, speed_result.microseconds))

        if self._fake_time:
            current_time = self._fake_time
        else:
            current_time = now

        data = {
            'url': ctx.request.url,
            'user_agent': ctx.request.user_agent,
            'server_name': ctx.app.name,
            'blueprint': ctx.request.blueprint,
            'view_args': ctx.request.view_args,
            'status': response.status_code,
            'remote_addr': ctx.request.remote_addr,
            'xforwardedfor': ctx.request.headers.get('X-Forwarded-For', None),
            'authorization': bool(ctx.request.authorization),
            'ip_info': None,
            'path': ctx.request.path,
            'speed': float(speed),
            'date': int(time.mktime(current_time.timetuple())),
            'content_length': response.content_length,
            'request': "{} {} {}".format(
                ctx.request.method, ctx.request.url,
                ctx.request.environ.get('SERVER_PROTOCOL')),
            'url_args': dict([(k, ctx.request.args[k])
                              for k in ctx.request.args]),
            'username': None,
            'track_var': g.track_var
        }
        if ctx.request.authorization:
            data['username'] = str(ctx.request.authorization.username)
        if self._use_freegeoip:
            clean_ip = quote_plus(str(ctx.request.remote_addr))
            if '{ip}' in self._freegeoip_endpoint:
                url = self._freegeoip_endpoint.format(ip=clean_ip)
            else:
                url = self._freegeoip_endpoint + clean_ip
            # separate capture and conversion to aid in debugging
            text = urlopen(url).read()
            ip_info = json.loads(text)
            if url.startswith("http://extreme-ip-lookup.com/"):
                del ip_info["businessWebsite"]
                del ip_info["status"]
            data['ip_info'] = ip_info

        for storage in self._storages:
            try:
                storage(data)
            except:
                pass
        return response
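Each entry in self._storages only has to be a callable that accepts the data dict assembled above; a minimal sketch of such a backend:

import json

def print_storage(data):
    # Dump the tracking record; default=str covers values such as the
    # user_agent object that are not directly JSON-serializable.
    print(json.dumps(data, default=str))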
Example #35
0
def append_headers(headers):
    return '|%s' % '&'.join([
        '%s=%s' % (key, urllib_parse.quote_plus(headers[key]))
        for key in headers
    ])
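The '|'-separated tail built here is the counterpart of the dict(parse_qsl(url.rsplit('|', 1)[1])) calls in the download helpers above. A self-contained sketch of both directions, with made-up header values:

from urllib.parse import parse_qsl, quote_plus

def append_headers(headers):
    return '|%s' % '&'.join(
        '%s=%s' % (key, quote_plus(headers[key])) for key in headers)

url = 'http://host/video.mp4' + append_headers(
    {'User-Agent': 'Mozilla/5.0', 'Referer': 'http://host/'})
plain, _, tail = url.rpartition('|')
print(plain, dict(parse_qsl(tail)))
# http://host/video.mp4 {'User-Agent': 'Mozilla/5.0', 'Referer': 'http://host/'}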
Example #36
0
def download(name, iconimage, url):
    from resources.lib.modules import control
    control.busy()
    import json
    if url is None:
        control.idle()
        return

    try:

        url = evaluate(url)
        # xbmc.log('URL-EVALUATE: %s' % url)
    except Exception:
        control.idle()
        xbmcgui.Dialog().ok(NAME, 'Download failed',
                            'Your service can\'t resolve this hoster',
                            'or Link is down')
        return
    try:
        headers = dict(parse_qsl(url.rsplit('|', 1)[1]))
    except BaseException:
        headers = {}
    control.idle()
    title = re.sub('\[.+?\]', '', name)
    content = re.compile('(.+?)\s+[\.|\(|\[]S(\d+)E\d+[\.|\)|\]]',
                         re.I).findall(title)
    try:
        transname = title.translate(None, '\/:*?"<>|').strip('.')
    except TypeError:
        # Python 3: str.translate takes a mapping, not (None, deletechars)
        transname = title.translate(
            str.maketrans('', '', '\/:*?"<>|')).strip('.')
    transname = re.sub(r'\[.+?\]', '', transname)
    levels = ['../../../..', '../../..', '../..', '..']
    if len(content) == 0:
        dest = control.setting('movie.download.path')
        dest = control.transPath(dest)
        for level in levels:
            try:
                control.makeFile(os.path.abspath(os.path.join(dest, level)))
            except:
                pass
        control.makeFile(dest)
        dest = os.path.join(dest, transname)
        control.makeFile(dest)
    else:
        dest = control.setting('tv.download.path')
        dest = control.transPath(dest)
        for level in levels:
            try:
                control.makeFile(os.path.abspath(os.path.join(dest, level)))
            except:
                pass
        control.makeFile(dest)
        # findall() returns (title, season) tuples, so take the title group
        tvtitle = re.sub(r'\[.+?\]', '', content[0][0])
        try:
            transtvshowtitle = tvtitle.translate(None, '\/:*?"<>|').strip('.')
        except TypeError:
            transtvshowtitle = tvtitle.translate(
                str.maketrans('', '', '\/:*?"<>|')).strip('.')
        dest = os.path.join(dest, transtvshowtitle)
        control.makeFile(dest)
        dest = os.path.join(dest, 'Season %01d' % int(content[0][1]))
        control.makeFile(dest)
    control.idle()
    # ext = os.path.splitext(urlparse(url).path)[1]

    ext = os.path.splitext(urlparse(url).path)[1][1:]
    # xbmc.log('URL-EXT: %s' % ext)
    if ext not in ['mp4', 'mkv', 'flv', 'avi', 'mpg']: ext = 'mp4'
    dest = os.path.join(dest, transname + '.' + ext)
    headers = quote_plus(json.dumps(headers))
    # xbmc.log('URL-HEADERS: %s' % headers)

    from resources.lib.modules import downloader
    control.idle()
    downloader.doDownload(url, dest, name, iconimage, headers)
Example #37
0
def webfinger_handler(request):
    """An implementation the webfinger protocol
(http://tools.ietf.org/html/draft-ietf-appsawg-webfinger-12)
in order to provide information about up and downstream metadata available at
this pyFF instance.

Example:

.. code-block:: bash

# curl http://my.org/.well-known/webfinger?resource=http://my.org

This should result in a JSON structure that looks something like this:

.. code-block:: json

{
 "expires": "2013-04-13T17:40:42.188549",
 "links": [
 {
  "href": "http://reep.refeds.org:8080/role/sp.xml",
  "rel": "urn:oasis:names:tc:SAML:2.0:metadata"
  },
 {
  "href": "http://reep.refeds.org:8080/role/sp.json",
  "rel": "disco-json"
  }
 ],
 "subject": "http://reep.refeds.org:8080"
}

Depending on which version of pyFF you're running and the configuration you
may also see downstream metadata listed using the 'role' attribute to the link
elements.
        """

    resource = request.params.get('resource', None)
    rel = request.params.get('rel', None)

    if resource is None:
        resource = request.host_url

    jrd = dict()
    dt = datetime.now() + duration2timedelta("PT1H")
    jrd['expires'] = dt.isoformat()
    jrd['subject'] = request.host_url
    links = list()
    jrd['links'] = links

    _dflt_rels = {
        'urn:oasis:names:tc:SAML:2.0:metadata': ['.xml', 'application/xml'],
        'disco-json': ['.json', 'application/json']
    }

    if rel is None or len(rel) == 0:
        rel = _dflt_rels.keys()
    else:
        rel = [rel]

    def _links(url, title=None):
        if url.startswith('/'):
            url = url.lstrip('/')
        for r in rel:
            suffix = ""
            if not url.endswith('/'):
                suffix = _dflt_rels[r][0]
            links.append(
                dict(rel=r,
                     type=_dflt_rels[r][1],
                     href='%s/%s%s' % (request.host_url, url, suffix)))

    _links('/entities/')
    for a in request.registry.md.store.collections():
        if a is not None and '://' not in a:
            _links(a)

    for entity in request.registry.md.store.lookup('entities'):
        entity_display = entity_display_name(entity)
        _links("/entities/%s" % hash_id(entity.get('entityID')),
               title=entity_display)

    aliases = request.registry.aliases
    for a in aliases.keys():
        for v in request.registry.md.store.attribute(aliases[a]):
            _links('%s/%s' % (a, quote_plus(v)))

    response = Response(dumps(jrd, default=json_serializer))
    response.headers['Content-Type'] = 'application/json'

    return response
Example #38
0
def request(url,
            check,
            close=True,
            redirect=True,
            error=False,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            XHR=False,
            limit=None,
            referer=None,
            cookie=None,
            compression=True,
            output='',
            timeout='30'):
    try:
        r = client.request(url,
                           close=close,
                           redirect=redirect,
                           proxy=proxy,
                           post=post,
                           headers=headers,
                           mobile=mobile,
                           XHR=XHR,
                           limit=limit,
                           referer=referer,
                           cookie=cookie,
                           compression=compression,
                           output=output,
                           timeout=timeout)
        if r is not None and error is not False: return r
        if check in str(r) or str(r) == '': return r

        proxies = sorted(get(), key=lambda x: random.random())
        proxies = proxies[:3]

        for p in proxies:
            p += urllib_parse.quote_plus(url)
            if post is not None:
                if isinstance(post, dict):
                    post = utils.byteify(post)
                    post = urllib_parse.urlencode(post)
                p += urllib_parse.quote_plus('?%s' % post)
            r = client.request(p,
                               close=close,
                               redirect=redirect,
                               proxy=proxy,
                               headers=headers,
                               mobile=mobile,
                               XHR=XHR,
                               limit=limit,
                               referer=referer,
                               cookie=cookie,
                               compression=compression,
                               output=output,
                               timeout='20')
            if check in str(r) or str(r) == '': return r
    except:
        pass
Example #39
0
    def run(self, title, year, season, episode, imdb, tmdb, url, meta):
        try:
            control.sleep(200)

            self.totalTime = 0
            self.currentTime = 0

            self.content = 'movie' if season is None or episode is None else 'episode'

            self.title = title
            self.year = year
            self.name = urllib_parse.quote_plus(
                title) + urllib_parse.quote_plus(
                    ' (%s)' % year
                ) if self.content == 'movie' else urllib_parse.quote_plus(
                    title) + urllib_parse.quote_plus(
                        ' S%01dE%01d' % (int(season), int(episode)))
            self.name = urllib_parse.unquote_plus(self.name)
            self.season = '%01d' % int(
                season) if self.content == 'episode' else None
            self.episode = '%01d' % int(
                episode) if self.content == 'episode' else None

            self.DBID = None
            self.imdb = imdb if imdb is not None else '0'
            self.tmdb = tmdb if tmdb is not None else '0'
            self.ids = {'imdb': self.imdb, 'tmdb': self.tmdb}
            self.ids = dict(
                (k, v) for k, v in six.iteritems(self.ids) if v != '0')

            self.offset = bookmarks.get(self.content, imdb, season, episode)

            poster, thumb, fanart, clearlogo, clearart, discart, meta = self.getMeta(
                meta)

            item = control.item(path=url)
            if self.content == 'movie':
                item.setArt({
                    'icon': thumb,
                    'thumb': thumb,
                    'poster': poster,
                    'fanart': fanart,
                    'clearlogo': clearlogo,
                    'clearart': clearart,
                    'discart': discart
                })
            else:
                item.setArt({
                    'icon': thumb,
                    'thumb': thumb,
                    'tvshow.poster': poster,
                    'season.poster': poster,
                    'fanart': fanart,
                    'clearlogo': clearlogo,
                    'clearart': clearart
                })
            item.setInfo(type='video', infoLabels=control.metadataClean(meta))

            if 'plugin' in control.infoLabel('Container.PluginName'):
                control.player.play(url, item)

            control.resolve(int(sys.argv[1]), True, item)

            control.window.setProperty('script.trakt.ids',
                                       json.dumps(self.ids))

            self.keepPlaybackAlive()

            control.window.clearProperty('script.trakt.ids')
        except:
            log_utils.log('player_fail', 1)
            return
Example #40
0
if mode is None:
    Main_addDir()

###############GAMATOKIDS#################
elif mode == 3:
    get_gam_genres(url)
elif mode == 4:
    gamato_kids(url)
elif mode == 12:
    get_links(name, url, iconimage, description)
    # gamato_links(url, name, iconimage)
elif mode == 18:
    keyb = xbmc.Keyboard('', Lang(32002))
    keyb.doModal()
    if keyb.isConfirmed():
        search = quote_plus(keyb.getText())
        url = GAMATO + "?s={}".format(search)
        Search_gamato(url)
    else:
        pass
elif mode == 20:
    gamatokids()
elif mode == 21:
    gamatokids_top(url)

##########################################

###############METAGLOTISMENO#################
elif mode == 30:
    from resources.lib.indexers import teniesonline
Example #42
0
    def webfinger(self, resource=None, rel=None):
        """An implementation the webfinger protocol (http://tools.ietf.org/html/draft-ietf-appsawg-webfinger-12)
        in order to provide information about up and downstream metadata available at this pyFF instance.

Example:

.. code-block:: bash

        # curl http://localhost:8080/.well-known/webfinger?resource=http://localhost:8080

This should result in a JSON structure that looks something like this:

.. code-block:: json

        {"expires": "2013-04-13T17:40:42.188549",
         "links": [
            {"href": "http://reep.refeds.org:8080/role/sp.xml", "rel": "urn:oasis:names:tc:SAML:2.0:metadata"},
            {"href": "http://reep.refeds.org:8080/role/sp.json", "rel": "disco-json"}],
         "subject": "http://reep.refeds.org:8080"}

Depending on which version of pyFF you're running and the configuration you may also see downstream metadata
listed using the 'role' attribute to the link elements.
        """
        if resource is None:
            resource = cherrypy.request.base

        jrd = dict()
        dt = datetime.now() + duration2timedelta("PT1H")
        jrd['expires'] = dt.isoformat()
        jrd['subject'] = cherrypy.request.base
        links = list()
        jrd['links'] = links

        _dflt_rels = {
            'urn:oasis:names:tc:SAML:2.0:metadata': '.xml',
            'disco-json': '.json'
        }

        if rel is None:
            rel = list(_dflt_rels.keys())
        else:
            rel = [rel]

        def _links(url):
            if url.startswith('/'):
                url = url.lstrip('/')
            for r in rel:
                suffix = ""
                if not url.endswith('/'):
                    suffix = _dflt_rels[r]
                links.append(
                    dict(rel=r,
                         href='%s/%s%s' %
                         (cherrypy.request.base, url, suffix)))

        _links('/entities/')
        for a in self.server.md.store.collections():
            if a is not None and '://' not in a:
                _links(a)

        for entity_id in self.server.md.store.entity_ids():
            _links("/metadata/%s" % hash_id(entity_id))

        for a in list(self.server.aliases.keys()):
            for v in self.server.md.store.attribute(self.server.aliases[a]):
                _links('%s/%s' % (a, quote_plus(v)))

        cherrypy.response.headers['Content-Type'] = 'application/json'
        cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
        return dumps(jrd)
Example #43
0
def urlencode_filter(s):
    if isinstance(s, Markup):
        s = s.unescape()
    s = s.encode('utf8')
    s = quote_plus(s)
    return Markup(s)
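With the type check fixed, wiring the filter into a template engine is one assignment. A sketch assuming Jinja2 is installed and Markup comes from markupsafe (note it shadows Jinja2's built-in urlencode filter):

from jinja2 import Environment

env = Environment()
env.filters['urlencode'] = urlencode_filter  # the function above
print(env.from_string('{{ "a b&c" | urlencode }}').render())
# a+b%26c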
Example #44
0
def hanime_list(url='', search='', page=0):
    tag = []
    if url:
        if '|' in url:
            tag = url.split('|')
        else:
            tag.append(url)
    mode = 'OR' if len(tag) == 1 else 'AND'

    siteurl = 'https://search.htv-services.com/'
    data = {
        "search_text": search,
        "tags": tag,
        "tags_mode": mode,
        "brands": [],
        "blacklist": [],
        "order_by": "created_at_unix",
        "ordering": "desc",
        "page": page
    }
    _user_agent = 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 ' + \
                  '(KHTML, like Gecko) Chrome/13.0.782.99 Safari/535.1'
    try:
        listhtml = utils.postHtml(siteurl,
                                  json_data=data,
                                  headers={'User-Agent': _user_agent})
    except Exception as e:
        utils.notify('Notify', e)
        return None
    hits = json.loads(listhtml)
    videos = json.loads(hits['hits'])
    for video in videos:
        name = video['name']
        if video['is_censored'] is False:
            name = name + " [COLOR hotpink][I]Uncensored[/I][/COLOR]"
        videoid = video['slug']
        img = video['cover_url'].replace('highwinds-cdn.com', 'droidbuzz.top')
        fanart = video['poster_url'].replace('highwinds-cdn.com',
                                             'droidbuzz.top')
        plot = video['description'].replace('<p>', '').replace('</p>', '')
        if utils.PY2:
            plot = plot.encode('ascii', 'ignore')
        tags = ', '.join(sorted(video['tags']))
        plot = '[COLOR hotpink][I]Tags: {1}[/I][/COLOR]\n\n{0}'.format(
            plot, tags)
        contexturl = (utils.addon_sys + "?mode=" + str('hanime.hanime_eps') +
                      "&url=" + urllib_parse.quote_plus(videoid))
        contextmenu = ('[COLOR deeppink]Check other episodes[/COLOR]',
                       'RunPlugin(' + contexturl + ')')
        site.add_download_link(name,
                               videoid,
                               'hanime_play',
                               img,
                               plot,
                               noDownload=True,
                               contextm=contextmenu,
                               fanart=fanart)

    if 'nbPages' in hits:
        totalp = hits['nbPages']
        npage = page + 1
        if npage < totalp:
            site.add_dir('Next Page', url, 'hanime_list', site.img_next, npage)

    utils.eod()
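The search backend is plain JSON-over-POST, so it can be probed outside Kodi. A minimal sketch using only the standard library; the payload mirrors the data dict above, and the endpoint may also want the browser-like User-Agent set there:

import json
from urllib.request import Request, urlopen

payload = {"search_text": "", "tags": [], "tags_mode": "OR", "brands": [],
           "blacklist": [], "order_by": "created_at_unix",
           "ordering": "desc", "page": 0}
req = Request('https://search.htv-services.com/',
              data=json.dumps(payload).encode(),
              headers={'Content-Type': 'application/json',
                       'User-Agent': 'Mozilla/5.0'})
hits = json.loads(urlopen(req).read())
print(hits.get('nbPages'))  # number of result pages, per hanime_list above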
Example #45
0
def List(url, page=1):
    hdr = dict(utils.base_hdrs)
    hdr['Cookie'] = get_cookies()
    listhtml = utils.getHtml(url, site.url, headers=hdr)
    if jblogged and ('>Log in<' in listhtml):
        if JBLogin(False):
            hdr['Cookie'] = get_cookies()
            listhtml = utils.getHtml(url, site.url, headers=hdr)
        else:
            return None

    match = re.compile(
        r'class="video-item([^"]+)".+?href="([^"]+)".+?title="([^"]+).+?(?:original|"cover"\s*src)="([^"]+)(.+?)clock\D+([\d:]+)',
        re.DOTALL | re.IGNORECASE).findall(listhtml)
    for private, videopage, name, img, hd, name2 in match:
        if '>HD<' in hd:
            hd = ' [COLOR orange]HD[/COLOR]'
        else:
            hd = ''
        name = utils.cleantext(name) + '{0} [COLOR cyan]({1})[/COLOR]'.format(
            hd, name2)
        if 'private' in private.lower():
            if not jblogged:
                continue
            private = "[COLOR blue] [PV][/COLOR] "
        else:
            private = ""
        name = private + name

        contextmenu = None
        if jblogged:
            contextadd = (utils.addon_sys + "?mode=" +
                          str('javbangers.ContextMenu') + "&url=" +
                          urllib_parse.quote_plus(videopage) + "&fav=add")
            contextdel = (utils.addon_sys + "?mode=" +
                          str('javbangers.ContextMenu') + "&url=" +
                          urllib_parse.quote_plus(videopage) + "&fav=del")
            contextmenu = [('[COLOR violet]Add to JB favorites[/COLOR]',
                            'RunPlugin(' + contextadd + ')'),
                           ('[COLOR violet]Delete from JB favorites[/COLOR]',
                            'RunPlugin(' + contextdel + ')')]

        site.add_download_link(name,
                               videopage,
                               'Playvid',
                               img,
                               '',
                               contextm=contextmenu)

    if re.search(r'<li\s*class="next"><a', listhtml,
                 re.DOTALL | re.IGNORECASE):
        lastp = re.compile(r':(\d+)">Last',
                           re.DOTALL | re.IGNORECASE).findall(listhtml)
        lastp = '/{}'.format(lastp[0]) if lastp else ''
        if not page:
            page = 1
        npage = page + 1

        if '/categories/' in url:
            if '/{}/'.format(page) in url:
                nurl = url.replace(str(page), str(npage))
            else:
                nurl = url + '{}/'.format(npage)
        elif '/search/' in url:
            if 'from_videos={0:02d}'.format(page) in url:
                nurl = url.replace('from_videos={0:02d}'.format(page),
                                   'from_videos={0:02d}'.format(npage))
            else:
                searchphrase = url.split('/')[-2]
                nurl = url + '?mode=async&function=get_block&block_id=list_videos_videos_list_search_result&q={0}&category_ids=&sort_by=&from_videos={1:02d}'.format(
                    searchphrase, npage)
        elif '/favourites/' in url:
            if 'from_my_fav_videos={0:02d}'.format(page) in url:
                nurl = url.replace('from_my_fav_videos={0:02d}'.format(page),
                                   'from_my_fav_videos={0:02d}'.format(npage))
            else:
                utils.kodilog(' favorites pagination error')
                nurl = url
        elif '/playlists/' in url:
            if '?mode' not in url:
                url += '?mode=async&function=get_block&block_id=playlist_view_playlist_view&sort_by=added2fav_date&from=1'
            if 'from={}'.format(page) in url:
                nurl = url.replace('from={}'.format(page),
                                   'from={}'.format(npage))
            else:
                utils.kodilog(' playlist pagination error')
                nurl = url
        else:
            nurl = site.url[:-1] + re.compile(
                r'next"><a\s*href="(/[^"]+)"',
                re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
        site.add_dir(
            '[COLOR hotpink]Next Page...[/COLOR] (' + str(npage) + lastp + ')',
            nurl, 'List', site.img_next, npage)
    utils.eod()
Example #46
0
def resolve(name, url, iconimage, description):
    liz = xbmcgui.ListItem(name)
    host = url
    if '/links/' in host:
        try:
            frame = client.request(host)
            host = client.parseDOM(frame, 'a', {'id': 'link'}, ret='href')[0]

        except BaseException:
            host = requests.get(host,
                                allow_redirects=False).headers['Location']
    else:
        host = host

    if host.split('|')[0].endswith('.mp4') and 'clou' in host:
        stream_url = host + '|User-Agent=%s&Referer=%s' % (quote_plus(
            client.agent(), ':/'), GAMATO)
        name = name
    elif host.endswith('.mp4') and 'vidce.net' in host:
        stream_url = host + '|User-Agent={}'.format(quote_plus(client.agent()))
    elif host.endswith('.mp4'):
        stream_url = host + '|User-Agent=%s&Referer=%s' % (quote_plus(
            client.agent(), ':/'), GAMATO)
    # stream_url = requests.get(host, headers=hdr).url
    elif '/aparat.' in host:
        try:
            from resources.lib.resolvers import aparat
            stream_url = aparat.get_video(host)
            stream_url, sub = stream_url.split('|')
            liz.setSubtitles([sub])
        except BaseException:
            stream_url = evaluate(host)
    # elif '/clipwatching.' in host:
    #     xbmc.log('HOST: {}'.format(host))
    #     # try:
    #     data = requests.get(host).text
    #     xbmc.log('DATA: {}'.format(data))
    #     try:
    #         sub = client.parseDOM(data, 'track', ret='src', attrs={'label': 'Greek'})[0]
    #         xbmc.log('SUBS: {}'.format(sub))
    #         liz.setSubtitles([sub])
    #     except IndexError:
    #         pass
    #
    #     stream_url = re.findall(r'''sources:\s*\[\{src:\s*['"](.+?)['"]\,''', data, re.DOTALL)[0]
    #     xbmc.log('HOST111: {}'.format(stream_url))
    #
    #
    #     # except BaseException:
    #     #     stream_url = evaluate(stream_url)

    else:
        stream_url = evaluate(host)
        name = name.split(' [B]|')[0]
    try:
        liz.setArt({'icon': iconimage, 'fanart': FANART})
        liz.setInfo(type="Video",
                    infoLabels={
                        "Title": name,
                        "Plot": description
                    })
        liz.setProperty("IsPlayable", "true")
        liz.setPath(str(stream_url))
        xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, liz)
    except BaseException:
        control.infoDialog(Lang(32012), NAME)