    def get_media_url(self, host, media_id):
        web_url = self.get_url(host, media_id)
        rurl = urllib_parse.urljoin(web_url, '/')
        headers = {'User-Agent': common.FF_USER_AGENT, 'Referer': rurl}
        html = self.net.http_GET(web_url, headers=headers).content
        r = re.search(r"btn--primary'\s*href='([^']+)", html)
        if r:
            common.kodi.sleep(7000)
            g = self.net.http_GET(r.group(1), headers=headers)
            ghtml = g.content
            gurl = g.get_url()
            data = helpers.get_hidden(ghtml)
            headers.update({
                'Origin': urllib_parse.urljoin(gurl, '/')[:-1],
                'Referer': gurl,
                'X-Requested-With': 'XMLHttpRequest'
            })
            purl = re.findall('<form.+?action="([^"]+)', ghtml)[0]
            if purl.startswith('/'):
                purl = urllib_parse.urljoin(gurl, purl)
            common.kodi.sleep(5000)
            html = self.net.http_POST(purl, form_data=data,
                                      headers=headers).content
            jd = json.loads(html)
            if jd.get('status') == "success":
                headers.pop('X-Requested-With')
                return jd.get('url') + helpers.append_headers(headers)

        raise ResolverError('File Not Found or removed')
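For context: helpers.append_headers builds the Kodi-style 'url|Header=value&...' suffix so the player replays the same headers when fetching the stream. A rough sketch of what such a helper does, assuming urllib_parse is in scope as in the snippet above (this is an illustration, not resolveurl's exact code):

def append_headers(headers):
    # Kodi convention: append URL-encoded headers after a '|' separator
    return '|%s' % '&'.join(
        '%s=%s' % (key, urllib_parse.quote_plus(str(value)))
        for key, value in headers.items())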
Example #2
def call_recommendation_server(server, client_id_or_guid, data, verb='get'):
    """Call taar `server` to get recommendations for a given
    `client_id_or_guid`.

    `data` is a dict containing either query parameters to be passed in the URL
    if we're calling the server through GET, or the data we'll pass through
    POST as json.
    The HTTP verb to use is either "get" or "post", controlled through `verb`,
    which defaults to "get"."""
    request_kwargs = {
        'timeout': settings.RECOMMENDATION_ENGINE_TIMEOUT
    }
    if verb == 'get':
        params = OrderedDict(sorted(data.items(), key=lambda t: t[0]))
        endpoint = urljoin(server, '%s/%s%s' % (
            client_id_or_guid, '?' if params else '', urlencode(params)))

    else:
        endpoint = urljoin(server, '%s/' % client_id_or_guid)
        request_kwargs['json'] = data
    log.debug(u'Calling recommendation server: {0}'.format(endpoint))
    try:
        with statsd.timer('services.recommendations'):
            response = getattr(requests, verb)(endpoint, **request_kwargs)
        if response.status_code != 200:
            raise requests.exceptions.RequestException()
    except requests.exceptions.RequestException as e:
        log.error(u'Calling recommendation engine failed: {0}'.format(e))
        statsd.incr('services.recommendations.fail')
        return None
    else:
        statsd.incr('services.recommendations.success')
    return json.loads(response.content).get('results', None)
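A minimal usage sketch; the server URL and client id below are hypothetical:

results = call_recommendation_server(
    'https://taar.example.com/v1/api/recommendations/',  # hypothetical endpoint
    'some-client-id',
    {'locale': 'en-US', 'platform': 'linux'},  # sent as sorted GET query params
    verb='get')
# results is the parsed 'results' list, or None if the call failed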
    def get_media_url(self, host, media_id):
        if '$$' in media_id:
            media_id, referer = media_id.split('$$')
            referer = urllib_parse.urljoin(referer, '/')
        else:
            referer = False
        web_url = self.get_url(host, media_id)
        rurl = urllib_parse.urljoin(web_url, '/')
        if not referer:
            referer = rurl
        domain = 'aHR0cHM6Ly9zdHJlYW1yYXBpZC5ydTo0NDM.'
        headers = {'User-Agent': common.FF_USER_AGENT, 'Referer': referer}
        html = self.net.http_GET(web_url, headers=headers).content
        token = helpers.girc(html, rurl, domain)
        number = re.findall(r"recaptchaNumber\s*=\s*'(\d+)", html)
        if token and number:
            eid, media_id = media_id.split('/')
            surl = 'https://streamrapid.ru/ajax/embed-{0}/getSources'.format(
                eid)
            if '?' in media_id:
                media_id = media_id.split('?')[0]
            data = {'_number': number[0], 'id': media_id, '_token': token}
            headers.update({'X-Requested-With': 'XMLHttpRequest'})
            shtml = self.net.http_GET('{0}?{1}'.format(
                surl, urllib_parse.urlencode(data)),
                                      headers=headers).content
            sources = json.loads(shtml).get('sources')
            if sources:
                source = sources[0].get('file')
                headers.pop('X-Requested-With')
                return source + helpers.append_headers(headers)

        raise ResolverError('File Not Found or removed')
Example #4
    def get_media_url(self, host, media_id):
        if '$$' in media_id:
            media_id, referer = media_id.split('$$')
            referer = urllib_parse.urljoin(referer, '/')
        else:
            referer = False
        web_url = self.get_url(host, media_id)
        if not referer:
            referer = urllib_parse.urljoin(web_url, '/')
        headers = {'User-Agent': common.FF_USER_AGENT, 'Referer': referer}
        html = self.net.http_GET(web_url, headers=headers).content
        params = re.search(r'video:\s*([^;]+)', html)
        if params:
            params = params.group(1)
            server = re.findall(r'server:\s*"([^"]+)', params)[0][::-1]
            server = base64.b64decode(server.encode('ascii')).decode('ascii')
            ids = re.search(r'cdn_id:\s*"([^"]+)', params)
            if ids:
                id1, id2 = ids.group(1).split('_')
                sources = json.loads(
                    re.findall(r'cdn_files:\s*([^}]+})', params)[0])
                sources = [(key[4:],
                            'https://{0}/videos/{1}/{2}/{3}'.format(
                                server, id1, id2,
                                sources[key].replace('.', '.mp4?extra=')))
                           for key in list(sources.keys())]
            else:
                vid = re.findall(r'id:\s*"([^"]+)', params)[0]
                ekeys = json.loads(
                    re.findall(r'quality":\s*([^}]+})', params)[0])
                data = {
                    'token': re.findall(r'access_token:\s*"([^"]+)', params)[0],
                    'videos': vid,
                    'ckey': re.findall(r'c_key:\s*"([^"]+)', params)[0],
                    'credentials': re.findall(r'credentials:\s*"([^"]+)', params)[0]
                }
                vurl = 'https://{0}/method/video.get/{1}?{2}'.format(
                    server, vid, urllib_parse.urlencode(data))
                headers.update({'Origin': referer[:-1]})
                vhtml = self.net.http_GET(vurl, headers=headers).content
                sources = json.loads(vhtml).get('response').get(
                    'items')[0].get('files')
                sources = [(key[4:], sources[key] +
                            '&videos={0}&extra_key={1}&videos={0}'.format(
                                vid, ekeys[key[4:]]))
                           for key in list(sources.keys())
                           if key[4:] in ekeys.keys()]

            source = helpers.pick_source(sorted(sources, reverse=True))
            if 'extra_key' in source:
                source = source.replace('https://',
                                        'https://{0}/'.format(server))

            return source + helpers.append_headers(headers)

        raise ResolverError('No playable video found.')
Example #5
    def send_confirm_notification(self, queue, subscription, conf,
                                  project=None, expires=None,
                                  api_version=None, is_unsubscribed=False):
        # NOTE(flwang): If the confirmation feature isn't enabled, just do
        # nothing. We read require_confirmation from the conf object instead
        # of using self.require_confirmation, because the value on self
        # depends on the kwargs passed when the NotifierDriver object was
        # initialized. See bug 1655812 for more information.
        if not conf.notification.require_confirmation:
            return

        key = conf.signed_url.secret_key
        if not key:
            LOG.error("Can't send confirm notification because the"
                      " secret_key option is None")
            return
        url = "/%s/queues/%s/subscriptions/%s/confirm" % (api_version, queue,
                                                          subscription['id'])
        pre_url = urls.create_signed_url(key, [url], project=project,
                                         expires=expires, methods=['PUT'])
        message = None
        if is_unsubscribed:
            message_type = MessageType.UnsubscribeConfirmation.name
            message = ('You have successfully unsubscribed from the queue: '
                       '%s. You can resubscribe by using confirmed=True.'
                       % queue)
        else:
            message_type = MessageType.SubscriptionConfirmation.name
            message = 'You have chosen to subscribe to the queue: %s' % queue

        messages = {}
        endpoint_dict = auth.get_public_endpoint()
        if endpoint_dict:
            wsgi_endpoint = endpoint_dict.get('zaqar')
            if wsgi_endpoint:
                wsgi_subscribe_url = urllib_parse.urljoin(
                    wsgi_endpoint, url)
                messages['WSGISubscribeURL'] = wsgi_subscribe_url
            websocket_endpoint = endpoint_dict.get('zaqar-websocket')
            if websocket_endpoint:
                websocket_subscribe_url = urllib_parse.urljoin(
                    websocket_endpoint, url)
                messages['WebSocketSubscribeURL'] = websocket_subscribe_url
        messages.update({'Message_Type': message_type,
                         'Message': message,
                         'URL-Signature': pre_url['signature'],
                         'URL-Methods': pre_url['methods'][0],
                         'URL-Paths': pre_url['paths'][0],
                         'X-Project-ID': pre_url['project'],
                         'URL-Expires': pre_url['expires'],
                         'SubscribeBody': {'confirmed': True},
                         'UnsubscribeBody': {'confirmed': False}})
        s_type = urllib_parse.urlparse(subscription['subscriber']).scheme
        LOG.info('Begin to send %(type)s confirm/unsubscribe notification.'
                 ' The request body is %(messages)s',
                 {'type': s_type, 'messages': messages})

        self._execute(s_type, subscription, [messages], conf)
Example #6
 def edit(self):
     '''Generate Edit template for client from a Job object'''
     f = Response()
     f.entries = [
         FormEntry(name='name',
                   type='text',
                   value=self.name,
                   label='Name',
                   placeholder='Name for Job...',
                   required=True),
         FormEntry(name='id',
                   type='text',
                   value=self.id,
                   label='Id',
                   hidden=True),
         FormEntry(name='notebook name',
                   type='text',
                   value=self.meta.notebook.name,
                   label='Notebook',
                   required=True,
                   readonly=True),
         FormEntry(name='notebook',
                   type='text',
                   value=self.meta.notebook.id,
                   required=True,
                   readonly=True,
                   hidden=True),
         FormEntry(name='starttime',
                   value=self.meta.start_time.strftime('%Y-%m-%dT%H:%M'),
                   type='datetime',
                   label='Start Time/Date',
                   required=True),
         FormEntry(name='interval',
                   type='select',
                   value=self.meta.interval,
                   label='Interval',
                   options=_INTERVAL_TYPES,
                   required=True),
         FormEntry(name='level',
                   type='select',
                   value=self.meta.level,
                   label='Level',
                   options=_SERVICE_LEVELS,
                   required=True),
         FormEntry(name='reports',
                   type='text',
                   value=str(self.meta.reports),
                   readonly=True),
         FormEntry(name='save',
                   type='submit',
                   value='save',
                   url=urljoin(self.config.apiurl, 'jobs?action=save')),
         FormEntry(name='delete',
                   type='submit',
                   value='delete',
                   url=urljoin(self.config.apiurl, 'jobs?action=delete'))
     ]
     return f.to_json()
Example #7
    def download_get_basefiles(self, url):
        done = False
        pagecnt = 1
        # existing_cnt = 0
        while not done:
            self.log.info('Result page #%s (%s)' % (pagecnt, url))
            resp = requests.get(url)
            mainsoup = BeautifulSoup(resp.text, "html.parser")
            for link in mainsoup.find_all(href=re.compile("/sb/d/108/a/")):
                desc = link.find_next_sibling("span", "info").get_text(strip=True)
                tmpurl = urljoin(url, link['href'])

                # use a strict regex first, then a more forgiving one
                m = self.re_basefile_strict.search(desc)
                if not m:
                    m = self.re_basefile_lax.search(desc)
                    if not m:
                        self.log.warning(
                            "Can't find Document ID from %s, forced to download doc page" % desc)
                        resp = requests.get(tmpurl)
                        subsoup = BeautifulSoup(resp.text, "html.parser")

                        for a in subsoup.find("div", "doc").find_all("li", "pdf"):
                            text = a.get_text(strip=True)
                            m = self.re_basefile_lax.search(text)
                            if m:
                                break
                        else:
                            self.log.error("Cannot possibly find docid for %s" % tmpurl)
                            continue
                    else:
                        self.log.warning(
                            "%s (%s) not using preferred form: '%s'" % (m.group(1), tmpurl, m.group(0)))
                basefile = m.group(1)

                # Extra checking -- sometimes ids like 2003/2004:45
                # are used (should be 2003/04:45)
                if (":" in basefile and "/" in basefile):
                    (y1, y2, o) = re.split("[:/]", basefile)
                    # 1999/2000:45 is a special case
                    if len(y2) == 4 and y1 != "1999":
                        self.log.warning(
                            "%s (%s) using incorrect year format, should be '%s/%s:%s'" %
                            (basefile, tmpurl, y1, y2[2:], o))
                        basefile = "%s/%s:%s" % (y1, y2[2:], o)

                yield basefile, urljoin(url, link['href'])

            pagecnt += 1
            next_link = mainsoup.find("a", text="Nästa sida")
            if next_link:
                url = urljoin(url, next_link['href'])
            else:
                done = True
Example #8
    def download_from_atom(self):
        refresh = self.config.force
        feed_url = self.start_url
        ns = 'http://www.w3.org/2005/Atom'
        done = False
        biggraph = Graph()
        biggraph.bind("dct", self.ns['dct'])
        biggraph.bind("rpubl", self.ns['rpubl'])

        while not done:
            self.log.info("Feed: %s" % feed_url)
            # etree.parse() expects a file or filename, so parse the bytes directly
            tree = etree.fromstring(requests.get(feed_url).content)
            for entry in tree.findall('{%s}entry' % (ns)):
                try:
                    self.log.info("  Examining entry")
                    rdf_url = None
                    for node in entry:
                        if (node.tag == "{%s}link" % ns and
                                node.get('type') == 'application/rdf+xml'):
                            rdf_url = urljoin(feed_url, node.get("href"))
                        elif (node.tag == "{%s}content" % ns and
                              node.get('type') == 'application/rdf+xml'):
                            rdf_url = urljoin(feed_url, node.get("src"))

                    if rdf_url:
                        self.log.info("    RDF: %s" % rdf_url)
                        g = Graph()
                        g.parse(data=requests.get(rdf_url).text, format="xml")
                        for triple in g:
                            s, p, o = triple
                            if (not isinstance(o, URIRef) or
                                    not str(o).startswith(self.config.url)):
                                g.remove(triple)

                        self.log.debug("     Adding %s triples" % len(g))
                        biggraph += g
                except KeyboardInterrupt:
                    raise
                except Exception as e:
                    self.log.error("ERROR: %s" % e)

            done = True
            for link in list(tree.findall('{%s}link' % (ns))):
                self.log.info("  Examining link")
                if link.get('rel') == 'prev-archive':
                    feed_url = urljoin(feed_url, link.get("href"))
                    done = False
                    # done = True

        self.log.info("Done downloading")
        with self.store.open_downloaded("biggraph", "wb") as fp:
            fp.write(biggraph.serialize(format="nt"))
Example #9
    def _update_tags(self):
        """ Synchronize tags with Bleemeo API
        """
        current_tags = set(self.core.config.get('tags', []))
        old_tags = set(self.core.state.get('tags_uuid', {}))

        if current_tags == old_tags:
            return

        response = requests.get(
            urllib_parse.urljoin(self.bleemeo_base_url,
                                 '/v1/agent/%s/' % self.agent_uuid),
            params={'fields': 'tags'},
            auth=(self.agent_username, self.agent_password),
            headers={
                'X-Requested-With': 'XMLHttpRequest',
                'User-Agent': self.core.http_user_agent,
            },
        )
        if response.status_code != 200:
            logging.debug('Fetching current agent tags failed: %s',
                          response.content)
            return

        current_api_tags = set(x['name'] for x in response.json()['tags'])

        deleted_tags = (old_tags - current_tags)
        tags = (current_api_tags - deleted_tags).union(current_tags)

        response = requests.patch(
            urllib_parse.urljoin(self.bleemeo_base_url,
                                 '/v1/agent/%s/' % self.agent_uuid),
            data=json.dumps({'tags': [{
                'name': x
            } for x in tags]}),
            auth=(self.agent_username, self.agent_password),
            headers={
                'X-Requested-With': 'XMLHttpRequest',
                'Content-type': 'application/json',
                'User-Agent': self.core.http_user_agent,
            },
        )
        if response.status_code > 400:
            logging.debug('Updating current agent tags failed: %s',
                          response.content)
            return

        tags_uuid = {}
        for tag in response.json()['tags']:
            if tag['name'] in current_tags:
                tags_uuid[tag['name']] = tag['id']
        self.core.state.set('tags_uuid', tags_uuid)
Example #10
 def form(self):
     '''Generate Form template for client from a Report object'''
     f = Response()
     f.entries = [
         FormEntry(name='name',
                   type='text',
                   label='Name',
                   placeholder='Name for Report...',
                   required=True),
         FormEntry(name='notebook',
                   type='autocomplete',
                   label='Notebook',
                   url=urljoin(self.config.apiurl,
                               'autocomplete?type=notebooks&partial='),
                   required=True),
         FormEntry(name='job',
                   type='autocomplete',
                   label='Job',
                   url=urljoin(self.config.apiurl,
                               'autocomplete?type=jobs&partial='),
                   required=True),
         FormEntry(name='parameters',
                   type='textarea',
                   label='Parameters',
                   placeholder='JSON Parameters...'),
         FormEntry(name='type',
                   type='select',
                   label='Type',
                   options=_REPORT_TYPES,
                   required=True),
         FormEntry(name='output',
                   type='select',
                   label='Output',
                   options=_OUTPUT_TYPES,
                   required=True),
         FormEntry(name='code',
                   type='select',
                   label='Strip Code',
                   options=['yes', 'no'],
                   required=True),
         FormEntry(name='template',
                   type='text',
                   label='Template',
                   required=False),
         FormEntry(name='submit',
                   type='submit',
                   value='save',
                   url=urljoin(self.config.apiurl, 'reports?action=save'))
     ]
     return f.to_json()
Example #11
    def download_resource(self, resource_location, location):
        """Download the resource to the specified location.

        :param resource_location:
            Path relative to the /argus/resources/ directory.
        :param location:
            The location on the instance.
        """
        base_resource = CONFIG.argus.resources
        if not base_resource.endswith("/"):
            base_resource = urlparse.urljoin(CONFIG.argus.resources,
                                             "resources/")
        uri = urlparse.urljoin(base_resource, resource_location)
        self.download(uri, location)
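The trailing-slash check above matters because urljoin replaces the last path segment of a base URL that does not end in '/'. A quick illustration with a hypothetical host:

from urllib.parse import urljoin
urljoin('http://host/argus/resources', 'setup.ps1')   # -> 'http://host/argus/setup.ps1'
urljoin('http://host/argus/resources/', 'setup.ps1')  # -> 'http://host/argus/resources/setup.ps1'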
def __getTrakt(url, post=None):
    try:
        url = urllib_parse.urljoin(BASE_URL, url)
        post = json.dumps(post) if post else None
        headers = {'Content-Type': 'application/json', 'trakt-api-key': V2_API_KEY, 'trakt-api-version': 2}

        if getTraktCredentialsInfo():
            headers.update({'Authorization': 'Bearer %s' % control.addon('plugin.video.koditvr').getSetting('trakt.token')})

        result = client.request(url, post=post, headers=headers, output='extended', error=True)
        result = utils.byteify(result)

        resp_code = result[1]
        resp_header = result[2]
        result = result[0]

        if resp_code in ['423', '500', '502', '503', '504', '520', '521', '522', '524']:
            log_utils.log('Trakt Error: %s' % str(resp_code))
            control.infoDialog('Trakt Error: ' + str(resp_code), sound=True)
            return
        elif resp_code in ['429']:
            log_utils.log('Trakt Rate Limit Reached: %s' % str(resp_code))
            control.infoDialog('Trakt Rate Limit Reached: ' + str(resp_code), sound=True)
            return
        elif resp_code in ['404']:
            log_utils.log('Object Not Found : %s' % str(resp_code))
            return

        if resp_code not in ['401', '405']:
            return result, resp_header

        koditvr = urllib_parse.urljoin(BASE_URL, '/koditvr/token')
        opost = {'client_id': V2_API_KEY, 'client_secret': CLIENT_SECRET, 'redirect_uri': REDIRECT_URI, 'grant_type': 'refresh_token', 'refresh_token': control.addon('plugin.video.koditvr').getSetting('trakt.refresh')}

        result = client.request(koditvr, post=json.dumps(opost), headers=headers)
        result = utils.json_loads_as_str(result)

        token, refresh = result['access_token'], result['refresh_token']
        print('Info - ' + str(token))
        control.addon('plugin.video.koditvr').setSetting(id='trakt.token', value=token)
        control.addon('plugin.video.koditvr').setSetting(id='trakt.refresh', value=refresh)

        headers['Authorization'] = 'Bearer %s' % token

        result = client.request(url, post=post, headers=headers, output='extended', error=True)
        result = utils.byteify(result)
        return result[0], result[2]
    except Exception:
        log_utils.log('getTrakt Error', 1)
Example #14
    def paste(self, s):
        """Upload to pastebin via json interface."""

        url = urljoin(self.url, '/json/new')
        payload = {
            'code': s,
            'lexer': 'pycon',
            'expiry': self.expiry
        }

        try:
            response = requests.post(url, data=payload, verify=True)
            response.raise_for_status()
        except requests.exceptions.RequestException as exc:
            raise PasteFailed(str(exc))

        data = response.json()

        paste_url_template = Template(self.show_url)
        paste_id = urlquote(data['paste_id'])
        paste_url = paste_url_template.safe_substitute(paste_id=paste_id)

        removal_url_template = Template(self.removal_url)
        removal_id = urlquote(data['removal_id'])
        removal_url = removal_url_template.safe_substitute(
            removal_id=removal_id)

        return (paste_url, removal_url)
 def _get_application(self, app_id):
     url = urllib_parse.urljoin(self._api_url + 'applications/', str(app_id) + '.json')
     resp = requests.get(url, headers=self._headers).json()
     if 'application' in resp:
         # return the application payload if present
         return resp['application'] if resp['application'] else None
     return None
Example #16
    def get_api_data(self, target_url=None, query_params=None):
        """retrieves the Jenkins API specific data from the specified URL

        :param str target_url:
            Full URL to the REST API endpoint to be queried. If not provided,
            data will be loaded from the default 'url' for this object
        :param str query_params:
            optional set of query parameters to customize the returned data
        :returns:
            The set of Jenkins attributes, converted to Python objects,
            associated with the given URL.
        :rtype: :class:`dict`
        """
        if target_url is None:
            target_url = self.url

        temp_url = urllib_parse.urljoin(target_url, "api/json")

        if query_params is not None:
            # TODO: Update this to pass 'params' key to get method
            temp_url += "?" + query_params

        req = requests.get(
            temp_url,
            auth=self._creds,
            verify=self._ssl_cert)
        req.raise_for_status()
        retval = req.json()
        self._log.debug(json.dumps(retval, indent=4))
        return retval
Example #17
    def _register_services(self):
        """ Check for any unregistered services and register them

            Also check for changed services and update them
        """
        base_url = self.bleemeo_base_url
        registration_url = urllib_parse.urljoin(base_url, '/v1/service/')

        for key, service_info in self.core.services.items():
            (service_name, instance) = key

            entry = {
                'listen_addresses':
                    get_listen_addresses(service_info),
                'label': service_name,
                'exe_path': service_info.get('exe_path', ''),
            }
            if instance is not None:
                entry['instance'] = instance

            if key in self.services_uuid:
                entry['uuid'] = self.services_uuid[key]['uuid']
                # check for possible update
                if self.services_uuid[key] == entry:
                    continue
                method = requests.put
                service_uuid = self.services_uuid[key]['uuid']
                url = registration_url + str(service_uuid) + '/'
                expected_code = 200
            else:
                method = requests.post
                url = registration_url
                expected_code = 201

            payload = entry.copy()
            payload.update({
                'account': self.account_id,
                'agent': self.agent_uuid,
            })

            response = method(
                url,
                data=json.dumps(payload),
                auth=(self.agent_username, self.agent_password),
                headers={
                    'X-Requested-With': 'XMLHttpRequest',
                    'Content-type': 'application/json',
                },
            )
            if response.status_code != expected_code:
                logging.debug(
                    'Service registration failed. Server response = %s',
                    response.content
                )
                continue
            entry['uuid'] = response.json()['id']
            self.services_uuid[key] = entry
            self.core.state.set_complex_dict(
                'services_uuid', self.services_uuid
            )
Example #18
def _ComputePaths(package, version, discovery_doc):
    full_path = urllib_parse.urljoin(discovery_doc["rootUrl"], discovery_doc["servicePath"])
    api_path_component = "/".join((package, version, ""))
    if api_path_component not in full_path:
        return full_path, ""
    prefix, _, suffix = full_path.rpartition(api_path_component)
    return prefix + api_path_component, suffix
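For example, with a hypothetical discovery document:

doc = {"rootUrl": "https://www.googleapis.com/", "servicePath": "storage/v1/"}
base, extra = _ComputePaths("storage", "v1", doc)
# base == "https://www.googleapis.com/storage/v1/", extra == ""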
Example #19
def make_fasta_dna_url(
        ensembl_release,
        species,
        contig,
        server=ENSEMBL_FTP_SERVER):
    """
    Construct URL to FASTA file with the full sequence of a particular
    chromosome, returned as a single joined string.
    """
    ensembl_release, species, reference_name = _normalize_release_properties(
        ensembl_release, species)
    subdir = _species_subdir(
        ensembl_release,
        species=species,
        filetype="fasta",
        server=server,)
    server_subdir = urllib_parse.urljoin(server, subdir)

    server_sequence_subdir = join(server_subdir, "dna")
    filename = FASTA_DNA_CHROMOSOME_FILENAME_TEMPLATE % {
        "Species": species.capitalize(),
        "reference": reference_name,
        "release": ensembl_release,
        "sequence_type": "dna",
        "contig": contig
    }
    return join(server_sequence_subdir, filename)
Example #20
def notify_new_email(email, user):
    """ Ask the user to confirm that the email belongs to them.
    """

    root_url = pagure_config.get("APP_URL", flask.request.url_root)

    url = urljoin(
        root_url or flask.request.url_root,
        flask.url_for("ui_ns.confirm_email", token=email.token),
    )

    text = """Dear %(username)s,

You have registered a new email on pagure at %(root_url)s.

To finish validating this registration, please click on the following
link or copy/paste it in your browser; this link will remain valid for only 2 days:
  %(url)s

The email will not be activated until you finish this step.

Sincerely,
Your pagure admin.
""" % (
        {"username": user.username, "url": url, "root_url": root_url}
    )

    send_email(
        text,
        "Confirm new email",
        email.email,
        user_from=user.fullname or user.user,
    )
Example #21
def urls_from_urlset_or_sitemapindex(response):
    """ Yields URLs from ``<urlset>`` or ``<sitemapindex>`` elements as per 
        `sitemaps.org <http://www.sitemaps.org/protocol.html>`_.
    """

    sitemap = URL(response.url).fragment_dict.get('sitemap')
    content_subtypes = response.headers.get_content_subtype().split('+')
    if not sitemap and 'xml' not in content_subtypes:
        return

    root = None
    for _, elem in iterparse(decode(response)):

        if root is None:
            root = elem.getroottree().getroot()
            if not (root.tag.endswith('}sitemapindex') or
                    root.tag.endswith('}urlset')):
                # root element has wrong tag - give up
                break

        if elem.tag.endswith('}loc') and elem.text is not None:
            text = elem.text.strip()
            if text:
                # http://www.sitemaps.org/protocol.html#locdef
                url = URL(urljoin(response.url, text))
                if elem.getparent().tag.endswith('}sitemap'):
                    # set sitemap=True to help downstream processing
                    url = url.update_fragment_dict(sitemap=True)
                yield "url", url

        if elem.getparent() is root:
            # release memory for previous elements
            while elem.getprevious() is not None:
                del root[0]
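For reference, a minimal urlset document of the kind walked above (per sitemaps.org; example.com is a placeholder), shown here as a Python literal:

urlset_xml = b'''<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
  <url><loc>http://example.com/page.html</loc></url>
</urlset>'''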
Example #22
def _generate_client_conf():
    auth_strategy = os.environ.get('OS_AUTH_STRATEGY', 'noauth')
    if auth_strategy == 'keystone':
        args = _get_credential_args()
        # FIXME(flwang): For now we hardcode the keystone auth version, since
        # there is a 'bug' in osc-config where it returns the auth_url without
        # a version. This workaround can be removed once that bug is fixed.
        parsed_url = urllib_parse.urlparse(args['auth_url'])
        auth_url = args['auth_url']
        if not parsed_url.path or parsed_url.path == '/':
            auth_url = urllib_parse.urljoin(args['auth_url'], 'v2.0')
        conf = {
            'auth_opts': {
                'backend': 'keystone',
                'options': {
                    'os_username': args['username'],
                    'os_password': args['password'],
                    'os_project_name': args['project_name'],
                    'os_auth_url': auth_url,
                    'insecure': '',
                },
            },
        }
    else:
        conf = {
            'auth_opts': {
                'backend': 'noauth',
                'options': {
                    'os_project_id': 'my-lovely-benchmark',
                },
            },
        }
    print("Using '{0}' authentication method".format(
        conf['auth_opts']['backend']))
    return conf
Example #23
def verify_proxy_ticket(ticket, service):
    """Verifies CAS 2.0+ XML-based proxy ticket.

    Returns username on success and None on failure.
    """

    try:
        from xml.etree import ElementTree
    except ImportError:
        from elementtree import ElementTree

    params = {'ticket': ticket, 'service': service}

    url = (urljoin(settings.CAS_SERVER_URL, 'proxyValidate') + '?' +
           urlencode(params))

    page = urlopen(url)

    try:
        response = page.read()
        tree = ElementTree.fromstring(response)
        if tree[0].tag.endswith('authenticationSuccess'):
            username = tree[0][0].text
            proxies = []
            if len(tree[0]) > 1:
                for element in tree[0][1]:
                    proxies.append(element.text)
            return {"username": username, "proxies": proxies}, None
        else:
            return None, None
    finally:
        page.close()
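The XML walked above is, roughly, a CAS 2.0 serviceResponse like the following (the user and proxy values are placeholders):

cas_response = '''<cas:serviceResponse xmlns:cas="http://www.yale.edu/tp/cas">
  <cas:authenticationSuccess>
    <cas:user>jsmith</cas:user>
    <cas:proxies>
      <cas:proxy>https://proxy.example.com/pgtUrl</cas:proxy>
    </cas:proxies>
  </cas:authenticationSuccess>
</cas:serviceResponse>'''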
Example #24
    def get_file_urls(self, name):
        query = (
            select([
                release_files.c.name,
                release_files.c.filename,
                release_files.c.python_version,
                release_files.c.md5_digest,
            ])
            .where(release_files.c.name == name)
            .order_by(release_files.c.filename.desc())
        )

        with self.engine.connect() as conn:
            results = conn.execute(query)

            return [
                FileURL(
                    filename=r["filename"],
                    url=urllib_parse.urljoin(
                        "/".join([
                            "../../packages",
                            r["python_version"],
                            r["name"][0],
                            r["name"],
                            r["filename"],
                        ]),
                        "#md5={}".format(r["md5_digest"]),
                    ),
                )
                for r in results
            ]
Example #25
def remove_user_from_uri(apps, schema_editor):
    Formula = apps.get_model('formulas', 'Formula')

    for formula in Formula.objects.all():
        url_bits = urlsplit(formula.uri)

        # don't do it if it's an ssh formula
        if 'ssh' in url_bits.scheme:
            continue

        if url_bits.username:
            formula.git_username = url_bits.username

            if url_bits.port:
                new_netloc = '{}:{}'.format(url_bits.hostname, url_bits.port)
            else:
                new_netloc = url_bits.hostname

            # reassemble the split URL (urljoin can't do this; urlunsplit can)
            formula.uri = urlunsplit((
                url_bits.scheme,
                new_netloc,
                url_bits.path,
                url_bits.query,
                url_bits.fragment,
            ))

            formula.save()
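urlunsplit is the inverse of urlsplit, which is why it replaces the original urljoin call here; a quick sketch with a placeholder URL:

from urllib.parse import urlsplit, urlunsplit
bits = urlsplit('https://alice@github.com/org/repo.git')
urlunsplit(('https', 'github.com', bits.path, bits.query, bits.fragment))
# -> 'https://github.com/org/repo.git' (username dropped from the netloc)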
Example #26
    def build_uri(self, base, matches):
        if not base:
            return None
        if self.uriTemplate:
            expanded = str(self.uriTemplate)
        elif self.fragmentTemplate:
            if "#" in base:
                base += self.space.fragmentSeparator
            else:
                base += "#"
            expanded = base + str(self.fragmentTemplate)
        else:
            return None

        expanded = expanded.replace("{+base}", base)
        for var, value in matches.items():
            slug = self.transform_value(value)
            expanded = expanded.replace("{%s}" % var, slug)
        # If base is e.g. "http://localhost/res/" and expanded is a
        # /-prefixed relative uri like "/sfs/9999:998", urljoin
        # results in "http://localhost/sfs/9999:998", not
        # "http://localhost/res/sfs/9999:998" like you'd expect. So we
        # work around it.
        if expanded[0] == "/":
            expanded = expanded[1:]

        if expanded.startswith("http://") or expanded.startswith("https://"):
            return urljoin(base, expanded)
        else:
            # see the test integrationLegalURI.CustomCoinstruct.test_1845_50_s.1
            return "%s/%s" % (base, expanded)
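The workaround above exists because urljoin resolves a /-prefixed path against the host root, discarding the base path:

from urllib.parse import urljoin
urljoin('http://localhost/res/', '/sfs/9999:998')  # -> 'http://localhost/sfs/9999:998'
urljoin('http://localhost/res/', 'sfs/9999:998')   # -> 'http://localhost/res/sfs/9999:998'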
Example #27
    def rename(self, new_job_name):
        """Changes the name of this job

        :param str new_job_name:
            new name to assign to this job
        """
        args = {
            "params": {
                "newName": new_job_name
            }
        }
        self._api.post(self._api.url + "doRename", args=args)

        # NOTE: In order to properly support jobs that may contain nested
        #       jobs we have to do some URL manipulation to extrapolate the
        #       REST API endpoint of the parent object that contains the
        #       renamed job.
        parts = urllib_parse.urlsplit(self._api.url).path.split("/")
        parts = [cur_part for cur_part in parts if cur_part.strip()]
        assert len(parts) >= 2
        assert parts[-2] == "job"
        new_url = urllib_parse.urljoin(
            self._api.url, "/" + "/".join(parts[:-2]))
        new_url += "/job/" + new_job_name
        self._api = self._api.clone(new_url)

        assert self.name == new_job_name
Example #28
    def get_proxy_ticket_for(self, service):
        """Verifies CAS 2.0+ XML-based authentication ticket.

        Returns username on success and None on failure.
        """
        if not settings.CAS_PROXY_CALLBACK:
            raise CasConfigException("No proxy callback set in settings")

        try:
            from xml.etree import ElementTree
        except ImportError:
            from elementtree import ElementTree

        params = {'pgt': self.tgt, 'targetService': service}

        url = (urljoin(settings.CAS_SERVER_URL, 'proxy') + '?' +
               urlencode(params))

        page = urlopen(url)

        try:
            response = page.read()
            tree = ElementTree.fromstring(response)
            if tree[0].tag.endswith('proxySuccess'):
                return tree[0][0].text
            else:
                raise CasTicketException("Failed to get proxy ticket")
        finally:
            page.close()
Example #29
def static_url(app, filename):
    """
    static_url('css/bootstrap.css')
    """
    static_dir = os.path.join(
        os.path.dirname(os.path.abspath(warehouse.__file__)),
        "static",
        "compiled",
    )

    filepath = os.path.join(static_dir, filename)
    manifest_path = os.path.join(os.path.dirname(filepath), ".manifest.json")

    if not app.config.debug:
        # Load our on disk manifest
        with open(manifest_path) as fp:
            manifest = json.load(fp)

        # Get the base name for this file
        basename = manifest.get(os.path.basename(filename))

        # If we were able to get a base name, then create a filename with it
        if basename is not None:
            filename = os.path.join(os.path.dirname(filename), basename)

    return urllib_parse.urljoin("/static/", filename)
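A sketch of the resulting lookup, assuming a manifest that maps bootstrap.css to a hashed filename (the hash below is hypothetical):

# manifest: {"bootstrap.css": "bootstrap-d41d8cd9.css"}
# static_url(app, "css/bootstrap.css")
#   -> urljoin("/static/", "css/bootstrap-d41d8cd9.css")
#   -> "/static/css/bootstrap-d41d8cd9.css"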
Example #30
 def get_eureka_urls(self):
     if self.eureka_url:
         return [self.eureka_url]
     elif self.use_dns:
         zone_dns_map = self.get_zones_from_dns()
         zones = list(zone_dns_map.keys())
         assert len(zones) > 0, "No availability zones found; please add them explicitly"
         if self.prefer_same_zone:
             if self.get_instance_zone() in zones:
                 zones = [zones.pop(zones.index(self.get_instance_zone()))] + zones  # Add our zone as the first element
             else:
                 logger.warn("No match for the zone %s in the list of available zones %s" % (
                     self.get_instance_zone(), zones)
                 )
         service_urls = []
         for zone in zones:
             eureka_instances = zone_dns_map[zone]
             random.shuffle(eureka_instances)  # Shuffle order for load balancing
             for eureka_instance in eureka_instances:
                 server_uri = "http://%s" % eureka_instance
                 if self.eureka_port:
                     server_uri += ":%s" % self.eureka_port
                 # urljoin takes (base, url); the stray third argument was
                 # being passed as allow_fragments
                 eureka_instance_url = urljoin(server_uri, self.context)
                 if not eureka_instance_url.endswith("/"):
                     eureka_instance_url = "%s/" % eureka_instance_url
                 service_urls.append(eureka_instance_url)
         primary_server = service_urls.pop(0)
         random.shuffle(service_urls)
         service_urls.insert(0, primary_server)
         logger.info("This client will talk to the following serviceUrls in order: %s" % service_urls)
         return service_urls
Example #31
 def reportIncorrectImage(self, task_id):
     request = {"clientKey": self.client_key,
                "taskId": task_id
                }
     response = self.session.post(urljoin(self.base_url, self.REPORT_IMAGE_URL), json=request).json()
     self._check_response(response)
     return response.get('status', False) is not False
Example #32
    def get_headers(self, path=None):
        """gets the HTTP header attributes from a Jenkins URL

        :param str path:
            optional extension path to append to the root
            URL managed by this object when performing the
            get operation
        :returns: dictionary of HTTP header attributes with their associated values
        :rtype: :class:`dict`
        """

        temp_path = self._url
        if path is not None:
            temp_path = urllib_parse.urljoin(temp_path, path.lstrip("/\\"))

        if temp_path in DataRequester._header_cache:
            return DataRequester._header_cache[temp_path]

        self._log.debug("Header cache miss: " + temp_path)

        req = requests.get(temp_path, auth=self._credentials)

        if req.status_code != 200:
            req.raise_for_status()

        if DataRequester.ENABLE_CACHING:
            DataRequester._header_cache[temp_path] = req.headers

        return req.headers
Example #33
    def get_api_data(self, query_params=None):
        """Convenience method that retrieves the Jenkins API specific data from the specified URL

        :param str query_params: optional set of query parameters to customize the returned data
        :returns:
            The set of Jenkins attributes, converted to Python objects, associated
            with the given URL.
        :rtype: :class:`object`
        """
        cache_url = urllib_parse.urljoin(self._url, "api/python")
        if cache_url in DataRequester._api_data_cache:
            # NOTE: The assumption here is that the query parameters have no
            # impact on the cached data. While this may not be true in general,
            # it shouldn't be an issue for the use cases this class is intended
            # to support.
            return DataRequester._api_data_cache[cache_url]

        temp_url = cache_url
        if query_params is not None:
            temp_url += "?" + query_params

        txt = self._get_raw_text(temp_url)

        retval = eval(txt)  # pylint: disable=eval-used
        if DataRequester.ENABLE_CACHING:
            DataRequester._api_data_cache[cache_url] = retval

        return retval
Example #34
    def download_www(self, dirname, recurse):
        url = 'https://lagen.nu/dv/downloaded/%s' % dirname
        self.log.debug('Listing contents of %s' % url)
        resp = requests.get(url)
        iterlinks = lxml.html.document_fromstring(resp.text).iterlinks()
        for element, attribute, link, pos in iterlinks:
            if link.startswith("/"):
                continue
            elif link.endswith("/") and recurse:
                self.download_www(link, recurse)
            elif link.endswith(".zip"):
                basefile = os.path.splitext(link)[0]
                if dirname:
                    basefile = dirname + basefile

                localpath = self.store.downloaded_path(basefile)
                if os.path.exists(localpath) and not self.config.force:
                    pass  # we already got this
                else:
                    absolute_url = urljoin(url, link)
                    self.log.debug('Fetching %s to %s' % (link, localpath))
                    resp = requests.get(absolute_url)
                    with self.store.open_downloaded(basefile, "wb") as fp:
                        fp.write(resp.content)
                    self.process_zipfile(localpath)
Example #35
def _ComputePaths(package, version, discovery_doc):
    full_path = urllib_parse.urljoin(
        discovery_doc['rootUrl'], discovery_doc['servicePath'])
    api_path_component = '/'.join((package, version, ''))
    if api_path_component not in full_path:
        return full_path, ''
    prefix, _, suffix = full_path.rpartition(api_path_component)
    return prefix + api_path_component, suffix
Example #36
    def build_absolute_uri(self, location=None):
        if location is None:
            return None

        bits = urlsplit(location)
        if not (bits.scheme and bits.netloc):
            location = urljoin(self.prod_url, location)
        return iri_to_uri(location)
Example #37
def get_cp_url(client, project_id, section="dashboard"):
    project_data = client.get_project(project_id)
    url = project_data["dashboard_url"]

    if section != "dashboard":
        url = urljoin(url, section)

    return url
Example #38
File: fb.py Project: lbolla/fbcli
 def checkins(self, ixbug):
     '''The API does not provide a call for this.'''
     kilnhg_url = self._fburl.replace('.fogbugz.', '.kilnhg.')
     base_url = urljoin(kilnhg_url, '/fogbugz/casecheckins/{}?token={}')
     url = base_url.format(ixbug, self.current_token)
     r = requests.get(url)
     r.raise_for_status()
     return r.json()