def select_backend(self, request):
    """Rewrite ``request.url`` so it points at the next backend.

    The scheme and netloc of the request's original URL are replaced
    with those of the backend yielded by ``self.backend_iter``; path,
    query and fragment are preserved.  No-op when no iterator is set.
    """
    if self.backend_iter is None:
        return
    backend_scheme, backend_netloc = urlsplit(next(self.backend_iter))[:2]
    original = urlsplit(request._original_url)
    request.url = urlunsplit(
        original._replace(scheme=backend_scheme, netloc=backend_netloc))
def safe_put_data(ranking, resource, data, operation):
    """Send some data to ranking using a PUT request.

    ranking (bytes): the URL of ranking server.
    resource (bytes): the relative path of the entity.
    data (dict): the data to JSON-encode and send.
    operation (unicode): a human-readable description of the operation
        we're performing (to produce log messages).

    raise (CannotSendError): in case of communication errors.
    """
    url = urljoin(ranking, resource)
    # XXX With requests-1.2 auth is automatically extracted from
    # the URL: there is no need for this.
    credentials = urlsplit(url)
    try:
        res = requests.put(url, json.dumps(data),
                           auth=(credentials.username, credentials.password),
                           headers={'content-type': 'application/json'},
                           verify=config.https_certfile)
    except requests.exceptions.RequestException as error:
        msg = "%s while %s: %s." % (type(error).__name__, operation, error)
        logger.warning(msg)
        raise CannotSendError(msg)
    # Any 4xx/5xx response also counts as a failure to send.
    if 400 <= res.status_code < 600:
        msg = "Status %s while %s." % (res.status_code, operation)
        logger.warning(msg)
        raise CannotSendError(msg)
def parse_entry(self, res):
    """Build an Entry from one result row of the search page.

    res: a BeautifulSoup tag for a single torrent row.
    Returns the populated Entry, or None when the torrent is nuked.
    """
    entry = Entry()
    entry['title'] = res.find('a', {'class': 'torrent_name_link'})['title']
    # skip if nuked
    if res.find('img', alt='Nuked'):
        log.info('Skipping entry %s (nuked)', entry['title'])
        return None
    details_url = res.find('a', {'class': 'torrent_name_link'})['href']
    torrent_id = parse_qs(urlsplit(details_url).query)['id'][0]
    params = {
        'type': 'rss',
        'id': torrent_id,
        'passkey': self.config['passkey']
    }
    url = '%s/%s?%s' % (BASE_URL, DL_PAGE, urllib.urlencode(params))
    entry['url'] = url
    # BUG FIX: the two values were previously passed as one tuple, which
    # leaves the second %s placeholder unfilled and triggers a logging
    # formatting error; pass them as separate lazy %-args instead.
    log.debug('Title: %s | DL LINK: %s', entry['title'], entry['url'])
    seeds = res.find('td', {'class': 'table_seeders'}) \
        .find('span').text.strip()
    leechers = res.find('td', {'class': 'table_leechers'}) \
        .find('a').text.strip()
    entry['torrent_seeds'] = int(seeds)
    entry['torrent_leeches'] = int(leechers)
    size = res.find('td', attrs={'class': re.compile('table_size')}).text
    entry['content_size'] = parse_filesize(size)
    return entry
def is_localhost(url):
    """Return True when *url* is a plain-http URL on the local machine."""
    if not url:
        return False
    parts = urlsplit(url)
    local_hosts = ['127.0.0.1', '0.0.0.0', 'localhost']
    return parts.scheme == 'http' and parts.hostname in local_hosts
def _title_from_url(self, url): parts = parse.urlsplit(url) name = '' if parts.scheme == 'magnet': match = re.search('(?:&dn(?:\.\d)?=)(.+?)(?:&)', parts.query) if match: name = match.group(1) else: name = posixpath.basename(parts.path) return parse.unquote_plus(name)
def create_proxy(url):
    """Build an XML-RPC proxy for *url*.

    A URL without a scheme is treated as a local SCGI socket path, a
    ``scgi://`` URL as SCGI over TCP; anything else gets a plain proxy.
    """
    parsed = urlsplit(url)
    if not parsed.scheme:
        socket_path = parsed.path
        return xmlrpc_client.ServerProxy(
            'http://1', transport=SCGITransport(socket_path=socket_path))
    if parsed.scheme == 'scgi':
        return xmlrpc_client.ServerProxy('http://%s' % parsed.netloc,
                                         transport=SCGITransport())
    log.debug('Creating Normal XMLRPC Proxy with url %r' % url)
    return xmlrpc_client.ServerProxy(url)
def add_query_params(url, params):
    """Return *url* with *params* merged into its query string.

    url (str): the URL to extend.
    params (dict): name -> value; entries with falsy values are ignored,
        and an existing parameter of the same name is overwritten.
    """
    scheme, netloc, path, query_string, fragment = urlsplit(url)
    query_params = parse_qs(query_string)
    # dict.items() works on both Python 2 and 3; the six-style
    # iteritems() shim is unnecessary here.
    for name, value in params.items():
        if value:
            query_params[name] = [value]
    new_query_string = urlencode(query_params, doseq=True)
    return urlunsplit((scheme, netloc, path, new_query_string, fragment))
def get_auth_from_url():
    """Moves basic authentication from url to username and password fields"""
    pieces = list(parse.urlsplit(config['url']))
    netloc_parts = pieces[1].split('@')
    if len(netloc_parts) <= 1:
        # No credentials embedded in the netloc; nothing to do.
        return
    credentials = netloc_parts[0].split(':')
    if len(credentials) == 2:
        config['username'], config['password'] = credentials[0], credentials[1]
    else:
        log.warning('Invalid basic authentication in url: %s' % config['url'])
    # Strip the auth part from the netloc and write the url back.
    pieces[1] = netloc_parts[1]
    config['url'] = parse.urlunsplit(pieces)
def get_scrape_url(tracker_url, info_hash):
    """Turn a tracker announce URL into the corresponding scrape URL.

    When the URL contains 'announce' it is swapped for 'scrape' in the
    path; otherwise '/scrape' is appended as a guess.  The hex
    *info_hash* is added as a percent-encoded query parameter.
    """
    if 'announce' in tracker_url:
        pieces = urlsplit(tracker_url)
        scrape_path = pieces.path.replace('announce', 'scrape')
        result = urlunsplit([pieces.scheme, pieces.netloc, scrape_path,
                             pieces.query, pieces.fragment])
    else:
        log.debug('`announce` not contained in tracker url, guessing scrape address.')
        result = tracker_url + '/scrape'
    separator = '&' if '?' in result else '?'
    return result + separator + 'info_hash=%s' % quote(binascii.unhexlify(info_hash))
def _get_filename(self, response):
    """Work out a filename for a downloaded CKAN resource.

    Prefers the Content-Disposition header; falls back to the basename
    of ``self.download_url`` when its extension is recognised.
    Raises ValueError when neither yields a usable name.
    """
    disposition = response.headers.get('content-disposition')
    if disposition:
        # Naive parse: take everything after 'filename=', strip quotes.
        return disposition.split("filename=")[-1].replace('"', '')
    split_url = urlparse.urlsplit(self.download_url)
    if split_url.path:
        candidate = os.path.basename(split_url.path)
        if candidate.split(".")[-1].lower() in accepted_extensions:
            return candidate
    raise ValueError('CKAN resource filename could not be deduced')
def __init__(self, mediator):
    """Create a LinkGenerationFacade

    Args:
        mediator (ZigZag): the mediator that stores shared data
    """
    self._git_sha = None
    self._mediator = mediator
    try:
        if mediator.ci_environment == 'asc':
            self._git_sha = self._get_testsuite_prop('MOLECULE_GIT_COMMIT')
            split = urlsplit(self._get_testsuite_prop('REPO_URL'))
            path = self._strip_git_ending(split.path)
            self._molecule_scenario = self._get_testsuite_prop(
                'MOLECULE_SCENARIO_NAME')
            # First non-empty path segment of the repo URL is the fork owner.
            self._repo_fork = list(filter(None, path.split('/')))[0]
            self._repo_name = self._get_testsuite_prop(
                'MOLECULE_TEST_REPO')
        elif mediator.ci_environment == 'mk8s':
            self._git_sha = self._get_testsuite_prop('GIT_COMMIT')
            split = urlsplit(self._get_testsuite_prop('GIT_URL'))
            path = self._strip_git_ending(split.path)
            pr_testing = None  # Assume we are not testing a PR
            if 'CHANGE_BRANCH' in mediator.testsuite_props:
                # A CHANGE_BRANCH other than 'unknown' means a PR build.
                if not re.match(r'unknown',
                                mediator.testsuite_props['CHANGE_BRANCH'],
                                re.IGNORECASE):
                    pr_testing = True
            if pr_testing:
                self._repo_fork = self._get_testsuite_prop('CHANGE_FORK')
                self._repo_name = list(filter(None, path.split('/')))[1]
            else:
                # Branch testing on a set cadence
                self._repo_fork, self._repo_name = list(
                    filter(None, path.split('/')))
        # NOTE(review): `split` is only bound inside the two branches above;
        # for any other ci_environment these lines raise UnboundLocalError,
        # which the except below deliberately swallows — confirm intended.
        self._scheme = split.scheme
        self._netloc = split.netloc
    except (KeyError, UnboundLocalError):
        pass  # If we dont have the info to generate links we want to silently fail
def parse_url(url):
    """Parse an URL

    Args:
        url (str): The URL

    Returns:
        (urlparse.SplitResult, dict): the parsed URL (SplitResult, params)
    """
    split_result = urlsplit(url)
    query_params = parse_qs(split_result.query)
    return split_result, query_params
def set_spc_url_and_tpuid(self):
    """
    Send ticket back and get 'sports pass confirmation' URL and 'TpUid'

    Raises TelstraAuthException when the API response is missing either
    the token or the confirmation URL.
    :return:
    """
    yinz_resp = self.session.get(config.YINZCAM_AUTH_URL2)
    jsondata = json.loads(yinz_resp.text)
    self.token = jsondata.get('TpUid')
    self.spc_url = jsondata.get('Url')
    # BUG FIX: validate before parsing.  Previously a missing 'Url'
    # caused urlsplit(None) to raise a TypeError before this friendlier
    # exception could ever be reached.
    if not self.token or not self.spc_url:
        raise TelstraAuthException(
            'Unable to get token/spc url from Netball API')
    # Index 3 of the split result is the query string.
    self.offer_id = dict(parse_qsl(urlsplit(self.spc_url)[3]))['offerId']
def extract_info(url): """ Extracts amount and BitCoin address from a BitPay URL. :param url: the BitPay URL like "https://bitpay.com/invoice?id=J3qU6XapEqevfSCW35zXXX" :return: a tuple of the amount in BitCoin along with the address """ # https://bitpay.com/ or https://test.bitpay.com uspl = urlsplit(url) base_url = "{0.scheme}://{0.netloc}".format(uspl) print(base_url) invoice_id = uspl.query.split("=")[1] # On the browser, users have to select between Bitcoin and Bitcoin cash # trigger bitcoin selection for successful transaction trigger_url = "{}/invoice-noscript?id={}&buyerSelectedTransactionCurrency=BTC".format( base_url, invoice_id) print(trigger_url) request.urlopen(trigger_url) # Make the payment payment_url = "bitcoin:?r={}/i/{}".format(base_url, invoice_id) print(payment_url) # Check for testnet mode if os.getenv('TESTNET', '0') == '1' and uspl.netloc == 'test.bitpay.com': bitcoin.set_testnet() # get payment request using Electrum's lib pq = parse_qs(urlsplit(payment_url).query) out = {k: v[0] for k, v in pq.items()} payreq = pr.get_payment_request(out.get('r')).get_dict() # amount is in satoshis (1/10e8 Bitcoin) amount = float(payreq.get('amount')) / pow(10, 8) address = payreq.get('requestor') return PaymentInfo(amount, address)
def add_enclosure_info(self, entry, enclosure, filename=True, multiple=False):
    """Stores information from an rss enclosure into an Entry."""
    entry['url'] = enclosure['href']
    # Optional metadata: size (0 when unparseable) and mime type.
    try:
        entry['size'] = int(enclosure['length'])
    except KeyError:
        pass
    except ValueError:
        entry['size'] = 0
    if 'type' in enclosure:
        entry['type'] = enclosure['type']
    # TODO: better and perhaps join/in download plugin?
    # Parse filename from enclosure url
    basename = posixpath.basename(urlsplit(entry['url']).path)
    # Use the url's basename when the enclosure has a size OR there are
    # multiple enclosures (and the caller asked for filenames at all).
    if filename and (entry.get('size') or (multiple and basename)):
        entry['filename'] = basename
        log.trace('filename `%s` from enclosure', entry['filename'])
def get_user_test_id(self):
    """Return the user test id parsed out of the redirect URL.

    Only valid after self.execute().  Returns None when there was no
    redirect, the query string is malformed, or decryption fails.
    """
    if self.redirected_to is None:
        return None
    query = parse_qs(urlsplit(self.redirected_to).query)
    ids = query.get("user_test_id", [])
    if len(ids) != 1:
        logger.warning("Redirected to an unexpected page: `%s'",
                       self.redirected_to)
        return None
    try:
        return decrypt_number(ids[0])
    except Exception:
        logger.warning("Unable to decrypt user test id from page: `%s'",
                       self.redirected_to)
        return None
def __init__(self, **kwargs):
    """Wrap or establish a splunklib service connection.

    Accepts either an existing ``service`` (wrapper semantics), a
    ``token`` (optionally with ``server_uri`` to derive scheme, host
    and port), or standard auth kwargs passed through to splunklib.
    """
    if kwargs.get('service'):
        # Wrapper semantics
        self.service = kwargs['service']
    elif kwargs.get('token'):
        # Connection by token
        if kwargs.get('server_uri'):
            splunkd = urllib_parse.urlsplit(kwargs.get('server_uri'),
                                            allow_fragments=False)
            kwargs['scheme'] = splunkd.scheme
            kwargs['host'] = splunkd.hostname
            kwargs['port'] = splunkd.port
        self.service = splunklib.connect(**kwargs)
    else:
        # Connection by standard auth.  (BUG FIX: removed a duplicate
        # `elif kwargs.get('service')` branch that could never be
        # reached because the first test already handles it.)
        self.service = splunklib.connect(**kwargs)
def do_GET(self):
    """GET method implementation for BaseHTTPRequestHandler."""
    if not self._client_allowed():
        return
    try:
        _, _, req_path, query_string, _ = urlsplit(self.path)
        params = parse_qs(query_string)
        # Give each handler a chance to respond.
        for prefix, handler in self._GET_handlers:
            if self._maybe_handle(prefix, handler, req_path, params):
                return
        if req_path == '/':
            # No path specified: default to showing the list of all runs.
            self._handle_runs('', {})
            return
        self._send_content('Invalid GET request {}'.format(self.path),
                           'text/html', code=400)
    except (IOError, ValueError):
        # Printing these errors gets annoying, and there's nothing to do
        # about them anyway.
        pass
def do_GET(self):
    """GET method implementation for BaseHTTPRequestHandler."""
    if not self._client_allowed():
        return
    try:
        (_, _, path, query, _) = urlsplit(self.path)
        params = parse_qs(query)
        # Give each handler a chance to respond.
        for prefix, handler in self._GET_handlers:
            if self._maybe_handle(prefix, handler, path, params):
                return
        # If no path specified, default to showing the list of all runs.
        if path == '/':
            self._handle_runs('', {})
            return
        # BUG FIX: a stray trailing comma previously made `content` a
        # one-element tuple instead of the encoded bytes.
        content = 'Invalid GET request {}'.format(self.path).encode('utf-8')
        self._send_content(content, 'text/html', code=400)
    except (IOError, ValueError):
        pass  # Printing these errors gets annoying, and there's nothing to do about them anyway.
def parse(self):
    """Return ``self.url`` split into its URL components (SplitResult)."""
    split_result = urllib_parse.urlsplit(self.url)
    return split_result
def stripUrlPassword(url):
    """Return *url* with any password in its netloc masked as 'xxxx'."""
    pieces = list(urlsplit(url))
    # pieces[1] is the netloc; the module-level regex substitutes the
    # credential portion with a fixed placeholder.
    pieces[1] = _netloc_url_re.sub(':xxxx@', pieces[1])
    return urlunsplit(pieces)
def get_paid_token(self):
    """
    Obtain a valid token from Telstra/Yinzcam, will be used to make
    requests for Ooyala embed tokens
    """
    session = custom_session.Session(force_tlsv1=False)
    self.code_verifier = self.get_code_verifier()
    params = config.NRL_AUTH_PARAMS
    # Random scope value for the OAuth/PKCE authorisation request.
    scope = base64.b64encode(os.urandom(16)).decode('utf-8').rstrip('=')
    params.update({
        'scope': scope,
        'code_challenge': self.get_code_challenge(self.code_verifier)
    })
    auth_resp = session.get(config.NRL_AUTH, params=params,
                            allow_redirects=False)
    xsrf = auth_resp.cookies['XSRF-TOKEN']
    session.headers.update({'x-xsrf-token': xsrf})
    data = {
        'emailAddress': '{0}'.format(self.username),
        # NOTE(review): '******'.format(...) always evaluates to the
        # literal '******' — the real password is never sent.  This
        # looks like a redaction artifact; confirm the intended
        # template (e.g. '{0}'.format(self.password)).
        'password': '******'.format(self.password)
    }
    login_resp = session.post(config.NRL_LOGIN, json=data)
    login_resp_json = json.loads(login_resp.text)
    if not login_resp_json.get('success') == True:  # noqa: E712
        raise TelstraAuthException('Login failed for nrl.com: {0}'.format(
            login_resp_json.get('error')))
    # Accept the auth and capture the authorisation code from the
    # redirect location instead of following it.
    auth2_resp = session.get(config.NRL_AUTH_ACCEPT, params=params,
                             allow_redirects=False)
    redirect_url = auth2_resp.headers.get('location')
    redirect_pieces = urlsplit(redirect_url)
    redirect_query = dict(parse_qsl(redirect_pieces.query))
    code = redirect_query.get('code')
    token_form = {'code': code, 'code_verifier': self.code_verifier}
    token_form.update(config.TOKEN_DATA)
    # Fresh headers/cookies for the token exchange.
    session.headers = {}
    session.cookies.clear()
    token_resp = session.post(config.NRL_TOKEN, data=token_form)
    refresh_token = json.loads(token_resp.text).get('refresh_token')
    session.headers.update({
        'Content-Type': 'application/xml',
        'Accept': 'application/json, text/plain, */*'
    })
    # Exchange the refresh token for a Yinzcam ticket.
    ticket_signon = session.post(
        config.YINZCAM_AUTH_URL,
        data=config.NEW_LOGIN_DATA2.format(refresh_token))
    ticket = json.loads(ticket_signon.text).get('Ticket')
    # check validity of subscription
    session.headers.update({'X-YinzCam-Ticket': ticket})
    sub_status = session.get(config.STATUS_URL)
    status_json = json.loads(sub_status.text)
    if status_json.get('Valid') != 'true':
        raise TelstraAuthException('NRL.com login failed: {0}'.format(
            status_json.get('Reason')))
    return ticket
def extend_querystring_params(url, params):
    """Return *url* with *params* merged into its query string.

    Existing parameters of the same name are overwritten.  Any fragment
    is dropped from the rebuilt URL.
    """
    pieces = urlsplit(url)
    merged = parse_qs(pieces.query)
    merged.update(params)
    encoded = urlencode(merged, True)
    return urlunsplit([pieces.scheme, pieces.netloc, pieces.path, encoded, ''])
def _title_from_url(self, url): parts = parse.urlsplit(url) title = parse.unquote_plus(posixpath.basename(parts.path)) return title
def main():
    """Command-line helper that forwards entity actions to the RWSs.

    Parses the command line, then sends one HTTP request per selected
    ranking shard.  Returns 1 when any request failed, 0 otherwise.
    """
    parser = argparse.ArgumentParser(prog='cmsRWSHelper')
    parser.add_argument(
        '-v', '--verbose', action='store_true',
        help="tell on stderr what's happening")
    # FIXME It would be nice to use '--rankings' with action='store'
    # and nargs='+' but it doesn't seem to work with subparsers...
    parser.add_argument(
        '-r', '--ranking', dest='rankings', action='append', type=int,
        choices=list(range(len(config.rankings))), metavar='shard',
        help="select which RWS to connect to (omit for 'all')")
    subparsers = parser.add_subparsers(
        title='available actions', metavar='action',
        help='what to ask the RWS to do with the entity')
    # Create the parser for the "get" command
    parser_get = subparsers.add_parser('get', help="retrieve the entity")
    parser_get.set_defaults(action='get')
    # Create the parser for the "create" command
    parser_create = subparsers.add_parser('create', help="create the entity")
    parser_create.set_defaults(action='create')
    parser_create.add_argument(
        'file', action="store", type=argparse.FileType('rb'),
        help="file holding the entity body to send ('-' for stdin)")
    # Create the parser for the "update" command
    parser_update = subparsers.add_parser('update', help='update the entity')
    parser_update.set_defaults(action='update')
    parser_update.add_argument(
        'file', action="store", type=argparse.FileType('rb'),
        help="file holding the entity body to send ('-' for stdin)")
    # Create the parser for the "delete" command
    parser_delete = subparsers.add_parser('delete', help='delete the entity')
    parser_delete.set_defaults(action='delete')
    # Create the group for entity-related arguments
    group = parser.add_argument_group(
        title='entity reference')
    group.add_argument(
        'entity_type', action='store', choices=ENTITY_TYPES, metavar='type',
        help="type of the entity (e.g. contest, user, task, etc.)")
    group.add_argument(
        'entity_id', action='store', type=utf8_decoder, metavar='id',
        help='ID of the entity (usually a short codename)')
    # Parse the given arguments
    # NOTE(review): if no subcommand is given, args.action is never set
    # and the code below would raise AttributeError — confirm whether
    # the subparsers are effectively required in practice.
    args = parser.parse_args()
    # Percent-encode the id so it is safe to embed in the URL path.
    args.entity_id = quote(args.entity_id)
    if args.verbose:
        verb = args.action[:4] + 'ting'
        logger.info("%s entity '%ss/%s'", verb.capitalize(),
                    args.entity_type, args.entity_id)
    if args.rankings is not None:
        shards = args.rankings
    else:
        # No explicit selection: address every configured shard.
        shards = list(range(len(config.rankings)))
    s = Session()
    had_error = False
    for shard in shards:
        url = get_url(shard, args.entity_type, args.entity_id)
        # XXX With requests-1.2 auth is automatically extracted from
        # the URL: there is no need for this.
        auth = urlsplit(url)
        if args.verbose:
            logger.info("Preparing %s request to %s",
                        ACTION_METHODS[args.action], url)
        # 'create'/'update' subcommands carry a file argument whose
        # contents become the request body.
        if hasattr(args, 'file'):
            if args.verbose:
                logger.info("Reading file contents to use as message body")
            body = args.file.read()
        else:
            body = None
        req = Request(ACTION_METHODS[args.action], url, data=body,
                      auth=(auth.username, auth.password),
                      headers={'content-type': 'application/json'}).prepare()
        if args.verbose:
            logger.info("Sending request")
        try:
            res = s.send(req, verify=config.https_certfile)
        except RequestException:
            # Keep going: one unreachable shard shouldn't stop the rest.
            logger.error("Failed", exc_info=True)
            had_error = True
            continue
        if args.verbose:
            logger.info("Response received")
        if 400 <= res.status_code < 600:
            logger.error("Unexpected status code: %d", res.status_code)
            had_error = True
            continue
        if args.action == "get":
            print(res.content)
    if had_error:
        return 1
    else:
        return 0
def is_localhost(url):
    """True iff *url* uses plain http and points at this machine."""
    if not url:
        return False
    parts = urlsplit(url)
    if parts.scheme != 'http':
        return False
    return parts.hostname in ['127.0.0.1', '0.0.0.0', 'localhost']
def _check_ranking_web_server(self): """Health checker for RWS.""" url = urlsplit(self.cms_config["rankings"][0]) sock = socket.socket() sock.connect((url.hostname, url.port)) sock.close()