def get_genome_space_launch_apps( atm_url, file_url, file_type, url_opener=None ):
    """Query the GenomeSpace ATM webtool-descriptor endpoint and build launch URLs.

    Returns a list of ``(launch_url, webtool_name)`` tuples for every webtool
    that declares a file parameter accepting ``file_type``; ``file_url`` is
    passed to the tool via that parameter.
    """
    gs_request = Request( "%s/%s/webtool/descriptor" % ( atm_url, GENOMESPACE_API_VERSION_STRING ) )
    gs_request.get_method = lambda: 'GET'
    opened_gs_request = url_opener.open( gs_request )
    webtool_descriptors = json.loads( opened_gs_request.read() )
    webtools = []
    for webtool in webtool_descriptors:
        webtool_name = webtool.get( 'name' )
        base_url = webtool.get( 'baseUrl' )
        use_tool = False
        for param in webtool.get( 'fileParameters', [] ):
            # A tool qualifies if any of its file parameters accepts the format.
            for format in param.get( 'formats', [] ):
                if format.get( 'name' ) == file_type:
                    use_tool = True
                    break
            if use_tool:
                file_param_name = param.get( 'name' )
                # file_name_delimiters = param.get( 'nameDelimiters' )
                # Append with '&' if the base URL already carries a query string.
                if '?' in base_url:
                    url_delimiter = "&"
                else:
                    url_delimiter = "?"
                launch_url = "%s%s%s" % ( base_url, url_delimiter, urlencode( [ ( file_param_name, file_url ) ] ) )
                webtools.append( ( launch_url, webtool_name ) )
                break
    return webtools
def _request(self, url, data=None, headers=None, checker=None, method=None):
    """Perform an HTTP request and return the parsed JSON response.

    :param url: target URL
    :param data: optional request body (text is UTF-8 encoded before sending)
    :param headers: optional header dict; an X-API-Key header is added when
        ``self.token`` is set
    :param checker: optional callable invoked with the raw response object
        (e.g. for status validation) before the body is read
    :param method: HTTP verb override; defaults to urllib's GET/POST behavior
    :returns: decoded JSON dict, or ``{}`` for an empty body
    """
    if not headers:
        headers = {}
    if self.token:
        headers["X-API-Key"] = self.token
    self.log.debug("Request: %s %s %s", method if method else 'GET', url, data[:self.logger_limit] if data else None)
    # .encode("utf-8") is probably better
    data = data.encode() if isinstance(data, six.text_type) else data
    request = Request(url, data, headers)
    if method:
        # urllib picks the verb from get_method(); override it for PUT/DELETE etc.
        request.get_method = lambda: method
    response = urlopen(request, timeout=self.timeout)
    if checker:
        checker(response)
    resp = response.read()
    if not isinstance(resp, str):
        resp = resp.decode()
    self.log.debug("Response: %s", resp[:self.logger_limit] if resp else None)
    return json.loads(resp) if len(resp) else {}
def set_genomespace_format_identifiers( url_opener, dm_site ):
    """Populate the module-level GENOMESPACE_FORMAT_IDENTIFIER_TO_GENOMESPACE_EXT
    mapping (format URL -> format name) from the GenomeSpace data-manager
    ``dataformat/list`` endpoint.
    """
    gs_request = Request( "%s/%s/dataformat/list" % ( dm_site, GENOMESPACE_API_VERSION_STRING ) )
    gs_request.get_method = lambda: 'GET'
    opened_gs_request = url_opener.open( gs_request )
    genomespace_formats = json.loads( opened_gs_request.read() )
    # NOTE: `format` shadows the builtin of the same name inside this loop.
    for format in genomespace_formats:
        GENOMESPACE_FORMAT_IDENTIFIER_TO_GENOMESPACE_EXT[ format['url'] ] = format['name']
def json_req(self, url, http_post=False, skip_auth=False, raw=False, **kwargs): ''' Performs JSON request. ''' # Encode params if len(kwargs) > 0: params = urlencode( {key: val.encode('utf-8') for key, val in kwargs.items()} ) else: params = '' # Store for exception handling self.request_url = url self.request_params = params # Append parameters if len(params) > 0 and not http_post: url = '?'.join((url, params)) # Create request object with custom headers request = Request(url) request.timeout = 0.5 request.add_header('User-Agent', USER_AGENT) # Optional authentication if not skip_auth: self.authenticate(request) # Fire request if http_post: handle = urlopen(request, params.encode('utf-8')) else: handle = urlopen(request) # Read and possibly convert response text = handle.read() # Needed for Microsoft if text[:3] == b'\xef\xbb\xbf': text = text.decode('UTF-8-sig') else: text = text.decode('utf-8') # Replace literal \t text = text.strip().replace( '\t', '\\t' ).replace( '\r', '\\r' ) # Needed for Google while ',,' in text or '[,' in text: text = text.replace(',,', ',null,').replace('[,', '[') if raw: return text # Parse JSON response = json.loads(text) # Return data return response
def download(baseurl, parameters=None, headers=None):
    """Download data from a URL and return it as a string.

    @param baseurl Url to download from (e.g. http://www.google.com)
    @param parameters Parameter dict to be encoded with url
    @param headers Headers dict to pass with Request
    @returns String of data from URL
    @raises the last URLError when all MAX_RETRIES attempts fail; any
        non-URLError is propagated immediately
    """
    # Fix: mutable default arguments ({}) are shared across calls; use None.
    parameters = {} if parameters is None else parameters
    headers = {} if headers is None else headers
    url = "?".join([baseurl, urlencode(parameters)])
    log.debug("Downloading: " + url)
    data = ""
    last_err = None
    for _ in range(MAX_RETRIES):
        try:
            req = Request(url, headers=headers)
            req.add_header(USER_AGENT, USER_AGENT_STRING)
            response = urlopen(req)
            if six.PY2:
                data = response.read()
            else:
                data = response.read().decode("utf-8")
            response.close()
            break
        except Exception as err:
            if not isinstance(err, URLError):
                log.debug("Error %s during HTTP Request, abort", repr(err))
                raise  # propagate non-URLError
            last_err = err
            log.debug("Error %s during HTTP Request, retrying", repr(err))
    else:
        # Fix: a bare `raise` here has no active exception (the except
        # block has already exited) and would raise RuntimeError instead
        # of the actual failure. Re-raise the last URLError explicitly.
        raise last_err
    return data
def test_extract_macaroons_from_request(self):
    """httpbakery.extract_macaroons must find macaroons supplied both in the
    'Macaroons' request header and in 'macaroon-*' cookies, ignoring cookies
    with empty values.
    """
    def encode_macaroon(m):
        # Macaroons travel as a base64url-encoded JSON array.
        macaroons = '[' + utils.macaroon_to_json_string(m) + ']'
        return base64.urlsafe_b64encode(utils.to_bytes(macaroons)).decode('ascii')

    req = Request('http://example.com')
    m1 = pymacaroons.Macaroon(version=pymacaroons.MACAROON_V2, identifier='one')
    req.add_header('Macaroons', encode_macaroon(m1))
    m2 = pymacaroons.Macaroon(version=pymacaroons.MACAROON_V2, identifier='two')
    jar = requests.cookies.RequestsCookieJar()
    jar.set_cookie(utils.cookie(
        name='macaroon-auth',
        value=encode_macaroon(m2),
        url='http://example.com',
    ))
    # Empty-valued macaroon cookie: must be skipped by extract_macaroons.
    jar.set_cookie(utils.cookie(
        name='macaroon-empty',
        value='',
        url='http://example.com',
    ))
    jar.add_cookie_header(req)
    macaroons = httpbakery.extract_macaroons(req)
    self.assertEquals(len(macaroons), 2)
    # Sort for a deterministic comparison order.
    macaroons.sort(key=lambda ms: ms[0].identifier)
    self.assertEquals(macaroons[0][0].identifier, m1.identifier)
    self.assertEquals(macaroons[1][0].identifier, m2.identifier)
def download_from_repository(self, repo_source, target):
    """
    Download given source file from the repository and store it as target file

    The repo_source location is used relative to the repository
    location and will be part of a mime type source like:
    file://repo_path/repo_source

    :param string repo_source: source file in the repo
    :param string target: file path

    :raises KiwiUriOpenError: wrapping any exception raised while building
        or opening the request
    """
    try:
        request = Request(
            os.sep.join([self._get_mime_typed_uri(), repo_source])
        )
        if self.user and self.secret:
            # HTTP Basic auth header built from user:secret.
            credentials = b64encode(
                format(':'.join([self.user, self.secret])).encode()
            )
            request.add_header(
                'Authorization', b'Basic ' + credentials
            )
        location = urlopen(request)
    except Exception as e:
        raise KiwiUriOpenError(
            '{0}: {1}'.format(type(e).__name__, format(e))
        )
    with open(target, 'wb') as target_file:
        target_file.write(location.read())
def _pd_api(self, url, data=None, method='GET'):
    """Call the PagerDuty REST API and return the decoded JSON response.

    :param url: path appended to PD_API_BASE
    :param data: optional JSON-serializable request body
    :param method: HTTP verb
    :returns: parsed JSON dict, or None for a 404 on a GET
    :raises HTTPError: for any other API error
    """
    url = '%s/%s' % (PD_API_BASE, url)
    request_args = {
        'headers': dict(self._pd_headers)
    }
    if six.PY3:  # pragma: no cover
        # Py3 Request supports the method keyword directly.
        request_args['method'] = method
    if data is not None:
        request_args['data'] = json.dumps(data).encode('utf-8')
        request_args['headers']['Content-Type'] = APPLICATION_JSON
    request = Request(url, **request_args)
    if six.PY2:  # pragma: no cover
        # Py2 has no method keyword; override get_method instead.
        request.get_method = lambda: method
    try:
        response = urlopen(request)
        return json.loads(response.read().decode('utf-8'))
    except HTTPError as e:
        response = e.read().decode('utf-8')
        logger.warning("API error: %s", response)
        if method == 'GET' and e.code == 404:
            return None
        else:
            raise e
def get_version(self):
    """Get the version of this Master.

    Reads the X-Jenkins response header from the /login page.

    :returns: This master's version number ``str``
    :raises EmptyResponseException: when the server returns no response
    :raises BadHTTPException: on HTTPError or BadStatusLine

    Example::

        >>> j = Jenkins()
        >>> info = j.get_version()
        >>> print info
        >>> 1.541
    """
    try:
        request = Request(self.server + "/login")
        request.add_header('X-Jenkins', '0.0')
        response = urlopen(request, timeout=self.timeout)
        if response is None:
            raise EmptyResponseException(
                "Error communicating with server[%s]: "
                "empty response" % self.server)
        # Header access API differs between Python 2 and 3.
        if six.PY2:
            return response.info().getheader('X-Jenkins')
        if six.PY3:
            return response.getheader('X-Jenkins')
    except (HTTPError, BadStatusLine):
        raise BadHTTPException("Error communicating with server[%s]" % self.server)
def _request_with_auth(url, username, password):
    """Open *url* with an HTTP Basic Authorization header and return the response."""
    credentials = ':'.join((username, password)).encode('ascii')
    token = base64.b64encode(credentials)
    request = Request(url)
    request.add_header(b"Authorization", b"Basic " + token)
    return urlopen(request)
def _query_api(self, clip):
    """Send the clip's audio to the IBM Watson speech-to-text API and return
    the parsed JSON result (including word timestamps).

    # Adapted from SpeechRecognition source code, modified to get text onsets
    """
    # Watson requires >= 16 kHz sample rate and >= 16-bit samples.
    flac_data = clip.get_flac_data(
        convert_rate = None if clip.sample_rate >= 16000 else 16000,
        convert_width = None if clip.sample_width >= 2 else 2
    )
    model = "{0}_BroadbandModel".format("en-US")
    url = "https://stream.watsonplatform.net/speech-to-text/api/v1/recognize?{0}".format(urlencode({
        "profanity_filter": "false",
        "continuous": "true",
        "model": model,
        "timestamps": "true",
    }))
    request = Request(url, data = flac_data, headers = {
        "Content-Type": "audio/x-flac",
        "X-Watson-Learning-Opt-Out": "true",
    })
    # NOTE(review): hasattr("", "encode") is True on both Py2 and Py3, so the
    # first branch is always taken — presumably intended as a py2/py3 switch;
    # verify against upstream SpeechRecognition.
    if hasattr("", "encode"):  # Python 2.6 compatibility
        authorization_value = base64.standard_b64encode("{0}:{1}".format(self.username, self.password).encode("utf-8")).decode("utf-8")
    else:
        authorization_value = base64.standard_b64encode("{0}:{1}".format(self.username, self.password))
    request.add_header("Authorization", "Basic {0}".format(authorization_value))
    try:
        response = urlopen(request, timeout=None)
    except HTTPError as e:
        raise Exception("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code))))
    except URLError as e:
        raise Exception("recognition connection failed: {0}".format(e.reason))
    response_text = response.read().decode("utf-8")
    result = json.loads(response_text)
    return result
def _notify_emby(self, message, host=None, emby_apikey=None): """Handles notifying Emby host via HTTP API Returns: Returns True for no issue or False if there was an error """ # fill in omitted parameters if not host: host = sickbeard.EMBY_HOST if not emby_apikey: emby_apikey = sickbeard.EMBY_APIKEY url = 'http://%s/emby/Notifications/Admin' % host values = {'Name': 'Medusa', 'Description': message, 'ImageUrl': sickbeard.LOGO_URL} data = json.dumps(values) try: req = Request(url, data) req.add_header('X-MediaBrowser-Token', emby_apikey) req.add_header('Content-Type', 'application/json') response = urlopen(req) result = response.read() response.close() logger.log(u'EMBY: HTTP response: ' + result.replace('\n', ''), logger.DEBUG) return True except (URLError, IOError) as e: logger.log(u'EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + ex(e), logger.WARNING) return False
def _send_to_kodi(command, host=None, username=None, password=None, dest_app="KODI"):  # pylint: disable=too-many-arguments
    """Handles communication to KODI servers via HTTP API

    Args:
        command: Dictionary of field/data pairs, encoded via urllib and passed to the KODI API via HTTP
        host: KODI webserver host:port
        username: KODI webserver username
        password: KODI webserver password

    Returns:
        Returns response.result for successful commands or False if there was an error
    """
    # fill in omitted parameters
    if not username:
        username = sickbeard.KODI_USERNAME
    if not password:
        password = sickbeard.KODI_PASSWORD
    if not host:
        logger.log(u'No %s host passed, aborting update' % dest_app, logger.WARNING)
        return False
    # urlencode needs byte values; encode any text entries in place.
    for key in command:
        if isinstance(command[key], text_type):
            command[key] = command[key].encode('utf-8')
    enc_command = urlencode(command)
    logger.log(u"%s encoded API command: %r" % (dest_app, enc_command), logger.DEBUG)
    # url = 'http://%s/xbmcCmds/xbmcHttp/?%s' % (host, enc_command)  # maybe need for old plex?
    url = 'http://%s/kodiCmds/kodiHttp/?%s' % (host, enc_command)
    try:
        req = Request(url)
        # if we have a password, use authentication
        if password:
            base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
            authheader = "Basic %s" % base64string
            req.add_header("Authorization", authheader)
            logger.log(u"Contacting %s (with auth header) via url: %s" % (dest_app, ss(url)), logger.DEBUG)
        else:
            logger.log(u"Contacting %s via url: %s" % (dest_app, ss(url)), logger.DEBUG)
        try:
            response = urlopen(req)
        except (BadStatusLine, URLError) as e:
            logger.log(u"Couldn't contact %s HTTP at %r : %r" % (dest_app, url, ex(e)), logger.DEBUG)
            return False
        result = response.read().decode(sickbeard.SYS_ENCODING)
        response.close()
        logger.log(u"%s HTTP response: %s" % (dest_app, result.replace('\n', '')), logger.DEBUG)
        return result
    except Exception as e:
        logger.log(u"Couldn't contact %s HTTP at %r : %r" % (dest_app, url, ex(e)), logger.DEBUG)
        return False
def __init__(self, url, body=b'', headers=None, method='PUT'):
    """Build a URLRequest with stringified headers and an explicit HTTP method.

    :param url: request URL (coerced to str)
    :param body: request body bytes
    :param headers: optional header mapping; keys and values are coerced to str
    :param method: HTTP verb stored on the request (default 'PUT')
    """
    # Fix: avoid the mutable default argument `headers={}`; None is the
    # conventional sentinel and behaves identically for callers.
    normalized_headers = {
        str(key): str(value)
        for key, value in six.iteritems(headers or {})
    }
    URLRequest.__init__(self, str(url), body, normalized_headers)
    self.method = str(method)
def post_soap(self, url, xml, soapaction=None):
    """POST a SOAP envelope built around *xml* to *url* and return the first
    element of the response envelope's Body.

    :param url: endpoint, resolved relative to self.opener
    :param xml: lxml element placed inside the SOAP body
    :param soapaction: optional value for the Soapaction header
    """
    url = self.opener.relative(url)
    request = Request(url, etree.tostring(soap_body(xml)))
    request.add_header('Content-type', 'text/xml; charset=utf-8')
    if soapaction:
        request.add_header('Soapaction', soapaction)
    response = self.opener.open(request, timeout=self.timeout)
    # Return the single payload element inside /soap:Envelope/soap:Body.
    return etree.parse(response).xpath('/soap:Envelope/soap:Body/*', namespaces=namespaces)[0]
def http_request(method, url, request=None, timeout=30):
    """Perform HTTP request

    :param method: 'POST' or anything else (treated as GET)
    :param url: target URL
    :param request: request body, used only for POST
    :param timeout: socket timeout in seconds
    :returns: raw response bytes (GET) or whatever http_post returns (POST)
    """
    if method == 'POST':
        return http_post(url, request, timeout=timeout)
    else:  # GET
        # NOTE: the `request` parameter is deliberately rebound here.
        request = Request(url)
        request.add_header('User-Agent', 'pycsw (http://pycsw.org/)')
        return urlopen(request, timeout=timeout).read()
def __del(api_key, url, data):
    """
    Do the actual DELETE

    :param api_key: API key merged into the URL by make_url
    :param url: endpoint path
    :param data: JSON-serializable request body
    :returns: parsed JSON response
    """
    url = make_url(api_key, url)
    req = Request(url, headers={'Content-Type': 'application/json'}, data=json.dumps(data))
    # urllib has no DELETE helper; override the verb on the Request.
    req.get_method = lambda: 'DELETE'
    return json.loads(urlopen(req).read())
def execute(cls, uri, http_verb, extra_headers=None, batch=False, body=None, **kw):
    """
    if batch == False, execute a command with the given parameters and
    return the response JSON.
    If batch == True, return the dictionary that would be used in a batch
    command.

    :raises core.ParseError: when connection credentials are missing, or
        mapped subclasses thereof for 400/401/403/404 HTTP errors
    """
    if batch:
        # Batch mode: no network I/O, just the command descriptor.
        ret = {"method": http_verb, "path": uri.split("parse.com", 1)[1]}
        if kw:
            ret["body"] = kw
        return ret
    if not ('app_id' in ACCESS_KEYS and 'rest_key' in ACCESS_KEYS):
        raise core.ParseError('Missing connection credentials')
    app_id = ACCESS_KEYS.get('app_id')
    rest_key = ACCESS_KEYS.get('rest_key')
    master_key = ACCESS_KEYS.get('master_key')
    url = uri if uri.startswith(API_ROOT) else cls.ENDPOINT_ROOT + uri
    if body is None:
        data = kw and json.dumps(kw, default=date_handler) or "{}"
    else:
        data = body
    if http_verb == 'GET' and data:
        # GET carries kwargs in the query string, never in the body.
        url += '?%s' % urlencode(kw)
        data = None
    else:
        data = data.encode('utf-8')
    headers = {
        'Content-type': 'application/json',
        'X-Parse-Application-Id': app_id,
        'X-Parse-REST-API-Key': rest_key
    }
    headers.update(extra_headers or {})
    request = Request(url, data, headers)
    # Master key is only sent when no session token is in play.
    if master_key and 'X-Parse-Session-Token' not in headers.keys():
        request.add_header('X-Parse-Master-Key', master_key)
    request.get_method = lambda: http_verb
    try:
        response = urlopen(request, timeout=CONNECTION_TIMEOUT)
    except HTTPError as e:
        # Map well-known HTTP statuses to specific exception types.
        exc = {
            400: core.ResourceRequestBadRequest,
            401: core.ResourceRequestLoginRequired,
            403: core.ResourceRequestForbidden,
            404: core.ResourceRequestNotFound
        }.get(e.code, core.ParseError)
        raise exc(e.read())
    return json.loads(response.read().decode('utf-8'))
def __init__(self, **kwargs):
    """Initialize from keyword args: site_url, src_url, p.

    Domains are derived from the URLs; counters and result dicts start empty.
    """
    self.site_url = kwargs.get('site_url')
    # NOTE(review): Request.get_host() was removed in Python 3.4 — this
    # presumably targets Python 2; confirm before running under Py3.
    self.site_domain = Request(self.site_url).get_host()
    self.src_url = kwargs.get('src_url')
    self.src_domain = Request(self.src_url).get_host()
    self.p = kwargs.get('p')
    self.replace_dict = {}
    self.total_count = 0
    self.broken_links = {}
def test_filter_json_post_data(tmpdir):
    """filter_post_data_parameters must strip the named keys from a JSON POST
    body both when recording and when replaying the cassette.
    """
    data = json.dumps({"id": "secret", "foo": "bar"}).encode("utf-8")
    request = Request("http://httpbin.org/post", data=data)
    request.add_header("Content-Type", "application/json")
    cass_file = str(tmpdir.join("filter_jpd.yaml"))
    # First pass records the cassette with 'id' filtered out.
    with vcr.use_cassette(cass_file, filter_post_data_parameters=["id"]):
        urlopen(request)
    # Second pass replays it; the stored body must not contain the secret.
    with vcr.use_cassette(cass_file, filter_post_data_parameters=["id"]) as cass:
        assert b'"id": "secret"' not in cass.requests[0].body
def create_directory( url_opener, directory_dict, new_dir, dm_url ):
    """Create nested GenomeSpace directories via PUT requests.

    :param url_opener: opener used to issue the requests
    :param directory_dict: starting directory descriptor (must have 'url')
    :param new_dir: iterable of path components; empty/'/'/None parts skipped
    :param dm_url: unused here (kept for interface compatibility)
    :returns: descriptor dict of the deepest created directory
    """
    payload = { "isDirectory": True }
    for dir_slice in new_dir:
        if dir_slice in ( '', '/', None ):
            continue
        # '/' inside a component would change the path; replace it first.
        url = '/'.join( ( directory_dict['url'], quote( dir_slice.replace( '/', '_' ), safe='' ) ) )
        new_dir_request = Request( url, headers={ 'Content-Type': 'application/json', 'Accept': 'application/json' }, data=json.dumps( payload ) )
        new_dir_request.get_method = lambda: 'PUT'
        # Each response becomes the parent for the next component.
        directory_dict = json.loads( url_opener.open( new_dir_request ).read() )
    return directory_dict
def test_filter_json_post_data(tmpdir):
    """filter_post_data_parameters must strip the named keys from a JSON POST
    body both when recording and when replaying the cassette.
    """
    data = json.dumps({'id': 'secret', 'foo': 'bar'}).encode('utf-8')
    request = Request('http://httpbin.org/post', data=data)
    request.add_header('Content-Type', 'application/json')
    cass_file = str(tmpdir.join('filter_jpd.yaml'))
    # First pass records the cassette with 'id' filtered out.
    with vcr.use_cassette(cass_file, filter_post_data_parameters=['id']):
        urlopen(request)
    # Second pass replays it; the stored body must not contain the secret.
    with vcr.use_cassette(cass_file, filter_post_data_parameters=['id']) as cass:
        assert b'"id": "secret"' not in cass.requests[0].body
def download_avatar_image(user, size):
    """Download avatar image from remote server.

    :param user: object with an .email attribute used to build the avatar URL
    :param size: requested avatar size in pixels
    :returns: raw image bytes
    """
    url = avatar_for_email(user.email, size)
    request = Request(url)
    request.add_header('User-Agent', USER_AGENT)
    # Fire request (short timeout: avatars are optional decoration)
    handle = urlopen(request, timeout=1.0)
    # Read and possibly convert response
    return bytes(handle.read())
def clear_all_queries(self, cluster_name=DEFAULT_CLUSTER):
    """
    Clear all the primed queries from a particular cluster

    :param cluster_name: cluster to clear queries from
    :returns: raw response body from the admin endpoint
    """
    opener = build_opener(HTTPHandler)
    request = Request("http://{0}/{1}/{2}".format(
        self.admin_addr, "prime", cluster_name))
    # The admin API expects DELETE; override urllib's default verb.
    request.get_method = lambda: 'DELETE'
    connection = opener.open(request)
    return connection.read()
def _verify_cas2_saml(ticket, service):
    """Verifies CAS 3.0+ XML-based authentication ticket and returns extended attributes.

    @date: 2011-11-30
    @author: Carlos Gonzalez Vila <*****@*****.**>

    Returns username and attributes on success and None,None on failure.
    """
    try:
        from xml.etree import ElementTree
    except ImportError:
        from elementtree import ElementTree

    # We do the SAML validation
    headers = {'soapaction': 'http://www.oasis-open.org/committees/security',
               'cache-control': 'no-cache',
               'pragma': 'no-cache',
               'accept': 'text/xml',
               'connection': 'keep-alive',
               'content-type': 'text/xml'}
    params = {'TARGET': service}
    url = Request(urljoin(settings.CAS_SERVER_URL, 'samlValidate') + '?' + urlencode(params), '', headers)
    # Fix: build the SAML assertion once and reuse it — the original called
    # get_saml_assertion(ticket) twice and discarded the first result.
    data = get_saml_assertion(ticket)
    url.add_data(data)
    page = urlopen(url)
    try:
        user = None
        attributes = {}
        response = page.read()
        # (Removed a stray debug print of the raw response that the original
        # code itself flagged with "XXX: Is this supposed to be here?")
        tree = ElementTree.fromstring(response)
        # Find the authentication status
        success = tree.find('.//' + SAML_1_0_PROTOCOL_NS + 'StatusCode')
        if success is not None and success.attrib['Value'] == 'samlp:Success':
            # User is validated
            attrs = tree.findall('.//' + SAML_1_0_ASSERTION_NS + 'Attribute')
            for at in attrs:
                if 'uid' in set(at.attrib.values()):
                    user = at.find(SAML_1_0_ASSERTION_NS + 'AttributeValue').text
                    attributes['uid'] = user
                values = at.findall(SAML_1_0_ASSERTION_NS + 'AttributeValue')
                if len(values) > 1:
                    # Multi-valued attribute: collect every value.
                    attributes[at.attrib['AttributeName']] = [v.text for v in values]
                else:
                    attributes[at.attrib['AttributeName']] = values[0].text
        return user, attributes
    finally:
        page.close()
def build_request_with_data(url, data, api_key, method):
    """Build a request with the received method.

    Installs a redirect handler that preserves the method/body across
    redirects, then returns the opener together with the prepared request.

    :returns: (opener, request) tuple
    """
    http_redirect_with_data_handler = HTTPRedirectWithDataHandler(method=method)
    opener = build_opener(http_redirect_with_data_handler)
    install_opener(opener)
    url = make_url(url, api_key=api_key, args=None)
    request = Request(url, headers={'Content-Type': 'application/json'}, data=json.dumps(data))
    request_method = request.get_method()
    # Override the verb only when urllib's default (GET/POST) doesn't match.
    if request_method != method:
        request.get_method = lambda: method
    return opener, request
def add_data(self, data):
    """Attach a request body, urlencoding mappings and setting Content-Length.

    :param data: bytes, or a mapping (anything with .items()) that is
        urlencoded and ASCII-encoded first
    """
    if hasattr(data, "items"):
        data = urlencode(data).encode("ascii")
    assert isinstance(data, binary_type)
    # Py2 Request has add_data(); Py3 exposes the .data attribute instead.
    if hasattr(BaseRequest, "add_data"):
        BaseRequest.add_data(self, data)
    else:
        self.data = data
    self.add_header("Content-Length", str(len(data)))
def req(self, path, data=None, method=None):
    """Issue a JSON request against the server and assert a 200 response.

    :param path: path appended to self.server
    :param data: optional JSON-serializable body
    :param method: optional HTTP verb override
    :returns: the open response object
    """
    url = self.server + path
    if data:
        req = Request(url, headers={'Content-Type': 'application/json'}, data=json.dumps(data))
    else:
        req = Request(url, headers={'Content-Type': 'application/json'})
    if method:
        req.get_method = lambda: method
    res = self.opener.open(req)
    print('==> at %s (%s)' % (url, method or 'GET'))
    assert res.getcode() == 200, url
    return res
def get_access_token(self):
    """Request an OAuth access token from the auth endpoint.

    Sends a client_credentials grant for the Microsoft Translator scope.

    :returns: the parsed JSON token response as a dict
    """
    data = dict(
        client_id=self.client_id,
        client_secret=self.client_secret,
        scope="http://api.microsofttranslator.com",
        grant_type="client_credentials",
    )
    data_bytes = urlencode(data).encode("utf-8")
    request = Request(self.AUTH_URL, data_bytes, method="POST")
    request.add_header("Content-Type", "application/x-www-form-urlencoded;charset=utf-8")
    # Fix: the body is already attached to the Request; the original also
    # passed data_bytes to urlopen(), sending/overriding it redundantly.
    response = urlopen(request)
    result = response.read().decode("utf-8")
    return loads(result)
def redirect_request(self, request, fp, code, msg, headers, new_url):
    """Build the follow-up Request for an HTTP redirect, preserving the
    configured method and the original body/headers.

    Falls back to the default HTTPRedirectHandler behaviour when the
    redirect code or method is not in the allowed sets.
    """
    request_method = request.get_method()
    if str(code) in self.redirect_codes and request_method in self.valid_methods:
        # Some servers emit unescaped spaces in Location headers.
        new_url = new_url.replace(' ', '%20')
        request = Request(new_url,
                          data=request.data,
                          headers=request.headers,
                          origin_req_host=request.get_origin_req_host(),
                          unverifiable=True)
        if self.method in self.valid_methods:
            if request.get_method() != self.method:
                # Keep the caller-requested verb across the redirect.
                request.get_method = lambda: self.method
        return request
    else:
        # Fix: the original called the base implementation unbound without
        # `self` (shifting every argument by one position) and discarded its
        # return value, so non-matching redirects returned None.
        return HTTPRedirectHandler.redirect_request(
            self, request, fp, code, msg, headers, new_url)
def Suite(self, succes): """ Après le téléchargement passe à la suite """ # Vérifie que le fichier est bien entier : tailleFichierDest = os.path.getsize(self.parent.fichierDest + "/" + self.parent.nomFichier) tailleFichierOrigin = self.parent.tailleFichier if tailleFichierDest != tailleFichierOrigin: succes = False # Téléchargement terminé if succes == True: # On attribue un ID unique qui permet de compter les téléchargements IDfichier = FonctionsPerso.GetIDfichier() if len(IDfichier) > 7: id = IDfichier[-7:] else: id = "" # On envoie le signal pour le compteur de téléchargement if "linux" in sys.platform: typeFichier = "linux" else: typeFichier = "windows" try: versionFichier = self.parent.versionFichier fichier = "%s-%s-%s" % (typeFichier, versionFichier, id) req = Request( "http://www.noethys.com/fichiers/telechargement.cgi?fichier=%s" % fichier) handle = urlopen(req) except: pass # Si téléchargement complet, on passe à la page de fin de téléchargement sleep(1) # Attend 2 secondes avant de continuer self.parent.Active_page("page_fin_telechargement") else: # Vidage du rep Updates FonctionsPerso.VideRepertoireUpdates(forcer=True) # Le téléchargement n'est pas complet, demande à l'utilisateur de recommencer self.label_introduction.SetLabel( _(u"Le téléchargement n'est pas complet. Voulez-vous recommencer ?" )) self.bouton_ok.Show(True) self.Layout()
def get_url_disposition_filename(url, headers=None):
    """Get filename as possibly provided by the server in Content-Disposition

    :param url: URL to probe when headers are not supplied
    :param headers: optional pre-fetched header mapping; when given, no
        request is made
    :returns: filename parsed from Content-Disposition, or its fallback
    """
    if headers is None:
        request = Request(url)
        r = retry_urlopen(request)
        # things are different in requests: its responses expose .headers,
        # urllib responses expose .info()
        if 'requests.' in str(r.__class__):
            headers = r.headers
        else:
            headers = r.info()
    else:
        r = None
    try:
        return get_response_disposition_filename(
            headers.get('Content-Disposition', ''))
    finally:
        # Only close a response we opened ourselves.
        if r:
            r.close()
def _open_seed_url(self, base, branch, name):
    """Open a seed file either from the local filesystem or over HTTP.

    :param base: base path or URL
    :param branch: branch subdirectory under base
    :param name: seed file name
    :returns: an open file object (local) or urlopen response (remote)
    """
    path = os.path.join(base, branch)
    if not path.endswith('/'):
        path += '/'
    url = urljoin(path, name)
    if not urlparse(url).scheme:
        # No scheme: treat it as a plain local path.
        fullpath = os.path.join(path, name)
        _logger.info("Using %s", fullpath)
        return open(fullpath)
    _logger.info("Downloading %s", url)
    req = Request(url)
    # Always fetch fresh seed data, bypassing intermediary caches.
    req.add_header('Cache-Control', 'no-cache')
    req.add_header('Pragma', 'no-cache')
    return urlopen(req)
def get_github_email(access_token):
    """Get real email from GitHub

    :param access_token: GitHub OAuth token
    :returns: a verified email address, preferring the primary one, or None
    """
    request = Request('https://api.github.com/user/emails')
    request.timeout = 1.0
    request.add_header('User-Agent', USER_AGENT)
    request.add_header('Authorization', 'token {0}'.format(access_token))
    handle = urlopen(request)
    data = json.loads(handle.read().decode('utf-8'))
    email = None
    for entry in data:
        # Skip not verified ones
        if not entry['verified']:
            continue
        email = entry['email']
        # Stop at the primary address; otherwise keep the last verified one.
        if entry['primary']:
            break
    return email
def relate_cdash_builds(spec_map, cdash_base_url, job_build_id, cdash_project,
                        cdashids_mirror_url):
    """Record "depends on" relationships in CDash between this job's build and
    the builds of each of its dependencies.

    :param spec_map: mapping with a 'deps' dict of dependency specs
    :param cdash_base_url: base URL of the CDash instance
    :param job_build_id: CDash build id of the current job; no-op when falsy
    :param cdash_project: CDash project name
    :param cdashids_mirror_url: mirror holding each dependency's cdashid file
    :raises SpackError: when the relateBuilds API call fails
    """
    if not job_build_id:
        return
    dep_map = spec_map['deps']
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
    }
    cdash_api_url = '{0}/api/v1/relateBuilds.php'.format(cdash_base_url)
    for dep_pkg_name in dep_map:
        tty.debug('Fetching cdashid file for {0}'.format(dep_pkg_name))
        dep_spec = dep_map[dep_pkg_name]
        dep_build_id = read_cdashid_from_mirror(dep_spec, cdashids_mirror_url)
        payload = {
            "project": cdash_project,
            "buildid": job_build_id,
            "relatedid": dep_build_id,
            "relationship": "depends on"
        }
        enc_data = json.dumps(payload).encode('utf-8')
        opener = build_opener(HTTPHandler)
        request = Request(cdash_api_url, data=enc_data, headers=headers)
        response = opener.open(request)
        response_code = response.getcode()
        # 200 and 201 both indicate the relationship was stored.
        if response_code != 200 and response_code != 201:
            msg = 'Relate builds ({0} -> {1}) failed (resp code = {2})'.format(
                job_build_id, dep_build_id, response_code)
            raise SpackError(msg)
        response_text = response.read()
        tty.debug('Relate builds response: {0}'.format(response_text))
def http_call(method, url, data=None):
    """Utility method for making HTTP requests.

    :param method: HTTP verb (forced onto the request via get_method)
    :param url: target URL
    :param data: optional JSON-serializable request body
    :returns: parsed JSON response
    :raises exceptions.RomanaException: on any 4xx/5xx response code
    """
    LOG.debug("http_call(): Calling %s %s" % (method, url))
    opener = build_opener(HTTPHandler)
    if data:
        data = simplejson.dumps(data)
        LOG.debug("http_call(): With body: %s" % data)
    request = Request(url, data)
    request.add_header('Accept', 'application/json')
    if data:
        request.add_header('Content-Type', 'application/json')
    request.get_method = lambda: method
    resp = opener.open(request)
    if resp.getcode() >= 400:
        raise exceptions.RomanaException("Error in %s %s with payload %s: %s",
                                         method, url, data, resp)
    body = resp.read()
    data = simplejson.loads(body)
    return data
def _impl():
    """Check emma-project.org for a newer PyEMMA release and warn if one exists.

    All failures are swallowed and logged at debug level — the version check
    must never break the importing application.
    """
    try:
        # The User-Agent doubles as anonymous usage telemetry; it is omitted
        # entirely when `testing` is set.
        r = Request('http://emma-project.org/versions.json',
                    headers={'User-Agent': 'PyEMMA-{emma_version}-Py-{python_version}-{platform}-{addr}'
                             .format(emma_version=current, python_version=platform.python_version(),
                                     platform=platform.platform(terse=True), addr=uuid.getnode())}
                    if not testing else {})
        encoding_args = {} if six.PY2 else {'encoding': 'ascii'}
        with closing(urlopen(r, timeout=30)) as response:
            payload = str(response.read(), **encoding_args)
        versions = json.loads(payload)
        # Exactly one entry is flagged latest; take its version string.
        latest_json = tuple(filter(lambda x: x['latest'], versions))[0]['version']
        latest = parse(latest_json)
        if parse(current) < latest:
            import warnings
            warnings.warn("You are not using the latest release of PyEMMA."
                          " Latest is {latest}, you have {current}."
                          .format(latest=latest, current=current), category=UserWarning)
    except Exception:
        import logging
        logging.getLogger('pyemma').debug("error during version check", exc_info=True)
def auth_complete(self, *args, **kwargs):
    """Completes loging process, must return user instance

    Exchanges the OAuth code for an access token at ACCESS_TOKEN_URL, then
    delegates to do_auth with the token and the full token response.

    :raises AuthCanceled: when the token endpoint returns HTTP 400
    :raises AuthUnknownError: when the response is not valid JSON
    """
    self.process_error(self.data)
    params = self.auth_complete_params(self.validate_state())
    request = Request(self.ACCESS_TOKEN_URL, data=urlencode(params),
                      headers=self.auth_headers())
    try:
        response = json.loads(dsa_urlopen(request).read())
    except HTTPError as e:
        # A 400 from the provider means the user cancelled/denied access.
        if e.code == 400:
            raise AuthCanceled(self)
        else:
            raise
    except (ValueError, KeyError):
        raise AuthUnknownError(self)
    self.process_error(response)
    return self.do_auth(response['access_token'], response=response,
                        *args, **kwargs)
def _execute_request_response(self, service, values):
    """
    The part of _execute that has actual side effects. In a separate
    function to make everything else easily testable.

    :param service: service path appended to self.base_url
    :param values: mapping urlencoded into the POST body
    :returns: raw response body
    :raises VCCSClientHTTPError: on HTTPError or when the backend is
        unreachable (reported as 503)
    """
    data = urlencode(values)
    if six.PY3:
        data = data.encode('utf8')
    req = Request(self.base_url + service, data)
    try:
        response = urlopen(req)
    except HTTPError as exc:
        # don't want the vccs_client user to have to know what http client we use.
        raise VCCSClientHTTPError(reason='Authentication backend error',
                                  http_code=exc.getcode())
    except URLError:
        raise VCCSClientHTTPError(
            reason='Authentication backend unavailable', http_code=503)
    return response.read()
def fetch(self, url, data=None, headers=None, is_json=True, is_xml=False):
    """Fetch a URL and decode the response.

    :param url: target URL
    :param data: optional request body (POST when given)
    :param headers: optional header dict
    :param is_json: parse the body as JSON (default)
    :param is_xml: parse the body as XML (applied after is_json when both set)
    :returns: parsed JSON/XML, or the decoded/raw body
    """
    if headers is None:
        headers = {}
    req = Request(url, data, headers)
    res = urlopen(req)
    # Charset discovery differs between Py2 and Py3 response objects.
    if six.PY3:
        charset = res.headers.get_content_charset()
    else:
        charset = res.headers.getparam('charset')
    body = res.read()
    if charset:
        body = body.decode(charset)
    if is_json:
        body = json.loads(body)
    if is_xml:
        body = xmlparse(body)
    return body
def fetch_saml_validation(self, ticket):
    """POST a SAML assertion for *ticket* to the CAS samlValidate endpoint.

    :param ticket: CAS service ticket being validated
    :returns: the open urlopen response object
    """
    # We do the SAML validation
    headers = {
        'soapaction': 'http://www.oasis-open.org/committees/security',
        'cache-control': 'no-cache',
        'pragma': 'no-cache',
        'accept': 'text/xml',
        'connection': 'keep-alive',
        'content-type': 'text/xml; charset=utf-8',
    }
    params = [('TARGET', self.service_url)]
    saml_validate_url = urllib_parse.urljoin(
        self.server_url, 'samlValidate',
    )
    request = Request(
        saml_validate_url + '?' + urllib_parse.urlencode(params),
        self.get_saml_assertion(ticket),
        headers,
    )
    return urllib_request.urlopen(request)
def send(self, data):
    """
    Send an https request.

    :param data: urlencoded form body for the BluePay API
    :returns: for the bp10emu API the final (redirect) URL, otherwise the
        response body; the string "ERROR" on an unhandled HTTPError
    """
    try:
        headers = {
            'User-Agent': 'BluepPay Python Library/' + self.RELEASE_VERSION,
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        req = Request(self.url, data, headers=headers)
        r = urlopen(req)
        response = ""
        # bp10emu responds via redirect, so the URL itself is the result.
        response = r.geturl() if self.api == 'bp10emu' else r.read()
        return response
    except HTTPError as e:
        # The wlcatch endpoint signals its result through the error URL.
        if re.match("https://secure.bluepay.com/interfaces/wlcatch", e.geturl()):
            response = e.geturl()
            return response
        #return e.read()
        return "ERROR"
def download_and_resize_image(url, new_width=256, new_height=256, display=False):
    """Download an image, crop/resize it to (new_width, new_height), and save
    it as a JPEG in a temporary file.

    :param url: image URL (fetched with a browser-like User-Agent)
    :param new_width: output width in pixels
    :param new_height: output height in pixels
    :param display: when True, also show the resized image via display_image
    :returns: path of the temporary JPEG file
    """
    _, filename = tempfile.mkstemp(suffix=".jpg")
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    response = urlopen(req)
    image_data = response.read()
    image_data = BytesIO(image_data)
    pil_image = Image.open(image_data)
    # Fix: Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same
    # filter and has been available since Pillow 2.7.
    pil_image = ImageOps.fit(pil_image, (new_width, new_height), Image.LANCZOS)
    pil_image_rgb = pil_image.convert("RGB")
    pil_image_rgb.save(filename, format="JPEG", quality=90)
    print("Image downloaded to %s." % filename)
    if display:
        display_image(pil_image)
    return filename
def check_external(self, homepage, path, text):
    """Assert that *path* is linked from *homepage* and that fetching it
    contains *text*; network outages (timeout/refused/host down) are tolerated.
    """
    headers = {"User-Agent": "Mozilla/5.0"}
    # Certificate validation is deliberately disabled for external sites.
    context = ssl._create_unverified_context()
    request = Request(path, headers=headers)
    assert path in homepage
    try:
        assert text in urlopen(request, context=context).read().decode("utf-8")
    except URLError as e:
        if e.errno in [
                errno.ETIMEDOUT, errno.ECONNREFUSED, errno.EHOSTDOWN
        ]:
            pass
        elif "Connection refused" in str(
                e):  # not every error comes with a errno
            pass
        else:
            print(e)
            print(e.errno)
            raise
def read(self, size=-1):
    """Read bytes from the current position using an HTTP Range request.

    :param size: number of bytes to read; a negative value reads to the end
        of the resource
    :returns: the bytes returned by the server
    """
    if self.fileobj:
        self.fileobj.close()
    opener = build_opener(HTTPRangeHandler)
    install_opener(opener)
    if size < 0:
        # Open-ended range: everything from the current position.
        rangeheader = {'Range': 'bytes=%s-' % (self.pos)}
    else:
        rangeheader = {
            'Range': 'bytes=%s-%s' % (self.pos, self.pos + size - 1)
        }
    req = Request(self.url, headers=rangeheader)
    res = urlopen(req)
    data = res.read()
    # Fix: advance by the number of bytes actually returned. The original
    # did `self.pos += size`, which moved the position *backwards* when
    # size == -1 and overshot whenever the server returned a short read.
    self.pos += len(data)
    return data
def forceSaveEPGonRemoteReceiver(self):
    """Ask the remote receiver to persist its EPG via the /api/saveepg endpoint.

    Best-effort: every error is caught and logged, nothing is raised.
    """
    url = "http://%s/api/saveepg" % self.getRemoteAddress()
    print('[ChannelsImporter][saveEPGonRemoteReceiver] URL: %s' % url)
    try:
        req = Request(url)
        response = urlopen(req)
        print(
            '[ChannelsImporter][saveEPGonRemoteReceiver] Response: %d, %s'
            % (response.getcode(), response.read().strip().replace(
                "\r", "").replace("\n", "")))
    except HTTPError as err:
        print('[ChannelsImporter][saveEPGonRemoteReceiver] ERROR:', err)
    except URLError as err:
        print('[ChannelsImporter][saveEPGonRemoteReceiver] ERROR:',
              err.reason[0])
    #except urllib2 as err:
    #    print('[ChannelsImporter][saveEPGonRemoteReceiver] ERROR:', err)
    except:
        # Catch-all so an import run can never be aborted by this call.
        print(
            '[ChannelsImporter][saveEPGonRemoteReceiver] undefined error')
def rest(self, obj, method, params):
    """Call the appliance's JSON-RPC-style REST endpoint with Basic auth.

    :param obj: target object name
    :param method: method name to invoke on the object
    :param params: parameters passed to the method
    :returns: parsed JSON response
    :raises ex.excError: when the head is unreachable
    """
    self.init()
    data = {"method": method, "params": params, "object": obj}
    data = json.dumps(data)
    request = Request(self.url, data)
    # NOTE(review): base64.encodestring was removed in Python 3.9 — this
    # presumably targets Python 2; confirm before running on modern Py3.
    base64string = base64.encodestring('%s:%s' % (self.username, self.password))[:-1]
    request.add_header('Authorization', 'Basic %s' % base64string)
    request.add_header('Content-Type', 'application/json')
    try:
        response = urlopen(request)
    except URLError:
        raise ex.excError("unreachable head %s" % self.head)
    response = json.loads(response.read())
    return response
def http_emitter(message, log, url):
    """Send payload

    POSTs the measurements in *message* as JSON to <url>/intake.
    A 202 from the server counts as success; other HTTPErrors propagate.
    Transport-level failures are logged, not raised.
    """
    log.debug('http_emitter: attempting postback to ' + url)
    # Post back the data
    partial_payload = []
    for measurement in message:
        partial_payload.append(measurement)
    payload = json.dumps(partial_payload)
    if PY3:
        payload = payload.encode('utf-8')
    url = "%s/intake" % url
    headers = post_headers(payload)
    try:
        # Make sure no proxy is autodetected for this localhost connection
        proxy_handler = ProxyHandler({})
        # Should this be installed as the default opener and reused?
        opener = build_opener(proxy_handler)
        request = Request(url, payload, headers)
        response = None
        try:
            response = opener.open(request)
            log.debug('http_emitter: postback response: ' + str(response.read()))
        except Exception as exc:
            log.error("""Forwarder at {0} is down or not responding...
Error is {1}
Please restart the monasca-agent.""".format(url, repr(exc)))
        finally:
            if response:
                response.close()
    except HTTPError as e:
        # 202 Accepted is the forwarder's normal success status.
        if e.code == 202:
            log.debug("http payload accepted")
        else:
            raise
def getVideoUrl(self, fmt):
    """Resolve a playable URL for this video at the requested *fmt*,
    falling back through lower qualities (22 -> 18 -> 6 -> 1).

    Returns the URL string, or None when the video info cannot be
    fetched or no working format is found.
    """
    # FIX: the original did str() first, so the `is None` guard could
    # never fire (str(None) == 'None').
    video_id = self.getYouTubeId()
    if video_id is None:
        return None  # no video_id
    video_id = str(video_id)
    for el_type in ['detailpage', 'embedded', 'vevo']:
        video_info_url = (
            'http://www.youtube.com/get_video_info?&video_id=%s&el=%s&ps=default&eurl=&gl=DE&hl=en'
            % (video_id, el_type))
        request = Request(video_info_url, None, std_headers)
        try:
            video_info_page = urlopen(request).read()
            video_info = parse_qs(video_info_page)
            if 'token' in video_info:
                break
        except (URLError, HTTPException, error) as err:
            return None  # ('ERROR: unable to download video info webpage: %s' % str(err))
    if 'token' not in video_info:
        if 'reason' not in video_info:
            reason = 'Unable to extract "t" parameter for unknown reason'
        else:
            reason = unquote_plus(video_info['reason'][0])
        return None  # reason
    # Quality fallback chain: each key maps to the next lower format.
    quality_fallback_dict = {"22": "18", "18": "6", "6": "1"}
    token = video_info['token'][0]
    while True:
        print("[YTB] Trying fmt=" + fmt)
        video_real_url = (
            'http://www.youtube.com/get_video?video_id=%s&t=%s&eurl=&el=detailpage&ps=default&gl=US&hl=en&fmt=%s'
            % (video_id, token, fmt))
        video_real_url = self.verify_url(video_real_url)
        if video_real_url is None:
            if fmt == "1":
                print("[YTB] no valid fmt found")
                break
            else:
                print("[YTB] not found")
                fmt = quality_fallback_dict[fmt]
        else:
            print("[YTB] found")
            break
    return video_real_url  # 'OK'
def upload_disk(self, fileItem, lease, host):
    """
    Upload an individual disk. Passes the file handle of the disk
    directly to the urlopen request.
    """
    local_path = self.get_disk(fileItem, lease)
    if local_path is None:
        return
    device_url = self.get_device_url(fileItem, lease)
    upload_url = device_url.url.replace('*', host)
    request_headers = {'Content-length': os.path.getsize(local_path)}
    # Disable certificate verification where the runtime supports it.
    ssl_context = (ssl._create_unverified_context()
                   if hasattr(ssl, '_create_unverified_context') else None)
    print("Uploading disk: %s" % local_path)
    self.handle = FileHandle(local_path)
    upload_request = Request(upload_url, self.handle, request_headers)
    urlopen(upload_request, context=ssl_context)
def validate_url(url, lint_ctx, user_agent=None):
    """Check that *url* is reachable and report the result on *lint_ctx*.

    :param url: URL to probe
    :param lint_ctx: lint context with ``error``/``info`` reporting methods
    :param user_agent: optional User-Agent header to send with the request
    """
    is_valid = True
    if user_agent:
        req = Request(url, headers={"User-Agent": user_agent})
    else:
        req = url
    handle = None
    try:
        handle = urlopen(req)
        handle.read(100)
    except HTTPError as e:
        if e.code == 429:
            # too many requests: treat as reachable rather than broken
            pass
        else:
            is_valid = False
            lint_ctx.error("HTTP Error %s accessing %s" % (e.code, url))
    except URLError as e:
        is_valid = False
        lint_ctx.error("URL Error %s accessing %s" % (str(e), url))
    finally:
        # FIX: the original never closed the connection (resource leak).
        if handle is not None:
            handle.close()
    if is_valid:
        lint_ctx.info("URL OK %s" % url)
def wgetUrl(query):
    """POST *query* to the TV3 player search endpoint and return the
    response body as a string ('' on any error).
    """
    try:
        target = "http://www.tv3.ie/player/assets/php/search.php"
        values = {'queryString': query, 'limit': 20}
        headers = {}
        headers['User-Agent'] = (
            'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) '
            'Gecko/2008092417 Firefox/3.0.3')
        headers['DNT'] = '1'
        headers['Referer'] = 'http://www.tv3.ie/3player/'
        headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        # FIX: urlencode() returns str; Request POST data must be bytes
        # under Python 3.
        data = urlencode(values).encode('utf-8')
        req = Request(target, data, headers)
        response = urlopen(req)
        html = str(response.read())
        response.close()
        return html
    except Exception as exception:
        print('wgetUrl: Error retrieving URL ', exception)
        return ''
def DownloadSetting(url):
    """Scrape a settings listing page and return a list of
    ``(date, name, absolute_link)`` tuples; empty list on any error.
    """
    _list = []
    try:
        req = Request(url)
        #req.add_header('User-Agent', 'VAS')
        response = urlopen(req)
        link = six.ensure_str(response.read())
        response.close()
        rows = re.compile(
            '<td><a href="(.+?)">(.+?)</a></td>.*?<td>(.+?)</td>',
            re.DOTALL).findall(link)
        for link, name, date in rows:
            prelink = ''
            # Relative links are resolved against the listing page's base.
            # NOTE(review): only "http://" is checked, so "https://" links
            # would be wrongly prefixed — confirm the site never serves them.
            if not link.startswith("http://"):
                prelink = url.replace('asd.php', '')
            _list.append((date, name, prelink + link))
    except Exception:
        # FIX: was a bare `except:`; keep best-effort behavior but stop
        # swallowing SystemExit/KeyboardInterrupt.
        print("ERROR DownloadSetting %s" % (url))
    return _list
def __call__(self, url):
    # Extracted from above madness
    # TODO: add mode alike to 'relaxed' where we would not
    # care about content-disposition filename
    # http://stackoverflow.com/questions/862173/how-to-download-a-file-using-python-in-a-smarter-way
    request = Request(url)
    # No traffic compression since we do not know how to identify
    # exactly either it has to be decompressed
    # request.add_header('Accept-encoding', 'gzip,deflate')
    #
    # TODO: think about stamping etc -- we seems to be redoing
    # what git-annex does for us already... not really
    response = retry_urlopen(request)
    try:
        stamp = get_url_response_stamp(url, response.info())
        return dict(mtime=stamp['mtime'], size=stamp['size'], url=url)
    finally:
        response.close()
def auth_complete(self, *args, **kwargs):
    """Completes loging process, must return user instance"""
    self.process_error(self.data)
    params = self.auth_complete_params(self.validate_state())
    request = Request(
        self.ACCESS_TOKEN_URL,
        # FIX: urlencode() returns str; Request POST data must be bytes
        # under Python 3.
        data=urlencode(params).encode('utf-8'),
        headers=self.auth_headers(),
    )
    try:
        response = json.loads(dsa_urlopen(request).read())
    except HTTPError as e:
        logger.exception(
            "plugins.auth.error",
            extra={"class": type(self),
                   "status_code": e.code,
                   "response": e.read()[:128]},
        )
        raise AuthUnknownError(self)
    except (ValueError, KeyError):
        # Response body was not valid JSON (or missing expected keys).
        raise AuthUnknownError(self)
    self.process_error(response)
    return self.do_auth(response["access_token"], response=response,
                        *args, **kwargs)
def get_wf_node_log(self, name, number, node):
    """ Get build log for execution node.

    :param name: Job name, ``str``
    :param number: Build number, ``int``
    :param node: Execution node number, ``int``
    :returns: Execution node build log, ``dict``
    """
    folder_url, short_name = self.client._get_job_folder(name)
    try:
        # WF_NODE_LOG is formatted from locals(), which supplies
        # folder_url/short_name/number/node to the URL template.
        response = self.client.jenkins_open(
            Request(self.client._build_url(WF_NODE_LOG, locals())))
        if response:
            return json.loads(response)
        else:
            raise jenkins.JenkinsException(
                'job[%s] number[%d] does not exist' % (name, number))
    except HTTPError:
        raise jenkins.JenkinsException(
            'job[%s] number[%d] does not exist' % (name, number))
def create_credential(self, id, username, private_key=''):
    '''Create a new Jenkins credential

    :param id: id of the credential, ``str``
    :param username: Name of the credential, ``str``
    :param private_key: private key, ``str``
    '''
    if self.credential_exists(id):
        return None
    inner_params = {
        "": "0",
        "credentials": {
            "scope": 'GLOBAL',
            "id": id,
            "username": username,
            "password": "",
            "privateKeySource": {
                "stapler-class": "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey$DirectEntryPrivateKeySource",
                "privateKey": private_key,
            },
            "description": "jenkins credentials with private key",
            "stapler-class": "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey"
        }
    }
    # FIX: urlencode() on a nested dict serializes it via Python repr()
    # (single quotes), which is not valid JSON for Jenkins; serialize the
    # payload explicitly with json.dumps first.
    params = {'json': json.dumps(inner_params)}
    # FIX: Request POST data must be bytes under Python 3.
    data = urlencode(params).encode('utf-8')
    url = self._build_url(CREATE_CREDENTIAL)
    request = Request(url, data, URLENCODE_HEADERS)
    self.jenkins_open(request)
    self.assert_credential_exists(id)
def getUrl2(url, referer):
    """Fetch *url* with a spoofed User-Agent and the given Referer,
    retrying once with SSL certificate verification disabled on failure.

    :returns: raw response body (bytes)
    """
    req = Request(url)
    req.add_header('User-Agent', RequestAgent())
    req.add_header('Referer', referer)
    try:
        response = urlopen(req)
        link = response.read()
        response.close()
        return link
    except Exception:
        # FIX: was a bare `except:`. The retry targets SSL certificate
        # verification failures; keep the broad catch for real errors but
        # let SystemExit/KeyboardInterrupt propagate.
        import ssl
        gcontext = ssl._create_unverified_context()
        response = urlopen(req, context=gcontext)
        link = response.read()
        response.close()
        return link
def get_plugin_info(self, name, depth=2):
    """Get an installed plugin information on this Master.

    This method retrieves information about a speicifc plugin.
    The passed in plugin name (short or long) must be an exact match.

    :param name: Name (short or long) of plugin, ``str``
    :param depth: JSON depth, ``int``
    :returns: a specific plugin ``dict``

    Example::

        >>> j = Jenkins()
        >>> info = j.get_plugin_info("Gearman Plugin")
        >>> print(info)
        {u'backupVersion': None, u'version': u'0.0.4',
        u'deleted': False, u'supportsDynamicLoad': u'MAYBE',
        u'hasUpdate': True, u'enabled': True, u'pinned': False,
        u'downgradable': False, u'dependencies': [], u'url':
        u'http://wiki.jenkins-ci.org/display/JENKINS/Gearman+Plugin',
        u'longName': u'Gearman Plugin', u'active': True, u'shortName':
        u'gearman-plugin', u'bundled': False}
    """
    try:
        # locals() feeds name/depth into the PLUGIN_INFO URL template.
        request = Request(
            self.server + PLUGIN_INFO % self._get_encoded_params(locals()))
        plugins_info = json.loads(self.jenkins_open(request))
        # Match on either the long or the short plugin name.
        for plugin in plugins_info['plugins']:
            if name in (plugin['longName'], plugin['shortName']):
                return plugin
    except HTTPError:
        raise JenkinsException(
            "Error communicating with server[%s]" % self.server)
    except BadStatusLine:
        raise JenkinsException(
            "Error communicating with server[%s]" % self.server)
    except ValueError:
        raise JenkinsException(
            "Could not parse JSON info for server[%s]" % self.server)
def _query_api(self, clip):
    """Send *clip*'s audio to the Watson speech-to-text endpoint and
    return the parsed response from ``_send_request``.
    """
    # Adapted from SpeechRecognition source code, modified to get text
    # onsets. Upconvert only when below the API's minimums (16 kHz / 16-bit).
    flac_data = clip.get_flac_data(
        convert_rate=16000 if clip.sample_rate < 16000 else None,
        convert_width=2 if clip.sample_width < 2 else None)
    query = urlencode({
        "profanity_filter": "false",
        "continuous": "true",
        "model": "en-US_BroadbandModel",
        "timestamps": "true",
        "inactivity_timeout": -1,
    })
    url = ("https://stream.watsonplatform.net/speech-to-text/api/v1/"
           "recognize?{0}".format(query))
    headers = {
        "Content-Type": "audio/x-flac",
        "X-Watson-Learning-Opt-Out": "true",
    }
    return self._send_request(Request(url, data=flac_data, headers=headers))