def __handle_connection_error(self, err):
    if isinstance(err, six.string_types):
        msg = err
    elif isinstance(err, Exception) and len(err.args) > 0:
        if hasattr(err.args[0], 'reason'):
            msg = err.args[0].reason
        elif isinstance(err.args[0], Exception):
            return self.__handle_connection_error(err.args[0])
        else:
            msg = err.args[0]
    else:
        raise ClientError('connection error')
    raise ClientError('connection error ({0})'.format(msg))
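# Illustrative note (not part of the original module): requests'
# ConnectionError frequently wraps a lower-level exception in args[0], which
# may itself wrap another exception or carry a ``reason`` attribute, so the
# recursion above walks that chain until it finds something printable.  A
# hypothetical nesting might look like:
#
#     inner = socket.error(111, 'Connection refused')
#     outer = requests.exceptions.ConnectionError(inner)
#     self.__handle_connection_error(outer)
#     # raises a ClientError whose message includes the innermost error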
def print_vpn_connection(self, vpn, show_conn_info=False, stylesheet=None):
    print self.tabify(('VPNCONNECTION', vpn.get('vpnConnectionId'),
                       vpn.get('type'), vpn.get('customerGatewayId'),
                       vpn.get('vpnGatewayId'), vpn.get('state')))
    if show_conn_info and vpn.get('customerGatewayConfiguration'):
        if stylesheet is None:
            print vpn.get('customerGatewayConfiguration')
        else:
            if (stylesheet.startswith('http://') or
                    stylesheet.startswith('https://')):
                self.log.info('fetching connection info stylesheet from %s',
                              stylesheet)
                response = requests.get(stylesheet)
                try:
                    response.raise_for_status()
                except requests.exceptions.HTTPError as err:
                    raise ClientError(
                        'failed to fetch stylesheet: {0}'.format(str(err)))
                xslt_root = lxml.etree.XML(response.text)
            else:
                if stylesheet.startswith('file://'):
                    stylesheet = stylesheet[7:]
                self.log.info('using connection info stylesheet %s',
                              stylesheet)
                with open(stylesheet) as stylesheet_file:
                    xslt_root = lxml.etree.parse(stylesheet_file)
            transform = lxml.etree.XSLT(xslt_root)
            conn_info_root = lxml.etree.parse(
                io.BytesIO(vpn.get('customerGatewayConfiguration')))
            print transform(conn_info_root)
    for tag in vpn.get('tagSet') or []:
        self.print_resource_tag(tag, vpn.get('vpnConnectionId'))
def __populate_args_from_metadata(self):
    """
    Populate missing/empty values in self.args using info obtained
    from the metadata service.
    """
    try:
        if not self.args.get('kernel'):
            self.args['kernel'] = self.__read_metadata_value('kernel-id')
            self.log.info('inherited kernel: %s', self.args['kernel'])
        if not self.args.get('ramdisk'):
            self.args['ramdisk'] = self.__read_metadata_value('ramdisk-id')
            self.log.info('inherited ramdisk: %s', self.args['ramdisk'])
        if not self.args.get('productcodes'):
            self.args['productcodes'] = self.__read_metadata_list(
                'product-codes')
            if self.args['productcodes']:
                self.log.info('inherited product codes: %s',
                              ','.join(self.args['productcodes']))
        if not self.args.get('block_device_mappings'):
            self.args['block_device_mappings'] = {}
            for key, val in (self.__read_metadata_dict(
                    'block-device-mapping') or {}).iteritems():
                if not key.startswith('ebs'):
                    self.args['block_device_mappings'][key] = val
            for key, val in self.args['block_device_mappings'].iteritems():
                self.log.info('inherited block device mapping: %s=%s',
                              key, val)
    except requests.exceptions.Timeout:
        raise ClientError('metadata service is absent or unresponsive; '
                          'use --no-inherit to proceed without it')
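# Illustrative note (an assumption, not from the original source): the
# block-device-mapping metadata tree usually maps logical names to device
# names, e.g. something like {'ami': 'sda1', 'ephemeral0': 'sdb',
# 'root': '/dev/sda1'}; keys beginning with 'ebs' are skipped above.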
def try_send(self, source, retries_left=0):
    self.body = source
    if retries_left > 0 and not source.can_rewind:
        self.log.warn('source cannot rewind, so requested retries will '
                      'not be attempted')
        retries_left = 0
    try:
        response = self.send()
        our_md5 = source.read_hexdigest
        their_md5 = response.headers['ETag'].lower().strip('"')
        if their_md5 != our_md5:
            self.log.error('corrupt upload (our MD5: %s, their MD5: %s)',
                           our_md5, their_md5)
            raise ClientError('upload was corrupted during transit')
    except ClientError as err:
        if len(err.args) > 0 and isinstance(err.args[0], socket.error):
            self.log.warn('socket error')
            if retries_left > 0:
                self.log.info('retrying upload (%i retries remaining)',
                              retries_left)
                source.rewind()
                return self.try_send(source, retries_left - 1)
        with self._lock:
            self.last_upload_error = err
        raise
    except Exception as err:
        with self._lock:
            self.last_upload_error = err
        raise
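# A minimal sketch (an assumption, not this project's actual upload code) of
# the integrity check try_send performs: compare a locally computed MD5
# digest against the quoted ETag the server returns for a non-multipart
# upload.  ``payload`` and ``response`` are hypothetical names here.
#
#     import hashlib
#     local_md5 = hashlib.md5(payload).hexdigest()
#     remote_md5 = response.headers['ETag'].lower().strip('"')
#     corrupted = local_md5 != remote_md5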
def check_metadata():
    """Check if instance metadata is available."""
    try:
        response = requests.get(METADATA_URL)
        if not response.ok:
            raise ServerError(response)
    except ConnectionError as err:
        raise ClientError("unable to contact metadata service: {0}".format(
            err.args[0]))
def ensure_kernel_reg_privs(self):
    req = ListAccountAliases(service=self.__euare, config=self.config)
    response = req.main()
    for alias in response.get('AccountAliases', []):
        if alias == 'eucalyptus':
            self.log.debug("found account alias '%s'; ok to register "
                           "kernel/ramdisk images", alias)
            return
    raise ClientError("kernel/ramdisk images may only be registered by "
                      "the 'eucalyptus' account")
def get_metadata(*paths):
    """Get a single metadata value.

    Returns a string containing the value of the metadata key.

    :param paths: A variable number of items to be joined together as
        segments of the metadata url.
    """
    url = METADATA_URL
    if paths:
        url = urljoin(url, "/".join(paths))
    try:
        response = requests.get(url, timeout=METADATA_TIMEOUT)
    except Timeout:
        raise ClientError(
            "timeout occurred when getting metadata from {0}".format(url))
    except ConnectionError as err:
        raise ClientError(
            "error occurred when getting metadata from {0}: {1}".format(
                url, err.args[0]))
    if response.ok:
        return response.content
    else:
        raise ServerError(response)
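# Hypothetical usage sketch (illustrative only): fetch a couple of standard
# EC2-style metadata keys by passing their path segments.
#
#     instance_id = get_metadata('instance-id')
#     ramdisk_id = get_metadata('ramdisk-id')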
def preprocess(self):
    if not self.args.get('ThumbprintList.member'):
        req = GetOpenIDConnectProvider.from_other(
            self, OpenIDConnectProviderArn=self.args.get(
                'OpenIDConnectProviderArn'))
        url = req.main().get('Url')
        if not url:
            raise ClientError("unable to determine the provider's URL "
                              "automatically; please specify a thumbprint "
                              "with -t/--thumbprint")
        elif '://' not in url:
            # URLs seem to come back from IAM without schemes
            url = 'https://{0}'.format(url)
        self.params['ThumbprintList.member.1'] = \
            util.get_cert_fingerprint(url, log=self.log)
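# util.get_cert_fingerprint is the helper this command actually uses.  As a
# rough sketch of the idea (an assumption, not that helper's implementation),
# a server certificate's hex fingerprint can be derived from the provider's
# host with the standard library alone; note the netloc may include a port:
#
#     import hashlib
#     import ssl
#     import urlparse
#
#     host = urlparse.urlparse(url).netloc
#     pem = ssl.get_server_certificate((host, 443))
#     der = ssl.PEM_cert_to_DER_cert(pem)
#     fingerprint = hashlib.sha1(der).hexdigest()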
def send_request(self, method='GET', path=None, params=None, headers=None,
                 data=None, files=None, auth=None):
    url = self.__get_url_for_path(path)
    headers = dict(headers)
    if 'host' not in [header.lower() for header in headers]:
        headers['Host'] = urlparse.urlparse(self.endpoint).netloc
    try:
        max_tries = self.max_retries + 1
        assert max_tries >= 1
        redirects_left = 5
        if isinstance(data, file) and hasattr(data, 'seek'):
            # If we're redirected we need to be able to reset
            data_file_offset = data.tell()
        else:
            data_file_offset = None
        while True:
            for attempt_no, delay in enumerate(
                    _generate_delays(max_tries), 1):
                # Use exponential backoff if this is a retry
                if delay > 0:
                    self.log.debug('will retry after %.3f seconds', delay)
                    time.sleep(delay)
                self.log.info('sending request (attempt %i of %i)',
                              attempt_no, max_tries)
                p_request = self.__log_and_prepare_request(
                    method, url, params, data, files, headers, auth)
                proxies = requests.utils.get_environ_proxies(url)
                for key, val in sorted(proxies.items()):
                    self.log.debug('request proxy: %s=%s', key, val)
                p_request.start_time = datetime.datetime.now()
                try:
                    response = self.session.send(
                        p_request, timeout=self.timeout, proxies=proxies,
                        allow_redirects=False)
                except requests.exceptions.Timeout:
                    if attempt_no < max_tries:
                        self.log.debug('timeout', exc_info=True)
                        if data_file_offset is not None:
                            self.log.debug('re-seeking body to '
                                           'beginning of file')
                            # pylint: disable=E1101
                            data.seek(data_file_offset)
                            # pylint: enable=E1101
                            continue
                        elif not hasattr(data, 'tell'):
                            continue
                        # Fallthrough -- if it has a file pointer but not
                        # seek we can't retry because we can't rewind.
                    raise
                if response.status_code not in (500, 503):
                    break
                # If it *was* in that list, retry
            if (response.status_code in (301, 302, 307, 308) and
                    redirects_left > 0 and 'Location' in response.headers):
                # Standard redirect -- we need to handle this ourselves
                # because we have to re-sign requests when their URLs
                # change.
                redirects_left -= 1
                parsed_rdr = urlparse.urlparse(
                    response.headers['Location'])
                parsed_url = urlparse.urlparse(url)
                new_url_bits = []
                for rdr_bit, url_bit in zip(parsed_rdr, parsed_url):
                    new_url_bits.append(rdr_bit or url_bit)
                if 'Host' in headers:
                    headers['Host'] = new_url_bits[1]  # netloc
                url = urlparse.urlunparse(new_url_bits)
                self.log.debug('redirecting to %s (%i redirect(s) '
                               'remaining)', url, redirects_left)
                if data_file_offset is not None:
                    self.log.debug('re-seeking body to beginning of file')
                    # pylint: disable=E1101
                    data.seek(data_file_offset)
                    # pylint: enable=E1101
                continue
            elif response.status_code >= 300:
                # We include 30x because we've handled the standard method
                # of redirecting, but the server might still be trying to
                # redirect another way for some reason.
                self.handle_http_error(response)
            return response
    except requests.exceptions.Timeout as exc:
        self.log.debug('timeout', exc_info=True)
        raise TimeoutError('request timed out', exc)
    except requests.exceptions.ConnectionError as exc:
        self.log.debug('connection error', exc_info=True)
        return self.__handle_connection_error(exc)
    except requests.exceptions.HTTPError as exc:
        return self.handle_http_error(response)
    except requests.exceptions.RequestException as exc:
        self.log.debug('request error', exc_info=True)
        raise ClientError(exc)
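# _generate_delays is defined elsewhere; the retry loop above only relies on
# it yielding one delay per attempt, with 0 for the first try.  A minimal
# sketch of such a generator, assuming simple exponential backoff (not
# necessarily this project's actual implementation):
#
#     def _generate_delays(max_tries, first_delay=0.5):
#         yield 0  # the first attempt happens immediately
#         for retry_no in range(max_tries - 1):
#             yield first_delay * 2 ** retry_no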