def searchEmsurfer(emd, **kwargs):
    """Search with the EM-Surfer server with input of EMD ID (or local EMD file).

    EM-Surfer server: http://kiharalab.org/em-surfer/

    :arg emd: EMD code or local EMD map file for the query protein
    """
    import requests
    from requests.models import Request

    LOGGER.timeit('_emsurfer')
    timeout = kwargs.pop('timeout', 120)

    emsurferURL = "http://kiharalab.org/em-surfer/cgi-bin/listResults.cgi"

    volumeFilter = kwargs.get('volumeFilter', 'on')
    representation = kwargs.get('representation', 'recommend')
    minResolution = kwargs.get('minResolution', 0.5)
    maxResolution = kwargs.get('maxResolution', 30.)

    if isinstance(emd, EMDMAP):
        emdmap = emd
        stream = createStringIO()
        writeEMD(stream, emdmap)
        data = stream.getvalue()
        stream.close()
        files = {"file1": data}
        emdId = emdmap.getTitle()
        emsurfer_title = 'Title_' + emdId
    elif isinstance(emd, str):
        if os.path.isfile(emd):
            emdmap = parseEMD(emd)
            filename = os.path.basename(emd)
            filename, ext = os.path.splitext(filename)
            if ext.lower() == '.gz':
                filename2, ext2 = os.path.splitext(filename)
                if ext2.lower() == '.emd':
                    filename = filename2
            emdId = filename
            # serialize the parsed map so it can be uploaded as a file
            stream = createStringIO()
            writeEMD(stream, emdmap)
            data = stream.getvalue()
            stream.close()
            files = {"file1": data}
            emsurfer_title = 'Title_' + emdId
        else:
            emdId = emd
            emsurfer_title = 'Title_' + emdId
            files = None

    method = 'post'
    url = emsurferURL

    params = {
        'emdbid': emdId,
        'volumefilter': volumeFilter,
        'representation': representation,
        'minresolution': minResolution,
        'maxresolution': maxResolution,
    }

    # Generate and send the request through a requests Session, so that
    # params and file uploads are encoded for us
    req = Request(
        method=method.upper(),
        url=url,
        files=files,
        data={},
        params=params,
    )
    session = requests.sessions.Session()
    prep = session.prepare_request(req)
    resp = session.send(prep)
    url = resp.url

    LOGGER.debug('Submitted Emsurfer search for EMD "{0}".'.format(emdId))
    LOGGER.info(url)
    LOGGER.clear()

    obj = EmsurferRecord(url, emdId, timeout=timeout, **kwargs)
    return obj
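# A minimal usage sketch for searchEmsurfer above, assuming a ProDy-style
# environment where the function is importable; '1884' is only a
# placeholder EMD accession.
result = searchEmsurfer('1884', timeout=60)
print(result)  # an EmsurferRecord wrapping the EM-Surfer result-page URL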
def forward_request(self, method, path, data, headers):
    # parse path and query params
    parsed_path = urlparse.urlparse(path)

    # Make sure we use 'localhost' as forward host, to ensure moto uses path style addressing.
    # Note that all S3 clients using LocalStack need to enable path style addressing.
    if 's3.amazonaws.com' not in headers.get('host', ''):
        headers['host'] = 'localhost'

    # check content md5 hash integrity if not a copy request
    if 'Content-MD5' in headers and not self.is_s3_copy_request(headers, path):
        response = check_content_md5(data, headers)
        if response is not None:
            return response

    modified_data = None

    # check bucket name
    bucket_name = get_bucket_name(path, headers)
    if method == 'PUT' and not re.match(BUCKET_NAME_REGEX, bucket_name):
        if len(parsed_path.path) <= 1:
            return error_response('Unable to extract valid bucket name. Please ensure that your AWS SDK is ' +
                'configured to use path style addressing, or send a valid <Bucket>.s3.amazonaws.com "Host" header',
                'InvalidBucketName', status_code=400)
        return error_response('The specified bucket is not valid.', 'InvalidBucketName', status_code=400)

    # TODO: For some reason, moto doesn't allow us to put a location constraint on us-east-1
    to_find = to_bytes('<LocationConstraint>us-east-1</LocationConstraint>')
    if data and data.startswith(to_bytes('<')) and to_find in data:
        modified_data = data.replace(to_find, to_bytes(''))

    # If this request contains streaming v4 authentication signatures, strip them from the message
    # Related issue: https://github.com/localstack/localstack/issues/98
    # TODO we should evaluate whether to replace moto s3 with scality/S3:
    # https://github.com/scality/S3/issues/237
    if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
        modified_data = strip_chunk_signatures(modified_data or data)
        headers['content-length'] = headers.get('x-amz-decoded-content-length')

    # POST requests to S3 may include a "${filename}" placeholder in the
    # key, which should be replaced with an actual file name before storing.
    if method == 'POST':
        original_data = modified_data or data
        expanded_data = multipart_content.expand_multipart_filename(original_data, headers)
        if expanded_data is not original_data:
            modified_data = expanded_data

    # If no content-type is provided, 'binary/octet-stream' should be used
    # src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
    if method == 'PUT' and not headers.get('content-type'):
        headers['content-type'] = 'binary/octet-stream'

    # persist this API call to disk
    persistence.record('s3', method, path, data, headers)

    # parse query params
    query = parsed_path.query
    path = parsed_path.path
    bucket = path.split('/')[1]
    query_map = urlparse.parse_qs(query, keep_blank_values=True)

    # remap metadata query params (not supported in moto) to request headers
    append_metadata_headers(method, query_map, headers)

    if query == 'notification' or 'notification' in query_map:
        # handle and return response for ?notification request
        response = handle_notification_request(bucket, method, data)
        return response

    if query == 'cors' or 'cors' in query_map:
        if method == 'GET':
            return get_cors(bucket)
        if method == 'PUT':
            return set_cors(bucket, data)
        if method == 'DELETE':
            return delete_cors(bucket)

    if query == 'lifecycle' or 'lifecycle' in query_map:
        if method == 'GET':
            return get_lifecycle(bucket)
        if method == 'PUT':
            return set_lifecycle(bucket, data)

    if query == 'replication' or 'replication' in query_map:
        if method == 'GET':
            return get_replication(bucket)
        if method == 'PUT':
            return set_replication(bucket, data)

    if query == 'encryption' or 'encryption' in query_map:
        if method == 'GET':
            return get_encryption(bucket)
        if method == 'PUT':
            return set_encryption(bucket, data)

    if query == 'object-lock' or 'object-lock' in query_map:
        if method == 'GET':
            return get_object_lock(bucket)
        if method == 'PUT':
            return set_object_lock(bucket, data)

    if modified_data is not None:
        return Request(data=modified_data, headers=headers, method=method)
    return True
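# A minimal stand-in for strip_chunk_signatures used above (a sketch, not
# LocalStack's actual implementation). AWS SigV4 streaming uploads
# ('x-amz-content-sha256: STREAMING-AWS4-HMAC-SHA256-PAYLOAD') frame the
# body as repeated  <hex-size>;chunk-signature=<64-hex>\r\n<data>\r\n
# chunks, so removing that framing recovers the raw payload.
import re

def strip_chunk_signatures_sketch(data):
    header = rb'[0-9a-fA-F]+;chunk-signature=[0-9a-f]{64}\r\n'
    parts = re.split(header, data)
    # each payload piece still carries the CRLF that terminated its chunk
    return b''.join(p[:-2] if p.endswith(b'\r\n') else p for p in parts)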
def py3(arg):
    # TODO: Implement feeling lucky search
    payload = {'q': arg}
    return Request(url=PYTHON3_REF, params=payload).prepare().url
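# The pattern above, preparing a requests.Request purely to obtain its
# encoded URL, works standalone with no network I/O:
from requests.models import Request

url = Request(url='https://docs.python.org/3/search.html',
              params={'q': 'urlencode'}).prepare().url
print(url)  # https://docs.python.org/3/search.html?q=urlencode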
def forward_request(self, method, path, data, headers):
    if method == 'OPTIONS':
        return 200

    data = data or ''
    data_orig = data
    data = aws_stack.fix_account_id_in_arns(
        data, existing='%3A{}%3Astack/'.format(TEST_AWS_ACCOUNT_ID),
        replace='%3A{}%3Astack/'.format(MOTO_CLOUDFORMATION_ACCOUNT_ID), colon_delimiter='')
    data = aws_stack.fix_account_id_in_arns(
        data, existing='%3A{}%3AchangeSet/'.format(TEST_AWS_ACCOUNT_ID),
        replace='%3A{}%3AchangeSet/'.format(MOTO_CLOUDFORMATION_ACCOUNT_ID), colon_delimiter='')
    data = aws_stack.fix_account_id_in_arns(
        data, existing=TEST_AWS_ACCOUNT_ID, replace=MOTO_ACCOUNT_ID, colon_delimiter='%3A')

    req_data = None
    if method == 'POST' and path == '/':
        req_data = urlparse.parse_qs(to_str(data))
        req_data = dict([(k, v[0]) for k, v in req_data.items()])
        action = req_data.get('Action')
        stack_name = req_data.get('StackName')

        if action == 'CreateStack':
            event_publisher.fire_event(
                event_publisher.EVENT_CLOUDFORMATION_CREATE_STACK,
                payload={'n': event_publisher.get_hash(stack_name)})

        if action == 'DeleteStack':
            client = aws_stack.connect_to_service(
                'cloudformation',
                region_name=aws_stack.extract_region_from_auth_header(headers))
            stack_resources = client.list_stack_resources(StackName=stack_name)['StackResourceSummaries']
            template_deployer.delete_stack(stack_name, stack_resources)

        if action == 'DescribeStackEvents':
            # fix an issue where moto cannot handle ARNs as stack names (or missing names)
            run_fix = not stack_name
            if stack_name:
                if stack_name.startswith('arn:aws:cloudformation'):
                    run_fix = True
                    pattern = r'arn:aws:cloudformation:[^:]+:[^:]+:stack/([^/]+)(/.+)?'
                    stack_name = re.sub(pattern, r'\1', stack_name)
            if run_fix:
                stack_names = [stack_name] if stack_name else self._list_stack_names()
                client = aws_stack.connect_to_service('cloudformation')
                events = []
                for stack_name in stack_names:
                    tmp = client.describe_stack_events(StackName=stack_name)['StackEvents'][:1]
                    events.extend(tmp)
                events = [{'member': e} for e in events]
                response_content = '<StackEvents>%s</StackEvents>' % obj_to_xml(events)
                return make_response('DescribeStackEvents', response_content)

    if req_data:
        if action == 'ValidateTemplate':
            return validate_template(req_data)

        if action in ['CreateStack', 'UpdateStack']:
            do_replace_url = is_real_s3_url(req_data.get('TemplateURL'))
            if do_replace_url:
                req_data['TemplateURL'] = convert_s3_to_local_url(req_data['TemplateURL'])
            url = req_data.get('TemplateURL', '')
            is_custom_local_endpoint = is_local_service_url(url) and '://localhost:' not in url
            modified_template_body = transform_template(req_data)
            if not modified_template_body and is_custom_local_endpoint:
                modified_template_body = get_template_body(req_data)
            if modified_template_body:
                req_data.pop('TemplateURL', None)
                req_data['TemplateBody'] = modified_template_body
            if modified_template_body or do_replace_url:
                data = urlparse.urlencode(req_data, doseq=True)
                return Request(data=data, headers=headers, method=method)

        if data != data_orig or action in ['DescribeChangeSet', 'ExecuteChangeSet']:
            return Request(data=urlparse.urlencode(req_data, doseq=True),
                           headers=headers, method=method)

    return True
def forward_request(self, method, path, data, headers):
    modified_data = None

    # If this request contains streaming v4 authentication signatures, strip them from the message
    # Related issue: https://github.com/localstack/localstack/issues/98
    # TODO we should evaluate whether to replace moto s3 with scality/S3:
    # https://github.com/scality/S3/issues/237
    if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
        modified_data = strip_chunk_signatures(data)

    # POST requests to S3 may include a "${filename}" placeholder in the
    # key, which should be replaced with an actual file name before storing.
    if method == 'POST':
        original_data = modified_data or data
        expanded_data = expand_multipart_filename(original_data, headers)
        if expanded_data is not original_data:
            modified_data = expanded_data

    # persist this API call to disk
    persistence.record('s3', method, path, data, headers)

    parsed = urlparse.urlparse(path)
    query = parsed.query
    path = parsed.path
    bucket = path.split('/')[1]
    query_map = urlparse.parse_qs(query)
    if query == 'notification' or 'notification' in query_map:
        response = Response()
        response.status_code = 200
        if method == 'GET':
            # TODO check if bucket exists
            result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
            if bucket in S3_NOTIFICATIONS:
                notif = S3_NOTIFICATIONS[bucket]
                for dest in ['Queue', 'Topic', 'CloudFunction']:
                    if dest in notif:
                        result += ('''<{dest}Configuration>
                                <Id>{uid}</Id>
                                <{dest}>{endpoint}</{dest}>
                                <Event>{event}</Event>
                            </{dest}Configuration>''').format(
                            dest=dest, uid=uuid.uuid4(),
                            endpoint=S3_NOTIFICATIONS[bucket][dest],
                            event=S3_NOTIFICATIONS[bucket]['Event'])
            result += '</NotificationConfiguration>'
            response._content = result

        if method == 'PUT':
            tree = ET.fromstring(data)
            for dest in ['Queue', 'Topic', 'CloudFunction']:
                config = tree.find('{%s}%sConfiguration' % (XMLNS_S3, dest))
                if config is not None and len(config):
                    # TODO: what if we have multiple destinations - would we overwrite the config?
                    S3_NOTIFICATIONS[bucket] = {
                        'Id': get_xml_text(config, 'Id'),
                        'Event': get_xml_text(config, 'Event', ns=XMLNS_S3),
                        # TODO extract 'Events' attribute (in addition to 'Event')
                        dest: get_xml_text(config, dest, ns=XMLNS_S3),
                    }

        # return response for ?notification request
        return response

    if query == 'cors' or 'cors' in query_map:
        if method == 'GET':
            return get_cors(bucket)
        if method == 'PUT':
            return set_cors(bucket, data)
        if method == 'DELETE':
            return delete_cors(bucket)

    if modified_data:
        return Request(data=modified_data, headers=headers, method=method)
    return True
def forward_request(self, method, path, data, headers):
    result = handle_special_request(method, path, data, headers)
    if result is not None:
        return result

    # prepare request headers
    self.prepare_request_headers(headers)

    data_orig = data
    data = data or '{}'
    data = json.loads(to_str(data))
    ddb_client = aws_stack.connect_to_service('dynamodb')
    action = headers.get('X-Amz-Target')

    if self.should_throttle(action):
        return error_response_throughput()

    ProxyListenerDynamoDB.thread_local.existing_item = None

    if action == '%s.CreateTable' % ACTION_PREFIX:
        # Check if table exists, to avoid error log output from DynamoDBLocal
        if self.table_exists(ddb_client, data['TableName']):
            return error_response(message='Table already created',
                                  error_type='ResourceInUseException', code=400)

    if action == '%s.CreateGlobalTable' % ACTION_PREFIX:
        return create_global_table(data)

    elif action == '%s.DescribeGlobalTable' % ACTION_PREFIX:
        return describe_global_table(data)

    elif action == '%s.ListGlobalTables' % ACTION_PREFIX:
        return list_global_tables(data)

    elif action == '%s.UpdateGlobalTable' % ACTION_PREFIX:
        return update_global_table(data)

    elif action in ('%s.PutItem' % ACTION_PREFIX, '%s.UpdateItem' % ACTION_PREFIX,
                    '%s.DeleteItem' % ACTION_PREFIX):
        # find an existing item and store it in a thread-local, so we can access it in return_response,
        # in order to determine whether an item already existed (MODIFY) or not (INSERT)
        try:
            if has_event_sources_or_streams_enabled(data['TableName']):
                ProxyListenerDynamoDB.thread_local.existing_item = find_existing_item(data)
        except Exception as e:
            if 'ResourceNotFoundException' in str(e):
                return get_table_not_found_error()
            raise

        # Fix incorrect values if ReturnValues==ALL_OLD and ReturnConsumedCapacity is
        # empty, see https://github.com/localstack/localstack/issues/2049
        if ((data.get('ReturnValues') == 'ALL_OLD') or (not data.get('ReturnValues'))) \
                and not data.get('ReturnConsumedCapacity'):
            data['ReturnConsumedCapacity'] = 'TOTAL'
            return Request(data=json.dumps(data), method=method, headers=headers)

    elif action == '%s.DescribeTable' % ACTION_PREFIX:
        # Check if table exists, to avoid error log output from DynamoDBLocal
        if not self.table_exists(ddb_client, data['TableName']):
            return get_table_not_found_error()

    elif action == '%s.DeleteTable' % ACTION_PREFIX:
        # Check if table exists, to avoid error log output from DynamoDBLocal
        if not self.table_exists(ddb_client, data['TableName']):
            return get_table_not_found_error()

    elif action == '%s.BatchWriteItem' % ACTION_PREFIX:
        existing_items = []
        for table_name in sorted(data['RequestItems'].keys()):
            for request in data['RequestItems'][table_name]:
                for key in ['PutRequest', 'DeleteRequest']:
                    inner_request = request.get(key)
                    if inner_request:
                        existing_items.append(find_existing_item(inner_request, table_name))
        ProxyListenerDynamoDB.thread_local.existing_items = existing_items

    elif action == '%s.Query' % ACTION_PREFIX:
        if data.get('IndexName'):
            if not is_index_query_valid(to_str(data['TableName']), data.get('Select')):
                return error_response(
                    message='One or more parameter values were invalid: Select type ALL_ATTRIBUTES '
                            'is not supported for global secondary index id-index because its projection '
                            'type is not ALL',
                    error_type='ValidationException', code=400)

    elif action == '%s.TransactWriteItems' % ACTION_PREFIX:
        existing_items = []
        for item in data['TransactItems']:
            for key in ['Put', 'Update', 'Delete']:
                inner_item = item.get(key)
                if inner_item:
                    existing_items.append(find_existing_item(inner_item))
        ProxyListenerDynamoDB.thread_local.existing_items = existing_items

    elif action == '%s.UpdateTimeToLive' % ACTION_PREFIX:
        # TODO: TTL status is maintained/mocked but no real expiry is happening for items
        response = Response()
        response.status_code = 200
        self._table_ttl_map[data['TableName']] = {
            'AttributeName': data['TimeToLiveSpecification']['AttributeName'],
            'Status': data['TimeToLiveSpecification']['Enabled']
        }
        response._content = json.dumps({'TimeToLiveSpecification': data['TimeToLiveSpecification']})
        fix_headers_for_updated_response(response)
        return response

    elif action == '%s.DescribeTimeToLive' % ACTION_PREFIX:
        response = Response()
        response.status_code = 200
        if data['TableName'] in self._table_ttl_map:
            if self._table_ttl_map[data['TableName']]['Status']:
                ttl_status = 'ENABLED'
            else:
                ttl_status = 'DISABLED'
            response._content = json.dumps({
                'TimeToLiveDescription': {
                    'AttributeName': self._table_ttl_map[data['TableName']]['AttributeName'],
                    'TimeToLiveStatus': ttl_status
                }
            })
        else:  # TTL for dynamodb table not set
            response._content = json.dumps({
                'TimeToLiveDescription': {
                    'TimeToLiveStatus': 'DISABLED'
                }
            })
        fix_headers_for_updated_response(response)
        return response

    elif action == '%s.TagResource' % ACTION_PREFIX or action == '%s.UntagResource' % ACTION_PREFIX:
        response = Response()
        response.status_code = 200
        response._content = ''  # returns an empty body on success.
        fix_headers_for_updated_response(response)
        return response

    elif action == '%s.ListTagsOfResource' % ACTION_PREFIX:
        response = Response()
        response.status_code = 200
        response._content = json.dumps({
            'Tags': [{'Key': k, 'Value': v}
                     for k, v in TABLE_TAGS.get(data['ResourceArn'], {}).items()]
        })
        fix_headers_for_updated_response(response)
        return response

    return Request(data=data_orig, method=method, headers=headers)
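# For reference, the listener above dispatches on the 'X-Amz-Target' header
# of DynamoDB's JSON wire protocol. A request it would route to the PutItem
# branch looks roughly like this (a sketch; ACTION_PREFIX is LocalStack's
# prefix constant, typically the 'DynamoDB_20120810' API-version string,
# and the table name is a placeholder):
import json

headers = {
    'X-Amz-Target': 'DynamoDB_20120810.PutItem',
    'Content-Type': 'application/x-amz-json-1.0',
}
data = json.dumps({
    'TableName': 'my-table',
    'Item': {'id': {'S': '42'}},
    'ReturnValues': 'ALL_OLD',  # with no ReturnConsumedCapacity, triggers the fix above
})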
def test_auth_header(asap_config_file):
    request = AsapAuth(asap_config_file)(Request())
    assert 'Authorization' in request.headers
    assert request.headers['Authorization'].startswith('Bearer ')
def request(self, method, url,
            params=None, data=None, headers=None, cookies=None, files=None, auth=None,
            timeout=2, allow_redirects=True, proxies=None, hooks=None, stream=None,
            verify=None, cert=None, json=None, log=True):
    if params:
        url = '{}?{}'.format(url, urlencode(params))
    headers = headers or {
        'user-agent': random.choice(self._user_agent_list)
    }
    req = Request(
        method=method.upper(),
        url=url,
        headers=headers,
        files=files,
        data=data or {},
        json=json,
        auth=auth,
        cookies=cookies,
        hooks=hooks,
    )
    prep = self.prepare_request(req)

    proxies = proxies or {}
    settings = self.merge_environment_settings(prep.url, proxies, stream, verify, cert)
    send_kwargs = {
        'timeout': timeout,
        'allow_redirects': allow_redirects,
    }
    send_kwargs.update(settings)

    if log:
        print('{}: {}'.format(method, url))
    for i in range(4):
        try:
            r = self.send(prep, **send_kwargs)
            r.raise_for_status()
            return r
        except Timeout:
            why = ' Timeout '
        except HTTPError:
            why = '{} Error'.format(r.status_code)
        if i != 3:
            sleep(1)

    # all retries failed: log the error
    now = datetime.now()
    t = now.strftime('%y-%m-%d %H:%M:%S')
    print('[{}] {}'.format(why, url))
    with open('./MyRequestsError.log', 'a', encoding='utf-8') as f:
        f.write('[{}] [{}] {}\n'.format(t, why, url))
def setUp(self):
    # Make a new serializer to test with
    self.test_serializer = Serializer()
    serializers.serializer_registry['test'] = self.test_serializer

    # Instantiate the cassette to test with
    self.cassette = cassette.Cassette(
        TestCassette.cassette_name, 'test', record_mode='once'
    )

    # Create a new object to serialize
    r = Response()
    r.status_code = 200
    r.reason = 'OK'
    r.encoding = 'utf-8'
    r.headers = CaseInsensitiveDict({'Content-Type': decode('foo')})
    r.url = 'http://example.com'
    util.add_urllib3_response({
        'body': {
            'string': decode('foo'),
            'encoding': 'utf-8'
        }
    }, r, HTTPHeaderDict({'Content-Type': decode('foo')}))
    self.response = r

    # Create an associated request
    r = Request()
    r.method = 'GET'
    r.url = 'http://example.com'
    r.headers = {}
    r.data = {'key': 'value'}

    self.response.request = r.prepare()
    self.response.request.headers.update(
        {'User-Agent': 'betamax/test header'}
    )

    # Expected serialized cassette data.
    self.json = {
        'request': {
            'body': {
                'encoding': 'utf-8',
                'string': 'key=value',
            },
            'headers': {
                'User-Agent': ['betamax/test header'],
                'Content-Length': ['9'],
                'Content-Type': ['application/x-www-form-urlencoded'],
            },
            'method': 'GET',
            'uri': 'http://example.com/',
        },
        'response': {
            'body': {
                'string': decode('foo'),
                'encoding': 'utf-8',
            },
            'headers': {'Content-Type': [decode('foo')]},
            'status': {'code': 200, 'message': 'OK'},
            'url': 'http://example.com',
        },
        'recorded_at': '2013-08-31T00:00:00',
    }
    self.date = datetime(2013, 8, 31)
    self.cassette.save_interaction(self.response, self.response.request)
    self.interaction = self.cassette.interactions[0]
    self.interaction.recorded_at = self.date
def update_cloudformation(method, path, data, headers, response=None, return_forward_info=False):
    req_data = None
    if method == 'POST' and path == '/':
        req_data = urlparse.parse_qs(data)
        action = req_data.get('Action')[0]

    if return_forward_info:
        if req_data:
            if action == 'CreateChangeSet':
                return create_change_set(req_data)
            elif action == 'DescribeChangeSet':
                return describe_change_set(req_data)
            elif action == 'ExecuteChangeSet':
                return execute_change_set(req_data)
            elif action == 'UpdateStack' and req_data.get('TemplateURL'):
                # Temporary fix until the moto CF backend can handle TemplateURL (currently fails)
                url = re.sub(r'https?://s3\.amazonaws\.com',
                             aws_stack.get_local_service_url('s3'),
                             req_data.get('TemplateURL')[0])
                req_data['TemplateBody'] = requests.get(url).content
                modified_data = urlparse.urlencode(req_data, doseq=True)
                return Request(data=modified_data, headers=headers, method=method)
        return True

    if req_data:
        if action == 'DescribeStackResources':
            if response.status_code < 300:
                response_dict = xmltodict.parse(response.content)['DescribeStackResourcesResponse']
                resources = response_dict['DescribeStackResourcesResult']['StackResources']
                if not resources:
                    # Check if stack exists
                    stack_name = req_data.get('StackName')[0]
                    cloudformation_client = aws_stack.connect_to_service('cloudformation')
                    try:
                        cloudformation_client.describe_stacks(StackName=stack_name)
                    except Exception as e:
                        return error_response('Stack with id %s does not exist' % stack_name, code=404)
        if action == 'DescribeStackResource':
            if response.status_code >= 500:
                # fix an error in moto where it fails with 500 if the stack does not exist
                return error_response('Stack resource does not exist', code=404)
        elif action == 'CreateStack' or action == 'UpdateStack':
            # run the actual deployment
            template = template_deployer.template_to_json(req_data.get('TemplateBody')[0])
            template_deployer.deploy_template(template, req_data.get('StackName')[0])
            if response.status_code >= 400:
                return make_response(action)
def session_request(self, method, url,
                    params=None, data=None, headers=None, cookies=None, files=None, auth=None,
                    timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None,
                    verify=False, cert=None, json=None):
    # Create the Request.
    merged_cookies = merge_cookies(
        merge_cookies(RequestsCookieJar(), self.cookies), cookies)
    default_header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36",
        "Connection": "close"
    }
    req = Request(
        method=method.upper(),
        url=url,
        headers=merge_setting(headers, default_header),
        files=files,
        data=data or {},
        json=json,
        params=params or {},
        auth=auth,
        cookies=merged_cookies,
        hooks=hooks,
    )
    prep = self.prepare_request(req)

    raw = ''
    if prep.body:
        raw = "{}\n{}\n\n{}\n\n".format(
            prep.method + ' ' + prep.url + ' HTTP/1.1',
            '\n'.join('{}: {}'.format(k, v) for k, v in prep.headers.items()),
            prep.body)
    else:
        raw = "{}\n{}\n\n".format(
            prep.method + ' ' + prep.url + ' HTTP/1.1',
            '\n'.join('{}: {}'.format(k, v) for k, v in prep.headers.items()))

    proxies = proxies or {}
    if conf["proxy_config_bool"] and not proxies:
        proxies = conf["proxy"]

    # prep.url = prep.url.encode('utf-8', errors='ignore').decode('utf-8', errors='ignore')
    # fix https://github.com/boy-hack/w13scan/issues/64
    settings = self.merge_environment_settings(prep.url, proxies, stream, verify, cert)

    # Send the request.
    send_kwargs = {
        'timeout': timeout or conf["timeout"],
        'allow_redirects': allow_redirects,
    }
    send_kwargs.update(settings)
    resp = self.send(prep, **send_kwargs)

    if resp.encoding == 'ISO-8859-1':
        encodings = get_encodings_from_content(resp.text)
        if encodings:
            encoding = encodings[0]
        else:
            encoding = resp.apparent_encoding
        resp.encoding = encoding

    setattr(resp, 'reqinfo', raw)
    return resp
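# The raw-request rendering above can be reproduced with public requests
# APIs alone; a minimal standalone sketch (the HTTP/1.1 line is a
# human-readable approximation, not the exact bytes the adapter sends):
import requests

def dump_prepared(prep):
    lines = ['{} {} HTTP/1.1'.format(prep.method, prep.url)]
    lines += ['{}: {}'.format(k, v) for k, v in prep.headers.items()]
    body = prep.body or ''
    if isinstance(body, bytes):
        body = body.decode('utf-8', 'replace')
    return '\n'.join(lines) + '\n\n' + body

prep = requests.Request('POST', 'https://example.com/api', data={'a': '1'}).prepare()
print(dump_prepared(prep))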
def request(self, method, url,
            params=None, data=None, headers=None, cookies=None, files=None, auth=None,
            timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None,
            verify=False, cert=None, json=None):
    """Constructs a :class:`Request <Request>`, prepares it and sends it.
    Returns :class:`Response <Response>` object.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query
        string for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send
        in the body of the :class:`Request`.
    :param json: (optional) json to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the
        :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the
        :class:`Request`.
    :param files: (optional) Dictionary of ``'filename': file-like-objects``
        for multipart encoding upload.
    :param auth: (optional) Auth tuple or callable to enable
        Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How long to wait for the server to send
        data before giving up, as a float, or a :ref:`(connect timeout,
        read timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Set to True by default.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol or protocol and
        hostname to the URL of the proxy.
    :param stream: (optional) whether to immediately download the response
        content. Defaults to ``False``.
    :param verify: (optional) Either a boolean, in which case it controls
        whether we verify the server's TLS certificate, or a string, in
        which case it must be a path to a CA bundle to use. Defaults to
        ``True``.
    :param cert: (optional) if String, path to ssl client cert file (.pem).
        If Tuple, ('cert', 'key') pair.
    :rtype: requests.Response
    """
    # Create the Request.
    req = Request(
        method=method.upper(),
        url=url,
        headers=headers,
        files=files,
        data=data or {},
        json=json,
        params=params or {},
        auth=auth,
        cookies=cookies,
        hooks=hooks,
    )

    try:
        # custom hosts mapping for the pre-release (staging) environment
        from flask import current_app
        from config.config_hosts import HOSTS
        from urllib import parse
        if hasattr(current_app, 'pre'):
            host = parse.urlparse(req.url).netloc
            if host in HOSTS:
                req.url = req.url.replace(host, HOSTS[host])
                req.headers['Host'] = host
    except Exception:
        logging.info('not a pre-release environment, skipping hosts switch')

    prep = self.prepare_request(req)

    proxies = proxies or {}
    settings = self.merge_environment_settings(prep.url, proxies, stream, verify, cert)

    # Send the request.
    send_kwargs = {
        'timeout': timeout,
        'allow_redirects': allow_redirects,
    }
    send_kwargs.update(settings)
    resp = self.send(prep, **send_kwargs)

    try:
        req_body = prep.body
        req_body = req_body if not isinstance(req_body, bytes) else req_body.decode()
        req_body = "" if req_body is None else req_body
        logging.info("{0} {1} {2}".format(prep.method, prep.url, resp.status_code))
        api_log.add_api_records(req_method=prep.method,
                                req_url=resp.url,
                                req_data=req_body,
                                req_headers=prep.headers,
                                res_status=resp.status_code,
                                res_content=resp.text,
                                elapsed=resp.elapsed.total_seconds(),
                                update_time=time.strftime('%Y-%m-%d %H:%M:%S',
                                                          time.localtime(time.time())),
                                remark=get_mac_address(),
                                res_headers=resp.headers)
    except Exception as e:
        logging.error("error while recording the API call log!!! {}".format(e))

    return resp
def cmd_impl(*args, **kwargs):
    # use default empty string args if not enough args provided; str.format ignores extra args
    url_args = list(args[:])
    url_args.extend([''] * 10)
    url = url_format.format(*url_args)
    return Request(url=url).prepare().url, ResultType.REDIRECTION
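# The padding trick in isolation, with a placeholder template: str.format
# silently ignores surplus positional arguments, so topping the argument
# list up with empty strings lets one format string serve any arity.
url_format = 'https://example.com/search?q={0}&page={1}'
args = ['requests']
args.extend([''] * 10)           # surplus arguments are ignored by str.format
print(url_format.format(*args))  # https://example.com/search?q=requests&page=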
def forward_request(self, method, path, data, headers):
    # Make sure we use 'localhost' as forward host, to ensure moto uses path style addressing.
    # Note that all S3 clients using LocalStack need to enable path style addressing.
    if 's3.amazonaws.com' not in headers.get('host', ''):
        headers['host'] = 'localhost'

    # check content md5 hash integrity
    if 'Content-MD5' in headers:
        response = check_content_md5(data, headers)
        if response is not None:
            return response

    modified_data = None

    # TODO: For some reason, moto doesn't allow us to put a location constraint on us-east-1
    to_find = to_bytes('<LocationConstraint>us-east-1</LocationConstraint>')
    if data and data.startswith(to_bytes('<')) and to_find in data:
        modified_data = data.replace(to_find, to_bytes(''))

    # If this request contains streaming v4 authentication signatures, strip them from the message
    # Related issue: https://github.com/localstack/localstack/issues/98
    # TODO we should evaluate whether to replace moto s3 with scality/S3:
    # https://github.com/scality/S3/issues/237
    if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
        modified_data = strip_chunk_signatures(modified_data or data)
        headers['content-length'] = headers.get('x-amz-decoded-content-length')

    # POST requests to S3 may include a "${filename}" placeholder in the
    # key, which should be replaced with an actual file name before storing.
    if method == 'POST':
        original_data = modified_data or data
        expanded_data = multipart_content.expand_multipart_filename(original_data, headers)
        if expanded_data is not original_data:
            modified_data = expanded_data

    # If no content-type is provided, 'binary/octet-stream' should be used
    # src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
    if method == 'PUT' and not headers.get('content-type'):
        headers['content-type'] = 'binary/octet-stream'

    # persist this API call to disk
    persistence.record('s3', method, path, data, headers)

    # parse query params
    parsed = urlparse.urlparse(path)
    query = parsed.query
    path = parsed.path
    bucket = path.split('/')[1]
    query_map = urlparse.parse_qs(query, keep_blank_values=True)

    if query == 'notification' or 'notification' in query_map:
        # handle and return response for ?notification request
        response = handle_notification_request(bucket, method, data)
        return response

    if query == 'cors' or 'cors' in query_map:
        if method == 'GET':
            return get_cors(bucket)
        if method == 'PUT':
            return set_cors(bucket, data)
        if method == 'DELETE':
            return delete_cors(bucket)

    if query == 'lifecycle' or 'lifecycle' in query_map:
        if method == 'GET':
            return get_lifecycle(bucket)
        if method == 'PUT':
            return set_lifecycle(bucket, data)

    if modified_data is not None:
        return Request(data=modified_data, headers=headers, method=method)
    return True
def forward_request(self, method, path, data, headers):
    if method == "OPTIONS":
        return 200

    # check region
    try:
        aws_stack.check_valid_region(headers)
        aws_stack.set_default_region_in_headers(headers)
    except Exception as e:
        return make_error(message=str(e), code=400)

    if method == "POST":
        # parse payload and extract fields
        req_data = urlparse.parse_qs(to_str(data), keep_blank_values=True)

        # parse data from query path
        if not req_data:
            parsed_path = urlparse.urlparse(path)
            req_data = urlparse.parse_qs(parsed_path.query, keep_blank_values=True)

        req_action = req_data["Action"][0]
        topic_arn = (req_data.get("TargetArn") or req_data.get("TopicArn")
                     or req_data.get("ResourceArn"))
        if topic_arn:
            topic_arn = topic_arn[0]
            topic_arn = aws_stack.fix_account_id_in_arns(topic_arn)

        if req_action == "SetSubscriptionAttributes":
            sub = get_subscription_by_arn(req_data["SubscriptionArn"][0])
            if not sub:
                return make_error(message="Unable to find subscription for given ARN", code=400)
            attr_name = req_data["AttributeName"][0]
            attr_value = req_data["AttributeValue"][0]
            sub[attr_name] = attr_value
            return make_response(req_action)

        elif req_action == "GetSubscriptionAttributes":
            sub = get_subscription_by_arn(req_data["SubscriptionArn"][0])
            if not sub:
                return make_error(
                    message="Subscription with arn {0} not found".format(req_data["SubscriptionArn"][0]),
                    code=404, code_string="NotFound")
            content = "<Attributes>"
            for key, value in sub.items():
                if key in HTTP_SUBSCRIPTION_ATTRIBUTES:
                    continue
                content += "<entry><key>%s</key><value>%s</value></entry>\n" % (key, value)
            content += "</Attributes>"
            return make_response(req_action, content=content)

        elif req_action == "Subscribe":
            if "Endpoint" not in req_data:
                return make_error(message="Endpoint not specified in subscription", code=400)

        elif req_action == "ConfirmSubscription":
            if "TopicArn" not in req_data:
                return make_error(message="TopicArn not specified in confirm subscription request", code=400)
            if "Token" not in req_data:
                return make_error(message="Token not specified in confirm subscription request", code=400)
            do_confirm_subscription(req_data.get("TopicArn")[0], req_data.get("Token")[0])

        elif req_action == "Unsubscribe":
            if "SubscriptionArn" not in req_data:
                return make_error(message="SubscriptionArn not specified in unsubscribe request", code=400)
            do_unsubscribe(req_data.get("SubscriptionArn")[0])

        elif req_action == "DeleteTopic":
            do_delete_topic(topic_arn)

        elif req_action == "Publish":
            if req_data.get("Subject") == [""]:
                return make_error(code=400, code_string="InvalidParameter", message="Subject")
            sns_backend = SNSBackend.get()
            # No need to create a topic to send SMS or single push notifications with SNS
            # but we can't mock a sending so we only return that it went well
            if "PhoneNumber" not in req_data and "TargetArn" not in req_data:
                if topic_arn not in sns_backend.sns_subscriptions:
                    return make_error(code=404, code_string="NotFound", message="Topic does not exist")
            message_id = publish_message(topic_arn, req_data, headers)
            # return response here because we do not want the request to be forwarded to SNS backend
            return make_response(req_action, message_id=message_id)

        elif req_action == "ListTagsForResource":
            tags = do_list_tags_for_resource(topic_arn)
            content = "<Tags/>"
            if len(tags) > 0:
                content = "<Tags>"
                for tag in tags:
                    content += "<member>"
                    content += "<Key>%s</Key>" % tag["Key"]
                    content += "<Value>%s</Value>" % tag["Value"]
                    content += "</member>"
                content += "</Tags>"
            return make_response(req_action, content=content)

        elif req_action == "CreateTopic":
            sns_backend = SNSBackend.get()
            topic_arn = aws_stack.sns_topic_arn(req_data["Name"][0])
            tag_resource_success = self._extract_tags(topic_arn, req_data, True, sns_backend)
            sns_backend.sns_subscriptions[topic_arn] = (
                sns_backend.sns_subscriptions.get(topic_arn) or [])
            # if tagging failed, return an error; otherwise continue as expected
            if not tag_resource_success:
                return make_error(code=400, code_string="InvalidParameter",
                                  message="Topic already exists with different tags")

        elif req_action == "TagResource":
            sns_backend = SNSBackend.get()
            self._extract_tags(topic_arn, req_data, False, sns_backend)
            return make_response(req_action)

        elif req_action == "UntagResource":
            tags_to_remove = []
            req_tags = {k: v for k, v in req_data.items() if k.startswith("TagKeys.member.")}
            req_tags = req_tags.values()
            for tag in req_tags:
                tags_to_remove.append(tag[0])
            do_untag_resource(topic_arn, tags_to_remove)
            return make_response(req_action)

        data = self._reset_account_id(data)
        return Request(data=data, headers=headers, method=method)

    return True
def update_s3(method, path, data, headers, response=None, return_forward_info=False):
    if return_forward_info:
        modified_data = None

        # If this request contains streaming v4 authentication signatures, strip them from the message
        # Related issue: https://github.com/atlassian/localstack/issues/98
        # TODO we should evaluate whether to replace moto s3 with scality/S3:
        # https://github.com/scality/S3/issues/237
        if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
            modified_data = strip_chunk_signatures(data)

        # persist this API call to disk
        persistence.record('s3', method, path, data, headers)

        parsed = urlparse.urlparse(path)
        query = parsed.query
        path = parsed.path
        bucket = path.split('/')[1]
        query_map = urlparse.parse_qs(query)
        if query == 'notification' or 'notification' in query_map:
            response = Response()
            response.status_code = 200
            if method == 'GET':
                # TODO check if bucket exists
                result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
                if bucket in S3_NOTIFICATIONS:
                    notif = S3_NOTIFICATIONS[bucket]
                    for dest in ['Queue', 'Topic', 'CloudFunction']:
                        if dest in notif:
                            result += ('''<{dest}Configuration>
                                    <Id>{uid}</Id>
                                    <{dest}>{endpoint}</{dest}>
                                    <Event>{event}</Event>
                                </{dest}Configuration>''').format(
                                dest=dest, uid=uuid.uuid4(),
                                endpoint=S3_NOTIFICATIONS[bucket][dest],
                                event=S3_NOTIFICATIONS[bucket]['Event'])
                result += '</NotificationConfiguration>'
                response._content = result

            if method == 'PUT':
                tree = ET.fromstring(data)
                for dest in ['Queue', 'Topic', 'CloudFunction']:
                    config = tree.find('{%s}%sConfiguration' % (XMLNS_S3, dest))
                    if config is not None and len(config):
                        # TODO: what if we have multiple destinations - would we overwrite the config?
                        S3_NOTIFICATIONS[bucket] = {
                            'Id': get_xml_text(config, 'Id'),
                            'Event': get_xml_text(config, 'Event', ns=XMLNS_S3),
                            # TODO extract 'Events' attribute (in addition to 'Event')
                            dest: get_xml_text(config, dest, ns=XMLNS_S3),
                        }

            # return response for ?notification request
            return response

        if query == 'cors' or 'cors' in query_map:
            if method == 'GET':
                return get_cors(bucket)
            if method == 'PUT':
                return set_cors(bucket, data)
            if method == 'DELETE':
                return delete_cors(bucket)

        if modified_data:
            return Request(data=modified_data, headers=headers, method=method)
        return True

    # get subscribers and send bucket notifications
    if method in ('PUT', 'DELETE') and '/' in path[1:]:
        parts = path[1:].split('/', 1)
        bucket_name = parts[0]
        object_path = '/%s' % parts[1]
        send_notifications(method, bucket_name, object_path)

    # append CORS headers to response
    if response:
        parsed = urlparse.urlparse(path)
        bucket_name = parsed.path.split('/')[0]
        append_cors_headers(bucket_name, request_method=method,
                            request_headers=headers, response=response)

        # we need to un-pretty-print the XML, otherwise we run into this issue with Spark:
        # https://github.com/jserver/mock-s3/pull/9/files
        # https://github.com/localstack/localstack/issues/183
        response_content_str = None
        try:
            response_content_str = to_str(response._content)
        except Exception as e:
            pass
        if response_content_str and response_content_str.startswith('<'):
            is_bytes = isinstance(response._content, six.binary_type)
            response._content = re.sub(r'>\n\s*<', '><', response_content_str, flags=re.MULTILINE)
            if is_bytes:
                response._content = to_bytes(response._content)
            response.headers['content-length'] = len(response._content)
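# The un-pretty-printing step above is a whitespace-collapsing regex between
# adjacent XML tags; in isolation:
import re

xml = '<ListBucketResult>\n  <Name>demo</Name>\n</ListBucketResult>'
print(re.sub(r'>\n\s*<', '><', xml, flags=re.MULTILINE))
# -> <ListBucketResult><Name>demo</Name></ListBucketResult>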
def forward_request(self, method, path, data, headers):
    if method == "OPTIONS":
        return 200

    req_data = parse_request_data(method, path, data)

    if is_sqs_queue_url(path) and method == "GET":
        if not headers.get("Authorization"):
            headers["Authorization"] = aws_stack.mock_aws_request_headers(service="sqs")["Authorization"]
        method = "POST"
        req_data = {
            "Action": "GetQueueUrl",
            "Version": API_VERSION,
            "QueueName": path.split("/")[-1],
        }

    if req_data:
        action = req_data.get("Action")

        if action in ("SendMessage", "SendMessageBatch") and SQS_BACKEND_IMPL == "moto":
            # check message contents
            for key, value in req_data.items():
                if not re.match(MSG_CONTENT_REGEX, str(value)):
                    return make_requests_error(
                        code=400,
                        code_string="InvalidMessageContents",
                        message="Message contains invalid characters",
                    )

        elif action == "SetQueueAttributes":
            # TODO remove this function if we stop using ElasticMQ
            queue_url = _queue_url(path, req_data, headers)
            if SQS_BACKEND_IMPL == "elasticmq":
                forward_attrs = _set_queue_attributes(queue_url, req_data)
                if len(req_data) != len(forward_attrs):
                    # make sure we only forward the supported attributes to the backend
                    return _get_attributes_forward_request(method, path, headers, req_data, forward_attrs)

        elif action == "TagQueue":
            req_data = self.fix_missing_tag_values(req_data)

        elif action == "CreateQueue":
            req_data = self.fix_missing_tag_values(req_data)
            changed_attrs = _fix_dlq_arn_in_attributes(req_data)
            if changed_attrs:
                return _get_attributes_forward_request(method, path, headers, req_data, changed_attrs)

        elif action == "DeleteQueue":
            queue_url = _queue_url(path, req_data, headers)
            QUEUE_ATTRIBUTES.pop(queue_url, None)
            sns_listener.unsubscribe_sqs_queue(queue_url)

        elif action == "ListDeadLetterSourceQueues":
            # TODO remove this function if we stop using ElasticMQ entirely
            queue_url = _queue_url(path, req_data, headers)
            if SQS_BACKEND_IMPL == "elasticmq":
                headers = {"content-type": "application/xhtml+xml"}
                content_str = _list_dead_letter_source_queues(QUEUE_ATTRIBUTES, queue_url)
                return requests_response(content_str, headers=headers)

        if "QueueName" in req_data:
            encoded_data = urlencode(req_data, doseq=True) if method == "POST" else ""
            modified_url = None
            if method == "GET":
                base_path = path.partition("?")[0]
                modified_url = "%s?%s" % (base_path, urlencode(req_data, doseq=True))
            return Request(data=encoded_data, url=modified_url, headers=headers, method=method)

    return True
def request(self, method, url,
            params=None, data=None, headers=None, cookies=None, files=None, auth=None,
            timeout=5, allow_redirects=True, proxies=None, hooks=None, stream=None,
            verify=None, cert=None, json=None):
    req = Request(
        method=method.upper(),
        url=url,
        headers=headers,
        files=files,
        data=data or {},
        json=json,
        params=params or {},
        auth=auth,
        cookies=cookies,
        hooks=hooks,
    )
    prep = self.prepare_request(req)

    proxies = proxies or {}
    settings = self.merge_environment_settings(prep.url, proxies, stream, verify, cert)
    send_kwargs = {
        'timeout': timeout,
        'allow_redirects': allow_redirects,
    }
    send_kwargs.update(settings)

    message = '%s: %s' % (method, prep.url)
    logger.info(message)
    for i in range(TRY_COUNT + 1):
        try:
            r = self.send(prep, **send_kwargs)
            r.raise_for_status()
            return r
        except Timeout:
            why = 'Timeout'
        except ConnectionError:
            why = 'ConnectionError'
        except HTTPError:
            why = '%s' % r.status_code
        except ChunkedEncodingError:
            # the number of bytes read does not match the expected length
            why = 'ChunkedEncodingError'
        if i != TRY_COUNT:
            logger.warning('%s, %s, retry %d >>> %s' % (os.getpid(), why, i + 1, message))
            time.sleep(3)
    logger.error('[%s] %s' % (why, message))
def forward_request(self, method, path, data, headers):
    # check region
    try:
        aws_stack.check_valid_region(headers)
    except Exception as e:
        return make_error(message=str(e), code=400)

    if method == 'POST' and path == '/':
        # parse payload and extract fields
        req_data = urlparse.parse_qs(to_str(data))
        req_action = req_data['Action'][0]
        topic_arn = req_data.get('TargetArn') or req_data.get('TopicArn')
        if topic_arn:
            topic_arn = topic_arn[0]
            topic_arn = aws_stack.fix_account_id_in_arns(topic_arn)

        if req_action == 'SetSubscriptionAttributes':
            sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])
            if not sub:
                return make_error(message='Unable to find subscription for given ARN', code=400)
            attr_name = req_data['AttributeName'][0]
            attr_value = req_data['AttributeValue'][0]
            sub[attr_name] = attr_value
            return make_response(req_action)

        elif req_action == 'GetSubscriptionAttributes':
            sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])
            if not sub:
                return make_error(message='Unable to find subscription for given ARN', code=400)
            content = '<Attributes>'
            for key, value in sub.items():
                content += '<entry><key>%s</key><value>%s</value></entry>\n' % (key, value)
            content += '</Attributes>'
            return make_response(req_action, content=content)

        elif req_action == 'Subscribe':
            if 'Endpoint' not in req_data:
                return make_error(message='Endpoint not specified in subscription', code=400)

        elif req_action == 'Unsubscribe':
            if 'SubscriptionArn' not in req_data:
                return make_error(message='SubscriptionArn not specified in unsubscribe request', code=400)
            do_unsubscribe(req_data.get('SubscriptionArn')[0])

        elif req_action == 'DeleteTopic':
            do_delete_topic(topic_arn)

        elif req_action == 'Publish':
            # No need to create a topic to send SMS with SNS
            # but we can't mock a sending so we only return that it went well
            if 'PhoneNumber' not in req_data:
                if topic_arn not in SNS_SUBSCRIPTIONS.keys():
                    return make_error(code=404, code_string='NotFound', message='Topic does not exist')
                publish_message(topic_arn, req_data)
            # return response here because we do not want the request to be forwarded to SNS backend
            return make_response(req_action)

        elif req_action == 'ListTagsForResource':
            tags = do_list_tags_for_resource(topic_arn)
            content = '<Tags/>'
            if len(tags) > 0:
                content = '<Tags>'
                for tag in tags:
                    content += '<member>'
                    content += '<Key>%s</Key>' % tag['Key']
                    content += '<Value>%s</Value>' % tag['Value']
                    content += '</member>'
                content += '</Tags>'
            return make_response(req_action, content=content)

        elif req_action == 'TagResource':
            tags = []
            req_tags = {k: v for k, v in req_data.items() if k.startswith('Tags.member.')}
            for i in range(int(len(req_tags.keys()) / 2)):
                key = req_tags['Tags.member.' + str(i + 1) + '.Key'][0]
                value = req_tags['Tags.member.' + str(i + 1) + '.Value'][0]
                tags.append({'Key': key, 'Value': value})
            do_tag_resource(topic_arn, tags)
            return make_response(req_action)

        elif req_action == 'UntagResource':
            tags_to_remove = []
            req_tags = {k: v for k, v in req_data.items() if k.startswith('TagKeys.member.')}
            req_tags = req_tags.values()
            for tag in req_tags:
                tags_to_remove.append(tag[0])
            do_untag_resource(topic_arn, tags_to_remove)
            return make_response(req_action)

        data = self._reset_account_id(data)
        return Request(data=data, headers=headers, method=method)

    return True
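# For reference, the 'Tags.member.N.*' keys unpacked above follow the AWS
# query-protocol list encoding; after parse_qs, a two-tag TagResource
# request yields a dict shaped like this (illustrative values):
req_data = {
    'Action': ['TagResource'],
    'Tags.member.1.Key': ['env'], 'Tags.member.1.Value': ['dev'],
    'Tags.member.2.Key': ['team'], 'Tags.member.2.Value': ['infra'],
}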
def forward_request(self, method, path, data, headers):
    if method == 'OPTIONS':
        return 200

    req_data = parse_request_data(method, path, data)

    if is_sqs_queue_url(path) and method == 'GET':
        if not headers.get('Authorization'):
            headers['Authorization'] = aws_stack.mock_aws_request_headers(service='sqs')['Authorization']
        method = 'POST'
        req_data = {
            'Action': 'GetQueueUrl',
            'Version': API_VERSION,
            'QueueName': path.split('/')[-1]
        }

    if req_data:
        action = req_data.get('Action')

        if action in ('SendMessage', 'SendMessageBatch') and SQS_BACKEND_IMPL == 'moto':
            # check message contents
            for key, value in req_data.items():
                if not re.match(MSG_CONTENT_REGEX, str(value)):
                    return make_requests_error(code=400, code_string='InvalidMessageContents',
                                               message='Message contains invalid characters')

        elif action == 'SetQueueAttributes':
            # TODO remove this function if we stop using ElasticMQ entirely
            queue_url = _queue_url(path, req_data, headers)
            if SQS_BACKEND_IMPL == 'elasticmq':
                forward_attrs = _set_queue_attributes(queue_url, req_data)
                if len(req_data) != len(forward_attrs):
                    # make sure we only forward the supported attributes to the backend
                    return _get_attributes_forward_request(method, path, headers, req_data, forward_attrs)

        elif action == 'DeleteQueue':
            queue_url = _queue_url(path, req_data, headers)
            QUEUE_ATTRIBUTES.pop(queue_url, None)
            sns_listener.unsubscribe_sqs_queue(queue_url)

        elif action == 'ListDeadLetterSourceQueues':
            # TODO remove this function if we stop using ElasticMQ entirely
            queue_url = _queue_url(path, req_data, headers)
            if SQS_BACKEND_IMPL == 'elasticmq':
                headers = {'content-type': 'application/xhtml+xml'}
                content_str = _list_dead_letter_source_queues(QUEUE_ATTRIBUTES, queue_url)
                return requests_response(content_str, headers=headers)

        if 'QueueName' in req_data:
            encoded_data = urlencode(req_data, doseq=True) if method == 'POST' else ''
            modified_url = None
            if method == 'GET':
                base_path = path.partition('?')[0]
                modified_url = '%s?%s' % (base_path, urlencode(req_data, doseq=True))
            return Request(data=encoded_data, url=modified_url, headers=headers, method=method)

    return True
def session_request(self, method, url,
                    params=None, data=None, headers=None, cookies=None, files=None, auth=None,
                    timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None,
                    verify=False, cert=None, json=None):
    # In order to remove headers that are set to None
    def _merge_retain_none(request_setting, session_setting, dict_class=OrderedDict):
        if session_setting is None:
            return request_setting
        if request_setting is None:
            return session_setting
        # Bypass if not a dictionary (e.g. verify)
        if not (isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping)):
            return request_setting
        merged_setting = dict_class(to_key_val_list(session_setting))
        merged_setting.update(to_key_val_list(request_setting))
        return merged_setting

    # Create the Request.
    merged_cookies = merge_cookies(
        merge_cookies(RequestsCookieJar(), self.cookies),
        cookies or (conf.cookie if 'cookie' in conf else None))

    if not conf.agent:
        conf.http_headers[HTTP_HEADER.USER_AGENT] = generate_random_user_agent()

    req = Request(
        method=method.upper(),
        url=url,
        headers=_merge_retain_none(headers, conf.http_headers if 'http_headers' in conf else {}),
        files=files,
        data=data or {},
        json=json,
        params=params or {},
        auth=auth,
        cookies=merged_cookies,
        hooks=hooks,
    )
    prep = self.prepare_request(req)

    if proxies is None:
        proxies = conf.proxies if 'proxies' in conf else {}
    settings = self.merge_environment_settings(prep.url, proxies, stream, verify, cert)

    timeout = timeout or conf.get("timeout", None)
    if timeout:
        timeout = float(timeout)

    # Send the request.
    send_kwargs = {
        'timeout': timeout,
        'allow_redirects': allow_redirects,
    }
    send_kwargs.update(settings)
    resp = self.send(prep, **send_kwargs)

    if resp.encoding == 'ISO-8859-1':
        encodings = get_encodings_from_content(resp.text)
        if encodings:
            encoding = encodings[0]
        else:
            encoding = resp.apparent_encoding
        resp.encoding = encoding

    return resp
def session_request(self, method, url,
                    params=None, data=None, headers=None, cookies=None, files=None, auth=None,
                    timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None,
                    verify=False, cert=None, json=None):
    # Create the Request.
    merged_cookies = merge_cookies(
        merge_cookies(RequestsCookieJar(), self.cookies), cookies)
    default_header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36"
    }
    req = Request(
        method=method.upper(),
        url=url,
        headers=merge_setting(headers, default_header),
        files=files,
        data=data or {},
        json=json,
        params=params or {},
        auth=auth,
        cookies=merged_cookies,
        hooks=hooks,
    )
    prep = self.prepare_request(req)

    raw = ''
    if prep.body:
        raw = "{}\n{}\n\n{}".format(
            prep.method + ' ' + prep.url,
            '\n'.join('{}: {}'.format(k, v) for k, v in prep.headers.items()),
            prep.body)
    else:
        raw = "{}\n{}".format(
            prep.method + ' ' + prep.url,
            '\n'.join('{}: {}'.format(k, v) for k, v in prep.headers.items()))

    proxies = proxies or {}
    if PROXY_CONFIG_BOOL and not proxies:
        proxies = PROXY_CONFIG
    settings = self.merge_environment_settings(prep.url, proxies, stream, verify, cert)

    # Send the request.
    send_kwargs = {
        'timeout': timeout or TIMEOUT,
        'allow_redirects': allow_redirects,
    }
    send_kwargs.update(settings)
    resp = self.send(prep, **send_kwargs)

    if resp.encoding == 'ISO-8859-1':
        encodings = get_encodings_from_content(resp.text)
        if encodings:
            encoding = encodings[0]
        else:
            encoding = resp.apparent_encoding
        resp.encoding = encoding

    resp.raw = raw
    return resp
def forward_request(self, method, path, data, headers):
    if path.startswith('/shell') or method == 'GET':
        if path == '/shell':
            headers = {'Refresh': '0; url=%s/shell/' % config.TEST_DYNAMODB_URL}
            return aws_responses.requests_response('', headers=headers)
        return True

    if method == 'OPTIONS':
        return 200

    if not data:
        data = '{}'
    data = json.loads(to_str(data))
    ddb_client = aws_stack.connect_to_service('dynamodb')
    action = headers.get('X-Amz-Target')

    if random.random() < config.DYNAMODB_ERROR_PROBABILITY:
        throttled = ['%s.%s' % (ACTION_PREFIX, a) for a in THROTTLED_ACTIONS]
        if action in throttled:
            return error_response_throughput()

    ProxyListenerDynamoDB.thread_local.existing_item = None

    if action == '%s.CreateTable' % ACTION_PREFIX:
        # Check if table exists, to avoid error log output from DynamoDBLocal
        table_names = ddb_client.list_tables()['TableNames']
        if to_str(data['TableName']) in table_names:
            return 200

    elif action in ('%s.PutItem' % ACTION_PREFIX, '%s.UpdateItem' % ACTION_PREFIX,
                    '%s.DeleteItem' % ACTION_PREFIX):
        # find an existing item and store it in a thread-local, so we can access it in return_response,
        # in order to determine whether an item already existed (MODIFY) or not (INSERT)
        try:
            ProxyListenerDynamoDB.thread_local.existing_item = find_existing_item(data)
        except Exception as e:
            if 'ResourceNotFoundException' in str(e):
                return get_table_not_found_error()
            raise

        # Fix incorrect values if ReturnValues==ALL_OLD and ReturnConsumedCapacity is
        # empty, see https://github.com/localstack/localstack/issues/2049
        if ((data.get('ReturnValues') == 'ALL_OLD') or (not data.get('ReturnValues'))) \
                and not data.get('ReturnConsumedCapacity'):
            data['ReturnConsumedCapacity'] = 'TOTAL'
            return Request(data=json.dumps(data), method=method, headers=headers)

    elif action == '%s.DescribeTable' % ACTION_PREFIX:
        # Check if table exists, to avoid error log output from DynamoDBLocal
        table_names = ddb_client.list_tables()['TableNames']
        if to_str(data['TableName']) not in table_names:
            return get_table_not_found_error()

    elif action == '%s.DeleteTable' % ACTION_PREFIX:
        # Check if table exists, to avoid error log output from DynamoDBLocal
        table_names = ddb_client.list_tables()['TableNames']
        if to_str(data['TableName']) not in table_names:
            return get_table_not_found_error()

    elif action == '%s.BatchWriteItem' % ACTION_PREFIX:
        existing_items = []
        for table_name in sorted(data['RequestItems'].keys()):
            for request in data['RequestItems'][table_name]:
                for key in ['PutRequest', 'DeleteRequest']:
                    inner_request = request.get(key)
                    if inner_request:
                        existing_items.append(find_existing_item(inner_request, table_name))
        ProxyListenerDynamoDB.thread_local.existing_items = existing_items

    elif action == '%s.TransactWriteItems' % ACTION_PREFIX:
        existing_items = []
        for item in data['TransactItems']:
            for key in ['Put', 'Update', 'Delete']:
                inner_item = item.get(key)
                if inner_item:
                    existing_items.append(find_existing_item(inner_item))
        ProxyListenerDynamoDB.thread_local.existing_items = existing_items

    elif action == '%s.UpdateTimeToLive' % ACTION_PREFIX:
        # TODO: TTL status is maintained/mocked but no real expiry is happening for items
        response = Response()
        response.status_code = 200
        self._table_ttl_map[data['TableName']] = {
            'AttributeName': data['TimeToLiveSpecification']['AttributeName'],
            'Status': data['TimeToLiveSpecification']['Enabled']
        }
        response._content = json.dumps({'TimeToLiveSpecification': data['TimeToLiveSpecification']})
        fix_headers_for_updated_response(response)
        return response

    elif action == '%s.DescribeTimeToLive' % ACTION_PREFIX:
        response = Response()
        response.status_code = 200
        if data['TableName'] in self._table_ttl_map:
            if self._table_ttl_map[data['TableName']]['Status']:
                ttl_status = 'ENABLED'
            else:
                ttl_status = 'DISABLED'
            response._content = json.dumps({
                'TimeToLiveDescription': {
                    'AttributeName': self._table_ttl_map[data['TableName']]['AttributeName'],
                    'TimeToLiveStatus': ttl_status
                }
            })
        else:  # TTL for dynamodb table not set
            response._content = json.dumps({'TimeToLiveDescription': {'TimeToLiveStatus': 'DISABLED'}})
        fix_headers_for_updated_response(response)
        return response

    elif action == '%s.TagResource' % ACTION_PREFIX or action == '%s.UntagResource' % ACTION_PREFIX:
        response = Response()
        response.status_code = 200
        response._content = ''  # returns an empty body on success.
        fix_headers_for_updated_response(response)
        return response

    elif action == '%s.ListTagsOfResource' % ACTION_PREFIX:
        response = Response()
        response.status_code = 200
        response._content = json.dumps({
            'Tags': [{'Key': k, 'Value': v}
                     for k, v in TABLE_TAGS.get(data['ResourceArn'], {}).items()]
        })
        fix_headers_for_updated_response(response)
        return response

    return True
def update_s3(method, path, data, headers, response=None, return_forward_info=False):
    if return_forward_info:
        modified_data = None

        # If this request contains streaming v4 authentication signatures, strip them from the message
        # Related issue: https://github.com/atlassian/localstack/issues/98
        # TODO we should evaluate whether to replace moto s3 with scality/S3:
        # https://github.com/scality/S3/issues/237
        if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
            modified_data = strip_chunk_signatures(data)

        # persist this API call to disk
        persistence.record('s3', method, path, data, headers)

        parsed = urlparse.urlparse(path)
        query = parsed.query
        path = parsed.path
        bucket = path.split('/')[1]
        query_map = urlparse.parse_qs(query)
        if method == 'PUT' and (query == 'notification' or 'notification' in query_map):
            tree = ET.fromstring(data)
            for dest in ['Queue', 'Topic', 'CloudFunction']:
                config = tree.find('{%s}%sConfiguration' % (XMLNS_S3, dest))
                if config is not None and len(config):
                    S3_NOTIFICATIONS[bucket] = {
                        'Id': get_xml_text(config, 'Id'),
                        'Event': get_xml_text(config, 'Event', ns=XMLNS_S3),
                        dest: get_xml_text(config, dest, ns=XMLNS_S3),
                    }

        if query == 'cors' or 'cors' in query_map:
            if method == 'GET':
                return get_cors(bucket)
            if method == 'PUT':
                return set_cors(bucket, data)
            if method == 'DELETE':
                return delete_cors(bucket)

        if modified_data:
            return Request(data=modified_data, headers=headers, method=method)
        return True

    # get subscribers and send bucket notifications
    if method in ('PUT', 'DELETE') and '/' in path[1:]:
        parts = path[1:].split('/', 1)
        bucket_name = parts[0]
        object_path = '/%s' % parts[1]
        send_notifications(method, bucket_name, object_path)

    # append CORS headers to response
    if response:
        parsed = urlparse.urlparse(path)
        bucket_name = parsed.path.split('/')[0]
        append_cors_headers(bucket_name, request_method=method,
                            request_headers=headers, response=response)
def forward_request(self, method, path, data, headers):
    modified_data = None

    # If this request contains streaming v4 authentication signatures, strip them from the message
    # Related issue: https://github.com/localstack/localstack/issues/98
    # TODO we should evaluate whether to replace moto s3 with scality/S3:
    # https://github.com/scality/S3/issues/237
    if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
        modified_data = strip_chunk_signatures(data)

    # POST requests to S3 may include a "${filename}" placeholder in the
    # key, which should be replaced with an actual file name before storing.
    if method == 'POST':
        original_data = modified_data or data
        expanded_data = multipart_content.expand_multipart_filename(original_data, headers)
        if expanded_data is not original_data:
            modified_data = expanded_data

    # persist this API call to disk
    persistence.record('s3', method, path, data, headers)

    parsed = urlparse.urlparse(path)
    query = parsed.query
    path = parsed.path
    bucket = path.split('/')[1]
    query_map = urlparse.parse_qs(query)
    if query == 'notification' or 'notification' in query_map:
        response = Response()
        response.status_code = 200
        if method == 'GET':
            # TODO check if bucket exists
            result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
            if bucket in S3_NOTIFICATIONS:
                notif = S3_NOTIFICATIONS[bucket]
                for dest in ['Queue', 'Topic', 'CloudFunction']:
                    if dest in notif:
                        dest_dict = {
                            '%sConfiguration' % dest: {
                                'Id': uuid.uuid4(),
                                dest: notif[dest],
                                'Event': notif['Event'],
                                'Filter': notif['Filter']
                            }
                        }
                        result += xmltodict.unparse(dest_dict, full_document=False)
            result += '</NotificationConfiguration>'
            response._content = result
        if method == 'PUT':
            parsed = xmltodict.parse(data)
            notif_config = parsed.get('NotificationConfiguration')
            S3_NOTIFICATIONS.pop(bucket, None)
            for dest in ['Queue', 'Topic', 'CloudFunction']:
                config = notif_config.get('%sConfiguration' % dest)
                if config:
                    events = config.get('Event')
                    if isinstance(events, six.string_types):
                        events = [events]
                    event_filter = config.get('Filter', {})
                    # make sure FilterRule is an array
                    s3_filter = _get_s3_filter(event_filter)
                    if s3_filter and not isinstance(s3_filter.get('FilterRule', []), list):
                        s3_filter['FilterRule'] = [s3_filter['FilterRule']]
                    # create final details dict
                    notification_details = {
                        'Id': config.get('Id'),
                        'Event': events,
                        dest: config.get(dest),
                        'Filter': event_filter
                    }
                    # TODO: what if we have multiple destinations - would we overwrite the config?
                    S3_NOTIFICATIONS[bucket] = clone(notification_details)
        # return response for ?notification request
        return response

    if query == 'cors' or 'cors' in query_map:
        if method == 'GET':
            return get_cors(bucket)
        if method == 'PUT':
            return set_cors(bucket, data)
        if method == 'DELETE':
            return delete_cors(bucket)

    if query == 'lifecycle' or 'lifecycle' in query_map:
        if method == 'GET':
            return get_lifecycle(bucket)
        if method == 'PUT':
            return set_lifecycle(bucket, data)

    if modified_data:
        return Request(data=modified_data, headers=headers, method=method)
    return True
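For reference, a minimal sketch of how xmltodict.parse flattens a PUT ?notification body like the one handled above (ARN values are illustrative). A single <Event> element parses to a plain string, which is why the handler wraps it in a list before storing it.

import xmltodict

parsed = xmltodict.parse("""
<NotificationConfiguration>
  <TopicConfiguration>
    <Id>notify-1</Id>
    <Topic>arn:aws:sns:us-east-1:000000000000:my-topic</Topic>
    <Event>s3:ObjectCreated:Put</Event>
  </TopicConfiguration>
</NotificationConfiguration>""")

notif_config = parsed.get('NotificationConfiguration')
config = notif_config.get('TopicConfiguration')
# One <Event> yields a string, not a list -- the handler normalizes this.
print(type(config.get('Event')))  # <class 'str'>
print(config.get('Topic'))        # arn:aws:sns:us-east-1:000000000000:my-topic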
def forward_request(self, method, path, data, headers):
    if method == 'OPTIONS':
        return 200

    # check region
    try:
        aws_stack.check_valid_region(headers)
        aws_stack.set_default_region_in_headers(headers)
    except Exception as e:
        return make_error(message=str(e), code=400)

    if method == 'POST':
        # parse payload and extract fields
        req_data = urlparse.parse_qs(to_str(data), keep_blank_values=True)
        # parse data from query path
        if not req_data:
            parsed_path = urlparse.urlparse(path)
            req_data = urlparse.parse_qs(parsed_path.query, keep_blank_values=True)

        req_action = req_data['Action'][0]
        topic_arn = req_data.get('TargetArn') or req_data.get('TopicArn') or req_data.get('ResourceArn')
        if topic_arn:
            topic_arn = topic_arn[0]
            topic_arn = aws_stack.fix_account_id_in_arns(topic_arn)

        if req_action == 'SetSubscriptionAttributes':
            sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])
            if not sub:
                return make_error(message='Unable to find subscription for given ARN', code=400)
            attr_name = req_data['AttributeName'][0]
            attr_value = req_data['AttributeValue'][0]
            sub[attr_name] = attr_value
            return make_response(req_action)
        elif req_action == 'GetSubscriptionAttributes':
            sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])
            if not sub:
                return make_error(message='Unable to find subscription for given ARN', code=400)
            content = '<Attributes>'
            for key, value in sub.items():
                content += '<entry><key>%s</key><value>%s</value></entry>\n' % (key, value)
            content += '</Attributes>'
            return make_response(req_action, content=content)
        elif req_action == 'Subscribe':
            if 'Endpoint' not in req_data:
                return make_error(message='Endpoint not specified in subscription', code=400)
        elif req_action == 'ConfirmSubscription':
            if 'TopicArn' not in req_data:
                return make_error(message='TopicArn not specified in confirm subscription request', code=400)
            if 'Token' not in req_data:
                return make_error(message='Token not specified in confirm subscription request', code=400)
            do_confirm_subscription(req_data.get('TopicArn')[0], req_data.get('Token')[0])
        elif req_action == 'Unsubscribe':
            if 'SubscriptionArn' not in req_data:
                return make_error(message='SubscriptionArn not specified in unsubscribe request', code=400)
            do_unsubscribe(req_data.get('SubscriptionArn')[0])
        elif req_action == 'DeleteTopic':
            do_delete_topic(topic_arn)
        elif req_action == 'Publish':
            if req_data.get('Subject') == ['']:
                return make_error(code=400, code_string='InvalidParameter', message='Subject')
            # No need to create a topic to send SMS or single push notifications with SNS,
            # but we can't mock the sending, so we only report success
            message_id = None  # ensure message_id is defined for the SMS/push path as well
            if 'PhoneNumber' not in req_data and 'TargetArn' not in req_data:
                if topic_arn not in SNS_SUBSCRIPTIONS:
                    return make_error(code=404, code_string='NotFound', message='Topic does not exist')
                message_id = publish_message(topic_arn, req_data)
            # return response here because we do not want the request to be forwarded to the SNS backend
            return make_response(req_action, message_id=message_id)
        elif req_action == 'ListTagsForResource':
            tags = do_list_tags_for_resource(topic_arn)
            content = '<Tags/>'
            if len(tags) > 0:
                content = '<Tags>'
                for tag in tags:
                    content += '<member>'
                    content += '<Key>%s</Key>' % tag['Key']
                    content += '<Value>%s</Value>' % tag['Value']
                    content += '</member>'
                content += '</Tags>'
            return make_response(req_action, content=content)
        elif req_action == 'CreateTopic':
            topic_arn = aws_stack.sns_topic_arn(req_data['Name'][0])
            tag_resource_success = self._extract_tags(topic_arn, req_data, True)
            SNS_SUBSCRIPTIONS[topic_arn] = SNS_SUBSCRIPTIONS.get(topic_arn) or []
            # if tagging failed (e.g., topic already exists with different tags),
            # return an error; otherwise continue as expected
            if not tag_resource_success:
                return make_error(code=400, code_string='InvalidParameter',
                                  message='Topic already exists with different tags')
        elif req_action == 'TagResource':
            self._extract_tags(topic_arn, req_data, False)
            return make_response(req_action)
        elif req_action == 'UntagResource':
            tags_to_remove = []
            req_tags = {k: v for k, v in req_data.items() if k.startswith('TagKeys.member.')}
            req_tags = req_tags.values()
            for tag in req_tags:
                tags_to_remove.append(tag[0])
            do_untag_resource(topic_arn, tags_to_remove)
            return make_response(req_action)

        data = self._reset_account_id(data)
        return Request(data=data, headers=headers, method=method)
    return True
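The handler above parses the form-encoded SNS payload with parse_qs, which returns every field as a single-element list; that is why the code indexes with [0] throughout. A sketch with an illustrative Publish payload:

try:
    from urllib.parse import parse_qs  # Python 3
except ImportError:
    from urlparse import parse_qs      # Python 2

data = 'Action=Publish&TopicArn=arn%3Aaws%3Asns%3Aus-east-1%3A000000000000%3Amy-topic&Message=hello'
req_data = parse_qs(data, keep_blank_values=True)
# Every field arrives as a one-element list, hence req_data['Action'][0] above.
print(req_data['Action'][0])    # Publish
print(req_data['TopicArn'][0])  # arn:aws:sns:us-east-1:000000000000:my-topic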
def cpp(arg):
    payload = {'q': arg}
    return Request(url=CPLUSPLUS, params=payload).prepare().url
def forward_request(self, method, path, data, headers):
    if method == 'POST' and path == '/':
        data = MessageConversion._reset_account_id(data)
        return Request(data=data, headers=headers, method=method)
    return True
def g(arg):
    payload = {'q': arg}
    return Request(url=GOOGLE_SEARCH, params=payload).prepare().url
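Both cpp and g above delegate query-string encoding to requests' PreparedRequest, so special characters are percent-encoded for free. A usage sketch (the base URL is an assumed stand-in for the GOOGLE_SEARCH constant):

from requests.models import Request

GOOGLE_SEARCH = 'https://www.google.com/search'  # assumed value of the constant
# prepare() builds the final URL without sending any request.
print(Request(url=GOOGLE_SEARCH, params={'q': 'C++ std::vector'}).prepare().url)
# https://www.google.com/search?q=C%2B%2B+std%3A%3Avector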
def __init__(self, host=None, port=None, username=None, database='default', auth=None,
             configuration=None, kerberos_service_name=None, password=None,
             thrift_transport=None, service_mode='binary', http_path=None,
             is_zookeeper=False, zookeeper_name_space='hiveserver2',
             keytab_file=None, krb_conf=None):
    """Connect to HiveServer2

    :param host: What host HiveServer2 runs on
    :param port: What port HiveServer2 runs on. Defaults to 10000.
    :param auth: The value of hive.server2.authentication used by HiveServer2.
        Defaults to ``NONE``.
    :param configuration: A dictionary of Hive settings (functionally the same as the `set` command)
    :param kerberos_service_name: Use with auth='KERBEROS' only
    :param password: Use with auth='LDAP' or auth='CUSTOM' only
    :param thrift_transport: A ``TTransportBase`` for custom advanced usage.
        Incompatible with host, port, auth, kerberos_service_name, and password.
    :param service_mode: Set thrift transport mode ('http' or 'binary')
    :param http_path: Use with service_mode='http' only
    :param is_zookeeper: Whether to discover HiveServer2 nodes via ZooKeeper
    :param zookeeper_name_space: Use with service_mode='http' and is_zookeeper=True only
    :param keytab_file: Use with service_mode='http' and auth='KERBEROS' only
    :param krb_conf: pycquery_krb.common.conf.KerberosConf instance.
        Use with service_mode='http' and auth='KERBEROS' only

    The way LDAP and GSSAPI are supported originates from cloudera/Impyla:
    https://github.com/cloudera/impyla/blob/255b07ed973d47a3395214ed92d35ec0615ebf62
    /impala/_thrift_api.py#L152-L160
    """
    self._opened = False
    self.auth = auth
    self.kerberos_service_name = kerberos_service_name
    self.username = username or getpass.getuser()
    self.password = password
    self.service_mode = service_mode
    self.keytab_file = keytab_file
    self.auth_lock = threading.Lock()
    self.realm = None
    self.kdc = None
    self.kerb_client = None
    self.krb_conf = krb_conf
    self.expired_time = 0
    configuration = configuration or {}
    last_exception = None

    # if (password is not None) != (auth in ('LDAP', 'CUSTOM')):
    #     raise ValueError("Password should be set if and only if in LDAP or CUSTOM mode; "
    #                      "Remove password or use one of those modes")
    if auth == 'KERBEROS':
        if kerberos_service_name is None:
            raise ValueError("kerberos_service_name must be set in KERBEROS mode")
        if krb_conf is None:
            raise ValueError("krb_conf must be set in KERBEROS mode")
        p = self.username.split('@')
        self.username = p[0]
        if len(p) > 1:
            self.realm = p[1]
        else:
            self.realm = krb_conf.lib_defaults.default_realm
            if self.realm is None:
                raise ValueError(
                    "Kerberos realm must be specified at username or krb5.conf in KERBEROS mode")
        conf_realm = krb_conf.find_realm(self.realm)
        if conf_realm is None:
            raise ValueError("No matching realm in krb5.conf")
        if len(conf_realm.kdc) == 0:
            raise ValueError("No kdc information in {} realm of krb5.conf".format(self.realm))
        # use the first kdc in the list, for simplicity
        p = conf_realm.kdc[0].split(':')
        self.kdc = {"host": p[0], "port": p[1]}

    if thrift_transport is not None:
        has_incompatible_arg = (host is not None or port is not None or auth is not None
                                or kerberos_service_name is not None or password is not None)
        if has_incompatible_arg:
            raise ValueError("thrift_transport cannot be used with "
                             "host/port/auth/kerberos_service_name/password")

    if is_zookeeper:
        # Randomly shuffle the node information stored in ZooKeeper.
        remaining_nodes = self._get_hiveserver2_info_with_zookeeper(host, port, zookeeper_name_space)
        random.shuffle(remaining_nodes)
    else:
        # Direct access to host and port if not using ZooKeeper.
        remaining_nodes = [{'host': host, 'port': port}]

    # Access nodes sequentially; if one fails, try the remaining nodes.
    while len(remaining_nodes) > 0:
        node = remaining_nodes.pop()
        self.host = node['host']
        self.port = node['port']

        if thrift_transport is not None:
            self._transport = thrift_transport
        elif service_mode == 'binary':
            if self.port is None:
                self.port = 10000
            if self.auth is None:
                self.auth = 'NONE'
            socket = thrift.transport.TSocket.TSocket(self.host, self.port)
            if auth == 'NOSASL':
                # NOSASL corresponds to hive.server2.authentication=NOSASL in hive-site.xml
                self._transport = thrift.transport.TTransport.TBufferedTransport(socket)
            elif self.auth in ('LDAP', 'KERBEROS', 'NONE', 'CUSTOM', 'NOSASL'):
                # Defer import so the package dependency is optional
                import sasl
                import thrift_sasl

                if self.auth == 'KERBEROS':
                    # KERBEROS mode in hive.server2.authentication is GSSAPI in the sasl library
                    sasl_auth = 'GSSAPI'
                else:
                    sasl_auth = 'PLAIN'
                    if self.password is None:
                        # Password doesn't matter in NONE mode; it just needs to be nonempty.
                        self.password = '******'

                def sasl_factory():
                    sasl_client = sasl.Client()
                    sasl_client.setAttr('host', self.host)
                    if sasl_auth == 'GSSAPI':
                        sasl_client.setAttr('service', kerberos_service_name)
                    elif sasl_auth == 'PLAIN':
                        sasl_client.setAttr('username', username)
                        sasl_client.setAttr('password', password)
                    else:
                        raise AssertionError
                    sasl_client.init()
                    return sasl_client

                self._transport = thrift_sasl.TSaslClientTransport(sasl_factory, sasl_auth, socket)
            else:
                # All HS2 config options:
                # https://cwiki.apache.org/confluence/display/Hive/Setting+Up+HiveServer2#SettingUpHiveServer2-Configuration
                # PAM currently left to end user via thrift_transport option.
                raise NotImplementedError(
                    "Only NONE, NOSASL, LDAP, KERBEROS, CUSTOM "
                    "authentication are supported with binary mode, got {}".format(auth))
        elif service_mode == 'http':
            if self.auth is None:
                self.auth = 'NONE'
            if self.auth in ('NONE', 'LDAP', 'KERBEROS', 'CUSTOM'):
                if self.password is None:
                    self.password = '******'
                self._transport = thrift.transport.THttpClient.THttpClient(
                    "http://{}:{}/{}".format(self.host, self.port, http_path))
                if auth == 'KERBEROS':
                    self.set_auth_setting()
                else:
                    auth_header = HTTPBasicAuth(username, self.password)
                    self._transport.setCustomHeaders(auth_header(Request()).headers)
            else:
                raise NotImplementedError(
                    "Only NONE, NOSASL, LDAP, KERBEROS, CUSTOM "
                    "authentication is supported with http mode, got {}".format(auth))
        else:
            raise NotImplementedError(
                "Only binary, http are supported for the transport mode, "
                "got {}".format(service_mode))

        protocol = thrift.protocol.TBinaryProtocol.TBinaryProtocol(self._transport)
        self._client = TCLIService.Client(protocol)
        # oldest version that still contains features we care about
        # "V6 uses binary type for binary payload (was string) and uses columnar result set"
        protocol_version = ttypes.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6

        try:
            self._transport.open()
            open_session_req = ttypes.TOpenSessionReq(
                client_protocol=protocol_version,
                configuration=configuration,
                username=username,
            )
            response = self._client.OpenSession(open_session_req)
            _check_status(response)
            assert response.sessionHandle is not None, "Expected a session from OpenSession"
            self._sessionHandle = response.sessionHandle
            assert response.serverProtocolVersion == protocol_version, \
                "Unable to handle protocol version {}".format(response.serverProtocolVersion)
            self._opened = True

            with contextlib.closing(self.cursor()) as cursor:
                cursor.execute('USE `{}`'.format(database))
            atexit.register(self.close)
        except Exception as ex:
            # If connecting to this node fails, fall through and try the remaining nodes.
            _logger.warning('Failed to connect to %s:%s. (message = %s)' %
                            (self.host, self.port,
                             'Error opening session' if isinstance(ex, EOFError) else ex))
            last_exception = ex
            self.close()
        else:
            # If any of the nodes obtained from ZooKeeper connects successfully, we are done.
            _logger.info('Connected to %s:%s' % (self.host, self.port))
            return

    # Re-raise the last error encountered.
    raise last_exception
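A hypothetical usage sketch, assuming this __init__ belongs to a PyHive-style Connection class that exposes the cursor() and close() methods used internally above; the host, credentials, and the cursor's fetchall() are assumptions for illustration.

import contextlib

# Assumed class name and connection details; binary mode with no authentication.
conn = Connection(host='hs2.example.com', port=10000,
                  username='hive', database='default', auth='NONE')
with contextlib.closing(conn.cursor()) as cur:
    cur.execute('SELECT 1')
    print(cur.fetchall())  # assumes a DB-API style cursor
conn.close()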