def test_empty():
    """Empty-string input falls back to each cast's default value."""
    # No explicit default: zero-valued / empty results.
    for empty in ("", ''):
        assert safe_int(empty) == 0
        assert safe_float(empty) == 0.0
        assert safe_dict(empty) == {}
    # Explicit default: returned unchanged.
    assert safe_int('', 7) == 7
    assert safe_float('', default=7.7) == 7.7
    assert safe_dict('', {'Max': 'Ohad'}) == {'Max': 'Ohad'}
def test_None():
    """None input falls back to each cast's default value."""
    # int casts
    assert safe_int(None) == 0
    assert safe_int(None, 7) == 7
    # float casts
    assert safe_float(None) == 0.0
    assert safe_float(None, default=7.7) == 7.7
    # dict casts
    assert safe_dict(None) == {}
    assert safe_dict(None, {'Jeff': 'Tanner'}) == {'Jeff': 'Tanner'}
    # str casts
    assert safe_str(None) == ''
    assert safe_str(None, "stas") == "stas"
    # generic cast: None stays None unless a default is supplied
    assert safe_cast(None, str) is None
    assert safe_cast(None, str, default="TuliTuliTuli") == "TuliTuliTuli"
def transform_transaction(content, transformed_operation):
    """Flatten one transaction operation into a single record.

    Args:
        content: Raw transaction content dict (source, destination,
            amounts, and a metadata.operation_result sub-dict).
        transformed_operation: Already-transformed operation envelope
            carrying level/timestamp/hash identifiers.

    Returns:
        dict combining the operation envelope fields with the
        transaction-specific fields of `content`.
    """
    operation_result = content['metadata']['operation_result']

    # Envelope fields are copied through verbatim.
    transaction = {
        key: transformed_operation[key]
        for key in ('level', 'timestamp', 'block_hash', 'operation_hash',
                    'operation_group_id', 'operation_id')
    }

    transaction.update({
        'source': content['source'],
        'destination': content['destination'],
        'fee': int(content['fee']),
        'amount': int(content['amount']),
        'counter': int(content['counter']),
        'gas_limit': int(content['gas_limit']),
        'storage_limit': int(content['storage_limit']),
        'status': operation_result['status'],
        # Optional result fields: missing keys become None, then 0 via safe_int.
        'consumed_gas': safe_int(
            get_none_if_key_error(operation_result, 'consumed_gas')),
        'storage_size': safe_int(
            get_none_if_key_error(operation_result, 'storage_size')),
    })
    return transaction
def format(self, record):
    """Format a log record per TUNE_STANDARD_FORMAT_SUPPORTED_KEYS.

    Each supported key present on the record is rendered according to its
    meta 'type' (str/int/float/dict or a custom 'fmt'), optionally prefixed
    with a lowercase label, and the pieces are concatenated in 'order'.

    Args:
        record: logging.LogRecord whose __dict__ carries the values.

    Returns:
        str: the assembled record line, without the trailing ", ".
    """
    fmt_record_items = {}

    for key, meta in TUNE_STANDARD_FORMAT_SUPPORTED_KEYS.items():
        if key not in record.__dict__:
            continue

        if 'label' in meta and meta['label']:
            fmt_record_item = "{}: ".format(str(key).lower())
        else:
            fmt_record_item = ""
        # BUGFIX: removed dead no-op `fmt_record_item.lower()` — str.lower()
        # returns a new string and the result was discarded.

        if 'fmt' in meta:
            meta_fmt = meta['fmt']
        else:
            meta_fmt = '{},'

        try:
            if meta['type'] == str:
                fmt_record_item += \
                    "\"{}\", ".format(safe_str(record.__dict__[key]))
            elif meta['type'] == int:
                fmt_record_item += \
                    "{:,}, ".format(safe_int(record.__dict__[key]))
            elif meta['type'] == float:
                fmt_record_item += \
                    "{0:.2f}, ".format(safe_float(record.__dict__[key]))
            elif meta['type'] == dict:
                # JSON round-trip normalizes the value to JSON-compatible types.
                fmt_record_item += \
                    "{}, ".format(
                        json.loads(
                            json.dumps(dict(record.__dict__[key]))
                        )
                    )
            else:
                fmt_record_item += meta_fmt.format(record.__dict__[key])
        except ValueError:
            # Value did not fit its declared type: fall back to "type: 'value'".
            fmt_record_item += "{}: '{}'".format(
                type(record.__dict__[key]).__name__,
                str(record.__dict__[key]))

        # Zero-padded 'order' keys sort lexicographically == numerically.
        # (Renamed from `hash`, which shadowed the builtin.)
        order_key = "{0:03d}".format(meta['order'])
        fmt_record_items[order_key] = fmt_record_item

    str_fmt_record = ""
    for order_key in sorted(fmt_record_items.keys()):
        str_fmt_record += fmt_record_items[order_key]

    # Drop the trailing ", " separator.
    return str_fmt_record[:-2]
def transform_block(response):
    """Flatten a block RPC response into a single-element record list.

    Args:
        response: Raw block dict with 'header', 'metadata', 'operations',
            and top-level identifiers.

    Returns:
        list containing one flattened block dict.
    """
    header = response['header']
    metadata = response['metadata']

    # Total operations across every operation group.
    operation_count = sum(len(group) for group in response['operations'])

    def level_field(field):
        # Per-field lookup into metadata['level']; tolerates a missing
        # 'level' key (-> None) and a None container (TypeError -> None).
        return get_none_if_type_error(
            get_none_if_key_error(metadata, 'level'), field)

    block = {
        'level': header['level'],
        'proto': header['proto'],
        'predecessor': header['predecessor'],
        # Seconds -> milliseconds.
        'timestamp': convert_timestr_to_timestamp(header['timestamp']) * 10**3,
        'validation_pass': header['validation_pass'],
        'operations_hash': header['operations_hash'],
        'fitness': header['fitness'],
        'context': header['context'],
        'protocol': response['protocol'],
        'chain_id': response['chain_id'],
        'block_hash': response['hash'],
        'nonce_hash': get_none_if_key_error(metadata, 'nonce_hash'),
        'consumed_gas': safe_int(get_none_if_key_error(metadata, 'consumed_gas')),
        'baker': get_none_if_key_error(metadata, 'baker'),
        'voting_period_kind': get_none_if_key_error(metadata, 'voting_period_kind'),
        'cycle': level_field('cycle'),
        'cycle_position': level_field('cycle_position'),
        'voting_period': level_field('voting_period'),
        'voting_period_position': level_field('voting_period_position'),
        'expected_commitment': level_field('expected_commitment'),
        'number_of_operation_groups': len(response['operations']),
        'number_of_operations': operation_count,
    }
    return [block]
def _check_v3_job_status_on_queue(self, export_job, request_retry=None, request_label="TMC v3 Job Status On Queue"):
    """Poll the TMC v3 export queue until the report job completes.

    Args:
        export_job: Export job handle returned when the report was queued.
        request_retry: Optional dict overriding polling 'delay', 'jitter',
            and 'max_delay'.
        request_label: Label for request logging.
            NOTE(review): this parameter is unconditionally overwritten
            just below, so the caller-supplied value is never used —
            confirm whether that is intentional.

    Returns:
        The report download URL, or None if the loop exits without one.

    Raises:
        TuneReportingError: on non-successful HTTP status, exhausted
            retries, or a 100%-complete job with no download URL.
    """
    # Shadows the parameter of the same name (see docstring note).
    request_label = "v3 Logs Advertisers Check Export Status"

    request_url = \
        self.tune_mat_request_path(
            mat_api_version="v3",
            controller=self.controller,
            action="exports/{}".format(
                export_job
            )
        )

    self.logger.info((
        "TMC v3 Logs Advertisers Base: Check Export Status: "
        "Logs '{}': "
        "Action: 'exports status', "
        "Status of Export Report for "
        "Job Handle: '{}'"
    ).format(self.logs_advertisers_type, export_job))

    # Polling defaults; request_retry may override all but 'tries'.
    tries = -1  # default: -1 (indefinite)
    delay = 10
    jitter = 0
    max_delay = 60

    if request_retry:
        if 'delay' in request_retry:
            delay = request_retry['delay']
        if 'jitter' in request_retry:
            jitter = request_retry['jitter']
        if 'max_delay' in request_retry:
            max_delay = request_retry['max_delay']

    request_params = {"session_token": self.session_token}

    self.logger.debug("TMC v3 Logs Advertisers Base: Check Export Status",
                      extra={'request_url': request_url})
    self.logger.debug(
        "TMC v3 Logs Advertisers Base: Check Export Status: Request Retry",
        extra={'tries': tries, 'delay': delay, 'jitter': jitter, 'max_delay': max_delay}
    )
    self.logger.debug(
        "TMC v3 Logs Advertisers Base: Check Export Status: Request",
        extra={'request_params': safe_dict(request_params)}
    )

    report_url = None
    _attempts = 1
    export_percent_complete = 0
    export_status_action = 'exports status'

    self.logger.warning(
        "TMC v3 Logs Advertisers Base: Check Export Status",
        extra={'job': export_job, 'attempt': _attempts, 'action': export_status_action}
    )

    # Initial grace period before the first status poll.
    time.sleep(10)

    _tries, _delay = tries, delay

    while True:
        try:
            response = self.mv_request.request(
                request_method="GET",
                request_url=request_url,
                request_params=request_params,
                request_retry=None,
                request_retry_http_status_codes=None,
                request_retry_func=self.tune_v3_request_retry_func,
                request_retry_excps_func=None,
                request_label=request_label
            )
        except TuneRequestBaseError as tmc_req_ex:
            self.logger.error(
                "TMC v3 Logs Advertisers Base: Check Export Status: Failed",
                extra=tmc_req_ex.to_dict(),
            )
            raise
        except TuneReportingError as tmc_rep_ex:
            self.logger.error(
                "TMC v3 Logs Advertisers Base: Check Export Status: Failed",
                extra=tmc_rep_ex.to_dict(),
            )
            raise
        except Exception as ex:
            print_traceback(ex)
            self.logger.error(
                "TMC v3 Logs Advertisers Base: Check Export Status: Failed",
                extra={'error': get_exception_message(ex)}
            )
            raise

        http_status_successful = is_http_status_type(
            http_status_code=response.status_code,
            http_status_type=HttpStatusType.SUCCESSFUL
        )

        if not http_status_successful:
            raise TuneReportingError(
                error_message="Failed to get export status on queue: {}".format(response.status_code),
                error_code=TuneReportingErrorCodes.REP_ERR_REQUEST
            )

        json_response = response.json()

        export_percent_complete = 0
        if "percent_complete" in json_response:
            export_percent_complete = \
                safe_int(json_response["percent_complete"])

        self.logger.info(
            "TMC v3 Logs Advertisers Base: Check Job Export Status",
            extra={
                'job': export_job,
                'response_status_code': json_response["status"],
                'export_percent_complete': safe_int(export_percent_complete)
            }
        )

        # Done: job finished and a download URL is available.
        if (export_percent_complete == 100 and
                json_response["status"] == "complete" and
                json_response["url"]):
            report_url = json_response["url"]
            self.logger.info(
                "TMC v3 Logs Advertisers Base: Check Job Export Status: Completed",
                extra={
                    'job': export_job,
                    'report_url': report_url,
                    'request_label': request_label,
                    'export_percent_complete': safe_int(export_percent_complete)
                }
            )
            break

        # Bounded retries only when tries >= 0; the default -1 polls forever.
        if tries >= 0:
            _tries -= 1
            if _tries == 0:
                self.logger.error(("TMC v3 Logs Advertisers Base: "
                                   "Check Job Export Status: Exhausted Retries"),
                                  extra={
                                      'attempt': _attempts,
                                      'tries': _tries,
                                      'action': export_status_action,
                                      'request_label': request_label,
                                      'export_percent_complete': export_percent_complete
                                  })
                raise TuneReportingError(
                    error_message=(
                        "TMC v3 Logs Advertisers Base: "
                        "Check Job Export Status: "
                        "Exhausted Retries: "
                        "Percent Completed: {}"
                    ).format(export_percent_complete),
                    error_code=TuneReportingErrorCodes.REP_ERR_JOB_STOPPED
                )

        _attempts += 1
        self.logger.warning(
            "TMC v3 Logs Advertisers Base: Check Export Status",
            extra={'attempt': _attempts, 'job': export_job, 'delay': _delay, 'action': 'exports status'}
        )

        # Linear backoff: sleep, then grow delay by jitter, capped at max_delay.
        time.sleep(_delay)
        _delay += jitter
        _delay = min(_delay, max_delay)

    if export_percent_complete == 100 and not report_url:
        raise TuneReportingError(
            error_message=(
                "TMC v3 Logs Advertisers Base: Check Job Export Status: "
                "Download report URL: Undefined"
            )
        )

    self.logger.info(
        "TMC v3 Logs Advertisers Base: Check Job Export Status: Finished",
        extra={
            'attempt': _attempts,
            'action': export_status_action,
            'report_url': report_url,
            'request_label': request_label,
            'export_percent_complete': export_percent_complete,
            'job': export_job
        }
    )

    return report_url
def _check_v2_job_status_on_queue(
    self,
    auth_type,
    auth_value,
    export_status_controller,
    export_status_action,
    export_job_id,
    request_retry=None,
):
    """Poll the TMC v2 export queue until the report job completes.

    Args:
        auth_type: Authentication parameter name used as a request key.
        auth_value: Authentication credential value.
        export_status_controller: v2 controller to query for job status.
        export_status_action: Status action; STATUS reads json_data['url'],
            DOWNLOAD reads json_data['data']['url'].
        export_job_id: Job id returned when the report was queued.
        request_retry: Optional dict overriding 'delay', 'jitter',
            'max_delay', and 'tries'. NOTE: mutated in place when 'tries'
            is absent.

    Returns:
        The report download URL, or None if the loop exits without one.

    Raises:
        TuneReportingError: on request failure, non-retryable status
            codes, exhausted retries, or a missing download URL.
    """
    request_label = "TMC v2 Advertiser Stats: Check Export Status"

    v2_export_status_request_url = \
        self.tune_mat_request_path(
            mat_api_version="v2",
            controller=export_status_controller,
            action=export_status_action
        )

    request_params = {auth_type: auth_value, "job_id": export_job_id}

    self.logger.info(
        ("TMC v2 Advertiser Stats: Check Job Status"),
        extra={
            'action': export_status_action,
            'job_id': export_job_id,
            'request_url': v2_export_status_request_url,
            'request_params': safe_dict(request_params)
        })

    # Polling defaults; unlike the v3 variant, retries here are bounded.
    tries = 60  # -1 (indefinite)
    delay = 10
    jitter = 10
    max_delay = 60

    if request_retry is not None:
        if 'delay' in request_retry:
            delay = request_retry['delay']
        if 'jitter' in request_retry:
            jitter = request_retry['jitter']
        if 'max_delay' in request_retry:
            max_delay = request_retry['max_delay']

        if 'tries' in request_retry:
            tries = request_retry['tries']
        else:
            # NOTE: mutates the caller-supplied dict.
            request_retry.update({'tries': 60})
    else:
        request_retry = {'tries': 60, 'delay': 10, 'timeout': 60}

    self.logger.debug(msg=("TMC v2 Advertiser Stats: Check Job Status: "
                           "Request Retry"), extra=request_retry)

    report_url = None
    _attempts = 1
    export_percent_complete = 0

    # Initial grace period before the first status poll.
    time.sleep(10)

    _tries, _delay = tries, delay
    while True:
        try:
            response = self.mv_request.request(
                request_method="GET",
                request_url=v2_export_status_request_url,
                request_params=request_params,
                request_label=request_label,
                request_retry_func=self.tune_v2_request_retry_func)
        except TuneRequestBaseError as tmc_req_ex:
            self.logger.error(
                "TMC v2 Advertiser Stats: Check Job Status: Failed",
                extra=tmc_req_ex.to_dict(),
            )
            raise
        except TuneReportingError as tmc_rep_ex:
            self.logger.error(
                "TMC v2 Advertiser Stats: Check Job Status: Failed",
                extra=tmc_rep_ex.to_dict(),
            )
            raise
        except Exception as ex:
            print_traceback(ex)
            self.logger.error(
                "TMC v2 Advertiser Stats: Check Job Status: {}".format(
                    get_exception_message(ex)))
            raise

        http_status_successful = is_http_status_type(
            http_status_code=response.status_code,
            http_status_type=HttpStatusType.SUCCESSFUL)

        if not http_status_successful:
            raise TuneReportingError(
                error_message=(
                    "Failed to get export status on queue: {}").format(
                        response.status_code),
                error_code=TuneReportingErrorCodes.REP_ERR_REQUEST)

        if hasattr(response, 'url'):
            self.logger.info(
                "TMC v2 Advertiser Stats: Reporting API: Status URL",
                extra={'response_url': response.url})

        json_response = response.json()

        # NOTE(review): when json_response is falsy or lacks 'status_code',
        # the failure branch below still references 'status_code' before it
        # was ever assigned (NameError) — confirm upstream always returns it.
        if not json_response:
            request_status_successful = False
        elif 'status_code' not in json_response:
            request_status_successful = False
        else:
            status_code = json_response['status_code']
            request_status_successful = is_http_status_type(
                http_status_code=status_code,
                http_status_type=HttpStatusType.SUCCESSFUL)

        errors = None
        if 'errors' in json_response:
            errors = json_response['errors']

        if not request_status_successful:
            error_message = (
                "TMC v2 Advertiser Stats: Check Job Status: GET '{}', Failed: {}, {}"
            ).format(v2_export_status_request_url, status_code, errors)

            if (status_code == TuneReportingError.EX_SRV_ERR_500_INTERNAL_SERVER):
                self.logger.error(error_message)
            elif (status_code == TuneReportingError.EX_SRV_ERR_503_SERVICE_UNAVAILABLE):
                self.logger.error(error_message)
            elif (status_code == TuneReportingError.EX_SRV_ERR_504_SERVICE_TIMEOUT):
                self.logger.error(error_message)
                # NOTE(review): 'continue' skips the retry bookkeeping and
                # time.sleep at the bottom of the loop, so repeated 504s
                # re-poll immediately and never count against 'tries'.
                continue
            elif (status_code == TuneReportingError.EX_CLT_ERR_408_REQUEST_TIMEOUT):
                self.logger.error(
                    "GET '{}' request timeout, Retrying: {}".format(
                        v2_export_status_request_url, status_code))
                # Same caveat as the 504 branch above.
                continue
            else:
                raise TuneReportingError(error_message=error_message,
                                         error_code=status_code)

            # 500/503 fall through to here: raise only when bounded retries
            # are nearly exhausted, otherwise warn and keep polling.
            if tries >= 0 and _tries <= 1:
                if (status_code == HttpStatusCode.GATEWAY_TIMEOUT):
                    raise TuneReportingError(
                        error_message=error_message,
                        error_code=TuneReportingErrorCodes.GATEWAY_TIMEOUT)
                elif (status_code == HttpStatusCode.REQUEST_TIMEOUT):
                    raise TuneReportingError(
                        error_message=error_message,
                        error_code=TuneReportingErrorCodes.REQUEST_TIMEOUT)
                else:
                    raise TuneReportingError(error_message=error_message,
                                             error_code=status_code)
            else:
                self.logger.warning(error_message)

        export_percent_complete = 0
        if 'data' in json_response and json_response['data']:
            json_data = json_response['data']

            if "percent_complete" in json_data:
                export_percent_complete = \
                    safe_int(json_data["percent_complete"])

            self.logger.info(msg=("TMC v2 Advertiser Stats: "
                                  "Check Job Export Status: "
                                  "Response Success"),
                             extra={
                                 'job_id': export_job_id,
                                 'export_status': json_data["status"],
                                 'export_percent_complete': safe_int(export_percent_complete),
                                 'attempt': _attempts
                             })

            # Completion shape differs per action: STATUS exposes 'url'
            # at the top of json_data; DOWNLOAD nests it in 'data'.
            if (export_status_action == TuneV2AdvertiserStatsStatusAction.STATUS):
                if (export_percent_complete == 100 and
                        json_data["status"] == "complete" and
                        json_data["url"]):
                    report_url = json_data["url"]
                    self.logger.debug(
                        ("TMC v2 Advertiser Stats: "
                         "Check Job Export Status: Completed"),
                        extra={
                            'job_id': export_job_id,
                            'action': export_status_action,
                            'report_url': report_url,
                            'request_label': request_label
                        })
                    break
            elif (export_status_action == TuneV2AdvertiserStatsStatusAction.DOWNLOAD):
                if (export_percent_complete == 100 and
                        json_data["status"] == "complete" and
                        json_data["data"]["url"]):
                    report_url = json_data["data"]["url"]
                    self.logger.debug(
                        ("TMC v2 Advertiser Stats: "
                         "Check Job Export Status: Completed"),
                        extra={
                            'job_id': export_job_id,
                            'action': export_status_action,
                            'report_url': report_url,
                            'request_label': request_label
                        })
                    break
        else:
            self.logger.debug("TMC v2 Advertiser Stats: "
                              "Check Job Export Status: "
                              "No Data Available")

        # Bounded retries only when tries >= 0.
        if tries >= 0:
            _tries -= 1
            if _tries == 0:
                self.logger.error(
                    ("TMC v2 Advertiser Stats: "
                     "Check Job Export Status: Exhausted Retries"),
                    extra={
                        'attempt': _attempts,
                        'tries': _tries,
                        'action': export_status_action,
                        'request_label': request_label,
                        'export_percent_complete': safe_int(export_percent_complete),
                        'job_id': export_job_id
                    })
                raise TuneReportingError(
                    error_message=("TMC v2 Advertiser Stats: "
                                   "Check Job Export Status: "
                                   "Exhausted Retries: "
                                   "Percent Completed: {}").format(
                                       safe_int(export_percent_complete)),
                    error_code=TuneReportingErrorCodes.
                    REP_ERR_RETRY_EXHAUSTED)

        _attempts += 1
        self.logger.info("TMC v2 Advertiser Stats: Check Job Status",
                         extra={
                             'attempt': _attempts,
                             'job_id': export_job_id,
                             'delay': _delay,
                             'action': export_status_action
                         })

        # Linear backoff: sleep, then grow delay by jitter, capped at max_delay.
        time.sleep(_delay)
        _delay += jitter
        _delay = min(_delay, max_delay)

    if export_percent_complete == 100 and not report_url:
        raise TuneReportingError(
            error_message=(
                "TMC v2 Advertiser Stats: Check Job Export Status: "
                "Download report URL: Undefined"),
            error_code=TuneReportingErrorCodes.REP_ERR_UNEXPECTED_VALUE)

    self.logger.info(
        "TMC v2 Advertiser Stats: Check Job Export Status: Finished",
        extra={
            'attempt': _attempts,
            'action': export_status_action,
            'report_url': report_url,
            'request_label': request_label,
            'export_percent_complete': export_percent_complete,
            'job_id': export_job_id
        })

    return report_url
def main(tmc_api_key):
    """Stream yesterday's TMC v2 advertiser stats as CSV and rewrite
    each row as a JSON object into a local data file.

    Args:
        tmc_api_key: TMC API key used for authentication.
    """
    TIMEZONE_COLLECT = "America/New_York"

    tune_v2_advertiser_stats_actuals = \
        TuneV2AdvertiserStatsActuals(
            timezone=TIMEZONE_COLLECT,
            logger_level=logging.INFO,
            logger_format=TuneLoggingFormat.JSON
        )

    dw_file_path = "data.{}".format(TuneV2AdvertiserStatsFormats.JSON)
    # Start from a clean output file.
    if os.path.exists(dw_file_path):
        os.remove(dw_file_path)

    # "Yesterday" is computed in the collection timezone.
    tz = pytz.timezone(TIMEZONE_COLLECT)
    yesterday = datetime.now(tz).date() - timedelta(days=1)
    str_yesterday = str(yesterday)

    request_params = {
        'format': TuneV2AdvertiserStatsFormats.CSV,
        'fields': ("ad_clicks,"
                   "ad_clicks_unique,"
                   "ad_impressions,"
                   "ad_impressions_unique,"
                   "ad_network_id,"
                   "advertiser_id,"
                   "country.code,"
                   "date_hour,"
                   "events,"
                   "installs,"
                   "is_reengagement,"
                   "payouts,"
                   "publisher_id,"
                   "publisher_sub_ad.ref,"
                   "publisher_sub_adgroup.ref,"
                   "publisher_sub_campaign.ref,"
                   "publisher_sub_publisher.ref,"
                   "publisher_sub_site.ref,"
                   "site_id"),
        'group': ("country_id,"
                  "is_reengagement,"
                  "publisher_id,"
                  "publisher_sub_ad_id,"
                  "publisher_sub_adgroup_id,"
                  "publisher_sub_campaign_id,"
                  "publisher_sub_publisher_id,"
                  "publisher_sub_site_id,"
                  "site_id"),
        # NOTE(review): report timezone differs from TIMEZONE_COLLECT —
        # confirm this mismatch is intentional.
        'timezone': "America/Los_Angeles"
    }

    try:
        tune_v2_advertiser_stats_actuals.tmc_auth(tmc_api_key=tmc_api_key)

        response = tune_v2_advertiser_stats_actuals.stream(
            auth_value=tmc_api_key,
            auth_type=TuneV2AuthenticationTypes.API_KEY,
            auth_type_use=TuneV2AuthenticationTypes.API_KEY,
            start_date=str_yesterday,
            end_date=str_yesterday,
            request_params=request_params,
            request_retry={
                'delay': 15,
                'timeout': 30,
                'tries': 5
            })

        line_count = 0
        csv_keys_list = None

        # Maps TMC CSV column names to the output JSON field names.
        json_keys_dict = {
            "publisher_sub_campaign.ref": "sub_campaign_ref",
            "publisher_sub_ad.ref": "sub_ad_ref",
            "publisher_sub_adgroup.ref": "sub_adgroup_ref",
            "publisher_sub_publisher.ref": "sub_publisher_ref",
            "publisher_sub_site.ref": "sub_site_ref",
            "publisher_sub_placement.ref": "sub_placement_ref",
            "publisher_sub_campaign.name": "sub_campaign_name",
            "publisher_sub_ad.name": "sub_ad_name",
            "publisher_sub_adgroup.name": "sub_adgroup_name",
            "publisher_sub_publisher.name": "sub_publisher_name",
            "publisher_sub_site.name": "sub_site_name",
            "publisher_sub_placement.name": "sub_placement_name",
            "publisher_sub_campaign_id": "sub_campaign_id",
            "publisher_sub_ad_id": "sub_ad_id",
            "publisher_sub_adgroup_id": "sub_adgroup_id",
            "publisher_sub_publisher_id": "sub_publisher_id",
            "publisher_sub_site_id": "sub_site_id",
            "publisher_sub_placement_id": "publisher_sub_placement_id",
            "country.code": "country_code",
            "ad_impressions": "received_impressions_gross",
            "ad_impressions_unique": "received_impressions_unique",
            "ad_clicks": "received_clicks_gross",
            "ad_clicks_unique": "received_clicks_unique",
            "events": "received_engagements",
            "installs": "received_installs",
            "payouts": "cost"
        }

        # Output field -> Python type used to cast the CSV string value.
        # NOTE(review): several keys (sub_campaign_name .. sub_placement_name)
        # are listed twice; duplicates are harmless (later wins) but should
        # be cleaned up.
        json_types_dict = {
            "client_id": int,
            "partner_id": int,
            "vendor_id": int,
            "date": str,
            "hour": int,
            "timezone": str,
            "granularity": str,
            "site_ref_id": str,
            "site_ref_type": str,
            "partner_ref_id": int,
            "partner_ref_type": str,
            "partner_vendor_ref_id": int,
            "partner_vendor_ref_type": str,
            "sub_campaign_type": str,
            "sub_campaign_ref": str,
            "sub_ad_ref": str,
            "sub_adgroup_ref": str,
            "sub_publisher_ref": str,
            "sub_site_ref": str,
            "sub_placement_ref": str,
            "sub_campaign_name": str,
            "sub_ad_name": str,
            "sub_adgroup_name": str,
            "sub_publisher_name": str,
            "sub_site_name": str,
            "sub_placement_name": str,
            "sub_campaign_name": str,
            "sub_ad_name": str,
            "sub_adgroup_name": str,
            "sub_publisher_name": str,
            "sub_site_name": str,
            "sub_placement_name": str,
            "sub_campaign_id": int,
            "sub_ad_id": int,
            "sub_adgroup_id": int,
            "sub_publisher_id": int,
            "sub_site_id": int,
            "publisher_sub_placement_id": int,
            "country_code": str,
            "received_impressions_gross": int,
            "received_impressions_unique": int,
            "received_clicks_gross": int,
            "received_clicks_unique": int,
            "received_installs": int,
            "received_engagements": int,
            "received_conversions": int,
            "cost": float,
            "cost_currency": str,
            "site_id": int,
            "publisher_id": int,
            "advertiser_id": int,
            "ad_network_id": int,
            "ad_impressions": int,
        }

        # Placeholder identifiers merged into every output record.
        client_id = 0
        partner_id = 0
        vendor_id = 0
        timezone = "TBD"
        granularity = "TBD"

        config_extra = {
            "client_id": client_id,
            "partner_id": partner_id,
            "vendor_id": vendor_id,
            "timezone": timezone,
            "granularity": granularity,
            "cost_currency": "USD",
            "received_conversions": 0,
            "site_ref_type": "tmc",
            "partner_ref_type": "tmc",
            "partner_vendor_ref_type": "tmc"
        }

        with open(file=dw_file_path, mode='w') as dw_file_w:
            for bytes_line in response.iter_lines(chunk_size=4096):
                if bytes_line:  # filter out keep-alive new chunks
                    line_count += 1
                    str_line = bytes_line.decode("utf-8")

                    if line_count == 1:
                        # Header row: rename columns to output field names.
                        csv_keys_list = str_line.split(',')
                        for index, csv_key in enumerate(csv_keys_list):
                            if csv_key in json_keys_dict:
                                csv_keys_list[index] = json_keys_dict[csv_key]
                        continue
                    elif line_count > 2:
                        # Newline-delimited JSON: separator before every
                        # record except the first.
                        dw_file_w.write('\n')

                    csv_values_list = str_line.split(',')

                    json__dict = {}
                    is_reengagement = 0
                    received_installs = 0
                    received_engagements = 0

                    for csv_key, csv_value in zip(csv_keys_list, csv_values_list):
                        csv_value_strip = csv_value.strip('"')

                        if csv_key == "date_hour":
                            # Split "YYYY-MM-DD HH:MM" into date and hour.
                            parts_date_time = csv_value_strip.split(" ")
                            rdate_yyyy_mm_dd = parts_date_time[0]
                            parts_time = parts_date_time[1].split(":")
                            rhour = safe_int(parts_time[0])
                            json__dict.update({"date": rdate_yyyy_mm_dd})
                            json__dict.update({"hour": rhour})
                        elif csv_key == "is_reengagement":
                            # Held back for sub_campaign_type; not emitted.
                            is_reengagement = safe_int(csv_value_strip)
                        elif csv_key == "received_installs":
                            received_installs = safe_int(csv_value_strip)
                            json__dict.update(
                                {'received_installs': received_installs})
                        elif csv_key == "received_engagements":
                            received_engagements = safe_int(csv_value_strip)
                            json__dict.update(
                                {'received_engagements': received_engagements})
                        else:
                            # Cast per json_types_dict; default to string.
                            if csv_key in json_types_dict:
                                if json_types_dict[csv_key] == str:
                                    csv_value_typed = safe_str(csv_value_strip)
                                elif json_types_dict[csv_key] == int:
                                    csv_value_typed = safe_int(csv_value_strip)
                                elif json_types_dict[csv_key] == float:
                                    csv_value_typed = safe_float(csv_value_strip)
                                else:
                                    csv_value_typed = safe_str(csv_value_strip)
                            else:
                                csv_value_typed = safe_str(csv_value_strip)

                            json__dict.update({csv_key: csv_value_typed})

                    # Engagements only count for re-engagement rows.
                    if is_reengagement == 1:
                        engagements = received_engagements
                    else:
                        engagements = 0

                    # Classify the row by which metrics are present.
                    if engagements > 0 and received_installs > 0:
                        sub_campaign_type = "acquisition_engagement"
                    elif received_installs > 0:
                        sub_campaign_type = "acquisition"
                    elif engagements > 0:
                        sub_campaign_type = "engagement"
                    else:
                        sub_campaign_type = ""

                    json__dict.update({'sub_campaign_type': sub_campaign_type})
                    json__dict.update(config_extra)

                    json_str = json.dumps(json__dict)
                    dw_file_w.write(json_str)

            dw_file_w.flush()

        statinfo = os.stat(dw_file_path)

        extra = {
            'response_status_code': response.status_code,
            'response_headers': response.headers,
            'dw_file_path': dw_file_path,
            'dw_file_size': convert_size(statinfo.st_size),
            'line_count': line_count,
            'csv_header_list': csv_keys_list
        }
        pprint(extra)

    except TuneRequestBaseError as tmc_req_ex:
        print_traceback(tmc_req_ex)
        pprint(tmc_req_ex.to_dict())
        print(str(tmc_req_ex))
    except TuneReportingError as tmc_rep_ex:
        pprint(tmc_rep_ex.to_dict())
        print(str(tmc_rep_ex))
    except Exception as ex:
        print_traceback(ex)
        print(get_exception_message(ex))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#  @copyright 2017 TUNE, Inc. (http://www.tune.com)
#  @namespace smart-cast

from pprintpp import pprint

from safe_cast import (
    safe_int,
    safe_float,
    safe_str,
)

# Exercise each safe_* cast against int, str, float, and numeric-string input.
for value in (4, '4', 4.0, '4.0'):
    pprint(safe_int(value))
    pprint(safe_float(value))
    pprint(safe_str(value))

# Float-like strings truncate to int.
pprint(safe_int('1.0'))
pprint(safe_int('1'))
def validate_response(
    response,
    request_curl,
    request_label=None,
):
    """Validate an HTTP response and log its vitals.

    Args:
        response: Requests-style response object.
        request_curl: cURL rendering of the request, attached to errors.
        request_label: Optional label for log/error messages.

    Raises:
        RequestsFortifiedModuleError: when the response is falsy or its
            HTTP status is not successful.
    """
    response_extra = {}
    if request_label is None:
        request_label = 'Validate Response'

    # A falsy response is treated as a hard failure.
    if not response:
        error_message = "{0}: Failed: None".format(request_label)
        log.error(error_message, extra=response_extra)
        raise RequestsFortifiedModuleError(
            error_message=error_message,
            error_request_curl=request_curl,
            error_code=RequestsFortifiedErrorCodes.REQ_ERR_SOFTWARE
        )

    log.debug("{0}: Defined".format(request_label), extra=response_extra)
    response_extra.update({'http_status_code': response.status_code})

    # EAFP on purpose: when .text exists, try-access beats hasattr; the
    # length is collected for logging only.
    try:
        response_extra.update({'response_text_length': len(response.text)})
    except AttributeError:
        pass

    headers = response.headers
    if headers:
        if 'Content-Type' in headers:
            response_extra.update(
                {'Content-Type': safe_str(headers['Content-Type'])})

        if 'Content-Length' in headers:
            response_extra.update(
                {'Content-Length': bytes_to_human(safe_int(headers['Content-Length']))})

        if 'Content-Encoding' in headers:
            response_extra.update(
                {'Content-Encoding': safe_str(headers['Content-Encoding'])})

        if 'Transfer-Encoding' in headers:
            response_extra.update(
                {'Transfer-Encoding': safe_str(headers['Transfer-Encoding'])})

    if not is_http_status_successful(http_status_code=response.status_code):
        error_message = "{0}: Failed".format(request_label)
        log.error(error_message, extra=response_extra)
        raise RequestsFortifiedModuleError(
            error_message=error_message,
            error_request_curl=request_curl,
            error_code=RequestsFortifiedErrorCodes.REQ_ERR_SOFTWARE
        )

    log.debug("{0}: Success".format(request_label), extra=response_extra)
def build_response_error_details(request_label, request_url, response):
    """Build gather status of Requests' response.

    Collects HTTP status, selected headers, and a best-effort parse of the
    response body (JSON, HTML, XML, or raw text) into one details dict.

    Args:
        request_label: Label identifying the originating request.
        request_url: URL that was requested.
        response: Requests-style response object.

    Returns:
        dict of response diagnostics, including 'response_details' and
        its 'response_details_source' (json/text/html/xml, or None).
    """
    http_status_code = \
        response.status_code
    http_status_type = \
        get_http_status_type(http_status_code)
    http_status_desc = \
        get_http_status_desc(http_status_code)

    response_status = "%s: %s: %s" % (http_status_code, http_status_type, http_status_desc)

    response_error_details = {
        'request_url': request_url,
        'request_label': request_label,
        'response_status': response_status,
        'response_status_code': http_status_code,
        'response_status_type': http_status_type,
        'response_status_desc': http_status_desc
    }

    # Mirror the interesting headers into the details dict.
    if response.headers:
        if 'Content-Type' in response.headers:
            response_headers_content_type = \
                safe_str(response.headers['Content-Type'])
            response_error_details.update({'Content-Type': response_headers_content_type})

        if 'Content-Length' in response.headers and \
                response.headers['Content-Length']:
            response_headers_content_length = \
                safe_int(response.headers['Content-Length'])
            response_error_details.update({'Content-Length': response_headers_content_length})

        if 'Transfer-Encoding' in response.headers and \
                response.headers['Transfer-Encoding']:
            response_headers_transfer_encoding = \
                safe_str(response.headers['Transfer-Encoding'])
            response_error_details.update({'Transfer-Encoding': response_headers_transfer_encoding})

        if 'Content-Encoding' in response.headers and \
                response.headers['Content-Encoding']:
            response_headers_content_encoding = \
                safe_str(response.headers['Content-Encoding'])
            response_error_details.update({'Content-Encoding': response_headers_content_encoding})

    if hasattr(response, "reason") and response.reason:
        response_error_details.update({'response_reason': response.reason})

    response_details = None
    response_details_source = None

    # Prefer JSON; fall back to raw text and sniff HTML/XML payloads.
    try:
        response_details = response.json()
        response_details_source = 'json'
    except Exception:
        if hasattr(response, 'text') and \
                response.text and \
                len(response.text) > 0:
            response_details = response.text
            response_details_source = 'text'

            if response_details.startswith('<html'):
                response_details_source = 'html'
                soup_html = bs4.BeautifulSoup(response_details, "html.parser")
                # kill all script and style elements
                for script in soup_html(["script", "style"]):
                    script.extract()  # rip it out
                text_html = soup_html.get_text()
                lines_html = [line for line in text_html.split('\n') if line.strip() != '']
                lines_html = [line.strip(' ') for line in lines_html]
                response_details = lines_html
            elif response_details.startswith('<?xml'):
                response_details_source = 'xml'
                response_details = json.dumps(xmltodict.parse(response_details))

    response_error_details.update({
        'response_details': response_details,
        'response_details_source': response_details_source
    })

    # pprint(response_error_details)

    return response_error_details
def test_safe_int():
    """safe_int: type, numeric cast, string cast, default, and raising."""
    # test type:
    assert isinstance(safe_int(0), int)
    assert isinstance(safe_int(0.0), int)
    assert isinstance(safe_int('0'), int)
    # test numeric cast:
    assert safe_int(0) == 0
    assert safe_int(10) == 10
    assert safe_int(-1) == -1
    assert safe_int(10.5) == 10
    # test str cast:
    assert safe_int('10') == 10
    assert safe_int('-1') == -1
    assert safe_int('1,000.5') == 1000
    # test default param:
    assert safe_int('###', 256) == 256
    # test exception raising:
    # BUGFIX: pytest.raises' `message` kwarg was deprecated in pytest 3.x
    # and removed in pytest 4.0 (it never asserted on the exception text).
    with pytest.raises(ValueError):
        safe_int('##')
print('==================================')
# safe_str over floats of increasing precision, then int/str/float inputs.
for value in (.1, .12, .123, .1234, .12345, 4, '4', '4.0', 4.0):
    print(safe_str(value))
print("\n")
print('==================================')
print('safe_int()')
print('==================================')
# safe_int over the same float ladder plus whole/fractional strings.
for value in (.1, .12, .123, .1234, .12345,
              4, '4', 4.0, '4.0', '1.0', '1',
              1.0, 1.00, 1, '4.1', '4.12'):
    print(safe_int(value))
def test_safe_int():
    """safe_int: type, numeric cast, string cast, default, and error text."""
    # test type:
    assert isinstance(safe_int(0), int)
    assert isinstance(safe_int(0.0), int)
    assert isinstance(safe_int('0'), int)
    # test numeric cast:
    assert safe_int(0) == 0
    assert safe_int(10) == 10
    assert safe_int(-1) == -1
    assert safe_int(10.5) == 10
    # test str cast:
    assert safe_int('10') == 10
    assert safe_int('-1') == -1
    assert safe_int('1,000.5') == 1000
    # test default param:
    assert safe_int('###', 256) == 256
    # test exception raising:
    # BUGFIX: pytest.raises' `message` kwarg was deprecated in pytest 3.x
    # and removed in pytest 4.0 (it never asserted on the exception text);
    # the excinfo assertions below already verify the message content.
    with pytest.raises(ValueError) as excinfo:
        safe_int('##')
    assert 'Error: Could not convert string to float:' \
        in str(excinfo.value)
    assert 'Value: ##' in str(excinfo.value)
    assert 'Cast: str to float' in str(excinfo.value)
def validate_response(
    response,
    request_curl,
    request_label=None,
):
    """Validate a Requests response, raising on missing or failed responses.

    Args:
        response: requests.Response object to validate (may be None).
        request_curl: Curl representation of the request, attached to errors.
        request_label: Optional label used as a prefix in log/error messages.

    Raises:
        TuneRequestModuleError: If ``response`` is None or its HTTP status
            code is not successful.
    """
    response_extra = {}
    if request_label is None:
        request_label = 'Validate Response'

    # BUG FIX: `if not response:` invoked requests.Response.__bool__, which is
    # False for any non-2xx/3xx status, so e.g. a 500 raised here with the
    # misleading "Failed: None" message instead of reaching the dedicated
    # status-code handling below. Only a genuinely missing response belongs
    # in this branch.
    if response is None:
        error_message = f'{request_label}: Failed: None'
        log.error(error_message, extra=response_extra)
        raise TuneRequestModuleError(
            error_message=error_message,
            error_request_curl=request_curl,
            error_code=TuneRequestErrorCodes.REQ_ERR_SOFTWARE)

    log.debug(f'{request_label}: Defined', extra=response_extra)

    response_extra.update({'http_status_code': response.status_code})

    # Not using hasattr on purpose
    # Assuming positive approach will give us speedup since when attribute exists
    # hasattr takes double the time.
    # Anyway we use text attribute here to logging purpose only
    try:
        response_extra.update({'response_text_length': len(response.text)})
    except AttributeError:
        pass

    if response.headers:
        if 'Content-Type' in response.headers:
            response_headers_content_type = \
                safe_str(response.headers['Content-Type'])
            response_extra.update(
                {'Content-Type': response_headers_content_type})

        if 'Content-Length' in response.headers:
            response_headers_content_length = \
                safe_int(response.headers['Content-Length'])
            response_extra.update({
                'Content-Length':
                bytes_to_human(response_headers_content_length)
            })

        if 'Content-Encoding' in response.headers:
            response_content_encoding = \
                safe_str(response.headers['Content-Encoding'])
            response_extra.update(
                {'Content-Encoding': response_content_encoding})

        if 'Transfer-Encoding' in response.headers:
            response_transfer_encoding = \
                safe_str(response.headers['Transfer-Encoding'])
            response_extra.update(
                {'Transfer-Encoding': response_transfer_encoding})

    if not is_http_status_successful(http_status_code=response.status_code):
        error_message = f'{request_label}: Failed'
        log.error(error_message, extra=response_extra)
        raise TuneRequestModuleError(
            error_message=error_message,
            error_request_curl=request_curl,
            error_code=TuneRequestErrorCodes.REQ_ERR_SOFTWARE)

    log.debug(f'{request_label}: Success', extra=response_extra)
def build_response_error_details(request_label, request_url, response):
    """Gather the status and details of a Requests response into a dict.

    Args:
        request_label: Human-readable label for the request.
        request_url: URL that was requested.
        response: requests.Response object to inspect.

    Returns:
        dict with status code/type/description, selected response headers,
        and the response body parsed as JSON, HTML text lines, XML-as-JSON,
        or raw text — whichever applies first.
    """
    status_code = response.status_code
    status_type = get_http_status_type(status_code)
    status_desc = get_http_status_desc(status_code)

    details = {
        'request_url': request_url,
        'request_label': request_label,
        'response_status': f"{status_code}: {status_type}: {status_desc}",
        'response_status_code': status_code,
        'response_status_type': status_type,
        'response_status_desc': status_desc,
    }

    if response.headers:
        # (header name, caster, require truthy value) — Content-Type is
        # recorded whenever present; the others only when non-empty.
        header_specs = (
            ('Content-Type', safe_str, False),
            ('Content-Length', safe_int, True),
            ('Transfer-Encoding', safe_str, True),
            ('Content-Encoding', safe_str, True),
        )
        for header_name, cast, require_value in header_specs:
            if header_name not in response.headers:
                continue
            if require_value and not response.headers[header_name]:
                continue
            details[header_name] = cast(response.headers[header_name])

    if hasattr(response, "reason") and response.reason:
        details['response_reason'] = response.reason

    body = None
    body_source = None

    try:
        body = response.json()
        body_source = 'json'
    except Exception:
        # Not JSON — fall back to the raw text, then sniff HTML/XML.
        if hasattr(response, 'text') and \
                response.text and \
                len(response.text) > 0:
            body = response.text
            body_source = 'text'

            if body.startswith('<html'):
                body_source = 'html'
                soup_html = bs4.BeautifulSoup(body, "html.parser")
                # kill all script and style elements
                for script in soup_html(["script", "style"]):
                    script.extract()  # rip it out
                text_html = soup_html.get_text()
                stripped_lines = [
                    line.strip(' ')
                    for line in text_html.split('\n')
                    if line.strip() != ''
                ]
                body = stripped_lines
            elif body.startswith('<?xml'):
                body_source = 'xml'
                body = json.dumps(xmltodict.parse(body))

    details.update({
        'response_details': body,
        'response_details_source': body_source
    })

    # pprint(details)

    return details