def _send_request(url: str, method: str, headers: dict, data: dict) -> Response:
    """
    Send the correct http request to url from method [get, post, delete, patch, put].

    :param url: full endpoint URL to hit
    :param method: lowercase HTTP verb, one of get/post/delete/patch/put
    :param headers: headers sent as-is
    :param data: request body, json-encoded for every verb except get
    :raises TypeError: 'Unknown method to requests {method}' when the method is not one of the above
    :return: the raw requests Response
    """
    common_kwargs = {
        'url': url,
        'headers': headers,
    }
    if method == 'get':
        api_response = requests.get(**common_kwargs)
    elif method == 'post':
        api_response = requests.post(**common_kwargs, data=json.dumps(data))
    elif method == 'delete':
        api_response = requests.delete(**common_kwargs, data=json.dumps(data))
    elif method == 'patch':
        api_response = requests.patch(**common_kwargs, data=json.dumps(data))
    elif method == 'put':
        api_response = requests.put(**common_kwargs, data=json.dumps(data))
    else:
        logger.debug(
            'ERROR : Wrong requests, please only do [get, post, put, patch, delete] method'
        )
        # Bug fix: the previous code passed printf-style args to TypeError
        # ('... %s', method) which are never interpolated for exceptions;
        # format the message explicitly so it matches the documented text.
        raise TypeError(f'Unknown method to requests {method}')
    return api_response
def handle_bulk_task(self, task_uuid, retrieve_bulk_result_url, *, timeout,
                     additional_checks: List[Check] = None) -> Json:
    """
    Handle a generic bulk task, blocking until the task is done or the timeout is up

    :param task_uuid: uuid of the bulk task
    :param retrieve_bulk_result_url: endpoint to query, must contained a task_uuid field
    :param timeout: timeout after which a TimeoutError is raised
    :param additional_checks: functions to call on a potential json, if all checks return True, the Json is returned
    :return: a Json returned on HTTP 200 validating all additional_checks
    :raises TimeoutError: when no valid response arrived before the timeout
    """
    retrieve_bulk_result_url = retrieve_bulk_result_url.format(task_uuid=task_uuid)
    spinner = None
    if logger.isEnabledFor(logging.INFO):
        spinner = Halo(text=f'Waiting for bulk task {task_uuid} response', spinner='dots')
        spinner.start()
    start_time = time()
    back_off_time = 1  # seconds; doubled after each unsuccessful poll (capped below)
    json_response = None
    while not json_response:
        # Token is re-read each loop since it may be refreshed on a 401.
        headers = {'Authorization': self.token_manager.access_token}
        response = requests.get(url=retrieve_bulk_result_url,
                                headers=headers,
                                verify=self.requests_ssl_verify)
        if response.status_code == 200:
            potential_json_response = response.json()
            if additional_checks and not all(
                    check(potential_json_response) for check in additional_checks):
                continue  # the json isn't valid
            if spinner:
                spinner.succeed(f'bulk task {task_uuid} done')
            json_response = potential_json_response
        elif response.status_code == 401:
            logger.debug('Refreshing expired Token')
            self.token_manager.process_auth_error(response.json().get('messages'))
        elif time() - start_time + back_off_time < timeout:
            sleep(back_off_time)
            back_off_time = min(back_off_time * 2, self.OCD_DTL_MAX_BACK_OFF_TIME)
        else:
            if spinner:
                spinner.fail(f'bulk task {task_uuid} timeout')
            # Bug fix: logger.error() was called without a message, which itself
            # raises TypeError and masked the TimeoutError below.
            logger.error(f'Bulk task {task_uuid} timed out after {timeout}s')
            raise TimeoutError(
                f'No bulk result after waiting {timeout / 60:.0f} mins\n'
                f'task_uuid: "{task_uuid}"')
    if spinner:
        spinner.stop()
    return json_response
def _get_terminal_size() -> int:
    """Return the terminal width (in characters) for pretty print, or 80 as fallback."""
    try:
        terminal_size = os.get_terminal_size()
        if len(terminal_size) == 2:
            # Bug fix: index 1 of os.terminal_size is the *lines* count (~24);
            # the width is `columns` (index 0), which matches the 80-char fallback.
            return int(terminal_size.columns)
    except OSError:
        logger.debug(
            "Couldn't get terminal size, falling back to 80 char wide")
    return 80
def main(override_args=None):
    """Script entry point: attach a comment to a list of threat hashkeys."""
    # Build the CLI and collect arguments.
    parser = BaseScripts.start('Add tags and/or comments to a specified list of hashkeys.')
    parser.add_argument(
        'hashkeys',
        help='hashkeys of the threat to add tags and/or the comment',
        nargs='*',
    )
    parser.add_argument(
        '-i',
        '--input_file',
        help='hashkey txt file, with one hashkey by line',
    )
    parser.add_argument(
        '-p',
        '--public',
        help='set the visibility to public',
        action='store_true',
    )
    parser.add_argument(
        '--comment',
        help='add the given comment',
        required=True,
    )
    args = parser.parse_args(override_args) if override_args else parser.parse_args()
    configure_logging(args.loglevel)

    # Load api_endpoints and tokens, then build the comments endpoint.
    config = Config().load_config()
    tokens = TokenManager(config, environment=args.env)
    comments_engine = CommentsPost(config, args.env, tokens)

    if not args.hashkeys and not args.input_file:
        parser.error("either a hashkey or an input_file is required")

    hashkeys = set(args.hashkeys or [])
    if args.input_file:
        retrieve_hashkeys_from_file(args.input_file, hashkeys)

    response_dict = comments_engine.post_comments(
        hashkeys,
        args.comment,
        public=args.public,
    )

    if args.output:
        save_output(args.output, response_dict)
        logger.debug(f'Results saved in {args.output}\n')
    logger.debug('END: add_comments.py')
def post_tags(hashkeys, tags, public, dtl):
    """Tag every hashkey via the Datalake API and return per-hashkey status strings."""
    statuses = []
    for hashkey in hashkeys:
        try:
            dtl.Tags.add_to_threat(hashkey, tags, public)
        except ValueError as e:
            # Red banner for the failure, with the exception detail at debug level.
            logger.warning(f'\x1b[6;30;41m{hashkey}: FAILED\x1b[0m')
            logger.debug(f'\x1b[6;30;41m{hashkey}: FAILED : {str(e)}\x1b[0m')
            statuses.append(f'{hashkey}: FAILED')
        else:
            statuses.append(f'{hashkey}: OK')
            logger.info(f'\x1b[6;30;42m{hashkey}: OK\x1b[0m')
    return statuses
def main(override_args=None): """Method to start the script""" # Load initial args parser = BaseScripts.start( 'Add tags and/or comments to a specified list of hashkeys.') parser.add_argument( 'hashkeys', help='hashkeys of the threat to add tags and/or the comment', nargs='*', ) parser.add_argument( '-i', '--input_file', help='hashkey txt file, with one hashkey by line', ) parser.add_argument( '-p', '--public', help='set the visibility to public', action='store_true', ) parser.add_argument( '--tags', nargs='+', help='add a list of tags', required=True, ) if override_args: args = parser.parse_args(override_args) else: args = parser.parse_args() if not args.hashkeys and not args.input_file: parser.error("either a hashkey or an input_file is required") hashkeys = set(args.hashkeys) if args.hashkeys else set() if args.input_file: retrieve_hashkeys_from_file(args.input_file, hashkeys) dtl = Datalake(env=args.env, log_level=args.loglevel) response_dict = post_tags(hashkeys, args.tags, args.public, dtl) if args.output: save_output(args.output, response_dict) logger.debug(f'Results saved in {args.output}\n') logger.debug(f'END: add_tags.py')
def _post_comment(self, hashkey: str, comment: str, visibility: str = 'organization') -> dict:
    """Post a comment on the threat identified by hashkey and return the API response."""
    url = self.url.format(hashkey=hashkey)
    logger.debug(url)
    body = {
        'content': comment,
        'visibility': visibility,
    }
    return self.datalake_requests(url, 'post', self._post_headers(), body)
def main(override_args=None):
    """
    Script entry point: retrieve threats (as Json) for a list of hashkeys and save them.

    :param override_args: optional list of CLI arguments used instead of sys.argv
    """
    logger.debug('START: get_threats_by_hashkey.py')

    # Load initial args
    parser = BaseScripts.start(
        'Retrieve threats (as Json) from a list of ids (hashkeys)')
    parser.add_argument(
        'hashkeys',
        # Typo fix in user-facing help: 'retreive' -> 'retrieve'
        help='hashkeys of the threats to retrieve',
        nargs='*',
    )
    parser.add_argument(
        '-i',
        '--input_file',
        help='list of threats ids (hashkeys) that need to be retrieved',
    )
    parser.add_argument(
        '--lost',
        help='saved hashes that were not found',
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    configure_logging(args.loglevel)

    if not args.hashkeys and not args.input_file:
        parser.error("either a hashkey or an input_file is required")

    # The input file, when given, takes precedence over positional hashkeys.
    threats_list = load_list(args.input_file) if args.input_file else args.hashkeys
    logger.debug(f'TOTAL: {len(threats_list)} threats found')

    # Load api_endpoints and tokens
    endpoint_config = Config().load_config()
    token_manager = TokenManager(endpoint_config, environment=args.env)
    search_engine_threats = ThreatsSearch(endpoint_config, args.env, token_manager)
    list_threats, list_lost_hashes = search_engine_threats.get_json(threats_list)

    if args.output:
        save_output(args.output, list_threats)
        logger.debug(f'Threats JSON saved in {args.output}\n')
    if args.lost:
        save_output(args.lost, list_lost_hashes)
        logger.debug(f'Threats lost saved in {args.lost}\n')
    logger.debug('END: get_threats_by_hashkey.py')
def fetch_new_token(self):
    """Refresh the access token using the refresh token; re-authenticate from scratch if it expired too."""
    logger.debug('Token will be refreshed')
    response = requests.post(url=self.url_refresh,
                             headers={'Authorization': self.refresh_token})
    body = response.json()

    refresh_token_expired = (response.status_code == 401
                             and body.get('messages') == 'Token has expired')
    if refresh_token_expired:
        # Refresh token is also expired, we need to restart the authentication from scratch
        logger.debug('Refreshing the refresh token')
        self.get_token()
        return

    if 'access_token' in body:
        self.access_token = f'Token {body["access_token"]}'
        return

    # an error occurred
    logger.error(
        f'An error occurred while refreshing the refresh token, for URL: {self.url_refresh}\n'
        f'response of the API: {response.text}')
    raise ValueError(f'Could not refresh the token: {response.text}')
def main(override_args=None):
    """Script entry point: print or save the query hash matching a query body json file."""
    logger.debug('START: get_query_hash.py')

    # CLI definition.
    parser = BaseScripts.start(
        'Retrieve a query hash from a query body (a json used for the Advanced Search).'
    )
    required_named = parser.add_argument_group('required arguments')
    required_named.add_argument(
        'query_body_path',
        help='path to the json file containing the query body',
    )
    args = parser.parse_args(override_args) if override_args else parser.parse_args()

    # Load api_endpoints and tokens
    with open(args.query_body_path, 'r') as query_body_file:
        query_body = json.load(query_body_file)
    logger.debug(f'Retrieving query hash for query body: {query_body}')

    # A zero-result search (limit=0) is enough to obtain the hash.
    dtl = Datalake(env=args.env, log_level=args.loglevel)
    resp = dtl.AdvancedSearch.advanced_search_from_query_body(
        query_body, limit=0, offset=0, output=Output.JSON)
    if not resp or 'query_hash' not in resp:
        logger.error("Couldn't retrieve a query hash, is the query body valid ?")
        exit(1)
    query_hash = resp['query_hash']

    if args.output:
        with open(args.output, 'w') as output:
            output.write(query_hash)
        logger.info(f'Query hash saved in {args.output}')
    else:
        logger.info(f'Query hash associated: {query_hash}')
def datalake_requests(
    self,
    url: str,
    method: str,
    headers: dict,
    post_body: dict = None,
) -> Response:
    """
    Use it to request the API

    Retries up to SET_MAX_RETRY times, refreshing the auth token on 401/422,
    and returns the first 2xx Response.

    :param url: full URL to request
    :param method: lowercase http verb, forwarded to _send_request
    :param headers: request headers; the Authorization entry is (re)set here on every try
    :param post_body: optional request body forwarded to _send_request
    :return: the first successful (2xx) Response
    :raises ValueError: when all retries are exhausted
    """
    tries_left = self.SET_MAX_RETRY
    while tries_left > 0:
        # Recomputed on every iteration: the token may have been refreshed
        # by process_auth_error in a previous try.
        headers['Authorization'] = self.token_manager.access_token
        logger.debug(
            self._pretty_debug_request(url, method, post_body, headers))
        response = self._send_request(url, method, headers, post_body)
        logger.debug(f'API response:\n{str(response.text)}')
        if response.status_code == 401:
            logger.warning(
                'Token expired or Missing authorization header. Updating token'
            )
            self.token_manager.process_auth_error(
                response.json().get('messages'))
        elif response.status_code == 422:
            logger.warning('Bad authorization header. Updating token')
            logger.debug(f'422 HTTP code: {response.text}')
            self.token_manager.process_auth_error(
                response.json().get('messages'))
        elif response.status_code < 200 or response.status_code > 299:
            logger.error(
                f'API returned non 2xx response code : {response.status_code}\n{response.text}\n Retrying'
            )
        else:
            return response
        tries_left -= 1
    logger.error('Request failed')
    # NOTE(review): if SET_MAX_RETRY were ever <= 0 `response` would be unbound
    # here — this relies on SET_MAX_RETRY being a positive int. TODO confirm.
    raise ValueError(f'{response.status_code}: {response.text.strip()}')
def main(override_args=None):
    """
    Script entry point: submit new threats to Datalake from a plain or CSV file.

    :param override_args: optional list of CLI arguments used instead of sys.argv
    """
    # Load initial args
    parser = BaseScripts.start('Submit a new threat to Datalake from a file')
    required_named = parser.add_argument_group('required arguments')
    csv_controle = parser.add_argument_group('CSV control arguments')
    required_named.add_argument(
        '-i',
        '--input',
        help='read threats to add from FILE',
        required=True,
    )
    required_named.add_argument(
        '-a',
        '--atom_type',
        help='set it to define the atom type',
        required=True,
    )
    csv_controle.add_argument(
        '--is_csv',
        help='set if the file input is a CSV',
        action='store_true',
    )
    csv_controle.add_argument(
        '-d',
        '--delimiter',
        help='set the delimiter of the CSV file',
        default=',',
    )
    csv_controle.add_argument(
        '-c',
        '--column',
        help='select column of the CSV file, starting at 1',
        type=int,
        default=1,
    )
    parser.add_argument(
        '-p',
        '--public',
        help='set the visibility to public',
        action='store_true',
    )
    parser.add_argument(
        '-w',
        '--whitelist',
        help='set it to define the added threats as whitelist',
        action='store_true',
    )
    parser.add_argument(
        '-t',
        '--threat_types',
        nargs='+',
        help='choose specific threat types and their score, like: ddos 50 scam 15',
        default=[],
        action='append',
    )
    parser.add_argument(
        '--tag',
        nargs='+',
        help='add a list of tags',
        default=[],
    )
    parser.add_argument(
        '--link',
        help='add link as external_analysis_link',
        nargs='+',
    )
    parser.add_argument(
        '--permanent',
        help='sets override_type to permanent. Scores won\'t be updated by the algorithm. Default is temporary',
        action='store_true',
    )
    parser.add_argument(
        '--lock',
        help='sets override_type to lock. Scores won\'t be updated by the algorithm for three months. Default is '
        'temporary',
        action='store_true',
    )
    parser.add_argument(
        '--no-bulk',
        help='force an api call for each threats, useful to retrieve the details of threats created',
        action='store_true',
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    logger.debug(f'START: add_new_threats.py')

    # Scores are mandatory unless the atoms are being whitelisted.
    if not args.threat_types and not args.whitelist:
        parser.error(
            "threat types is required if the atom is not for whitelisting")
    if args.permanent and args.lock:
        parser.error("Only one override type is authorized")

    if args.permanent:
        override_type = OverrideType.PERMANENT
    elif args.lock:
        override_type = OverrideType.LOCK
    else:
        override_type = OverrideType.TEMPORARY

    # Load the atoms either from a CSV column or from a plain line-per-atom file.
    if args.is_csv:
        try:
            list_new_threats = load_csv(args.input, args.delimiter, args.column - 1)
        except ValueError as ve:
            logger.error(ve)
            exit()
    else:
        list_new_threats = load_list(args.input)
    if not list_new_threats:
        # NOTE(review): parser.error() exits the process itself, so this
        # `raise` is never actually reached with a value.
        raise parser.error('No atom found in the input file.')
    list_new_threats = defang_threats(list_new_threats, args.atom_type)
    list_new_threats = list(OrderedDict.fromkeys(
        list_new_threats))  # removing duplicates while preserving order
    args.threat_types = flatten_list(args.threat_types)
    threat_types = parse_threat_types(args.threat_types)
    atom_type = AtomType[args.atom_type.upper()]

    dtl = Datalake(env=args.env, log_level=args.loglevel)
    spinner = Halo(text=f'Creating threats', spinner='dots')
    spinner.start()
    threat_response = dtl.Threats.add_threats(list_new_threats, atom_type,
                                              threat_types, override_type,
                                              args.whitelist, args.public,
                                              args.tag, args.link,
                                              args.no_bulk)
    spinner.stop()
    terminal_size = Endpoint._get_terminal_size()
    if args.no_bulk:
        # One API response per threat: each entry carries its own hashkey.
        for threat in threat_response:
            logger.info(
                f'{threat["hashkey"].ljust(terminal_size - 6, " ")} \x1b[0;30;42m OK \x1b[0m'
            )
    else:
        # Bulk mode: responses are batches with 'success'/'failed' buckets.
        failed = []
        failed_counter = 0
        created_counter = 0
        for batch_res in threat_response:
            failed.extend(batch_res['failed'])
            for success in batch_res['success']:
                for val_created in success['created_atom_values']:
                    created_counter += 1
                    logger.info(
                        f'{val_created.ljust(terminal_size - 6, " ")} \x1b[0;30;42m OK \x1b[0m'
                    )
        for failed_obj in failed:
            for failed_atom_val in failed_obj['failed_atom_values']:
                failed_counter += 1
                logger.info(
                    f'Creation failed for value {failed_atom_val.ljust(terminal_size - 6, " ")} \x1b[0;30;41m KO \x1b[0m'
                )
        logger.info(
            f'Number of batches: {len(threat_response)}\nCreated threats: {created_counter}\nFailed threat creation: {failed_counter}'
        )

    if args.output:
        save_output(args.output, threat_response)
        logger.debug(f'Results saved in {args.output}\n')
    logger.debug(f'END: add_new_threats.py')
def main(override_args=None):
    """
    Script entry point: look up a list of threats and report whether each is known.

    :param override_args: optional list of CLI arguments used instead of sys.argv
    """
    # Load initial args
    parser = BaseScripts.start('Submit a new threat to Datalake from a file')
    required_named = parser.add_argument_group('required arguments')
    csv_control = parser.add_argument_group('CSV control arguments')
    parser.add_argument(
        'threats',
        help='threats to lookup',
        nargs='*',
    )
    parser.add_argument(
        '-i',
        '--input',
        help='read threats to add from FILE',
    )
    parser.add_argument(
        '-td',
        '--threat_details',
        action='store_true',
        help='set if you also want to have access to the threat details ',
    )
    parser.add_argument(
        '-ot',
        '--output_type',
        default='json',
        help='set to the output type desired {json,csv}. Default is json if not specified',
    )
    required_named.add_argument(
        '-a',
        '--atom_type',
        help='set it to define the atom type',
        required=True,
    )
    csv_control.add_argument(
        '--is_csv',
        help='set if the file input is a CSV',
        action='store_true',
    )
    csv_control.add_argument(
        '-d',
        '--delimiter',
        help='set the delimiter of the CSV file',
        default=',',
    )
    csv_control.add_argument(
        '-c',
        '--column',
        help='select column of the CSV file, starting at 1',
        type=int,
        default=1,
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    logger.debug(f'START: lookup_threats.py')

    if not args.threats and not args.input:
        parser.error("either a threat or an input_file is required")

    # Translate the CLI output type into the matching content-type header.
    if args.output_type:
        try:
            args.output_type = BaseEngine.output_type2header(args.output_type)
        except ParserError as e:
            logger.exception(
                f'Exception raised while getting output type from headers # {str(e)}',
                exc_info=False)
            exit(1)

    hashkey_only = not args.threat_details
    dtl = Datalake(env=args.env, log_level=args.loglevel)

    # Positional threats plus (optionally) the ones loaded from the input file.
    list_threats = list(args.threats) if args.threats else []
    if args.input:
        if args.is_csv:
            try:
                list_threats = list_threats + load_csv(
                    args.input, args.delimiter, args.column - 1)
            except ValueError as ve:
                logger.error(ve)
                exit()
        else:
            list_threats = list_threats + load_list(args.input)

    full_response = {}
    atom_type = parse_atom_type_or_exit(args.atom_type)
    list_threats = list(OrderedDict.fromkeys(
        list_threats))  # removing duplicates while preserving order
    for threat in list_threats:
        response = dtl.Threats.lookup(threat,
                                      atom_type=atom_type,
                                      hashkey_only=hashkey_only)
        # Missing 'threat_found' is treated as found — TODO confirm API contract.
        found = response.get('threat_found', True)
        text, color = boolean_to_text_and_color[found]
        logger.info('{}{} hashkey:{} {}\x1b[0m'.format(color, threat,
                                                       response['hashkey'],
                                                       text))
        full_response[threat] = response

    if args.output:
        if args.output_type == 'text/csv':
            full_response = CsvBuilder.create_look_up_csv(
                full_response,
                args.atom_type,
                has_details=args.threat_details,
            )
        save_output(args.output, full_response)
        logger.debug(f'Results saved in {args.output}\n')
    logger.debug(f'END: lookup_threats.py')
def main(override_args=None):
    """
    Script entry point: run a bulk search from a query hash (or query body file)
    and output the selected fields of the matching threats.

    :param override_args: optional list of CLI arguments used instead of sys.argv
    """
    logger.debug(f'START: get_threats_from_query_hash.py')

    # Load initial args
    parser = BaseScripts.start(
        'Retrieve a list of response from a given query hash.')
    parser.add_argument(
        '--query_fields',
        help='fields to be retrieved from the threat (default: only the hashkey)\n'
        'If an atom detail isn\'t present in a particular atom, empty string is returned.',
        nargs='+',
        default=['threat_hashkey'],
    )
    parser.add_argument(
        '--list',
        help='Turn the output in a list (require query_fields to be a single element)',
        action='store_true',
    )
    required_named = parser.add_argument_group('required arguments')
    required_named.add_argument(
        'query_hash',
        help='the query hash from which to retrieve the response hashkeys or a path to the query body json file',
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    configure_logging(args.loglevel)

    if len(args.query_fields) > 1 and args.list:
        parser.error(
            "List output format is only available if a single element is queried (via query_fields)"
        )

    query_body = {}
    query_hash = args.query_hash
    # A value that isn't 32 chars long (presumably the hash length — TODO confirm)
    # or that exists on disk is treated as a path to a query body json file.
    if len(query_hash) != 32 or os.path.exists(query_hash):
        try:
            with open(query_hash, 'r') as query_body_file:
                query_body = json.load(query_body_file)
        except FileNotFoundError:
            logger.error(
                f"Couldn't understand the given value as a query hash or path to query body: {query_hash}"
            )
            exit(1)

    # Load api_endpoints and tokens
    dtl = Datalake(env=args.env, log_level=args.loglevel)
    logger.debug(
        f'Start to search for threat from the query hash:{query_hash}')
    spinner = None
    if logger.isEnabledFor(logging.INFO):
        spinner = Halo(text=f'Creating bulk task', spinner='dots')
        spinner.start()

    task = dtl.BulkSearch.create_task(query_body=query_body,
                                      query_hash=query_hash,
                                      query_fields=args.query_fields)
    if spinner:
        spinner.text = f'Waiting for bulk task {task.uuid} response'
    # Blocks until the bulk task completes.
    response = task.download_sync()
    original_count = response.get('count', 0)
    if spinner:
        spinner.succeed()
        spinner.info(
            f'Number of threat that have been retrieved: {original_count}')

    formatted_output = format_output(response, args.list)
    if args.output:
        with open(args.output, 'w') as output:
            output.write(formatted_output)
    else:
        logger.info(formatted_output)

    if args.output:
        logger.info(f'Threats saved in {args.output}')
    else:
        logger.info('Done')
def main(override_args=None):
    """
    Script entry point: run an advanced search from a query body file or a
    query hash and save the matching threats to the mandatory output file.

    :param override_args: optional list of CLI arguments used instead of sys.argv
    :raises ValueError: when neither or both of --input / --query-hash are given
    """
    parser = BaseScripts.start(
        'Gets threats from given query body or query hash.',
        output_file_required=True)
    parser.add_argument('-i',
                        '--input',
                        help='read query body from a json file')
    parser.add_argument('--query-hash',
                        help='sets the query hash for the advanced search')
    parser.add_argument(
        '-l',
        '--limit',
        help='defines how many items will be returned in one page slice. Accepted values: 0 to 5000, default is 20',
        type=int,
        default=20)
    parser.add_argument(
        '--offset',
        help='defines an index of the first requested item. Accepted values: 0 and bigger, default is 0.',
        type=int,
        default=0)
    parser.add_argument(
        '-ot',
        '--output-type',
        help='sets the output type desired {json, csv, stix, misp}. Default is json',
        default='json')
    parser.add_argument(
        '--ordering',
        help='threat field to filter on. To sort the results by relevance (if any "search" is applied), just skip '
        'this field. To use the reversed order, use minus, i.e. --ordering="-last_updated" in your command line.'
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    logger.debug(f'START: advanced_search.py')

    # Exactly one of the two query sources must be provided (XOR).
    if bool(args.input) == bool(args.query_hash):
        raise ValueError(
            'Either an input file with a query body or a query hash needs to be provided.'
        )
    try:
        output_type = Output[args.output_type.upper()]
    except KeyError:
        logger.error(
            'Not supported output, please use json, stix, misp or csv')
        exit(1)

    dtl = Datalake(env=args.env, log_level=args.loglevel)
    if args.input:
        query_body = load_json(args.input)
        resp = dtl.AdvancedSearch.advanced_search_from_query_body(
            query_body,
            limit=args.limit,
            offset=args.offset,
            output=output_type,
            ordering=args.ordering)
    else:
        resp = dtl.AdvancedSearch.advanced_search_from_query_hash(
            args.query_hash,
            limit=args.limit,
            offset=args.offset,
            output=output_type,
            ordering=args.ordering)

    save_output(args.output, resp)
    logger.info(
        f'\x1b[0;30;42m OK: MATCHING THREATS SAVED IN {args.output} \x1b[0m')
    logger.debug(f'END: advanced_search.py')
class BaseEngine:
    """Common plumbing (config, auth, request wrapper) for the legacy per-endpoint engines."""

    # CLI output type -> HTTP Accept/Content-Type header value.
    ACCEPTED_HEADERS = {'json': 'application/json', 'csv': 'text/csv'}
    OCD_DTL_QUOTA_TIME = int(os.getenv('OCD_DTL_QUOTA_TIME', 1))
    OCD_DTL_REQUESTS_PER_QUOTA_TIME = int(
        os.getenv('OCD_DTL_REQUESTS_PER_QUOTA_TIME', 5))
    logger.debug(
        f'Throttle selected: {OCD_DTL_REQUESTS_PER_QUOTA_TIME} queries per {OCD_DTL_QUOTA_TIME}s'
    )

    Json = Union[
        dict, list]  # json like object that can be a dict or root level array

    def __init__(self, endpoint_config: dict, environment: str,
                 token_manager: TokenManager):
        self.endpoint_config = endpoint_config
        self.environment = environment
        self.requests_ssl_verify = suppress_insecure_request_warns(environment)
        self.url = self._build_url(endpoint_config, environment)
        self.token_manager = token_manager
        self.endpoint = Endpoint(endpoint_config, environment, token_manager)

    def datalake_requests(self,
                          url: str,
                          method: str,
                          headers: dict,
                          post_body: dict = None):
        """
        Wrapper around the new datalake_requests to keep compatibility with old scripts.

        Returns the raw text for csv responses, a dict/list for json ones,
        and {} when the underlying request failed.
        """
        try:
            response = self.endpoint.datalake_requests(url, method, headers,
                                                       post_body)
        except ValueError:
            logger.error(
                'Request failed: Will return nothing for this request')
            return {}

        if 'Content-Type' in response.headers and 'text/csv' in response.headers[
                'Content-Type']:
            return response.text
        try:
            return self._load_response(response)
        except JSONDecodeError:
            # NOTE(review): despite the message no retry happens here; the
            # method implicitly returns None in that case. TODO confirm intent.
            logger.error(
                'Request unexpectedly returned non dict value. Retrying')

    @staticmethod
    def output_type2header(value):
        """
        this method gets the CLI input arg value and generate the header content-type

        :param value: value to header
        :return: returns content-type header or raise an exception if there isn't an associated content-type value
        """
        if value.lower() in BaseEngine.ACCEPTED_HEADERS:
            return BaseEngine.ACCEPTED_HEADERS[value.lower()]
        raise parser.ParserError(
            f'{value.lower()} is not a valid. Use some of {BaseEngine.ACCEPTED_HEADERS.keys()}'
        )

    @staticmethod
    def _load_response(api_response: Response):
        """
        Load the API response from JSON format to dict.

        The endpoint for events is a bit special, the json.loads() doesn't
        work for the return format of the API. We get for this special case a
        return dict containing the length of the response
        i.e.: if length of response == 3 then: no events

        :param: api_response: dict
        :return: dict_response
        """
        if api_response.text.startswith('[') and api_response.text.endswith(
                ']\n'):
            # This condition is for the date-histogram endpoints
            dict_response = {'response_length': len(api_response.text)}
        else:
            dict_response = json.loads(api_response.text)
        return dict_response

    def _build_url(self, endpoint_config: dict, environment: str):
        """To be implemented by each subclass"""
        # Bug fix: `raise NotImplemented()` raised a TypeError because
        # NotImplemented is a constant, not a callable exception class.
        raise NotImplementedError()

    def _build_url_for_endpoint(self, endpoint_name):
        """Join the configured base url, api version and endpoint path."""
        base_url = urljoin(self.endpoint_config['main'][self.environment],
                           self.endpoint_config['api_version'])
        endpoints = self.endpoint_config['endpoints']
        return urljoin(base_url, endpoints[endpoint_name],
                       allow_fragments=True)
def main(override_args=None):
    """
    Script entry point: edit the scores of a list of threat hashkeys,
    processing them by batches of 100.

    :param override_args: optional list of CLI arguments used instead of sys.argv
    """
    # Load initial args
    parser = BaseScripts.start(
        'Edit scores of a specified list of ids (hashkeys)')
    parser.add_argument(
        'hashkeys',
        help='hashkeys of the threat to edit score.',
        nargs='*',
    )
    parser.add_argument(
        '-i',
        '--input_file',
        help='hashkey txt file, with one hashkey by line.',
    )
    parser.add_argument(
        '-t',
        '--threat_types',
        nargs='+',
        help='choose specific threat types and their score, like: ddos 50 scam 15',
        default=[],
        action='append',
    )
    parser.add_argument(
        '-w',
        '--whitelist',
        help='Whitelist the input, equivalent to setting all threat types at 0.',
        action='store_true',
    )
    parser.add_argument(
        '--permanent',
        help='''Permanent: all values will override any values provided by both newer and
            older IOCs. Newer IOCs with override_type permanent can still override old permanent changes.
            temporary: all values should override any values provided by older IOCs, but not newer ones.''',
        action='store_true',
    )
    parser.add_argument(
        '--lock',
        help='sets override_type to lock. Scores won\'t be updated by the algorithm for three months. Default is '
        'temporary',
        action='store_true',
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    logger.debug(f'START: edit_score.py')

    if not args.hashkeys and not args.input_file:
        parser.error("either a hashkey or an input_file is required")
    if args.permanent and args.lock:
        parser.error("Only one override type is authorized")

    if args.permanent:
        override_type = OverrideType.PERMANENT
    elif args.lock:
        override_type = OverrideType.LOCK
    else:
        override_type = OverrideType.TEMPORARY

    if args.whitelist:
        # Whitelisting = all threat types forced to score 0.
        parsed_threat_type = get_whitelist_threat_types()
    else:
        args.threat_types = flatten_list(args.threat_types)
        # Threat types come as alternating <type> <score> pairs, hence even length.
        if not args.threat_types or len(args.threat_types) % 2 != 0:
            parser.error(
                "threat_types invalid ! should be like: ddos 50 scam 15")
        parsed_threat_type = parse_threat_types(args.threat_types)
    # removing duplicates while preserving order
    hashkeys = args.hashkeys
    if args.input_file:
        retrieve_hashkeys_from_file(args.input_file, hashkeys)
        if not hashkeys:
            # NOTE(review): parser.error() exits the process itself, so this
            # `raise` is never actually reached with a value.
            raise parser.error('No hashkey found in the input file.')
    # Batches of 100 hashkeys, deduplicated while preserving order.
    hashkeys_chunks = list(
        split_list(
            list(OrderedDict.fromkeys(hashkeys)) if hashkeys else [], 100))

    dtl = Datalake(env=args.env, log_level=args.loglevel)
    response_list = []
    for index, hashkeys in enumerate(hashkeys_chunks):
        try:
            dtl.Threats.edit_score_by_hashkeys(hashkeys, parsed_threat_type,
                                               override_type)
        except ValueError as e:
            logger.warning(
                f'\x1b[6;30;41mBATCH {str(index+1)}/{len(list(hashkeys_chunks))}: FAILED\x1b[0m'
            )
            for hashkey in hashkeys:
                response_list.append(hashkey + ': FAILED')
                logger.warning(f'\x1b[6;30;41m{hashkey} : FAILED\x1b[0m')
            logger.warning(e)
        else:
            logger.info(
                f'\x1b[6;30;42mBATCH {str(index+1)}/{len(list(hashkeys_chunks))}: OK\x1b[0m'
            )
            for hashkey in hashkeys:
                response_list.append(hashkey + ': OK')

    if args.output:
        save_output(args.output, response_list)
        logger.info(f'Results saved in {args.output}\n')
    logger.debug(f'END: edit_score.py')
class Endpoint:
    """Low-level, throttled HTTP access to the Datalake API with retry and token refresh."""

    OCD_DTL_QUOTA_TIME = int(os.getenv('OCD_DTL_QUOTA_TIME', 1))
    OCD_DTL_REQUESTS_PER_QUOTA_TIME = int(
        os.getenv('OCD_DTL_REQUESTS_PER_QUOTA_TIME', 5))
    logger.debug(
        f'Throttle selected: {OCD_DTL_REQUESTS_PER_QUOTA_TIME} queries per {OCD_DTL_QUOTA_TIME}s'
    )

    SET_MAX_RETRY = 3  # attempts before datalake_requests gives up

    def __init__(self, endpoint_config: dict, environment: str,
                 token_manager: TokenManager):
        self.endpoint_config = endpoint_config
        self.environment = environment
        self.terminal_size = self._get_terminal_size()
        self.token_manager = token_manager
        # The former duplicated `self.SET_MAX_RETRY = 3` instance assignment was
        # removed: the class attribute above already provides the same value.

    @staticmethod
    def _get_terminal_size() -> int:
        """Return the terminal width (in characters) for pretty print, or 80 as fallback."""
        try:
            terminal_size = os.get_terminal_size()
            if len(terminal_size) == 2:
                # Bug fix: index 1 of os.terminal_size is the *lines* count;
                # the width is `columns` (index 0), matching the 80-char fallback.
                return int(terminal_size.columns)
        except OSError:
            logger.debug(
                "Couldn't get terminal size, falling back to 80 char wide")
        return 80

    @throttle(
        period=OCD_DTL_QUOTA_TIME,
        call_per_period=OCD_DTL_REQUESTS_PER_QUOTA_TIME,
    )
    def datalake_requests(
        self,
        url: str,
        method: str,
        headers: dict,
        post_body: dict = None,
    ) -> Response:
        """
        Use it to request the API.

        Retries up to SET_MAX_RETRY times, refreshing the auth token on 401/422,
        and returns the first 2xx Response.

        :raises ValueError: when all retries are exhausted
        """
        tries_left = self.SET_MAX_RETRY
        while tries_left > 0:
            # Recomputed each try: the token may have been refreshed below.
            headers['Authorization'] = self.token_manager.access_token
            logger.debug(
                self._pretty_debug_request(url, method, post_body, headers))
            response = self._send_request(url, method, headers, post_body)
            logger.debug(f'API response:\n{str(response.text)}')
            if response.status_code == 401:
                logger.warning(
                    'Token expired or Missing authorization header. Updating token'
                )
                self.token_manager.process_auth_error(
                    response.json().get('messages'))
            elif response.status_code == 422:
                logger.warning('Bad authorization header. Updating token')
                logger.debug(f'422 HTTP code: {response.text}')
                self.token_manager.process_auth_error(
                    response.json().get('messages'))
            elif response.status_code < 200 or response.status_code > 299:
                logger.error(
                    f'API returned non 2xx response code : {response.status_code}\n{response.text}\n Retrying'
                )
            else:
                return response
            tries_left -= 1
        logger.error('Request failed')
        raise ValueError(f'{response.status_code}: {response.text.strip()}')

    @staticmethod
    def _post_headers(output=Output.JSON) -> dict:
        """headers for POST endpoints"""
        return {'Accept': output.value, 'Content-Type': 'application/json'}

    @staticmethod
    def _get_headers(output=Output.JSON) -> dict:
        """headers for GET endpoints"""
        return {'Accept': output.value}

    @staticmethod
    def _send_request(url: str, method: str, headers: dict,
                      data: dict) -> Response:
        """
        Send the correct http request to url from method [get, post, delete, patch, put].
        Raise a TypeError 'Unknown method to requests {method}' when the method is not one of the above.
        """
        common_kwargs = {
            'url': url,
            'headers': headers,
        }
        if method == 'get':
            api_response = requests.get(**common_kwargs)
        elif method == 'post':
            api_response = requests.post(**common_kwargs,
                                         data=json.dumps(data))
        elif method == 'delete':
            api_response = requests.delete(**common_kwargs,
                                           data=json.dumps(data))
        elif method == 'patch':
            api_response = requests.patch(**common_kwargs,
                                          data=json.dumps(data))
        elif method == 'put':
            api_response = requests.put(**common_kwargs,
                                        data=json.dumps(data))
        else:
            logger.debug(
                'ERROR : Wrong requests, please only do [get, post, put, patch, delete] method'
            )
            # Bug fix: printf-style args are never interpolated by TypeError;
            # format the message so it matches the documented text.
            raise TypeError(f'Unknown method to requests {method}')
        return api_response

    def _pretty_debug_request(self, url: str, method: str, data: dict,
                              headers: dict):
        """Build a multi-line, terminal-wide debug dump of an outgoing request."""
        debug = ('-' * self.terminal_size + 'DEBUG - datalake_requests:\n' +
                 f' - url: \n{url}\n' + f' - method: \n{method}\n' +
                 f' - headers: \n{headers}\n' + f' - data: \n{data}\n' +
                 f' - token: \n{self.token_manager.access_token}\n' +
                 f' - refresh_token: \n{self.token_manager.refresh_token}\n' +
                 '-' * self.terminal_size)
        return debug

    def _build_url_for_endpoint(self, endpoint_name):
        """Join the configured base url, api version and endpoint path."""
        base_url = urljoin(self.endpoint_config['main'][self.environment],
                           self.endpoint_config['api_version'])
        endpoints = self.endpoint_config['endpoints']
        return urljoin(base_url, endpoints[endpoint_name],
                       allow_fragments=True)