def datalake():
    url = 'https://datalake.cert.orangecyberdefense.com/api/v2/auth/token/'
    auth_response = {
        "access_token": "12345",
        "refresh_token": "123456"
    }
    responses.add(responses.POST, url, json=auth_response, status=200)
    return Datalake(username='******', password='******')
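
# A minimal sketch (not part of the original test module) showing how the helper above is
# typically consumed. It assumes the test runs under the `responses` library's activate
# decorator so the mocked token endpoint is intercepted; the test name is illustrative.
@responses.activate
def test_datalake_helper_returns_client():
    dtl = datalake()
    assert isinstance(dtl, Datalake)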
def main(override_args=None): """Method to start the script""" # Load initial args parser = BaseScripts.start( 'Add tags and/or comments to a specified list of hashkeys.') parser.add_argument( 'hashkeys', help='hashkeys of the threat to add tags and/or the comment', nargs='*', ) parser.add_argument( '-i', '--input_file', help='hashkey txt file, with one hashkey by line', ) parser.add_argument( '-p', '--public', help='set the visibility to public', action='store_true', ) parser.add_argument( '--tags', nargs='+', help='add a list of tags', required=True, ) if override_args: args = parser.parse_args(override_args) else: args = parser.parse_args() if not args.hashkeys and not args.input_file: parser.error("either a hashkey or an input_file is required") hashkeys = set(args.hashkeys) if args.hashkeys else set() if args.input_file: retrieve_hashkeys_from_file(args.input_file, hashkeys) dtl = Datalake(env=args.env, log_level=args.loglevel) response_dict = post_tags(hashkeys, args.tags, args.public, dtl) if args.output: save_output(args.output, response_dict) logger.debug(f'Results saved in {args.output}\n') logger.debug(f'END: add_tags.py')


def test_invalid_credentials(caplog):
    url = 'https://datalake.cert.orangecyberdefense.com/api/v2/auth/token/'
    api_error_msg = "Wrong credentials provided"
    api_response = {'messages': api_error_msg}
    responses.add(responses.POST, url, json=api_response, status=401)

    with caplog.at_level(logging.ERROR):
        with pytest.raises(ValueError) as ve:
            Datalake(username='******', password='******')

    assert str(ve.value) == f'Could not login: {{"messages": "{api_error_msg}"}}'
    assert caplog.messages == [
        f'An error occurred while retrieving an access token, for URL: {url}\n'
        f'response of the API: {{"messages": "{api_error_msg}"}}'
    ]


def main(override_args=None):
    logger.debug(f'START: get_query_hash.py')

    # Load initial args
    parser = BaseScripts.start(
        'Retrieve a query hash from a query body (a json used for the Advanced Search).'
    )
    required_named = parser.add_argument_group('required arguments')
    required_named.add_argument(
        'query_body_path',
        help='path to the json file containing the query body',
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()

    # Load api_endpoints and tokens
    with open(args.query_body_path, 'r') as query_body_file:
        query_body = json.load(query_body_file)
    logger.debug(f'Retrieving query hash for query body: {query_body}')

    dtl = Datalake(env=args.env, log_level=args.loglevel)
    resp = dtl.AdvancedSearch.advanced_search_from_query_body(
        query_body, limit=0, offset=0, output=Output.JSON)
    if not resp or 'query_hash' not in resp:
        logger.error("Couldn't retrieve a query hash, is the query body valid ?")
        exit(1)
    query_hash = resp['query_hash']

    if args.output:
        with open(args.output, 'w') as output:
            output.write(query_hash)
        logger.info(f'Query hash saved in {args.output}')
    else:
        logger.info(f'Query hash associated: {query_hash}')
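
# Illustrative command-line usage for the script above (get_query_hash.py); the path is a
# placeholder and must point to a JSON file containing an Advanced Search query body:
#   python get_query_hash.py path/to/query_body.json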
def main(override_args=None): """Method to start the script""" # Load initial args parser = BaseScripts.start('Submit a new threat to Datalake from a file') required_named = parser.add_argument_group('required arguments') csv_controle = parser.add_argument_group('CSV control arguments') required_named.add_argument( '-i', '--input', help='read threats to add from FILE', required=True, ) required_named.add_argument( '-a', '--atom_type', help='set it to define the atom type', required=True, ) csv_controle.add_argument( '--is_csv', help='set if the file input is a CSV', action='store_true', ) csv_controle.add_argument( '-d', '--delimiter', help='set the delimiter of the CSV file', default=',', ) csv_controle.add_argument( '-c', '--column', help='select column of the CSV file, starting at 1', type=int, default=1, ) parser.add_argument( '-p', '--public', help='set the visibility to public', action='store_true', ) parser.add_argument( '-w', '--whitelist', help='set it to define the added threats as whitelist', action='store_true', ) parser.add_argument( '-t', '--threat_types', nargs='+', help= 'choose specific threat types and their score, like: ddos 50 scam 15', default=[], action='append', ) parser.add_argument( '--tag', nargs='+', help='add a list of tags', default=[], ) parser.add_argument( '--link', help='add link as external_analysis_link', nargs='+', ) parser.add_argument( '--permanent', help= 'sets override_type to permanent. Scores won\'t be updated by the algorithm. Default is temporary', action='store_true', ) parser.add_argument( '--lock', help= 'sets override_type to lock. Scores won\'t be updated by the algorithm for three months. Default is ' 'temporary', action='store_true', ) parser.add_argument( '--no-bulk', help= 'force an api call for each threats, useful to retrieve the details of threats created', action='store_true', ) if override_args: args = parser.parse_args(override_args) else: args = parser.parse_args() logger.debug(f'START: add_new_threats.py') if not args.threat_types and not args.whitelist: parser.error( "threat types is required if the atom is not for whitelisting") if args.permanent and args.lock: parser.error("Only one override type is authorized") if args.permanent: override_type = OverrideType.PERMANENT elif args.lock: override_type = OverrideType.LOCK else: override_type = OverrideType.TEMPORARY if args.is_csv: try: list_new_threats = load_csv(args.input, args.delimiter, args.column - 1) except ValueError as ve: logger.error(ve) exit() else: list_new_threats = load_list(args.input) if not list_new_threats: raise parser.error('No atom found in the input file.') list_new_threats = defang_threats(list_new_threats, args.atom_type) list_new_threats = list(OrderedDict.fromkeys( list_new_threats)) # removing duplicates while preserving order args.threat_types = flatten_list(args.threat_types) threat_types = parse_threat_types(args.threat_types) atom_type = AtomType[args.atom_type.upper()] dtl = Datalake(env=args.env, log_level=args.loglevel) spinner = Halo(text=f'Creating threats', spinner='dots') spinner.start() threat_response = dtl.Threats.add_threats(list_new_threats, atom_type, threat_types, override_type, args.whitelist, args.public, args.tag, args.link, args.no_bulk) spinner.stop() terminal_size = Endpoint._get_terminal_size() if args.no_bulk: for threat in threat_response: logger.info( f'{threat["hashkey"].ljust(terminal_size - 6, " ")} \x1b[0;30;42m OK \x1b[0m' ) else: failed = [] failed_counter = 0 created_counter = 0 for batch_res in threat_response: 
failed.extend(batch_res['failed']) for success in batch_res['success']: for val_created in success['created_atom_values']: created_counter += 1 logger.info( f'{val_created.ljust(terminal_size - 6, " ")} \x1b[0;30;42m OK \x1b[0m' ) for failed_obj in failed: for failed_atom_val in failed_obj['failed_atom_values']: failed_counter += 1 logger.info( f'Creation failed for value {failed_atom_val.ljust(terminal_size - 6, " ")} \x1b[0;30;4\ 1m KO \x1b[0m') logger.info( f'Number of batches: {len(threat_response)}\nCreated threats: {created_counter}\nFailed threat creation: {failed_counter}' ) if args.output: save_output(args.output, threat_response) logger.debug(f'Results saved in {args.output}\n') logger.debug(f'END: add_new_threats.py')
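
# Illustrative command-line usage for the script above (add_new_threats.py); the file names,
# atom types and threat-type scores are placeholders (the atom type must match a member of
# the AtomType enum):
#   python add_new_threats.py -i iocs.txt -a domain -t ddos 50 scam 15 --tag campaign-x --public
#   python add_new_threats.py -i iocs.csv -a ip --is_csv -d ';' -c 2 --whitelist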


def main(override_args=None):
    # Load initial args
    parser = BaseScripts.start('Lookup threats in Datalake')
    required_named = parser.add_argument_group('required arguments')
    csv_control = parser.add_argument_group('CSV control arguments')
    parser.add_argument(
        'threats',
        help='threats to lookup',
        nargs='*',
    )
    parser.add_argument(
        '-i',
        '--input',
        help='read threats to lookup from FILE',
    )
    parser.add_argument(
        '-td',
        '--threat_details',
        action='store_true',
        help='set if you also want to have access to the threat details',
    )
    parser.add_argument(
        '-ot',
        '--output_type',
        default='json',
        help='set to the output type desired {json,csv}. Default is json if not specified',
    )
    required_named.add_argument(
        '-a',
        '--atom_type',
        help='set it to define the atom type',
        required=True,
    )
    csv_control.add_argument(
        '--is_csv',
        help='set if the file input is a CSV',
        action='store_true',
    )
    csv_control.add_argument(
        '-d',
        '--delimiter',
        help='set the delimiter of the CSV file',
        default=',',
    )
    csv_control.add_argument(
        '-c',
        '--column',
        help='select column of the CSV file, starting at 1',
        type=int,
        default=1,
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    logger.debug(f'START: lookup_threats.py')

    if not args.threats and not args.input:
        parser.error("either a threat or an input_file is required")

    if args.output_type:
        try:
            args.output_type = BaseEngine.output_type2header(args.output_type)
        except ParserError as e:
            logger.exception(f'Exception raised while getting output type from headers # {str(e)}',
                             exc_info=False)
            exit(1)

    hashkey_only = not args.threat_details
    dtl = Datalake(env=args.env, log_level=args.loglevel)
    list_threats = list(args.threats) if args.threats else []
    if args.input:
        if args.is_csv:
            try:
                list_threats = list_threats + load_csv(args.input, args.delimiter, args.column - 1)
            except ValueError as ve:
                logger.error(ve)
                exit()
        else:
            list_threats = list_threats + load_list(args.input)

    full_response = {}
    atom_type = parse_atom_type_or_exit(args.atom_type)
    list_threats = list(OrderedDict.fromkeys(list_threats))  # removing duplicates while preserving order
    for threat in list_threats:
        response = dtl.Threats.lookup(threat, atom_type=atom_type, hashkey_only=hashkey_only)
        found = response.get('threat_found', True)
        text, color = boolean_to_text_and_color[found]
        logger.info('{}{} hashkey:{} {}\x1b[0m'.format(color, threat, response['hashkey'], text))
        full_response[threat] = response

    if args.output:
        if args.output_type == 'text/csv':
            full_response = CsvBuilder.create_look_up_csv(
                full_response,
                args.atom_type,
                has_details=args.threat_details,
            )
        save_output(args.output, full_response)
        logger.debug(f'Results saved in {args.output}\n')
    logger.debug(f'END: lookup_threats.py')
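
# Illustrative command-line usage for the script above (lookup_threats.py); the values are
# placeholders, and threats may also be read from a plain file or CSV with -i/--input:
#   python lookup_threats.py example.com -a domain -td
#   python lookup_threats.py -i threats.csv --is_csv -c 2 -a ip -ot csv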
def main(override_args=None): """Method to start the script""" logger.debug(f'START: get_threats_from_query_hash.py') # Load initial args parser = BaseScripts.start( 'Retrieve a list of response from a given query hash.') parser.add_argument( '--query_fields', help= 'fields to be retrieved from the threat (default: only the hashkey)\n' 'If an atom detail isn\'t present in a particular atom, empty string is returned.', nargs='+', default=['threat_hashkey'], ) parser.add_argument( '--list', help= 'Turn the output in a list (require query_fields to be a single element)', action='store_true', ) required_named = parser.add_argument_group('required arguments') required_named.add_argument( 'query_hash', help= 'the query hash from which to retrieve the response hashkeys or a path to the query body json file', ) if override_args: args = parser.parse_args(override_args) else: args = parser.parse_args() configure_logging(args.loglevel) if len(args.query_fields) > 1 and args.list: parser.error( "List output format is only available if a single element is queried (via query_fields)" ) query_body = {} query_hash = args.query_hash if len(query_hash) != 32 or os.path.exists(query_hash): try: with open(query_hash, 'r') as query_body_file: query_body = json.load(query_body_file) except FileNotFoundError: logger.error( f"Couldn't understand the given value as a query hash or path to query body: {query_hash}" ) exit(1) # Load api_endpoints and tokens dtl = Datalake(env=args.env, log_level=args.loglevel) logger.debug( f'Start to search for threat from the query hash:{query_hash}') spinner = None if logger.isEnabledFor(logging.INFO): spinner = Halo(text=f'Creating bulk task', spinner='dots') spinner.start() task = dtl.BulkSearch.create_task(query_body=query_body, query_hash=query_hash, query_fields=args.query_fields) if spinner: spinner.text = f'Waiting for bulk task {task.uuid} response' response = task.download_sync() original_count = response.get('count', 0) if spinner: spinner.succeed() spinner.info( f'Number of threat that have been retrieved: {original_count}') formatted_output = format_output(response, args.list) if args.output: with open(args.output, 'w') as output: output.write(formatted_output) else: logger.info(formatted_output) if args.output: logger.info(f'Threats saved in {args.output}') else: logger.info('Done')


def main(override_args=None):
    parser = BaseScripts.start('Gets threats from given query body or query hash.',
                               output_file_required=True)
    parser.add_argument('-i', '--input', help='read query body from a json file')
    parser.add_argument('--query-hash', help='sets the query hash for the advanced search')
    parser.add_argument(
        '-l',
        '--limit',
        help='defines how many items will be returned in one page slice. Accepted values: 0 to 5000, default is 20',
        type=int,
        default=20)
    parser.add_argument(
        '--offset',
        help='defines an index of the first requested item. Accepted values: 0 and bigger, default is 0.',
        type=int,
        default=0)
    parser.add_argument(
        '-ot',
        '--output-type',
        help='sets the output type desired {json, csv, stix, misp}. Default is json',
        default='json')
    parser.add_argument(
        '--ordering',
        help='threat field to filter on. To sort the results by relevance (if any "search" is applied), just skip '
             'this field. To use the reversed order, use minus, i.e. --ordering="-last_updated" in your command line.')
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    logger.debug(f'START: advanced_search.py')

    if bool(args.input) == bool(args.query_hash):
        raise ValueError('Either an input file with a query body or a query hash needs to be provided.')

    try:
        output_type = Output[args.output_type.upper()]
    except KeyError:
        logger.error('Not supported output, please use json, stix, misp or csv')
        exit(1)

    dtl = Datalake(env=args.env, log_level=args.loglevel)
    if args.input:
        query_body = load_json(args.input)
        resp = dtl.AdvancedSearch.advanced_search_from_query_body(query_body,
                                                                  limit=args.limit,
                                                                  offset=args.offset,
                                                                  output=output_type,
                                                                  ordering=args.ordering)
    else:
        resp = dtl.AdvancedSearch.advanced_search_from_query_hash(args.query_hash,
                                                                  limit=args.limit,
                                                                  offset=args.offset,
                                                                  output=output_type,
                                                                  ordering=args.ordering)

    save_output(args.output, resp)
    logger.info(f'\x1b[0;30;42m OK: MATCHING THREATS SAVED IN {args.output} \x1b[0m')
    logger.debug(f'END: advanced_search.py')
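
# Illustrative command-line usage for the script above (advanced_search.py); the query hash
# and file names are placeholders. Since BaseScripts.start is called with
# output_file_required=True, an output file is mandatory (assumed here to be exposed as
# -o/--output by BaseScripts):
#   python advanced_search.py --query-hash <query_hash> -l 100 -ot csv -o results.csv
#   python advanced_search.py -i query_body.json --ordering="-last_updated" -o results.json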
def main(override_args=None): """Method to start the script""" # Load initial args parser = BaseScripts.start( 'Edit scores of a specified list of ids (hashkeys)') parser.add_argument( 'hashkeys', help='hashkeys of the threat to edit score.', nargs='*', ) parser.add_argument( '-i', '--input_file', help='hashkey txt file, with one hashkey by line.', ) parser.add_argument( '-t', '--threat_types', nargs='+', help= 'choose specific threat types and their score, like: ddos 50 scam 15', default=[], action='append', ) parser.add_argument( '-w', '--whitelist', help= 'Whitelist the input, equivalent to setting all threat types at 0.', action='store_true', ) parser.add_argument( '--permanent', help= '''Permanent: all values will override any values provided by both newer and older IOCs. Newer IOCs with override_type permanent can still override old permanent changes. temporary: all values should override any values provided by older IOCs, but not newer ones.''', action='store_true', ) parser.add_argument( '--lock', help= 'sets override_type to lock. Scores won\'t be updated by the algorithm for three months. Default is ' 'temporary', action='store_true', ) if override_args: args = parser.parse_args(override_args) else: args = parser.parse_args() logger.debug(f'START: edit_score.py') if not args.hashkeys and not args.input_file: parser.error("either a hashkey or an input_file is required") if args.permanent and args.lock: parser.error("Only one override type is authorized") if args.permanent: override_type = OverrideType.PERMANENT elif args.lock: override_type = OverrideType.LOCK else: override_type = OverrideType.TEMPORARY if args.whitelist: parsed_threat_type = get_whitelist_threat_types() else: args.threat_types = flatten_list(args.threat_types) if not args.threat_types or len(args.threat_types) % 2 != 0: parser.error( "threat_types invalid ! should be like: ddos 50 scam 15") parsed_threat_type = parse_threat_types(args.threat_types) # removing duplicates while preserving order hashkeys = args.hashkeys if args.input_file: retrieve_hashkeys_from_file(args.input_file, hashkeys) if not hashkeys: raise parser.error('No hashkey found in the input file.') hashkeys_chunks = list( split_list( list(OrderedDict.fromkeys(hashkeys)) if hashkeys else [], 100)) dtl = Datalake(env=args.env, log_level=args.loglevel) response_list = [] for index, hashkeys in enumerate(hashkeys_chunks): try: dtl.Threats.edit_score_by_hashkeys(hashkeys, parsed_threat_type, override_type) except ValueError as e: logger.warning( f'\x1b[6;30;41mBATCH {str(index+1)}/{len(list(hashkeys_chunks))}: FAILED\x1b[0m' ) for hashkey in hashkeys: response_list.append(hashkey + ': FAILED') logger.warning(f'\x1b[6;30;41m{hashkey} : FAILED\x1b[0m') logger.warning(e) else: logger.info( f'\x1b[6;30;42mBATCH {str(index+1)}/{len(list(hashkeys_chunks))}: OK\x1b[0m' ) for hashkey in hashkeys: response_list.append(hashkey + ': OK') if args.output: save_output(args.output, response_list) logger.info(f'Results saved in {args.output}\n') logger.debug(f'END: edit_score.py')