def main(override_args=None):
    """Add tags and/or comments to a specified list of threat hashkeys.

    Hashkeys are taken from positional arguments and/or an input file
    (one hashkey per line). The API response is optionally saved to the
    path given by ``--output``.

    :param override_args: optional list of CLI arguments, used instead of
        ``sys.argv`` (useful for tests / programmatic invocation)
    """
    starter = BaseScripts()
    # Fixed: sibling scripts log a START marker; this one was missing it.
    logger.debug('START: add_tags.py')

    # Load initial args
    parser = starter.start(
        'Add tags and/or comments to a specified list of hashkeys.')
    parser.add_argument(
        'hashkeys',
        help='hashkeys of the threat to add tags and/or the comment',
        nargs='*',
    )
    parser.add_argument(
        '-i',
        '--input_file',
        help='hashkey txt file, with one hashkey by line',
    )
    parser.add_argument(
        '-p',
        '--public',
        help='set the visibility to public',
        action='store_true',
    )
    parser.add_argument(
        '--tags',
        nargs='+',
        help='add a list of tags',
        required=True,
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()

    # Load api_endpoints and tokens
    endpoint_config, main_url, tokens = starter.load_config(args)
    post_engine_add_comments = TagsPost(endpoint_config, args.env, tokens)
    if not args.hashkeys and not args.input_file:
        parser.error("either a hashkey or an input_file is required")

    hashkeys = set(args.hashkeys) if args.hashkeys else set()
    if args.input_file:
        # Mutates `hashkeys` in place, adding the keys read from the file.
        retrieve_hashkeys_from_file(args.input_file, hashkeys)

    response_dict = post_engine_add_comments.post_tags(
        hashkeys,
        args.tags,
        public=args.public,
    )

    if args.output:
        starter.save_output(args.output, response_dict)
        logger.debug(f'Results saved in {args.output}\n')
    # Fixed: constant message, no f-string needed (ruff F541).
    logger.debug('END: add_tags.py')
def main(override_args=None):
    """Retrieve threats (as JSON) from a list of ids (hashkeys).

    Hashkeys come from positional arguments or an input file. Found
    threats can be saved with ``--output``; hashkeys that were not found
    can be saved with ``--lost``.

    :param override_args: optional list of CLI arguments, used instead of
        ``sys.argv`` (useful for tests / programmatic invocation)
    """
    starter = BaseScripts()
    logger.debug('START: get_threats_by_hashkey.py')

    # Load initial args
    parser = starter.start(
        'Retrieve threats (as Json) from a list of ids (hashkeys)')
    parser.add_argument(
        'hashkeys',
        # Fixed help typo: "retreive" -> "retrieve"
        help='hashkeys of the threats to retrieve',
        nargs='*',
    )
    parser.add_argument(
        '-i',
        '--input_file',
        help='list of threats ids (hashkeys) that need to be retrieved',
    )
    parser.add_argument(
        '--lost',
        help='saved hashes that were not found',
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()

    if not args.hashkeys and not args.input_file:
        parser.error("either a hashkey or an input_file is required")

    # Positional hashkeys are used as-is; an input file takes precedence.
    threats_list = starter._load_csv(
        args.input_file) if args.input_file else args.hashkeys

    # Load api_endpoints and tokens
    endpoint_url, main_url, tokens = starter.load_config(args)
    logger.debug(f'TOTAL: {len(threats_list)} threats found')
    url_threats = main_url + endpoint_url['endpoints']['threats']
    search_engine_threats = ThreatsSearch(url_threats, main_url, tokens)
    list_threats, list_lost_hashes = search_engine_threats.get_json(
        threats_list)

    if args.output:
        starter.save_output(args.output, list_threats)
        logger.debug(f'Threats JSON saved in {args.output}\n')
    if args.lost:
        starter.save_output(args.lost, list_lost_hashes)
        logger.debug(f'Threats lost saved in {args.lost}\n')
    # Fixed: constant message, no f-string needed (ruff F541).
    logger.debug('END: get_threats_by_hashkey.py')
def main(override_args=None):
    """Submit new threats to Datalake from a file (bulk API by default).

    Threats are read from a plain-text file or a CSV (``--is_csv``),
    defanged, de-duplicated, then submitted either in bulk or one API
    call per threat (``--no-bulk``). Results are optionally saved with
    ``--output``.

    :param override_args: optional list of CLI arguments, used instead of
        ``sys.argv`` (useful for tests / programmatic invocation)
    """
    starter = BaseScripts()
    # Load initial args
    parser = starter.start('Submit a new threat to Datalake from a file')
    required_named = parser.add_argument_group('required arguments')
    csv_controle = parser.add_argument_group('CSV control arguments')
    required_named.add_argument(
        '-i',
        '--input',
        help='read threats to add from FILE',
        required=True,
    )
    required_named.add_argument(
        '-a',
        '--atom_type',
        help='set it to define the atom type',
        required=True,
    )
    csv_controle.add_argument(
        '--is_csv',
        help='set if the file input is a CSV',
        action='store_true',
    )
    csv_controle.add_argument(
        '-d',
        '--delimiter',
        help='set the delimiter of the CSV file',
        default=',',
    )
    csv_controle.add_argument(
        '-c',
        '--column',
        help='select column of the CSV file, starting at 1',
        type=int,
        default=1,
    )
    parser.add_argument(
        '-p',
        '--public',
        help='set the visibility to public',
        action='store_true',
    )
    parser.add_argument(
        '-w',
        '--whitelist',
        help='set it to define the added threats as whitelist',
        action='store_true',
    )
    parser.add_argument(
        '-t',
        '--threat_types',
        nargs='+',
        help='choose specific threat types and their score, like: ddos 50 scam 15',
        default=[],
    )
    parser.add_argument(
        '--tag',
        nargs='+',
        help='add a list of tags',
        default=[],
    )
    parser.add_argument(
        '--link',
        help='add link as external_analysis_link',
        nargs='+',
    )
    parser.add_argument(
        '--permanent',
        help='sets override_type to permanent. Scores won\'t be updated by the algorithm. Default is temporary',
        action='store_true',
    )
    parser.add_argument(
        '--no-bulk',
        help='force an api call for each threats, useful to retrieve the details of threats created',
        action='store_true',
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    logger.debug('START: add_new_threats.py')

    if not args.threat_types and not args.whitelist:
        parser.error(
            "threat types is required if the atom is not for whitelisting")
    permanent = 'permanent' if args.permanent else 'temporary'

    if args.is_csv:
        try:
            list_new_threats = starter._load_csv(args.input, args.delimiter,
                                                 args.column - 1)
        except ValueError as ve:
            logger.error(ve)
            # Fixed: bare exit() exited with status 0 even though the CSV
            # could not be loaded; signal failure to the caller instead.
            exit(1)
    else:
        list_new_threats = starter._load_list(args.input)
    list_new_threats = defang_threats(list_new_threats, args.atom_type)
    # Removing duplicates while preserving order.
    list_new_threats = list(OrderedDict.fromkeys(list_new_threats))
    threat_types = ThreatsPost.parse_threat_types(args.threat_types) or []

    # Load api_endpoints and tokens
    endpoint_config, main_url, tokens = starter.load_config(args)
    if args.no_bulk:
        post_engine_add_threats = ThreatsPost(endpoint_config, args.env,
                                              tokens)
        response_dict = post_engine_add_threats.add_threats(
            list_new_threats, args.atom_type, args.whitelist, threat_types,
            args.public, args.tag, args.link, permanent)
    else:
        post_engine_add_threats = BulkThreatsPost(endpoint_config, args.env,
                                                  tokens)
        hashkeys = post_engine_add_threats.add_bulk_threats(
            list_new_threats, args.atom_type, args.whitelist, threat_types,
            args.public, args.tag, args.link, permanent)
        # NOTE(review): 'haskeys' looks like a typo for 'hashkeys', but it is
        # kept as-is because saved output files / downstream consumers may
        # already rely on this key — confirm before renaming.
        response_dict = {'haskeys': list(hashkeys)}

    if args.output:
        starter.save_output(args.output, response_dict)
        logger.debug(f'Results saved in {args.output}\n')
    # Fixed: constant message, no f-string needed (ruff F541).
    logger.debug('END: add_new_threats.py')
def main(override_args=None):
    """Submit new threats to Datalake from a file (manual-threats endpoint).

    Threats are read from a plain-text file or a CSV (``--is_csv``) and
    posted one request via the ``threats-manual`` endpoint. Results are
    optionally saved with ``--output``.

    :param override_args: optional list of CLI arguments, used instead of
        ``sys.argv`` (useful for tests / programmatic invocation)
    """
    starter = BaseScripts()
    # Load initial args
    parser = starter.start('Submit a new threat to Datalake from a file')
    required_named = parser.add_argument_group('required arguments')
    csv_controle = parser.add_argument_group('CSV control arguments')
    required_named.add_argument(
        '-i',
        '--input',
        help='read threats to add from FILE',
        required=True,
    )
    required_named.add_argument(
        '-a',
        '--atom_type',
        help='set it to define the atom type',
        required=True,
    )
    csv_controle.add_argument(
        '--is_csv',
        help='set if the file input is a CSV',
        action='store_true',
    )
    csv_controle.add_argument(
        '-d',
        '--delimiter',
        help='set the delimiter of the CSV file',
        default=',',
    )
    csv_controle.add_argument(
        '-c',
        '--column',
        help='select column of the CSV file, starting at 1',
        type=int,
        default=1,
    )
    parser.add_argument(
        '-p',
        '--public',
        help='set the visibility to public',
        action='store_true',
    )
    parser.add_argument(
        '-w',
        '--whitelist',
        help='set it to define the added threats as whitelist',
        action='store_true',
    )
    parser.add_argument(
        '-t',
        '--threat_types',
        nargs='+',
        help='choose specific threat types and their score, like: ddos 50 scam 15',
        default=[],
    )
    parser.add_argument(
        '--tag',
        nargs='+',
        help='add a list of tags',
        default=[],
    )
    parser.add_argument(
        '--link',
        help='add link as external_analysis_link',
        nargs='+',
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    logger.debug('START: add_new_threats.py')

    if not args.threat_types and not args.whitelist:
        parser.error(
            "threat types is required if the atom is not for whitelisting")

    # Load api_endpoints and tokens
    endpoint_url, main_url, tokens = starter.load_config(args)
    url_manual_threats = main_url + endpoint_url['endpoints']['threats-manual']
    post_engine_add_threats = AddThreatsPost(url_manual_threats, main_url,
                                             tokens)
    if args.is_csv:
        try:
            list_new_threats = starter._load_csv(args.input, args.delimiter,
                                                 args.column - 1)
        except ValueError as ve:
            # Added: handle a malformed CSV the same way the bulk submission
            # script does (log the error, exit non-zero) instead of crashing
            # with a raw traceback.
            logger.error(ve)
            exit(1)
    else:
        list_new_threats = starter._load_list(args.input)
    threat_types = AddThreatsPost.parse_threat_types(args.threat_types) or []
    response_dict = post_engine_add_threats.add_threats(
        list_new_threats,
        args.atom_type,
        args.whitelist,
        threat_types,
        args.public,
        args.tag,
        args.link,
    )

    if args.output:
        starter.save_output(args.output, response_dict)
        logger.debug(f'Results saved in {args.output}\n')
    # Fixed: constant message, no f-string needed (ruff F541).
    logger.debug('END: add_new_threats.py')
def main(override_args=None):
    """Look up a list of threats of a single atom type in Datalake.

    Threats come from positional arguments and/or an input file
    (``--is_csv`` for CSV input). Output is JSON or CSV
    (``--output_type``) and can be saved with ``--output``.

    :param override_args: optional list of CLI arguments, used instead of
        ``sys.argv`` (useful for tests / programmatic invocation)
    """
    starter = BaseScripts()
    # Load initial args
    # Fixed: description was copy-pasted from the threat-submission script.
    parser = starter.start('Lookup a list of threats in Datalake')
    required_named = parser.add_argument_group('required arguments')
    csv_controle = parser.add_argument_group('CSV control arguments')
    parser.add_argument(
        'threats',
        help='threats to lookup',
        nargs='*',
    )
    parser.add_argument(
        '-i',
        '--input',
        # Fixed: help said "add" although this script performs lookups.
        help='read threats to lookup from FILE',
    )
    parser.add_argument(
        '-td',
        '--threat_details',
        action='store_true',
        help='set if you also want to have access to the threat details ',
    )
    parser.add_argument(
        '-ot',
        '--output_type',
        default='json',
        help='set to the output type desired {json,csv}. Default is json if not specified',
    )
    required_named.add_argument(
        '-a',
        '--atom_type',
        help='set it to define the atom type',
        required=True,
    )
    csv_controle.add_argument(
        '--is_csv',
        help='set if the file input is a CSV',
        action='store_true',
    )
    csv_controle.add_argument(
        '-d',
        '--delimiter',
        help='set the delimiter of the CSV file',
        default=',',
    )
    csv_controle.add_argument(
        '-c',
        '--column',
        help='select column of the CSV file, starting at 1',
        type=int,
        default=1,
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    logger.debug('START: lookup_threats.py')

    if not args.threats and not args.input:
        parser.error("either a threat or an input_file is required")
    if args.atom_type not in PostEngine.authorized_atom_value:
        parser.error("atom type must be in {}".format(','.join(
            PostEngine.authorized_atom_value)))

    args.output_type = output_type2header(args.output_type, parser)
    hashkey_only = not args.threat_details
    # Load api_endpoints and tokens
    endpoint_config, main_url, tokens = starter.load_config(args)
    get_engine_lookup_threats = LookupThreats(endpoint_config, args.env,
                                              tokens)

    list_threats = list(args.threats) if args.threats else []
    if args.input:
        if args.is_csv:
            try:
                list_threats = list_threats + starter._load_csv(
                    args.input, args.delimiter, args.column - 1)
            except ValueError as ve:
                logger.error(ve)
                # Fixed: bare exit() exited with status 0 even though the CSV
                # could not be loaded; signal failure to the caller instead.
                exit(1)
        else:
            list_threats = list_threats + starter._load_list(args.input)
    # Removing duplicates while preserving order.
    list_threats = list(OrderedDict.fromkeys(list_threats))
    response_dict = get_engine_lookup_threats.lookup_threats(
        list_threats, args.atom_type, hashkey_only, args.output_type)

    if args.output:
        starter.save_output(args.output, response_dict)
        logger.debug(f'Results saved in {args.output}\n')
    # Fixed: constant message, no f-string needed (ruff F541).
    logger.debug('END: lookup_threats.py')
def main(override_args=None):
    """Get threats or hashkeys from given atom types and atom values.

    Atoms come from typed CLI flags (one per supported atom type),
    untyped positional values (typed server-side via the atom-values
    extractor), and/or input files tagged ``atomtype:path``. Results are
    pretty-printed and optionally saved with ``--output``.

    :param override_args: optional list of CLI arguments, used instead of
        ``sys.argv`` (useful for tests / programmatic invocation)
    """
    # Load initial args
    starter = BaseScripts()
    parser = starter.start(
        'Gets threats or hashkeys from given atom types and atom values.')
    supported_atom_types = parser.add_argument_group('Supported Atom Types')
    parser.add_argument(
        'untyped_atoms',
        help='untyped atom values to lookup. Useful when you do not know what is the atom type',
        nargs='*',
    )
    for atom_type in ATOM_TYPES_FLAGS:
        supported_atom_types.add_argument(
            f'--{atom_type}',
            action='append',
            help=f'set a single {atom_type} atom type with its value',
        )
    parser.add_argument(
        '-ad',
        '--atom-details',
        dest='hashkey_only',
        default=True,
        action='store_false',
        help='returns threats full details',
    )
    parser.add_argument(
        '-i',
        '--input',
        action='append',
        help='read threats to add from FILE. [atomtype:path/to/file.txt]',
    )
    parser.add_argument(
        '-ot',
        '--output-type',
        help='set to the output type desired {json,csv}. Default is json',
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    logger.debug('START: bulk_lookup_threats.py')

    # create output type header
    accept_header = {'Accept': None}
    if args.output_type:
        try:
            accept_header['Accept'] = BaseEngine.output_type2header(
                args.output_type)
        except ParserError as e:
            logger.exception(
                f'Exception raised while getting output type headers # {str(e)}',
                exc_info=False)
            exit(1)

    # to gather all typed atoms passed by arguments and input files
    typed_atoms = {}

    # set validations flags regarding the presence or absence of cli arguments
    # Fixed idiom: `False if x is None else True` -> direct boolean expression.
    has_file = args.input is not None
    has_flag = False
    for flag in ATOM_TYPES_FLAGS:
        atom_values = getattr(args, flag)
        if atom_values is not None:
            typed_atoms[flag] = atom_values
            has_flag = True

    # validate that at least there is one untyped atom or one atom or one input file
    if (not has_flag and not has_file and not args.untyped_atoms) or (
            SUBCOMMAND_NAME in args.untyped_atoms):
        parser.error(
            "you must provide at least one of following: untyped atom, atom type, input file."
        )

    # process input files
    if has_file:
        for input_file in args.input:
            file_atom_type, filename = get_atom_type_from_filename(input_file)
            logger.debug(f'file (unknown) was recognized as {file_atom_type}')
            if file_atom_type == UNTYPED_ATOM_TYPE:
                args.untyped_atoms += starter._load_list(filename)
            else:
                typed_atoms.setdefault(file_atom_type,
                                       []).extend(starter._load_list(filename))

    # load api_endpoints and tokens
    endpoints_config, main_url, tokens = starter.load_config(args)
    post_engine_bulk_lookup_threats = BulkLookupThreats(
        endpoints_config, args.env, tokens)
    post_engine_atom_values_extractor = AtomValuesExtractor(
        endpoints_config, args.env, tokens)

    # lookup for atom types
    if args.untyped_atoms:
        atoms_values_extractor_response = post_engine_atom_values_extractor.atom_values_extract(
            args.untyped_atoms)
        if atoms_values_extractor_response['found'] > 0:
            typed_atoms = join_dicts(
                typed_atoms, atoms_values_extractor_response['results'])
        else:
            logger.warning('none of your untyped atoms could be typed')

        # find out what atoms couldn't be typed for printing them
        if atoms_values_extractor_response['not_found'] > 0:
            # Fixed: iterate the values directly instead of .items() with an
            # unused value plus a redundant dict re-index (ruff PERF102).
            # Net effect is unchanged: drop every untyped atom that the
            # extractor managed to type.
            for typed_values in atoms_values_extractor_response[
                    'results'].values():
                args.untyped_atoms = [
                    untyped_atom for untyped_atom in args.untyped_atoms
                    if untyped_atom not in typed_values
                ]
            logger.warning(
                f'\x1b[6;37;43m{"#" * 60} UNTYPED ATOMS {"#" * 47}\x1b[0m')
            logger.warning('\n'.join(args.untyped_atoms))
            logger.warning('')

    response = post_engine_bulk_lookup_threats.bulk_lookup_threats(
        threats=typed_atoms,
        additional_headers=accept_header,
        hashkey_only=args.hashkey_only)
    pretty_print(response, args.output_type)

    if args.output:
        starter.save_output(args.output, response)
        logger.debug(f'Results saved in {args.output}\n')
    # Fixed: END marker previously logged 'lookup_threats.py' (copy-paste
    # from the non-bulk script), mislabelling this script in the logs.
    logger.debug('END: bulk_lookup_threats.py')
def main(override_args=None):
    """Edit scores of a specified list of ids (hashkeys).

    Hashkeys come from positional arguments and/or an input file; scores
    are given as alternating ``<threat_type> <score>`` pairs via
    ``--threat_types``. Results are optionally saved with ``--output``.

    :param override_args: optional list of CLI arguments, used instead of
        ``sys.argv`` (useful for tests / programmatic invocation)
    """
    starter = BaseScripts()
    # Load initial args
    parser = starter.start('Edit scores of a specified list of ids (hashkeys)')
    parser.add_argument(
        'hashkeys',
        help='hashkeys of the threat to edit score.',
        nargs='*',
    )
    parser.add_argument(
        '-i',
        '--input_file',
        help='hashkey txt file, with one hashkey by line.',
    )
    parser.add_argument(
        '-t',
        '--threat_types',
        nargs='+',
        help='Choose specific threat types and their score, like: ddos 50 scam 15.',
    )
    parser.add_argument(
        '--permanent',
        help='''Permanent: all values will override any values provided by both newer and older IOCs. Newer IOCs with override_type permanent can still override old permanent changes. temporary: all values should override any values provided by older IOCs, but not newer ones.''',
        action='store_true',
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    # Fixed: constant message, no f-string needed (ruff F541).
    logger.debug('START: edit_score.py')

    if not args.hashkeys and not args.input_file:
        parser.error("either a hashkey or an input_file is required")
    # Scores are pairs, so an odd-length (or missing) list cannot be parsed.
    if not args.threat_types or len(args.threat_types) % 2 != 0:
        parser.error("threat_types invalid ! should be like: ddos 50 scam 15")
    parsed_threat_type = AddThreatsPost.parse_threat_types(args.threat_types)

    hashkeys = set(args.hashkeys) if args.hashkeys else set()
    if args.input_file:
        # Mutates `hashkeys` in place, adding the keys read from the file.
        retrieve_hashkeys_from_file(args.input_file, hashkeys)

    # Load api_endpoints and tokens
    endpoint_url, main_url, tokens = starter.load_config(args)
    url_threats = main_url + endpoint_url['endpoints']['threats']
    post_engine_edit_score = ThreatsScoringPost(url_threats, main_url, tokens)
    response_dict = post_engine_edit_score.post_new_score_from_list(
        hashkeys,
        parsed_threat_type,
        'permanent' if args.permanent else 'temporary',
    )

    if args.output:
        starter.save_output(args.output, response_dict)
        logger.info(f'Results saved in {args.output}\n')
    logger.debug('END: edit_score.py')