def main(): options, args = parse_arguments() query = build_query(options, args) api = RFAPI(options.token) substitute_fields = ['attributes'] output_columns = [ 'id', 'momentum', 'positive', 'negative', 'canonical.id', 'type', 'document.id', 'document.published', 'document.downloaded', 'start', 'stop', 'document.url', 'document.title', 'document.sourceId.id', 'document.sourceId.name', 'document.sourceId.media_type', 'document.sourceId.topic', 'document.sourceId.country', 'fragment', 'attributes' ] entity_columns = ['id', 'name', 'hits', 'type', 'momentum', 'attributes'] out = csv.DictWriter(sys.stdout, output_columns, extrasaction='ignore') if query.get('aggregate') or query.get('output', {}).get('count'): res = api.query(query) print res return if options.header: out.writerow(dict(zip(output_columns, output_columns))) if options.entityfile: entityout = csv.DictWriter(open(options.entityfile, 'w'), entity_columns, extrasaction='ignore') entityout.writerow(dict(zip(entity_columns, entity_columns))) for res in api.paged_query(query): for i in res['instances']: i['positive'] = i.get('attributes', {}).get('positive', 0.0) i['negative'] = i.get('attributes', {}).get('negative', 0.0) out.writerow( encode_instance( flatten_instance(i, res['entities'], substitute_fields))) if options.entityfile: entities = pack_entity_attributes(res['entities'], entity_columns) for e in entities: #Here we reuse the instance formatting code to format entities for output. entityout.writerow( encode_instance(flatten_instance(e, res['entities'], []))) if not options.page: break
def __init__(self, token, iocs, entity_type, mode='core'):
    '''
    Parameters
    ----------
    token : str
        Recorded Future API token
    iocs : list or dict
        List of IOCs to enrich or dict of IOCs keyed by name with the
        value as the RFID.
    entity_type : {"IpAddress", "Hash", "InternetDomainName"}
        Name of Recorded Future entity type for IOC.
    mode : {"core", "related", "debug"}
        Subset of features to return with enrichment. "core" is default.

    Raises
    ------
    ValueError
        If ``mode`` is not one of the supported values, or ``iocs`` is
        neither a list nor a dict.
    '''
    # Validate inputs before doing any work or mutating state.
    if mode not in ('core', 'related', 'debug'):
        raise ValueError(
            '"mode" must be one of ("core", "related", "debug"). Input: %s.'
            % mode)
    self.rfqapi = RFAPI(token)
    self.response = collections.OrderedDict()
    # need all features early for scoring; they're removed later
    # need to test whether this can be avoided
    # Bug fix: copy the class-level feature dict instead of aliasing it —
    # the previous code's update() calls mutated self._FEATURES['core']
    # in place, corrupting it for every subsequent instance.
    keys = dict(self._FEATURES['core'])
    keys.update(self._FEATURES['debug'])
    if mode in ('related', 'debug'):
        keys.update(self._FEATURES['related'])
    self.mode = mode
    self.entity_type = entity_type
    if isinstance(iocs, list):
        self.iocs = self._get_rfids(iocs)
    elif isinstance(iocs, dict):
        self.iocs = iocs
    else:
        raise ValueError('"iocs" must be list or dict.')
    # Seed one response template per IOC; Name/RFID/EntityType are filled
    # immediately, all other features start at their default values.
    for ioc in self.iocs:
        new_resp = {}
        for key in keys:
            new_resp[key] = keys[key]
            if key == 'Name':
                new_resp[key] = ioc
            elif key == 'RFID':
                new_resp[key] = self.iocs[ioc]
            elif key == 'EntityType':
                new_resp[key] = self.entity_type
        self.response[ioc] = new_resp
    self.keys = keys
def __init__(self, default_fields=None, fav_locations=None, cahe_folder=CACHE_DIR):
    """Set up the scorer with field defaults, favorite locations and a cache.

    NOTE(review): the ``cahe_folder`` spelling is an existing typo in the
    public keyword interface and is kept for caller compatibility.
    """
    super(HouseScore, self).__init__()
    self.cahe_folder = cahe_folder
    self.rfapi = RFAPI(cahe_folder=cahe_folder)
    # Normalize optional arguments to their defaults, then assign once.
    if default_fields is None:
        default_fields = HouseScore.DEFAULTS.copy()
    self.default_fields = default_fields
    if fav_locations is None:
        fav_locations = HouseScore.LoadFavorits(
            os.path.join(SCRIPT_DIR, "FavoriteLocations.json"))
    self.fav_locations = fav_locations
def main(): # Construct a RFAPI query object rfqapi = RFAPI(TOKEN) # Query for the metadata mdata_result = rfqapi.paged_query(q) # Loop over all the metadata and each metadata attributes for metadata in mdata_result: mdata_types = metadata['types'] for md_type in mdata_types: # Print each Root Metadata Type parent_type="" if 'parent' in md_type: parent_type = str(md_type['parent']) print md_type['name']+"("+parent_type+")" # Loop over attributes in this metadata type and print their corresponding types for md_attr_list in md_type['attrs']: print_attributes(md_attr_list)
def get_all_iocs(token, e_type, index_min, index_max):
    '''Gets all entities of type e_type found between index_min and index_max
    '''
    client = RFAPI(token)
    # Aggregate query: count event instances per entity of the requested
    # type, restricted to documents indexed inside the given window.
    agg_query = {
        "instance": {
            "type": "Event",
            "attributes": [{
                "entity": {
                    "type": e_type
                }
            }],
            "document": {
                "indexed": {
                    "min": index_min,
                    "max": index_max
                }
            }
        },
        "output": {
            "count": {
                "axis": [{
                    "name": "attributes.entities",
                    "type": e_type,
                    "aspect": "all"
                }],
                "values": ["instances"]
            }
        }
    }
    result = client.query(agg_query)
    # Each aggregate key packs name and RFID together; unpack and map
    # entity name -> RFID (the third parsed component is unused).
    return {
        name: rfid
        for name, rfid, _ in (rf_agg_name_parser(key)
                              for key in result["counts"][0].keys())
    }
for row in csv_fd: risk = int(row['Risk']) if risk >= args.ip_risk_floor: ip_form = row['Name'] if '/' in ip_form: # We don't want to include CIDR ranges. continue print('\t'.join([ ip_form, 'Intel::ADDR', meta_source, intel_summ_link('ip:' + ip_form), do_notice, '-' ])) c += 1 # Hashes. api = RFAPI(args.token) hash_query = { "cluster": { "data_group": "Hash", "limit": 10000, "attributes": [{ "name": "stats.metrics.riskScore", "range": { "gte": args.hash_risk_floor } }] }, "output": { "exclude": ["stats.entity_lists"],
def main(): options, args = parse_arguments() query = build_query(options, args) api = RFAPI(options.token) res = api.query(query) print res
"not": { "ip": "192.168.0.0/16" } }, { "not": { "ip": "127.0.0.1" } }, { "not": { "ip": "0.0.0.0" } }], "limit": 10 }, "output": { "exclude": ["stats"], "inline_entities": True } } # Using RFAPI module, run query # Note: To pull back all results, use rfqapi.paged_query(q) # and a higher limit. rfqapi = RFAPI(token) result = rfqapi.query(q) # Display the results (in this case, limit is 1) for res in result['events']: print "Event: \n" print str(res) + '\n'
def __init__(self, token):
    """Store an authenticated API client built from *token*.

    Parameters
    ----------
    token : str
        Recorded Future API token — presumably the same token format the
        other wrappers in this codebase pass to RFAPI; confirm with RFAPI docs.
    """
    self.rfapi = RFAPI(token)