def set_object_id(self, src, dest, crits_id=None, edge_id=None):
    """Record (or refresh) a crits<->edge id mapping for src/dest.

    When a mapping document already exists only its 'modified'
    timestamp is bumped; otherwise a new document is inserted with
    both 'created' and 'modified' set to now.
    """
    now = util_.nowutc()
    existing = self.get_object_id(src, dest,
                                  crits_id=crits_id, edge_id=edge_id)
    if existing:
        # already mapped: just touch the modification time
        self.collection.update(existing, {'$set': {'modified': now}})
        return
    # no mapping yet: insert a fresh document
    mapping = {'src': src,
               'dest': dest,
               'crits_id': crits_id,
               'edge_id': edge_id,
               'created': now,
               'modified': now}
    self.collection.insert(mapping)
def taxii_poll(config, src, dest, timestamp=None):
    '''pull stix from edge via taxii

    Polls the taxii collection configured for *src* for content in the
    window (timestamp, now]; when *timestamp* is falsy the window starts
    at the epoch (first-ever sync).

    Returns a 4-tuple (latest, incidents, indicators, observables):
    *latest* is the end of the polled window and the remaining items are
    dicts accumulated from the poll response's content blocks.  On a
    taxii StatusMessage (error) the dicts are empty — callers unpack the
    result unconditionally, so a well-formed tuple is always returned.
    '''
    site = config['edge']['sites'][src]
    client = tc.HttpClient()
    client.setUseHttps(site['taxii']['ssl'])
    client.setAuthType(client.AUTH_BASIC)
    client.setAuthCredentials(
        {'username': site['taxii']['user'],
         'password': site['taxii']['pass']})
    if not timestamp:
        earliest = util_.epoch_start()
    else:
        earliest = timestamp
    latest = util_.nowutc()
    poll_request = tm10.PollRequest(
        message_id=tm10.generate_message_id(),
        feed_name=site['taxii']['collection'],
        exclusive_begin_timestamp_label=earliest,
        inclusive_end_timestamp_label=latest,
        content_bindings=[t.CB_STIX_XML_11])
    http_response = client.callTaxiiService2(
        site['host'], site['taxii']['path'],
        t.VID_TAXII_XML_10, poll_request.to_xml(),
        port=site['taxii']['port'])
    taxii_message = t.get_message_from_http_response(http_response,
                                                     poll_request.message_id)
    # bug fix: initialize up front so the error path still returns a
    # well-formed 4-tuple instead of leaving these unbound / returning
    # None (the caller unpacks four values unconditionally)
    incidents = dict()
    indicators = dict()
    observables = dict()
    if isinstance(taxii_message, tm10.StatusMessage):
        config['logger'].error(log_.log_messages['polling_error'].format(
            type_='taxii', error=taxii_message.message))
    elif isinstance(taxii_message, tm10.PollResponse):
        for content_block in taxii_message.content_blocks:
            (incidents_, indicators_, observables_) = \
                process_taxii_content_blocks(config, content_block)
            incidents.update(incidents_)
            indicators.update(indicators_)
            observables.update(observables_)
    return(latest, incidents, indicators, observables)
def edge2crits(config, src, dest, daemon=False, now=None, last_run=None):
    '''sync an edge instance with crits'''
    # work out the window for this run: use caller-supplied values or
    # fall back to "now" and the last recorded e2c sync in the db
    if not now:
        now = util_.nowutc()
    if not last_run:
        last_run = config['db'].get_last_sync(src=src, dest=dest,
                                              direction='e2c')
    config['logger'].info(log_.log_messages['start_sync'].format(
        type_='edge', last_run=str(last_run), src=src, dest=dest))
    # zero out the tally counters for this run
    endpoints = ['ips', 'domains', 'samples', 'emails', 'indicators',
                 'relationships', 'events']
    tally = dict()
    tally['all'] = {'incoming': 0, 'processed': 0}
    for endpoint in endpoints:
        tally[endpoint] = {'incoming': 0, 'processed': 0}
    config['edge_tally'] = tally
    # poll for new edge data and push it into crits...
    (latest, incidents, indicators, observables) = \
        taxii_poll(config, src, dest, last_run)
    process_observables(config, src, dest, observables)
    process_incidents(config, src, dest, incidents)
    process_indicators(config, src, dest, indicators)
    process_relationships(config, src, dest)

    def report(key, label):
        # emit incoming / failed / processed log lines for one bucket,
        # skipping zero counts (same behavior as the per-bucket checks)
        counts = config['edge_tally'][key]
        failed = counts['incoming'] - counts['processed']
        if counts['incoming'] > 0:
            config['logger'].info(
                log_.log_messages['incoming_tally'].format(
                    count=counts['incoming'], type_=label,
                    src='edge', dest='crits'))
        if failed > 0:
            config['logger'].info(
                log_.log_messages['failed_tally'].format(
                    count=failed, type_=label,
                    src='edge', dest='crits'))
        if counts['processed'] > 0:
            config['logger'].info(
                log_.log_messages['processed_tally'].format(
                    count=counts['processed'], type_=label,
                    src='edge', dest='crits'))

    for endpoint in endpoints:
        report(endpoint, endpoint)
    report('all', 'total')
    # save state to disk for next run...
    if config['daemon']['debug']:
        poll_interval = \
            config['edge']['sites'][src]['taxii']['poll_interval']
        next_run = str(now + datetime.timedelta(seconds=poll_interval))
        config['logger'].debug(log_.log_messages['saving_state'].format(
            next_run=next_run))
    if daemon:
        # daemon mode: hand the caller the timestamp for the next round
        return(util_.nowutc())
    # one-shot mode: persist the sync time for the next invocation
    config['db'].set_last_sync(src=src, dest=dest, direction='e2c',
                               timestamp=now)
    return(None)
def _log_inbox_error(config, crits_id):
    # log a crits->edge inbox failure for one object id
    config['logger'].info(
        log_.log_messages['obj_inbox_error'].format(
            src_type='crits', id_=crits_id, dest_type='edge'))


def _log_crits_tally(config, key, label):
    # emit incoming / failed / processed log lines for one tally
    # bucket, skipping zero counts
    counts = config['crits_tally'][key]
    if counts['incoming'] > 0:
        config['logger'].info(log_.log_messages['incoming_tally'].format(
            count=counts['incoming'], type_=label,
            src='crits', dest='edge'))
    failed = counts['incoming'] - counts['processed']
    if failed > 0:
        config['logger'].info(log_.log_messages['failed_tally'].format(
            count=failed, type_=label, src='crits', dest='edge'))
    if counts['processed'] > 0:
        config['logger'].info(log_.log_messages['processed_tally'].format(
            count=counts['processed'], type_=label,
            src='crits', dest='edge'))


def _crits2edge_one(config, src, dest, endpoint, crits_id, xmlns_name):
    """Push one crits object to edge: poll it, convert it to stix,
    inbox it, and record the crits/edge id mapping.

    Updates config['crits_tally'] counters as a side effect.  Returns
    True when the object made it all the way through; failures are
    logged and return False.
    """
    (id_, json_) = crits_poll(config, src, endpoint, crits_id,)
    if endpoint == 'indicators':
        tally_key = 'indicators'
        obj = json2indicator(config, src, dest, endpoint, json_, id_)
        inbox_kwargs = {'src': src, 'crits_id': endpoint + ':' + crits_id}
    elif endpoint == 'events':
        tally_key = 'events'
        obj = json2incident(config, src, dest, endpoint, json_, id_)
        inbox_kwargs = {'src': src, 'crits_id': endpoint + ':' + crits_id}
    else:
        # observable endpoints (ips/domains/samples/emails)
        tally_key = endpoint
        obj = json2observable(config, src, dest, endpoint, json_, crits_id)
        # NOTE(review): the original observables branch inboxed without
        # src/crits_id, unlike the indicator/event branches — preserved
        # as-is; confirm whether that omission was intentional
        inbox_kwargs = {}
    config['crits_tally'][tally_key]['incoming'] += 1
    config['crits_tally']['all']['incoming'] += 1
    if not obj:
        _log_inbox_error(config, crits_id)
        return False
    stix_ = stix_pkg(config, src, endpoint, obj, dest=dest)
    if not stix_:
        _log_inbox_error(config, crits_id)
        return False
    success = edge_.taxii_inbox(config, dest, stix_, **inbox_kwargs)
    if not success:
        _log_inbox_error(config, crits_id)
        return False
    # track the related crits/edge ids (by src/dest)
    config['db'].set_object_id(src, dest, edge_id=obj.id_,
                               crits_id=(xmlns_name + ':' + endpoint +
                                         '-' + crits_id))
    config['crits_tally'][tally_key]['processed'] += 1
    config['crits_tally']['all']['processed'] += 1
    return True


def crits2edge(config, src, dest, daemon=False, now=None, last_run=None):
    '''sync a crits instance with edge'''
    xmlns_name = config['edge']['sites'][dest]['stix']['xmlns_name']
    # check if (and when) we synced src and dest...
    if not now:
        now = util_.nowutc()
    if not last_run:
        last_run = config['db'].get_last_sync(src=src, dest=dest,
                                              direction='c2e')
    config['logger'].info(
        log_.log_messages['start_sync'].format(
            type_='crits', last_run=last_run, src=src, dest=dest))
    endpoints = ['ips', 'domains', 'samples', 'emails', 'indicators',
                 'events']
    # setup the tally counters
    config['crits_tally'] = dict()
    config['crits_tally']['all'] = {'incoming': 0, 'processed': 0}
    for endpoint in endpoints:
        config['crits_tally'][endpoint] = {'incoming': 0, 'processed': 0}
    # fetch the ids changed since last_run and push each object through
    for endpoint in endpoints:
        for crits_id in fetch_crits_object_ids(config, src, endpoint,
                                               last_run):
            _crits2edge_one(config, src, dest, endpoint, crits_id,
                            xmlns_name)
    # report per-endpoint tallies, then the grand total
    for endpoint in endpoints:
        _log_crits_tally(config, endpoint, endpoint)
    _log_crits_tally(config, 'all', 'total')
    # save state to disk for next run...
    if config['daemon']['debug']:
        poll_interval = config['crits']['sites'][src]['api']['poll_interval']
        config['logger'].debug(
            log_.log_messages['saving_state'].format(
                next_run=str(now + datetime.timedelta(
                    seconds=poll_interval))))
    if not daemon:
        config['db'].set_last_sync(src=src, dest=dest, direction='c2e',
                                   timestamp=now)
        return(None)
    else:
        return(util_.nowutc())