def analyze(ip, results):
    """Query Shodan for *ip*, tagging it and linking ASN/hostname/ISP nodes.

    Stores the raw Shodan result on *results* and attaches a
    'shodan_query' context to the IP the first time it is analyzed.
    Returns the list of links created.
    """
    links = set()
    shodan_data = ShodanApi.fetch(ip, results.settings['shodan_api_key'])
    results.update(raw=pformat(shodan_data))

    if shodan_data.get('tags') is not None:
        ip.tag(shodan_data['tags'])

    if shodan_data.get('asn') is not None:
        asn_node = Text.get_or_create(value=shodan_data['asn'])
        links.update(ip.active_link_to(asn_node, 'asn#', 'Shodan Query'))

    if shodan_data.get('hostnames') is not None:
        for name in shodan_data['hostnames']:
            host_node = Hostname.get_or_create(value=name)
            links.update(
                host_node.active_link_to(ip, 'A record', 'Shodan Query'))

    if shodan_data.get('isp') is not None:
        isp_node = Company.get_or_create(name=shodan_data['isp'])
        links.update(ip.active_link_to(isp_node, 'hosting', 'Shodan Query'))

    # Only attach the context once per observable.
    has_context = any(
        context['source'] == 'shodan_query' for context in ip.context)
    if not has_context:
        # Remove the data part (Shodan Crawler Data, etc.)
        shodan_data.pop("data", None)
        shodan_data['source'] = 'shodan_query'
        ip.add_context(shodan_data)

    return list(links)
def analyze(observable, results):
    """Fetch DomainTools whois history for a registrable domain.

    Subdomains are skipped: whois history only exists for the registrable
    (second-level) domain. Links the observable to registrar, registrant
    and registrant-email nodes for every historical record and returns the
    list of links created.
    """
    links = set()
    if extract(observable.value).subdomain != '':
        # Nothing to query for subdomains.
        return list(links)

    data = DomainToolsApi.get(
        "/{}/whois/history".format(observable.value), results.settings)
    results.update(raw=json.dumps(data, indent=2))

    for record in data['response']['history']:
        registration = record['whois']['registration']
        created = datetime.strptime(registration['created'], "%Y-%m-%d")
        expires = datetime.strptime(registration['expires'], "%Y-%m-%d")

        registrar_node = Company.get_or_create(name=registration['registrar'])
        registrant_node = Text.get_or_create(
            value=record['whois']['registrant'])
        links.update(observable.link_to(
            registrar_node, 'Registrar', 'DomainTools', created, expires))
        links.update(observable.link_to(
            registrant_node, 'Registrant', 'DomainTools', created, expires))

        # Parse the raw whois record to extract the registrant email.
        parsed = parse_raw_whois([record['whois']['record']], normalized=True)
        email = get_value_at(parsed, 'contacts.registrant.email')
        if email:
            email_node = Email.get_or_create(value=email)
            links.update(observable.link_to(
                email_node, 'Registrant Email', 'DomainTools',
                created, expires))

    return list(links)
def _process_asn_data(page, observable):
    """Link ASN number, ASN name and server header from a urlscan.io page.

    Args:
        page: urlscan.io result dict with a 'page' sub-dict.
        observable: the observable the page data belongs to.

    Returns:
        list of links created.
    """
    links = set()
    if page['page'].get('asn'):
        asn = AutonomousSystem.get_or_create(
            value=page['page']['asn'].replace("AS", ""))
        links.update(asn.active_link_to(observable, 'asn#', 'UrlScanIo Query'))
    if page['page'].get('asnname'):
        asnname = Text.get_or_create(value=page['page']['asnname'])
        # BUG FIX: source label was the garbled 'UrlScanIoQuerycanIo Query';
        # normalized to the label used by the other links in this function.
        links.update(
            asnname.active_link_to(observable, 'asn_name', 'UrlScanIo Query'))
    if page['page'].get('server'):
        server = Text.get_or_create(value=page['page']['server'])
        links.update(
            server.active_link_to(observable, 'server', 'UrlScanIo Query'))
    return list(links)
def _process_asn_data(page, observable):
    """Link ASN number, ASN name and server header from a urlscan.io page.

    Args:
        page: urlscan.io result dict with a "page" sub-dict.
        observable: the observable the page data belongs to.

    Returns:
        list of links created.
    """
    links = set()
    if page["page"].get("asn"):
        asn = AutonomousSystem.get_or_create(
            value=page["page"]["asn"].replace("AS", ""))
        links.update(
            asn.active_link_to(observable, "asn#", "UrlScanIo Query"))
    if page["page"].get("asnname"):
        asnname = Text.get_or_create(value=page["page"]["asnname"])
        # BUG FIX: source label was the garbled "UrlScanIoQuerycanIo Query";
        # normalized to the label used by the other links in this function.
        links.update(
            asnname.active_link_to(observable, "asn_name", "UrlScanIo Query"))
    if page["page"].get("server"):
        server = Text.get_or_create(value=page["page"]["server"])
        links.update(
            server.active_link_to(observable, "server", "UrlScanIo Query"))
    return list(links)
def analyze(observable, results):
    """Fetch DomainTools whois history for a registrable domain.

    Subdomains are skipped: whois history only exists for the registrable
    (second-level) domain. Links the observable to registrar, registrant
    and registrant-email nodes for every historical record.

    Returns:
        list of links created.
    """
    links = set()
    parts = tldextract_parser(observable.value)
    # Only query for naked (second-level) domains.
    if parts.subdomain == "":
        data = DomainToolsApi.get(
            "/{}/whois/history".format(observable.value), results.settings
        )
        results.update(raw=json.dumps(data, indent=2))
        for record in data["response"]["history"]:
            created = datetime.strptime(
                record["whois"]["registration"]["created"], "%Y-%m-%d"
            )
            expires = datetime.strptime(
                record["whois"]["registration"]["expires"], "%Y-%m-%d"
            )
            registrar = Company.get_or_create(
                name=record["whois"]["registration"]["registrar"]
            )
            registrant = Text.get_or_create(value=record["whois"]["registrant"])
            links.update(
                observable.link_to(
                    registrar, "Registrar", "DomainTools", created, expires
                )
            )
            links.update(
                observable.link_to(
                    registrant, "Registrant", "DomainTools", created, expires
                )
            )
            # Parse the raw whois record to extract the registrant email.
            parsed = parse_raw_whois([record["whois"]["record"]], normalized=True)
            email = get_value_at(parsed, "contacts.registrant.email")
            if email:
                email = Email.get_or_create(value=email)
                links.update(
                    observable.link_to(
                        email, "Registrant Email", "DomainTools", created, expires
                    )
                )
    return list(links)
def whois_links(observable, data):
    """Build links from a whois record to registrar/contact/NS nodes.

    Args:
        observable: the observable the whois record belongs to.
        data: whois record dict; "createdDate", "expiresDate",
            "registrarName", "contact" and "nameServers" are all optional
            and may be explicit nulls.

    Returns:
        list of links created.
    """
    links = set()
    created_date = data.get("createdDate")
    expires_date = data.get("expiresDate")
    first_seen = convert_to_datetime(created_date)
    last_seen = convert_to_datetime(expires_date)

    registrar_name = data.get("registrarName")
    if registrar_name is not None:
        node = Text.get_or_create(value=registrar_name)
        try:
            links.update(
                observable.link_to(node, "Registrar name", SOURCE, first_seen,
                                   last_seen))
        except Exception as e:
            # BUG FIX: exceptions have no .message attribute in Python 3.
            logger.error(e)

    # BUG FIX: guard against an explicit null value, as for nameServers.
    contacts = data.get("contact") or []
    for contact in contacts:
        email = contact.get("email")
        if email is not None:
            node = Email.get_or_create(value=email)
            try:
                # BUG FIX: first_seen/last_seen were being passed to
                # set.update() instead of link_to(), raising TypeError.
                links.update(
                    observable.link_to(node, "Contact email", SOURCE,
                                       first_seen, last_seen))
            except Exception as e:
                logger.error(e)

    name_servers = data.get("nameServers") or []
    for name_server in name_servers:
        node = Hostname.get_or_create(value=name_server)
        try:
            # BUG FIX: same misplaced-parenthesis fix as above.
            links.update(
                observable.link_to(node, "Name server", SOURCE, first_seen,
                                   last_seen))
        except Exception as e:
            logger.error(e)

    return list(links)
def analyze(observable, result):
    """Fetch IPs contacted by a file (hash) from VirusTotal and link them.

    Queries the /files/<hash>/contacted_ips endpoint, creates an Ip node
    per result, attaches whois/ASN/analysis-stats context to it and links
    it back to the observable.

    Returns:
        list of links created.
    """
    links = set()
    endpoint = "/files/%s/contacted_ips" % observable.value
    # NOTE(review): settings key is spelled "virutotal_api_key"; kept as-is
    # so existing deployed configurations keep working — confirm upstream.
    api_key = result.settings["virutotal_api_key"]
    # Renamed from `result` to avoid shadowing the parameter.
    response = VirustotalApi.fetch(api_key, endpoint)
    if response:
        for data in response["data"]:
            ip = Ip.get_or_create(value=data["id"])
            attributes = data["attributes"]
            # BUG FIX: build a fresh context per IP. The old code reused a
            # single dict, so optional keys set for one IP (e.g.
            # last_https_certificate) leaked into the context of later IPs.
            context = {"source": "VirusTotal"}
            context["whois"] = attributes["whois"]
            whois_timestamp = attributes["whois_date"]
            whois_date = datetime.fromtimestamp(whois_timestamp).isoformat()
            context["whois_date"] = whois_date
            context["country"] = attributes["country"]
            asn = Text.get_or_create(value=str(attributes["asn"]))
            ip.active_link_to(asn, "AS", "Virustotal.com")
            context["as_owner"] = attributes["as_owner"]
            if "last_https_certificate" in attributes:
                context["last_https_certificate"] = json.dumps(
                    attributes["last_https_certificate"])
            # Copy per-engine analysis stats (harmless/malicious/...).
            stat_files = attributes["last_analysis_stats"]
            for k, v in stat_files.items():
                context[k] = v
            ip.add_context(context)
            links.update(
                ip.active_link_to(observable, "contacted by",
                                  context["source"]))
    return list(links)
def analyze_ip(cls, ip, results):
    """Specific analyzer for Ip observables.

    Walks IVRE passive records for the IP and creates links and context:
    DNS answers, HTTP Host/Server/User-Agent headers and SSL certificates.

    Returns:
        list of links created.
    """
    links = set()
    result = {}
    for rec in db.passive.get(db.passive.searchhost(ip.value)):
        LOG.debug("%s.analyze_ip: record %r", cls.__name__, rec)
        if rec["recontype"] == "DNS_ANSWER":
            value = rec["value"]
            hostname = Hostname.get_or_create(value=value)
            # e.g. source "A-…" -> link type "dns-A"
            rec_type = "dns-%s" % rec["source"].split("-", 1)[0]
            result.setdefault(rec_type, set()).add(value)
            links.update(
                ip.link_to(
                    hostname,
                    rec_type,
                    "IVRE - DNS-%s" % rec["source"],
                    first_seen=rec["firstseen"],
                    last_seen=rec["lastseen"],
                ))
        elif rec["recontype"] == "HTTP_CLIENT_HEADER_SERVER":
            if rec["source"] == "HOST":
                value = rec["value"]
                result.setdefault("http-host", set()).add(value)
                # Host: header values may not be valid hostnames; _try_link
                # absorbs the validation failure.
                _try_link(
                    links,
                    ip,
                    Hostname,
                    value,
                    "http-host",
                    "IVRE - HTTP Host: header",
                    first_seen=rec["firstseen"],
                    last_seen=rec["lastseen"],
                )
            else:
                continue
        elif rec["recontype"] == "HTTP_SERVER_HEADER":
            if rec["source"] == "SERVER":
                value = rec["value"]
                result.setdefault("http-server", set()).add(value)
                links.update(
                    ip.link_to(
                        Text.get_or_create(value=value),
                        "http-server",
                        "IVRE - HTTP Server: header",
                        first_seen=rec["firstseen"],
                        last_seen=rec["lastseen"],
                    ))
            else:
                continue
        elif rec["recontype"] == "HTTP_CLIENT_HEADER":
            if rec["source"] == "USER-AGENT":
                value = rec["value"]
                result.setdefault("http-user-agent", set()).add(value)
                links.update(
                    ip.link_to(
                        Text.get_or_create(value=value),
                        # BUG FIX: link type was "http-server" (copy/paste);
                        # this branch records User-Agent headers.
                        "http-user-agent",
                        "IVRE - HTTP User-Agent: header",
                        first_seen=rec["firstseen"],
                        last_seen=rec["lastseen"],
                    ))
            else:
                continue
        elif rec["recontype"] == "SSL_SERVER":
            if rec["source"] == "cert":
                cert = _handle_cert(db.passive, rec, links)
                result.setdefault("ssl-cert", set()).add(cert.value)
                links.update(
                    ip.link_to(
                        cert,
                        "ssl-cert",
                        "IVRE - SSL X509 certificate",
                        first_seen=rec["firstseen"],
                        last_seen=rec["lastseen"],
                    ))
            else:
                continue
        else:
            continue
    if result:
        results.update(raw=pformat(
            {key: list(value) for key, value in result.items()}))
        # Only attach the context once per observable.
        if all(context["source"] != "ivre_passive"
               for context in ip.context):
            ip.add_context({"source": "ivre_passive", "results": result})
    return list(links)
def _add_events_nodes(self, events, context, tags):
    """Create one Text node per Proofpoint event, linked to its artifacts.

    For each message event, creates a 'proofpoint://<GUID>' Text node and
    links it to the sender IP, message-ID, sender addresses and any
    THREAT-flagged attachment hashes. All event nodes are tagged with
    *tags* at the end.

    Returns:
        list of the created event (guid) nodes.
    """
    log.debug('_add_events_nodes on {nb} events'.format(nb=len(events)))
    # Counters for attachments the sandbox could not analyze.
    attach_unsupported = dict([
        (_, 0) for _ in ['UNSUPPORTED_TYPE', 'TOO_SMALL', None]
    ])
    event_nodes = list()
    for msg in events:
        create_t = datetime.strptime(msg['messageTime'],
                                     "%Y-%m-%dT%H:%M:%S.%fZ")
        # PPS unique value
        guid = Text.get_or_create(value='proofpoint://%s' % msg['GUID'],
                                  created=create_t,
                                  context=[context])
        log.debug('Event {msg}'.format(msg=msg['messageID']))
        message_contents = list()
        src_ip = Ip.get_or_create(value=msg['senderIP'],
                                  created=create_t,
                                  context=[context])
        src_ip.tag(['MTA'])
        guid.active_link_to([src_ip], "MTA src ip", self.name)
        # new event
        event_nodes.append(guid)
        # if self.config['import_email_metadata']:
        # email details
        # messageID
        message_id = Email.get_or_create(value=msg['messageID'],
                                         created=create_t,
                                         context=[context])
        guid.active_link_to([message_id], "seen in", self.name)
        # sender
        _s1 = Email.get_or_create(value=msg['sender'],
                                  created=create_t,
                                  context=[context])
        _s1.tag(['sender'])
        guid.active_link_to([_s1], "sender", self.name)
        if 'headerFrom' in msg:
            # header From
            _s2 = Email.get_or_create(value=msg['headerFrom'],
                                      created=create_t,
                                      context=[context])
            _s2.tag(['sender'])
            guid.active_link_to([_s2], "headerFrom", self.name)
        # FIXME is that a duplicate of attachment-malware ?
        # attachment events
        for attach in msg['messageParts']:
            if attach['sandboxStatus'] in ['THREAT']:
                md5 = Hash.get_or_create(value=attach['md5'],
                                         created=create_t,
                                         context=[context])
                md5.tag([t['name'] for t in tags])
                fname = File.get_or_create(value=attach['filename'],
                                           created=create_t,
                                           context=[context])
                fname.tag([t['name'] for t in tags])
                # this should be a DUP from threat_nodes in analyse()
                sha_threat = Hash.get_or_create(value=attach['sha256'],
                                                created=create_t,
                                                context=[context])
                # link the 3 together
                sha_threat.active_link_to([md5, fname], "relates", self.name)
                sha_threat.tag([t['name'] for t in tags])
                message_contents.append(sha_threat)
            elif attach['sandboxStatus'] in [
                    'UNSUPPORTED_TYPE', 'TOO_SMALL', None
            ]:
                attach_unsupported[attach['sandboxStatus']] += 1
                log.debug(pprint.pformat(attach))
        # add context to the hashes
        guid.active_link_to(message_contents, "delivers", self.name)
    _stats = ', '.join("%s: %d" % (k, v)
                       for k, v in attach_unsupported.items())
    log.warning('Ignored unsupported attachments: %s', _stats)
    for o in event_nodes:
        o.tag([t['name'] for t in tags])
    return event_nodes
def _get_threat_forensics_nodes_inner(self, evidence, general_context, tags):
    """Convert one Proofpoint forensic *evidence* entry into tagged nodes.

    Builds the appropriate observable (File/Hash/Hostname/Text/Ip/Url)
    for the evidence type, attaches context derived from its notes, and
    tags every created node with *tags* plus the evidence type.

    Returns:
        list of created observable nodes (possibly empty).
    """
    # create context from notes
    context = general_context.copy()
    _ctx = self._make_context_from_notes([evidence])
    context.update(_ctx)
    # add evidence['type'] and unicify tags
    tags = [{
        'name': _
    } for _ in set([evidence['type']] + [d['name'] for d in tags])]
    # create Tags in DB
    for _ in tags:
        Tag.get_or_create(name=_['name'])
    threat_forensics = []
    # technical hack: set optional comments values
    # BUG FIX: 'rule' was listed twice; deduplicated (no behavior change).
    for optional in ['action', 'rule', 'path']:
        if optional not in evidence['what']:
            evidence['what'][optional] = None
    # add attributes for the known evidence type
    if evidence['type'] in ['file', 'dropper']:
        if 'path' in evidence['what']:
            threat_forensics.append(
                File.get_or_create(value=evidence['what']['path'],
                                   context=[context]))
        if 'md5' in evidence['what']:
            threat_forensics.append(
                Hash.get_or_create(value=evidence['what']['md5'],
                                   context=[context]))
        if 'sha256' in evidence['what']:
            threat_forensics.append(
                Hash.get_or_create(value=evidence['what']['sha256'],
                                   context=[context]))
    elif evidence['type'] == 'cookie':
        pass
    elif evidence['type'] == 'dns':
        threat_forensics.append(
            Hostname.get_or_create(value=evidence['what']['host'],
                                   context=[context]))
    elif evidence['type'] == 'ids':
        threat_forensics.append(
            Text.get_or_create(value=evidence['what']['ids'],
                               context=[context]))
    elif evidence['type'] == 'mutex':
        threat_forensics.append(
            Text.get_or_create(value=evidence['what']['name'],
                               context=[context]))
    elif evidence['type'] == 'network':
        if 'ip' in evidence['what']:
            # FIXME port, type
            threat_forensics.append(
                Ip.get_or_create(value=evidence['what']['ip'],
                                 context=[context]))
        elif 'domain' in evidence['what']:
            threat_forensics.append(
                Hostname.get_or_create(value=evidence['what']['domain'],
                                       context=[context]))
    elif evidence['type'] == 'process':
        pass
    elif evidence['type'] == 'registry':
        # threat_forensics.append(evidence['what']['key'])
        # threat_forensics.append(evidence['what']['value'])
        pass
    elif evidence['type'] == 'url':
        # BUG yeti-#115 ObservableValidationError: Invalid URL: http://xxxxx-no-tld/
        threat_forensics.append(
            Url.get_or_create(value=evidence['what']['url'],
                              context=[context]))
        # add note as tag because its a signature
        if 'note' in evidence:
            threat_forensics[-1].tag(evidence['note'].replace(
                '.', '_').strip('_'))
    # tag all of that
    for o in threat_forensics:
        o.tag([t['name'] for t in tags])
    return threat_forensics
def _query_and_filter_previous_new_threat_for_campaign(
        campaign_info, context):
    """Return observables for campaign threats not yet in our DB.

    Maps each campaign member's subType to an observable class, skips
    values we already store, and creates (and tags) new observables with
    Ip/Text fallbacks for values that fail validation.
    """
    # get all threat for this campaign from the API
    # filter out the threat we already have in DB
    # return the net new threats
    # TODO: alternative solution, query by type, get all campaign threat, intersect sets
    # Q/A: why do i have to play with perf issues ?
    # only create Observables and link them when they do not exists.
    cls_action = {
        'COMPLETE_URL': Url,
        'NORMALIZED_URL': Url,
        'ATTACHMENT': Hash,
        'DOMAIN': Hostname,
        'HOSTNAME': Hostname
    }
    threats = []
    log.info("There are {nb} threat associated to campaign".format(
        nb=len(campaign_info['campaignMembers'])))
    for threat in campaign_info['campaignMembers']:
        # ATTACHMENT, COMPLETE_URL, NORMALIZED_URL, or DOMAIN
        # BUG #5: undocumentated value HOSTNAME, could be hostname or ip
        v = threat['threat']
        # t = threat['threatTime'][:10] # last_seen ?
        create_t = datetime.strptime(threat['threatTime'],
                                     "%Y-%m-%dT%H:%M:%S.%fZ")
        # TODO threat['threatStatus'] in active, ...
        if threat['threatStatus'] != 'active':
            log.warning('Campaign threat - threatStatus %s unsupported',
                        threat['threatStatus'])
            # FIXME Campaign threat - threatStatus falsePositive unsupported
            # threatStatus ?
        if threat['subType'] not in cls_action:
            log.error('Campaign threat - subtype %s unsupported',
                      threat['subType'])
            continue
        cls = cls_action[threat['subType']]
        try:
            # if it exists, don't do anything. tags and context are the same
            cls.objects.get(value=v)
        except DoesNotExist:
            # otherwise return it to link to it.
            # threats.append(cls.get_or_create(value=v, context=[context], created=t))
            # tags named argument in constructor does not work the same as .tag()
            try:
                o = cls.get_or_create(value=v,
                                      context=[context],
                                      created=create_t)
                o.tag([threat['type'], threat['subType']])
            except DoesNotExist:
                # wtf
                log.error("{cls} {v} has a weird problem - FIXME".format(
                    cls=cls, v=v))
            except ObservableValidationError:
                try:
                    if threat['subType'] == 'HOSTNAME':
                        # could be an Ip
                        o = Ip.get_or_create(value=v,
                                             context=[context],
                                             created=create_t)
                except ObservableValidationError as e:
                    log.error(e)
                    log.error(pprint.pformat(threat))
                    log.error("Campaign {name}".format(
                        name=campaign_info['name']))
                    o = Text.get_or_create(value=v,
                                           context=[context],
                                           created=create_t)
                # NOTE(review): if subType != 'HOSTNAME' and no exception is
                # raised above, `o` is never (re)bound in this handler —
                # possible NameError or stale value from a prior iteration.
                o.tag([threat['type'], threat['subType']])
            threats.append(o)
    log.info("Found %d new threat on campaign, new to us", len(threats))
    # there is a bug here...
    log.debug(", ".join(
        ["%s:%s" % (t.__class__.__name__, t.value) for t in threats]))
    return threats
def _get_threat_forensics_nodes_inner(self, evidence, general_context, tags):
    """Convert one Proofpoint forensic *evidence* entry into tagged nodes.

    Builds the appropriate observable (File/Hash/Hostname/Text/Ip/Url)
    for the evidence type, attaches context derived from its notes, and
    tags every created node with *tags* plus the evidence type.

    Returns:
        list of created observable nodes (possibly empty).
    """
    # create context from notes
    context = general_context.copy()
    _ctx = self._make_context_from_notes([evidence])
    context.update(_ctx)
    # add evidence['type'] and unicify tags
    tags = [{
        "name": _
    } for _ in set([evidence["type"]] + [d["name"] for d in tags])]
    # create Tags in DB
    for _ in tags:
        Tag.get_or_create(name=_["name"])
    threat_forensics = []
    # technical hack: set optional comments values
    # BUG FIX: "rule" was listed twice; deduplicated (no behavior change).
    for optional in ["action", "rule", "path"]:
        if optional not in evidence["what"]:
            evidence["what"][optional] = None
    # add attributes for the known evidence type
    if evidence["type"] in ["file", "dropper"]:
        if "path" in evidence["what"]:
            threat_forensics.append(
                File.get_or_create(value=evidence["what"]["path"],
                                   context=[context]))
        if "md5" in evidence["what"]:
            threat_forensics.append(
                Hash.get_or_create(value=evidence["what"]["md5"],
                                   context=[context]))
        if "sha256" in evidence["what"]:
            threat_forensics.append(
                Hash.get_or_create(value=evidence["what"]["sha256"],
                                   context=[context]))
    elif evidence["type"] == "cookie":
        pass
    elif evidence["type"] == "dns":
        threat_forensics.append(
            Hostname.get_or_create(value=evidence["what"]["host"],
                                   context=[context]))
    elif evidence["type"] == "ids":
        threat_forensics.append(
            Text.get_or_create(value=evidence["what"]["ids"],
                               context=[context]))
    elif evidence["type"] == "mutex":
        threat_forensics.append(
            Text.get_or_create(value=evidence["what"]["name"],
                               context=[context]))
    elif evidence["type"] == "network":
        if "ip" in evidence["what"]:
            # FIXME port, type
            threat_forensics.append(
                Ip.get_or_create(value=evidence["what"]["ip"],
                                 context=[context]))
        elif "domain" in evidence["what"]:
            threat_forensics.append(
                Hostname.get_or_create(value=evidence["what"]["domain"],
                                       context=[context]))
    elif evidence["type"] == "process":
        pass
    elif evidence["type"] == "registry":
        # threat_forensics.append(evidence['what']['key'])
        # threat_forensics.append(evidence['what']['value'])
        pass
    elif evidence["type"] == "url":
        # BUG yeti-#115 ObservableValidationError: Invalid URL: http://xxxxx-no-tld/
        threat_forensics.append(
            Url.get_or_create(value=evidence["what"]["url"],
                              context=[context]))
        # add note as tag because its a signature
        if "note" in evidence:
            threat_forensics[-1].tag(evidence["note"].replace(
                ".", "_").strip("_"))
    # tag all of that
    for o in threat_forensics:
        o.tag([t["name"] for t in tags])
    return threat_forensics
def _add_events_nodes(self, events, context, tags):
    """Create one Text node per Proofpoint event, linked to its artifacts.

    For each message event, creates a 'proofpoint://<GUID>' Text node and
    links it to the sender IP, message-ID, sender addresses and any
    THREAT-flagged attachment hashes. All event nodes are tagged with
    *tags* at the end.

    Returns:
        list of the created event (guid) nodes.
    """
    log.debug('_add_events_nodes on {nb} events'.format(nb=len(events)))
    # Counters for attachments the sandbox could not analyze.
    attach_unsupported = dict(
        [(_, 0) for _ in ['UNSUPPORTED_TYPE', 'TOO_SMALL', None]])
    event_nodes = list()
    for msg in events:
        create_t = datetime.strptime(
            msg['messageTime'], "%Y-%m-%dT%H:%M:%S.%fZ")
        # PPS unique value
        guid = Text.get_or_create(
            value='proofpoint://%s' % msg['GUID'],
            created=create_t,
            context=[context])
        log.debug('Event {msg}'.format(msg=msg['messageID']))
        message_contents = list()
        src_ip = Ip.get_or_create(
            value=msg['senderIP'], created=create_t, context=[context])
        src_ip.tag(['MTA'])
        guid.active_link_to([src_ip], "MTA src ip", self.name)
        # new event
        event_nodes.append(guid)
        # if self.config['import_email_metadata']:
        # email details
        # messageID
        message_id = Email.get_or_create(
            value=msg['messageID'], created=create_t, context=[context])
        guid.active_link_to([message_id], "seen in", self.name)
        # sender
        _s1 = Email.get_or_create(
            value=msg['sender'], created=create_t, context=[context])
        _s1.tag(['sender'])
        guid.active_link_to([_s1], "sender", self.name)
        if 'headerFrom' in msg:
            # header From
            _s2 = Email.get_or_create(
                value=msg['headerFrom'], created=create_t, context=[context])
            _s2.tag(['sender'])
            guid.active_link_to([_s2], "headerFrom", self.name)
        # FIXME is that a duplicate of attachment-malware ?
        # attachment events
        for attach in msg['messageParts']:
            if attach['sandboxStatus'] in ['THREAT']:
                md5 = Hash.get_or_create(
                    value=attach['md5'], created=create_t, context=[context])
                md5.tag([t['name'] for t in tags])
                fname = File.get_or_create(
                    value=attach['filename'],
                    created=create_t,
                    context=[context])
                fname.tag([t['name'] for t in tags])
                # this should be a DUP from threat_nodes in analyse()
                sha_threat = Hash.get_or_create(
                    value=attach['sha256'],
                    created=create_t,
                    context=[context])
                # link the 3 together
                sha_threat.active_link_to([md5, fname], "relates", self.name)
                sha_threat.tag([t['name'] for t in tags])
                message_contents.append(sha_threat)
            elif attach['sandboxStatus'] in ['UNSUPPORTED_TYPE', 'TOO_SMALL',
                                             None]:
                attach_unsupported[attach['sandboxStatus']] += 1
                log.debug(pprint.pformat(attach))
        # add context to the hashes
        guid.active_link_to(message_contents, "delivers", self.name)
    _stats = ', '.join(
        "%s: %d" % (k, v) for k, v in attach_unsupported.items())
    log.warning('Ignored unsupported attachments: %s', _stats)
    for o in event_nodes:
        o.tag([t['name'] for t in tags])
    return event_nodes
def _get_threat_forensics_nodes_inner(
        self, evidence, general_context, tags):
    """Convert one Proofpoint forensic *evidence* entry into tagged nodes.

    Builds the appropriate observable (File/Hash/Hostname/Text/Ip/Url)
    for the evidence type, attaches context derived from its notes, and
    tags every created node with *tags* plus the evidence type.

    Returns:
        list of created observable nodes (possibly empty).
    """
    # create context from notes
    context = general_context.copy()
    _ctx = self._make_context_from_notes([evidence])
    context.update(_ctx)
    # add evidence['type'] and unicify tags
    tags = [{
        'name': _
    } for _ in set([evidence['type']] + [d['name'] for d in tags])]
    # create Tags in DB
    for _ in tags:
        Tag.get_or_create(name=_['name'])
    #
    threat_forensics = []
    # technical hack: set optional comments values
    # NOTE(review): 'rule' appears twice in this list — redundant but harmless.
    for optional in ['action', 'rule', 'path', 'rule']:
        if optional not in evidence['what']:
            evidence['what'][optional] = None
    # add attributes for the known evidence type
    if evidence['type'] in ['file', 'dropper']:
        if 'path' in evidence['what']:
            threat_forensics.append(
                File.get_or_create(
                    value=evidence['what']['path'], context=[context]))
        if 'md5' in evidence['what']:
            threat_forensics.append(
                Hash.get_or_create(
                    value=evidence['what']['md5'], context=[context]))
        if 'sha256' in evidence['what']:
            threat_forensics.append(
                Hash.get_or_create(
                    value=evidence['what']['sha256'], context=[context]))
    elif evidence['type'] == 'cookie':
        pass
    elif evidence['type'] == 'dns':
        threat_forensics.append(
            Hostname.get_or_create(
                value=evidence['what']['host'], context=[context]))
    elif evidence['type'] == 'ids':
        threat_forensics.append(
            Text.get_or_create(
                value=evidence['what']['ids'], context=[context]))
        pass
    elif evidence['type'] == 'mutex':
        threat_forensics.append(
            Text.get_or_create(
                value=evidence['what']['name'], context=[context]))
    elif evidence['type'] == 'network':
        if 'ip' in evidence['what']:
            # FIXME port, type
            threat_forensics.append(
                Ip.get_or_create(
                    value=evidence['what']['ip'], context=[context]))
        elif 'domain' in evidence['what']:
            threat_forensics.append(
                Hostname.get_or_create(
                    value=evidence['what']['domain'], context=[context]))
    elif evidence['type'] == 'process':
        pass
    elif evidence['type'] == 'registry':
        # threat_forensics.append(evidence['what']['key'])
        # threat_forensics.append(evidence['what']['value'])
        pass
    elif evidence['type'] == 'url':
        # BUG yeti-#115 ObservableValidationError: Invalid URL: http://xxxxx-no-tld/
        threat_forensics.append(
            Url.get_or_create(
                value=evidence['what']['url'], context=[context]))
        # add note as tag because its a signature
        if 'note' in evidence:
            threat_forensics[-1].tag(
                evidence['note'].replace('.', '_').strip('_'))
    # tag all of that
    for o in threat_forensics:
        o.tag([t['name'] for t in tags])
    return threat_forensics
def _query_and_filter_previous_new_threat_for_campaign(
        campaign_info, context):
    """Return observables for campaign threats not yet in our DB.

    Maps each campaign member's subType to an observable class, skips
    values we already store, and creates (and tags) new observables with
    Ip/Text fallbacks for values that fail validation.
    """
    # get all threat for this campaign from the API
    # filter out the threat we already have in DB
    # return the net new threats
    # TODO: alternative solution, query by type, get all campaign threat, intersect sets
    # Q/A: why do i have to play with perf issues ?
    # only create Observables and link them when they do not exists.
    cls_action = {
        'COMPLETE_URL': Url,
        'NORMALIZED_URL': Url,
        'ATTACHMENT': Hash,
        'DOMAIN': Hostname,
        'HOSTNAME': Hostname
    }
    threats = []
    log.info(
        "There are {nb} threat associated to campaign".format(
            nb=len(campaign_info['campaignMembers'])))
    for threat in campaign_info['campaignMembers']:
        # ATTACHMENT, COMPLETE_URL, NORMALIZED_URL, or DOMAIN
        # BUG #5: undocumentated value HOSTNAME, could be hostname or ip
        v = threat['threat']
        # t = threat['threatTime'][:10] # last_seen ?
        create_t = datetime.strptime(
            threat['threatTime'], "%Y-%m-%dT%H:%M:%S.%fZ")
        # TODO threat['threatStatus'] in active, ...
        if threat['threatStatus'] != 'active':
            log.warning(
                'Campaign threat - threatStatus %s unsupported',
                threat['threatStatus'])
            # FIXME Campaign threat - threatStatus falsePositive unsupported
            # threatStatus ?
        if threat['subType'] not in cls_action:
            log.error(
                'Campaign threat - subtype %s unsupported',
                threat['subType'])
            continue
        cls = cls_action[threat['subType']]
        try:
            # if it exists, don't do anything. tags and context are the same
            cls.objects.get(value=v)
        except DoesNotExist:
            # otherwise return it to link to it.
            # threats.append(cls.get_or_create(value=v, context=[context], created=t))
            # tags named argument in constructor does not work the same as .tag()
            try:
                o = cls.get_or_create(
                    value=v, context=[context], created=create_t)
                o.tag([threat['type'], threat['subType']])
            except DoesNotExist:
                # wtf
                log.error(
                    "{cls} {v} has a weird problem - FIXME".format(
                        cls=cls, v=v))
            except ObservableValidationError:
                try:
                    if threat['subType'] == 'HOSTNAME':
                        # could be an Ip
                        o = Ip.get_or_create(
                            value=v, context=[context], created=create_t)
                except ObservableValidationError as e:
                    log.error(e)
                    log.error(pprint.pformat(threat))
                    log.error(
                        "Campaign {name}".format(
                            name=campaign_info['name']))
                    o = Text.get_or_create(
                        value=v, context=[context], created=create_t)
                # NOTE(review): if subType != 'HOSTNAME' and no exception is
                # raised above, `o` is never (re)bound in this handler —
                # possible NameError or stale value from a prior iteration.
                o.tag([threat['type'], threat['subType']])
            threats.append(o)
    log.info("Found %d new threat on campaign, new to us", len(threats))
    # there is a bug here...
    log.debug(
        ", ".join(
            ["%s:%s" % (t.__class__.__name__, t.value) for t in threats]))
    return threats