def main(instance, zconfig, fangs, productdistros=None):
    """productdistros should be the full path to the location
    of additional products to be loaded"""
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-r", "--refang", action="store_true",
                      help="refang, don't defang", default=False)
    opts, args = parser.parse_args()
    initialize_zope(instance, zconfig, productdistros)
    for zodbfile, file_fangs in fangs.items():
        if opts.refang:
            output = zodbfile + ".refanged"
        else:
            output = zodbfile + ".defanged"
        tmp = os.path.join(os.path.dirname(output),
                           "." + os.path.basename(output))
        print "Copying", zodbfile, "to", tmp
        shutil.copy(zodbfile, tmp)
        try:
            if opts.refang:
                print "Refanging", tmp
                defang.refang(file_fangs, tmp)
            else:
                print "Defanging", tmp
                defang.defang(file_fangs, tmp)
            print "Renaming", tmp, "to", output
            os.rename(tmp, output)
        except Exception, e:
            traceback.print_exc()
            sys.exit(-1)
def print_active_ticket(id):
    try:
        ticket = "ticket/%s" % id
        response = resource.get(path=ticket)
        for r in response.parsed:
            l = {a: b for a, b in r}
            ticket_status = l["Status"]
            if ticket_status == "open" or ticket_status == "new":
                # print r
                attachments = "ticket/%s/attachments" % id
                response = resource.get(path=attachments)
                for r in response.parsed:
                    l = {a: b for a, b in r}
                    ticket_attachments = l["Attachments"]
                    attachment = ticket_attachments.split(":")
                    ta = attachment[0]
                    ticket_attachment = "ticket/%s/attachments/%s/content" % (id, ta)
                    print ticket_attachment
                    response = resource.get(path=ticket_attachment)
                    body = response.body
                    body = defang.refang(body)
                    extracted_url = re.search(r"(?P<url>https?://[^\s]+)",
                                              body).group("url")
                    print extracted_url
                    online, size = is_online(extracted_url)
                    if not online:
                        print "Resource %s is offline (size: %s)" % (
                            extracted_url, size)
                        close_ticket(id)
    except RTResourceError as e:
        logger.error(e.response.status_int)
        logger.error(e.response.status)
        logger.error(e.response.parsed)
def test_refang():
    for fanged, defanged in (
        ('example.org', 'example[.]org'),
        ('http://example.org', 'hXXp://example[.]org'),
        ('example.org\nbadguy.example.org\n',
         'example[.]org\nbadguy.example[.]org\n'),
        ('http://EVIL-guy.badguy.NET', 'hXXp://EVIL-guy.badguy[.]NET'),
        ('ssh://foobar.example.org/', '(ssh)://foobar.example[.]org/'),
        ('ftp://foo-bar.example.org', 'fXp://foo-bar.example[.]org'),
        ('http://sub.domain.org/path/to?bad=stuff',
         'hXXp://sub.domain[.]org/path/to?bad=stuff'),
        ('gopher://badstuff.org/', '(gopher)://badstuff[.]org/'),
        ('s3://something.amazon.com/testing?zxc=zxc',
         's3://something[DOT]amazon(dot)com/testing?zxc=zxc'),
        ('''
https://otherstuff.org
badstuff.org
goodstuff.org/and/path
foo://newstuff.org/what?foo=true
bar-baz://crazy.stuff.other.foo.co.uk
''',
         '''
hXXps://otherstuff(DOT)org
badstuff[DOT]org
goodstuff[.]org/and/path
foo://newstuff(.)org/what?foo=true
bar-baz://crazy[.]stuff(DOT)other[dot]foo(.)co.uk
'''),
    ):
        assert refang(defanged) == fanged
def parse_ip(ips, ip_file):
    ip_list = []
    if ips:
        ip_list.extend(ips.split(','))
    if ip_file:
        ip_list.extend(import_ip_file(ip_file))
    ip_list = list(set(ip_list))
    ip_list = [defang.refang(ip) for ip in ip_list]
    return ip_list
def parse_domain(domains, domain_file):
    domain_list = []
    if domains:
        domain_list.extend(domains.split(','))
    if domain_file:
        domain_list.extend(import_domain_file(domain_file))
    domain_list = list(set(domain_list))
    domain_list = [defang.refang(d) for d in domain_list]
    return domain_list
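# A minimal, self-contained sketch of the refang-a-list pattern shared by
# parse_ip and parse_domain above. It assumes the `defang` package from PyPI
# (whose refang() strips [.] / hxxp-style obfuscation from a string);
# refang_all and the example values are hypothetical, not from the original.
from defang import refang

def refang_all(indicators):
    """De-duplicate and refang a list of defanged indicators."""
    return sorted({refang(i) for i in indicators})

# Example: refang_all(['1.2.3[.]4', 'hxxp://evil[.]example', '1.2.3[.]4'])
# -> ['1.2.3.4', 'http://evil.example']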
def test_refang(self):
    self.assertEqual(refang("hXXp://google[.]fr"), "http://google.fr")
    self.assertEqual(refang("hxxp://google[.]fr"), "http://google.fr")
    self.assertEqual(refang("fXp://google[.]fr"), "ftp://google.fr")
    self.assertEqual(refang("fxp://google[.]fr"), "ftp://google.fr")
    self.assertEqual(refang("purr://google[.]fr"), "http://google.fr")
    self.assertEqual(refang("meow://google[.]fr"), "http://google.fr")
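# Hedged round-trip sketch of the defang()/refang() pair the tests above
# exercise, assuming the PyPI `defang` package; the exact defanged spelling
# (hXXp, [.]) is the package's choice, so only the round trip is asserted.
from defang import defang, refang

url = "http://example.org/path"
safe = defang(url)          # e.g. 'hXXp://example[.]org/path'
assert refang(safe) == url  # refanging restores the original URL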
def manage_domain_name(domain_name):
    domain_name = refang(domain_name)
    if request.method == 'POST':
        return post(domain_name)
    elif request.method == 'GET':
        return get(domain_name)
    elif request.method == 'PUT':
        return put(domain_name)
    elif request.method == 'DELETE':
        return delete(domain_name)
def get_validate_url(url):
    class bcolors:
        WARNING = '\033[93m'
        ENDC = '\033[0m'

    url = defang.refang(url)
    o = urlparse(url)
    if o.scheme == '':
        newurl = 'https://' + url
        print(bcolors.WARNING +
              "{} doesn't have a scheme, so it was changed to {}".format(
                  url, newurl) + bcolors.ENDC,
              file=sys.stderr)
        url = newurl
    return url
def manage_follow(domain_name):
    domain_name = refang(domain_name)
    username = get_jwt_identity()
    if request.method == 'POST':
        if UserDn.exists(username, domain_name):
            return error_view(500, "you are already following this DN")
        user_dn = UserDn.new(username, domain_name, False)
        user_dn.insert()
        return valid_view("DN added to your follows")
    elif request.method == 'DELETE':
        if not UserDn.exists(username, domain_name):
            return error_view(404, "you are not following this DN")
        user_dn = UserDn.get(username, domain_name)
        user_dn.delete()
        return valid_view("DN removed from your follows")
email_data = email_data.split(stopword, 1)[0]

# Find the first forwarding message and use that content
position = 99999
t_email_data = email_data
for identifier in forward_identifiers:
    new_position = email_data.find(identifier)
    if new_position == -1:
        new_position = position
    if new_position < position:
        t_before, t_split, t_email_data = email_data.partition(identifier)
        position = new_position
email_data = t_email_data

# Refang email data
email_data = refang(email_data)

## Extract various IOCs
urllist = list()
urllist += re.findall(urlmarker.WEB_URL_REGEX, email_data)
urllist += re.findall(urlmarker.IP_REGEX, email_data)
if debug:
    syslog.syslog(str(urllist))

# Init Faup
f = Faup()

# Add tags according to configuration
for malware in malwaretags:
    if malware in email_subject.lower():
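# Sketch of the refang-then-extract step above with an inline regex, since
# the urlmarker module is external to this snippet; the pattern here is an
# illustrative assumption, not urlmarker.WEB_URL_REGEX itself.
import re
from defang import refang

def extract_urls_from_text(text):
    text = refang(text)  # normalise hxxp:// and [.] obfuscation first
    return re.findall(r'https?://[^\s\'"<>]+', text)

# extract_urls_from_text('visit hxxp://evil[.]example/x now')
# -> ['http://evil.example/x']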
def misp_send(self, strMISPEventID, strInput, strInfo, strUsername):
    # Establish communication with MISP
    # event = MISPEvent()
    # event.info = 'Test event'
    # event.analysis = 0
    # event.distribution = 3
    # event.threat_level_id = 2
    # event.add_attribute('md5', '678ff97bf16d8e1c95679c4681834c41')
    # #<add more attributes>
    # self.misp.add_event(event)
    # exit()
    try:
        objects = []
        # Get comments and tags from string input
        str_comment, tags = self.get_comm_and_tags(strInput)
        print(tags)
        if tags is None:
            self.misp_logger.info('Irate not in Tags: %s equals None' % tags)
            response = None
            return response
        # Set up MISP objects
        mispobj_email = MISPObject(name="email")
        mispobj_file = MISPObject(name="file")
        mispobj_files = {}
        mispobj_domainip = MISPObject(name="domain-ip")
        url_no = 0
        file_no = 0
        mispobj_urls = {}
        # Process input
        for line in strInput.splitlines():
            if "domain:" in line.lower():
                # Catch domain and add to domain/IP object
                mispobj_domainip = MISPObject(name="domain-ip")
                vals = line.split(":", 1)
                mispobj_domainip.add_attribute("domain", value=vals[1].strip(),
                                               comment=str_comment)
                objects.append(mispobj_domainip)
            elif ("ip:" in line.lower()) or ("ip-dst:" in line.lower()) or \
                    ("ip-src:" in line.lower()):
                # Catch IP and add to domain/IP object when the input also
                # carries a domain line; otherwise use a network-connection
                if any("domain:" in l.lower() for l in strInput.splitlines()):
                    mispobj_domainip = MISPObject(name="domain-ip")
                    vals = line.split(":", 1)
                    mispobj_domainip.add_attribute("ip", value=vals[1].strip(),
                                                   comment=str_comment)
                    objects.append(mispobj_domainip)
                else:
                    mispobj_network_connection = MISPObject(name="network-connection")
                    vals = line.split(":", 1)
                    if ("ip:" in line.lower()) or ("ip-dst:" in line.lower()):
                        mispobj_network_connection.add_attribute(
                            "ip-dst", type="ip-dst", value=vals[1].strip(),
                            comment=str_comment)
                    else:
                        mispobj_network_connection.add_attribute(
                            "ip-src", type="ip-src", value=vals[1].strip(),
                            comment=str_comment)
                    objects.append(mispobj_network_connection)
            elif ("source-email:" in line.lower()) or \
                    ("email-source" in line.lower()) or ("from:" in line.lower()):
                # Catch email and add to email object
                vals = line.split(":", 1)
                mispobj_email.add_attribute("from", value=vals[1].strip(),
                                            comment=str_comment)
            elif ("url:" in line.lower()) or \
                    (('kit:' in line.lower() or 'creds:' in line.lower()) and
                     ('hxxp' in line.lower() or 'http' in line.lower())):
                # Catch URL and add to URL object
                vals = line.split(":", 1)
                url = vals[1].strip()
                url = refang(url)
                parsed = urlparse(url)
                mispobj_url = MISPObject(name="url")
                mispobj_url.add_attribute("url", value=parsed.geturl(),
                                          category="Payload delivery",
                                          comment=str_comment)
                if parsed.hostname:
                    mispobj_url.add_attribute("host", value=parsed.hostname,
                                              comment=str_comment)
                if parsed.scheme:
                    mispobj_url.add_attribute("scheme", value=parsed.scheme,
                                              comment=str_comment)
                if parsed.port:
                    mispobj_url.add_attribute("port", value=parsed.port,
                                              comment=str_comment)
                mispobj_urls[url_no] = mispobj_url
                url_no += 1
            # Catch different hashes and add to file object
            elif "sha1:" in line.lower():
                vals = line.split(":", 1)
                mispobj_file.add_attribute("sha1", value=vals[1].strip(),
                                           comment=str_comment)
            elif "sha256:" in line.lower():
                vals = line.split(":", 1)
                mispobj_file.add_attribute("sha256", value=vals[1].strip(),
                                           comment=str_comment)
            elif "md5:" in line.lower():
                vals = line.split(":", 1)
                mispobj_file.add_attribute("md5", value=vals[1].strip(),
                                           comment=str_comment)
            elif "subject:" in line.lower():
                # Catch subject and add to email object
                self.misp_logger.info('adding subject')
                vals = line.split(":", 1)
                mispobj_email.add_attribute("subject", value=vals[1].strip(),
                                            comment=str_comment)
            elif "hash|filename:" in line.lower():
                # Catch hash|filename pair and add to file object
                vals = line.split(":", 1)
                val = vals[1].split("|")
                l_hash = val[0]
                l_filename = val[1]
                l_mispobj_file = MISPObject(name="file")
                if len(re.findall(r"\b[a-fA-F\d]{32}\b", l_hash)) > 0:
                    l_mispobj_file.add_attribute("md5", value=l_hash.strip(),
                                                 comment=str_comment)
                    l_mispobj_file.add_attribute("filename",
                                                 value=l_filename.strip(),
                                                 comment=str_comment)
                    mispobj_files[file_no] = l_mispobj_file
                elif len(re.findall(r'\b[0-9a-f]{40}\b', l_hash)) > 0:
                    l_mispobj_file.add_attribute("sha1", value=l_hash.strip(),
                                                 comment=str_comment)
                    l_mispobj_file.add_attribute("filename",
                                                 value=l_filename.strip(),
                                                 comment=str_comment)
                    mispobj_files[file_no] = l_mispobj_file
                elif len(re.findall(r'\b[A-Fa-f0-9]{64}\b', l_hash)) > 0:
                    l_mispobj_file.add_attribute("sha256", value=l_hash.strip(),
                                                 comment=str_comment)
                    l_mispobj_file.add_attribute("filename",
                                                 value=l_filename.strip(),
                                                 comment=str_comment)
                    mispobj_files[file_no] = l_mispobj_file
                file_no += 1
        # Add all MISP objects to the list to be processed and submitted
        # to the MISP server as one
        if len(mispobj_file.attributes) > 0:
            objects.append(mispobj_file)
        if len(mispobj_email.attributes) > 0:
            objects.append(mispobj_email)
        for u_key, u_value in mispobj_urls.items():
            if len(u_value.attributes) > 0:
                objects.append(u_value)
        for f_key, f_value in mispobj_files.items():
            if len(f_value.attributes) > 0:
                objects.append(f_value)
        # Update timestamp and event
    except Exception as e:
        error = traceback.format_exc()
        response = "Error occurred when converting string to MISP objects:\n %s" % error
        self.misp_logger.error(response)
        return response

    if not self.check_object_length(objects):
        self.misp_logger.error(
            'Input from %s did not contain accepted tags.\n Input: \n%s'
            % (strUsername, strInput))
        return "Error in the tags you entered. Please see the guide for accepted tags."

    try:
        misp_event = MISPEvent()
        misp_event.info = strInfo
        misp_event.distribution = 0
        misp_event.analysis = 2
        misp_event.threat_level_id = 3
        add = self.misp.add_event(misp_event)
        self.misp_logger.info("Added event %s" % add)
        a, b = self.submit_to_misp(self.misp, misp_event, objects)
        for tag in tags:
            self.misp.tag(misp_event.uuid, tag)
        ccc = self.misp.publish(misp_event, alert=False)
        self.misp_logger.info(ccc)
        misp_event = self.misp.get_event(misp_event)
        response = misp_event
        if 'errors' in response and response['errors'] is not None:
            return "Submission error: " + repr(response['errors'])
        else:
            if response['Event']['RelatedEvent']:
                e_related = ""
                for each in response['Event']['RelatedEvent']:
                    e_related = e_related + each['Event']['id'] + ", "
                return "Created ID: " + str(response['Event']['id']) + \
                       "\nRelated Events: " + ''.join(e_related)
            else:
                return "Created ID: " + str(response['Event']['id'])
    except Exception as e:
        error = traceback.format_exc()
        response = "Error occurred when submitting to MISP:\n %s" % error
        self.misp_logger.error(response)
        return response
def _capture(self, url: str, *, perma_uuid: str,
             cookies_pseudofile: Optional[Union[BufferedIOBase, str]] = None,
             depth: int = 1, listing: bool = True,
             user_agent: Optional[str] = None, referer: Optional[str] = None,
             proxy: Optional[str] = None, os: Optional[str] = None,
             browser: Optional[str] = None,
             parent: Optional[str] = None) -> Tuple[bool, str]:
    '''Launch a capture'''
    url = url.strip()
    url = refang(url)
    if not url.startswith('http'):
        url = f'http://{url}'
    if self.only_global_lookups:
        splitted_url = urlsplit(url)
        if splitted_url.netloc:
            if splitted_url.hostname:
                if splitted_url.hostname.split('.')[-1] != 'onion':
                    try:
                        ip = socket.gethostbyname(splitted_url.hostname)
                    except socket.gaierror:
                        self.logger.info('Name or service not known')
                        return False, 'Name or service not known.'
                    if not ipaddress.ip_address(ip).is_global:
                        return False, 'Capturing resources on private IPs is disabled.'
            else:
                return False, 'Unable to find hostname or IP in the query.'
    cookies = load_cookies(cookies_pseudofile)
    if not user_agent:
        # Catch case where the UA is broken on the UI, and the async submission.
        ua: str = get_config('generic', 'default_user_agent')
    else:
        ua = user_agent
    if int(depth) > int(get_config('generic', 'max_depth')):
        self.logger.warning(
            f'Not allowed to capture on a depth higher than '
            f'{get_config("generic", "max_depth")}: {depth}')
        depth = int(get_config('generic', 'max_depth'))
    self.logger.info(f'Capturing {url}')
    try:
        items = crawl(self.splash_url, url, cookies=cookies, depth=depth,
                      user_agent=ua, referer=referer, proxy=proxy,
                      log_enabled=True,
                      log_level=get_config('generic', 'splash_loglevel'))
    except Exception as e:
        self.logger.critical(
            f'Something went terribly wrong when capturing {url}.')
        raise e
    if not items:
        # broken
        self.logger.critical(
            f'Something went terribly wrong when capturing {url}.')
        return False, f'Something went terribly wrong when capturing {url}.'
    width = len(str(len(items)))
    now = datetime.now()
    dirpath = self.capture_dir / str(now.year) / f'{now.month:02}' / now.isoformat()
    safe_create_dir(dirpath)
    if os or browser:
        meta = {}
        if os:
            meta['os'] = os
        if browser:
            meta['browser'] = browser
        with (dirpath / 'meta').open('w') as _meta:
            json.dump(meta, _meta)
    # Write UUID
    with (dirpath / 'uuid').open('w') as _uuid:
        _uuid.write(perma_uuid)
    # Write no_index marker (optional)
    if not listing:
        (dirpath / 'no_index').touch()
    # Write parent UUID (optional)
    if parent:
        with (dirpath / 'parent').open('w') as _parent:
            _parent.write(parent)
    for i, item in enumerate(items):
        if 'error' in item:
            with (dirpath / 'error.txt').open('w') as _error:
                json.dump(item['error'], _error)
        # The capture went fine
        harfile = item['har']
        png = base64.b64decode(item['png'])
        html = item['html']
        last_redirect = item['last_redirected_url']
        with (dirpath / '{0:0{width}}.har'.format(i, width=width)).open('w') as _har:
            json.dump(harfile, _har)
        with (dirpath / '{0:0{width}}.png'.format(i, width=width)).open('wb') as _img:
            _img.write(png)
        with (dirpath / '{0:0{width}}.html'.format(i, width=width)).open('w') as _html:
            _html.write(html)
        with (dirpath / '{0:0{width}}.last_redirect.txt'.format(i, width=width)).open('w') as _redir:
            _redir.write(last_redirect)
        if 'childFrames' in item:
            child_frames = item['childFrames']
            with (dirpath / '{0:0{width}}.frames.json'.format(i, width=width)).open('w') as _iframes:
                json.dump(child_frames, _iframes)
        if 'cookies' in item:
            cookies = item['cookies']
            with (dirpath / '{0:0{width}}.cookies.json'.format(i, width=width)).open('w') as _cookies:
                json.dump(cookies, _cookies)
    self.redis.hset('lookup_dirs', perma_uuid, str(dirpath))
    return True, 'All good!'
def capture(self, url: str,
            cookies_pseudofile: Optional[Union[BufferedIOBase, str]] = None,
            depth: int = 1, listing: bool = True,
            user_agent: Optional[str] = None, referer: str = '',
            perma_uuid: Optional[str] = None, os: Optional[str] = None,
            browser: Optional[str] = None) -> Union[bool, str]:
    url = url.strip()
    url = refang(url)
    if not url.startswith('http'):
        url = f'http://{url}'
    if self.only_global_lookups:
        splitted_url = urlsplit(url)
        if splitted_url.netloc:
            if splitted_url.hostname:
                try:
                    ip = socket.gethostbyname(splitted_url.hostname)
                except socket.gaierror:
                    self.logger.info('Name or service not known')
                    return False
                if not ipaddress.ip_address(ip).is_global:
                    return False
        else:
            return False
    cookies = load_cookies(cookies_pseudofile)
    if not user_agent:
        # Catch case where the UA is broken on the UI, and the async submission.
        ua: str = get_config('generic', 'default_user_agent')
    else:
        ua = user_agent
    if int(depth) > int(get_config('generic', 'max_depth')):
        self.logger.warning(
            f'Not allowed to capture on a depth higher than '
            f'{get_config("generic", "max_depth")}: {depth}')
        depth = int(get_config('generic', 'max_depth'))
    items = crawl(self.splash_url, url, cookies=cookies, depth=depth,
                  user_agent=ua, referer=referer, log_enabled=True,
                  log_level=get_config('generic', 'splash_loglevel'))
    if not items:
        # broken
        return False
    if not perma_uuid:
        perma_uuid = str(uuid4())
    width = len(str(len(items)))
    dirpath = self.capture_dir / datetime.now().isoformat()
    safe_create_dir(dirpath)
    for i, item in enumerate(items):
        if not listing:
            # Write no_index marker
            (dirpath / 'no_index').touch()
        with (dirpath / 'uuid').open('w') as _uuid:
            _uuid.write(perma_uuid)
        if os or browser:
            meta = {}
            if os:
                meta['os'] = os
            if browser:
                meta['browser'] = browser
            with (dirpath / 'meta').open('w') as _meta:
                json.dump(meta, _meta)
        if 'error' in item:
            with (dirpath / 'error.txt').open('w') as _error:
                json.dump(item['error'], _error)
        # The capture went fine
        harfile = item['har']
        png = base64.b64decode(item['png'])
        html = item['html']
        last_redirect = item['last_redirected_url']
        with (dirpath / '{0:0{width}}.har'.format(i, width=width)).open('w') as _har:
            json.dump(harfile, _har)
        with (dirpath / '{0:0{width}}.png'.format(i, width=width)).open('wb') as _img:
            _img.write(png)
        with (dirpath / '{0:0{width}}.html'.format(i, width=width)).open('w') as _html:
            _html.write(html)
        with (dirpath / '{0:0{width}}.last_redirect.txt'.format(i, width=width)).open('w') as _redir:
            _redir.write(last_redirect)
        if 'childFrames' in item:
            child_frames = item['childFrames']
            with (dirpath / '{0:0{width}}.frames.json'.format(i, width=width)).open('w') as _iframes:
                json.dump(child_frames, _iframes)
        if 'cookies' in item:
            cookies = item['cookies']
            with (dirpath / '{0:0{width}}.cookies.json'.format(i, width=width)).open('w') as _cookies:
                json.dump(cookies, _cookies)
    self._set_capture_cache(dirpath)
    return perma_uuid
def process_body_iocs(self, email_object=None):
    if email_object:
        body = html.unescape(
            email_object.email.get_body(
                preferencelist=('html', 'plain')).get_payload(
                    decode=True).decode('utf8', 'surrogateescape'))
    else:
        body = self.clean_email_body

    # Cleanup body content
    # Depending on the source of the mail, there is some cleanup to do.
    # Ignore lines in body of message
    for ignoreline in self.config.ignorelist:
        body = re.sub(rf'^{ignoreline}.*\n?', '', body, flags=re.MULTILINE)

    # Remove everything after the stopword from the body
    body = body.split(self.config.stopword, 1)[0]

    # Add tags to the event if keywords are found in the mail
    for tag in self.config.tlptags:
        for alternativetag in self.config.tlptags[tag]:
            if alternativetag in body.lower():
                self.misp_event.add_tag(tag)

    # Prepare extraction of IOCs
    # Refang email data
    body = refang(body)

    # Extract and add hashes
    contains_hash = False
    for h in set(re.findall(hashmarker.MD5_REGEX, body)):
        contains_hash = True
        attribute = self.misp_event.add_attribute(
            'md5', h, enforceWarninglist=self.config.enforcewarninglist)
        if email_object:
            email_object.add_reference(attribute.uuid, 'contains')
        if self.config.sighting:
            self.sightings_to_add.append((h, self.config.sighting_source))
    for h in set(re.findall(hashmarker.SHA1_REGEX, body)):
        contains_hash = True
        attribute = self.misp_event.add_attribute(
            'sha1', h, enforceWarninglist=self.config.enforcewarninglist)
        if email_object:
            email_object.add_reference(attribute.uuid, 'contains')
        if self.config.sighting:
            self.sightings_to_add.append((h, self.config.sighting_source))
    for h in set(re.findall(hashmarker.SHA256_REGEX, body)):
        contains_hash = True
        attribute = self.misp_event.add_attribute(
            'sha256', h, enforceWarninglist=self.config.enforcewarninglist)
        if email_object:
            email_object.add_reference(attribute.uuid, 'contains')
        if self.config.sighting:
            self.sightings_to_add.append((h, self.config.sighting_source))
    if contains_hash:
        [self.misp_event.add_tag(tag) for tag in self.config.hash_only_tags]

    # Extract network IOCs
    urllist = []
    urllist += re.findall(urlmarker.WEB_URL_REGEX, body)
    urllist += re.findall(urlmarker.IP_REGEX, body)
    if self.debug:
        syslog.syslog(str(urllist))

    hostname_processed = []
    # Add IOCs and expanded information to MISP
    for entry in set(urllist):
        ids_flag = True
        self.f.decode(entry)
        domainname = self.f.get_domain()
        if domainname in self.config.excludelist:
            # Ignore the entry
            continue
        hostname = self.f.get_host()
        scheme = self.f.get_scheme()
        resource_path = self.f.get_resource_path()
        if self.debug:
            syslog.syslog(domainname)
        if domainname in self.config.internallist and self.urlsonly is False:
            # Add link to internal reference unless in urlsonly mode
            attribute = self.misp_event.add_attribute(
                'link', entry, category='Internal reference',
                to_ids=False, enforceWarninglist=False)
            if email_object:
                email_object.add_reference(attribute.uuid, 'contains')
        elif domainname in self.config.externallist or self.urlsonly is False:
            # External analysis
            attribute = self.misp_event.add_attribute(
                'link', entry, category='External analysis',
                to_ids=False, enforceWarninglist=False)
            if email_object:
                email_object.add_reference(attribute.uuid, 'contains')
        elif domainname in self.config.externallist or self.urlsonly:
            # External analysis
            if self.urlsonly:
                comment = self.subject + " (from: " + self.sender + ")"
            else:
                comment = ""
            attribute = self.misp.add_attribute(self.urlsonly, {
                "type": 'link',
                "value": entry,
                "category": 'External analysis',
                "to_ids": False,
                "comment": comment})
            for tag in self.config.tlptags:
                for alternativetag in self.config.tlptags[tag]:
                    if alternativetag in self.subject.lower():
                        self.misp.tag(attribute["uuid"], tag)
                        new_subject = comment.replace(alternativetag, '')
                        self.misp.change_comment(attribute["uuid"], new_subject)
        else:
            # The URL is probably an indicator.
            comment = ""
            if (domainname in self.config.noidsflaglist) or \
                    (hostname in self.config.noidsflaglist):
                ids_flag = False
                comment = "Known host (mostly for connectivity test or IP lookup)"
            if self.debug:
                syslog.syslog(str(entry))
            if scheme:
                if is_ip(hostname):
                    attribute = self.misp_event.add_attribute(
                        'url', entry, to_ids=False,
                        enforceWarninglist=self.config.enforcewarninglist)
                    if email_object:
                        email_object.add_reference(attribute.uuid, 'contains')
                else:
                    if resource_path:
                        # URL has a path: ignore the warning list
                        attribute = self.misp_event.add_attribute(
                            'url', entry, to_ids=ids_flag,
                            enforceWarninglist=False, comment=comment)
                    else:
                        # URL has no path
                        attribute = self.misp_event.add_attribute(
                            'url', entry, to_ids=ids_flag,
                            enforceWarninglist=self.config.enforcewarninglist,
                            comment=comment)
                    if email_object:
                        email_object.add_reference(attribute.uuid, 'contains')
                if self.config.sighting:
                    self.sightings_to_add.append(
                        (entry, self.config.sighting_source))
            if hostname in hostname_processed:
                # Hostname already processed.
                continue
            hostname_processed.append(hostname)
            if self.config.sighting:
                self.sightings_to_add.append(
                    (hostname, self.config.sighting_source))
            if self.debug:
                syslog.syslog(hostname)
            comment = ''
            port = self.f.get_port()
            if port:
                comment = f'on port: {port}'
            if is_ip(hostname):
                attribute = self.misp_event.add_attribute(
                    'ip-dst', hostname, to_ids=ids_flag,
                    enforceWarninglist=self.config.enforcewarninglist,
                    comment=comment)
                if email_object:
                    email_object.add_reference(attribute.uuid, 'contains')
            else:
                related_ips = []
                if HAS_DNS and self.config.enable_dns:
                    try:
                        syslog.syslog(hostname)
                        for rdata in dns.resolver.query(hostname, 'A'):
                            if self.debug:
                                syslog.syslog(str(rdata))
                            related_ips.append(rdata.to_text())
                    except Exception as e:
                        if self.debug:
                            syslog.syslog(str(e))
                if related_ips:
                    hip = MISPObject(name='ip-port')
                    hip.add_attribute(
                        'hostname', value=hostname, to_ids=ids_flag,
                        enforceWarninglist=self.config.enforcewarninglist,
                        comment=comment)
                    for ip in set(related_ips):
                        hip.add_attribute(
                            'ip', type='ip-dst', value=ip, to_ids=False,
                            enforceWarninglist=self.config.enforcewarninglist)
                    self.misp_event.add_object(hip)
                    if email_object:
                        email_object.add_reference(hip.uuid, 'contains')
                else:
                    if self.urlsonly is False:
                        attribute = self.misp_event.add_attribute(
                            'hostname', value=hostname, to_ids=ids_flag,
                            enforceWarninglist=self.config.enforcewarninglist,
                            comment=comment)
                        if email_object:
                            email_object.add_reference(attribute.uuid, 'contains')
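# Sketch of the hash-extraction step above with explicit regexes, since the
# hashmarker module is external to this snippet; the patterns (bare hex of
# MD5/SHA1/SHA256 lengths) mirror the ones the hash|filename branch earlier
# in this section uses, but extract_hashes itself is hypothetical.
import re

MD5_RE = re.compile(r'\b[a-fA-F0-9]{32}\b')
SHA1_RE = re.compile(r'\b[a-fA-F0-9]{40}\b')
SHA256_RE = re.compile(r'\b[a-fA-F0-9]{64}\b')

def extract_hashes(body):
    """Return the unique hash-like tokens found in a refanged mail body."""
    return {
        'md5': set(MD5_RE.findall(body)),
        'sha1': set(SHA1_RE.findall(body)),
        'sha256': set(SHA256_RE.findall(body)),
    }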
async def _capture(self, url: str, *, perma_uuid: str,
                   cookies_pseudofile: Optional[Union[BufferedIOBase, str]] = None,
                   listing: bool = True, user_agent: Optional[str] = None,
                   referer: Optional[str] = None,
                   headers: Optional[Dict[str, str]] = None,
                   proxy: Optional[Union[str, Dict]] = None,
                   os: Optional[str] = None, browser: Optional[str] = None,
                   parent: Optional[str] = None) -> Tuple[bool, str]:
    '''Launch a capture'''
    url = url.strip()
    url = refang(url)
    if not url.startswith('http'):
        url = f'http://{url}'
    splitted_url = urlsplit(url)
    if self.only_global_lookups:
        if splitted_url.netloc:
            if splitted_url.hostname and splitted_url.hostname.split('.')[-1] != 'onion':
                try:
                    ip = socket.gethostbyname(splitted_url.hostname)
                except socket.gaierror:
                    self.logger.info('Name or service not known')
                    return False, 'Name or service not known.'
                if not ipaddress.ip_address(ip).is_global:
                    return False, 'Capturing resources on private IPs is disabled.'
        else:
            return False, 'Unable to find hostname or IP in the query.'

    # Check if onion
    if (not proxy and splitted_url.netloc and splitted_url.hostname
            and splitted_url.hostname.split('.')[-1] == 'onion'):
        proxy = get_config('generic', 'tor_proxy')

    cookies = load_cookies(cookies_pseudofile)
    if not user_agent:
        # Catch case where the UA is broken on the UI, and the async submission.
        self.user_agents.user_agents  # triggers an update if needed
        ua: str = self.user_agents.default['useragent']
    else:
        ua = user_agent

    self.logger.info(f'Capturing {url}')
    try:
        async with Capture(proxy=proxy) as capture:
            capture.prepare_cookies(cookies)
            capture.user_agent = ua
            if headers:
                capture.http_headers = headers
            await capture.prepare_context()
            entries = await capture.capture_page(url, referer=referer)
    except Exception as e:
        self.logger.exception(
            f'Something went terribly wrong when capturing {url} - {e}')
        return False, f'Something went terribly wrong when capturing {url}.'
    if not entries:
        # broken
        self.logger.critical(
            f'Something went terribly wrong when capturing {url}.')
        return False, f'Something went terribly wrong when capturing {url}.'
    now = datetime.now()
    dirpath = self.capture_dir / str(now.year) / f'{now.month:02}' / now.isoformat()
    safe_create_dir(dirpath)
    if os or browser:
        meta = {}
        if os:
            meta['os'] = os
        if browser:
            meta['browser'] = browser
        with (dirpath / 'meta').open('w') as _meta:
            json.dump(meta, _meta)
    # Write UUID
    with (dirpath / 'uuid').open('w') as _uuid:
        _uuid.write(perma_uuid)
    # Write no_index marker (optional)
    if not listing:
        (dirpath / 'no_index').touch()
    # Write parent UUID (optional)
    if parent:
        with (dirpath / 'parent').open('w') as _parent:
            _parent.write(parent)
    if 'error' in entries:
        with (dirpath / 'error.txt').open('w') as _error:
            json.dump(entries['error'], _error)
    if 'har' not in entries:
        return False, entries['error'] if entries['error'] else "Unknown error"
    with (dirpath / '0.har').open('w') as _har:
        json.dump(entries['har'], _har)
    if 'png' in entries and entries['png']:
        with (dirpath / '0.png').open('wb') as _img:
            _img.write(entries['png'])
    if 'html' in entries and entries['html']:
        with (dirpath / '0.html').open('w') as _html:
            _html.write(entries['html'])
    if 'last_redirected_url' in entries and entries['last_redirected_url']:
        with (dirpath / '0.last_redirect.txt').open('w') as _redir:
            _redir.write(entries['last_redirected_url'])
    if 'cookies' in entries and entries['cookies']:
        with (dirpath / '0.cookies.json').open('w') as _cookies:
            json.dump(entries['cookies'], _cookies)
    await self.redis.hset('lookup_dirs', perma_uuid, str(dirpath))
    return True, 'All good!'
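# The URL normalisation shared by the capture methods above, isolated as a
# sketch; the function name normalise_capture_url is hypothetical, but the
# steps (strip, refang, default http:// scheme) come straight from those
# snippets, assuming the PyPI `defang` package they import refang from.
from defang import refang

def normalise_capture_url(url: str) -> str:
    url = url.strip()
    url = refang(url)          # undo hxxp:// and [.] style obfuscation
    if not url.startswith('http'):
        url = f'http://{url}'  # default scheme when none was given
    return url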
                        '--servername', dest='servername', help='servername')
    parser.add_argument('-c', '--cert', dest='cert_file', help='cert file')
    parser.add_argument('-t', '--tsv', action='store_true', dest='output_tsv',
                        default=False, help='output tsv')
    parser.add_argument('-j', '--json', action='store_true', dest='output_json',
                        default=False, help='output json')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_options()
    if args.servername:
        servername = defang.refang(args.servername)
        cert_txt_file = get_save_pem(servername)
    if args.cert_file:
        cert_txt_file = args.cert_file
    cert_items = get_certificate_items_from_file(cert_txt_file)
    output(cert_items, args.output_tsv, args.output_json)
def defang_func():
    # Determine fonts
    custom_fig = Figlet(font='doom')
    subtext_fig = Figlet(font='digital')
    sub = "With all the stuff you care about & none of the stuff you don't!"  # Subtext for menu

    # Main menu
    print(Fore.CYAN + custom_fig.renderText('URL Pacifier v4.1'))
    print(Style.DIM + Fore.YELLOW + subtext_fig.renderText(sub.center(40)))
    print(Fore.CYAN +
          "---------------------------------------------------------------------------")
    print("\n")
    uput = input("Would you like to pacify or depacify the URL?\n"
                 "Press 1.) to pacify or 2.) to depacify: ")
    if uput == '1':
        malURL = input("\nInsert your Malicious URL: ")
        output = defang(malURL, all_dots=True, colon=True)
        print("\nHere's the pacified link:\n" + output + "\n")
        user_choice()
    elif uput == '2':
        malURL = input("\nInsert your Malicious URL: ")
        output = refang(malURL)
        # Submit the refanged URL to the VirusTotal database to see whether
        # it is malicious or not
        try:
            resp = vtotal.request("url/scan", params={"url": output},
                                  method="POST")
            url_resp = resp.json()
            scan_id = url_resp["scan_id"]
            analysis_resp = vtotal.request("url/report",
                                           params={"resource": scan_id})
            # The results come back as JSON data
            jdata = analysis_resp.json()
            # Filter the report so we know whether the URL is malicious;
            # if there are any positives, output the findings
            if jdata['positives'] > 0:
                # Output how many positives were found out of the total scans
                print(Fore.RED + "\nVirus Total has detected that " +
                      str(jdata['positives']) + " out of " +
                      str(jdata['total']) +
                      " scan engines have detected this URL as malicious.\n")
                print("Anti-Virus findings listed below: \n")
                # Walk the JSON scan dictionary and output each result
                # where 'detected' is true
                for value in jdata['scans']:
                    if jdata['scans'][value]['detected'] == True:
                        pprint(jdata['scans'][value]['result'])
            # If positives == 0, VT considers the URL safe
            else:
                print(Fore.GREEN +
                      "\nVirus Total does not think the link is malicious - "
                      "further testing advised.")
        # Catch any KeyErrors
        except KeyError as e:
            print(Fore.MAGENTA +
                  f"\nKey Error - retry running URL.\nReason for Error: {e}")
        # Catch any errors raised by VT
        except VirustotalError as err:
            print(Fore.MAGENTA +
                  f"\nAn error occurred: {err} \nCatching and continuing "
                  "with program.")
        print(Fore.CYAN + "\nHere's the depacified link (be careful!):\n" +
              output + "\n")
        # Return the refanged URL for further testing
        user_choice()
    else:
        print("Please input a valid option!\n")
        defang_func()
# Store values in the args table
args = parser.parse_args()

# if/else chain controlling whether the program opens the interactive menu
# or executes straight from the command line

# Handles the case where -d and -vt are not in use
if args.url is not None and not args.depacify and not args.vt:
    badURL = args.url
    output = defang(badURL, all_dots=True, colon=True)
    print("Here's the safe link:\n" + output)
    exit()
# The -d flag "depacifies" (refangs) the URL
elif args.depacify:
    badURL = args.url
    output = refang(badURL)
    print("Here's the depacified link (be careful!):\n" + output)
    exit()
# The -vt flag refangs the URL and runs a scan against the VT database
elif args.vt:
    badURL = args.url
    output = refang(badURL)
    try:
        resp = vtotal.request("url/scan", params={"url": output},
                              method="POST")
        url_resp = resp.json()
def get(self):
    """simple refang of a string"""
    if self.get_args(self.parser):
        url = request.query_string.decode()[4:]
        self.output = refang(url)
    return self.response
def parse_domain_list(domain_list):
    domain_list = domain_list.split(',')
    domain_list = [defang.refang(d) for d in domain_list]
    domain_list = [get_domain_from_dns(d) for d in domain_list]
    return domain_list
def extract_urls(line):
    # MISC CLEANUP
    line = line.replace('[:]', ':')
    line = line.replace('[.]', '.')
    line = line.replace('[://]', '://')
    line = line.replace('"', '')
    line = refang(line)
    line = line.strip()
    line = line.decode("utf-8", "ignore").encode("utf-8", "ignore").lower()
    domains = line.split()
    newurls = []
    purgeurls = []
    skipurls = []
    for domain in domains:
        domain = domain.strip()
        if len(domain) > 2000:
            if args.verbose:
                print ">>> DOMAIN " + domain + " TOO LONG. Skipping.."
            continue
        ## CONVERT INPUT DOMAINS INTO PROPER URLs
        if (domain[0:4] != 'http' and domain[0] != '-' and domain[0] != '#'):
            newurl = 'http://' + domain
            newurl = urlparse(newurl)
            newurls.append(newurl.geturl())
        elif domain[0] == '-':
            if domain[1:5] != 'http':
                purgeurl = 'http://' + domain
                purgeurl = purgeurl.replace('http://-', '-http://')
                purgeurl = urlparse(purgeurl)
                purgeurls.append(purgeurl.geturl())
            else:
                purgeurl = urlparse(domain)
                purgeurls.append(purgeurl.geturl())
        elif domain[0] == '#':
            if domain[1:5] != 'http':
                skipurl = 'http://' + domain
                skipurl = skipurl.replace('http://#', '#http://')
                skipurl = urlparse(skipurl)
                skipurls.append(skipurl.geturl())
            else:
                skipurl = urlparse(domain)
                skipurls.append(skipurl.geturl())
        else:
            newurl = urlparse(domain)
            newurls.append(newurl.geturl())

    ## EXTRACT ALL URLs ON EACH LINE OF INPUT
    ## https://mail.python.org/pipermail/tutor/2002-February/012481.html
    urls = '(%s)' % '|'.join("""http https""".split())
    ltrs = r'\w'
    gunk = r'/#~:.?+=&%@!\-'
    punc = r'.:?\-'
    any = "%(ltrs)s%(gunk)s%(punc)s" % {'ltrs': ltrs, 'gunk': gunk, 'punc': punc}
    url = r"""
        \b               # start at word boundary
        (                # begin \1
            %(urls)s :   # need resource and a colon
            [%(any)s] +? # followed by one or more of any valid character,
                         # but be conservative and take only what you
                         # need to....
        )                # end \1
        (?=              # look-ahead non-consumptive assertion
            [%(punc)s]*  # either 0 or more punctuation
            [^%(any)s]   # followed by a non-url char
            |            # or else
            $            # then end of the string
        )
    """ % {'urls': urls, 'any': any, 'punc': punc}
    url_re = re.compile(url, re.VERBOSE)
    newmatch = url_re.findall(" ".join(newurls))
    skipmatch = url_re.findall(" ".join(skipurls))
    purgematch = url_re.findall(" ".join(purgeurls))

    extractedurls = {}
    for item in newmatch:
        url = urlparse(item[0])
        hostname = url.netloc
        hostname = hostname.strip()
        if not re.search(r'[a-zA-Z]+\.', hostname):
            continue
        url = url.geturl()
        extractedurls[url] = 0
    for item in skipmatch:
        url = urlparse(item[0])
        hostname = url.netloc
        hostname = hostname.strip()
        if not re.search(r'[a-zA-Z]+\.', hostname):
            continue
        url = url.geturl()
        extractedurls[url] = -1
    for item in purgematch:
        url = urlparse(item[0])
        hostname = url.netloc
        hostname = hostname.strip()
        if not re.search(r'[a-zA-Z]+\.', hostname):
            continue
        url = url.geturl()
        extractedurls[url] = 1
    return extractedurls
def refang_args(arguments):
    for i, arg in enumerate(arguments):
        if '[.]' in arg:
            arguments[i] = refang(arg)
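# Example (hypothetical argv): refang_args rewrites the list in place,
# touching only the entries that carry the [.] marker; assumes the same
# `defang` package refang() as the snippets above.
args = ['lookup', 'bad[.]example', '--json']
refang_args(args)
assert args == ['lookup', 'bad.example', '--json']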