def acquire_semaphore(self):
    """Wait for the semaphore to become available, or return immediately if this module does not use a semaphore."""
    if self.semaphore_name is None:
        logging.warning("semaphore name is None for {}".format(self))
        return False

    if self.cancel_analysis_flag:
        logging.info("called acquire_semaphore() for module {} but cancel_analysis_flag is set".format(self))
        return False

    # semaphores can be globally disabled
    if not saq.SEMAPHORES_ENABLED:
        logging.debug("semaphores are disabled - not using semaphore {}".format(self.semaphore_name))
        return False

    # create a new semaphore client to use
    self.semaphore = NetworkSemaphoreClient()
    #logging.debug("analysis module {0} acquiring semaphore {1}".format(self, self.semaphore_name))

    try:
        if not self.semaphore.acquire(self.semaphore_name):
            raise RuntimeError("acquire returned False")
        #logging.debug("analysis module {0} acquired semaphore {1}".format(self, self.semaphore_name))
    except Exception as e:
        logging.error("unable to acquire semaphore {}: {}".format(self.semaphore_name, e))
        report_exception()
        self.semaphore = None
        # TODO fall back to something else we can use
        return False

    return True
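# A minimal sketch of how acquire_semaphore() and release_semaphore() (defined later in this
# section) are typically paired around a unit of work; the analyze_with_semaphore() helper and
# the execute_analysis() call are assumptions for illustration, not part of the original module.
def analyze_with_semaphore(self, *args, **kwargs):
    acquired = self.acquire_semaphore()
    try:
        return self.execute_analysis(*args, **kwargs)
    finally:
        # release_semaphore() is a no-op when no semaphore was acquired
        if acquired:
            self.release_semaphore()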
def post_analysis(self, root):
    try:
        root.sync_profile_points()
    except Exception as e:
        logging.error("unable to sync profile points for {}: {}".format(root, e))
        report_exception()
def load_path_regexes(self):
    logging.debug("loading path regexes from {}".format(self.regex_path))
    path_regexes = []

    try:
        with open(self.regex_path, 'r') as fp:
            for line in fp:
                line = line.strip()

                # skip comments
                if line.startswith('#'):
                    continue

                # skip blank lines
                if line == '':
                    continue

                # try to compile it
                try:
                    path_regexes.append(re.compile(line, re.I))
                except Exception as e:
                    logging.error("regular expression {} does not compile: {}".format(line, e))

        self.path_regexes = path_regexes
        logging.debug("loaded {} path regexes".format(len(self.path_regexes)))
    except Exception as e:
        logging.error("unable to load path regexes from {}: {}".format(self.regex_path, e))
        report_exception()
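# A minimal sketch of how the compiled patterns might be consumed; the matches_path_regex()
# helper is an assumption for illustration, reusing the self.path_regexes attribute populated
# by load_path_regexes() above.
def matches_path_regex(self, path):
    """Return True if the given path matches any of the loaded path regexes."""
    return any(regex.search(path) for regex in self.path_regexes)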
def loop(self):
    enable_cached_db_connections()

    while True:
        try:
            result = self.execute()

            # if we did something then we immediately look for more work unless we're shutting down
            if result == WORK_SUBMITTED:
                if self.shutdown_event.is_set():
                    break
            # if there was no work available to be submitted then wait a second and look again
            elif result == NO_WORK_AVAILABLE:
                if self.shutdown_event.wait(1):
                    break
            # if there were no NODES available then wait a little while longer and look again
            elif result == NO_NODES_AVAILABLE:
                if self.shutdown_event.wait(self.node_status_update_frequency / 2):
                    break
            elif result == NO_WORK_SUBMITTED:
                if self.shutdown_event.wait(1):
                    break

        except Exception as e:
            logging.error("unexpected exception thrown in loop for {}: {}".format(self, e))
            report_exception()
            if self.shutdown_event.wait(1):
                break

    disable_cached_db_connections()
def execute_with_lock(self, *args, **kwargs):
    # we use this lock to determine if a hunt is running, and, to wait for execution to complete.
    logging.debug(f"waiting for execution lock on {self}")
    self.execution_lock.acquire()

    # remember the last time we executed
    self.last_executed_time = local_time()

    # notify the manager that this is now executing
    # this releases the manager thread to continue processing hunts
    logging.debug(f"clearing barrier for {self}")
    self.startup_barrier.wait()

    submission_list = None

    try:
        logging.info(f"executing {self}")
        start_time = local_time()
        result = self.execute(*args, **kwargs)
        self.record_execution_time(local_time() - start_time)
        return result
    except Exception as e:
        logging.error(f"{self} failed: {e}")
        report_exception()
        self.record_hunt_exception(e)
    finally:
        self.startup_barrier.reset()
        self.execution_lock.release()
def check_watched_files(self):
    # is it time to check the files this module is watching?
    if self.next_check_watched_files is not None and datetime.datetime.now() < self.next_check_watched_files:
        return

    # schedule the next check after the configured number of seconds
    self.next_check_watched_files = datetime.datetime.now() + \
        datetime.timedelta(seconds=saq.CONFIG['global'].getint('check_watched_files_frequency'))

    for watched_file in self.watched_files.values():
        try:
            current_mtime = os.stat(watched_file.path).st_mtime
            if watched_file.last_mtime != current_mtime:
                logging.info("detected change to {}".format(watched_file.path))
                watched_file.last_mtime = current_mtime

                try:
                    watched_file.callback()
                except Exception as e:
                    logging.error("callback failed for {} file {}: {}".format(self, watched_file.path, e))
                    report_exception()

        except Exception as e:
            logging.error("unable to check file {}: {}".format(watched_file.path, e))
            report_exception()
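# A minimal sketch of how a file could be registered so that check_watched_files() above
# would pick it up; the WatchedFile class and watch_file() helper are assumptions for
# illustration, not the original implementation.
class WatchedFile:
    def __init__(self, path, callback):
        self.path = path
        self.callback = callback
        self.last_mtime = None  # updated by check_watched_files() when a change is detected

def watch_file(self, path, callback):
    """Register a file whose modification time should be monitored."""
    self.watched_files[path] = WatchedFile(path, callback)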
def execute_post_analysis(self, db, c):
    import saq.database
    self.initialize_state(None)

    # we only look at faqueue alerts
    if self.root.alert_type != 'faqueue':
        return True

    c.execute("SELECT disposition FROM alerts WHERE uuid = %s", (self.root.uuid,))
    row = c.fetchone()
    if row is None:
        return False # no alert yet - try again later

    if row[0] is None:
        return False # no disposition yet -- try again later

    new_disposition = row[0]

    # has the disposition changed?
    if self.state and new_disposition == self.state:
        logging.debug("disposition for alert {} has not changed".format(self.root))
        return False # try again later

    # remember the disposition
    self.state = new_disposition

    crits_analysis_value = None
    if new_disposition == DISPOSITION_FALSE_POSITIVE:
        crits_analysis_value = 'Informational'
    else:
        crits_analysis_value = 'Analyzed'

    if 'indicator' not in self.root.details:
        logging.error("missing indicator key in faqueue alert {}".format(self.root))
        return True

    if 'crits_id' not in self.root.details['indicator']:
        logging.error("missing crits_id key in faqueue alert {}".format(self.root))
        return True

    # update mongo
    crits_id = self.root.details['indicator']['crits_id']
    logging.info("updating crits_id {} to status {}".format(crits_id, crits_analysis_value))

    try:
        total_crits_indicators_updated = update_status(crits_id, crits_analysis_value)
        logging.info("updated {} crits indicators".format(total_crits_indicators_updated))
    except Exception as e:
        logging.error(f"unable to update crits indicator {crits_id} to {crits_analysis_value}: {e}")
        report_exception()

    return False # it can change again so we try again later if the disposition changes
def root_analysis_completed(self, root):
    if root.delayed:
        return

    # mark the analysis as completed
    try:
        with get_db_connection('cloudphish') as db:
            c = db.cursor()
            c.execute("UPDATE analysis_results SET status = %s WHERE sha256_url = UNHEX(%s)",
                      (STATUS_ANALYZED, root.details[KEY_DETAILS_SHA256_URL],))
            db.commit()
    except Exception as e:
        logging.error("unable to update database: {}".format(e))
        report_exception()

    # delete the work directory
    if not self.keep_work_dir:
        try:
            shutil.rmtree(root.storage_dir)
        except Exception as e:
            logging.error("unable to delete work directory {}: {}".format(root.storage_dir, e))
            report_exception()
def loop(self): logging.info(f"started {self.name} for route {self.route}") while not self.control_event.is_set(): try: sleep_time = self.execute() except ControlledStop: logging.info("caught controlled stop") break except Exception as e: logging.error(f"uncaught exception: {e}") report_exception() sleep_time = 30 finally: try: saq.db.close() except Exception as e: logging.error(f"unable to close db connection: {e}") report_exception() if sleep_time is None: sleep_time = 0 self.control_event.wait(sleep_time) logging.info(f"stopped {self.name}")
def stdin_reader_loop(self):
    try:
        while True:
            data = self.stdin_reader_pipe.read(BLOCK_SIZE)
            if len(data) == 0:
                break

            logging.debug("read {} bytes from stdin".format(len(data)))
            self.stdin_timestamp = int(time.time()) # not used?

            # in text mode we translate to binary to send it over
            if self.pipe_mode == MODE_TEXT:
                data = data.encode()

            # we send the data in chunks
            send_data_block(self.connection, data)

        # tell the server we're done sending by sending a block size of 0
        logging.debug("finished sending stdin data blocks (sending zero block)")
        send_block0(self.connection)

    except Exception as e:
        logging.error("unable to read from pipe: {}".format(e))
        report_exception()

    logging.debug("thread exited")
def initialize_collector(self):
    # the list of EWSCollectionBaseConfiguration objects we're operating
    self.account_configurations = []

    for section in saq.CONFIG.sections():
        if section.startswith('ews_'):
            if not saq.CONFIG[section].getboolean('enabled', fallback=False):
                continue

            module_name = saq.CONFIG[section]['module']
            try:
                _module = importlib.import_module(module_name)
            except Exception as e:
                logging.error(f"unable to import ews account config module {module_name}: {e}")
                report_exception()
                continue

            class_name = saq.CONFIG[section]['class']
            try:
                module_class = getattr(_module, class_name)
            except AttributeError as e:
                logging.error("class {} does not exist in module {} in ews account config {}".format(
                              class_name, module_name, section))
                report_exception()
                continue

            account_config = module_class(self)
            account_config.load_from_config(section)
            logging.info(f"loaded EWS account configuration {section}")
            self.account_configurations.append(account_config)
def execute_threaded_loop(self):
    # continue to execute until analysis has completed
    while True:
        try:
            self.execute_threaded()
        except Exception as e:
            logging.error("{} failed threaded execution on {}: {}".format(self, self.root, e))
            report_exception()
            return

        # wait for self.threaded_execution_frequency seconds before we execute again
        # make sure we exit when we're asked to
        timeout = self.threaded_execution_frequency
        while not self.engine.cancel_analysis_flag and \
              not self.threaded_execution_stop_event.is_set() and \
              timeout > 0:
            time.sleep(1)
            timeout -= 1

        if self.engine.cancel_analysis_flag:
            return

        if self.threaded_execution_stop_event.is_set():
            return
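# A sketch of an alternative to the one-second polling countdown above, assuming
# threaded_execution_stop_event is a threading.Event and threaded_execution_frequency is a
# number of seconds: Event.wait() with a timeout blocks up to the full frequency but returns
# immediately when the stop event is set. The cancel_analysis_flag still has to be polled
# afterward since it is a plain flag rather than an Event. The helper name is hypothetical.
def wait_for_next_execution(self):
    """Return True if the threaded loop should exit, False if it should execute again."""
    if self.threaded_execution_stop_event.wait(self.threaded_execution_frequency):
        return True  # stop was requested during the wait
    return self.engine.cancel_analysis_flag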
def is_uncommon_network(self, value):
    try:
        return self._is_uncommon_fqdn(value)
    except Exception as e:
        logging.error("unable to query brocess: {}".format(e))
        report_exception()

    return False
def prepare_submission_files(self, submission):
    # we COPY the files over to another directory for transfer
    # we'll DELETE them later if we are able to copy them all and then insert the entry into the database
    if submission.files:
        target_dir = self.get_submission_target_dir(submission)
        if os.path.exists(target_dir):
            logging.warning(f"target directory {target_dir} already exists")
        else:
            try:
                os.mkdir(target_dir)
                for f in submission.files:
                    # this could be a tuple of (source_file, target_name)
                    if isinstance(f, tuple):
                        f = f[0]

                    target_path = os.path.join(target_dir, os.path.basename(f))
                    # TODO use hard links instead of copies to reduce I/O
                    shutil.copy2(f, target_path)
                    logging.debug(f"copied file from {f} to {target_path}")
            except Exception as e:
                logging.error(f"I/O error moving files into {target_dir}: {e}")
                report_exception()
def load_blacklist(self):
    logging.debug("loading blacklist from {}".format(self.blacklist_path))
    blacklisted_fqdn = []
    blacklisted_cidr = []

    try:
        with open(self.blacklist_path, 'r') as fp:
            for line in fp:
                line = line.strip()

                # skip comments
                if line.startswith('#'):
                    continue

                # skip blank lines
                if line == '':
                    continue

                if is_ipv4(line):
                    blacklisted_cidr.append(IPv4Network(add_netmask(line)))
                else:
                    blacklisted_fqdn.append(line)

        self.blacklisted_cidr = blacklisted_cidr
        self.blacklisted_fqdn = blacklisted_fqdn
        logging.debug("loaded {} cidr {} fqdn blacklisted items".format(
                      len(self.blacklisted_cidr), len(self.blacklisted_fqdn)))
    except Exception as e:
        logging.error("unable to load blacklist {}: {}".format(self.blacklist_path, e))
        report_exception()
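# A minimal sketch of how the loaded blacklist might be consulted; the is_blacklisted()
# helper is an assumption for illustration, reusing the blacklisted_cidr and blacklisted_fqdn
# attributes populated by load_blacklist() above.
from ipaddress import IPv4Address

def is_blacklisted(self, value):
    """Return True if the given IPv4 address or FQDN is on the blacklist."""
    try:
        address = IPv4Address(value)
        return any(address in network for network in self.blacklisted_cidr)
    except ValueError:
        # not an IPv4 address -- treat it as an FQDN (exact match or subdomain)
        value = value.lower().rstrip('.')
        return any(value == fqdn.lower() or value.endswith('.' + fqdn.lower())
                   for fqdn in self.blacklisted_fqdn)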
def __init__(self, *args, **kwargs):
    # load the scripts
    self.pscripts = []
    for pscript_path in glob.glob(os.path.join(saq.SAQ_HOME, 'etc', 'pp', 'pscript', '*.p')):
        logging.info("loading pscript {}".format(pscript_path))
        try:
            with open(pscript_path, 'r') as fp:
                line_number = 1
                for line in fp.readlines():
                    try:
                        p = compile_pscript(line)
                    except Exception as e:
                        logging.error("unable to load line #{} of {}: {}".format(
                                      line_number, pscript_path, e))
                        report_exception()
                        continue
                    finally:
                        line_number += 1

                    logging.info("loaded pscript {}".format(p))
                    self.pscripts.append(p)
        except Exception as e:
            logging.error("unable to load pscript {}: {}".format(pscript_path, e))
            report_exception()
def clear(uuid, lock_uuid, db, c):
    validate_uuid(uuid)
    validate_uuid(lock_uuid)

    # make sure this uuid is locked with the given lock_uuid
    # this is less a security feature than it is a mistake-blocker :-)
    c.execute("SELECT uuid FROM locks WHERE uuid = %s AND lock_uuid = %s", (uuid, lock_uuid))
    row = c.fetchone()
    if row is None:
        logging.warning("request to clear uuid {} with invalid lock uuid {}".format(uuid, lock_uuid))
        abort(Response("nope", 400))

    target_dir = storage_dir_from_uuid(uuid)
    if saq.CONFIG['engine']['work_dir'] and not os.path.isdir(target_dir):
        target_dir = workload_storage_dir(uuid)

    if not os.path.isdir(target_dir):
        logging.error("request to clear unknown target {}".format(target_dir))
        abort(Response("unknown target {}".format(target_dir)))

    logging.info("received request to clear {} from {}".format(uuid, request.remote_addr))

    try:
        shutil.rmtree(target_dir)
    except Exception as e:
        logging.error("unable to clear {}: {}".format(target_dir, e))
        report_exception()
        abort(Response("clear failed"))

    # looks like it worked
    return json_result({'result': True})
def handle_request(self, connection):
    try:
        connection.handle_request_execute()
    except Exception as e:
        logging.error("unable to handle request: {}".format(e))
        report_exception()
    finally:
        connection.cleanup()
def execute_extended_collection(self):
    try:
        self.collect_bricata_alerts()
    except Exception as e:
        logging.error(f"unable to collect alerts: {e}")
        report_exception()

    return self.query_frequency.total_seconds()
def create_engine(self, cls, *args, **kwargs):
    try:
        self.tracked_engine = cls(*args, **kwargs)
        return self.tracked_engine
    except Exception as e:
        logging.error("unable to create engine {}: {}".format(cls, e))
        report_exception()
        self.fail("unable to create engine {}: {}".format(cls, e))
def execute(self, remediation):
    # execute this remediation
    try:
        remediation_result = self.execute_request(remediation)
        if remediation_result is None:
            raise RuntimeError("forgot to return remediation object in execute_request")

        logging.info(f"completed remediation item {remediation}")

        if remediation_result.successful and self.message_on_success:
            try:
                send_message(f"remediation for {remediation_result.key} completed: {remediation_result.result}",
                             MESSAGE_TYPE_REMEDIATION_SUCCESS)
            except Exception as e:
                logging.error(f"unable to send completed message: {e}")

        elif not remediation_result.successful and self.message_on_error:
            try:
                send_message(f":rotating_light: remediation for {remediation_result.key} failed:\n{remediation_result.result}",
                             MESSAGE_TYPE_REMEDIATION_FAILURE)
            except Exception as e:
                logging.error(f"unable to send completed message: {e}")

        saq.db.execute(Remediation.__table__.update().values(
            status=REMEDIATION_STATUS_COMPLETED,
            successful=remediation_result.successful,
            result=remediation_result.result).where(
            Remediation.id == remediation_result.id))
        saq.db.commit()

    except Exception as e:
        logging.error(f"unable to execute remediation item {remediation.id}: {e}")
        report_exception()

        try:
            saq.db.execute(Remediation.__table__.update().values(
                status=REMEDIATION_STATUS_COMPLETED,
                successful=False,
                result=str(e)).where(Remediation.id == remediation.id))
            saq.db.commit()

            if self.message_on_error:
                send_message(f":rotating_light: attempt to execute remediation {remediation.key} failed:\n{e}",
                             MESSAGE_TYPE_REMEDIATION_FAILURE)

        except Exception as e:
            logging.error(f"unable to record error for remediation item {remediation.id}: {e}")
            report_exception()
def _remediate_email_o365_EWS(emails):
    """Remediates the given emails specified by a list of tuples of (message-id, recipient email address)."""
    assert emails
    assert all([len(e) == 2 and isinstance(e[0], str) and isinstance(e[1], str) for e in emails])

    result = [] # tuple(message_id, recipient, result_code, result_text)

    # get the hostname and port for our EWS proxy system
    # this system receives requests for remediation and restorations and submits them to EWS on our behalf
    ews_host = saq.CONFIG['remediation']['ews_host']
    ews_port = saq.CONFIG['remediation'].getint('ews_port')

    # the format of each request is a POST to
    # https://host:port/delete
    # with JSON as the POST data content

    # note that we make a separate request for each one
    url = 'https://{}:{}/delete'.format(ews_host, ews_port)
    session = requests.Session()
    data = {'recipient': None, 'message_id': None}
    headers = {'Content-Type': 'application/json'}

    for message_id, recipient in emails:
        if recipient.startswith('<'):
            recipient = recipient[1:]
        if recipient.endswith('>'):
            recipient = recipient[:-1]

        data['recipient'] = recipient
        data['message_id'] = message_id
        json_data = json.dumps(data)

        try:
            logging.info("remediating message_id {} to {}".format(message_id, recipient))
            r = session.post(url, headers=headers, data=json_data, verify=False)
            logging.info("got result {} text {} for message_id {} to {}".format(
                         r.status_code, r.text, message_id, recipient))
            result.append((message_id, recipient, r.status_code, r.text))
        except Exception as e:
            error_message = 'unable to remediate message_id {} to {}: {}'.format(message_id, recipient, str(e))
            logging.error(error_message)
            report_exception()
            result.append((message_id, recipient, 'N/A', str(e)))

    return result
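# A minimal usage sketch for _remediate_email_o365_EWS(); the message-ids and recipients
# below are placeholders, not real data.
example_emails = [
    ('<message-id-1@example.com>', 'user1@example.com'),
    ('<message-id-2@example.com>', '<user2@example.com>'), # angle brackets around recipients are stripped
]

for message_id, recipient, result_code, result_text in _remediate_email_o365_EWS(example_emails):
    logging.info("remediation of {} for {} returned {}: {}".format(
                 message_id, recipient, result_code, result_text))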
def get_cached_analysis(url):
    """Returns the CloudphishAnalysisResult of the cached analysis or None if analysis is not cached."""
    try:
        return _get_cached_analysis(url)
    except Exception as e:
        message = "Unable to get analysis for url {}: {}".format(url, e)
        logging.error(message)
        report_exception()
        return CloudphishAnalysisResult(RESULT_ERROR, message)
def stop_tracked_engine(self):
    if self.tracked_engine:
        try:
            self.tracked_engine.stop()
            self.wait_engine(self.tracked_engine)
        except Exception as e:
            logging.error("unable to stop tracked engine {}: {}".format(self.tracked_engine, e))
            report_exception()
        finally:
            self.tracked_engine = None
def _atexit_callback():
    if local_lock_manager:
        try:
            logging.info("shutting down local lock manager...")
            local_lock_manager.shutdown()
            logging.info("shut down local lock manager")
        except Exception as e:
            logging.error("unable to shutdown local lock manager: {}".format(e))
            report_exception()
def loop(self): logging.debug("starting notification manager") while not self.control.is_set(): try: self.execute() self.control.wait(1) except Exception as e: logging.error(str(e)) report_exception() self.control.wait(5)
def server_loop(self):
    while not self.shutdown:
        try:
            self.server_socket = socket.socket() # defaults to AF_INET, SOCK_STREAM
            self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.server_socket.bind((self.bind_address, self.bind_port))
            self.server_socket.listen(5)

            while not self.shutdown:
                logging.debug("waiting for next connection on {}:{}".format(self.bind_address, self.bind_port))
                client_socket, remote_address = self.server_socket.accept()
                remote_host, remote_port = remote_address
                logging.info("got connection from {0}:{1}".format(remote_host, remote_port))

                if self.shutdown:
                    return

                allowed = False
                remote_host_ipv4 = ipaddress.ip_address(remote_host)
                for ipv4_network in self.allowed_ipv4:
                    if remote_host_ipv4 in ipv4_network:
                        allowed = True
                        break

                if not allowed:
                    logging.warning("blocking invalid remote host {0}".format(remote_host))
                    try:
                        client_socket.close()
                    except:
                        pass

                    continue

                # start a thread to deal with this client
                t = Thread(target=self.client_loop, args=(remote_host, remote_port, client_socket),
                           name="Client {0}".format(remote_host))
                t.daemon = True
                t.start()

                #record_metric(METRIC_THREAD_COUNT, threading.active_count())

        except Exception as e:
            logging.error("uncaught exception: {0}".format(str(e)))
            report_exception()
            # TODO clean up socket stuff to restart

        if not self.shutdown:
            time.sleep(1)
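# A minimal sketch of how the allowed_ipv4 list checked in server_loop() above could be built;
# the load_allowed_ipv4() helper and its allowed_networks parameter are assumptions for
# illustration, not part of the original class.
import ipaddress

def load_allowed_ipv4(self, allowed_networks):
    """allowed_networks is an iterable of CIDR strings, e.g. ['127.0.0.1/32', '10.0.0.0/8']."""
    self.allowed_ipv4 = [ipaddress.ip_network(network.strip()) for network in allowed_networks]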
def release(self):
    if not self.semaphore_acquired:
        logging.warning("release called on unacquired semaphore {0}".format(self.semaphore_name))

    # are we releasing a fallback semaphore?
    if self.fallback_semaphore is not None:
        logging.debug("releasing fallback semaphore {0}".format(self.semaphore_name))
        try:
            self.fallback_semaphore.release()
        except Exception as e:
            logging.error("unable to release fallback semaphore {0}: {1}".format(self.semaphore_name, str(e)))
            report_exception()

        # make sure we set this so that the monitor thread exits
        self.semaphore_acquired = False
        return

    try:
        # send the command for release
        logging.debug("releasing semaphore {0}".format(self.semaphore_name))
        self.socket.sendall("release|".encode('ascii'))

        # wait for the ok
        command = self.socket.recv(128).decode('ascii')
        if command == '':
            logging.debug("detected client disconnect")
            return

        logging.debug("received response from server: {0}".format(command))
        if command == 'ok|':
            logging.debug("successfully released semaphore {0}".format(self.semaphore_name))
            return
        else:
            logging.error("invalid response from server")
            return

    except Exception as e:
        logging.error("error trying to release semaphore {0}: {1}".format(self.semaphore_name, str(e)))
    finally:
        try:
            self.socket.close()
        except Exception:
            pass

        # make sure we set this so that the monitor thread exits
        self.semaphore_acquired = False
def release_semaphore(self):
    """Release the acquired semaphore, or do nothing if this module does not use a semaphore."""
    if self.semaphore is None:
        return

    try:
        self.semaphore.release()
        #logging.debug("analysis module {0} released semaphore {1}".format(self, self.semaphore_name))
    except Exception as e:
        logging.error("unable to release semaphore: {0}".format(str(e)))
        report_exception()
def create_analysis(url, reprocess, details):
    try:
        # url must be parsable
        urlparse(url)
        return _create_analysis(url, reprocess, details)
    except Exception as e:
        message = "unable to create analysis request for url {}: {}".format(url, e)
        logging.error(message)
        report_exception()
        return CloudphishAnalysisResult(RESULT_ERROR, message)