def apimethod_run_nmap_scan(sensor_id, target, idm, scan_type, rdns, scan_timing, autodetect,
                            scan_ports, output_file_prefix="", save_to_file=False, job_id=""):
    """Launches an NMAP scan

    Args:
        sensor_id: ID of the sensor from which the NMAP scan will be launched
        target: IP address of the component where the NMAP will be executed
        idm: Convert results into IDM events
        scan_type: Sets the NMAP scan type
        rdns: Tells NMAP to do reverse DNS resolution on the active IP addresses it finds
        scan_timing: Sets the timing template
        autodetect: Aggressive scan options (enable OS detection)
        scan_ports: Only scan the specified ports
        output_file_prefix: Prefix string to be added to the output filename
        save_to_file: Indicates whether you want to save the NMAP report to a file or not
        job_id: Celery job ID

    Returns:
        nmap_report: The NMAP report or the filename where the report has been saved.

    Raises:
        APINMAPScanCannotRun
        APICannotResolveSensorID
        APINMAPScanCannotRetrieveBaseFolder
        APINMAPScanCannotCreateLocalFolder
    """
    (result, sensor_ip) = get_sensor_ip_from_sensor_id(sensor_id, local_loopback=False)
    if result is False:
        api_log.error(
            "[apimethod_run_nmap_scan] Cannot retrieve the sensor ip from the given sensor id <%s>" % sensor_id)
        raise APICannotResolveSensorID(sensor_id)

    success, nmap_report = ansible_run_nmap_scan(sensor_ip=sensor_ip,
                                                 target=target,
                                                 scan_type=scan_type,
                                                 rdns=rdns,
                                                 scan_timing=scan_timing,
                                                 autodetect=autodetect,
                                                 scan_ports=scan_ports,
                                                 job_id=job_id)
    if not success:
        api_log.error('Failed to launch NMAP scan: %s' % nmap_report)
        raise APINMAPScanCannotRun(nmap_report)

    filename = None
    if save_to_file:
        base_path = get_nmap_directory(sensor_id)
        filename = "%s/nmap_report_%s.json" % (base_path, output_file_prefix)
        with open(filename, "w") as f:
            f.write(json.dumps(nmap_report))

    if idm:
        conn = IDMConnection(sensor_id=sensor_id)
        if conn.connect():
            conn.send_events_from_hosts(nmap_report)
            try:
                if filename is not None:
                    os.remove(filename)
            except Exception:
                pass
        else:
            api_log.error("[apimethod_run_nmap_scan] Cannot connect with the IDM Service")

    try:
        apimethods_nmap_purge_scan_files(job_id)
    except Exception as exp:
        api_log.warning("[apimethod_run_nmap_scan] Cannot purge the scan files %s" % str(exp))

    return nmap_report
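A minimal usage sketch, assuming a caller that already has a valid sensor ID and target; the literal IDs, scan options and job id below are placeholders, not values defined in this code:

try:
    report = apimethod_run_nmap_scan(sensor_id="<sensor-uuid>",        # placeholder
                                     target="192.168.1.0/24",          # placeholder
                                     idm=False,
                                     scan_type="fast",                 # placeholder scan type
                                     rdns=False,
                                     scan_timing="T3",
                                     autodetect=False,
                                     scan_ports=None,
                                     output_file_prefix="<job-id>",
                                     save_to_file=True,
                                     job_id="<job-id>")
except APINMAPScanCannotRun as err:
    api_log.error("NMAP scan could not be launched: %s" % str(err))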
def get_pulse_updates(self):
    """Updates redis with the pulses that must be re-added or deleted.

    Args:
        None
    Returns:
        tuple: Number of pulses updated and deleted.
    """
    total_add = 0
    total_del = 0
    subscribed_timestamp = self.get_latest_request('subscribed')
    events_timestamp = self.get_latest_request('events')
    # If it is the first time we download the pulses, we don't execute this call.
    if subscribed_timestamp is not None:
        # Use the event time, or the subscribed time in case the event time is null for any reason.
        events_timestamp = subscribed_timestamp if events_timestamp is None else events_timestamp
        next_request = "%s/pulses/events?limit=20&since=%s" % (self.url_base, events_timestamp)
    else:
        return total_add, total_del

    event = {}
    while next_request:
        try:
            json_data = self.make_request(next_request)
            # We need to apply the action in each iteration to keep the order of each modification.
            for event in json_data.get('results'):
                e_type = event.get('object_type')
                e_action = event.get('action')
                e_id = event.get('object_id')
                # Authors to delete
                if e_type == 'user' and e_action in ['unsubscribe', 'delete']:
                    total_del += self.remove_pulses_from_authors([e_id])
                # Authors to subscribe
                elif e_type == 'user' and e_action == 'subscribe':
                    total_add += self.add_pulses_from_authors([e_id])
                # Pulses to delete
                elif e_type == 'pulse' and e_action in ['unsubscribe', 'delete']:
                    total_del += self.remove_pulses([e_id])
                # Pulses to add
                elif e_type == 'pulse' and e_action == 'subscribe':
                    total_add += self.add_pulses_from_list([e_id])
            # Get next request
            next_request = json_data.get('next')
        except Exception as error:
            api_log.warning("Cannot download pulse updates: %s" % str(error))
            raise

    update_timestamp = event.get('created', None)
    if update_timestamp is not None:
        self.update_latest_request('events', update_timestamp)

    return total_add, total_del
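A hedged sketch of how the two download paths might be combined in a periodic sync pass; the downloader class name (OTXPulseDownloader) and its constructor are assumptions, only get_new_pulses() and get_pulse_updates() come from the code above:

downloader = OTXPulseDownloader()                # hypothetical class exposing the methods above
new_pulses = downloader.get_new_pulses()         # full/initial download based on the 'subscribed' timestamp
added, deleted = downloader.get_pulse_updates()  # incremental path based on the 'events' feed
api_log.debug("OTX sync finished: %s new, %s re-added, %s deleted" % (new_pulses, added, deleted))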
def is_task_in_rabbit(task_id):
    """Checks whether the task is in rabbit or not. If the task is found, returns its json data,
    otherwise returns None.

    {u'retries': 0,
     u'task': u'celerymethods.tasks.monitor_tasks.monitor_retrieves_remote_info',
     u'eta': None,
     u'args': [],
     u'expires': None,
     u'callbacks': None,
     u'errbacks': None,
     u'kwargs': {},
     u'id': u'de1cd3b1-d001-4bea-a050-8ffe610bee21',
     u'utc': False}
    """
    try:
        conn = amqp.Connection(host="localhost:5672",
                               userid="guest",
                               password="******",
                               virtual_host="/",
                               insist=False)
        chan = conn.channel()
        # Inspect all available queues
        for celery_queue in CELERY_QUEUES:
            while True:
                msg = chan.basic_get(queue=celery_queue.name)
                if msg is not None:
                    try:
                        task_json = json.loads(msg.body)
                        if task_json['id'] == task_id:
                            api_log.warning("Task found in rabbit... that means celery is busy..")
                            return task_json
                    except Exception as cannot_parse:
                        api_log.warning("Cannot parse rabbit message: %s" % str(cannot_parse))
                else:
                    break
    except Exception as e:
        api_log.error("Cannot inspect rabbitmq: %s" % str(e))
    return None
def get_sensor_plugins_enabled_by_asset(sensor_id, asset_id=None, no_cache=False):
    """Gets the list of plugins enabled in a sensor by asset

    Args:
        sensor_id (UUID): sensor id
        asset_id (UUID): filter for a specific asset

    Returns:
        Dictionary with the plugins enabled by asset in the sensor, filtered by asset_id if provided

    Raises:
        APICannotResolveSensorID
        APICannotGetSensorPlugins
    """
    asset_plugins = {}
    sensor_data = get_sensor_plugins(sensor_id=sensor_id, no_cache=no_cache)
    if 'enabled' in sensor_data:
        asset_plugins = sensor_data['enabled'].get('devices', {})
    if asset_id is not None:
        asset_plugins = dict((key, value) for key, value in asset_plugins.iteritems() if key == asset_id)

    # Fill the plugin info
    plugins = {}
    for (asset, plugin_list) in asset_plugins.iteritems():
        for plugin in plugin_list:
            if plugin in sensor_data['plugins']:
                if asset not in plugins:
                    plugins[asset] = {}
                plugins[asset][plugin] = sensor_data['plugins'][plugin]
            else:
                api_log.warning("[get_sensor_plugins_enabled_by_asset] "
                                "plugin '{0}' enabled in asset '{1}' in sensor '{2}' not found".format(
                                    plugin, asset, sensor_id))
    return plugins
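A hedged usage sketch (the sensor UUID is a placeholder; iteritems() matches the Python 2 style used above):

enabled = get_sensor_plugins_enabled_by_asset(sensor_id="<sensor-uuid>")   # placeholder UUID
for asset, plugins in enabled.iteritems():
    api_log.debug("Asset %s has %d plugins enabled" % (asset, len(plugins)))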
def add_pulses_from_authors(self, authors):
    """Adds the pulses from the given authors.

    Args:
        authors (list): List of authors whose pulses should be added
    Returns:
        integer: Number of new pulses added.
    """
    pulse_downloaded = 0
    for author in authors:
        next_request = "{}/pulses/subscribed?limit=20&author_name={}".format(self.url_base, author)
        while next_request:
            try:
                json_data = self.make_request(next_request)
                # Save pulse data on redis
                pulse_downloaded += self.save_pulses(json_data.get('results'))
                # Get next request
                next_request = json_data.get('next')
            except Exception as err:
                api_log.warning("Cannot download pulses from author {}: {}".format(author, err))
                continue
    return pulse_downloaded
def apimethod_delete_nmap_scan(sensor_id, task_id):
    """Deletes the NMAP scan report (and any leftover scan files) of the given task.

    Args:
        sensor_id: Sensor ID where the scan was launched
        task_id: NMAP scan task ID

    Returns:
        None

    Raises:
        APINMAPScanCannotRetrieveBaseFolder
        APINMAPScanReportNotFound
        APINMAPScanCannotCreateLocalFolder
        APINMAPScanReportCannotBeDeleted
    """
    try:
        # When the NMAP scan has been stopped by the user, it could leave some files in the tmp folder.
        apimethods_nmap_purge_scan_files(task_id)
    except Exception as exp:
        api_log.warning("[apimethod_delete_nmap_scan] Cannot purge the scan files %s" % str(exp))

    apimethod_nmapdb_delete_task(task_id)
    directory = get_nmap_directory(sensor_id)
    nmap_report_path = "{0}/nmap_report_{1}.json".format(directory, task_id)
    # if not os.path.isfile(nmap_report_path):
    #     raise APINMAPScanReportNotFound(nmap_report_path)
    try:
        if os.path.isfile(nmap_report_path):
            os.remove(nmap_report_path)
    except Exception as e:
        api_log.error("[apimethod_delete_nmap_scan] {0}".format(str(e)))
        raise APINMAPScanReportCannotBeDeleted()
def __get_software_version(self, sensor_id_bin):
    try:
        version = db.session.query(Sensor_Properties.version).filter(
            Sensor_Properties.sensor_id == sensor_id_bin).one()[0]
    except Exception as exc:
        api_log.warning('Did not manage to get sensor software version: {}'.format(format_exc(exc)))
        return None
    return version
def is_task_in_celery(task_id):
    """Looks whether a task is scheduled, reserved or active

    Args:
        task_id: The task id
    Returns:
        The task dictionary or None
    """
    try:
        # When celery is down, the inspection result will be None; in this case we wait for a while and retry.
        i = None
        tries = 0
        while tries < 3:
            try:
                i = inspect(timeout=10)
                if i is not None:
                    break
            except Exception as exp:
                api_log.warning("Cannot inspect the celery queue.. let's wait for a while... %s" % str(exp))
            finally:
                tries = tries + 1
                time.sleep(5)

        if i is None:
            return None

        active = i.active()
        scheduled = i.scheduled()
        reserved = i.reserved()
        if active is not None:
            for node, tasks in active.iteritems():
                for task in tasks:
                    if str(task['id']) == task_id:
                        del i
                        return task.copy()
        if reserved is not None:
            for node, tasks in reserved.iteritems():
                for task in tasks:
                    if str(task['id']) == task_id:
                        del i
                        return task.copy()
        if scheduled is not None:
            for node, tasks in scheduled.iteritems():
                for task in tasks:
                    if str(task['id']) == task_id:
                        del i
                        return task.copy()
        del i
        # We have reached this point...
        # Maybe celery is too busy to get tasks from the queue, so let's see whether the task is in rabbit.
        task_in_rabbit = is_task_in_rabbit(task_id)
        if task_in_rabbit is not None:
            return task_in_rabbit
    except Exception as exp:
        api_log.error("[is_task_in_celery] An error occurred while reading the task list %s" % str(exp))
    return None
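A hedged sketch of how a caller might check whether a job is still pending, relying on is_task_in_celery to fall back to the rabbit lookup by itself (the task id below is a placeholder):

task = is_task_in_celery("de1cd3b1-d001-4bea-a050-8ffe610bee21")   # placeholder task id
if task is None:
    api_log.debug("Task is neither active, scheduled, reserved nor queued in rabbit")
else:
    api_log.debug("Task is still pending: %s" % task.get('id'))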
def _sanitize_tasks(self):
    # Special case for telemetry
    if Task.TELEMERY not in self._custom_tasks:
        try:
            success, value = db_get_config('track_usage_information')
            if success and value != '':
                telemetry_task = self.get_task(Task.TELEMERY)
                telemetry_task.enabled = bool(int(value))
                self.update_task(telemetry_task)
        except Exception as e:
            api_log.warning("[Scheduler._sanitize_tasks] {0}".format(str(e)))
def process(self):
    """Processes the plugin checks"""
    if not self.__plugin_loaded:
        self.__load_plugin()
    try:
        data = self.__plugin_file.check()
    except Exception as e:
        api_log.warning("[PluginTester] Cannot check the plugin %s" % str(e))
        raise APICannotCheckPlugin(self.__plugin_file_name)
    return data
def unregister_console(self):
    token = self.__token_repository.get_token()
    if token is None:
        raise Exception('unregister_console failed: token was not found')
    try:
        self.__console_proxy.send_disconnect_notification(token)
    except Exception as exc:
        # The disconnect notification is not obligatory, so it should not block the unregistration.
        # This allows unregistering from a console which is not accessible, without extending the
        # business logic with something like 'force unregister'.
        api_log.warning('Disconnect notification failed, ignoring. Error details: {}'.format(format_exc(exc)))
    finally:
        self.__token_repository.delete_token()
    return CentralConsoleStatus(CONSOLE_CONNECTION_NOT_CONFIGURED, None)
def get_new_pulses(self):
    """Updates redis with the pulses that must be added.

    Args:
        None
    Returns:
        integer: Number of new pulses downloaded.
    """
    pulse_downloaded = 0
    subscribed_timestamp = self.get_latest_request('subscribed')
    if subscribed_timestamp is not None:
        next_request = "{}/pulses/subscribed?limit=20&modified_since={}".format(self.url_base,
                                                                                subscribed_timestamp)
    else:
        next_request = "{}/pulses/subscribed?limit=20".format(self.url_base)

    # This var will store the date of the newest pulse, which will be used for the next query.
    update_timestamp = None
    while next_request:
        try:
            json_data = self.make_request(next_request)
            p_data = json_data.get('results', [])
            # First we remove the pulses to avoid IOC inconsistency problems.
            self.remove_pulses([p.get('id', '') for p in p_data])
            # Save pulse data on redis
            pulse_downloaded += self.save_pulses(p_data)
            # Save the newest pulse date
            if update_timestamp is None:
                try:
                    # We save the first pulse's modified date.
                    update_timestamp = p_data[0]['modified']
                except:
                    pass
            # Get next request
            next_request = json_data.get('next')
        except Exception as err:
            api_log.warning("Cannot download new pulses: {}".format(err))
            raise

    # Saving the request date
    if update_timestamp is not None:
        self.update_latest_request('subscribed', update_timestamp)
    # If it is the first time we download the pulses, we update the event request time to the current UTC timestamp.
    if subscribed_timestamp is None:
        self.update_latest_request('events')

    return pulse_downloaded
def get_otx_user_version():
    """ Returns a string like 'OTX USM/5.2.4' if able to get the system version, or '' otherwise """
    otx_user_version = ''
    try:
        data_retrieved, version_data = get_alienvault_version()
        if data_retrieved:
            # We need only the product name and version number, so get the first 2 items
            version_data = version_data.replace('ALIENVAULT', 'USM').split()[:2]
            otx_user_version = 'OTX {}'.format('/'.join(version_data))
        else:
            api_log.warning('Bad result returned for alienvault version: {}'.format(version_data))
    except Exception as err:
        api_log.warning('Failed to get alienvault version. Reason: {}'.format(err))
    return otx_user_version
def check_token(self):
    """Checks if an OTX token is valid and returns the user info if so.

    Args:
        None
    Returns:
        user_data (dict): A dict with the user info.
    """
    url = "{}/user/".format(self.url_base)
    try:
        user_data = self.make_request(url)
    except Exception as err:
        api_log.warning("OTX key activation error: {}".format(err))
        raise
    return user_data
def has_admin_users():
    """Tells whether there are any admin users in the database or not.

    Returns:
        result (bool): True if there is at least one admin user, False otherwise
    """
    result = False
    try:
        result = db.session.query(Config).filter(Config.conf == 'first_login').one().value.lower() == 'no'
    except NoResultFound:
        result = False
        api_log.warning("[has_admin_users] No first_login row found.")
    except Exception as error:
        api_log.error("[has_admin_users] %s" % str(error))
        result = False
    return result
def get_latest_request(self, d_type):
    """Loads the latest request timestamp

    Args:
        d_type (str): The type of date to get.
    Returns:
        None or the date in string format
    """
    date_type = self.date_types.get(d_type, None)
    if date_type is None:
        return date_type
    try:
        latest_timestamp = self.pulse_db.get(date_type)
    except Exception as err:
        api_log.warning("Cannot get messages revision: {}".format(err))
        return None
    return None if latest_timestamp == "" else latest_timestamp
def add_pulses_from_list(self, pulses):
    """Adds the given pulses.

    Args:
        pulses (list): List of pulses we want to add
    Returns:
        integer: Number of new pulses downloaded.
    """
    p_download = []
    for p_id in pulses:
        request = "%s/pulses/%s/" % (self.url_base, p_id)
        try:
            json_data = self.make_request(request)
            # Collect the pulse data; it is saved on redis at the end through save_pulses
            p_download.append(json_data)
        except Exception as error:
            api_log.warning("Cannot download pulse %s: %s" % (str(p_id), str(error)))
            continue
    return self.save_pulses(p_download)
def cache_func(*args, **kwargs):
    cache = _get_cache(namespace=namespace, expire=expire)
    # Handle cases where caching is down or otherwise not available.
    if cache.connection is None:
        return function(*args, **kwargs)

    # Clean kwargs. Remove no_cache and convert str to unicode
    key_kwargs = kwargs.copy()
    if 'no_cache' in kwargs:
        key_kwargs.pop('no_cache')
    for key in key_kwargs:
        if isinstance(key_kwargs[key], str):
            try:
                key_kwargs[key] = unicode(key_kwargs[key])
            except Exception as e:
                api_log.warning("use_cache: %s" % str(e))

    # Clean args. Convert str to unicode
    key_args = []
    for arg in args:
        if isinstance(arg, str):
            unicode_arg = arg
            try:
                unicode_arg = unicode(arg)
            except Exception as e:
                api_log.warning("use_cache: %s" % str(e))
            key_args.append(unicode_arg)
        else:
            key_args.append(arg)

    # Use the function name, args and kwargs for the cache key
    cache_key = hashlib.sha256(str(function.__name__) + str(key_args) + str(key_kwargs)).hexdigest()
    if 'no_cache' not in kwargs or not kwargs['no_cache']:
        try:
            # Consider using pickle instead of json
            # json can't store tuples
            return cache.get_json(cache_key)
        except (ExpiredKeyException, CacheMissException):
            pass
        except Exception as msg:
            api_log.error(str(msg))
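A hedged sketch of how the enclosing decorator might be applied; the name use_cache is inferred from the log messages above, and the decorated function, namespace and expire values are placeholders:

@use_cache(namespace="sensor_plugins", expire=300)   # decorator name inferred from the log strings
def get_expensive_data(sensor_id, no_cache=False):
    # no_cache is stripped from the cache key; passing no_cache=True skips the cached value.
    return {"sensor_id": sensor_id}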
def get_plugin_package_info(system_ip):
    """
    If it exists, returns the md5sum of the INSTALLED VERSION of the package alienvault-plugin-sids
    IMPORTANT NOTE: The file must be called alienvault-plugin-sids_<version>_all.deb
    """
    (success, version) = get_plugin_package_version(system_ip)
    if success:
        command = """md5sum /var/cache/apt/archives/alienvault-plugin-sids_%s_all.deb|awk {'print $1'}""" % version
        response = ansible.run_module(host_list=[system_ip], module="shell", args=command)
        if system_ip in response['contacted']:
            if response['contacted'][system_ip]['rc'] == 0:
                md5 = response['contacted'][system_ip]['stdout'].split('\n')[0]  # Only first line
            else:
                api_log.warning("Can't obtain md5 for alienvault-plugin-sids")
                md5 = ''
            result = (True, {'version': version, 'md5': md5})
        else:
            result = (False, str(response['dark'][system_ip]))
    else:
        result = (False, "Can't obtain package version")
    return result
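A hedged usage sketch showing how the (success, data) tuple might be unpacked; the system IP is a placeholder:

success, info = get_plugin_package_info("192.168.10.2")   # placeholder IP
if success:
    api_log.debug("alienvault-plugin-sids %s (md5 %s)" % (info['version'], info['md5']))
else:
    api_log.warning("Cannot get plugin package info: %s" % info)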
def start(self):
    """ Starts the monitor activity """
    try:
        # Remove the previous monitor data.
        self.remove_monitor_data()
        monitor_data = {}
        success, system_id = get_system_id_from_local()
        if not success:
            return False

        # Now
        now = int(time.time())

        # Firstly, wizard data!
        wizard_dict = {}
        success, start_welcome_wizard, welcome_wizard_date = get_wizard_data()
        if not success:
            api_log.error("There was an error retrieving the wizard data")

        wizard_shown = True
        if start_welcome_wizard == 2:
            # If the difference between now and welcome_wizard_date is small enough
            # (less than 420 seconds), display the message.
            if (now - welcome_wizard_date) < 420:
                wizard_shown = False
        wizard_dict['wizard_shown'] = wizard_shown
        monitor_data[self.__WEB_MESSAGES['MESSAGE_WIZARD_SHOWN']] = wizard_dict

        # Time to look for orphan sensors
        orphan_sensors_dict = {}
        success, message = check_any_orphan_sensor()
        orphan_sensors = False
        if not success:
            api_log.error(message)
            orphan_sensors = True
        orphan_sensors_dict['orphan_sensors'] = orphan_sensors
        monitor_data[self.__WEB_MESSAGES['MESSAGE_SENSOR_NOT_INSERTED']] = orphan_sensors_dict

        # Has the trial version expired?
        success, expires, message = get_trial_expiration_date()
        trial_expired = False
        trial_expires_7days = False
        trial_expires_2days = False
        if not success:
            rc, pro = system_is_professional()
            if rc:
                if pro:
                    # OK, we have an error here
                    api_log.error(message)
                else:
                    pass
        else:
            # expire=9999-12-31
            expiration_date = expires.split('=')[1]
            if expiration_date:
                mktime_expression = datetime.datetime.strptime(expiration_date, "%Y-%m-%d").timetuple()
                expires = int(time.mktime(mktime_expression))
                one_week_left = now - 604800
                two_days_left = now - 172800
                if expires < one_week_left:
                    trial_expires_7days = True
                elif expires < two_days_left:
                    trial_expires_2days = True
                elif expires < now:
                    trial_expired = True
                else:
                    pass
            else:
                if os.path.isfile("/etc/ossim/ossim.lic"):
                    api_log.warning("Valid license but no web admin user found!")
                else:
                    api_log.debug("Expiration date can't be determined: License file not found")

        monitor_data[self.__WEB_MESSAGES["MESSAGE_TRIAL_EXPIRED"]] = {'trial_checked': success,
                                                                      'trial_expired': trial_expired}
        monitor_data[self.__WEB_MESSAGES["MESSAGE_TRIAL_EXPIRES_7DAYS"]] = {'trial_checked': success,
                                                                            'trial_expired': trial_expires_7days}
        monitor_data[self.__WEB_MESSAGES["MESSAGE_TRIAL_EXPIRES_2DAYS"]] = {'trial_checked': success,
                                                                            'trial_expired': trial_expires_2days}

        # Check max number of assets
        assets = len(get_asset_list())
        contracted_devices = get_license_devices()
        over_assets = False
        exceeding_assets = 0
        # if assets > contracted_devices:
        #     exceeding_assets = assets - contracted_devices
        #     over_assets = True
        monitor_data[self.__WEB_MESSAGES["MESSAGE_LICENSE_VIOLATION"]] = {'over_assets': over_assets,
                                                                          'exceeding_assets': exceeding_assets}

        # OTX contribution
        otx_enabled = apimethod_is_otx_enabled()
        monitor_data[self.__WEB_MESSAGES["MESSAGE_OTX_CONNECTION"]] = {'otx_enabled': otx_enabled}

        # Backup in progress?
        success, running, message = check_backup_process_running()
        if not success:
            api_log.error(message)
        monitor_data[self.__WEB_MESSAGES["MESSAGE_BACKUP_RUNNING"]] = {'backup_check': success,
                                                                       'backup_running': running}

        # Save monitor data
        self.save_data(system_id, ComponentTypes.SYSTEM, self.get_json_message(monitor_data))
    except Exception as err:
        api_log.error("Error processing WebUIData monitor information: %s" % str(err))
        return False
    return True
def load_mcserver_messages(message_list):
    """Adds or updates messages coming from the mcserver

    Args:
        message_list (list[Status_Message])

    Returns:
        success (bool): True if successful, False otherwise
        result (str): Error message (if any)
    """
    result = ""
    success = True
    try:
        db.session.begin()
        for msg in message_list:
            msg_id_str = str(msg['msg_id'])
            msg_id_binary = get_bytes_from_uuid(msg_id_str)
            additional_info_json = ""
            if msg['additional_info'] is not None and msg['additional_info'] != "":
                try:
                    additional_info_json = json.dumps(msg['additional_info'])
                except Exception as e:
                    api_log.warning("Message with an invalid additional_info %s - %s" % (msg_id_str, str(e)))
                    additional_info_json = ""

            success, status_message = get_status_message_from_id(message_id=msg_id_binary,
                                                                 is_admin=True,
                                                                 serialize=False)
            if success:
                # Update values
                status_message.level = Status_Message.get_level_integer_from_string(str(msg['level']))
                status_message.title = msg['title']
                status_message.description = msg['description']
                status_message.type = msg['type']
                success, current_status_message = get_current_status_from_message_id(msg_id_str)
                if not success or len(current_status_message) != 1:
                    api_log.error("Invalid external message %s. Current_Status: %s, tuples(%s)" % (
                        msg_id_str, success, len(current_status_message)))
                    continue
                current_status_message[0].additional_info = additional_info_json
                db.session.merge(current_status_message[0])
                db.session.merge(status_message)
            else:
                new_msg = Status_Message()
                new_msg.id = msg_id_binary
                new_msg.level = Status_Message.get_level_integer_from_string(str(msg['level']))
                new_msg.title = msg['title']
                new_msg.description = msg['description']
                new_msg.type = msg['type']
                new_msg.expire = datetime.strptime(msg['valid_to'], "%Y-%m-%dT%H:%M:%S")
                new_msg.actions = ""
                new_msg.alternative_actions = ""
                new_msg.source = "external"

                current_status_message = Current_Status()
                current_status_message.id = uuid4().bytes
                current_status_message.component_type = 'external'
                current_status_message.creation_time = datetime.strptime(msg['valid_from'], "%Y-%m-%dT%H:%M:%S")
                current_status_message.message_id = new_msg.id
                current_status_message.additional_info = ""
                current_status_message.suppressed = 0
                current_status_message.viewed = 0
                current_status_message.additional_info = additional_info_json
                db.session.add(new_msg)
                db.session.add(current_status_message)
        db.session.commit()
    except Exception as e:
        success = False
        result = "[load_mcserver_messages] Error: %s" % str(e)
        db.session.rollback()
    return success, result