def start(self):
    """Starts the monitor activity.

    Collects telemetry ("doctor") data from the local system plus every
    connected sensor and database, then ships it to the telemetry server.

    :return: True on success, False otherwise
    """
    self.remove_monitor_data()
    monitor_data = {}
    success, system_id = get_system_id_from_local()
    if not success:
        return False
    # Just return if there is no internet connection.
    if not self.__check_internet_connection__():
        logger.error("Cannot connect to the Telemetry Server")
        monitor_data['telemetry_server_connectivity'] = False
        self.save_data(system_id, ComponentTypes.SYSTEM,
                       self.get_json_message(monitor_data))
        return True
    # Find the list of connected systems.
    (result, sensor_dict) = get_systems('Sensor', convert_to_dict=True, exclusive=True)
    if not result:
        logger.error("Cannot retrieve connected sensors")
        return False
    (result, database_dict) = get_systems('Database', convert_to_dict=True, exclusive=True)
    if not result:
        logger.error("Cannot retrieve connected databases")
        return False
    system_dict = dict(sensor_dict, **database_dict)
    # Fix: reuse the local system id fetched at the top instead of
    # issuing a second, redundant get_system_id_from_local() call.
    local_system_id = system_id
    result, local_system_ip = get_system_ip_from_system_id(local_system_id)
    if not result:
        logger.error("Cannot retrieve the local IP address")
        return False
    # The local system takes part in the collection as well.
    system_dict = dict({local_system_id: local_system_ip}, **system_dict)
    args = {'output_type': 'ansible',
            'plugin_list': ','.join(self.__strike_zone_plugins),
            'verbose': 2}
    ansible_output = get_doctor_data(system_dict.values(), args)
    if ansible_output.get('dark'):
        logger.error('Cannot collect telemetry data: %s' % str(ansible_output.get('dark')))
        return False
    return self.__send_data__(local_system_id, ansible_output)
def start(self):
    """Collect telemetry data from all connected systems and ship it.

    :return: True on success, False otherwise
    """
    self.remove_monitor_data()
    monitor_data = {}
    success, system_id = get_system_id_from_local()
    if not success:
        return False
    # Bail out early (but report success) when the telemetry server
    # cannot be reached.
    if not self.__check_internet_connection__():
        logger.error("Cannot connect to the Telemetry Server")
        monitor_data['telemetry_server_connectivity'] = False
        self.save_data(system_id,
                       ComponentTypes.SYSTEM,
                       self.get_json_message(monitor_data))
        return True
    # Gather every directly connected sensor and database.
    result, sensor_dict = get_systems('Sensor', convert_to_dict=True, exclusive=True)
    if not result:
        logger.error("Cannot retrieve connected sensors")
        return False
    result, database_dict = get_systems('Database', convert_to_dict=True, exclusive=True)
    if not result:
        logger.error("Cannot retrieve connected databases")
        return False
    system_dict = dict(sensor_dict, **database_dict)
    result, local_system_id = get_system_id_from_local()
    if not result:
        logger.error("Cannot retrieve the local id")
        return False
    result, local_system_ip = get_system_ip_from_system_id(local_system_id)
    if not result:
        logger.error("Cannot retrieve the local IP address")
        return False
    # The local system is part of the collection too.
    system_dict = dict({local_system_id: local_system_ip}, **system_dict)
    args = {'output_type': 'ansible',
            'plugin_list': ','.join(self.__strike_zone_plugins),
            'verbose': 2}
    ansible_output = get_doctor_data(system_dict.values(), args)
    if ansible_output.get('dark'):
        logger.error('Cannot collect telemetry data: %s' % str(ansible_output.get('dark')))
        return False
    return self.__send_data__(local_system_id, ansible_output)
def start(self):
    """Ping every known system and record its reachability."""
    self.remove_monitor_data()
    rc, system_list = get_systems()
    if not rc:
        logger.error("Can't retrieve systems..%s" % str(system_list))
        return False
    for system_id, system_ip in system_list:
        # A failed API call counts as unreachable.
        try:
            reachable = ping_system(system_id, no_cache=True)
        except APIException:
            reachable = False
        if reachable:
            self.save_data(system_id, ComponentTypes.SYSTEM,
                           self.get_json_message({'contacted': True}))
            continue
        # Unreachable: also record whether the system is a sensor.
        sensor, sensor_id = get_sensor_id_from_system_id(system_id)
        payload = self.get_json_message({'contacted': False,
                                         'is_sensor': sensor})
        if not self.save_data(system_id, ComponentTypes.SYSTEM, payload):
            logger.error("Can't save monitor info")
    return True
def start(self):
    """Check that every server shares the locally configured OTX key.

    Flags any system whose OTX key differs from the local one.

    :return: True on success, False otherwise
    """
    result, systems = get_systems('server')
    if not result:
        logger.error("Cannot retrieve system info: %s" % str(systems))
        return False
    self.remove_monitor_data()
    success, main_key = db_get_config('open_threat_exchange_key')
    # if error or otx key is not activated then we don't keep checking.
    if not success or main_key == '':
        return False
    for system_id, system_ip in systems:
        try:
            key = ansible_get_otx_key(system_ip)
            if main_key != key:
                monitor_data = {'same_otx_key': False}
                if not self.save_data(system_id,
                                      ComponentTypes.SYSTEM,
                                      self.get_json_message(monitor_data)):
                    logger.error("Cannot save monitor info")
        except Exception as exc:
            logger.error("[MonitorFederatedOTXKey]: %s" % str(exc))
    # Bug fix: the monitor used to fall off the end and return None;
    # report success explicitly like the other monitors do.
    return True
def start(self):
    """Push every custom plugin .cfg file to all connected sensors."""
    # Drop data from the previous run.
    self.remove_monitor_data()
    # Only .cfg files under the custom plugin directory are synced.
    local_path = "/etc/alienvault/plugins/custom/"
    plugins_to_sync = glob.glob(local_path + '*.cfg')
    if not plugins_to_sync:
        logger.info('Nothing to sync...')
        return True
    # exclusive=True fetches only real sensors instead of every
    # machine with a sensor profile.
    result, systems = get_systems(system_type="Sensor", exclusive=True)
    if not result:
        logger.error("[MonitorSyncCustomPlugins] Can't retrieve the system info: {}".format(str(systems)))
        return False
    for system_id, system_ip in systems:
        for plugin_path in plugins_to_sync:
            pushed, msg = rsync_push(local_ip="127.0.0.1",
                                     remote_ip=system_ip,
                                     local_file_path=plugin_path,
                                     remote_file_path=plugin_path)
            if not pushed:
                logger.error("[MonitorSyncCustomPlugins] Can't rsync with {}".format(system_ip))
                return False
    return True
def start(self):
    """Record the CPU load of every known system.

    :return: True on success, False otherwise
    """
    rt = True
    self.remove_monitor_data()
    logger.info("Checking systems cpu load")
    result, systems = get_systems()
    if not result:
        logger.error("Can't retrieve the system info: %s" % str(systems))
        return False
    for system_id, system_ip in systems:
        result, load = get_system_load(system_ip)
        if not result:
            logger.error("MonitorSystemCPULoad: %s" % load)
            rt = False
            break
        try:
            logger.info("CPU Load: %s %f" % (system_ip, load))
            self.save_data(system_id, ComponentTypes.SYSTEM,
                           self.get_json_message({"cpu_load": load}))
        except Exception:
            logger.error("Error==>> " + traceback.format_exc())
            rt = False
            break
    return rt
def start(self):
    """Store a 'contacted' flag for each system based on a ping check."""
    self.remove_monitor_data()
    rc, system_list = get_systems()
    if not rc:
        logger.error("Can't retrieve systems..%s" % str(system_list))
        return False
    for (system_id, system_ip) in system_list:
        # An API failure is treated the same as an unreachable host.
        try:
            is_up = ping_system(system_id, no_cache=True)
        except APIException:
            is_up = False
        if not is_up:
            # Record whether the unreachable system is a sensor.
            sensor, sensor_id = get_sensor_id_from_system_id(system_id)
            message = self.get_json_message({'contacted': False,
                                             'is_sensor': sensor})
            saved = self.save_data(system_id, ComponentTypes.SYSTEM, message)
            if not saved:
                logger.error("Can't save monitor info")
        else:
            self.save_data(system_id, ComponentTypes.SYSTEM,
                           self.get_json_message({'contacted': True}))
    return True
def start(self):
    """Collect the packet-loss average of every connected sensor.

    :return: True on success, False otherwise
    """
    rt = True
    try:
        self.remove_monitor_data()
        logger.info("Monitor %s Working..." % self.monitor_id)
        rc, sensor_list = get_systems(system_type="Sensor")
        if not rc:
            logger.error("Can't retrieve sensor list: %s" % str(sensor_list))
            return False
        for (sensor_id, sensor_ip) in sensor_list:
            if sensor_id == '':
                logger.warning("Sensor (%s) ID not found" % sensor_ip)
                continue
            logger.info("Getting dropped packets for sensor_ip %s" % sensor_ip)
            sensor_stats = get_network_stats(sensor_ip)
            try:
                packet_lost_average = sensor_stats["contacted"][sensor_ip]["stats"]["packet_lost_average"]
                monitor_data = {'packet_loss': packet_lost_average}
                logger.info("Lost packet average for sensor: %s = %s" % (sensor_ip, packet_lost_average))
                # Save data, component_id = canonical uuid
                if not self.save_data(sensor_id, ComponentTypes.SENSOR,
                                      self.get_json_message(monitor_data)):
                    logger.error("Can't save monitor info")
            except KeyError:
                # The sensor did not report packet stats; skip it.
                pass
    except Exception as e:
        logger.error("Something wrong happen while running the monitor..%s, %s" % (self.get_monitor_id(), str(e)))
        rt = False
    # Bug fix: rt was computed but never returned — the function
    # previously returned None on every path.
    return rt
def remove_old_database_files():
    """Periodic task: prune old configuration/environment backup files
    on every system.

    :return: True when every removal succeeded, False otherwise.
    """
    result, systems = get_systems()
    all_task_ok = True
    if result:
        for system_id, system_ip in systems:
            try:
                backup_days = get_config_backup_days()
                logger.info("Backup days... %s" % backup_days)
                data = remove_old_files(target=system_ip,
                                        rm_filter="configuration*",
                                        n_days=backup_days)
                if data[system_ip]['failures'] > 0 or data[system_ip]['unreachable'] > 0:
                    logger.info("Removing old configuration files Error %s" % data)
                    all_task_ok = False
                data = remove_old_files(target=system_ip,
                                        rm_filter="environment*",
                                        n_days=backup_days)
                if data[system_ip]['failures'] > 0 or data[system_ip]['unreachable'] > 0:
                    logger.info("Removing old files Error %s" % data)
                    all_task_ok = False
            except Exception as e:
                logger.info("Removing old files error %s" % str(e))
                # Bug fix: an exception means the task failed for this
                # system; mark the run as unsuccessful.
                all_task_ok = False
    # Bug fix: the aggregated status was computed but never returned.
    return all_task_ok
def start(self):
    """Ping every system and store its contact/certificate status."""
    self.remove_monitor_data()
    rc, system_list = get_systems()
    if not rc:
        logger.error("Can't retrieve systems..%s" % str(system_list))
        return False
    for system_id, system_ip in system_list:
        result, ansible_output = ping_system(system_ip)
        if result:
            self.save_data(system_id, ComponentTypes.SYSTEM,
                           self.get_json_message({'remote_certificates': 'Ping OK',
                                                  'contacted': True}))
            continue
        # Ping failed: store the error and whether this is a sensor.
        sensor, sensor_id = get_sensor_id_from_system_id(system_id)
        message = self.get_json_message({
            'remote_certificates': 'Error: %s' % str(ansible_output),
            'contacted': False,
            'is_sensor': sensor})
        if not self.save_data(system_id, ComponentTypes.SYSTEM, message):
            logger.error("Can't save monitor info")
    return True
def start(self):
    """Gather and persist the CPU load for all systems.

    :return: True on success, False otherwise
    """
    status = True
    self.remove_monitor_data()
    logger.info("Checking systems cpu load")
    ok, systems = get_systems()
    if not ok:
        logger.error("Can't retrieve the system info: %s" % str(systems))
        return False
    for (system_id, system_ip) in systems:
        (got_load, load) = get_system_load(system_ip)
        if got_load:
            try:
                logger.info("CPU Load: %s %f" % (system_ip, load))
                monitor_data = {"cpu_load": load}
                self.save_data(system_id, ComponentTypes.SYSTEM,
                               self.get_json_message(monitor_data))
            except Exception:
                logger.error("Error==>> " + traceback.format_exc())
                status = False
                break
        else:
            logger.error("MonitorSystemCPULoad: %s" % load)
            status = False
            break
    return status
def make_backup_in_all_systems(backup_type):
    """Run a backup on the local system and every directly connected
    remote sensor.

    :return: True if all the backups finished successfully,
             False otherwise.
    """
    result, systems = get_systems(system_type='Sensor', directly_connected=True)
    if not result:
        notifier.error("An error occurred while making the Backup " +
                       "[%s]. Cant' retrieve the systems " % backup_type)
        return False
    result, local_system_id = get_system_id_from_local()
    if not result:
        notifier.error("An error occurred while making the Backup " +
                       "[%s]. Cant' retrieve the systems " % backup_type)
        return False
    # Make sure the local system is part of the backup set.
    system_ids = [entry[0] for entry in systems]
    if local_system_id not in system_ids:
        system_ids.append(local_system_id)
    all_backups_ok = True
    for system_id in system_ids:
        success, msg = make_system_backup(system_id=system_id,
                                          backup_type=backup_type,
                                          rotate=True)
        if not success:
            all_backups_ok = False
    return all_backups_ok
def start(self):
    """Check every system for pending updates and record the result.

    When any system has updates pending, also refresh the release info
    file (skipped for trial installations).

    :return: True on success, False otherwise
    """
    rt = True
    self.remove_monitor_data()
    logger.info("Checking for pending updates")
    result, systems = get_systems()
    if not result:
        logger.error("Can't retrieve the system info: %s" % str(systems))
        return False
    pending_updates = False
    for system_id, system_ip in systems:
        success, info = apimethod_get_update_info(system_id)
        if not success:
            logger.error("MonitorPendingUpdates: %s" % info)
            rt = False
            break
        try:
            sys_pending_updates = info['pending_updates']
            pending_updates = pending_updates or sys_pending_updates
            logger.info("Pending Updates for system %s (%s): %s"
                        % (system_id, system_ip, sys_pending_updates))
            self.save_data(system_id, ComponentTypes.SYSTEM,
                           self.get_json_message({"pending_updates": sys_pending_updates}))
        except Exception as e:
            logger.error("[MonitorPendingUpdates] Error: %s" % str(e))
            rt = False
            break
    if pending_updates:
        success, local_ip = get_system_ip_from_local()
        if not success:
            logger.error("[MonitorPendingUpdates] Unable to get local IP: %s" % local_ip)
            return False
        success, is_pro = get_is_professional(local_ip)
        if success and is_pro:
            success, is_trial = system_is_trial('local')
            if success and is_trial:
                logger.info("[MonitorPendingUpdates] Trial version. Skipping download of release info file")
                return rt
            success, msg = ansible_download_release_info(local_ip)
            if not success:
                logger.error("[MonitorPendingUpdates] Unable to retrieve release info file: %s" % msg)
                return False
    return rt
def update_hids_agents():
    """Task to update the info of HIDS agents of each sensor.

    Aggregates the number of agents that could not be linked to an
    asset and posts a status message when there are any.

    :return: True on success, False otherwise
    """
    insert_message = False
    send_refresh = False
    not_linked_assets = 0
    # Clear any previous status message of this type.
    msg_id_binary = get_bytes_from_uuid("00000000-0000-0000-0000-000000010032")
    delete_current_status_messages([msg_id_binary])
    local_system_id = None
    try:
        success, systems = get_systems(system_type='Sensor', directly_connected=True)
        if not success:
            logger.error("[update_hids_agents] %s" % str(systems))
            raise APICannotRetrieveSystems()
        success, local_system_id = get_system_id_from_local()
        if not success:
            logger.error("[update_hids_agents] %s" % str(local_system_id))
            raise APICannotResolveLocalSystemID()
        # The local system is processed together with the sensors.
        system_ids = [x[0] for x in systems]
        if local_system_id not in system_ids:
            system_ids.append(local_system_id)
        for system_id in system_ids:
            try:
                not_linked_assets_by_sensor, new_host = update_system_hids_agents(system_id)
                # Update counter
                not_linked_assets += not_linked_assets_by_sensor
                if not_linked_assets_by_sensor > 0:
                    insert_message = True
                if not send_refresh and new_host:
                    send_refresh = True
            except APIException as e:
                logger.error("[update_hids_agents] %s" % str(e))
    except Exception as e:
        logger.error("[update_hids_agents] %s" % str(e))
        return False
    if insert_message:
        # Fix: local_system_id was already resolved above — no need to
        # call get_system_id_from_local() a second time.
        additional_info = json.dumps({"not_linked_assets": not_linked_assets})
        insert_current_status_message("00000000-0000-0000-0000-000000010032",
                                      local_system_id, "system", additional_info)
    if send_refresh:
        refresh_hosts()
    return True
def start(self):
    """Collect the plugins enabled per asset on every sensor and push
    the aggregate map to update_host_plugins."""
    # Remove the previous monitor data.
    self.remove_monitor_data()
    success, systems = get_systems(system_type="Sensor")
    if not success:
        logger.error("[MonitorUpdateHostPlugins] "
                     "Can't retrieve the system info: {0}".format(str(systems)))
        return False
    assets = {}
    for system_id, system_ip in systems:
        success, sensor_id = get_sensor_id_from_system_id(system_id)
        if not success:
            logger.error("[MonitorUpdateHostPlugins] "
                         "Can't resolve senor_id of system {0}: {1}".format(system_id, sensor_id))
            continue
        try:
            sensor_plugins = get_sensor_plugins_enabled_by_asset(sensor_id=sensor_id,
                                                                 no_cache=True)
        except APIException as e:
            logger.error("[MonitorUpdateHostPlugins] "
                         "Can't obtain plugin information from system {0}: {1}".format(system_id, str(e)))
            continue
        # Merge this sensor's per-asset plugin ids into the global map.
        try:
            for asset, asset_plugins in sensor_plugins.iteritems():
                if asset not in assets:
                    assets[asset] = []
                assets[asset] += [plugin['plugin_id']
                                  for plugin in asset_plugins.values()]
        except KeyError as e:
            logger.warning("[MonitorUpdateHostPlugins] "
                           "Bad format in plugins enabled by asset: {0}".format(str(e)))
    success, msg = update_host_plugins(data=assets)
    if not success:
        logger.error("[MonitorUpdateHostPlugins] "
                     "Can't update host plugin information: {0}".format(msg))
        return False
    return True
def start(self):
    """Compare the plugin package version of every sensor against the
    local version and store the result per sensor.

    :return: True on success, False otherwise
    """
    rt = True
    try:
        self.remove_monitor_data()
        logger.info("Monitor %s Working..." % self.monitor_id)
        rc, sensor_list = get_systems(system_type="Sensor")
        # Bug fix: the result of get_systems() was never checked, so a
        # failure would iterate over an error string below.
        if not rc:
            raise Exception(str(sensor_list))
        # Bug fix: get_plugin_package_info_local() was called twice;
        # a single checked call provides the local version.
        (success, local_version) = get_plugin_package_info_local()
        if not success:
            raise Exception(str(local_version))
        for (system_id, _) in sensor_list:
            (success, sensor_id) = get_sensor_id_from_system_id(system_id)
            if not success:
                logger.warning("Can't obtain sensor_id for system_id '%s'", system_id)
                continue
            if sensor_id == '':
                logger.warning("Sensor (%s) ID not found" % sensor_id)
                continue
            (success, info) = get_plugin_package_info_from_sensor_id(sensor_id)
            if not success:
                logger.warning("Can't obtain plugin version for sensor '%s'", sensor_id)
                continue
            if info['version'] != '':
                comparison = compare_dpkg_version(info['version'],
                                                  local_version['version'])
            else:
                comparison = ''
            data_sensor = {'version': info['version'],
                           'md5': info['md5'],
                           'comparison': comparison}
            if not self.save_data(sensor_id, ComponentTypes.SENSOR,
                                  self.get_json_message(data_sensor)):
                logger.error("Can't save monitor info for sensor '%s'" % sensor_id)
    except Exception as e:
        logger.error("Something wrong happen while running the monitor..%s, %s"
                     % (self.get_monitor_id(), str(e)))
        rt = False
    # Bug fix: rt was never returned.
    return rt
def update_hids_agents():
    """Task to update the info of hids agents of each sensor."""
    insert_message = False
    send_refresh = False
    not_linked_assets = 0
    # Drop any previous status message of this type first.
    msg_id_binary = get_bytes_from_uuid("00000000-0000-0000-0000-000000010032")
    delete_current_status_messages([msg_id_binary])
    try:
        success, systems = get_systems(system_type='Sensor', directly_connected=True)
        if not success:
            logger.error("[update_hids_agents] %s" % str(systems))
            raise APICannotRetrieveSystems()
        success, local_system_id = get_system_id_from_local()
        if not success:
            logger.error("[update_hids_agents] %s" % str(local_system_id))
            raise APICannotResolveLocalSystemID()
        # The local system is processed along with the sensors.
        system_ids = [entry[0] for entry in systems]
        if local_system_id not in system_ids:
            system_ids.append(local_system_id)
        for system_id in system_ids:
            try:
                unlinked, new_host = update_system_hids_agents(system_id)
                not_linked_assets = not_linked_assets + unlinked
                if unlinked > 0:
                    insert_message = True
                if new_host and not send_refresh:
                    send_refresh = True
            except APIException as e:
                logger.error("[update_hids_agents] %s" % str(e))
    except Exception as e:
        logger.error("[update_hids_agents] %s" % str(e))
        return False
    if insert_message:
        success, local_system_id = get_system_id_from_local()
        additional_info = json.dumps({"not_linked_assets": not_linked_assets})
        insert_current_status_message("00000000-0000-0000-0000-000000010032",
                                      local_system_id, "system", additional_info)
    if send_refresh:
        refresh_hosts()
    return True
def start(self):
    # Purpose: check each system's configured DNS servers (from
    # ossim_setup.conf) and warn when every configured DNS is external.
    self.remove_monitor_data()
    rc, system_list = get_systems()
    if not rc:
        logger.error("Can't retrieve systems..%s" % str(system_list))
        return False
    for (system_id, system_ip) in system_list:
        # Use ansible to get the DNS config.
        result, ansible_output = get_av_config(system_ip, {'general_admin_dns': ''})
        logger.info("DNS returned from ossim_setup.conf %s" % str(ansible_output))
        if result:
            dnslist = []
            if 'general_admin_dns' in ansible_output:
                dnslist = ansible_output['general_admin_dns'].split(',')
            count = 0
            for ip in dnslist:
                # dns_is_external() appears to return -2 for external
                # addresses and -1 for malformed entries -- TODO confirm
                # against its definition.
                r = dns_is_external(ip)
                if r == -2:
                    count += 1
                elif r == -1:
                    logger.error("Bad data in admin_dns field of ossim_setup.conf: " + str(ip))
            # logger.info("DNS IP count = " + str(count))
            # NOTE(review): when dnslist is empty, count == len(dnslist) == 0
            # holds and the "all external" warning is stored -- confirm this
            # is intended for systems with no DNS configured.
            if count == len(dnslist):
                admin_dns_msg = "Warning: All DNS configured are externals"
                self.save_data(system_id, ComponentTypes.SYSTEM,
                               self.get_json_message({'admin_dns': admin_dns_msg,
                                                      'internal_dns': False}))
            else:
                self.save_data(system_id, ComponentTypes.SYSTEM,
                               self.get_json_message({'admin_dns': 'DNS ok. You have at least one internal DNS',
                                                      'internal_dns': True}))
        else:
            # Could not read the configuration; store the error text.
            if not self.save_data(system_id, ComponentTypes.SYSTEM,
                                  self.get_json_message({'admin_dns': 'Error: %s' % str(ansible_output),
                                                         'internal_dns': True})):
                logger.error("Can't save monitor info")
    return True
def start(self):
    """Record pending-update status for every system; when updates are
    pending, refresh the release info file (unless on a trial).

    :return: True on success, False otherwise
    """
    rt = True
    self.remove_monitor_data()
    logger.info("Checking for pending updates")
    result, systems = get_systems()
    if not result:
        logger.error("Can't retrieve the system info: %s" % str(systems))
        return False
    pending_updates = False
    for (system_id, system_ip) in systems:
        (success, info) = apimethod_get_update_info(system_id)
        if success:
            try:
                sys_pending = info['pending_updates']
                pending_updates = pending_updates or sys_pending
                logger.info("Pending Updates for system %s (%s): %s"
                            % (system_id, system_ip, sys_pending))
                self.save_data(system_id, ComponentTypes.SYSTEM,
                               self.get_json_message({"pending_updates": sys_pending}))
            except Exception as e:
                logger.error("[MonitorPendingUpdates] Error: %s" % str(e))
                rt = False
                break
        else:
            logger.error("MonitorPendingUpdates: %s" % info)
            rt = False
            break
    if not pending_updates:
        return rt
    success, local_ip = get_system_ip_from_local()
    if not success:
        logger.error("[MonitorPendingUpdates] Unable to get local IP: %s" % local_ip)
        return False
    success, is_pro = get_is_professional(local_ip)
    if success and is_pro:
        success, is_trial = system_is_trial('local')
        if success and is_trial:
            logger.info("[MonitorPendingUpdates] Trial version. Skipping download of release info file")
            return rt
        success, msg = ansible_download_release_info(local_ip)
        if not success:
            logger.error("[MonitorPendingUpdates] Unable to retrieve release info file: %s" % msg)
            return False
    return rt
def apimethod_get_remote_software_update(system_id, no_cache=False):
    """Retrieves the available updates for the given system_id

    Args:
        system_id(str): The system id of which we want to know if it has
                        available updates, or 'all' for every connected
                        system.
        no_cache(bool): Skip cached update info when True.

    Returns:
        (success, data): success=True when the operation went ok,
        otherwise success=False. On success data contains a dict with
        the update information per system id.
    """
    systems = []  # Systems that we are going to check the updates
    updates = {}  # Dic with the updates available for each system
    if system_id == 'all':
        # If all, we load all the systems
        result, all_systems = get_systems(directly_connected=False)
        if not result:
            # Bug fix: the error used to interpolate the (always empty)
            # local 'systems' list instead of the error in all_systems.
            api_log.error("Can't retrieve the system info: %s" % str(all_systems))
            return False, "Can't retrieve the system info: %s" % str(all_systems)
        # Fix: don't rebind the system_id parameter inside the loop.
        for (sys_id, system_ip) in all_systems:
            systems.append(sys_id)
    else:
        # Otherwise we only load in the list the system given.
        systems.append(system_id)
    # For each system, getting the update info
    for sys_id in systems:
        success, data = apimethod_get_update_info(sys_id, no_cache=no_cache)
        if not success:
            error_msg = "Can't retrieve the system updates " + \
                        "for system %s: %s" % (str(sys_id), str(data))
            api_log.error(error_msg)
            updates[sys_id] = {}
            continue
        updates[sys_id] = {'current_version': data['current_version'],
                           'last_update': data['last_update'],
                           'packages': {'total': data['total_packages'],
                                        'pending_updates': data['pending_updates'],
                                        'pending_feed_updates': data['pending_feed_updates']}}
    return True, updates
def start(self):
    """Build the per-asset enabled-plugin map from all sensors and hand
    it to update_host_plugins."""
    self.remove_monitor_data()
    ok, systems = get_systems(system_type="Sensor")
    if not ok:
        logger.error("[MonitorUpdateHostPlugins] "
                     "Can't retrieve the system info: {0}".format(str(systems)))
        return False
    assets = {}
    for (system_id, system_ip) in systems:
        ok, sensor_id = get_sensor_id_from_system_id(system_id)
        if not ok:
            logger.error("[MonitorUpdateHostPlugins] "
                         "Can't resolve senor_id of system {0}: {1}".format(system_id, sensor_id))
            continue
        try:
            per_asset = get_sensor_plugins_enabled_by_asset(sensor_id=sensor_id, no_cache=True)
        except APIException as e:
            logger.error("[MonitorUpdateHostPlugins] "
                         "Can't obtain plugin information from system {0}: {1}".format(system_id, str(e)))
            continue
        # Accumulate this sensor's plugin ids per asset.
        try:
            for asset, asset_plugins in per_asset.iteritems():
                if asset not in assets:
                    assets[asset] = []
                ids = [plugin['plugin_id'] for plugin in asset_plugins.values()]
                assets[asset] += ids
        except KeyError as e:
            logger.warning("[MonitorUpdateHostPlugins] "
                           "Bad format in plugins enabled by asset: {0}".format(str(e)))
    ok, msg = update_host_plugins(data=assets)
    if not ok:
        logger.error("[MonitorUpdateHostPlugins] "
                     "Can't update host plugin information: {0}".format(msg))
        return False
    return True
def start(self):
    """Flag systems running an insecure VPN configuration.

    :return: True on success, False otherwise
    """
    result, systems = get_systems()
    if not result:
        logger.error("Cannot retrieve system info: %s" % str(systems))
        return False
    self.remove_monitor_data()
    for system_id, system_ip in systems:
        try:
            insecure = ansible_check_insecure_vpn(system_ip)
            if not self.save_data(system_id, ComponentTypes.SYSTEM,
                                  self.get_json_message({'vpn_insecure': insecure})):
                logger.error("Cannot save monitor info")
        except Exception as exc:
            logger.error("[MonitorInsecureVPN]: %s" % str(exc))
    # Bug fix: report success explicitly — the function used to fall
    # off the end and return None, which reads as failure.
    return True
def start(self):
    """For each sensor, count enabled plugins (global plus per asset)
    and store how close the sensor is to its plugin limit."""
    self.remove_monitor_data()
    ok, systems = get_systems(system_type="Sensor")
    if not ok:
        logger.error("[MonitorEnabledPluginsLimit] Can't retrieve the system info: {0}".format(str(systems)))
        return False
    for (system_id, system_ip) in systems:
        ok, sensor_id = get_sensor_id_from_system_id(system_id)
        if not ok:
            logger.error("[MonitorEnabledPluginsLimit] "
                         "Can't resolve sensor_id of system {0}: {1}".format(system_id, sensor_id))
            continue
        try:
            sensor_plugins = get_sensor_plugins(sensor_id=sensor_id, no_cache=True)
            enabled = sensor_plugins.get('enabled', {})
            global_count = len(enabled.get('detectors', []))
            per_asset_count = len(list(chain.from_iterable(enabled.get('devices', {}).values())))
            total = global_count + per_asset_count
            # Temporal, should read it from from agent config (ansible method needed)
            warning_threshold = 85
            max_limit_threshold = 100
            monitor_data = {
                'system_id': system_id,
                'system_ip': system_ip,
                'plugins_enabled_total': total,
                'plugins_allowed_to_add': max_limit_threshold - total,
                'limit_reached': total >= max_limit_threshold,
                'warning_reached': (warning_threshold <= total) and (total < max_limit_threshold)
            }
            if not self.save_data(sensor_id, ComponentTypes.SENSOR,
                                  self.get_json_message(monitor_data)):
                logger.error("[MonitorEnabledPluginsLimit] Cannot save monitor info")
        except APIException as e:
            logger.error("[MonitorEnabledPluginsLimit] "
                         "Can't obtain plugin information from system {0}: {1}".format(system_id, str(e)))
            continue
    return True
def make_backup(bk_type):
    """Make the backup and return the result"""
    # Runs a backup of type bk_type on the local system and every sensor,
    # retrying each host up to MAX_TRIES times. Returns True only when
    # every host's backup succeeded.
    current_tries = 0  # Current try.
    all_backups_ok = True
    result, systems = get_systems('Sensor')
    if not result:
        notifier.error("An error occurred while making the Backup [%s]. Cant' retrieve the systems " % bk_type)
        return False
    result, local_system_ip = get_system_ip_from_local(local_loopback=False)
    if not result:
        notifier.error("An error occurred while making the Backup [%s]. Cant' retrieve the systems " % bk_type)
        return False
    # The local system is backed up alongside the sensors.
    system_ips = [x[1] for x in systems]
    if local_system_ip not in system_ips:
        system_ips.append(local_system_ip)
    for system_ip in system_ips:
        backup_error = ""
        backup_made = False
        current_tries = 0
        while current_tries < MAX_TRIES:
            try:
                data = run_backup(target=system_ip, backup_type=bk_type)
                if data[system_ip]['failures'] > 0 or data[system_ip]['unreachable'] > 0:
                    backup_error = "Backup (%s) Error %s" % (bk_type, data)
                else:
                    notifier.info("Backup successfully made [%s - %s] " % (system_ip,bk_type))
                    backup_made = True
                    # Pushing the counter past MAX_TRIES ends the retry
                    # loop; the finally block below still adds 1.
                    current_tries=MAX_TRIES+1
            except Exception as e:
                backup_error = "An exception occurred while making the Backup(%s) %s" % (bk_type,str( e))
                notifier.error("An exception occurred while making the Backup [%s - %s]" % (system_ip,bk_type))
            finally:
                # Runs on both success and failure paths of the try.
                current_tries+=1
        if not backup_made:
            all_backups_ok = False
            notifier.error("Backup(%s) Fails: %s" % (bk_type,backup_error))
            #TODO: It should throw an alarm
    # Don't launch the clean logger if backup fails
    if all_backups_ok:
        if not clean_logger():
            notifier.error("An error occurred while cleaning the logger logs.")
    return all_backups_ok
def remove_plugin_from_sensors(plugin_file):
    """ Disable and remove custom plugin from all systems.

    Args:
        plugin_file: (str) Full path to plugin file.

    Returns:
        (bool) Status
    """
    plugin_name = splitext(basename(plugin_file))[0]
    result, added_sensors = get_systems(system_type="Sensor",
                                        exclusive=True,
                                        convert_to_dict=True)
    # added_sensors maps sensor id -> sensor ip, e.g.
    # {'564d1731-5369-d912-e91b-61c1fff3cf6c': '192.168.87.197'}
    if not result:
        api_log.error('Cannot get list of connected sensors: {}'.format(added_sensors))
        return False
    # The local machine is treated as one more sensor.
    if isinstance(added_sensors, dict):
        added_sensors['local'] = '127.0.0.1'
    if added_sensors:
        for sensor_id, sensor_ip in added_sensors.iteritems():
            api_log.info('Trying to disable global plugin "{}" plugin on - {}'.format(plugin_name, sensor_ip))
            result, msg = disable_plugin_globally(plugin_name, sensor_ip)
            if not result:
                api_log.error(msg)
            api_log.info('Trying to disable per-asset plugin "{}" plugin on - {}'.format(plugin_name, sensor_ip))
            result, msg = disable_plugin_per_assets(plugin_name, sensor_ip)
            if not result:
                api_log.error(msg)
    # Finally delete the plugin file itself on every sensor.
    api_log.info('Removing plugin file: {} on sensors {}'.format(plugin_file, added_sensors))
    result = remove_file(host_list=added_sensors.values(), file_name=plugin_file)
    return result
def start(self):
    """
    Check in all system if there is a tunnel up and keys exits.
    if keys exists and tunnel down, exec all tunnel shutdown process
    """
    # NOTE(review): 'ret' keeps the raw (success, data) tuple so the
    # early-error path returns that tuple, while the success path returns
    # (result, '') -- a 2-tuple rather than the bool other monitors
    # return. Confirm the caller's expected contract.
    success, systems = ret = get_systems()
    if not success:
        logger.error("Can't get systems list")
        return ret
    result = True
    for (system_id, system_ip) in systems:
        logger.info("Checking supports tunnels in system ('%s','%s')" % (system_id, system_ip))
        success, result = check_support_tunnels(system_ip)
        # NOTE(review): this branches on the returned data ('result')
        # rather than the call status ('success') -- verify against
        # check_support_tunnels' return convention.
        if not result:
            logger.error("Can't check support tunnel in system ('%s','%s')" % (system_id, system_ip))
        else:
            logger.info("Tunnel in ('%s','%s'): %s" % (system_id, system_ip, result))
    # Only the last system's result survives the loop.
    return result, ''
def apimethod_get_remote_software_update(system_id, no_cache=False):
    """Retrieves the available updates for the given system_id

    Args:
        system_id(str): The system id of which we want to know if it has
                        available updates, or 'all' for every connected
                        system.
        no_cache(bool): Skip cached update info when True.

    Returns:
        (success, data): success=True when the operation went ok,
        otherwise success=False. On success data contains a dict with
        the update information per system id.
    """
    systems = []  # Systems that we are going to check the updates
    updates = {}  # Dic with the updates available for each system
    if system_id == 'all':
        # If all, we load all the systems
        result, all_systems = get_systems(directly_connected=False)
        if not result:
            # Bug fix: the error used to interpolate the (always empty)
            # local 'systems' list instead of the error in all_systems.
            api_log.error("Can't retrieve the system info: %s" % str(all_systems))
            return False, "Can't retrieve the system info: %s" % str(all_systems)
        # Fix: avoid rebinding the system_id parameter inside the loop.
        for (sys_id, system_ip) in all_systems:
            systems.append(sys_id)
    else:
        # Otherwise we only load in the list the system given.
        systems.append(system_id)
    # For each system, getting the update info
    for sys_id in systems:
        success, data = apimethod_get_update_info(sys_id, no_cache=no_cache)
        if not success:
            error_msg = "Can't retrieve the system updates " + \
                        "for system %s: %s" % (str(sys_id), str(data))
            api_log.error(error_msg)
            updates[sys_id] = {}
            continue
        info = {'current_version': data['current_version'],
                'last_update': data['last_update'],
                'packages': {'total': data['total_packages'],
                             'pending_updates': data['pending_updates'],
                             'pending_feed_updates': data['pending_feed_updates']}}
        updates[sys_id] = info
    return True, updates
def start(self):
    """Flag systems whose VPN configuration is insecure.

    :return: True on success, False otherwise
    """
    result, systems = get_systems()
    if not result:
        logger.error("Cannot retrieve system info: %s" % str(systems))
        return False
    self.remove_monitor_data()
    for system_id, system_ip in systems:
        try:
            insecure = ansible_check_insecure_vpn(system_ip)
            if not self.save_data(system_id,
                                  ComponentTypes.SYSTEM,
                                  self.get_json_message({'vpn_insecure': insecure})):
                logger.error("Cannot save monitor info")
        except Exception as exc:
            logger.error("[MonitorInsecureVPN]: %s" % str(exc))
    # Bug fix: the monitor previously returned None on success; return
    # True explicitly like the other monitors.
    return True
def start(self):
    """Record, for each system, whether a pending reboot is required.

    Returns:
        True on completion, False when the system list is unavailable.
    """
    ok, systems = get_systems()
    if not ok:
        logger.error("Cannot retrieve system info: %s" % str(systems))
        return False

    self.remove_monitor_data()

    for sys_id, sys_ip in systems:
        needed, msg = system_reboot_needed(sys_ip)
        if not needed:
            logger.error("Cannot retrieve system {0} information: {1}".format(sys_id, msg))
            continue
        payload = self.get_json_message({'reboot_needed': msg})
        if not self.save_data(sys_id, ComponentTypes.SYSTEM, payload):
            logger.error("Cannot save monitor info")
    return True
def remove_old_database_files():
    """Task to run periodically.

    Purges old 'configuration*' and 'environment*' backup files (older
    than the configured number of backup days) on every known system.

    Returns:
        bool: True when every removal succeeded, False otherwise.
        (BUG FIX: the computed status was previously discarded — the
        function implicitly returned None.)
    """
    result, systems = get_systems()
    all_task_ok = True
    if not result:
        # BUG FIX: a failed get_systems() was previously silent.
        logger.info("Removing old files error: cannot retrieve systems %s" % str(systems))
        return False
    for system_id, system_ip in systems:
        try:
            backup_days = get_config_backup_days()
            logger.info("Backup days... %s" % backup_days)
            data = remove_old_files(target=system_ip,
                                    rm_filter="configuration*",
                                    n_days=backup_days)
            if data[system_ip]["failures"] > 0 or data[system_ip]["unreachable"] > 0:
                logger.info("Removing old configuration files Error %s" % data)
                all_task_ok = False
            data = remove_old_files(target=system_ip,
                                    rm_filter="environment*",
                                    n_days=backup_days)
            if data[system_ip]["failures"] > 0 or data[system_ip]["unreachable"] > 0:
                logger.info("Removing old files Error %s" % data)
                all_task_ok = False
        except Exception as e:
            logger.info("Removing old files error %s" % str(e))
            # BUG FIX: an exception also means the task did not fully succeed.
            all_task_ok = False
    return all_task_ok
def start(self):
    """Collect plugin-integrity information from every connected sensor.

    Only meaningful on professional installations; non-professional
    systems short-circuit with success.

    Returns:
        True on success (or when not professional), False on failure.
    """
    self.remove_monitor_data()

    ok, local_ip = get_system_ip_from_local(local_loopback=False)
    if not ok:
        logger.error("Cannot retrieve local system IP: %s" % str(local_ip))
        return False

    # Professional check: nothing to do otherwise.
    ok, is_pro = get_is_professional(local_ip)
    if not (ok and is_pro):
        return True

    ok, sensors = get_systems(system_type="Sensor")
    if not ok:
        logger.error("Can't retrieve the system info: %s" % str(sensors))
        return False

    for sys_id, sys_ip in sensors:
        ok, info = check_plugin_integrity(sys_id)
        if not ok:
            logger.error(
                "Can't obtain integrity plugin information from system '%s'",
                sys_id)
            continue
        try:
            # Persist the integrity report into the monitor_data table.
            self.save_data(sys_id, ComponentTypes.SENSOR,
                           self.get_json_message(info))
        except Exception as e:
            logger.error("[MonitorPluginIntegrity] Error: %s" % str(e))
    return True
def make_backup_in_all_systems(backup_type):
    """Run a backup of the given type on the local system and on every
    directly connected remote sensor.

    Returns:
        bool: True when all the backups finished successfully.
    """
    ok, sensors = get_systems(system_type='Sensor', directly_connected=True)
    if not ok:
        notifier.error("An error occurred while making the Backup [%s]. Cant' retrieve the systems " % backup_type)
        return False

    ok, local_id = get_system_id_from_local()
    if not ok:
        notifier.error("An error occurred while making the Backup [%s]. Cant' retrieve the system ID" % backup_type)
        return False

    # Target every connected sensor, plus the local system itself.
    target_ids = [entry[0] for entry in sensors]
    if local_id not in target_ids:
        target_ids.append(local_id)

    # Get server ip in case of distributed deployment (Because only server
    # has the UI / possibility to set backup_pass)
    ok, server_ip = get_system_ip_from_system_id(local_id)
    if not ok:
        return False

    backup_pass = ansible_get_backup_config_pass(server_ip)
    every_backup_ok = True
    for target_id in target_ids:
        ok, msg = make_system_backup(system_id=target_id,
                                     backup_type=backup_type,
                                     rotate=True,
                                     backup_pass=backup_pass)
        if not ok:
            every_backup_ok = False
    return every_backup_ok
def start(self):
    """Compare the plugin package version of every sensor against the
    local plugin package and store the comparison as monitor data.

    Returns:
        bool: True on success, False when something failed.
        (BUG FIX: 'rt' was previously computed but never returned.)
    """
    rt = True
    try:
        self.remove_monitor_data()
        logger.info("Monitor %s Working..." % self.monitor_id)

        rc, sensor_list = get_systems(system_type="Sensor")
        # BUG FIX: the result of get_systems() was previously unchecked.
        if not rc:
            raise Exception(str(sensor_list))

        # BUG FIX: get_plugin_package_info_local() was called twice;
        # one checked call provides the local version for comparison.
        (success, local_version) = get_plugin_package_info_local()
        if not success:
            raise Exception(str(local_version))

        for (system_id, _) in sensor_list:
            (success, sensor_id) = get_sensor_id_from_system_id(system_id)
            if not success:
                logger.warning("Can't obtain sensor_id for system_id '%s'", system_id)
                continue
            if sensor_id == '':
                logger.warning("Sensor (%s) ID not found" % sensor_id)
                continue
            (success, info) = get_plugin_package_info_from_sensor_id(sensor_id)
            if not success:
                logger.warning("Can't obtain plugin version for sensor '%s'", sensor_id)
                continue
            # An empty remote version cannot be compared.
            if info['version'] != '':
                comparison = compare_dpkg_version(info['version'],
                                                  local_version['version'])
            else:
                comparison = ''
            data_sensor = {'version': info['version'],
                           'md5': info['md5'],
                           'comparison': comparison}
            if not self.save_data(sensor_id, ComponentTypes.SENSOR,
                                  self.get_json_message(data_sensor)):
                logger.error("Can't save monitor info for sensor '%s'" % sensor_id)
    except Exception as e:
        logger.error("Something wrong happen while running the monitor..%s, %s" % (self.get_monitor_id(), str(e)))
        rt = False
    return rt
def start(self):
    """Ping every known system and record its reachability.

    Returns:
        True on completion, False when the system list is unavailable.
    """
    self.remove_monitor_data()

    ok, systems = get_systems()
    if not ok:
        logger.error("Can't retrieve systems..%s" % str(systems))
        return False

    for sys_id, sys_ip in systems:
        reachable, output = ping_system(sys_ip)
        if reachable:
            self.save_data(sys_id, ComponentTypes.SYSTEM,
                           self.get_json_message({'remote_certificates': 'Ping OK',
                                                  'contacted': True}))
        else:
            # Check whether is sensor or not
            is_sensor, _sensor_id = get_sensor_id_from_system_id(sys_id)
            payload = self.get_json_message({'remote_certificates': 'Error: %s' % str(output),
                                             'contacted': False,
                                             'is_sensor': is_sensor})
            if not self.save_data(sys_id, ComponentTypes.SYSTEM, payload):
                logger.error("Can't save monitor info")
    return True
def start(self):
    """For every system, inspect the configured admin DNS servers and
    record whether at least one internal DNS is present.

    Returns:
        bool: True on completion, False when the system list is
        unavailable.
    """
    self.remove_monitor_data()
    rc, system_list = get_systems()
    if not rc:
        logger.error("Can't retrieve systems..%s" % str(system_list))
        return False
    for (system_id, system_ip) in system_list:
        # Use ansible to get the DNS config.
        result, ansible_output = get_av_config(system_ip, {'general_admin_dns': ''})
        logger.info("DNS returned from ossim_setup.conf %s" % str(ansible_output))
        if not result:
            if not self.save_data(system_id, ComponentTypes.SYSTEM,
                                  self.get_json_message({'admin_dns': 'Error: %s' % str(ansible_output),
                                                         'internal_dns': True})):
                logger.error("Can't save monitor info")
            continue
        dnslist = []
        if 'general_admin_dns' in ansible_output:
            dnslist = ansible_output['general_admin_dns'].split(',')
        external_count = 0
        for ip in dnslist:
            r = dns_is_external(ip)
            if r == -2:
                # -2 marks an external DNS server.
                external_count += 1
            elif r == -1:
                logger.error("Bad data in admin_dns field of ossim_setup.conf: " + str(ip))
        # BUG FIX: an empty DNS list previously matched the "all external"
        # branch (0 == len([])); require at least one configured entry.
        if dnslist and external_count == len(dnslist):
            admin_dns_msg = "Warning: All DNS configured are externals"
            self.save_data(system_id, ComponentTypes.SYSTEM,
                           self.get_json_message({'admin_dns': admin_dns_msg,
                                                  'internal_dns': False}))
        else:
            self.save_data(system_id, ComponentTypes.SYSTEM,
                           self.get_json_message({'admin_dns': 'DNS ok. You have at least one internal DNS',
                                                  'internal_dns': True}))
    return True
def start(self):
    """
    Check in all system if there is a tunnel up and keys exits.
    if keys exists and tunnel down, exec all tunnel shutdown process
    """
    # NOTE(review): on get_systems failure this returns the raw
    # (success, systems) tuple rather than a bool like the other
    # monitors — confirm callers expect that.
    success, systems = ret = get_systems()
    if not success:
        logger.error("Can't get systems list")
        return ret
    result = True
    for (system_id, system_ip) in systems:
        logger.info("Checking supports tunnels in system ('%s','%s')" % (system_id, system_ip))
        success, result = check_support_tunnels(system_ip)
        # NOTE(review): this tests 'result' (the payload) rather than
        # 'success' — looks like 'success' may have been intended; confirm
        # against check_support_tunnels' contract before changing.
        if not result:
            logger.error(
                "Can't check support tunnel in system ('%s','%s')" % (system_id, system_ip))
        else:
            logger.info("Tunnel in ('%s','%s'): %s" % (system_id, system_ip, result))
    # Returns the last system's result plus an empty message string.
    return result, ''
def start(self):
    """
    Starts the monitor activity: store the root-partition disk usage of
    every system as monitor data.

    :return: True on success, False otherwise
    """
    self.remove_monitor_data()
    # Find the local server.
    rc, system_list = get_systems()
    if not rc:
        logger.error("Can't retrieve systems..%s" % str(system_list))
        return False
    # NOTE: a dead 'args' dict (plugin_list/output_type) was built here
    # but never used; it has been removed.
    for (system_id, system_ip) in system_list:
        _result, ansible_output = get_root_disk_usage(system_ip)
        if not self.save_data(system_id, ComponentTypes.SYSTEM,
                              self.get_json_message({'disk_usage': ansible_output})):
            logger.error("Can't save monitor info")
    return True
def start(self):
    """
    Starts the monitor activity: record the pf_ring packet-loss average
    of every sensor.

    :return: True on success, False when something failed.
        (BUG FIX: 'rt' was previously computed but never returned.)
    """
    rt = True
    try:
        self.remove_monitor_data()
        logger.info("Monitor %s Working..." % self.monitor_id)
        rc, sensor_list = get_systems(system_type="Sensor")
        if not rc:
            logger.error("Can't retrieve sensor list: %s" % str(sensor_list))
            return False
        for (sensor_id, sensor_ip) in sensor_list:
            if sensor_id == '':
                logger.warning("Sensor (%s) ID not found" % sensor_ip)
                continue
            logger.info("Getting dropped packets for sensor_ip %s" % sensor_ip)
            sensor_stats = get_pfring_stats(sensor_ip)
            try:
                packet_lost_average = sensor_stats["contacted"][sensor_ip][
                    "stats"]["packet_lost_average"]
                monitor_data = {'packet_loss': packet_lost_average}
                logger.info("Lost packet average for sensor: %s = %s" % (sensor_ip, packet_lost_average))
                # Save data component_id = canonical uuid
                if not self.save_data(sensor_id, ComponentTypes.SENSOR,
                                      self.get_json_message(monitor_data)):
                    logger.error("Can't save monitor info")
            except KeyError:
                # Sensor did not report pf_ring stats; skip it.
                pass
    except Exception as e:
        logger.error(
            "Something wrong happen while running the monitor..%s, %s" % (self.get_monitor_id(), str(e)))
        rt = False
    return rt
def start(self): """ Starts the monitor activity """ #Remove the previous monitor data. self.remove_monitor_data() success, local_ip = get_system_ip_from_local(local_loopback=False) if not success: logger.error("Cannot retrieve local system IP: %s" % str(local_ip)) return False # Check if this is professional or not. success, is_pro = get_is_professional(local_ip) if not (success and is_pro): return True # Iterate over the sensors. result, systems = get_systems(system_type="Sensor") if not result: logger.error("Can't retrieve the system info: %s" % str(systems)) return False for (system_id, system_ip) in systems: (success, info) = check_plugin_integrity(system_id) if success: try: #Create the JSON data to store the monitor info monitor_data = info #Save the data to the monitor_data table self.save_data(system_id, ComponentTypes.SENSOR, self.get_json_message(monitor_data)) except Exception as e: logger.error("[MonitorPluginIntegrity] Error: %s" % str(e)) else: logger.error("Can't obtain integrity plugin information from system '%s'", system_id) return True
def start(self):
    """
    Starts the monitor activity

    Stores, for each known system, whether a reboot is pending
    (the 'reboot_needed' flag of the monitor data).

    :return: True on completion, False when the system list is unavailable
    """
    result, systems = get_systems()
    if not result:
        logger.error("Cannot retrieve system info: %s" % str(systems))
        return False
    self.remove_monitor_data()
    for (system_id, system_ip) in systems:
        result, msg = system_reboot_needed(system_ip)
        if result:
            if not self.save_data(
                    system_id,
                    ComponentTypes.SYSTEM,
                    self.get_json_message({'reboot_needed': msg})):
                logger.error("Cannot save monitor info")
        else:
            # Per-system failure: log and continue with the next system.
            logger.error(
                "Cannot retrieve system {0} information: {1}".format(
                    system_id, msg))
    return True
def start(self):
    """
    Starts the monitor activity: collect root-partition disk usage for
    every system and persist it as monitor data.

    :return: True on success, False otherwise
    """
    self.remove_monitor_data()
    # Find the local server.
    rc, system_list = get_systems()
    if not rc:
        logger.error("Can't retrieve systems..%s" % str(system_list))
        return False
    # The old 'args' dict (plugin_list/output_type) was dead code and has
    # been removed: it was populated but never referenced.
    for (system_id, system_ip) in system_list:
        dummy_result, ansible_output = get_root_disk_usage(system_ip)
        if not self.save_data(
                system_id,
                ComponentTypes.SYSTEM,
                self.get_json_message({'disk_usage': ansible_output})):
            logger.error("Can't save monitor info")
    return True
def make_backup_in_all_systems(backup_type):
    """
    Make the backup for:
    - Local system
    - All connected remote sensors
    return True if all the backups finished successfully, False otherwise
    """
    result, systems = get_systems(system_type='Sensor', directly_connected=True)
    if not result:
        notifier.error("An error occurred while making the Backup [%s]. Cant' retrieve the systems " % backup_type)
        return False
    result, local_system_id = get_system_id_from_local()
    if not result:
        notifier.error("An error occurred while making the Backup [%s]. Cant' retrieve the system ID" % backup_type)
        return False
    # Back up every directly connected sensor plus the local system.
    system_ids = [x[0] for x in systems]
    if local_system_id not in system_ids:
        system_ids.append(local_system_id)
    # Get server ip in case of distributed deployment (Because only server
    # has the UI / possibility to set backup_pass)
    success, server_ip = get_system_ip_from_system_id(local_system_id)
    if not success:
        return False
    all_backups_ok = True
    backup_config_pass = ansible_get_backup_config_pass(server_ip)
    for system_id in system_ids:
        success, msg = make_system_backup(system_id=system_id,
                                          backup_type=backup_type,
                                          rotate=True,
                                          backup_pass=backup_config_pass)
        if not success:
            # One failure marks the whole run as failed, but the
            # remaining systems are still backed up.
            all_backups_ok = False
    return all_backups_ok
def start(self):
    """Push every custom plugin .cfg file to all connected sensors.

    Returns:
        bool: True when nothing needed syncing or every rsync succeeded,
        False on any failure.
    """
    # Drop the data gathered by the previous run.
    self.remove_monitor_data()

    # Only the .cfg files of the custom plugins need distributing.
    custom_dir = "/etc/alienvault/plugins/custom/"
    cfg_files = glob.glob(custom_dir + '*.cfg')
    if not cfg_files:
        logger.info('Nothing to sync...')
        return True

    # exclusive=True restricts the query to pure sensors instead of
    # every machine carrying a sensor profile.
    ok, sensors = get_systems(system_type="Sensor", exclusive=True)
    if not ok:
        logger.error(
            "[MonitorSyncCustomPlugins] Can't retrieve the system info: {}"
            .format(str(sensors)))
        return False

    for sensor_id, sensor_ip in sensors:
        for cfg_path in cfg_files:
            pushed, _msg = rsync_push(local_ip="127.0.0.1",
                                      remote_ip=sensor_ip,
                                      local_file_path=cfg_path,
                                      remote_file_path=cfg_path)
            if not pushed:
                logger.error(
                    "[MonitorSyncCustomPlugins] Can't rsync with {}".
                    format(sensor_ip))
                return False
    return True
def remove_plugin_from_sensors(plugin_file):
    """
    Disable and remove custom plugin from all systems.

    Args:
        plugin_file: (str) Full path to plugin file.
    Returns:
        (bool) Status
    """
    plugin_name = splitext(basename(plugin_file))[0]

    ok, added_sensors = get_systems(system_type="Sensor",
                                    exclusive=True,
                                    convert_to_dict=True)
    # get_systems returns e.g.
    # {'564d1731-5369-d912-e91b-61c1fff3cf6c': '192.168.87.197'}
    if not ok:
        api_log.error('Cannot get list of connected sensors: {}'.format(added_sensors))
        return False

    # The local machine has to be processed as well.
    if isinstance(added_sensors, dict):
        added_sensors['local'] = '127.0.0.1'

    if added_sensors:
        for sensor_id, sensor_ip in added_sensors.iteritems():
            api_log.info('Trying to disable global plugin "{}" plugin on - {}'.format(plugin_name, sensor_ip))
            disabled, msg = disable_plugin_globally(plugin_name, sensor_ip)
            if not disabled:
                api_log.error(msg)
            api_log.info('Trying to disable per-asset plugin "{}" plugin on - {}'.format(plugin_name, sensor_ip))
            disabled, msg = disable_plugin_per_assets(plugin_name, sensor_ip)
            if not disabled:
                api_log.error(msg)

    # Remove plugin file from disk
    api_log.info('Removing plugin file: {} on sensors {}'.format(plugin_file, added_sensors))
    return remove_file(host_list=added_sensors.values(), file_name=plugin_file)
def start(self):
    """Refresh cached information for every known system.

    Re-queries status/config endpoints with no_cache=True to repopulate
    the caches, then persists VPN IP, HA role/IP/name and hostname
    changes to the database.

    :return: True on success, False on unexpected error or when the
        system list cannot be retrieved.
    """
    try:
        self.remove_monitor_data()
        rc, system_list = get_systems(directly_connected=False)
        if not rc:
            logger.error("Can't retrieve systems..%s" % str(system_list))
            return False
        for (system_id, system_ip) in system_list:
            success, sensor_id = get_sensor_id_from_system_id(system_id)
            if not success:
                logger.warning("[MonitorRetrievesRemoteInfo] "
                               "get_sensor_id_from_system_id failed for system %s (%s)" % (system_ip, system_id))
                # Not a sensor (or lookup failed): skip the sensor-only steps.
                sensor_id = None
            ha_name = None
            success, result = system_all_info(system_id, no_cache=True)
            if not success:
                logger.warning("[MonitorRetrievesRemoteInfo] "
                               "system_all_info failed for system %s (%s)" % (system_ip, system_id))
                continue
            if 'ha_status' in result:
                ha_name = 'active' if result['ha_status'] == 'up' else 'passive'
            success, result = network_status(system_id, no_cache=True)
            if not success:
                logger.warning("[MonitorRetrievesRemoteInfo] "
                               "network_status failed for system %s (%s)" % (system_ip, system_id))
                continue
            success, result = alienvault_status(system_id, no_cache=True)
            if not success:
                logger.warning("[MonitorRetrievesRemoteInfo] "
                               "alienvault_status failed for system %s (%s)" % (system_ip, system_id))
                continue
            success, result = status_tunnel(system_id, no_cache=True)
            if not success:
                logger.warning("[MonitorRetrievesRemoreInfo] "
                               "status_tunnel failed for system %s (%s)" % (system_ip, system_id))
                continue
            success, result = get_system_config_general(system_id, no_cache=True)
            if not success:
                logger.warning("[MonitorRetrievesRemoteInfo] "
                               "get_system_config_general failed for system %s (%s)" % (system_ip, system_id))
                continue
            hostname = result.get('general_hostname', None)
            if hostname is not None:
                success, hostname_old = db_get_hostname(system_id)
                if not success:
                    logger.warning("[MonitorRetrievesRemoteInfo] "
                                   "db_get_hostname failed for system %s (%s)" % (system_ip, system_id))
                    continue
                if hostname == hostname_old:
                    # Unchanged hostname -> no DB update needed later.
                    hostname = None
            # Getting config params from the system,
            # we do use this result var so do not change the order of the calls!
            success, config_alienvault = get_system_config_alienvault(system_id, no_cache=True)
            if not success:
                logger.warning("[MonitorRetrievesRemoteInfo] "
                               "get_system_config_alienvault failed for system %s (%s)" % (system_ip, system_id))
                continue
            ha_ip = None
            ha_role = None
            if 'ha_ha_virtual_ip' in config_alienvault:
                ha_ip = config_alienvault['ha_ha_virtual_ip']
                if not is_valid_ipv4(ha_ip):
                    ha_ip = None
            if 'ha_ha_role' in config_alienvault:
                ha_role = config_alienvault['ha_ha_role']
                if ha_role not in ['master', 'slave']:
                    ha_role = None
            # Update interfaces cache
            success, result = get_interfaces(system_id, no_cache=True)
            if not success:
                continue
            # Update system setup data cache
            success, result = system_get(system_id, no_cache=True)
            if not success:
                continue
            vpn_ip = None
            if "ansible_tun0" in result:
                try:
                    vpn_ip = result['ansible_tun0']['ipv4']['address']
                except Exception:
                    vpn_ip = None
            # Sensor exclusive
            if sensor_id is not None and sensor_id != '':
                self.__update_sensor_properties(sensor_id=sensor_id,
                                                config_alienvault=config_alienvault)
                # Refresh sensor plugins cache
                try:
                    get_sensor_plugins(sensor_id, no_cache=True)
                except APIException:
                    logger.warning("[MonitorRetrievesRemoteInfo] "
                                   "error getting plugins from sensor '{0}' {1}".format(sensor_id, system_ip))
            if vpn_ip is not None:
                success, message = set_system_vpn_ip(system_id, vpn_ip)
                if not success:
                    logger.warning("[MonitorRetrievesRemoteInfo] set_system_vpn_ip failed: %s" % message)
            if ha_role is not None:
                success, message = set_system_ha_role(system_id, ha_role)
                if not success:
                    logger.warning("[MonitorRetrievesRemoteInfo] set_system_ha_role failed: %s" % message)
            else:
                # No valid HA role -> clear it in the DB.
                success, message = set_system_ha_role(system_id, 'NULL')
                if not success:
                    logger.warning("[MonitorRetrievesRemoteInfo] set_system_ha_role failed: %s" % message)
            if ha_ip is not None:
                success, message = set_system_ha_ip(system_id, ha_ip)
                if not success:
                    logger.warning("[MonitorRetrievesRemoteInfo] set_system_ha_ip: %s" % message)
                success, message = fix_system_references()
                if not success:
                    logger.warning("[MonitorRetrievesRemoteInfo] fix_system_references: %s" % message)
            if ha_name is not None:
                success, message = set_system_ha_name(system_id, ha_name)
                if not success:
                    logger.warning("[MonitorRetrievesRemoteInfo] set_system_ha_name failed: %s" % message)
            else:
                # Not in HA -> clear the HA ip.
                success, message = set_system_ha_ip(system_id, '')
                if not success:
                    logger.warning("[MonitorRetrievesRemoteInfo] set_system_ha_ip failed: %s" % message)
            if hostname is not None:
                success, message = db_system_update_hostname(system_id, hostname)
                if not success:
                    logger.warning("[MonitorRetrievesRemoteInfo] db_system_update_hostname failed: %s" % message)
            # Backups
            success, message = get_backup_list(system_id=system_id,
                                               backup_type="configuration",
                                               no_cache=True)
            if not success:
                logger.warning("[MonitorRetrievesRemoteInfo] get_backup_list failed: %s" % message)
    except Exception as err:
        api_log.error("Something wrong happened while running the MonitorRetrievesRemoteInfo monitor %s" % str(err))
        return False
    return True
def start(self):
    """
    Starts the monitor activity

    Get if an asset is sending logs to the system
    Get if an asset received log are parsed by a plugin

    Messages:
        Generate a warning message when an asset which has sent logs does not send an event in 24 hours
        Generate an info message when an asset is not sending logs to the system
        Generate an info message when an asset is sending logs but there is no plugin enable parsing the logs

    :return: True on success, False otherwise

    monitor_data = {
        "has_events" : True|False
        "last_event_arrival": time in seconds from the last event
        "has_logs" = True|False
        "enabled_plugin" = True | False
    }
    """
    # NOTE(review): 'rt' is maintained but the function never returns it;
    # the implicit return is None — confirm callers don't rely on a bool.
    rt = True
    try:
        # 1 - Remove old monitor data
        self.remove_monitor_data()
        # 2 - Get the sensor list
        rc, all_sensor_list = get_systems(system_type="Sensor")
        if not rc:
            logger.error("Can't retrieve sensor list: %s" % str(all_sensor_list))
            return False
        # Store those logs that are coming from a known device (var/log/alienvault/devices)
        log_files_per_sensor = {}
        # List of plugins enabled by sensor
        plugins_enabled_by_sensor = {}
        # List of last events for each device
        last_event_per_host = get_timestamp_last_event_for_each_device()
        # For each sensor we will get the list of devices that are reporting to it,
        # and the list of active plugins with locations.
        device_list = {}
        for (sensor_id, sensor_ip) in all_sensor_list:
            logger.info("Monitor asset activity... assets for sensor %s" % sensor_ip)
            # Retrieves the list of devices with its logs:
            # 192.168.2.2: /var/log/alienvault/devices/192.168.2.2/192.168.2.2.log
            # #10576 - log_files_per_sensor[sensor_ip] = get_hosts_in_syslog(sensor_ip)
            sensor_devices_logging = get_devices_logging(sensor_ip)
            log_files_per_sensor[sensor_ip] = sensor_devices_logging
            # Retrieves the plugins enabled in each sensor.
            response = get_plugin_enabled_by_sensor(sensor_ip)
            tmp_hash = {}
            if "contacted" in response:
                if sensor_ip in response['contacted']:
                    tmp_hash = response['contacted'][sensor_ip]['data']
            plugins_enabled_by_sensor[sensor_ip] = tmp_hash  # plugin_name = [log_file]
            sensor_devices_list = get_devices_ids_list_per_sensor(
                sensor_ip, sensor_devices_logging.keys())
            for device_id, device_ip in sensor_devices_list.iteritems():
                if not device_list.has_key(device_id):
                    device_list[device_id] = device_ip
        # Sensors table has the vpn ip if it exists
        # #10576 - asset_list = get_asset_list()
        # device_list = get_device_list()
        logger.info("Asset activity .... lets start working")
        n_devices = 0
        for device_id, device_ip in device_list.iteritems():
            device_id_uuid = uuid.UUID(device_id)
            n_devices += 1
            if n_devices % 1000 == 0:
                # Progress logging every 1000 analyzed assets.
                logger.info("Number of assets that have been analyzed.. %s" % n_devices)
            monitor_data = {}
            # Get uuid string from bytes. -> ASSET_ID
            #asset_id = get_uuid_string_from_bytes(asset.id)
            # GET List of assets IPS
            #asset_ips = [get_ip_str_from_bytes(host.ip) for host in asset.host_ips]
            has_events = False
            has_logs = False
            num_of_enabled_plugins = 0
            last_event_arrival = 0
            now = datetime.utcnow()
            device_id_str_with_no_hyphen = (device_id_uuid.hex).upper()
            # Are there any events in the database coming from this device?
            if last_event_per_host.has_key(device_id_str_with_no_hyphen):
                has_events = True
                # Time in seconds since the last event
                td = (now - last_event_per_host[device_id_str_with_no_hyphen])
                # Is it been over 24 hours since the arrival of the latest event coming from this device?
                last_event_arrival = td.seconds + td.days * 86400
            else:
                # No events coming from this device
                has_events = False
            # Is the device sending logs to the system?
            # We will check if there are log coming from this device in each sensor
            for sensor_ip, log_files in log_files_per_sensor.iteritems():
                enabled_plugins = json.loads(str(plugins_enabled_by_sensor[sensor_ip]))
                locations = []  # List of plugin locations
                for plugin, location in enabled_plugins.iteritems():
                    locations.extend(location.split(','))
                device_logs = []
                # An asset could have more than one IP address.
                # We should check each of those IP addresses
                # One device - one ip
                #for ip in asset_ips:
                if log_files.has_key(device_ip):
                    device_logs.extend(log_files[device_ip])
                    has_logs = True
                for log in device_logs:
                    # A log is "parsed" when an enabled plugin reads it.
                    if log in locations:
                        num_of_enabled_plugins += 1
            monitor_data['has_events'] = has_events
            monitor_data['last_event_arrival'] = last_event_arrival
            monitor_data['has_logs'] = has_logs
            monitor_data['enabled_plugin'] = True if num_of_enabled_plugins > 0 else False
            logger.info("Device Monitor: %s" % str(monitor_data))
            self.append_monitor_object(str(device_id_uuid),
                                       ComponentTypes.HOST,
                                       self.get_json_message(monitor_data))
        #Commit all objects
        logger.info("Monitor Done.. Committing objects")
        self.commit_data()
    except Exception, e:
        rt = False
        logger.error(
            "Something wrong happen while running the monitor..%s, %s, %s" % (self.get_monitor_id(), str(e), traceback.format_exc()))
def start(self):
    """Refresh cached information for every system and persist
    sensor/HA/VPN properties to the database.

    :return: True on success, False on unexpected error or when the
        system list cannot be retrieved.
    """
    try:
        self.remove_monitor_data()
        rc, system_list = get_systems()
        if not rc:
            logger.error("Can't retrieve systems..%s" % str(system_list))
            return False
        for (system_id, system_ip) in system_list:
            success, sensor_id = get_sensor_id_from_system_id(system_id)
            if not success:
                continue
            # Each call below refreshes its server-side cache (no_cache=True).
            success, result = get_plugins_from_yaml(sensor_id, no_cache=True)
            if not success:
                continue
            success, result = system_all_info(system_id, no_cache=True)
            if not success:
                continue
            success, result = network_status(system_id, no_cache=True)
            if not success:
                continue
            success, result = alienvault_status(system_id, no_cache=True)
            if not success:
                continue
            success, result = get_system_config_general(system_id, no_cache=True)
            if not success:
                continue
            #Getting config params from the system, we do use this result var so do not change the order of the calls!
            success, result = get_system_config_alienvault(system_id, no_cache=True)
            if not success:
                continue
            prads_enabled = False
            suricata_snort_enabled = False
            netflow_enabled = False
            ha_ip = None
            ha_role = None
            if 'sensor_detectors' in result:
                prads_enabled = True if 'prads' in result['sensor_detectors'] else False
                suricata_snort_enabled = True if 'snort' in result['sensor_detectors'] or 'suricata' in result['sensor_detectors'] else False
            if 'sensor_netflow' in result:
                netflow_enabled = True if result['sensor_netflow'] == 'yes' else False
            if 'ha_ha_virtual_ip' in result:
                ha_ip = result['ha_ha_virtual_ip']
                if not is_valid_ipv4(ha_ip):
                    ha_ip = None
            if 'ha_ha_role' in result:
                ha_role = result['ha_ha_role']
                if ha_role not in ['master', 'slave']:
                    ha_role = None
            success, result = get_interfaces(system_id, no_cache=True)
            if not success:
                continue
            success, result = system_get(system_id, no_cache=True)
            if not success:
                continue
            vpn_ip = None
            if "ansible_tun0" in result:
                try:
                    vpn_ip = result['ansible_tun0']['ipv4']['address']
                except:
                    vpn_ip = None
            # TO DB; vpn_ip, netflow, active inventory, passive inventory
            # ha_ip
            success, message = set_sensor_properties_active_inventory(sensor_id, suricata_snort_enabled)
            if not success:
                continue
            success, message = set_sensor_properties_passive_inventory(sensor_id, prads_enabled)
            if not success:
                continue
            success, message = set_sensor_properties_netflow(sensor_id, netflow_enabled)
            if not success:
                continue
            if vpn_ip is not None:
                success, message = set_system_vpn_ip(system_id, vpn_ip)
                if not success:
                    continue
            if ha_role is not None:
                success, message = set_system_ha_role(system_id, ha_role)
                if not success:
                    continue
            if ha_ip is not None:
                success, message = set_system_ha_ip(system_id, ha_ip)
                if not success:
                    continue
    except Exception as err:
        api_log.error("Something wrong happened while running the MonitorRetrievesRemoteInfo monitor %s" % str(err))
        return False
    return True
def sync_asec_plugins(plugin=None, enable=True):
    """ Send the ASEC generated plugins to the system sensors and enable them
    Args:
        plugin: plugin name
        enable: wether we should enable the plugin or not. Default = True
    Returns:
        success (bool):
        msg (str): Success message/Error info
    """
    if not plugin:
        return False, "No plugin to sync"
    try:
        plugin_path = "/var/lib/asec/plugins/" + plugin + ".cfg"
        sql_path = plugin_path + ".sql"
        sensors = []
        (success, sensors) = get_systems(system_type='sensor')
        if not success:
            return False, "Unable to get sensors list: %s" % sensors
        # Bug in ansible copy module prevents us from copying the files from
        # /var/lib/asec/plugins as it has permissions 0 for "other"
        # Workaround: make a local copy using ansible command module
        plugin_tmp_path = "/tmp/" + plugin + ".cfg"
        sql_tmp_path = plugin_tmp_path + ".sql"
        success, local_ip = get_system_ip_from_local()
        if not success:
            error_msg = "[ansible_install_plugin] " + \
                        "Failed to make get local IP: %s" % local_ip
            return False, error_msg
        (success, msg) = local_copy_file(local_ip, plugin_path, plugin_tmp_path)
        if not success:
            error_msg = "[ansible_install_plugin] " + \
                        "Failed to make temp copy of plugin file: %s" % msg
            return False, error_msg
        (success, msg) = local_copy_file(local_ip, sql_path, sql_tmp_path)
        if not success:
            error_msg = "[ansible_install_plugin] " + \
                        "Failed to make temp copy of sql file: %s" % msg
            return False, error_msg
        all_ok = True
        for (sensor_id, sensor_ip) in sensors:
            (success, msg) = ansible_install_plugin(sensor_ip, plugin_tmp_path, sql_tmp_path)
            # NOTE(review): when install succeeds but enable=False this
            # falls into the "Error installing" branch below — confirm
            # that is the intended behaviour for enable=False.
            if success and enable:
                # Get list of active plugins and add the new one.
                # Then send the list back to the sensor?
                (success, data) = get_sensor_detectors(sensor_ip)
                if success:
                    data['sensor_detectors'].append(plugin)
                    sensor_det = ','.join(data['sensor_detectors'])
                    (success, msg) = set_sensor_detectors(sensor_ip, sensor_det)
                if not success:
                    error_msg = "[sync_asec_plugins] " + \
                                "Error enabling plugin %s " % plugin + \
                                "for sensor %s: %s" % (sensor_ip, msg)
                    api_log.error(error_msg)
                    all_ok = False
                else:
                    # Now launch reconfig task
                    job = alienvault_reconfigure.delay(sensor_ip)
            else:
                error_msg = "[sync_asec_plugins] " + \
                            "Error installing plugin %s " % plugin + \
                            "in sensor %s: %s" % (sensor_ip, msg)
                api_log.error(error_msg)
                all_ok = False
        # Delete temporal copies of the files
        remove_file([local_ip], plugin_tmp_path)
        remove_file([local_ip], sql_tmp_path)
        if not all_ok:
            error_msg = "Plugin %s installation failed " % plugin + \
                        "for some sensors"
            return False, error_msg
        info_msg = "Plugin %s installed. Enabled = %s" % (plugin, str(enable))
        return True, info_msg
    except Exception as e:
        api_log.error("[sync_asec_plugins] Exception catched: %s" % str(e))
        return False, "[sync_asec_plugins] Unknown error"
def start(self):
    """Refresh caches and sensor/system properties for every connected system.

    For each system: refreshes the plugin/status/config caches, derives the
    prads / suricata-snort / netflow flags and HA settings from the AlienVault
    config, then persists them via the set_* helpers.

    :return: True on success, False when the system list cannot be retrieved
             or an unexpected error occurs.
    """
    try:
        self.remove_monitor_data()
        rc, system_list = get_systems()
        if not rc:
            logger.error("Can't retrieve systems..%s" % str(system_list))
            return False
        for (system_id, system_ip) in system_list:
            success, sensor_id = get_sensor_id_from_system_id(system_id)
            if not success:
                continue
            # The following calls are made mainly to refresh their caches.
            success, result = get_plugins_from_yaml(sensor_id, no_cache=True)
            if not success:
                continue
            success, result = system_all_info(system_id, no_cache=True)
            if not success:
                continue
            success, result = network_status(system_id, no_cache=True)
            if not success:
                continue
            success, result = alienvault_status(system_id, no_cache=True)
            if not success:
                continue
            success, result = get_system_config_general(system_id,
                                                        no_cache=True)
            if not success:
                continue
            # Getting config params from the system, we do use this result
            # var so do not change the order of the calls!
            success, result = get_system_config_alienvault(system_id,
                                                           no_cache=True)
            if not success:
                continue
            prads_enabled = False
            suricata_snort_enabled = False
            netflow_enabled = False
            ha_ip = None
            ha_role = None
            if 'sensor_detectors' in result:
                # Direct boolean expressions instead of "True if ... else
                # False" ternaries — same values, clearer intent.
                detectors = result['sensor_detectors']
                prads_enabled = 'prads' in detectors
                suricata_snort_enabled = ('snort' in detectors or
                                          'suricata' in detectors)
            if 'sensor_netflow' in result:
                netflow_enabled = result['sensor_netflow'] == 'yes'
            if 'ha_ha_virtual_ip' in result:
                ha_ip = result['ha_ha_virtual_ip']
                if not is_valid_ipv4(ha_ip):
                    ha_ip = None
            if 'ha_ha_role' in result:
                ha_role = result['ha_ha_role']
                if ha_role not in ['master', 'slave']:
                    ha_role = None
            success, result = get_interfaces(system_id, no_cache=True)
            if not success:
                continue
            success, result = system_get(system_id, no_cache=True)
            if not success:
                continue
            vpn_ip = None
            if "ansible_tun0" in result:
                try:
                    vpn_ip = result['ansible_tun0']['ipv4']['address']
                except Exception:
                    # Narrowed from a bare "except:"; a missing key/shape in
                    # the ansible facts just means there is no VPN address.
                    vpn_ip = None
            # TO DB; vpn_ip, netflow, active inventory, passive inventory
            # ha_ip
            success, message = set_sensor_properties_active_inventory(
                sensor_id, suricata_snort_enabled)
            if not success:
                continue
            success, message = set_sensor_properties_passive_inventory(
                sensor_id, prads_enabled)
            if not success:
                continue
            success, message = set_sensor_properties_netflow(
                sensor_id, netflow_enabled)
            if not success:
                continue
            if vpn_ip is not None:
                success, message = set_system_vpn_ip(system_id, vpn_ip)
                if not success:
                    continue
            if ha_role is not None:
                success, message = set_system_ha_role(system_id, ha_role)
                if not success:
                    continue
            if ha_ip is not None:
                success, message = set_system_ha_ip(system_id, ha_ip)
                if not success:
                    continue
    except Exception as err:
        api_log.error(
            "Something wrong happened while running the MonitorRetrievesRemoteInfo monitor %s" % str(err))
        return False
    return True
def start(self):
    """ Starts the monitor activity
    Get if an asset is sending logs to the system
    Get if an asset received log are parsed by a plugin

    Messages:
     Generate a warning message when an asset which has sent logs does not
     send an event in 24 hours
     Generate an info message when an asset is not sending logs to the system
     Generate an info message when an asset is sending logs but there is no
     plugin enable parsing the logs

    :return: True on success, False otherwise

    monitor_data = {
        "has_events" : True|False
        "last_event_arrival": time in seconds from the last event
        "has_logs" = True|False
        "enabled_plugin" = True | False
    }
    """
    rt = True
    try:
        # 1 - Remove old monitor data
        self.remove_monitor_data()
        # 2 - Get the sensor list
        rc, all_sensor_list = get_systems(system_type="Sensor")
        if not rc:
            logger.error("Can't retrieve sensor list: %s" % str(all_sensor_list))
            return False
        # Store those logs that are coming from a known device
        # (var/log/alienvault/devices)
        log_files_per_sensor = {}
        # List of plugins enabled by sensor
        plugins_enabled_by_sensor = {}
        # List of last events for each device
        last_event_per_host = get_timestamp_last_event_for_each_device()
        # For each sensor we will get the list of devices that are reporting
        # to it, and the list of active plugins with locations.
        device_list = {}
        for (sensor_id, sensor_ip) in all_sensor_list:
            logger.info("Monitor asset activity... assets for sensor %s" % sensor_ip)
            # Retrieves the list of devices with its logs:
            # 192.168.2.2: /var/log/alienvault/devices/192.168.2.2/192.168.2.2.log
            sensor_devices_logging = get_devices_logging(sensor_ip)
            log_files_per_sensor[sensor_ip] = sensor_devices_logging
            # Retrieves the plugins enabled in each sensor.
            response = get_plugin_enabled_by_sensor(sensor_ip)
            tmp_hash = {}
            if "contacted" in response:
                if sensor_ip in response['contacted']:
                    tmp_hash = response['contacted'][sensor_ip]['data']
            # plugin_name = [log_file]
            plugins_enabled_by_sensor[sensor_ip] = tmp_hash
            sensor_devices_list = get_devices_ids_list_per_sensor(
                sensor_ip, sensor_devices_logging.keys())
            for device_id, device_ip in sensor_devices_list.items():
                if device_id not in device_list:
                    device_list[device_id] = device_ip
        # Sensors table has the vpn ip if it exists
        logger.info("Asset activity .... lets start working")
        n_devices = 0
        for device_id, device_ip in device_list.items():
            device_id_uuid = uuid.UUID(device_id)
            n_devices += 1
            if n_devices % 1000 == 0:
                logger.info("Number of assets that have been analyzed.. %s" % n_devices)
            monitor_data = {}
            has_events = False
            has_logs = False
            num_of_enabled_plugins = 0
            last_event_arrival = 0
            now = datetime.utcnow()
            # Events are stored keyed by the uppercase hex uuid (no hyphens).
            device_id_str_with_no_hyphen = (device_id_uuid.hex).upper()
            # Are there any events in the database coming from this device?
            if device_id_str_with_no_hyphen in last_event_per_host:
                has_events = True
                # Time in seconds since the last event
                td = (now - last_event_per_host[device_id_str_with_no_hyphen])
                # Is it been over 24 hours since the arrival of the latest
                # event coming from this device?
                last_event_arrival = td.seconds + td.days * 86400
            else:
                # No events coming from this device
                has_events = False
            # Is the device sending logs to the system?
            # We will check if there are log coming from this device in each
            # sensor
            for sensor_ip, log_files in log_files_per_sensor.items():
                enabled_plugins = json.loads(str(plugins_enabled_by_sensor[sensor_ip]))
                locations = []
                # List of plugin locations
                for plugin, location in enabled_plugins.items():
                    locations.extend(location.split(','))
                device_logs = []
                # One device - one ip
                if device_ip in log_files:
                    device_logs.extend(log_files[device_ip])
                    has_logs = True
                for log in device_logs:
                    if log in locations:
                        num_of_enabled_plugins += 1
            monitor_data['has_events'] = has_events
            monitor_data['last_event_arrival'] = last_event_arrival
            monitor_data['has_logs'] = has_logs
            monitor_data['enabled_plugin'] = num_of_enabled_plugins > 0
            logger.info("Device Monitor: %s" % str(monitor_data))
            self.append_monitor_object(str(device_id_uuid),
                                       ComponentTypes.HOST,
                                       self.get_json_message(monitor_data))
        # Commit all objects
        logger.info("Monitor Done.. Committing objects")
        self.commit_data()
    except Exception as e:
        rt = False
        logger.error("Something wrong happen while running the monitor..%s, %s, %s" % (self.get_monitor_id(), str(e), traceback.format_exc()))
    # BUGFIX: the original fell off the end and always returned None even
    # though rt was tracked; now honour the documented True/False contract.
    return rt
def start(self): """ Starts the monitor activity """ # Remove the previous monitor data. self.remove_monitor_data() # Iterate over the sensors. success, systems = get_systems(system_type="Sensor") if not success: logger.error( "[MonitorEnabledPluginsLimit] Can't retrieve the system info: {0}" .format(str(systems))) return False for (system_id, system_ip) in systems: success, sensor_id = get_sensor_id_from_system_id(system_id) if not success: logger.error( "[MonitorEnabledPluginsLimit] " "Can't resolve sensor_id of system {0}: {1}".format( system_id, sensor_id)) continue try: sensor_plugins = get_sensor_plugins(sensor_id=sensor_id, no_cache=True) enabled_plugins = sensor_plugins.get('enabled', {}) enabled_global_count = len(enabled_plugins.get( 'detectors', [])) enabled_per_asset_count = len( list( chain.from_iterable( enabled_plugins.get('devices', {}).values()))) enabled_total = enabled_global_count + enabled_per_asset_count # Temporal, should read it from from agent config (ansible method needed) warning_threshold = 85 max_limit_threshold = 100 monitor_data = { 'system_id': system_id, 'system_ip': system_ip, 'plugins_enabled_total': enabled_total, 'plugins_allowed_to_add': max_limit_threshold - enabled_total, 'limit_reached': enabled_total >= max_limit_threshold, 'warning_reached': (warning_threshold <= enabled_total) and (enabled_total < max_limit_threshold) } if not self.save_data(sensor_id, ComponentTypes.SENSOR, self.get_json_message(monitor_data)): logger.error( "[MonitorEnabledPluginsLimit] Cannot save monitor info" ) except APIException as e: logger.error( "[MonitorEnabledPluginsLimit] " "Can't obtain plugin information from system {0}: {1}". format(system_id, str(e))) continue return True
def start(self):
    """Collect remote info from every system (including not directly
    connected ones) and persist it: HA name/role/IP, VPN tunnel IP,
    hostname changes, sensor properties and several refreshed caches.

    :return: True on success, False when the system list cannot be
             retrieved or an unexpected error occurs.
    """
    try:
        self.remove_monitor_data()
        rc, system_list = get_systems(directly_connected=False)
        if not rc:
            logger.error("Can't retrieve systems..%s" % str(system_list))
            return False
        for (system_id, system_ip) in system_list:
            success, sensor_id = get_sensor_id_from_system_id(system_id)
            if not success:
                # Not fatal: the system may simply not be a sensor.
                logger.warning(
                    "[MonitorRetrievesRemoteInfo] "
                    "get_sensor_id_from_system_id failed for system %s (%s)"
                    % (system_ip, system_id))
                sensor_id = None
            ha_name = None
            success, result = system_all_info(system_id, no_cache=True)
            if not success:
                logger.warning(
                    "[MonitorRetrievesRemoteInfo] "
                    "system_all_info failed for system %s (%s)"
                    % (system_ip, system_id))
                continue
            if 'ha_status' in result:
                ha_name = 'active' if result[
                    'ha_status'] == 'up' else 'passive'
            success, result = network_status(system_id, no_cache=True)
            if not success:
                logger.warning("[MonitorRetrievesRemoteInfo] "
                               "network_status failed for system %s (%s)"
                               % (system_ip, system_id))
                continue
            success, result = alienvault_status(system_id, no_cache=True)
            if not success:
                logger.warning(
                    "[MonitorRetrievesRemoteInfo] "
                    "alienvault_status failed for system %s (%s)"
                    % (system_ip, system_id))
                continue
            success, result = status_tunnel(system_id, no_cache=True)
            if not success:
                # BUGFIX: log tag previously misspelled
                # "[MonitorRetrievesRemoreInfo]".
                logger.warning("[MonitorRetrievesRemoteInfo] "
                               "status_tunnel failed for system %s (%s)"
                               % (system_ip, system_id))
                continue
            success, result = get_system_config_general(system_id,
                                                        no_cache=True)
            if not success:
                logger.warning(
                    "[MonitorRetrievesRemoteInfo] "
                    "get_system_config_general failed for system %s (%s)"
                    % (system_ip, system_id))
                continue
            hostname = result.get('general_hostname', None)
            if hostname is not None:
                success, hostname_old = db_get_hostname(system_id)
                if not success:
                    logger.warning(
                        "[MonitorRetrievesRemoteInfo] "
                        "db_get_hostname failed for system %s (%s)"
                        % (system_ip, system_id))
                    continue
                if hostname == hostname_old:
                    # Unchanged hostname -> nothing to update later on.
                    hostname = None
            # Getting config params from the system,
            # we do use this result var so do not change the
            # order of the calls!
            success, config_alienvault = get_system_config_alienvault(
                system_id, no_cache=True)
            if not success:
                logger.warning(
                    "[MonitorRetrievesRemoteInfo] "
                    "get_system_config_alienvault failed for system %s (%s)"
                    % (system_ip, system_id))
                continue
            ha_ip = None
            ha_role = None
            if 'ha_ha_virtual_ip' in config_alienvault:
                ha_ip = config_alienvault['ha_ha_virtual_ip']
                if not is_valid_ipv4(ha_ip):
                    ha_ip = None
            if 'ha_ha_role' in config_alienvault:
                ha_role = config_alienvault['ha_ha_role']
                if ha_role not in ['master', 'slave']:
                    ha_role = None
            # Update interfaces cache
            success, result = get_interfaces(system_id, no_cache=True)
            if not success:
                continue
            # Update system setup data cache
            success, result = system_get(system_id, no_cache=True)
            if not success:
                continue
            vpn_ip = None
            if "ansible_tun0" in result:
                try:
                    vpn_ip = result['ansible_tun0']['ipv4']['address']
                except Exception:
                    # Missing key/shape in the ansible facts: no VPN address.
                    vpn_ip = None
            # Sensor exclusive
            if sensor_id is not None and sensor_id != '':
                self.__update_sensor_properties(
                    sensor_id=sensor_id,
                    config_alienvault=config_alienvault)
                # Refresh sensor plugins cache
                try:
                    get_sensor_plugins(sensor_id, no_cache=True)
                except APIException:
                    logger.warning(
                        "[MonitorRetrievesRemoteInfo] "
                        "error getting plugins from sensor '{0}' {1}".
                        format(sensor_id, system_ip))
            if vpn_ip is not None:
                success, message = set_system_vpn_ip(system_id, vpn_ip)
                if not success:
                    logger.warning(
                        "[MonitorRetrievesRemoteInfo] set_system_vpn_ip failed: %s" % message)
            if ha_role is not None:
                success, message = set_system_ha_role(system_id, ha_role)
                if not success:
                    logger.warning(
                        "[MonitorRetrievesRemoteInfo] set_system_ha_role failed: %s" % message)
            else:
                # No HA role configured: reset the stored value.
                success, message = set_system_ha_role(system_id, 'NULL')
                if not success:
                    logger.warning(
                        "[MonitorRetrievesRemoteInfo] set_system_ha_role failed: %s" % message)
            if ha_ip is not None:
                success, message = set_system_ha_ip(system_id, ha_ip)
                if not success:
                    logger.warning(
                        "[MonitorRetrievesRemoteInfo] set_system_ha_ip: %s" % message)
                success, message = fix_system_references()
                if not success:
                    logger.warning(
                        "[MonitorRetrievesRemoteInfo] fix_system_references: %s" % message)
                if ha_name is not None:
                    success, message = set_system_ha_name(
                        system_id, ha_name)
                    if not success:
                        logger.warning(
                            "[MonitorRetrievesRemoteInfo] set_system_ha_name failed: %s" % message)
            else:
                # No (valid) HA virtual IP: clear the stored value.
                success, message = set_system_ha_ip(system_id, '')
                if not success:
                    logger.warning(
                        "[MonitorRetrievesRemoteInfo] set_system_ha_ip failed: %s" % message)
            if hostname is not None:
                success, message = db_system_update_hostname(
                    system_id, hostname)
                if not success:
                    logger.warning(
                        "[MonitorRetrievesRemoteInfo] db_system_update_hostname failed: %s" % message)
            # Backups (refresh the configuration-backup list cache)
            success, message = get_backup_list(system_id=system_id,
                                               backup_type="configuration",
                                               no_cache=True)
            if not success:
                logger.warning(
                    "[MonitorRetrievesRemoteInfo] get_backup_list failed: %s" % message)
    except Exception as err:
        api_log.error(
            "Something wrong happened while running the MonitorRetrievesRemoteInfo monitor %s" % str(err))
        return False
    return True