def asynchronous_update(system_id, only_feed=False, update_key=""):
    """Launches an asynchronous update on the given system.

    Args:
        system_id (str): The system_id of the system to update.
        only_feed (bool): When True, update only the feed.
        update_key (str): Optional upgrade key forwarded to the update task.

    Returns:
        (bool, str): (True, job_id) on success,
                     (False, error message) otherwise.

    Examples:
        >>> asynchronous_update("11111111-1111-1111-111111111111")
        (True,"/var/log/alienvault/update/system_update.log")
    """
    (success, system_ip) = get_system_ip_from_system_id(system_id)
    if not success:
        # On failure the second element holds the error, not an IP.
        # Fixed: the message used system_ip for both placeholders instead
        # of the system id that failed to resolve.
        error_msg = "[asynchronous_update] Error retrieving " + \
                    "the system ip for the system id " + \
                    "%s -> %s" % (system_id, str(system_ip))
        return False, error_msg

    job = alienvault_asynchronous_update.delay(system_ip, only_feed, update_key)
    if job is None:
        error_msg = "Cannot update system %s. " % system_id + \
                    "Please verify that the system is reachable."
        api_log.error(error_msg)
        return False, error_msg
    # Installed-package data changes once the update runs; drop stale cache.
    flush_cache(namespace="system_packages")
    return True, job.id
def set_config_alienvault(system_id):
    """Sets AlienVault-specific configuration values on a system and
    launches an asynchronous reconfigure.

    Args:
        system_id (str): The system id whose configuration is updated.

    Returns:
        A make_ok/make_error HTTP response; on success it carries the id
        of the asynchronous reconfigure job.
    """
    # Whitelist of accepted query-string parameters.
    param_names = ['framework_framework_ip',
                   'sensor_detectors',
                   'sensor_interfaces',
                   'sensor_mservers',
                   'sensor_networks',
                   'server_server_ip']
    (success, system_ip) = get_system_ip_from_system_id(system_id)
    if not success:
        # system_ip holds the error message on failure.
        return make_error(system_ip, 500)
    set_values = {}
    for key, value in request.args.iteritems():
        if key not in param_names:
            return make_error("Bad param %s" % key, 400)
        set_values[key] = value
    (success, config_values) = set_av_config(system_ip, set_values)
    if not success:
        current_app.logger.error("system: set_config_alienvault error: " +
                                 str(config_values))
        return make_error(
            "Cannot set AlienVault configuration info %s" % str(config_values),
            500)
    flush_cache(namespace="system")
    job = alienvault_asynchronous_reconfigure.delay(system_id)
    return make_ok(job_id=job.id)
def alienvault_asynchronous_update(system_id, only_feed=False, update_key=""):
    """Runs an asynchronous alienvault update.

    Args:
        system_id (str): The system ID where we would like to run the
            alienvault-update.
        only_feed (bool): A boolean indicating whether we should update
            only the feed or not.
        update_key (str): Optional upgrade key.

    Returns:
        A serialized JobResult on failure. Falls through (None) when the
        update finished without errors.
    """
    # Refuse to launch two concurrent updates against the same system.
    if exist_task_running(task_type='alienvault_asynchronous_update',
                          current_task_request=current_task.request,
                          param_to_compare=system_id,
                          argnum=0):
        return JobResult(False, "An existing task running", "").serialize
    try:
        logger.info("Start asynchronous update <%s>" % system_id)
        rt, error_str = api_run_update(system_id, only_feed=only_feed, update_key=update_key)
        # When the task has been launched properly the error_str variable
        # will contain the log file.
        if not rt:
            return JobResult(
                False,
                "Something wrong happened while running the alienvault update %s" % error_str,
                "").serialize
        logger.info(" alienvault-update <%s> waiting to finish...." % system_id)
        time.sleep(1)  # Wait until the task is launched.
        n_process = 1
        # Poll until no update process remains on the remote system.
        while n_process > 0:
            success, n_process = api_check_if_process_is_running(system_id, error_str)
            time.sleep(1)
        rt, log_file = apimethod_get_asynchronous_command_log_file(system_id, error_str)
        if not rt:
            return JobResult(
                False,
                "Something wrong happened while retrieving the alienvault-update log file %s" % log_file,
                "").serialize
        rt, return_code_msg = apimethod_check_asynchronous_command_return_code(system_id, error_str + ".rc")
        if not rt:
            return JobResult(
                False,
                "Something wrong happened while retrieving the alienvault-return code <%s>" % str(return_code_msg),
                log_file).serialize
        flush_cache(namespace="system_packages")
        logger.info("Running alienvault-update ... end %s - %s" % (rt, error_str))
    except Exception as e:
        # Fixed: the log message referenced alienvault-reconfig although
        # this task runs alienvault-update.
        logger.error("An error occurred running alienvault-update: %s, %s" % (str(e), traceback.format_exc()))
        return JobResult(
            False,
            "An error occurred running alienvault-update <%s>" % str(e),
            "").serialize
def alienvault_asynchronous_update(system_ip, only_feed=False, update_key=""):
    """Runs an asynchronous alienvault update.

    Args:
        system_ip (str): The system IP where we would like to run the alienvault-update.
        only_feed (bool): A boolean indicating whether we should update only the feed or not.
        update_key (str): Upgrade key.

    Returns:
        JobResult (dict): where obj['result'] == True if success or False otherwise.
        Falls through (None) when the update finished without errors.
    """
    # Refuse to launch two concurrent updates against the same system.
    running = exist_task_running(task_type='alienvault_asynchronous_update',
                                 current_task_request=current_task.request,
                                 param_to_compare=system_ip,
                                 argnum=0)
    if running:
        return JobResult(False, "An existing task running", "", "300090", system_ip=system_ip).serialize
    try:
        logger.info("Start asynchronous update <%s>" % system_ip)
        rt, error_str = ansible_run_async_update(system_ip, only_feed=only_feed, update_key=update_key)
        # When the task has been launched properly the error_str variable will contain the log file.
        if not rt:
            error_msg = "Something wrong happened while running the alienvault update %s" % error_str
            if 'unreachable' in error_msg:
                error_msg = 'System unreachable'
            return JobResult(False, error_msg, "", "300091", system_ip=system_ip).serialize
        logger.info(" alienvault-update <%s> waiting to finish...." % system_ip)
        time.sleep(1)  # Wait until the task is launched.
        n_process = 1
        # Poll until no update process remains on the remote system.
        while int(n_process) > 0:
            success, n_process = ansible_check_if_process_is_running(system_ip, error_str)
            time.sleep(1)
        flush_cache(namespace='system_packages')
        rt, log_file = ansible_get_asynchronous_command_log_file(system_ip, error_str)
        if not rt:
            return JobResult(
                False,
                "Something wrong happened while retrieving the alienvault-update log file: %s" % log_file,
                "", "300092", system_ip=system_ip).serialize
        rt, return_code_msg = ansible_check_asynchronous_command_return_code(system_ip, error_str + ".rc")
        if not rt:
            error_msg = "Something wrong happened while retrieving the alienvault-return code <%s>" % return_code_msg
            error_id = "300093"
            # Map a non-zero alienvault-update return code to its
            # user-friendly message and error id.
            if return_code_msg.startswith("Return code is different from 0"):
                return_code = return_code_msg.split("<")[1].split(">")[0]
                error_msg = SYSTEM_UPDATE_ERROR_STRINGS[return_code][1]
                error_id = SYSTEM_UPDATE_ERROR_STRINGS[return_code][0]
            return JobResult(False, error_msg, log_file, error_id, system_ip=system_ip).serialize
        logger.info("Running alienvault-update ... end %s - %s" % (rt, error_str))
    except Exception as e:
        # 'except X as e' is valid on Python 2.6+ and Python 3,
        # unlike the old 'except X, e' form.
        logger.error("An error occurred running alienvault-update: %s, %s" % (str(e), traceback.format_exc()))
        return JobResult(False, "An error occurred running alienvault-update <%s>" % str(e), "", "300099").serialize
def put_interface(system_id, iface, promisc):
    """Modify a network interface's properties.

    Currently the only supported change is toggling promiscuous mode.
    Returns either the failed (success, error) lookup tuple or the
    (success, data) tuple produced by the ansible layer.
    """
    # Invalidate the cached sensor network data before touching the interface.
    flush_cache(namespace="sensor_network")

    lookup = get_system_ip_from_system_id(system_id)
    success, ip = lookup
    if not success:
        return lookup

    return ansiblemethods.system.network.set_iface_promisc_status(ip, iface, promisc)
def start(self):
    """Starts the monitor activity.

    Iterates over every known system, records each system's
    pending-updates status as monitor data and, when at least one system
    has pending updates, refreshes the local release-info file.

    Returns:
        bool: True when every system was processed successfully,
        False on the first error.
    """
    rt = True
    # Drop any data left over from a previous monitor run.
    self.remove_monitor_data()
    # Clear cache
    flush_cache(namespace='system')
    # Load all system from current_local
    logger.info("Checking for pending updates")
    result, systems = get_systems()
    if not result:
        logger.error("Can't retrieve the system info: %s" % str(systems))
        return False
    pending_updates = False
    for (system_id, system_ip) in systems:
        (success, info) = apimethod_get_update_info(system_id)
        if success:
            try:
                sys_pending_updates = info['pending_updates']
                # Remember whether at least one system has pending updates.
                pending_updates = pending_updates or sys_pending_updates
                logger.info("Pending Updates for system %s (%s): %s" %
                            (system_id, system_ip, sys_pending_updates))
                monitor_data = {"pending_updates": sys_pending_updates}
                self.save_data(system_id,
                               ComponentTypes.SYSTEM,
                               self.get_json_message(monitor_data))
            except Exception as e:
                # Stop on the first system that cannot be processed.
                logger.error("[MonitorPendingUpdates] Error: %s" % str(e))
                rt = False
                break
        else:
            logger.error("MonitorPendingUpdates: %s" % info)
            rt = False
            break
    if pending_updates:
        # Something can be updated: refresh the release-info file so the
        # available update details can be shown.
        success, local_ip = get_system_ip_from_local()
        if not success:
            logger.error(
                "[MonitorPendingUpdates] Unable to get local IP: %s"
                % local_ip)
            return False
        success, msg = ansible_download_release_info(local_ip)
        if not success:
            logger.error(
                "[MonitorPendingUpdates] Unable to retrieve release info file: %s"
                % msg)
            return False
    return rt
def set_sensor_detectors (system_ip, plugins): """ @param system_ip: The system IP where you want to get the [sensor]/detectors from ossim_setup.conf @param Comma separate list of detector plugins to activate. Must exists in the machine @return A tuple (sucess|error, data|msgerror) """ # Need to flush namespace "system" as alienvault_config is cached in that namespace and # is used to show the active plugins, so we flush it to refresh the active plugins flush_cache(namespace="system") response = ansible.run_module(host_list=[system_ip], module="av_config", args="sensor_detectors=%s op=set" % plugins) return parse_av_config_response(response, system_ip)
def set_config_general(system_id):
    """Sets general configuration values on a system and launches an
    asynchronous reconfigure.

    Args:
        system_id (str): The system id whose configuration is updated.

    Returns:
        A make_ok/make_error HTTP response; on success it carries the id
        of the asynchronous reconfigure job.
    """
    # Whitelist of accepted query-string parameters.
    param_names = ['general_admin_dns',
                   'general_admin_gateway',
                   'general_admin_ip',
                   'general_admin_netmask',
                   'general_hostname',
                   'general_mailserver_relay',
                   'general_mailserver_relay_passwd',
                   'general_mailserver_relay_port',
                   'general_mailserver_relay_user',
                   'general_ntp_server',
                   'firewall_active']
    (success, system_ip) = get_system_ip_from_system_id(system_id)
    if not success:
        # system_ip holds the error message on failure.
        return make_error(system_ip, 500)
    set_values = {}
    for key, value in request.args.iteritems():
        if key not in param_names:
            return make_error("Bad param %s" % key, 400)
        set_values[key] = value
    (success, config_values) = set_av_config(system_ip, set_values)
    if not success:
        current_app.logger.error("system: set_config_general error: " +
                                 str(config_values))
        return make_error(
            "Cannot set general configuration info %s" % str(config_values),
            500)
    flush_cache(namespace="system")
    # Keep the local database in sync with the values pushed to the system.
    if 'general_hostname' in set_values:
        success, msg = db_system_update_hostname(
            system_id, set_values['general_hostname'])
        if not success:
            return make_error("Error setting values: %s" % msg, 500)
    if 'general_admin_ip' in set_values:
        success, msg = db_system_update_admin_ip(
            system_id, set_values['general_admin_ip'])
        if not success:
            return make_error("Error setting values: %s" % msg, 500)
        # Make the new admin IP reachable through the ansible inventory.
        success, msg = ansible_add_ip_to_inventory(
            set_values['general_admin_ip'])
        if not success:
            return make_error("Error setting the admin IP address", 500)
    job = alienvault_asynchronous_reconfigure.delay(system_id)
    return make_ok(job_id=job.id)
def set_system_config(system_id, set_values):
    """Set the configuration values to the system.

    Args:
        system_id(str): The system id where the configuration will be set
        set_values: key-value dictionary with the configuration settings

    Returns:
        (success, job_id): success=True when the operation went OK,
            otherwise success=False. On success job_id is the id of the
            async reconfig job, an error message string otherwise.
    """
    (success, system_ip) = get_system_ip_from_system_id(system_id)
    if not success:
        # system_ip holds the error message on failure.
        return (False, system_ip)

    (success, config_values) = set_av_config(system_ip, set_values)
    if not success:
        api_log.error("system: set_config_general error: " + str(config_values))
        return (False,
                "Cannot set general configuration info: %s" % str(config_values))
    flush_cache(namespace="system_config")

    # Keep the local database in sync with the values pushed to the system.
    if 'general_hostname' in set_values:
        success, msg = db_system_update_hostname(
            system_id, set_values['general_hostname'])
        if not success:
            return (False, "Error setting values: %s" % msg)

    new_admin_ip = None
    if 'general_admin_ip' in set_values:
        new_admin_ip = set_values['general_admin_ip']
        success, msg = db_system_update_admin_ip(
            system_id, set_values['general_admin_ip'])
        if not success:
            return (False, "Error setting values: %s" % msg)
        success, msg = ansible_add_ip_to_inventory(
            set_values['general_admin_ip'])
        if not success:
            # Fixed: the underlying ansible error was silently dropped;
            # include it so the caller can diagnose the failure.
            return (False, "Error setting the admin IP address: %s" % msg)

    job = alienvault_asynchronous_reconfigure.delay(system_ip, new_admin_ip)
    return (True, job.id)
def start(self):
    """Starts the monitor activity.

    Iterates over every known system, records each system's
    pending-updates status as monitor data and, when at least one system
    has pending updates, refreshes the local release-info file.

    Returns:
        bool: True when every system was processed successfully,
        False on the first error.
    """
    rt = True
    # Drop any data left over from a previous monitor run.
    self.remove_monitor_data()
    # Clear cache
    flush_cache(namespace='system')
    # Load all system from current_local
    logger.info("Checking for pending updates")
    result, systems = get_systems()
    if not result:
        logger.error("Can't retrieve the system info: %s" % str(systems))
        return False
    pending_updates = False
    for (system_id, system_ip) in systems:
        (success, info) = apimethod_get_update_info(system_id)
        if success:
            try:
                sys_pending_updates = info['pending_updates']
                # Remember whether at least one system has pending updates.
                pending_updates = pending_updates or sys_pending_updates
                logger.info("Pending Updates for system %s (%s): %s" %
                            (system_id, system_ip, sys_pending_updates))
                monitor_data = {"pending_updates": sys_pending_updates}
                self.save_data(system_id,
                               ComponentTypes.SYSTEM,
                               self.get_json_message(monitor_data))
            except Exception as e:
                # Stop on the first system that cannot be processed.
                logger.error("[MonitorPendingUpdates] Error: %s" % str(e))
                rt = False
                break
        else:
            logger.error("MonitorPendingUpdates: %s" % info)
            rt = False
            break
    if pending_updates:
        # Something can be updated: refresh the release-info file so the
        # available update details can be shown.
        success, local_ip = get_system_ip_from_local()
        if not success:
            logger.error("[MonitorPendingUpdates] Unable to get local IP: %s" % local_ip)
            return False
        success, msg = ansible_download_release_info(local_ip)
        if not success:
            logger.error("[MonitorPendingUpdates] Unable to retrieve release info file: %s" % msg)
            return False
    return rt
def set_system_config(system_id, set_values):
    """Applies configuration values to a system and kicks off an
    asynchronous reconfigure.

    Args:
        system_id(str): The system id where the configuration will be set.
        set_values: Key-value dictionary with the configuration settings.

    Returns:
        Tuple (success, payload): payload is the async reconfigure job id
        on success, or an error message string on failure.
    """
    success, system_ip = get_system_ip_from_system_id(system_id)
    if not success:
        return (False, system_ip)

    success, config_values = set_av_config(system_ip, set_values)
    if not success:
        api_log.error("system: set_config_general error: " + str(config_values))
        return (False, "Cannot set general configuration info: %s" % str(config_values))

    flush_cache(namespace="system_config")

    # Mirror the pushed values in the local database.
    if "general_hostname" in set_values:
        hostname_ok, hostname_msg = db_system_update_hostname(
            system_id, set_values["general_hostname"])
        if not hostname_ok:
            return (False, "Error setting values: %s" % hostname_msg)

    new_admin_ip = None
    if "general_admin_ip" in set_values:
        new_admin_ip = set_values["general_admin_ip"]
        admin_ok, admin_msg = db_system_update_admin_ip(system_id, new_admin_ip)
        if not admin_ok:
            return (False, "Error setting values: %s" % admin_msg)
        inventory_ok, _unused = ansible_add_ip_to_inventory(new_admin_ip)
        if not inventory_ok:
            return (False, "Error setting the admin IP address")

    job = alienvault_asynchronous_reconfigure.delay(system_ip, new_admin_ip)
    return (True, job.id)
def set_config_general(system_id):
    """Sets general configuration values on a system and launches an
    asynchronous reconfigure.

    Args:
        system_id (str): The system id whose configuration is updated.

    Returns:
        A make_ok/make_error HTTP response; on success it carries the id
        of the asynchronous reconfigure job.
    """
    # Whitelist of accepted query-string parameters.
    param_names = ['general_admin_dns',
                   'general_admin_gateway',
                   'general_admin_ip',
                   'general_admin_netmask',
                   'general_hostname',
                   'general_mailserver_relay',
                   'general_mailserver_relay_passwd',
                   'general_mailserver_relay_port',
                   'general_mailserver_relay_user',
                   'general_ntp_server',
                   'firewall_active']
    (success, system_ip) = get_system_ip_from_system_id(system_id)
    if not success:
        # system_ip holds the error message on failure.
        return make_error(system_ip, 500)
    set_values = {}
    for key, value in request.args.iteritems():
        if key not in param_names:
            return make_error("Bad param %s" % key, 400)
        set_values[key] = value
    (success, config_values) = set_av_config(system_ip, set_values)
    if not success:
        current_app.logger.error("system: set_config_general error: " + str(config_values))
        return make_error("Cannot set general configuration info %s" % str(config_values), 500)
    flush_cache(namespace="system")
    # Keep the local database in sync with the values pushed to the system.
    if 'general_hostname' in set_values:
        success, msg = db_system_update_hostname(system_id, set_values['general_hostname'])
        if not success:
            return make_error("Error setting values: %s" % msg, 500)
    if 'general_admin_ip' in set_values:
        success, msg = db_system_update_admin_ip(system_id, set_values['general_admin_ip'])
        if not success:
            return make_error("Error setting values: %s" % msg, 500)
        # Make the new admin IP reachable through the ansible inventory.
        success, msg = ansible_add_ip_to_inventory(set_values['general_admin_ip'])
        if not success:
            return make_error("Error setting the admin IP address", 500)
    job = alienvault_asynchronous_reconfigure.delay(system_id)
    return make_ok(job_id=job.id)
def set_interfaces_roles(system_id, interfaces):
    """Set roles for the system network interfaces.

    Args:
        system_id (str): The system id whose interfaces will be configured.
        interfaces: Interface/role specification forwarded to the ansible layer.

    Returns:
        (True, job_id) when the reconfigure job was launched,
        (False, error message) otherwise.
    """
    (success, ip) = ret = get_system_ip_from_system_id(system_id)
    if not success:
        return ret
    # Flush caches
    flush_cache(namespace="sensor_network")
    # Next verify that the interfaces param exists, correct decode a base64 string
    # and this string is a json object
    (success, msg) = ret = ansiblemethods.system.network.set_interfaces_roles(ip, interfaces)
    if not success:
        return ret
    job = alienvault_reconfigure.delay(ip)
    # Fixed: compare the state string with '==' instead of 'is'
    # (identity comparison of strings is implementation-dependent).
    if job.state == 'FAILURE':
        # Fixed: the previous message was copy-pasted from an unrelated task.
        return (False, "Can't start the reconfigure task")
    return (True, job.id)
def make_tunnel_with_vpn(system_ip, password): """Build the VPN tunnel with the given node""" if not is_valid_ipv4(system_ip): return False, "Invalid system ip: %s" % str(system_ip) success, own_server_id = get_server_id_from_local() if not success: error_msg = "Error while retrieving " + \ "server_id from local: %s" % str(own_server_id) return success, error_msg success, local_ip = get_system_ip_from_local() if not success: return success, "Cannot retrieve the local ip <%s>" % str(local_ip) success, data = ansible_make_tunnel_with_vpn( system_ip=system_ip, local_server_id=get_hex_string_from_uuid(own_server_id), password=password) if not success: return success, data print "Set VPN IP on the system table" new_node_vpn_ip = data['client_end_point1'] if new_node_vpn_ip is None: return False, "Cannot retrieve the new node VPN IP" print "New Node VPN IP %s" % new_node_vpn_ip success, data = get_system_id_from_system_ip(system_ip) if success: # If the system is not on the system table is doesn't matter success, data = set_system_vpn_ip(data, new_node_vpn_ip) if not success: return False, "Cannot set the new node vpn ip on the system table" flush_cache(namespace="support_tunnel") # Restart frameworkd print "Restarting ossim-framework" success, data = ansible_restart_frameworkd(system_ip=local_ip) if not success: print "Restarting %s ossim-framework failed (%s)" % (local_ip, data) return True, "VPN node successfully connected."
def make_tunnel_with_vpn(system_ip, password): """Build the VPN tunnel with the given node""" if not is_valid_ipv4(system_ip): return False, "Invalid system ip: %s" % str(system_ip) success, own_server_id = get_server_id_from_local() if not success: error_msg = "Error while retrieving " + \ "server_id from local: %s" % str(own_server_id) return success, error_msg success, local_ip = get_system_ip_from_local() if not success: return success, "Cannot retrieve the local ip <%s>" % str(local_ip) success, data = ansible_make_tunnel_with_vpn(system_ip=system_ip, local_server_id=get_hex_string_from_uuid(own_server_id), password=password) if not success: return success, data print "Set VPN IP on the system table" new_node_vpn_ip = data['client_end_point1'] if new_node_vpn_ip is None: return False, "Cannot retrieve the new node VPN IP" print "New Node VPN IP %s" % new_node_vpn_ip success, data = get_system_id_from_system_ip(system_ip) if success: # If the system is not on the system table is doesn't matter success, data = set_system_vpn_ip(data, new_node_vpn_ip) if not success: return False, "Cannot set the new node vpn ip on the system table" flush_cache(namespace="support_tunnel") # Restart frameworkd print "Restarting ossim-framework" success, data = ansible_restart_frameworkd(system_ip=local_ip) if not success: print "Restarting %s ossim-framework failed (%s)" % (local_ip, data) return True, "VPN node successfully connected."
def set_config_alienvault(system_id):
    """Sets AlienVault-specific configuration values on a system and
    launches an asynchronous reconfigure.

    Args:
        system_id (str): The system id whose configuration is updated.

    Returns:
        A make_ok/make_error HTTP response; on success it carries the id
        of the asynchronous reconfigure job.
    """
    # Whitelist of accepted query-string parameters.
    param_names = ['framework_framework_ip',
                   'sensor_detectors',
                   'sensor_interfaces',
                   'sensor_mservers',
                   'sensor_networks',
                   'server_server_ip']
    (success, system_ip) = get_system_ip_from_system_id(system_id)
    if not success:
        # system_ip holds the error message on failure.
        return make_error(system_ip, 500)
    set_values = {}
    for key, value in request.args.iteritems():
        if key not in param_names:
            return make_error("Bad param %s" % key, 400)
        set_values[key] = value
    (success, config_values) = set_av_config(system_ip, set_values)
    if not success:
        current_app.logger.error("system: set_config_alienvault error: " + str(config_values))
        return make_error("Cannot set AlienVault configuration info %s" % str(config_values), 500)
    flush_cache(namespace="system")
    job = alienvault_asynchronous_reconfigure.delay(system_id)
    return make_ok(job_id=job.id)
def alienvault_asynchronous_update(system_id, only_feed=False, update_key=""):
    """Runs an asynchronous alienvault update.

    Args:
        system_id (str): The system ID where we would like to run the
            alienvault-update.
        only_feed (bool): A boolean indicating whether we should update
            only the feed or not.
        update_key (str): Optional upgrade key.

    Returns:
        A serialized JobResult on failure. Falls through (None) when the
        update finished without errors.
    """
    # Refuse to launch two concurrent updates against the same system.
    if exist_task_running(task_type='alienvault_asynchronous_update',
                          current_task_request=current_task.request,
                          param_to_compare=system_id,
                          argnum=0):
        return JobResult(False, "An existing task running", "").serialize
    try:
        logger.info("Start asynchronous update <%s>" % system_id)
        rt, error_str = api_run_update(system_id, only_feed=only_feed, update_key=update_key)
        # When the task has been launched properly the error_str variable
        # will contain the log file.
        if not rt:
            return JobResult(
                False,
                "Something wrong happened while running the alienvault update %s" % error_str,
                "").serialize
        logger.info(" alienvault-update <%s> waiting to finish...." % system_id)
        time.sleep(1)  # Wait until the task is launched.
        n_process = 1
        # Poll until no update process remains on the remote system.
        while n_process > 0:
            success, n_process = api_check_if_process_is_running(system_id, error_str)
            time.sleep(1)
        rt, log_file = apimethod_get_asynchronous_command_log_file(system_id, error_str)
        if not rt:
            return JobResult(
                False,
                "Something wrong happened while retrieving the alienvault-update log file %s" % log_file,
                "").serialize
        rt, return_code_msg = apimethod_check_asynchronous_command_return_code(system_id, error_str + ".rc")
        if not rt:
            return JobResult(
                False,
                "Something wrong happened while retrieving the alienvault-return code <%s>" % str(return_code_msg),
                log_file).serialize
        flush_cache(namespace="system_packages")
        logger.info("Running alienvault-update ... end %s - %s" % (rt, error_str))
    except Exception as e:
        # Fixed: the log message referenced alienvault-reconfig although
        # this task runs alienvault-update.
        logger.error("An error occurred running alienvault-update: %s, %s" % (str(e), traceback.format_exc()))
        return JobResult(
            False,
            "An error occurred running alienvault-update <%s>" % str(e),
            "").serialize
def set_sensor_detectors_from_yaml(system_ip, plugins):
    """Sets the sensor detector plugins through the av_sensor_yaml module.

    Args:
        system_ip (str): IP of the sensor to configure.
        plugins (str): Plugin specification passed to the module.

    Returns:
        (bool, data): True and the ansible response on success,
        False and an error message otherwise.
    """
    rc = True
    # Active plugin info is cached per sensor; invalidate it before the change.
    flush_cache(namespace="sensor_plugins")
    try:
        response = ansible.run_module(host_list=[system_ip],
                                      module='av_sensor_yaml',
                                      args="op=set plugins=\"%s\"" % plugins)
        if response['dark'] != {}:
            return False, "Something wrong happened while running the set plugin module %s" % str(response)
        if "failed" in response['contacted'][system_ip]:
            # Fixed: removed leftover debug print and narrowed the bare
            # 'except:' clause. Prefer the module's own message when present.
            try:
                msg = response['contacted'][system_ip]['msg']
            except Exception:
                msg = response
            return False, msg
        if "unreachable" in response:
            return False, "%s is unreachable" % system_ip
    except Exception as msg:
        response = str(msg)
        rc = False
    return rc, response
def make_tunnel_with_vpn(system_ip,password): """Build the VPN tunnel with the given node""" if not is_valid_ipv4(system_ip): return False, "Invalid system ip: %s" % str(system_ip) success, own_server_id = get_server_id_from_local() if not success: return success, "Error while retrieving server_id from local: %s" % str(own_server_id) success, data = ansible_make_tunnel_with_vpn(system_ip=system_ip, local_server_id= get_hex_string_from_uuid(own_server_id), password=password) if not success: return success, data print "Set VPN IP on the system table" new_node_vpn_ip = data['client_end_point1'] if new_node_vpn_ip is None: return False, "Cannot retrieve the new node VPN IP" print "New Node VPN IP %s" % new_node_vpn_ip success, data = get_system_id_from_system_ip(system_ip) if success:# If the system is not on the system table is doesn't matter success, data = set_system_vpn_ip(data, new_node_vpn_ip) if not success: return False, "Cannot set the new node vpn ip on the system table" flush_cache(namespace="system") return True, "VPN node successfully connected."
# Application start-up: install error handlers, launch the Celery manager,
# restore scheduled tasks and warm up caches/messages (all best effort).
use_pretty_default_error_handlers(app)

# Start the Celery manager that supervises the task workers.
from celerymethods.celery_manager import CeleryManager
cm = CeleryManager()
cm.start()

# Restore any scheduled tasks persisted in the database.
try:
    from celerymethods.tasks.tasks import Scheduler
    scheduler = Scheduler()
    scheduler.restore_tasks_to_db()
except Exception as e:
    app.logger.error("Error loading tasks to scheduler: '{0}'".format(str(e)))

from apimethods.system.cache import flush_cache
# Package data may be stale from a previous run; flush it (best effort).
try:
    flush_cache(namespace='system_packages')
except Exception, msg:
    app.logger.warning("Error flushing system_packages namespace: %s" % (msg))

# Preload the monitor messages into the database (best effort).
try:
    from api.lib.monitors.messages import initial_msg_load
    success, data = initial_msg_load()
    if not success:
        app.logger.warning("Messages couldn't be loaded in the database, %s" % str(data))
    else:
        app.logger.info("Messages have been successfully loaded")
except Exception, msg:
    app.logger.warning("Error loading messages in database")

# Log permissions
try:
# Application start-up: install error handlers, launch the Celery manager,
# restore scheduled tasks and warm up caches/messages (all best effort).
use_pretty_default_error_handlers(app)

# Start the Celery manager that supervises the task workers.
from celerymethods.celery_manager import CeleryManager
cm = CeleryManager()
cm.start()

# Restore any scheduled tasks persisted in the database.
try:
    from celerymethods.tasks.tasks import Scheduler
    scheduler = Scheduler()
    scheduler.restore_tasks_to_db()
except Exception as e:
    app.logger.error("Error loading tasks to scheduler: '{0}'".format(str(e)))

from apimethods.system.cache import flush_cache
# Package data may be stale from a previous run; flush it (best effort).
try:
    flush_cache(namespace='system_packages')
except Exception, msg:
    app.logger.warning("Error flushing system_packages namespace: %s" % (msg))

# Preload the monitor messages into the database (best effort).
try:
    from api.lib.monitors.messages import initial_msg_load
    success, data = initial_msg_load()
    if not success:
        app.logger.warning("Messages couldn't be loaded in the database, %s" % str(data))
    else:
        app.logger.info("Messages have been successfully loaded")
except Exception, msg:
    app.logger.warning("Error loading messages in database")

# Log permissions
def then_flush_api_cache(context):
    """Behave test step: flushes every API cache namespace."""
    flush_cache()
def set_sensor_plugins_enabled_by_asset(sensor_id, assets_info):
    """ Set the list of plugins enabled in a sensor by asset
    Params:
        sensor_id (UUID): sensor id
        assets_info (dict or json string): {"<asset_id>": ["<plugin_1>", "<plugin_2>", ...], ...}
    Return:
        the id of the agent restart job
    Raises:
        APICannotResolveSensorID, APIInvalidInputFormat,
        APICannotSetSensorPlugins
    """
    (success, sensor_ip) = get_sensor_ip_from_sensor_id(sensor_id)
    if not success:
        # Fixed: the log message mixed %-style ("%s") with str.format(),
        # so the sensor ip was never interpolated.
        raise APICannotResolveSensorID(
            sensor_id=sensor_id,
            log="[set_sensor_plugins_enabled_by_asset] "
                "Error getting Sensor ip: {0}".format(sensor_ip))

    try:
        plugins = {}
        if isinstance(assets_info, basestring):
            assets_info = json.loads(assets_info)
        for asset_id, asset_plugins in assets_info.iteritems():
            # Normalize the asset id to canonical UUID form.
            asset_id = str(uuid.UUID(asset_id))
            asset_ips = get_asset_ip_from_id(asset_id=asset_id)
            if not asset_ips:
                api_log.error("Cannot resolve ips for asset '{0}'".format(asset_id))
                continue
            plugins[asset_id] = {'device_ip': asset_ips[0],
                                 'plugins': asset_plugins}
    except Exception as e:
        raise APIInvalidInputFormat(
            log="[set_sensor_plugins_enabled_by_asset] "
                "Invalid asset_info format: '{0}'".format(str(e)))

    try:
        (success, data) = set_sensor_detectors_from_yaml(sensor_ip, str(plugins))
    except Exception as e:
        raise APICannotSetSensorPlugins(
            log="[set_sensor_plugins_enabled_by_asset] "
                "Cannot set asset plugins: '{0}'".format(str(e)))
    if not success:
        api_log.error("[set_sensor_plugins_enabled_by_asset] "
                      "Cannot set asset plugins: '{0}'".format(str(data)))
        raise APICannotSetSensorPlugins(
            log="[set_sensor_plugins_enabled_by_asset] "
                "Cannot set asset plugins: '{0}'".format(str(data)))

    # Flush sensor plugin cache and Update host plugin info
    flush_cache("sensor_plugins")

    # Import here to avoid circular imports
    from celerymethods.tasks.monitor_tasks import (monitor_update_host_plugins,
                                                   monitor_enabled_plugins_limit)
    try:
        monitor_update_host_plugins.delay()
    except AlreadyQueued:
        api_log.info("[set_sensor_plugins_enabled_by_asset] monitor update host plugins already queued")
    try:
        monitor_enabled_plugins_limit.delay()
    except AlreadyQueued:
        api_log.info("[set_sensor_plugins_enabled_by_asset] monitor for enabled plugins already queued")

    # Restart the alienvault agent
    job = restart_alienvault_agent.delay(sensor_ip=sensor_ip)
    return job.id
def set_sensor_plugins_enabled_by_asset(sensor_id, assets_info):
    """ Set the list of plugins enabled in a sensor by asset
    Params:
        sensor_id (UUID): sensor id
        assets_info (dict or json string): {"<asset_id>": ["<plugin_1>", "<plugin_2>", ...], ...}
    Return:
        the id of the agent restart job
    Raises:
        APICannotResolveSensorID, APIInvalidInputFormat,
        APICannotSetSensorPlugins
    """
    (success, sensor_ip) = get_sensor_ip_from_sensor_id(sensor_id)
    if not success:
        # Fixed: the log message mixed %-style ("%s") with str.format(),
        # so the sensor ip was never interpolated.
        raise APICannotResolveSensorID(
            sensor_id=sensor_id,
            log="[set_sensor_plugins_enabled_by_asset] "
                "Error getting Sensor ip: {0}".format(sensor_ip))

    try:
        plugins = {}
        if isinstance(assets_info, basestring):
            assets_info = json.loads(assets_info)
        for asset_id, asset_plugins in assets_info.iteritems():
            # Normalize the asset id to canonical UUID form.
            asset_id = str(uuid.UUID(asset_id))
            asset_ips = get_asset_ip_from_id(asset_id=asset_id)
            if not asset_ips:
                api_log.error(
                    "Cannot resolve ips for asset '{0}'".format(asset_id))
                continue
            plugins[asset_id] = {
                'device_ip': asset_ips[0],
                'plugins': asset_plugins
            }
    except Exception as e:
        raise APIInvalidInputFormat(
            log="[set_sensor_plugins_enabled_by_asset] "
                "Invalid asset_info format: '{0}'".format(str(e)))

    try:
        (success, data) = set_sensor_detectors_from_yaml(sensor_ip, str(plugins))
    except Exception as e:
        raise APICannotSetSensorPlugins(
            log="[set_sensor_plugins_enabled_by_asset] "
                "Cannot set asset plugins: '{0}'".format(str(e)))
    if not success:
        api_log.error("[set_sensor_plugins_enabled_by_asset] "
                      "Cannot set asset plugins: '{0}'".format(str(data)))
        raise APICannotSetSensorPlugins(
            log="[set_sensor_plugins_enabled_by_asset] "
                "Cannot set asset plugins: '{0}'".format(str(data)))

    # Flush sensor plugin cache and Update host plugin info
    flush_cache("sensor_plugins")

    # Import here to avoid circular imports
    from celerymethods.tasks.monitor_tasks import (
        monitor_update_host_plugins, monitor_enabled_plugins_limit)
    try:
        monitor_update_host_plugins.delay()
    except AlreadyQueued:
        api_log.info(
            "[set_sensor_plugins_enabled_by_asset] monitor update host plugins already queued"
        )
    try:
        monitor_enabled_plugins_limit.delay()
    except AlreadyQueued:
        api_log.info(
            "[set_sensor_plugins_enabled_by_asset] monitor for enabled plugins already queued"
        )

    # Restart the alienvault agent
    job = restart_alienvault_agent.delay(sensor_ip=sensor_ip)
    return job.id