def make_request(self, url):
    """Make a request against the OTX server

    Args:
        url (str): The URL for the request.
    Returns:
        response_data (json): The OTX response
    Raises:
        RequestException, InvalidAPIKey or BadRequest when something is wrong
    """
    api_log.info("Trying to make a request: {}".format(url))
    custom_headers = {'X-OTX-API-KEY': self.key}
    if self.otx_user_version:
        # Information about the system that is using OTX
        custom_headers['User-Agent'] = self.otx_user_version
    # http://docs.python-requests.org/en/master/user/advanced/#proxies
    proxies = self.avproxy.get_proxies()
    response_data = requests.get(url, headers=custom_headers, proxies=proxies, timeout=10)
    api_log.info("Status code: {}".format(response_data.status_code))
    if response_data.status_code in GENERAL_ERROR_CODES:
        raise RequestException("Response status code: {}".format(response_data.status_code))
    if response_data.status_code == INVALID_API_KEY_CODE:
        raise InvalidAPIKey("Invalid API Key")
    if response_data.status_code == BAD_REQUEST_CODE:
        raise BadRequest("Bad Request")
    return response_data.json()
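# A hypothetical usage sketch (the pulses URL and the caller name are
# assumptions, not part of the source): each exception class raised by
# make_request() maps to a distinct handling path.
def _demo_fetch_pulses(client, since):
    url = "https://otx.alienvault.com/api/v1/pulses/subscribed?modified_since={}".format(since)
    try:
        return client.make_request(url)
    except InvalidAPIKey:
        api_log.error("OTX key rejected; a new key must be configured")
        raise
    except BadRequest:
        api_log.error("Malformed OTX request: {}".format(url))
        raise
    except RequestException as err:
        # Transient server/network failure: the caller may retry later.
        api_log.info("OTX temporarily unavailable: {}".format(str(err)))
        return None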
def restore_backup(system_id='local', backup_type='configuration', backup_name='', backup_pass=''):
    """Restore a backup in the system"""
    success, system_ip = get_system_ip_from_system_id(system_id)
    if not success:
        api_log.error(str(system_ip))
        error_msg = "Error retrieving the system ip for the system id %s -> %s" % (system_id, str(system_ip))
        return False, error_msg

    backup_name = os.path.basename(backup_name)
    success, backup_path = secure_path_join(BACKUP_PATH, backup_name)
    if not success:
        api_log.error("restore_backup: %s '%s'" % (backup_path, backup_name))
        return False, ""

    try:
        success, msg = run_restore(target=system_ip,
                                   backup_type=backup_type,
                                   backup_file=backup_path,
                                   backup_pass=backup_pass)
        if not success:
            api_log.error("restore_backup: %s" % msg)
            error_msg = "Error trying to restore the backup '%s': %s" % (backup_name, msg)
            return False, error_msg
    except Exception as e:
        api_log.error("restore_backup error: %s" % str(e))
        error_msg = "Error trying to restore the backup '%s': %s" % (backup_name, str(e))
        return False, error_msg
    return success, msg
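# A hypothetical usage sketch (the backup file name is invented): callers
# pass only a base name, and restore_backup() re-derives it with
# os.path.basename() plus secure_path_join() so a traversal attempt such as
# "../../etc/passwd" cannot escape BACKUP_PATH.
def _demo_restore():
    success, msg = restore_backup(system_id='local',
                                  backup_type='configuration',
                                  backup_name='configuration_local_2016-05-01.tar.gz',
                                  backup_pass='s3cr3t')
    if not success:
        api_log.error("Restore failed: %s" % msg)
    return success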
def _ossec_parse_agent_list(data):
    """Internal function to produce readable output"""
    strio = StringIO.StringIO(data)
    # Assume the agent list is small enough to read at once.
    lines = strio.readlines()
    # Build a dict keyed by agent id
    result = {}
    for line in lines:
        line = line.rstrip("\n")
        if line != "":
            (agent_id, name, ipaddress, status, tail) = line.split(",")
            # Only keep numeric agent ids; agentless entries are discarded
            if re.match(r"^[0-9]{1,4}$", agent_id) is not None:
                result[agent_id] = {'name': name, 'ip': ipaddress, 'status': status}
            else:
                api_log.info("[ossec_get_available_agents] Discarding info about agentless %s" % line)
    return result
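# A minimal, self-contained demo of the input this parser expects (the
# sample rows are invented): one "id,name,ip,status,tail" CSV line per
# agent, as produced by OSSEC's agent listing; non-numeric ids (agentless
# entries) are dropped.
def _demo_parse_agent_list():
    sample = ("001,web-server,192.168.1.10,Active,x\n"
              "002,db-server,192.168.1.11,Disconnected,x\n"
              "agentless,backup-host,10.0.0.5,Active,x\n")
    return _ossec_parse_agent_list(sample)
    # -> {'001': {'name': 'web-server', 'ip': '192.168.1.10', 'status': 'Active'},
    #     '002': {'name': 'db-server', 'ip': '192.168.1.11', 'status': 'Disconnected'}}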
def ansible_purge_logs(system_ip, log_type):
    """Delete update/reconfigure log files older than a year

    Args:
        system_ip (str): System IP
        log_type (str): reconfigure or update
    Returns:
        success (bool): OK/ERROR
        msg (str): info message
    """
    if not (system_ip and log_type):
        return False, "[ansible_purge_logs]: Missing arguments"

    response = ansible.run_module(host_list=[system_ip],
                                  module="av_purge_logs",
                                  use_sudo=True,
                                  args="log_type=%s" % log_type)
    success, msg = ansible_is_valid_response(system_ip, response)
    if success:
        if response['contacted'][system_ip]['changed']:
            api_log.info(response['contacted'][system_ip]['msg'])
        return True, "[ansible_purge_logs] Purge logs OK"
    return False, "[ansible_purge_logs] Purge logs error: %s" % msg
def ansible_get_agent_config_yml(sensor_ip):
    """Fetch the agent config.yml file and parse it"""
    config_file = '/etc/ossim/agent/config.yml'
    local_file = '/var/tmp/{0}{1}'.format(sensor_ip, config_file)
    device_list = {}
    try:
        success, dst = fetch_file(sensor_ip, config_file, '/var/tmp')
    except Exception as exc:
        api_log.error("[ansible_get_agent_config_yml] Error: %s" % str(exc))
        return False, str(exc)

    if not os.path.exists(local_file):
        api_log.info("[ansible_get_agent_config_yml] File {0} not found in {1}".format(config_file, local_file))
    else:
        try:
            with open(local_file, 'r') as f:
                content = yaml.load(f.read())
            if "plugins" in content:
                # Support more than one plugin per asset
                for plg in content['plugins']:
                    for path, info in plg.iteritems():
                        if "DEFAULT" in info:
                            data = info['DEFAULT']
                            device_list.setdefault(data['device_id'], []).append(data['pid'])
            os.remove(local_file)
        except Exception as exc:
            api_log.error("[ansible_get_agent_config_yml] Unable to parse yml: %s" % str(exc))
            return False, str(exc)
    return True, device_list
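# A hypothetical sketch of the config.yml layout this parser expects (the
# path, device_id and pid values are invented for illustration): a "plugins"
# list whose entries map a plugin path to a section carrying
# DEFAULT.device_id and DEFAULT.pid.
import yaml

SAMPLE_AGENT_CONFIG = """
plugins:
  - /etc/ossim/agent/plugins/ssh.cfg:
      DEFAULT:
        device_id: 564d1731-5369-d912-e91b-61c1fff3cf6c
        pid: 4003
"""

def _demo_agent_config():
    content = yaml.safe_load(SAMPLE_AGENT_CONFIG)
    device_list = {}
    for plg in content['plugins']:
        for path, info in plg.items():
            if "DEFAULT" in info:
                data = info['DEFAULT']
                device_list.setdefault(data['device_id'], []).append(data['pid'])
    return device_list  # -> {'564d1731-5369-d912-e91b-61c1fff3cf6c': [4003]}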
def get_license(self):
    license_info = self._config_repository.get_config(CONFIG_LICENSE_NAME)
    is_trial = (LICENSE_IS_TRIAL
                if CONFIG_LICENSE_TRIAL_INDICATOR in license_info.value
                else LICENSE_IS_NOT_TRIAL)
    try:
        expires_on = re.findall(r'expire=(.*?)$', license_info.value, re.M)[0]
        # Convert the expiration date to seconds since the Unix epoch
        expires_on = int((datetime.strptime(expires_on, '%Y-%m-%d') -
                          datetime.strptime('1970-01-01', '%Y-%m-%d')).total_seconds())
    except Exception as exc:
        # License expiration is not mandatory in the central_console bounded context
        api_log.info('Did not manage to get license expiration, ignoring: {}'.format(str(exc)))
        expires_on = None
    try:
        devices = int(re.findall(r'devices=(.*?)$', license_info.value, re.M)[0])
    except Exception as exc:
        # License devices number is not mandatory in the central_console bounded context
        api_log.info('Did not manage to get license devices number, ignoring: {}'.format(str(exc)))
        devices = None
    return self._license_constructor(is_trial, expires_on, devices)
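# A minimal, self-contained demo of the regex extraction above (the license
# text is invented for illustration; real license fields may differ): pull
# the expire= and devices= lines out of a multi-line value and convert the
# date to epoch seconds.
import re
from datetime import datetime

def _demo_license_fields():
    value = "email=admin@example.com\nexpire=2025-12-31\ndevices=25\n"
    expires_on = re.findall(r'expire=(.*?)$', value, re.M)[0]
    epoch = int((datetime.strptime(expires_on, '%Y-%m-%d') -
                 datetime.strptime('1970-01-01', '%Y-%m-%d')).total_seconds())
    devices = int(re.findall(r'devices=(.*?)$', value, re.M)[0])
    return epoch, devices  # -> (1767139200, 25)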
def ansible_resend_alarms(system_ip, alarms):
    if alarms:
        chunk_size = 10
        # Resend the alarm backlog in chunks of 10 events
        for alarm_chunk in [alarms[x:x + chunk_size] for x in xrange(0, len(alarms), chunk_size)]:
            events = "\n".join(map(lambda x: str(uuid.UUID(x)), alarm_chunk))
            api_log.info("[ansible_resend_alarms] Resending events '%s' to server '%s'" % (str(events), system_ip))
            cmd = "echo -e \"%s\" | nc 127.0.0.1 40004 -w1" % events
            response = ansible.run_module(host_list=[system_ip], module="shell", args=cmd)
            success, msg = ansible_is_valid_response(system_ip, response)
            if not success:
                api_log.error("[ansible_resend_alarms] Can't resend events '%s' to '%s'. Bailing out" % (str(events), system_ip))
                return False, str(msg)
    return True, ''
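# A self-contained sketch of the chunking used above (UUIDs are generated on
# the fly for illustration): slicing in steps of chunk_size yields batches
# of at most ten alarm ids per netcat invocation.
import uuid

def _demo_chunk_alarms(n=23, chunk_size=10):
    alarms = [str(uuid.uuid4()) for _ in range(n)]
    chunks = [alarms[x:x + chunk_size] for x in range(0, len(alarms), chunk_size)]
    return [len(chunk) for chunk in chunks]  # -> [10, 10, 3]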
def reload_hosts(self):
    """Builds an IDM message to reload the hosts"""
    try:
        self.sequence_id += 1
        message = 'reload-hosts id="' + str(self.sequence_id) + '"\n'
        api_log.info("Sending the reload-hosts message")
        self.send(message)
        connection_message_response = self.recv()
        if not self.__process_connection_message_response(connection_message_response):
            api_log.error("Server connector - Cannot connect to the server, invalid response")
    except Exception as e:
        api_log.debug("Server connector, cannot send the reload-hosts message: %s" % str(e))
def apimethod_get_pending_packges(system_id, no_cache=False):
    """Retrieves the available updates for the given system_id and the release_info file

    Args:
        system_id (str): The system id of which we want to know if it has available updates
        no_cache (bool): Whether to ignore the cached update information

    Returns:
        (success, data): success=True when the operation went OK, otherwise success=False.
        On success, data will contain a json object with the updates information.
    """
    success, data = apimethod_get_update_info(system_id, no_cache=no_cache)
    if not success:
        return success, data

    available_updates = data['available_updates']
    if available_updates:
        # Check for release info file
        success, local_ip = get_system_ip_from_local()
        if not success:
            error_msg = "[apimethod_get_pending_packges] Unable to get local IP: %s" % local_ip
            api_log.error(error_msg)
            return False, available_updates
        success, is_pro = get_is_professional(local_ip)
        if success and is_pro:
            success, is_trial = system_is_trial(system_id='local')
            if success and is_trial:
                info_msg = "[apimethod_get_pending_packges] Trial version. Skipping download release info file"
                api_log.info(info_msg)
                return True, available_updates
            success, msg = ansible_download_release_info(local_ip)
            if not success:
                error_msg = "[apimethod_get_pending_packges] Unable to retrieve release info file: %s" % msg
                api_log.error(error_msg)
    return True, available_updates
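# A hypothetical caller sketch (the handler name and response shape are
# assumptions): the (success, data) tuple convention lets an API layer map
# the boolean onto an HTTP status while passing the payload through.
def _demo_handle_pending_packages(system_id):
    success, data = apimethod_get_pending_packges(system_id, no_cache=True)
    if not success:
        return {'status': 'error', 'message': str(data)}
    return {'status': 'success', 'data': {'available_updates': data}}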
def restore_backup(system_id='local', backup_type='configuration', backup_name=''):
    """Restore a backup in the system"""
    success, system_ip = get_system_ip_from_system_id(system_id)
    if not success:
        api_log.error(str(system_ip))
        error_msg = "Error retrieving the system ip for the system id %s -> %s" % (system_id, str(system_ip))
        return False, error_msg

    backup_name = os.path.basename(backup_name)
    success, backup_path = secure_path_join(BACKUP_PATH, backup_name)
    if not success:
        api_log.error("restore_backup: %s '%s'" % (backup_path, backup_name))
        return False, ""

    try:
        success, msg = run_restore(target=system_ip,
                                   backup_type=backup_type,
                                   backup_file=backup_path)
        if not success:
            api_log.error("restore_backup: %s" % msg)
            error_msg = "Error trying to restore the backup '%s': %s" % (backup_name, msg)
            return False, error_msg
    except Exception as e:
        api_log.error("restore_backup error: %s" % str(e))
        error_msg = "Error trying to restore the backup '%s': %s" % (backup_name, str(e))
        return False, error_msg
    return success, msg
def remove_plugin_from_sensors(plugin_file):
    """Disable and remove a custom plugin from all systems.

    Args:
        plugin_file: (str) Full path to plugin file.
    Returns:
        (bool) Status
    """
    plugin_name = splitext(basename(plugin_file))[0]
    result, added_sensors = get_systems(system_type="Sensor", exclusive=True, convert_to_dict=True)
    # Example result: {'564d1731-5369-d912-e91b-61c1fff3cf6c': '192.168.87.197'}
    if not result:
        api_log.error('Cannot get list of connected sensors: {}'.format(added_sensors))
        return False

    # Always include the local sensor as well
    if isinstance(added_sensors, dict):
        added_sensors['local'] = '127.0.0.1'

    if added_sensors:
        for sensor_id, sensor_ip in added_sensors.iteritems():
            api_log.info('Trying to disable global plugin "{}" on {}'.format(plugin_name, sensor_ip))
            result, msg = disable_plugin_globally(plugin_name, sensor_ip)
            if not result:
                api_log.error(msg)
            api_log.info('Trying to disable per-asset plugin "{}" on {}'.format(plugin_name, sensor_ip))
            result, msg = disable_plugin_per_assets(plugin_name, sensor_ip)
            if not result:
                api_log.error(msg)

    # Remove the plugin file from disk on every sensor
    api_log.info('Removing plugin file: {} on sensors {}'.format(plugin_file, added_sensors))
    result = remove_file(host_list=added_sensors.values(), file_name=plugin_file)
    return result
def set_sensor_plugins_enabled_by_asset(sensor_id, assets_info):
    """Set the list of plugins enabled in a sensor by asset

    Params:
        sensor_id (UUID): sensor id
        assets_info (dict or json string): {"<asset_id>": ["<plugin_1>", "<plugin_2>", ...], ...}

    Return:
        the id of the agent restart job
    """
    (success, sensor_ip) = get_sensor_ip_from_sensor_id(sensor_id)
    if not success:
        raise APICannotResolveSensorID(
            sensor_id=sensor_id,
            log="[set_sensor_plugins_enabled_by_asset] "
                "Error getting Sensor ip: {0}".format(sensor_ip))

    try:
        plugins = {}
        if isinstance(assets_info, basestring):
            assets_info = json.loads(assets_info)
        for asset_id, asset_plugins in assets_info.iteritems():
            asset_id = str(uuid.UUID(asset_id))
            asset_ips = get_asset_ip_from_id(asset_id=asset_id)
            if not asset_ips:
                api_log.error("Cannot resolve ips for asset '{0}'".format(asset_id))
                continue
            plugins[asset_id] = {'device_ip': asset_ips[0], 'plugins': asset_plugins}
    except Exception as e:
        raise APIInvalidInputFormat(
            log="[set_sensor_plugins_enabled_by_asset] "
                "Invalid asset_info format: '{0}'".format(str(e)))

    try:
        (success, data) = set_sensor_detectors_from_yaml(sensor_ip, str(plugins))
    except Exception as e:
        raise APICannotSetSensorPlugins(
            log="[set_sensor_plugins_enabled_by_asset] "
                "Cannot set asset plugins: '{0}'".format(str(e)))
    if not success:
        api_log.error("[set_sensor_plugins_enabled_by_asset] "
                      "Cannot set asset plugins: '{0}'".format(str(data)))
        raise APICannotSetSensorPlugins(
            log="[set_sensor_plugins_enabled_by_asset] "
                "Cannot set asset plugins: '{0}'".format(str(data)))

    # Flush the sensor plugin cache and update host plugin info
    flush_cache("sensor_plugins")

    # Import here to avoid circular imports
    from celerymethods.tasks.monitor_tasks import (monitor_update_host_plugins,
                                                   monitor_enabled_plugins_limit)
    try:
        monitor_update_host_plugins.delay()
    except AlreadyQueued:
        api_log.info("[set_sensor_plugins_enabled_by_asset] monitor update host plugins already queued")
    try:
        monitor_enabled_plugins_limit.delay()
    except AlreadyQueued:
        api_log.info("[set_sensor_plugins_enabled_by_asset] monitor for enabled plugins already queued")

    # Restart the AlienVault agent
    job = restart_alienvault_agent.delay(sensor_ip=sensor_ip)
    return job.id
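# A self-contained sketch of the input transformation above (asset ids and
# plugin names are invented; get_asset_ip_from_id is replaced by a stub):
# a JSON assets_info payload becomes the per-asset dict handed to the
# sensor, keyed by the canonicalized asset UUID.
import json
import uuid

def _demo_build_plugins(assets_info_json, resolve_ip):
    plugins = {}
    for asset_id, asset_plugins in json.loads(assets_info_json).items():
        asset_id = str(uuid.UUID(asset_id))  # normalizes case/format, rejects bad ids
        asset_ips = resolve_ip(asset_id)
        if not asset_ips:
            continue
        plugins[asset_id] = {'device_ip': asset_ips[0], 'plugins': asset_plugins}
    return plugins

# _demo_build_plugins('{"564D1731-5369-D912-E91B-61C1FFF3CF6C": ["cisco-asa"]}',
#                     lambda _id: ['192.168.87.197'])
# -> {'564d1731-5369-d912-e91b-61c1fff3cf6c':
#        {'device_ip': '192.168.87.197', 'plugins': ['cisco-asa']}}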
def sync_database_from_child(system_id):
    """
    Check the SQL sync file in system_id and, if it differs from the local
    one, fetch it and apply it to the local database.
    Then check whether we have to propagate changes upwards and generate
    sync.sql if so.
    """
    # Get remote and local IPs
    (success, system_ip) = get_system_ip_from_system_id(system_id)
    if not success:
        error_msg = "[Apimethod sync_database_from_child] " + \
                    "Error retrieving the system ip for the system id " + \
                    "%s -> %s" % (system_id, str(system_ip))
        return success, error_msg

    success, local_ip = get_system_ip_from_local()
    if not success:
        error_msg = "[Apimethod sync_database_from_child] " + \
                    "Error while getting the local ip: %s" % str(local_ip)
        return success, error_msg

    # Get the MD5SUM file for the SQL file
    remote_md5file_path = "/var/lib/alienvault-center/db/sync.md5"
    local_md5file_path = "%s" % get_base_path_from_system_id(system_id) + \
                         "/sync_%s.md5" % system_id
    (retrieved, msg) = rsync_pull(system_ip, remote_md5file_path,
                                  local_ip, local_md5file_path)
    if not retrieved and 'already in sync' not in msg:
        return False, "[Apimethod sync_database_from_child] %s" % msg

    # Check the SQL file MD5
    local_file_path = "%s" % get_base_path_from_system_id(system_id) + \
                      "/sync_%s.sql" % system_id
    with open(local_md5file_path) as m:
        md5_read = m.readline()
    p = Popen(['/usr/bin/md5sum', local_file_path], stdout=PIPE)
    md5_calc, err = p.communicate()
    if err:
        return False, "[Apimethod sync_database_from_child] %s" % err
    if str(md5_read.rstrip('\n')) in str(md5_calc):
        return True, "[Apimethod sync_database_from_child] SQL already synced"

    # Get the remote sync file if it changed
    remote_file_path = "/var/lib/alienvault-center/db/sync.sql"
    (retrieved, msg) = rsync_pull(system_ip, remote_file_path,
                                  local_ip, local_file_path)
    if not retrieved:
        if 'already in sync' in msg:
            return True, "[Apimethod sync_database_from_child] Databases already in sync"
        return False, "[Apimethod sync_database_from_child] %s" % msg

    # Verify the MD5 of the freshly pulled SQL file
    p = Popen(['/usr/bin/md5sum', local_file_path], stdout=PIPE)
    md5_calc, err = p.communicate()
    if err:
        return False, "[Apimethod sync_database_from_child] %s" % err
    if not str(md5_read.rstrip('\n')) in str(md5_calc):
        error_msg = "[Apimethod sync_database_from_child] " + \
                    "Corrupt or incomplete SQL file (bad md5sum)"
        return False, error_msg

    # SQL file OK. Apply it
    with open(local_file_path) as f:
        if call(['/usr/bin/ossim-db'], stdin=f):
            error_msg = "[Apimethod sync_database_from_child] " + \
                        "Error applying SQL file to ossim-db"
            return False, error_msg
        info_msg = "[Apimethod sync_database_from_child] " + \
                   "SQL applied successfully"
        api_log.info(info_msg)
        # Check the first line of the sync.sql file for the MySQL restart option
        f.seek(0, 0)
        restart_db = "RESTART OSSIM-SERVER" in f.readline()

    # Restart the SQL server if needed
    if restart_db:
        try:
            restart_ossim_server(local_ip)
        except Exception as err:
            error_msg = "An error occurred while restarting " + \
                        "MySQL server: %s" % str(err)
            return False, error_msg
    return True, info_msg
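# A minimal sketch of the md5 comparison step (the helper name is
# hypothetical): hashlib reproduces what the /usr/bin/md5sum subprocess
# computes, which avoids spawning a process and parsing its output.
import hashlib

def _md5_matches(sql_path, md5_line):
    """Return True when the recorded md5 matches the file's actual digest."""
    digest = hashlib.md5()
    with open(sql_path, 'rb') as f:
        for block in iter(lambda: f.read(65536), b''):
            digest.update(block)
    return digest.hexdigest() in md5_line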
def send_events_from_hosts(self, host_list):
    """Builds an IDM Event from a host dictionary and sends it to the IDM"""
    for host, host_data in host_list.iteritems():
        try:
            h = HostInfoEvent()
            h["ip"] = host
            h["mac"] = host_data["host_mac"]
            h["hostname"] = host_data["hostname"]
            os_family_data = None
            software_cpe = set()
            hardware_cpe = set()
            so_cpe = set()
            if "osclass" in host_data:
                # Keep the OS guess with the highest accuracy
                accuracy = -1
                for osfamily in host_data["osclass"]:
                    if osfamily["accuracy"] == '' or osfamily["accuracy"] is None:
                        current_accuracy = 0
                    else:
                        current_accuracy = int(osfamily["accuracy"])
                    if current_accuracy > accuracy:
                        os_family_data = osfamily
                        accuracy = current_accuracy
                if os_family_data is not None:
                    # The same parsing that the ossim-agent does.
                    h["os"] = '{0} {1}'.format(os_family_data['osfamily'], os_family_data['osgen'])
            port_protocol_info = []
            for protocol in ["udp", "tcp", "sctp"]:
                if protocol not in host_data:
                    continue
                for port, port_data in host_data[protocol].iteritems():
                    if port_data["state"] != "open":
                        continue
                    port_cpe = port_data["cpe"]
                    port_service_name = port_data["name"]
                    banner = []
                    for value in ["product", "version", "extrainfo"]:
                        banner_data = port_data[value]
                        if banner_data is not None:
                            banner.append(banner_data)
                    local_software = []
                    if port_cpe:
                        for cpe in port_cpe:
                            if not banner:
                                # Derive a human-readable banner from the CPE itself
                                banner.append(' '.join([s[0].upper() + s[1:] for s in re.sub(':', ' ', re.sub(r"^cpe:/.:", '', re.sub(r":+", ':', cpe))).split(' ')]))
                            cpe += '|'
                            cpe += (' '.join(banner)).lstrip(' ')
                            if cpe.startswith('cpe:/o:'):
                                so_cpe.add(cpe)
                            elif cpe.startswith('cpe:/h:'):
                                hardware_cpe.add(cpe)
                            else:
                                software_cpe.add(cpe)
                                local_software.append(cpe)
                    if software_cpe:
                        port_protocol_info.append("{0}|{1}|{2}|{3}".format(protocol, port, port_service_name, ','.join(local_software)))
                    else:
                        port_protocol_info.append("{0}|{1}|{2}".format(protocol, port, port_service_name))
            h["service"] = ','.join(port_protocol_info)
            all_software = set()
            all_software.update(software_cpe)
            all_software.update(so_cpe)
            all_software.update(hardware_cpe)
            h["software"] = ','.join(all_software)
            h["inventory_source"] = NMAP_INVENTORY_SOURCE_ID
            api_log.info("IDM Event: {0}".format(str(h)))
            self.send(str(h))
        except Exception as e:
            api_log.error("IDM connector, cannot send the event {0}".format(str(e)))
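# A self-contained demo of the CPE-to-banner fallback above (the sample CPE
# is invented): collapse repeated colons, strip the "cpe:/x:" prefix, turn
# the remaining separators into spaces and capitalize each token.
import re

def _demo_cpe_banner(cpe="cpe:/o:linux:linux_kernel:2.6"):
    collapsed = re.sub(r":+", ':', cpe)            # "cpe:/o:linux:linux_kernel:2.6"
    stripped = re.sub(r"^cpe:/.:", '', collapsed)  # "linux:linux_kernel:2.6"
    spaced = re.sub(':', ' ', stripped)            # "linux linux_kernel 2.6"
    return ' '.join([s[0].upper() + s[1:] for s in spaced.split(' ')])
    # -> "Linux Linux_kernel 2.6"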