# Project-internal dependencies (InventoryMgr, FullLogger, LDAPAccess, Token)
# are assumed to be importable from the surrounding package.


class Auth:

    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()
        self.log = FullLogger()
        self.tokens_coll = self.inv.collections['api_tokens']
        self.ldap_access = LDAPAccess()

    def get_token(self, token):
        tokens = None
        try:
            tokens = list(self.tokens_coll.find({'token': token}))
        except Exception as e:
            self.log.error('Failed to get token for {0}'.format(str(e)))
        return tokens

    def write_token(self, token):
        error = None
        try:
            self.tokens_coll.insert_one(token)
        except Exception as e:
            self.log.error(
                "Failed to write new token {0} to database for {1}"
                .format(token['token'], str(e)))
            error = 'Failed to create new token'
        return error

    def delete_token(self, token):
        error = None
        try:
            self.tokens_coll.delete_one({'token': token})
        except Exception as e:
            self.log.error('Failed to delete token {0} for {1}'
                           .format(token, str(e)))
            error = 'Failed to delete token {0}'.format(token)
        return error

    def validate_credentials(self, username, pwd):
        return self.ldap_access.authenticate_user(username, pwd)

    def validate_token(self, token):
        error = None
        tokens = self.get_token(token)
        if not tokens:
            error = "Token {0} doesn't exist".format(token)
        elif len(tokens) > 1:
            self.log.error('Multiple tokens found for {0}'.format(token))
            error = "Multiple tokens found"
        else:
            t = tokens[0]
            error = Token.validate_token(t)
        return error
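
# Illustrative usage sketch (not part of the original module): the token
# lifecycle end to end. Assumes a reachable MongoDB with an 'api_tokens'
# collection and an LDAP server configured for LDAPAccess; the username,
# password and token document below are hypothetical.
if __name__ == '__main__':
    auth = Auth()
    if auth.validate_credentials('jdoe', 's3cr3t'):      # hypothetical user
        err = auth.write_token({'token': 'abc123'})      # hypothetical token doc
        if not err:
            print(auth.validate_token('abc123'))         # None when valid
            auth.delete_token('abc123')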
import copy
import json
import os
import shutil
import stat
import subprocess
import tempfile
# assumed: the bare 'timeout' caught below is socket.timeout
from socket import timeout

import pymongo
from boltons.iterutils import remap

# Project-internal dependencies (MongoAccess, CliAccess, BinaryConverter,
# Configuration, FullLogger, InventoryMgr, SshConn, SshConnection, SshError,
# remerge) are assumed to be importable from the surrounding package.


class MonitoringHandler(MongoAccess, CliAccess, BinaryConverter):
    PRODUCTION_CONFIG_DIR = '/etc/sensu/conf.d'
    APP_SCRIPTS_FOLDER = 'monitoring/checks'
    REMOTE_SCRIPTS_FOLDER = '/etc/sensu/plugins'

    provision_levels = {
        'none': 0,
        'db': 1,
        'files': 2,
        'deploy': 3
    }

    pending_changes = {}

    fetch_ssl_files = []

    def __init__(self, env):
        super().__init__()
        self.log = FullLogger()
        self.configuration = Configuration()
        self.mechanism_drivers = \
            self.configuration.environment['mechanism_drivers']
        self.env = env
        self.had_errors = False
        self.monitoring_config = self.db.monitoring_config_templates
        try:
            self.env_monitoring_config = self.configuration.get('Monitoring')
        except IndexError:
            self.env_monitoring_config = {}
        self.local_host = self.env_monitoring_config.get('server_ip', '')
        self.scripts_prepared_for_host = {}
        self.replacements = self.env_monitoring_config
        self.inv = InventoryMgr()
        self.config_db = self.db[self.inv.get_coll_name('monitoring_config')]
        self.provision = self.provision_levels['none']
        if self.env_monitoring_config:
            provision = self.env_monitoring_config.get('provision', 'none')
            provision = provision.lower()
            self.provision = \
                self.provision_levels.get(provision,
                                          self.provision_levels['none'])

    # create a directory if it does not exist
    @staticmethod
    def make_directory(directory):
        if not os.path.exists(directory):
            os.makedirs(directory)
        return directory

    def get_config_dir(self, sub_dir=''):
        config_folder = self.env_monitoring_config['config_folder'] + \
            (os.sep + sub_dir if sub_dir else '')
        return self.make_directory(config_folder).rstrip(os.sep)

    def prepare_config_file(self, file_type, base_condition):
        condition = base_condition
        condition['type'] = file_type
        sort = [('order', pymongo.ASCENDING)]
        docs = self.monitoring_config.find(condition, sort=sort)
        content = {}
        for doc in docs:
            if not self.check_env_condition(doc):
                return {}
            content.update(doc)
        self.replacements['app_path'] = \
            self.configuration.environment['app_path']
        config = self.content_replace({'config': content.get('config', {})})
        return config

    def check_env_condition(self, doc):
        if 'condition' not in doc:
            return True
        condition = doc['condition']
        if not isinstance(condition, dict):
            self.log.error('incorrect condition in monitoring ({}): '
                           'condition must be a dict'
                           .format(doc.get(doc.get('type'), '')))
            return False
        for key, required_value in condition.items():
            if not self.check_env_config(key, required_value):
                return False
        return True

    def check_env_config(self, config_name, required_config_value):
        required_config_values = required_config_value \
            if isinstance(required_config_value, list) \
            else [required_config_value]
        conf_values = self.configuration.environment.get(config_name, [])
        conf_values = conf_values if isinstance(conf_values, list) \
            else [conf_values]
        intersection = [val for val in required_config_values
                        if val in conf_values]
        return bool(intersection)

    def content_replace(self, content):
        content_remapped = remap(content, visit=self.fill_values)
        return content_remapped

    def format_string(self, val):
        formatted = val if not isinstance(val, str) or '{' not in val \
            else val.format_map(self.replacements)
        return formatted

    def fill_values(self, path, key, value):
        if not path:
            return key, value
        key_formatted = self.format_string(key)
        value_formatted = self.format_string(value)
        return key_formatted, value_formatted

    def get_config_from_db(self, host, file_type):
        find_tuple = {
            'environment': self.env,
            'host': host,
            'type': file_type
        }
        doc = self.config_db.find_one(find_tuple)
        if not doc:
            return {}
        doc.pop("_id", None)
        return self.decode_mongo_keys(doc)

    def write_config_to_db(self, host, config, file_type):
        find_tuple = {
            'environment': self.env,
            'host': host,
            'type': file_type
        }
        doc = copy.copy(find_tuple)
        doc['config'] = config
        doc = self.encode_mongo_keys(doc)
        if not doc:
            return {}
        self.config_db.update_one(find_tuple, {'$set': doc}, upsert=True)

    def merge_config(self, host, file_type, content):
        """
        merge current monitoring config of host with newer content.
        return the merged config
        """
        doc = self.get_config_from_db(host, file_type)
        config = remerge([doc['config'], content.get('config')]) if doc \
            else content.get('config', {})
        self.write_config_to_db(host, config, file_type)
        return config

    def write_config_file(self, file_name, sub_dir, host, content,
                          is_container=False, is_server=False):
        """
        apply environment definitions to the config, e.g. replace
        {server_ip} with the IP or host name for the server
        """
        # save the config to DB first, and while doing that
        # merge it with any existing config on same host
        content = self.merge_config(host, file_name, content)
        if self.provision == self.provision_levels['db']:
            self.log.debug('Monitoring setup kept only in DB')
            return
        # now dump the config to the file
        content_json = json.dumps(content.get('config', content),
                                  sort_keys=True, indent=4)
        content_json += '\n'
        # always write the file locally first
        local_dir = self.make_directory(
            os.path.join(self.get_config_dir(),
                         sub_dir.strip(os.path.sep)))
        local_path = os.path.join(local_dir, file_name)
        self.write_to_local_host(local_path, content_json)
        self.track_setup_changes(host, is_container, file_name, local_path,
                                 sub_dir, is_server=is_server)

    def add_changes_for_all_clients(self):
        """
        to debug deployment, add simulated track changes entries.
        no need to add for server, as these are done by server_setup()
        """
        docs = self.config_db.find({'environment': self.env})
        for doc in docs:
            host = doc['host']
            sub_dir = os.path.join('host', host)
            file_name = doc['type']
            config_folder = self.env_monitoring_config['config_folder']
            local_path = os.path.join(config_folder, sub_dir, file_name)
            if host == self.env_monitoring_config['server_ip']:
                continue
            self.track_setup_changes(host, False, file_name, local_path,
                                     sub_dir)

    def get_ssh(self, host, is_container=False, for_sftp=False):
        ssh = SshConnection.get_ssh(host, for_sftp)
        if not ssh:
            conf = self.env_monitoring_config
            if is_container or host == conf['server_ip']:
                host = conf['server_ip']
                port = int(conf['ssh_port'])
                user = conf['ssh_user']
                pwd = conf['ssh_password']
                ssh = SshConnection(host, user, _pwd=pwd, _port=port,
                                    for_sftp=for_sftp)
            else:
                ssh = SshConn(host, for_sftp=for_sftp)
        return ssh

    def track_setup_changes(self, host=None, is_container=False,
                            file_name=None, local_path=None, sub_dir=None,
                            is_server=False, target_mode=None,
                            target_path=PRODUCTION_CONFIG_DIR):
        if host not in self.pending_changes:
            self.pending_changes[host] = {}
        if file_name not in self.pending_changes[host]:
            self.pending_changes[host][file_name] = {
                "host": host,
                "is_container": is_container,
                "is_server": is_server,
                "file_name": file_name,
                "local_path": local_path,
                "sub_dir": sub_dir,
                "target_path": target_path,
                "target_mode": target_mode
            }

    def handle_pending_setup_changes(self):
        if self.provision < self.provision_levels['files']:
            if self.provision == self.provision_levels['db']:
                self.log.info('Monitoring config applied only in DB')
            return True
        self.log.info('applying monitoring setup')
        hosts = {}
        scripts_to_hosts = {}
        for host, host_changes in self.pending_changes.items():
            self.handle_pending_host_setup_changes(host_changes, hosts,
                                                   scripts_to_hosts)
        if self.provision < self.provision_levels['deploy']:
            return True
        if self.fetch_ssl_files:
            self.deploy_ssl_files(list(scripts_to_hosts.keys()))
        for host in scripts_to_hosts.values():
            self.deploy_scripts_to_host(host)
        for host in hosts.values():
            self.deploy_config_to_target(host)
        had_errors = ', with some error(s)' if self.had_errors else ''
        self.log.info('done applying monitoring setup{}'.format(had_errors))
        return not self.had_errors

    def handle_pending_host_setup_changes(self, host_changes, hosts,
                                          scripts_to_hosts):
        if self.provision < self.provision_levels['deploy']:
            self.log.info('Monitoring config not deployed to remote host')
        for file_type, changes in host_changes.items():
            host = changes['host']
            is_container = changes['is_container']
            is_server = changes['is_server']
            local_dir = changes['local_path']
            if local_dir == "scripts":
                scripts_to_hosts[host] = {'host': host,
                                          'is_server': is_server}
                continue
            self.log.debug('applying monitoring setup changes '
                           'for host ' + host + ', file type: ' + file_type)
            is_local_host = host == self.local_host
            file_path = os.path.join(self.PRODUCTION_CONFIG_DIR, file_type)
            if not is_server and host not in hosts:
                hosts[host] = {
                    'host': host,
                    'local_dir': local_dir,
                    'is_local_host': is_local_host,
                    'is_container': is_container,
                    'is_server': is_server
                }
            if is_server:
                remote_path = self.PRODUCTION_CONFIG_DIR
                if os.path.isfile(local_dir):
                    remote_path += os.path.sep + os.path.basename(local_dir)
                try:
                    self.write_to_server(local_dir,
                                         remote_path=remote_path,
                                         is_container=is_container)
                except SshError:
                    self.had_errors = True
            elif is_local_host:
                # write to production configuration directory on local host
                self.make_directory(self.PRODUCTION_CONFIG_DIR)
                shutil.copy(changes['local_path'], file_path)
            else:
                # write to remote host prepare dir - use sftp
                if self.provision < self.provision_levels['deploy']:
                    continue
                try:
                    self.write_to_remote_host(host, changes['local_path'])
                except SshError:
                    self.had_errors = True

    def prepare_scripts(self, host, is_server):
        if self.scripts_prepared_for_host.get(host, False):
            return
        gateway_host = SshConn.get_gateway_host(host)
        # copy scripts to host
        scripts_dir = os.path.join(self.env_monitoring_config['app_path'],
                                   self.APP_SCRIPTS_FOLDER)
        script_files = [f for f in os.listdir(scripts_dir)
                        if os.path.isfile(os.path.join(scripts_dir, f))]
        script_mode = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | \
            stat.S_IROTH | stat.S_IXOTH
        target_host = host if is_server else gateway_host
        self.make_remote_dir(target_host, self.REMOTE_SCRIPTS_FOLDER)
        for file_name in script_files:
            remote_path = os.path.join(self.REMOTE_SCRIPTS_FOLDER, file_name)
            local_path = os.path.join(scripts_dir, file_name)
            if not os.path.isfile(local_path):
                continue
            if is_server:
                ssh = self.get_ssh(target_host, for_sftp=True)
                ssh.copy_file(local_path, remote_path, mode=script_mode)
            else:
                self.copy_to_remote_host(target_host, local_path,
                                         remote_path, mode=script_mode,
                                         make_remote_dir=False)
        self.scripts_prepared_for_host[host] = True

    def deploy_ssl_files(self, hosts: list):
        try:
            monitoring_server = self.env_monitoring_config['server_ip']
            gateway_host = SshConn.get_gateway_host(hosts[0])
            temp_dir = tempfile.TemporaryDirectory()
            for file_path in self.fetch_ssl_files:
                # copy SSL files from the monitoring server
                file_name = os.path.basename(file_path)
                local_path = os.path.join(temp_dir.name, file_name)
                self.get_file(monitoring_server, file_path, local_path)
                # first copy the files to the gateway
                self.write_to_remote_host(gateway_host, local_path,
                                          remote_path=file_path)
            ssl_path = os.path.commonprefix(self.fetch_ssl_files)
            for host in hosts:
                self.copy_from_gateway_to_host(host, ssl_path, ssl_path)
        except SshError:
            self.had_errors = True

    def deploy_scripts_to_host(self, host_details):
        try:
            host = host_details['host']
            is_server = host_details['is_server']
            self.prepare_scripts(host, is_server)
            remote_path = self.REMOTE_SCRIPTS_FOLDER
            local_path = remote_path + os.path.sep + '*.py'
            if is_server:
                return  # this was done earlier
            self.copy_from_gateway_to_host(host, local_path, remote_path)
        except SshError:
            self.had_errors = True

    def restart_service(self, host: str = None,
                        service: str = 'sensu-client',
                        is_server: bool = False,
                        msg: str = None):
        ssh = self.get_ssh(host)
        cmd = 'sudo service {} restart'.format(service)
        log_msg = msg if msg else 'deploying config to host {}'.format(host)
        self.log.info(log_msg)
        try:
            if is_server:
                ssh.exec(cmd)
            else:
                self.run(cmd, ssh_to_host=host, ssh=ssh)
        except SshError as e:
            if 'Error: Redirecting to /bin/systemctl restart' not in str(e):
                self.had_errors = True

    def deploy_config_to_target(self, host_details):
        try:
            host = host_details['host']
            is_local_host = host_details['is_local_host']
            is_container = host_details['is_container']
            is_server = host_details['is_server']
            local_dir = host_details['local_dir']
            if is_container or is_server or not is_local_host:
                local_dir = os.path.dirname(local_dir)
                if not is_server:
                    self.move_setup_files_to_remote_host(host, local_dir)
                # restart the Sensu client on the remote host,
                # so it takes the new setup
                self.restart_service(host)
        except SshError:
            self.had_errors = True

    def run_cmd_locally(self, cmd):
        try:
            # check_output() raises CalledProcessError on a non-zero exit
            # code; the original Popen() call never would
            subprocess.check_output(cmd.split(),
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            print("Error running command: " + cmd +
                  ", output: " + self.binary2str(e.output) + "\n")

    def move_setup_files_to_remote_host(self, host, local_dir):
        if self.provision < self.provision_levels['deploy']:
            self.log.info('Monitoring config not written to remote host')
            return
        # need to scp the files from the gateway host to the target host
        remote_path = self.PRODUCTION_CONFIG_DIR
        self.copy_from_gateway_to_host(host, local_dir, remote_path)

    def copy_from_gateway_to_host(self, host, local_dir, remote_path):
        ssh = self.get_ssh(host)
        what_to_copy = local_dir if '*' in local_dir else local_dir + '/*'
        if ssh.is_gateway_host(host):
            # on gateway host, perform a simple copy
            # make sure the source and destination are not the same
            local_dir_base = local_dir[:local_dir.rindex('/*')] \
                if '/*' in local_dir else local_dir
            if local_dir_base.strip('/*') == remote_path.strip('/*'):
                return  # same directory - nothing to do
            self.make_remote_dir(host, remote_path)
            cmd = 'cp {} {}'.format(what_to_copy, remote_path)
            self.run(cmd, ssh=ssh)
            return
        self.make_remote_dir(host, remote_path)
        remote_path = ssh.get_user() + '@' + host + ':' + \
            remote_path + os.sep
        self.run_on_gateway('scp {} {}'.format(what_to_copy, remote_path),
                            enable_cache=False, use_sudo=None)

    def make_remote_dir_on_host(self, ssh, host, path, path_is_file=False):
        # make sure we have write permissions in target directories
        dir_path = path
        if path_is_file:
            dir_path = os.path.dirname(dir_path)
        cmd = 'sudo mkdir -p ' + dir_path
        try:
            self.run(cmd, ssh_to_host=host, ssh=ssh)
        except timeout:
            self.log.error('timed out trying to create directory {} '
                           'on host {}'.format(dir_path, host))
            return
        cmd = 'sudo chown -R ' + ssh.get_user() + ' ' + dir_path
        self.run(cmd, ssh_to_host=host, ssh=ssh)

    def make_remote_dir(self, host, path, path_is_file=False):
        ssh = self.get_ssh(host, for_sftp=True)
        self.make_remote_dir_on_host(ssh, host, path, path_is_file)

    def copy_to_remote_host(self, host, local_path, remote_path,
                            mode=None, make_remote_dir=True):
        # copy the local file to the preparation folder for the remote host
        # on the gateway host
        ssh = self.get_ssh(host)
        gateway_host = ssh.get_gateway_host(host)
        if make_remote_dir:
            self.make_remote_dir(gateway_host, remote_path,
                                 path_is_file=True)
        ftp_ssh = self.get_ssh(gateway_host, for_sftp=True)
        ftp_ssh.copy_file(local_path, remote_path, mode)

    def write_to_remote_host(self, host, local_path=None, remote_path=None):
        remote_path = remote_path if remote_path else local_path
        self.copy_to_remote_host(host, local_path, remote_path)

    def write_to_server(self, local_path, remote_path=None,
                        is_container=False):
        host = self.env_monitoring_config['server_ip']
        ssh = self.get_ssh(host, is_container=is_container)
        remote_path = remote_path if remote_path else local_path
        self.make_remote_dir_on_host(ssh, host, remote_path, True)
        # copy to config dir first
        ftp_ssh = self.get_ssh(host, is_container=is_container,
                               for_sftp=True)
        ftp_ssh.copy_file(local_path, remote_path)

    @staticmethod
    def write_to_local_host(file_path, content):
        with open(file_path, "w") as f:
            f.write(content)
        return file_path

    def get_file(self, host, remote_path, local_path):
        ftp_ssh = self.get_ssh(host, for_sftp=True)
        ftp_ssh.copy_file_from_remote(remote_path, local_path)
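
# Illustrative usage sketch (not part of the original module): stage a Sensu
# client config for one host, then apply all pending changes. What actually
# happens depends on the 'provision' level in the environment's Monitoring
# config ('none'/'db'/'files'/'deploy'); the environment name, host and
# check content below are hypothetical.
if __name__ == '__main__':
    Configuration().use_env('my-env')     # hypothetical environment
    handler = MonitoringHandler('my-env')
    # a hypothetical Sensu check definition for host 'node-1'
    check = {'config': {'checks': {'ping': {'command': 'check-ping.rb'}}}}
    handler.write_config_file('check_ping.json',
                              os.path.join('host', 'node-1'),
                              'node-1', check)
    # deploy everything staged above according to the provision level
    handler.handle_pending_setup_changes()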
import json
import re
from urllib import parse

from dateutil import parser
from pymongo import errors

# Project-internal dependencies (DataValidate, DictNamingConverter,
# FullLogger, InventoryMgr, exceptions, jsonify,
# stringify_object_values_by_types) are assumed to be importable from the
# surrounding package.


class ResponderBase(DataValidate, DictNamingConverter):

    UNCHANGED_COLLECTIONS = [
        "monitoring_config_templates",
        "environments_config",
        "messages",
        "scheduled_scans"
    ]

    def __init__(self):
        super().__init__()
        self.log = FullLogger()
        self.inv = InventoryMgr()

    def set_successful_response(self, resp, body="", status="200"):
        if not isinstance(body, str):
            try:
                body = jsonify(body)
            except Exception as e:
                self.log.exception(e)
                raise ValueError("The response body should be a string")
        resp.status = status
        resp.body = body

    def set_error_response(self, title="", code="", message="", body=""):
        if body:
            raise exceptions.CalipsoApiException(code, body, message)
        body = {
            "error": {
                "message": message,
                "code": code,
                "title": title
            }
        }
        body = jsonify(body)
        raise exceptions.CalipsoApiException(code, body, message)

    def not_found(self, message="Requested resource not found"):
        self.set_error_response("Not Found", "404", message)

    def conflict(self,
                 message="The posted data conflicts with the existing data"):
        self.set_error_response("Conflict", "409", message)

    def bad_request(self, message="Invalid request content"):
        self.set_error_response("Bad Request", "400", message)

    def unauthorized(self, message="Request requires authorization"):
        self.set_error_response("Unauthorized", "401", message)

    def validate_query_data(self, data, data_requirements,
                            additional_key_reg=None,
                            can_be_empty_keys=None):
        error_message = self.validate_data(data, data_requirements,
                                           additional_key_reg,
                                           can_be_empty_keys)
        if error_message:
            self.bad_request(error_message)

    def check_and_convert_datetime(self, time_key, data):
        time = data.get(time_key)
        if time:
            time = time.replace(' ', '+')
            try:
                data[time_key] = parser.parse(time)
            except Exception:
                self.bad_request(
                    "{0} must follow the ISO 8601 date and time format, "
                    "YYYY-MM-DDThh:mm:ss.sss+hhmm".format(time_key))

    def check_environment_name(self, env_name):
        query = {"name": env_name}
        objects = self.read("environments_config", query)
        if not objects:
            return False
        return True

    def get_object_by_id(self, collection, query, stringify_types, id):
        objs = self.read(collection, query)
        if not objs:
            env_name = query.get("environment")
            if env_name and \
                    not self.check_environment_name(env_name):
                self.bad_request("unknown environment: " + env_name)
            self.not_found()
        obj = objs[0]
        stringify_object_values_by_types(obj, stringify_types)
        if id == "_id":
            obj['id'] = obj.get('_id')
        return obj

    def get_objects_list(self, collection, query, page=0, page_size=1000,
                         projection=None, stringify_types=None):
        objects = self.read(collection, query, projection, page, page_size)
        if not objects:
            env_name = query.get("environment")
            if env_name and \
                    not self.check_environment_name(env_name):
                self.bad_request("unknown environment: " + env_name)
            self.not_found()
        for obj in objects:
            if "id" not in obj and "_id" in obj:
                obj["id"] = str(obj["_id"])
            if "_id" in obj:
                del obj["_id"]
        if stringify_types:
            stringify_object_values_by_types(objects, stringify_types)
        return objects

    def parse_query_params(self, req):
        query_string = req.query_string
        if not query_string:
            return {}
        try:
            query_params = dict(
                (k, v if len(v) > 1 else v[0])
                for k, v in parse.parse_qs(query_string,
                                           keep_blank_values=True,
                                           strict_parsing=True).items())
            return query_params
        except ValueError as e:
            self.bad_request("Invalid query string: {0}".format(str(e)))

    def replace_colon_with_dot(self, s):
        return s.replace(':', '.')

    def get_pagination(self, filters):
        page_size = filters.get('page_size', 1000)
        page = filters.get('page', 0)
        return page, page_size

    def update_query_with_filters(self, filters, filters_keys, query):
        for filter_key in filters_keys:
            filter_value = filters.get(filter_key)
            if filter_value is not None:
                query.update({filter_key: filter_value})

    def get_content_from_request(self, req):
        error = ""
        content = ""
        if not req.content_length:
            error = "No data found in the request body"
            return error, content
        data = req.stream.read()
        content_string = data.decode()
        try:
            content = json.loads(content_string)
            if not isinstance(content, dict):
                error = "The data in the request body must be an object"
        except Exception:
            error = "The request cannot be fulfilled due to bad syntax"
        return error, content

    def get_collection_by_name(self, name):
        if name in self.UNCHANGED_COLLECTIONS:
            return self.inv.db[name]
        return self.inv.collections[name]

    def get_constants_by_name(self, name):
        constants = self.get_collection_by_name("constants") \
            .find_one({"name": name})
        consts = []
        if not constants:
            self.log.error('constant type: ' + name +
                           ': no constants exist')
            return consts
        for d in constants['data']:
            try:
                consts.append(d['value'])
            except KeyError:
                self.log.error('constant type: ' + name +
                               ': no "value" key for data: ' + str(d))
        return consts

    def read(self, collection, matches=None, projection=None,
             skip=0, limit=1000):
        if matches is None:
            matches = {}
        collection = self.get_collection_by_name(collection)
        # convert the page index to a document offset
        skip *= limit
        query = collection.find(matches, projection).skip(skip).limit(limit)
        return list(query)

    def write(self, document, collection="inventory"):
        try:
            return self.get_collection_by_name(collection) \
                .insert_one(document)
        except errors.DuplicateKeyError as e:
            self.conflict("The key value ({0}) already exists"
                          .format(', '.join(self.get_duplicate_key_values(
                              e.details['errmsg']))))
        except errors.WriteError as e:
            self.bad_request('Failed to create resource for {0}'
                             .format(str(e)))

    def get_duplicate_key_values(self, err_msg):
        return ["'{0}'".format(key)
                for key in re.findall(r'"([^",]+)"', err_msg)]

    def aggregate(self, pipeline, collection):
        collection = self.get_collection_by_name(collection)
        data = collection.aggregate(pipeline)
        return list(data)
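
# Illustrative sketch (not part of the original module): a minimal concrete
# responder built on ResponderBase, written in the falcon on_get(req, resp)
# style the base class implies. The endpoint, collection, and filter names
# are hypothetical. Note how read() turns the page index into a document
# offset (skip = page * page_size) before querying.
class InstancesResponder(ResponderBase):

    def on_get(self, req, resp):
        filters = self.parse_query_params(req)
        page, page_size = self.get_pagination(filters)
        query = {}
        self.update_query_with_filters(filters, ['environment', 'name'],
                                       query)
        objects = self.get_objects_list('inventory', query,
                                        page=int(page),
                                        page_size=int(page_size))
        self.set_successful_response(resp, {'instances': objects})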
import ssl

from ldap3 import Connection, Server, Tls

# Project-internal dependencies (Singleton, FullLogger, ConfigFile) are
# assumed to be importable from the surrounding package.


class LDAPAccess(metaclass=Singleton):

    default_config_file = "ldap.conf"
    TLS_REQUEST_CERTS = {
        "demand": ssl.CERT_REQUIRED,
        "allow": ssl.CERT_OPTIONAL,
        "never": ssl.CERT_NONE,
        "default": ssl.CERT_NONE
    }
    user_ssl = True

    def __init__(self, config_file_path=""):
        super().__init__()
        self.log = FullLogger()
        self.ldap_params = self.get_ldap_params(config_file_path)
        self.server = self.connect_ldap_server()

    def get_ldap_params(self, config_file_path):
        ldap_params = {"url": "ldap://localhost:389"}
        if not config_file_path:
            config_file_path = ConfigFile.get(self.default_config_file)
        if config_file_path:
            try:
                config_file = ConfigFile(config_file_path)
                params = config_file.read_config()
                ldap_params.update(params)
            except Exception as e:
                self.log.error(str(e))
                raise
        if "user_tree_dn" not in ldap_params:
            raise ValueError("user_tree_dn must be specified in " +
                             config_file_path)
        if "user_id_attribute" not in ldap_params:
            raise ValueError("user_id_attribute must be specified in " +
                             config_file_path)
        return ldap_params

    def connect_ldap_server(self):
        ca_certificate_file = self.ldap_params.get('tls_cacertfile')
        req_cert = self.ldap_params.get('tls_req_cert')
        ldap_url = self.ldap_params.get('url')
        if ca_certificate_file:
            if not req_cert or req_cert not in self.TLS_REQUEST_CERTS:
                req_cert = 'default'
            tls_req_cert = self.TLS_REQUEST_CERTS[req_cert]
            tls = Tls(local_certificate_file=ca_certificate_file,
                      validate=tls_req_cert)
            return Server(ldap_url, use_ssl=self.user_ssl, tls=tls)
        return Server(ldap_url, use_ssl=self.user_ssl)

    def authenticate_user(self, username, pwd):
        if not self.server:
            self.server = self.connect_ldap_server()
        user_dn = self.ldap_params['user_id_attribute'] + "=" + \
            username + "," + self.ldap_params['user_tree_dn']
        connection = Connection(self.server, user=user_dn, password=pwd)
        # validate the user by binding:
        # bound is True if the binding succeeded, otherwise False
        bound = False
        try:
            bound = connection.bind()
            connection.unbind()
        except Exception as e:
            self.log.error('Failed to bind to the server: {0}'
                           .format(str(e)))
        return bound
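
# Illustrative sketch (not part of the original module): the ldap.conf file
# read by get_ldap_params() must provide at least the two mandatory keys.
# The key/value layout and values below are hypothetical; the exact file
# syntax is whatever ConfigFile.read_config() parses.
#
#     url                ldap://ldap.example.com:636
#     user_tree_dn       ou=users,dc=example,dc=com
#     user_id_attribute  uid
#     tls_cacertfile     /etc/ssl/certs/ldap-ca.pem
#     tls_req_cert       demand
#
# With that in place, authentication reduces to a single bind attempt:
#
#     ldap_access = LDAPAccess()
#     authenticated = ldap_access.authenticate_user('jdoe', 's3cr3t')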
# Project-internal dependencies (FullLogger, Configuration, Origin) are
# assumed to be importable from the surrounding package.


class Fetcher:

    ENV_TYPE_KUBERNETES = 'Kubernetes'
    ENV_TYPE_OPENSTACK = 'OpenStack'

    def __init__(self):
        super().__init__()
        self.env = None
        self.log = FullLogger()
        self.configuration = None
        self.origin = None

    @staticmethod
    def escape(string):
        return string

    def set_env(self, env):
        self.env = env
        self.log.setup(env=env)
        self.configuration = Configuration()

    def setup(self, env, origin: Origin = None):
        self.set_env(env=env)
        if origin:
            self.origin = origin
            self.log.setup(origin=origin)

    def get_env(self):
        return self.env

    def get(self, object_id):
        return None

    def set_folder_parent(self,
                          o: dict,
                          object_type: str = None,
                          master_parent_type: str = None,
                          master_parent_id: str = None,
                          parent_objects_name=None,
                          parent_type: str = None,
                          parent_id: str = None,
                          parent_text: str = None):
        if object_type:
            o['type'] = object_type
            if not parent_objects_name:
                parent_objects_name = '{}s'.format(object_type)
        if not master_parent_type:
            self.log.error('set_folder_parent: must specify: '
                           'master_parent_type, master_parent_id, '
                           'parent_type, parent_id')
            return
        if not parent_objects_name and not parent_type:
            self.log.error('set_folder_parent: must specify: '
                           'either parent_objects_name (e.g. "vedges") '
                           'or parent_type and parent_id')
            return
        if parent_objects_name and not parent_type:
            parent_type = '{}_folder'.format(parent_objects_name)
        if parent_objects_name and not parent_id:
            parent_id = '{}-{}'.format(master_parent_id,
                                       parent_objects_name)
        o.update({
            'master_parent_type': master_parent_type,
            'master_parent_id': master_parent_id,
            'parent_type': parent_type,
            'parent_id': parent_id
        })
        if parent_text:
            o['parent_text'] = parent_text
        elif parent_objects_name:
            o['parent_text'] = parent_objects_name.capitalize()
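
# Illustrative sketch (not part of the original module): given only a master
# parent, set_folder_parent() derives a folder parent for the object. The
# IDs below are hypothetical; the resulting dict is traced from the code
# above.
#
#     o = {'id': 'vedge-1'}
#     Fetcher().set_folder_parent(o, object_type='vedge',
#                                 master_parent_type='host',
#                                 master_parent_id='host-1')
#     # o now contains:
#     # {'id': 'vedge-1', 'type': 'vedge',
#     #  'master_parent_type': 'host', 'master_parent_id': 'host-1',
#     #  'parent_type': 'vedges_folder', 'parent_id': 'host-1-vedges',
#     #  'parent_text': 'Vedges'}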
# Project-internal dependencies (Singleton, MongoAccess, InventoryMgr,
# FullLogger) are assumed to be importable from the surrounding package.


class Configuration(metaclass=Singleton):

    def __init__(self, environments_collection="environments_config"):
        super().__init__()
        self.db_client = MongoAccess()
        self.db = MongoAccess.db
        self.inv = InventoryMgr()
        self.collection = self.inv.collections.get(environments_collection)
        self.env_name = None
        self.environment = None
        self.configuration = None
        self.log = FullLogger()

    def use_env(self, env_name):
        self.log.info(
            "Configuration taken from environment: {}".format(env_name))
        self.env_name = env_name
        envs = self.collection.find({"name": env_name})
        if envs.count() == 0:
            raise ValueError("use_env: could not find matching environment")
        if envs.count() > 1:
            raise ValueError("use_env: found multiple matching environments")
        self.environment = envs[0]
        self.configuration = self.environment["configuration"]

    def get_env_config(self):
        return self.environment

    def get_configuration(self):
        return self.configuration

    def get_env_name(self):
        return self.env_name

    def get_env_type(self):
        return 'OpenStack' if 'environment_type' not in self.environment \
            else self.environment['environment_type']

    def update_env(self, values):
        self.collection.update_one(
            {"name": self.env_name},
            {'$set': MongoAccess.encode_mongo_keys(values)})

    def get(self, component):
        try:
            matches = [c for c in self.configuration
                       if c["name"] == component]
        except AttributeError:
            raise ValueError("Configuration: environment not set")
        if len(matches) == 0:
            raise IndexError("No matches for configuration component: " +
                             component)
        if len(matches) > 1:
            raise IndexError(
                "Found multiple matches for configuration component: " +
                component)
        return matches[0]

    def has_network_plugin(self, name):
        if 'mechanism_drivers' not in self.environment:
            self.log.error(
                'Environment missing mechanism_drivers definition: ' +
                self.environment['name'])
        mechanism_drivers = self.environment['mechanism_drivers']
        return name in mechanism_drivers
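
# Illustrative usage sketch (not part of the original module). Configuration
# is a singleton, so an environment selected once with use_env() is visible
# to every later caller of get(); the environment name is hypothetical.
if __name__ == '__main__':
    config = Configuration()
    config.use_env('my-env')
    monitoring = config.get('Monitoring')   # raises IndexError if undefined
    print(monitoring.get('server_ip'))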
import argparse
import json
import time

from kafka import KafkaConsumer

# Project-internal dependencies (MongoAccess, Configuration, FullLogger,
# InventoryMgr) are assumed to be importable from the surrounding package.


class StatsConsumer(MongoAccess):
    default_env = "WebEX-Mirantis@Cisco"

    def __init__(self):
        self.get_args()
        MongoAccess.set_config_file(self.args.mongo_config)
        MongoAccess.__init__(self)
        self.log = FullLogger()
        self.log.set_loglevel(self.args.loglevel)
        self.conf = Configuration()
        self.inv = InventoryMgr()
        self.inv.set_collections(self.args.inventory)
        stats_coll = self.inv.get_coll_name('statistics')
        self.stats = self.db[stats_coll]
        # consume messages from topic
        self.consumer = KafkaConsumer('VPP.stats',
                                      group_id='calipso_test',
                                      auto_offset_reset=self.args.offset,
                                      bootstrap_servers=['localhost:9092'])

    def get_args(self):
        # try to read scan plan from command line parameters
        parser = argparse.ArgumentParser()
        parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
                            default="",
                            help="name of config file " +
                            "with MongoDB server access details")
        parser.add_argument("-e", "--env", nargs="?", type=str,
                            default=self.default_env,
                            help="name of environment to scan \n" +
                            "(default: " + self.default_env + ")")
        parser.add_argument("-y", "--inventory", nargs="?", type=str,
                            default="inventory",
                            help="name of inventory collection \n" +
                            "(default: 'inventory')")
        parser.add_argument("-l", "--loglevel", nargs="?", type=str,
                            default="INFO",
                            help="logging level \n(default: 'INFO')")
        parser.add_argument("-o", "--offset", nargs="?", type=str,
                            default="largest",
                            help="where to start reading" +
                            " - use 'smallest' for start \n" +
                            "(default: 'largest')")
        self.args = parser.parse_args()

    def read(self):
        for kafka_msg in self.consumer:
            msg = json.loads(kafka_msg.value.decode())
            self.add_stats(msg)

    def add_stats(self, msg):
        host_ip = msg['hostIp']
        search = {
            'environment': self.args.env,
            'type': 'host',
            'ip_address': host_ip
        }
        host = self.inv.find_items(search, get_single=True)
        if not host:
            self.log.error('could not find host with ip address=' + host_ip)
            return
        host_id = host['id']
        search = {
            'environment': self.args.env,
            'type': 'vedge',
            'host': host_id
        }
        vedge = self.inv.find_items(search, get_single=True)
        if not vedge:
            self.log.error('could not find vEdge for host: ' + host_id)
            return
        self.log.info('setting VPP stats for vEdge of host: ' + host_id)
        self.add_stats_for_object(vedge, msg)

    def add_stats_for_object(self, o, msg):
        msg['type'] = 'vedge_flows'
        msg['environment'] = self.args.env
        msg['object_type'] = o['type']
        msg['object_id'] = o['id']
        time_seconds = int(msg['averageArrivalNanoSeconds'] / 1000000000)
        sample_time = time.gmtime(time_seconds)
        msg['sample_time'] = time.strftime("%Y-%m-%dT%H:%M:%SZ", sample_time)
        # find instances between which the flow happens
        # to find the instance, find the related vNIC first
        msg['source'] = self.find_instance_for_stat('source', msg)
        msg['destination'] = self.find_instance_for_stat('destination', msg)
        self.stats.insert_one(msg)

    def find_instance_for_stat(self, direction, msg):
        search_by_mac_address = 'sourceMacAddress' in msg
        value_attr = 'MacAddress' if search_by_mac_address else 'IpAddress'
        value_to_search = msg[direction + value_attr]
        attr = 'mac_address' if search_by_mac_address else 'ip_address'
        search = {
            'environment': self.args.env,
            'type': 'vnic',
            attr: value_to_search
        }
        vnic = self.inv.find_items(search, get_single=True)
        if not vnic:
            self.log.error('failed to find vNIC for ' + attr + '=' +
                           value_to_search)
            return 'Unknown'
        # now find the instance name from the vnic name
        # (name_path has a fixed depth; the 9th component is the instance)
        name_path = vnic['name_path'].split('/')
        instance_name = name_path[8]
        return instance_name
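
# Illustrative invocation sketch (not part of the original module), assuming
# a Kafka broker on localhost:9092 with a 'VPP.stats' topic and a MongoDB
# access config file; the file name and environment are hypothetical:
#
#     python3 stats_consumer.py -m mongo.conf -e WebEX-Mirantis@Cisco \
#         -o smallest
#
if __name__ == '__main__':
    StatsConsumer().read()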