def main(resource, action=''):
    """Entry point for the OCF resource agent.

    Args:
        resource: Resource-agent class to instantiate; must accept a
            DecisionMonitor and a resource-schema dict.
        action (str): OCF action — 'meta-data', 'monitor', 'start' or 'stop'.

    Returns:
        The OCF status code returned by the invoked action, or
        const.OCF_ERR_GENERIC on any unhandled exception.
    """
    try:
        # meta-data must answer before config/log initialization: Pacemaker
        # may invoke it before the HA config file is available.
        if action == 'meta-data':
            return resource.metadata()
        Conf.load(const.HA_GLOBAL_INDEX, Yaml(const.HA_CONFIG_FILE))
        log_path = Conf.get(const.HA_GLOBAL_INDEX, f"LOG{_DELIM}path")
        log_level = Conf.get(const.HA_GLOBAL_INDEX, f"LOG{_DELIM}level")
        Log.init(service_name='resource_agent', log_path=log_path,
                 level=log_level)
        with open(const.RESOURCE_SCHEMA, 'r') as f:
            resource_schema = json.load(f)
        os.makedirs(const.RA_LOG_DIR, exist_ok=True)
        resource_agent = resource(DecisionMonitor(), resource_schema)
        Log.debug(f"{resource_agent} initialized for action {action}")
        if action == 'monitor':
            return resource_agent.monitor()
        elif action == 'start':
            return resource_agent.start()
        elif action == 'stop':
            return resource_agent.stop()
        else:
            print('Usage %s [monitor] [start] [stop] [meta-data]' % sys.argv[0])
            # sys.exit() instead of the site-provided exit(): exit() is meant
            # for the interactive interpreter and is not guaranteed to exist
            # when Python runs with -S / without the site module.
            sys.exit()
    except Exception:
        Log.error(f"{traceback.format_exc()}")
        return const.OCF_ERR_GENERIC
def __init__(self): """ Initialization of HA CLI. """ # TODO Check product env and load specific conf Conf.init() Conf.load(const.RESOURCE_GLOBAL_INDEX, Json(const.RESOURCE_SCHEMA)) Conf.load(const.RULE_GLOBAL_INDEX, Json(const.RULE_ENGINE_SCHAMA)) Conf.load(const.HA_GLOBAL_INDEX, Yaml(const.HA_CONFIG_FILE)) log_path = Conf.get(const.HA_GLOBAL_INDEX, "LOG.path") log_level = Conf.get(const.HA_GLOBAL_INDEX, "LOG.level") Log.init(service_name='cortxha', log_path=log_path, level=log_level)
def is_cleanup_required(self, node=None):
    """
    Check if all alert resolved

    Args:
        node ([type]): [description]
    """
    target = "all" if node is None else node
    Log.debug(f"Performing failback on {target}")
    # "all" selects every resource; otherwise keep resources whose name
    # contains the node (substring match).
    status_list = {
        res: self._decision_monitor.get_resource_status(res)
        for res in Conf.get(const.RESOURCE_GLOBAL_INDEX, "resources")
        if target == "all" or target in res
    }
    Log.info(f"Resource status for node {target} is {status_list}")
    statuses = status_list.values()
    if Action.FAILED in statuses:
        Log.debug("Some component are not yet recovered skipping failback")
    elif Action.RESOLVED in statuses:
        Log.info("Failback is required as some of alert are resolved.")
        return True
    else:
        Log.debug(f"{target} node already in good state no need for failback")
    return False
def cleanup_db(self, node, data_only):
    """
    Acknowledge alert data for a node and optionally reset failover state.

    Args:
        node ([string]): Node name; None means every node ("all").
        data_only ([boolean]): Remove data only.

    Action:
        consul data: {'entity': 'enclosure', 'entity_id': '0',
        'component': 'controller', 'component_id': 'node1'}
        if data_only is True then remove data else remove data and
        perform cleanup.
    """
    resources = Conf.get(const.RESOURCE_GLOBAL_INDEX, "resources")
    node = "all" if node is None else node
    Log.debug(f"Performing cleanup for {node} node")
    for key in resources.keys():
        # "all" matches every resource; otherwise acknowledge resources
        # whose key contains the node name (substring match).
        if node == "all" or node in key:
            self._decision_monitor.acknowledge_resource(key, data_only)
    if not data_only:
        # Fixed log-message typo: "Reseting" -> "Resetting".
        Log.info(f"Resetting HA decision event for {node}")
        self.reset_failover(node)
def _init_kafka_conf(self, **kwargs):
    """
    Initialize Kafka message-bus configuration.

    Builds the bootstrap-server string from the configured Kafka cluster
    and caches the client/group/consumer identifiers supplied by caller.

    Args:
        **kwargs: May carry const.CLIENT_ID, const.GROUP_ID and
            const.CONSUMER_NAME entries.
    """
    kafka_cluster = Conf.get(const.CONFIG_INDEX,
                             f"{const.KAFKA}.{const.CLUSTER}")
    # str.join replaces the original manual counter loop; the separator
    # reproduces the original output exactly ("host:port, host:port").
    self._hosts = ", ".join(
        f"{node[const.SERVER]}:{node[const.PORT]}" for node in kafka_cluster)
    self._client_id = kwargs.get(const.CLIENT_ID)
    self._group_id = kwargs.get(const.GROUP_ID)
    self._consumer_name = kwargs.get(const.CONSUMER_NAME)
    self._retry_counter = Conf.get(const.CONFIG_INDEX,
                                   f"{const.KAFKA}.{const.RETRY_COUNTER}")
    Log.info(f"Message bus config initialized. Hosts: {self._hosts}, "
             f"Client ID: {self._client_id}, Group ID: {self._group_id}")
def _provision_compiled_schema(self, compiled_schema):
    """
    Scan schema and replace ${var} in compiled schema to configuration
    provided by provision.

    Args:
        compiled_schema: Schema object; its str() form is scanned for
            ${var} placeholders.

    Side effect:
        Sets self.compiled_json to the substituted schema parsed back
        with ast.literal_eval.
    """
    # The original pattern carried the lookahead "(?=[^]*[^]*)", which is
    # zero-or-more of a negated character class — it always matches, so it
    # was a confusing no-op and has been dropped.
    new_compiled_schema = str(compiled_schema)
    for element in re.findall(r"\$\{[^}]+\}", new_compiled_schema):
        key = element[2:-1]  # strip the "${" prefix and "}" suffix
        # Conf.get's third argument is the fallback: an unknown key leaves
        # the placeholder text in place unchanged.
        new_compiled_schema = new_compiled_schema.replace(
            element, str(Conf.get(const.PROV_CONF_INDEX, key, element)))
    self.compiled_json = ast.literal_eval(new_compiled_schema)
def _assign_var(self):
    """
    Assign value to runtime variable

    Scans the compiled schema for ${var} placeholders and appends shell
    variable assignments for them to the provisioning script.
    """
    # The no-op lookahead "(?=[^]*[^]*)" from the original pattern is
    # dropped: it always matched and only obscured the intent. The unused
    # local `args = {}` is removed as well.
    keys = list(set(re.findall(r"\$\{[^}]+\}", str(self.compiled_json))))
    with open(self._script, "a") as script_file:
        script_file.writelines("pcs_status=$(pcs constraint)\n")
        script_file.writelines("pcs_location=$(pcs constraint location)\n")
        for element in keys:
            # Dotted placeholders are resolved elsewhere; only plain
            # ${name} entries become shell variables here.
            if "." not in element:
                variable = element[2:-1]  # strip "${" and "}"
                key = variable.replace("_", ".")
                script_file.writelines(
                    variable + "=" +
                    str(Conf.get(const.PROV_CONF_INDEX, key)) + "\n")
def _init_config(self, **kwargs):
    """
    Load message-bus configuration and dispatch to the backend-specific
    initializer (Kafka or RMQ).

    Raises:
        InvalidConfigError: If the configured bus type is unknown or any
            step of configuration loading fails.
    """
    #TODO: Write a script to fetch kafka bootstarp cluster and write in
    #the config. Provide the script to Provisioner to copy it to desired
    #location.
    try:
        ConfInit()
        self._message_bus_type = Conf.get(const.CONFIG_INDEX, const.TYPE)
        if self._message_bus_type == const.KAFKA:
            self._init_kafka_conf(**kwargs)
        elif self._message_bus_type == const.RMQ:
            self._init_rmq_conf(**kwargs)
        else:
            raise InvalidConfigError("Invalid config")
    except InvalidConfigError as ex:
        # Don't re-wrap our own error: the original broad handler caught
        # the unknown-type InvalidConfigError above and wrapped it a
        # second time ("Invalid config. Invalid config").
        Log.error(f"Invalid config error. {ex}")
        raise
    except Exception as ex:
        Log.error(f"Invalid config error. {ex}")
        # Chain the original cause for debuggability.
        raise InvalidConfigError(f"Invalid config. {ex}") from ex