def add_device(
    lib_env, model, model_options, generic_options,
    force_model=False, force_options=False, skip_offline_nodes=False
):
    """
    Add a quorum device to the cluster, distribute and reload configs if live

    model -- quorum device model
    model_options -- model specific options dict
    generic_options -- generic quorum device options dict
    force_model -- continue even if the model is not valid
    force_options -- continue even if options are not valid
    skip_offline_nodes -- continue even if not all nodes are accessible
    """
    __ensure_not_cman(lib_env)

    cfg = lib_env.get_corosync_conf()
    # Try adding qdevice to corosync.conf. This validates all the options and
    # makes sure qdevice is not defined in corosync.conf yet.
    cfg.add_quorum_device(
        lib_env.report_processor,
        model,
        model_options,
        generic_options,
        force_model,
        force_options
    )
    target_list = lib_env.get_node_target_factory().get_target_list(
        cfg.get_nodes()
    )

    # First set up certificates for qdevice, then send corosync.conf to nodes.
    # If anything fails, nodes will not have corosync.conf with qdevice in it,
    # so there is no effect on the cluster.
    if lib_env.is_corosync_conf_live:
        # Do model specific configuration.
        # If the model is not known to pcs and was forced, do not configure
        # anything other than corosync.conf, as we do not know what to do
        # anyway.
        if model == "net":
            _add_device_model_net(
                lib_env,
                # we are sure it's there, it was validated in add_quorum_device
                model_options["host"],
                cfg.get_cluster_name(),
                cfg.get_nodes(),
                skip_offline_nodes
            )

        lib_env.report_processor.process(
            reports.service_enable_started("corosync-qdevice")
        )
        com_cmd = qdevice_com.Enable(
            lib_env.report_processor, skip_offline_nodes
        )
        com_cmd.set_targets(target_list)
        run_and_raise(lib_env.get_node_communicator(), com_cmd)

    # everything set up, it's safe to tell the nodes to use qdevice
    lib_env.push_corosync_conf(cfg, skip_offline_nodes)

    # Now, when corosync.conf has been reloaded, we can start qdevice service.
    if lib_env.is_corosync_conf_live:
        lib_env.report_processor.process(
            reports.service_start_started("corosync-qdevice")
        )
        com_cmd = qdevice_com.Start(
            lib_env.report_processor, skip_offline_nodes
        )
        com_cmd.set_targets(target_list)
        run_and_raise(lib_env.get_node_communicator(), com_cmd)

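# A minimal usage sketch for the function above, not part of the original
# module. The "lib_env" object (a library environment constructed by pcs) and
# the qnetd host name are placeholders supplied by the caller; the option keys
# mirror corosync.conf's quorum.device section.
#
#   add_device(
#       lib_env,
#       model="net",
#       model_options={"host": "qnetd.example.com", "algorithm": "ffsplit"},
#       generic_options={"sync_timeout": "5000"},
#       skip_offline_nodes=True,
#   )
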
def _service_start(lib_env, func):
    lib_env.report_processor.process(
        reports.service_start_started("quorum device")
    )
    try:
        func(lib_env.cmd_runner())
    except external.StartServiceError as e:
        raise LibraryError(
            reports.service_start_error(e.service, e.message)
        )
    lib_env.report_processor.process(
        reports.service_start_success("quorum device")
    )

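# Illustrative call for _service_start above: any callable that accepts a
# command runner can be passed in. The starter shown here, a hypothetical
# qdevice_net.qdevice_start helper, is an assumption about this codebase;
# substitute whatever start function the module actually provides.
#
#   _service_start(lib_env, qdevice_net.qdevice_start)
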
def add_device(
    lib_env, model, model_options, generic_options, heuristics_options,
    force_model=False, force_options=False, skip_offline_nodes=False
):
    """
    Add a quorum device to a cluster, distribute and reload configs if live

    string model -- quorum device model
    dict model_options -- model specific options
    dict generic_options -- generic quorum device options
    dict heuristics_options -- heuristics options
    bool force_model -- continue even if the model is not valid
    bool force_options -- continue even if options are not valid
    bool skip_offline_nodes -- continue even if not all nodes are accessible
    """
    cfg = lib_env.get_corosync_conf()
    if cfg.has_quorum_device():
        raise LibraryError(reports.qdevice_already_defined())
    lib_env.report_processor.process_list(
        corosync_conf_validators.add_quorum_device(
            model,
            model_options,
            generic_options,
            heuristics_options,
            [node.nodeid for node in cfg.get_nodes()],
            force_model=force_model,
            force_options=force_options
        )
    )
    cfg.add_quorum_device(
        model,
        model_options,
        generic_options,
        heuristics_options,
    )
    if cfg.is_quorum_device_heuristics_enabled_with_no_exec():
        lib_env.report_processor.process(
            reports.corosync_quorum_heuristics_enabled_with_no_exec()
        )

    # First set up certificates for qdevice, then send corosync.conf to nodes.
    # If anything fails, nodes will not have corosync.conf with qdevice in it,
    # so there is no effect on the cluster.
    if lib_env.is_corosync_conf_live:
        target_factory = lib_env.get_node_target_factory()
        target_list = target_factory.get_target_list(
            cfg.get_nodes_names(),
            skip_non_existing=skip_offline_nodes,
        )
        # Do model specific configuration.
        # If the model is not known to pcs and was forced, do not configure
        # anything other than corosync.conf, as we do not know what to do
        # anyway.
        if model == "net":
            qdevice_net.set_up_client_certificates(
                lib_env.cmd_runner(),
                lib_env.report_processor,
                lib_env.communicator_factory,
                # We are sure the "host" key is there, it has been validated
                # above.
                target_factory.get_target_from_hostname(
                    model_options["host"]
                ),
                cfg.get_cluster_name(),
                target_list,
                skip_offline_nodes
            )

        lib_env.report_processor.process(
            reports.service_enable_started("corosync-qdevice")
        )
        com_cmd = qdevice_com.Enable(
            lib_env.report_processor, skip_offline_nodes
        )
        com_cmd.set_targets(target_list)
        run_and_raise(lib_env.get_node_communicator(), com_cmd)

    # everything set up, it's safe to tell the nodes to use qdevice
    lib_env.push_corosync_conf(cfg, skip_offline_nodes)

    # Now, when corosync.conf has been reloaded, we can start qdevice service.
    if lib_env.is_corosync_conf_live:
        lib_env.report_processor.process(
            reports.service_start_started("corosync-qdevice")
        )
        com_cmd = qdevice_com.Start(
            lib_env.report_processor, skip_offline_nodes
        )
        com_cmd.set_targets(target_list)
        run_and_raise(lib_env.get_node_communicator(), com_cmd)

def add_device(
    lib_env, model, model_options, generic_options,
    force_model=False, force_options=False, skip_offline_nodes=False
):
    """
    Add a quorum device to the cluster, distribute and reload configs if live

    model -- quorum device model
    model_options -- model specific options dict
    generic_options -- generic quorum device options dict
    force_model -- continue even if the model is not valid
    force_options -- continue even if options are not valid
    skip_offline_nodes -- continue even if not all nodes are accessible
    """
    __ensure_not_cman(lib_env)

    cfg = lib_env.get_corosync_conf()
    # Try adding qdevice to corosync.conf. This validates all the options and
    # makes sure qdevice is not defined in corosync.conf yet.
    cfg.add_quorum_device(
        lib_env.report_processor,
        model,
        model_options,
        generic_options,
        force_model,
        force_options
    )

    # First set up certificates for qdevice, then send corosync.conf to nodes.
    # If anything fails, nodes will not have corosync.conf with qdevice in it,
    # so there is no effect on the cluster.
    if lib_env.is_corosync_conf_live:
        # Do model specific configuration.
        # If the model is not known to pcs and was forced, do not configure
        # anything other than corosync.conf, as we do not know what to do
        # anyway.
        if model == "net":
            _add_device_model_net(
                lib_env,
                # we are sure it's there, it was validated in add_quorum_device
                model_options["host"],
                cfg.get_cluster_name(),
                cfg.get_nodes(),
                skip_offline_nodes
            )

        lib_env.report_processor.process(
            reports.service_enable_started("corosync-qdevice")
        )
        communicator = lib_env.node_communicator()
        parallel_nodes_communication_helper(
            qdevice_client.remote_client_enable,
            [
                [(lib_env.report_processor, communicator, node), {}]
                for node in cfg.get_nodes()
            ],
            lib_env.report_processor,
            skip_offline_nodes
        )

    # everything set up, it's safe to tell the nodes to use qdevice
    lib_env.push_corosync_conf(cfg, skip_offline_nodes)

    # Now, when corosync.conf has been reloaded, we can start qdevice service.
    if lib_env.is_corosync_conf_live:
        lib_env.report_processor.process(
            reports.service_start_started("corosync-qdevice")
        )
        communicator = lib_env.node_communicator()
        parallel_nodes_communication_helper(
            qdevice_client.remote_client_start,
            [
                [(lib_env.report_processor, communicator, node), {}]
                for node in cfg.get_nodes()
            ],
            lib_env.report_processor,
            skip_offline_nodes
        )

def add_device(
    lib_env, model, model_options, generic_options, heuristics_options,
    force_model=False, force_options=False, skip_offline_nodes=False
):
    """
    Add a quorum device to a cluster, distribute and reload configs if live

    string model -- quorum device model
    dict model_options -- model specific options
    dict generic_options -- generic quorum device options
    dict heuristics_options -- heuristics options
    bool force_model -- continue even if the model is not valid
    bool force_options -- continue even if options are not valid
    bool skip_offline_nodes -- continue even if not all nodes are accessible
    """
    cfg = lib_env.get_corosync_conf()
    if cfg.has_quorum_device():
        raise LibraryError(reports.qdevice_already_defined())

    report_processor = SimpleReportProcessor(lib_env.report_processor)
    report_processor.report_list(
        corosync_conf_validators.add_quorum_device(
            model,
            model_options,
            generic_options,
            heuristics_options,
            [node.nodeid for node in cfg.get_nodes()],
            force_model=force_model,
            force_options=force_options
        )
    )

    if lib_env.is_corosync_conf_live:
        cluster_nodes_names, report_list = get_existing_nodes_names(
            cfg,
            # Pcs is unable to communicate with nodes missing names. It cannot
            # send new corosync.conf to them. That might break the cluster.
            # Hence we error out.
            error_on_missing_name=True
        )
        report_processor.report_list(report_list)

    if report_processor.has_errors:
        raise LibraryError()

    cfg.add_quorum_device(
        model,
        model_options,
        generic_options,
        heuristics_options,
    )
    if cfg.is_quorum_device_heuristics_enabled_with_no_exec():
        lib_env.report_processor.process(
            reports.corosync_quorum_heuristics_enabled_with_no_exec()
        )

    # First set up certificates for qdevice, then send corosync.conf to nodes.
    # If anything fails, nodes will not have corosync.conf with qdevice in it,
    # so there is no effect on the cluster.
    if lib_env.is_corosync_conf_live:
        target_factory = lib_env.get_node_target_factory()
        target_list = target_factory.get_target_list(
            cluster_nodes_names,
            skip_non_existing=skip_offline_nodes,
        )
        # Do model specific configuration.
        # If the model is not known to pcs and was forced, do not configure
        # anything other than corosync.conf, as we do not know what to do
        # anyway.
        if model == "net":
            qdevice_net.set_up_client_certificates(
                lib_env.cmd_runner(),
                lib_env.report_processor,
                lib_env.communicator_factory,
                # We are sure the "host" key is there, it has been validated
                # above.
                target_factory.get_target_from_hostname(
                    model_options["host"]
                ),
                cfg.get_cluster_name(),
                target_list,
                skip_offline_nodes
            )

        lib_env.report_processor.process(
            reports.service_enable_started("corosync-qdevice")
        )
        com_cmd = qdevice_com.Enable(
            lib_env.report_processor, skip_offline_nodes
        )
        com_cmd.set_targets(target_list)
        run_and_raise(lib_env.get_node_communicator(), com_cmd)

    # everything set up, it's safe to tell the nodes to use qdevice
    lib_env.push_corosync_conf(cfg, skip_offline_nodes)

    # Now, when corosync.conf has been reloaded, we can start qdevice service.
    if lib_env.is_corosync_conf_live:
        lib_env.report_processor.process(
            reports.service_start_started("corosync-qdevice")
        )
        com_cmd = qdevice_com.Start(
            lib_env.report_processor, skip_offline_nodes
        )
        com_cmd.set_targets(target_list)
        run_and_raise(lib_env.get_node_communicator(), com_cmd)
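
# A hedged sketch of the option shapes accepted by the newer signature above.
# The heuristics keys follow corosync.conf's quorum.device.heuristics section;
# "exec_ping" is an illustrative exec_NAME entry and the host name is made up.
#
#   add_device(
#       lib_env,
#       model="net",
#       model_options={"host": "qnetd.example.com", "algorithm": "lms"},
#       generic_options={"timeout": "20000"},
#       heuristics_options={
#           "mode": "on",
#           "exec_ping": "/usr/bin/ping -c 1 10.0.0.1",
#       },
#   )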