def __fencing(self, node_name):
    """
    Power off the node named ``node_name`` over ssh (fencing).

    Skips entirely when the admin disabled fencing via maintenance mode.
    Otherwise looks the node up in consul and powers it off through the
    first of its ips that answers a ping, trying backend 2, management,
    then backend 1.

    :param node_name: name of the node to fence.
    """
    maintenance = ManageMaintenance()
    if maintenance.get_maintenance_config().fencing == MaintenanceConfigState.off:
        logger.warning(
            "Fencing action will not fire, the admin stopped it; the cluster is in maintenance mode.")
        return
    node_list = ConsulAPI().get_node_list()
    for node in node_list:
        if str(node.name) != node_name:
            continue
        # Try each candidate ip in preference order; act on the first
        # one that is reachable, then stop.
        for target_ip in (node.backend_2_ip, node.management_ip, node.backend_1_ip):
            if Network().ping(target_ip):
                logger.info("This node will stop node {}/{}.".format(
                    node_name, target_ip))
                ssh().call_command(target_ip, " poweroff ", 5)
                break
        break
def is_ip_configured(_ip):
    """
    Return True if ``_ip`` is currently configured on any local interface.

    :param _ip: ip address string to look for (no netmask suffix).
    :return: bool
    """
    # get_all_configured_ips() maps "ip/mask" strings to interface names;
    # only the keys matter here, so skip unpacking the interface values.
    for cidr in Network().get_all_configured_ips():
        configured_ip = str(cidr).split("/")[0]
        if configured_ip == _ip:
            return True
    return False
def check_connections(self):
    '''
    Ping the management, backend1 and backend2 ip of every management node.

    :param:
    :return: StatusReport with one failed task per unreachable ip;
             success is False when any ping failed.
    '''
    current_cluster_info = configuration().get_cluster_info()
    management_nodes = current_cluster_info.management_nodes
    status_report = StatusReport()
    status_report.success = True
    net = Network()
    # enumerate replaces the old repeated management_nodes.index(node)
    # calls, which were O(n^2) and reported the wrong index when two
    # nodes compared equal.
    for node_num, node in enumerate(management_nodes, start=1):
        node_info = NodeInfo()
        node_info.load_json(json.dumps(node))
        checks = (('management', node_info.management_ip),
                  ('backend1', node_info.backend_1_ip),
                  ('backend2', node_info.backend_2_ip))
        for label, host in checks:
            if not net.ping(host):
                status_report.failed_tasks.append(
                    'core_deploy_ping_error_node_{}_{}'.format(node_num, label))
    if len(status_report.failed_tasks) > 0:
        status_report.success = False
    return status_report
def __clean_unused_ips(self):
    """
    Delete any configured ip that is neither one of this node's expected
    local ips nor one of its backend/management addresses.
    """
    node_ips = (self.__node_info.backend_1_ip,
                self.__node_info.backend_2_ip,
                self.__node_info.management_ip)
    for cidr, iface in Network().get_all_configured_ips().iteritems():
        addr, netmask = str(cidr).split("/")
        # Keep anything we expect to be present on this node.
        if addr in self.__local_ips or addr in node_ips:
            continue
        NetworkAPI().delete_ip(addr, iface, netmask)
        logger.debug("Clean unused ip {} on interface {}.".format(
            addr, iface))
def get_node_eths(self):
    """Return this node's interfaces, marking the management interface."""
    net = Network()
    management_name = net.get_node_management_interface()
    interfaces = net.get_node_interfaces()
    for iface in interfaces:
        if iface.name == management_name:
            iface.is_management = True
            break
    return interfaces
def clean_source_node(self, ip, disk_id):
    """
    Clean up a disk path that is being moved off the node at ``ip``:
    remove its iscsi and network configuration and release its consul
    path lock.

    :param ip: ip of the path being reassigned.
    :param disk_id: id of the disk the path belongs to.
    :return: True on success; False when any step fails (the path status
             is then set to ReassignPathStatus.failed).
    """
    if not self.update_path(ip, ReassignPathStatus.moving):
        return False
    # pool = CephAPI().get_pool_bydisk(disk_id)
    pool = self._get_pool_by_disk(disk_id)
    if not pool:
        logger.error('Could not find pool for disk ' + disk_id)
        return False
    disk = CephAPI().get_disk_meta(disk_id, pool)
    paths_list = disk.paths
    disk_path = None
    path_index = -1
    # Locate the path object whose ip matches the one being cleaned.
    for i, path_str in enumerate(paths_list):
        path = Path()
        path.load_json(json.dumps(path_str))
        if path.ip == ip:
            disk_path = path
            path_index = i
            break
    if disk_path:
        self._clean_iscsi_config(disk_id, path_index, disk.iqn)
        network = Network()
        # Use disk_path explicitly instead of relying on the leaked loop
        # variable `path`.
        NetworkAPI().delete_ip(disk_path.ip, disk_path.eth,
                               disk_path.subnet_mask)
        if network.is_ip_configured(ip):
            # The ip is still configured after deletion: cleanup failed.
            logger.error(
                "Move action,cannot clean network config for disk {} path {}."
                .format(disk_id, path_index))
            self.update_path(ip, ReassignPathStatus.failed)
            return False
        logger.info(
            "Move action,clean network config for disk {} path {}.".
            format(disk_id, path_index))
        # Consul keys store paths 1-based, hence path_index + 1.
        key = self.__app_conf.get_consul_disks_path(
        ) + disk_id + "/" + str(path_index + 1)
        consul_api = ConsulAPI()
        session = self._get_node_session(configuration().get_node_name())
        if ConsulAPI().is_path_locked_by_session(key, session):
            consul_api.release_disk_path(key, session, None)
            logger.info("Move action,release disk {} path {}.".format(
                disk_id, path_index + 1))
        else:
            self.update_path(ip, ReassignPathStatus.failed)
            return False
    return True
def is_valid_network_setting(self):
    """
    Check that this node's interfaces still match the cluster config:
    same interface count and same management interface name.

    :return: True when both match, False otherwise (or on any error).
    """
    cluster_config = configuration().get_cluster_info()
    net = Network()
    node_eths = net.get_node_interfaces()
    try:
        if cluster_config.eth_count != len(node_eths):
            return False
        if cluster_config.management_eth_name != net.get_node_management_interface():
            return False
        return True
    except Exception as ex:
        logger.exception(ex.message)
        return False
def test_set_Node(): from PetaSAN.core.cluster.configuration import configuration from PetaSAN.backend.cluster.manage_node import ManageNode from PetaSAN.backend.cluster.deploy import Wizard from PetaSAN.core.cluster.network import Network net = Network() wizerd = Wizard() conf = configuration() node = NodeInfo() m_node = ManageNode() node.backend_1_ip = "192.168.130.100" node.backend_2_ip = "192.168.120.100" node.management_ip = net.get_node_management_ip() #clu= conf.get_cluster_info() #clu.management_nodes.append(node) #conf.set_cluster_network_info(clu) print wizerd.set_node_info(node)
def set_node_info(self, node_info):
    """
    Fill in this node's name and management ip, persist the node info,
    then start the backend network ips.

    :type node_info: NodeInfo
    :return: Status().done on success, Status().error on any failure.
    """
    try:
        config = configuration()
        node_info.name = config.get_node_name()
        node_info.management_ip = Network().get_node_management_ip()
        config.set_node_info(node_info)
        start_script = ConfigAPI().get_node_start_ips_script_path()
        if call_script(start_script) != 0:
            raise Exception("Error could not start backend network.")
        logger.info("Set node info completed successfully.")
    except Exception as ex:
        logger.exception(ex.message)
        return Status().error
    return Status().done
def set_cluster_network_info(self, cluster_info):
    """
    Persist updated cluster network settings, preserving the existing
    name, bonds and jumbo-frame configuration and recording this node's
    interface count and management interface name.

    :type cluster_info: ClusterInfo
    :return: Status().done on success, Status().error on any failure.
    """
    try:
        config = configuration()
        net = Network()
        current = config.get_cluster_info()
        # Carry over the settings the caller must not change.
        cluster_info.name = current.name
        cluster_info.bonds = current.bonds
        cluster_info.jumbo_frames = current.jumbo_frames
        cluster_info.eth_count = len(net.get_node_interfaces())
        cluster_info.management_eth_name = net.get_node_management_interface()
        config.set_cluster_network_info(cluster_info)
        logger.info("Updated cluster network successfully.")
    except Exception as ex:
        logger.exception(ex.message)
        return Status().error
    return Status().done
def join(self, ip, password):
    """
    Join this node to an existing cluster through the node at ``ip``.

    Copies ssh keys, checks the remote cluster status, pulls the cluster
    info file and copies the current tunings.

    :param ip: management ip of a node already in the cluster.
    :param password: cluster root password.
    :return: the joined cluster's name.
    :raises SSHKeyException: when key copy / password setup fails.
    :raises JoinException: when the remote ceph monitor is not healthy.
    """
    config = configuration()
    ssh_obj = ssh()
    config_api = ConfigAPI()
    if os.path.exists(config_api.get_cluster_info_file_path()):
        os.remove(config_api.get_cluster_info_file_path())
    Network().clean_bonding()
    logger.info("Starting node join")
    if ssh_obj.copy_public_key_from_host(ip, password):
        logger.info("Successfully copied public keys.")
        if ssh_obj.copy_private_key_from_host(ip, password):
            ssh_obj.create_authorized_key_file()
            logger.info("Successfully copied private keys.")
            config.set_password(password)
            logger.info("password set successfully.")
    else:
        raise SSHKeyException(
            "Error while copying keys or setting password.")
    if not ssh_obj.call_command(
            ip, "python {}".format(
                config_api.get_cluster_status_for_join_path())):
        raise JoinException("ceph monitor status not healthy.")
    info_dir = os.path.dirname(config_api.get_cluster_info_file_path())
    # BUG FIX: the old check `if not os.listdir(info_dir)` raised OSError
    # when the directory was missing, and makedirs failed when the
    # directory existed but was empty; test for existence instead.
    if not os.path.exists(info_dir):
        os.makedirs(info_dir)
    logger.info("Start copying cluster info file.")
    if not ssh_obj.copy_file_from_host(
            ip, config_api.get_cluster_info_file_path()):
        raise Exception("Error while copy cluster info file.")
    logger.info("Successfully copied cluster info file.")
    cluster_name = config.get_cluster_name(True)
    logger.info("Joined cluster {}".format(cluster_name))
    self.__copy_current_tunings(ip)
    return cluster_name
def create_cluster_info(self, password, cluster_name):
    """
    Bootstrap a new cluster: create ssh keys, record the cluster name,
    reset bonding and set the root password.

    :param password: root password for the cluster.
    :param cluster_name: name to assign to the new cluster.
    :return: Status().done on success, Status().error on any failure.
    """
    config = configuration()
    ssh_obj = ssh()
    try:
        ssh_obj.create_id(True)
        ssh_obj.create_authorized_key_file()
        logger.info("Created keys for cluster {}".format(cluster_name))
        config.set_cluster_name(cluster_name)
        logger.info(
            "Created cluster file and set cluster name to {}".format(
                cluster_name))
        Network().clean_bonding()
        if not config.set_password(password):
            logger.error("Could not set root password.")
            return Status().error
        logger.info("password set successfully.")
    except Exception as ex:
        logger.exception(ex.message)
        return Status().error
    return Status().done
def get_node_deploy_url(self):
    """Build the deploy web url (port 5001) for this node's management ip."""
    try:
        ip = configuration().get_node_info().management_ip
    except Exception:
        # Node info unavailable; fall back to probing the interface.
        ip = Network().get_node_management_ip()
    return "http://{}:5001/".format(ip)
r = RoleAPI() return r.is_page_allowed_by_name(page_name, session['role_id']) def display_parent(parent_name): r = RoleAPI() return r.is_allowed_by_parent_name(parent_name, session['role_id']) app.jinja_env.globals.update(display_url=display_url) app.jinja_env.globals.update(display_url_by_page=display_url_by_page) app.jinja_env.globals.update(display_parent=display_parent) app.jinja_env.globals.update(gettext=gettext) @app.before_request def make_session_permanent(): session.permanent = True app.permanent_session_lifetime = timedelta(minutes=30) if __name__ == '__main__': #soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (10240, 10240)) #app.debug = True app.run(Network().get_node_management_ip(), port=5000, threaded=True)
but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. ''' from datetime import timedelta from flask import Flask, session from PetaSAN.core.cluster.network import Network # from PetaSAN.Web.DeployController.wizerd import wizerd_controller from PetaSAN.web.deploy_controller.wizard import wizard_controller from PetaSAN.core.common.messages import * app = Flask(__name__) # app.register_blueprint(wizerd_controller) app.register_blueprint(wizard_controller) #app.session_interface = ConsulSessionInterface() app.secret_key="petasan" app.jinja_env.globals.update(gettext=gettext) @app.before_request def make_session_permanent(): session.permanent = True #app.permanent_session_lifetime = timedelta(minutes=5) if __name__ == '__main__': app.run(Network().get_node_management_ip(),port=5001)
This program is free software; you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. ''' from PetaSAN.core.cluster.network import Network from PetaSAN.core.cluster.configuration import configuration config = configuration().get_cluster_info() net = Network() eths = net.get_node_interfaces() print('cluster management interface ' + config.management_eth_name) print('node management interface ' + net.get_node_management_interface()) if config.management_eth_name == net.get_node_management_interface(): print('management interface match') else: print('Error: management interface mis-match !!') print('cluster eth count ' + str(config.eth_count)) print('node eth count ' + str(len(eths))) if config.eth_count == len(eths): print('eth count match')
def test_network_management(): from PetaSAN.core.cluster.network import Network net = Network() print net.get_node_management_interface()
def replace(self, ip, password):
    """
    Replace a failed management node: copy keys and the cluster info file
    from the healthy node at ``ip``, then verify this machine matches one
    of the recorded management nodes by host name and management ip.

    :param ip: management ip of a healthy cluster node.
    :param password: cluster root password.
    :return: the cluster name on success.
    :raises SSHKeyException: when key copy fails.
    :raises ReplaceException: when the cluster state forbids a replace or
        this machine does not match the node being replaced.
    """
    config = configuration()
    ssh_obj = ssh()
    config_api = ConfigAPI()
    logger.info("Starting replace.")
    if os.path.exists(config_api.get_cluster_info_file_path()):
        os.remove(config_api.get_cluster_info_file_path())
    if ssh_obj.copy_public_key_from_host(ip, password):
        logger.info("Successfully copied public keys.")
        if ssh_obj.copy_private_key_from_host(ip, password):
            ssh_obj.create_authorized_key_file()
            logger.info("Successfully copied private keys.")
    else:
        raise SSHKeyException("Error copying keys")
    out, err = ssh_obj.exec_command(
        ip, "python {}".format(config_api.get_cluster_status_for_join_path()))
    out = int(out)
    # Map the remote status script's result code to a specific error.
    if out == -1:
        raise ReplaceException("core_deploy_replace_mon_not_healthy_err")
    elif out == 0:
        raise ReplaceException(
            "core_deploy_replace_cluster_in_progress_err")
    elif out == 1:
        raise ReplaceException(
            "core_deploy_replace_two_management_node_down_err")
    elif out == 3:
        raise ReplaceException("core_deploy_replace_cluster_running_err")
    info_dir = os.path.dirname(config_api.get_cluster_info_file_path())
    # BUG FIX: the old check `if not os.listdir(info_dir)` raised OSError
    # when the directory was missing, and makedirs failed when the
    # directory existed but was empty; test for existence instead.
    if not os.path.exists(info_dir):
        os.makedirs(info_dir)
    logger.info("Starting to copy config file")
    if not ssh_obj.copy_file_from_host(
            ip, config_api.get_cluster_info_file_path()):
        raise Exception("Error copying config file")
    logger.info("Successfully copied config file.")
    cluster_name = config.get_cluster_name(True)
    logger.info("Successfully joined to cluster {}".format(cluster_name))
    wrong_name = True
    wrong_ip = True
    for node_info in config.get_management_nodes_config():
        if node_info.name == config.get_node_name(
        ) or node_info.management_ip == Network().get_node_management_ip():
            if node_info.name == config.get_node_name():
                wrong_name = False
            if node_info.management_ip == Network().get_node_management_ip():
                wrong_ip = False
            if not wrong_name and not wrong_ip:
                config.set_node_info(node_info, True)
                # Marker file signals that a replace is in progress.
                open(config_api.get_replace_file_path(), 'w+').close()
                break
    if wrong_name and wrong_ip:
        os.remove(config_api.get_cluster_info_file_path())
        raise ReplaceException("core_deploy_replace_node_do_not_match_err")
    elif wrong_name:
        os.remove(config_api.get_cluster_info_file_path())
        raise ReplaceException(
            "core_deploy_replace_node_do_not_match_name_err")
    elif wrong_ip:
        os.remove(config_api.get_cluster_info_file_path())
        raise ReplaceException(
            "core_deploy_replace_node_do_not_match_ip_err")
    config.set_password(password)
    logger.info("password set successfully.")
    self.__copy_current_tunings(ip)
    return cluster_name
as published by the Free Software Foundation This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. ''' from PetaSAN.core.common.enums import BondMode import sys from PetaSAN.core.cluster.configuration import configuration from PetaSAN.core.cluster.network import Network from PetaSAN.core.common.cmd import * logger.debug("Loading network configurations.") network = Network() config = configuration() node = config.get_node_info() cluster = config.get_cluster_info() node_management_eth_name = network.get_node_management_interface() node_management_vlan_id = network.get_node_management_vlan_id() node_management_eth_ip = network.get_node_management_ip() node_management_eth_netmask = network.get_node_management_netmask() gateway = Network().get_def_gateway() bonds = config.get_cluster_bonds() jumbo_eths = [] if hasattr(configuration().get_cluster_info(), "jumbo_frames"): jumbo_eths = config.get_cluster_info().jumbo_frames
def test_network(): from PetaSAN.core.cluster.network import Network net = Network() for i in net.get_node_interfaces(): print i.name, i.mac print len(net.get_node_interfaces())