def check_auth(kwargs):
    """Validate HTTP basic-auth credentials against the stored admin user.

    kwargs must carry 'basicauth_user' and 'basicauth_pass'. Expected
    credentials come from the cluster property file; the stored password is
    base64-encoded (see the AdminUser handler). Returns True on a full match.
    """
    s = ConfigFileOpers()
    confDict = s.getValue(options.cluster_property, ['adminUser', 'adminPassword'])
    targetUserName = confDict['adminUser']
    username = kwargs['basicauth_user']
    # plain inequality instead of cmp(...) != 0 — identical semantics for
    # strings, and cmp() no longer exists in Python 3
    if targetUserName != username:
        return False
    targetPassword = base64.decodestring(confDict['adminPassword'])
    password = kwargs['basicauth_pass']
    if targetPassword != password:
        return False
    return True
class ZkOpers(object):
    '''
    classdocs
    '''
    # shared KazooClient handle; replaced per-instance in __init__
    zk = None
    # retry forever (max_tries=None) with a capped delay between attempts
    DEFAULT_RETRY_POLICY = KazooRetry(
        max_tries=None,
        max_delay=10000,
    )
    rootPath = "/letv/nginx"
    confOpers = ConfigFileOpers()

    def __init__(self):
        '''
        Constructor
        '''
        # zk address/port come from the local property file; skip connecting
        # when either is unset (e.g. before this node has been configured)
        self.zkaddress, self.zkport = get_zk_address()
        if "" != self.zkaddress and "" != self.zkport:
            self.zk = KazooClient(hosts=self.zkaddress + ':' + str(self.zkport),
                                  connection_retry=self.DEFAULT_RETRY_POLICY,
                                  timeout=20)
            # NOTE(review): self.listener is not defined anywhere in this
            # class as shown — presumably supplied by a subclass; confirm.
            self.zk.add_listener(self.listener)
            self.zk.start()
        logging.info("instance zk client (%s:%s)" % (self.zkaddress, self.zkport))

    def close(self):
        # best-effort shutdown: stop the connection machinery, then release
        # sockets; errors are logged rather than propagated
        try:
            self.zk.stop()
            self.zk.close()
        except Exception, e:
            logging.error(e)
class ZkOpers(object):
    '''
    classdocs
    '''
    # shared KazooClient handle; replaced per-instance in __init__
    zk = None
    rootPath = "/letv/javaContainer/jetty"
    confOpers = ConfigFileOpers()

    def __init__(self):
        '''
        Constructor
        '''
        self.zkaddress, self.zkport = get_zk_address()
        # bounded retry: at most 3 connection attempts, 0.5s initial delay
        self.retry = KazooRetry(max_tries=3, delay=0.5)
        self.zk = KazooClient(hosts=self.zkaddress + ':' + str(self.zkport),
                              connection_retry=self.retry)
        self.zk.start()
        #self.zk = self.ensureinstance()
        logging.info("instance zk client (%s:%s)" % (self.zkaddress, self.zkport))

    def close(self):
        # best-effort shutdown; errors are logged rather than propagated
        try:
            self.zk.stop()
            self.zk.close()
            logging.info("stop the zk client successfully")
        except Exception, e:
            logging.error(e)
class AdminUser(APIHandler):
    """Creates/updates the admin credentials in the cluster property file."""

    confOpers = ConfigFileOpers()

    def post(self):
        '''
        function: create admin user
        url example: curl -d "adminUser=root&adminPassword=root" "http://localhost:8888/admin/user"
        '''
        requestParam = {}
        args = self.request.arguments
        # log field names only — the raw values include the plaintext password
        logging.info("args :" + str(args.keys()))
        for key in args:
            value = args[key][0]
            if key == 'adminPassword':
                # stored obfuscated (base64 is NOT encryption); strip the
                # trailing newline that encodestring appends
                value = base64.encodestring(value).strip('\n')
            requestParam.setdefault(key, value)
        # .get() avoids a KeyError (-> HTTP 500) when a field is absent
        # entirely; missing and empty are both rejected with 401
        if requestParam.get('adminUser', '') == '' or requestParam.get('adminPassword', '') == '':
            raise HTTPAPIError(status_code=401,
                               error_detail="username or password is empty",
                               notification="direct",
                               log_message="username or password is empty",
                               response="username or password is empty")
        if requestParam != {}:
            self.confOpers.setValue(options.cluster_property, requestParam)
        result = {}
        result.setdefault("message", "creating admin user successful!")
        self.finish(result)
class Abstract_Async_Thread(threading.Thread): threading_exception_queue = Threading_Exception_Queue() confOpers = ConfigFileOpers() def __init__(self): threading.Thread.__init__(self) def _send_email(self, data_node_ip, text): try: # send email host_ip = getHostIp() invokeCommand = InvokeCommand() cmd_str = "rpm -qa beehive" version_str = invokeCommand._runSysCmd(cmd_str) subject = "[%s] %s" % (data_node_ip, text) body = "[%s] %s" % (data_node_ip, text) body += "\n" + version_str[0] + "\nip:" + host_ip # email_from = "%s <noreply@%s>" % (options.sitename, options.domain) if options.send_email_switch: send_email(options.admins, subject, body) except Exception, e: logging.error("send email process occurs error", e)
class KibanaOpers(AbstractOpers):
    """Start/stop/configure the local Kibana service."""

    def __init__(self):
        self.config_op = ConfigFileOpers()

    def action(self, cmd):
        """Run a shell command; return {'message': ...} describing the result."""
        ret_val = os.system(cmd)
        result = {}
        if ret_val != 0:
            message = "do %s failed" % cmd
            # failure belongs at error level (the original logged it at info)
            logging.error(message)
            result.setdefault("message", message)
        else:
            message = "do %s successfully" % cmd
            # success belongs at info level (the original logged it at error)
            logging.info(message)
            result.setdefault("message", message)
        return result

    def config(self, args):
        """Repoint kibana's elasticsearch_url at args['ip'], keeping port 9200."""
        ip = args.get('ip')
        url = self.config_op.get_value(options.kibana_conf, 'elasticsearch_url', ':')
        old_ip = re.findall('http://(.*):9200', url)[0]
        url = url.replace(old_ip, ip)
        self.config_op.set_value(options.kibana_conf, {"elasticsearch_url": url}, ':')

    def start(self):
        return self.action(options.start_kibana)

    def stop(self):
        return self.action(options.stop_kibana)

    def restart(self):
        return self.action(options.restart_kibana)
class CheckSync():
    """Pulls cluster and data-node properties from zookeeper into local files."""

    config_file_obj = ConfigFileOpers()

    def sync(self):
        """Sync zk state down to local config files.

        No-op when zk is unconfigured, or when the cluster node does not
        exist yet (first start of container-manager in a new server cluster).
        """
        address, port = get_zk_address()
        if not (address and port):
            logging.info('admin zookeeper first!')
            return
        zk_client = Common_ZkOpers()
        if not zk_client.existCluster():
            logging.info(
                "cluster does not exist, may be the first time to sync in a new server cluster"
            )
            return
        self._sync_server_cluster()
        self._sync_data_node()

    def _sync_server_cluster(self):
        """Copy the cluster properties node from zk into the local file."""
        zk_client = Common_ZkOpers()
        cluster_id = zk_client.getClusterUUID()
        raw_value, _ = zk_client.retrieveClusterProp(cluster_id)
        # zk stores a python-repr'd dict; normalize quotes so json can parse it
        parsed = json.loads(raw_value.replace("'", "\""))
        self.config_file_obj.setValue(options.server_cluster_property, parsed)

    def _sync_data_node(self):
        """Copy this host's data-node record from zk, if it is registered."""
        local_ip = getHostIp()
        zk_client = Common_ZkOpers()
        registered = zk_client.retrieve_data_node_list()
        if local_ip not in registered:
            logging.error('server %s should be registered first' % str(local_ip))
            return
        node_value = zk_client.retrieve_data_node_info(local_ip)
        if isinstance(node_value, dict):
            self.config_file_obj.setValue(options.data_node_property, node_value)
class AdminConf(APIHandler):
    """Persists zookeeper connection settings posted by the administrator."""

    confOpers = ConfigFileOpers()

    def post(self):
        '''
        function: admin conf
        url example: curl -d "zkAddress=10.204.8.211&zkPort=2181" "http://localhost:8888/admin/conf"
        '''
        posted = self.get_all_arguments()
        if posted != {}:
            self.confOpers.setValue(options.jetty_manager_property, posted)
        self.finish({"message": "admin conf successful!"})
class ServerClusterHandler(APIHandler):
    '''
    classdocs
    '''
    confOpers = ConfigFileOpers()

    # create server cluster
    # eg. curl --user root:root -d "clusterName=docker_cluster&dataNodeIp=192.168.84.132&dataNodeName=docker_cluster_node_1" "http://localhost:8888/serverCluster"
    def post(self):
        requestParam = self.get_all_arguments()
        zkOper = Requests_ZkOpers()
        existCluster = zkOper.existCluster()
        # reuse the existing cluster uuid when one is already registered in zk,
        # otherwise mint a fresh one for this new cluster
        if existCluster:
            clusterUUID = zkOper.getClusterUUID()
        else:
            clusterUUID = str(uuid.uuid1())
        requestParam.setdefault("clusterUUID", clusterUUID)
        if requestParam != {}:
            self.confOpers.setValue(options.server_cluster_property, requestParam)
            self.confOpers.setValue(options.data_node_property, requestParam)
        # push the (merged) local property files back up to zookeeper
        clusterProps = self.confOpers.getValue(options.server_cluster_property)
        dataNodeProprs = self.confOpers.getValue(options.data_node_property)
        zkOper.writeClusterInfo(clusterUUID, clusterProps)
        zkOper.writeDataNodeInfo(clusterUUID, dataNodeProprs)
        return_message = {}
        return_message.setdefault("message", "creating server cluster successful!")
        self.finish(return_message)

    def get(self):
        # sync the cluster properties stored in zk down to the local file
        zkOper = Requests_ZkOpers()
        clusterUUID = zkOper.getClusterUUID()
        data, _ = zkOper.retrieveClusterProp(clusterUUID)
        # SECURITY NOTE(review): eval() on data read from zookeeper executes
        # arbitrary expressions; prefer ast.literal_eval if zk is not trusted
        self.confOpers.setValue(options.server_cluster_property, eval(data))
        result = {}
        result.setdefault("message", "sync server cluster info to local successful!")
        self.finish(result)
class Config(APIHandler):
    """Writes jetty configuration values and mirrors the file into zookeeper."""

    confOpers = ConfigFileOpers()

    def post(self):
        '''
        function: set the jetty configuration file
        url example: curl -d "k1=v1&k2=v2" "http://localhost:8888/jetty/config"
        '''
        requestParam = self.get_all_arguments()
        zkOper = ZkOpers()
        # always close the zk connection, even if the write fails
        try:
            if requestParam != {}:
                self.confOpers.setValue(options.jetty_service_cnf, requestParam)
            # mirror the full config file text to zk so other nodes can pull it
            source_text = self.confOpers.retrieve_full_text(options.jetty_service_cnf)
            zkOper.writeJettyCnf(source_text)
        finally:
            zkOper.close()
class AdminUser(APIHandler):
    """Stores admin credentials into the server-cluster property file."""

    confOpers = ConfigFileOpers()

    def post(self):
        """Collect posted fields (base64-obfuscating the password) and persist them."""
        collected = {}
        for name, values in self.request.arguments.items():
            first = values[0]
            if name == 'adminPassword':
                # base64 is obfuscation, not encryption; drop the trailing \n
                first = base64.encodestring(first).strip('\n')
            collected.setdefault(name, first)
        if collected != {}:
            self.confOpers.setValue(options.server_cluster_property, collected)
        reply = {}
        reply.setdefault("message", "creating admin user successful!")
        self.finish(reply)
class AdminUser(APIHandler):
    """Creates/updates the admin user in the cluster property file."""

    confOpers = ConfigFileOpers()

    def post(self):
        """
        function: create admin user
        url example: curl -d "adminUser=root&adminPassword=root" "http://localhost:8888/admin/user"
        """
        requestParam = self.get_all_arguments()
        if 'adminPassword' in requestParam:
            # stored obfuscated (base64 is NOT encryption); strip the trailing
            # newline that encodestring appends
            new_value = base64.encodestring(
                requestParam['adminPassword']).strip('\n')
            requestParam['adminPassword'] = new_value
        # log field names only — the original logged the full dict, which
        # exposes the (trivially reversible) encoded password in the logs
        logging.info("args :" + str(requestParam.keys()))
        if requestParam != {}:
            self.confOpers.set_value(options.cluster_property, requestParam, '=')
        result = {}
        result.setdefault("message", "creating admin user successful!")
        self.finish(result)
class Abstract_Async_Thread(threading.Thread): threading_exception_queue = Threading_Exception_Queue() confOpers = ConfigFileOpers() def __init__(self): threading.Thread.__init__(self) def _send_email(self, data_node_ip, text): try: # send email host_ip = getHostIp() version_str = '{0}-{1}'.format(__app__, __version__) subject = "[%s] %s" % (data_node_ip, text) body = "[%s] %s" % (data_node_ip, text) body += "\n" + version_str[0] + "\nip:" + host_ip # email_from = "%s <noreply@%s>" % (options.sitename, options.domain) if options.send_email_switch: send_email(options.admins, subject, body) except Exception, e: logging.error("send email process occurs error", e)
import base64
import logging
import socket

from tornado.httpclient import HTTPClient
from tornado.httpclient import HTTPError
from tornado.options import options

from utils.configFileOpers import ConfigFileOpers
from utils.invokeCommand import InvokeCommand

# module-level, shared property-file accessor
confOpers = ConfigFileOpers()


def get_zk_address():
    """Return (zkAddress, zkPort) read from the jetty-manager property file."""
    ret_dict = confOpers.getValue(options.jetty_manager_property,
                                  ['zkAddress', 'zkPort'])
    zk_address = ret_dict['zkAddress']
    zk_port = ret_dict['zkPort']
    return zk_address, zk_port


def retrieve_userName_passwd():
    """Return (adminUser, adminPassword) with the stored base64 password decoded."""
    confDict = confOpers.getValue(options.cluster_property,
                                  ['adminUser', 'adminPassword'])
    adminUser = confDict['adminUser']
    adminPasswd = base64.decodestring(confDict['adminPassword'])
    return (adminUser, adminPasswd)


# NOTE(review): definition truncated at this chunk boundary — body continues
# outside the visible source
def retrieve_node_name():
class ZkOpers(object):
    """Zookeeper accessor for the elasticsearch cluster tree."""

    root_path = "/letv/elasticsearch/cluster"
    config_op = ConfigFileOpers()

    def __init__(self):
        self.zk_helper = None
        self.base_path = ''
        self.lock_path = ''
        self.config_path = ''
        self.datanode_path = ''
        self.monitor_path = ''
        self.status_path = ''
        self.init()
        self.init_path()

    def init(self):
        """Connect the zk helper using the locally configured address/port."""
        address, port = get_zk_address()
        self.zk_helper = ZkHelper(address, port)

    def init_path(self):
        """Derive all per-cluster zk paths from the base path."""
        self.base_path = self._get_base_path()
        self.lock_path = self.base_path + '/lock/'
        self.config_path = self.base_path + '/config/'
        self.datanode_path = self.base_path + '/datanode/'
        self.monitor_path = self.base_path + '/monitor/'
        self.status_path = self.base_path + '/status/'

    def _get_base_path(self):
        return self.root_path + '/' + get_cluster_name() + '/' + getClusterUUID()

    def watch_config(self):
        """Install a DataWatch that pulls es config changes to the local file."""
        path = self.config_path + CONFIG_ES
        self.zk_helper.ensure_path(path)

        @self.zk_helper.zk.DataWatch(path)
        def watch_es_config(data, stat):
            # re-read through read_es_config (the watch payload is ignored)
            data = self.read_es_config()
            if data:
                self_ip = self.config_op.get_value(options.data_node_property,
                                                   'dataNodeIp')
                # never list ourselves in the unicast discovery hosts
                if self_ip in data['discovery.zen.ping.unicast.hosts']:
                    data['discovery.zen.ping.unicast.hosts'].remove(self_ip)
                self.config_op.set_value(options.es_config, data, ':')

    def get_self_ip(self):
        self_ip = self.config_op.get_value(options.data_node_property,
                                           'dataNodeIp')
        return self_ip

    def cluster_exists(self, name):
        cluster_path = self.root_path + '/' + name
        return self.zk_helper.get_children_list(cluster_path)

    def get_config_lock(self):
        path = self.lock_path + LOCK_CONFIG
        return self.zk_helper.get_lock(path)

    def write(self, path, data):
        self.zk_helper.write(path, str(data))

    def read(self, path):
        # SECURITY NOTE(review): eval() on zk payloads executes arbitrary
        # expressions; prefer ast.literal_eval if zk is not fully trusted
        data = self.zk_helper.read(path)
        return eval(data) if data else {}

    def write_cluster_info(self, data):
        self.write(self.base_path, data)

    # NOTE: the original defined read_cluster_info three times with different
    # signatures; Python keeps only the last binding, so the two shadowed
    # (dead) definitions were removed. This surviving one is the version the
    # rest of the code calls.
    def read_cluster_info(self, cluster_name):
        cluster_path = self.root_path + '/' + cluster_name
        uuid = self.zk_helper.get_children_list(cluster_path)[0]
        path = cluster_path + '/' + uuid
        return self.read(path)

    def write_config(self, node, data):
        path = self.config_path + node
        self.write(path, data)

    def write_es_config(self, data):
        self.write_config(CONFIG_ES, data)

    def read_es_config(self):
        return self.read_config(CONFIG_ES)

    def read_config(self, node):
        path = self.config_path + node
        return self.read(path)

    def write_data_node(self, ip, data):
        path = self.datanode_path + ip
        self.write(path, data)

    def read_data_node(self, ip):
        path = self.datanode_path + ip
        return self.read(path)

    def write_monitor(self, data):
        # monitor entries are keyed by this node's own ip
        node = self.get_self_ip()
        path = self.monitor_path + node
        self.write(path, data)

    def read_monitor(self, node):
        path = self.monitor_path + node
        return self.read(path)

    def write_status(self, data):
        self.write(self.status_path, data)

    def read_status(self):
        return self.read(self.status_path)
class NodeOpers(AbstractOpers):
    '''
    classdocs
    '''
    invokeCommand = InvokeCommand()
    confOpers = ConfigFileOpers()
    # nginx config layout root; ConfigOpers manages sites-available/enabled here
    base_config_path = '/etc/nginx/'
    man = ConfigOpers(base_config_path)

    def __init__(self):
        '''
        Constructor
        '''

    def create(self, params):
        """Register this node's ports locally and publish them to zookeeper.

        Rejects a caller-supplied dataNodeInternalPort; defaults both
        internal and external port to options.port when absent.
        """
        if params == {} or params is None:
            raise UserVisiableException("please set the componentNode info!")
        dataNodeInternalPort = params.get('dataNodeInternalPort')
        if dataNodeInternalPort is not None:
            raise UserVisiableException(
                "no need to set the dataNodeInternalPort param!")
        zkOper = Common_ZkOpers()
        local_uuid = getClusterUUID()
        existCluster = zkOper.existCluster(local_uuid)
        if not existCluster:
            raise UserVisiableException(
                "sync componentCluster info error! please check if sync uuid is right!"
            )
        params.setdefault("dataNodeInternalPort", options.port)
        dataNodeExternalPort = params.get('dataNodeExternalPort')
        if dataNodeExternalPort is None or '' == dataNodeExternalPort:
            params.setdefault("dataNodeExternalPort", options.port)
        self.confOpers.setValue(options.data_node_property, params)
        dataNodeProprs = self.confOpers.getValue(options.data_node_property)
        zkOper.writeDataNodeInfo(local_uuid, dataNodeProprs)
        result = {}
        result.setdefault(
            "message",
            "Configuration on this componentNode has been done successfully")
        return result

    def start(self):
        """Start nginx; on success mark this node as started in zk."""
        _, ret_val = self.invokeCommand._runSysCmd(options.start_nginx)
        result = {}
        if ret_val != 0:
            result.setdefault("message", "start nginx failed")
        else:
            container_name = retrieve_node_name()
            zkOper = Common_ZkOpers()
            zkOper.write_started_node(container_name)
            result.setdefault("message", "start nginx successfully")
        return result

    def stop(self):
        """Stop nginx; on success remove this node from zk's started set."""
        _, ret_val = self.invokeCommand._runSysCmd(options.stop_nginx)
        result = {}
        if ret_val != 0:
            result.setdefault("message", "stop nginx failed")
        else:
            container_name = retrieve_node_name()
            zkOper = Common_ZkOpers()
            zkOper.remove_started_node(container_name)
            result.setdefault("message", "stop nginx successfully")
        return result

    def reload(self):
        """Reload nginx config; on success re-register this node as started."""
        _, ret_val = self.invokeCommand._runSysCmd(options.reload_nginx)
        result = {}
        if ret_val != 0:
            result.setdefault("message", "reload nginx failed")
        else:
            result.setdefault("message", "reload nginx successfully")
            container_name = retrieve_node_name()
            zkOper = Common_ZkOpers()
            zkOper.write_started_node(container_name)
        return result

    def config(self, params):
        """Write an upstream + server conf for the cluster, then reload nginx."""
        _upstream_name = params.get('upstreamName')
        _servers_ports = params.get('serverPorts')
        _cluster = params.get('containerClusterName')
        # 'serverPorts' may be a single entry or a comma-separated list
        server_list = _servers_ports.split(',') if ',' in _servers_ports else [
            _servers_ports
        ]
        upstream = UpStream(_upstream_name, server_list)
        self.man.save_upstream(upstream)
        self.man.enable_server(os.path.basename(self.man.upstream_file_path))
        server = self.__get_server(_upstream_name)
        filename = '%ssites-available/%s.conf' % (self.base_config_path, _cluster)
        set_file_data(filename, str(server), 'w')
        self.man.enable_server('%s.conf' % _cluster)
        self.invokeCommand._runSysCmd(options.reload_nginx)
        result = {}
        result.setdefault("message", "node config upstream successfully")
        return result

    def __get_server(self, upstream_name):
        """Build the nginx Server object that proxies '/' to the upstream."""
        server = Server(port=8001,
                        server_names=['webportal-app'],
                        params={'error_page': '500 502 503 504 /50x.html'})
        # NOTE(review): the proxy_pass value looks redacted ('******') —
        # '******' % upstream_name has no conversion specifier and would raise
        # TypeError at runtime; confirm the real template (e.g. a URL with %s)
        location = Location('/',
                            params={
                                'proxy_pass': '******' % upstream_name,
                                'proxy_set_header': 'Host rds.et.letv.com',
                                'proxy_redirect': 'off',
                                'index': 'index.html index.htm'
                            })
        server.add_location(location=location)
        location = Location('= /50x.html',
                            params={'root': '/usr/share/nginx/html'})
        server.add_location(location=location)
        return server

    def enable(self):
        """Enable every site in sites-available, then reload nginx."""
        files = os.listdir(self.man.sites_available)
        for _file in files:
            self.man.enable_server(_file)
        self.invokeCommand._runSysCmd(options.reload_nginx)

    def disable(self):
        """Disable every site in sites-available, then reload nginx."""
        files = os.listdir(self.man.sites_available)
        for _file in files:
            self.man.disable_server(_file)
        self.invokeCommand._runSysCmd(options.reload_nginx)
def __init__(self):
    """Set up the property-file accessor; the zk handle starts unset."""
    # zookeeper accessor is created lazily elsewhere — presumably via a
    # zk_op property on the enclosing class; confirm against full source
    self._zk_op = None
    self.config_op = ConfigFileOpers()
class ElasticsearchOpers(AbstractOpers):
    """Local + cluster-wide operations for the elasticsearch service."""

    def __init__(self):
        self.config_op = ConfigFileOpers()
        self._zk_op = None

    @property
    def zk_op(self):
        # NOTE: while _zk_op stays None a fresh ToolZkOpers is built on every
        # access; the result is never cached here
        return self._zk_op if self._zk_op else ToolZkOpers()

    def init_cluster(self, param):
        """Mint a new cluster uuid, persist locally and publish to zk."""
        cluster_uuid = str(uuid.uuid1())
        param['clusterUUID'] = cluster_uuid
        self.config_op.set_value(options.cluster_property, param)
        self.zk_op.init_path()
        self.zk_op.write_cluster_info(
            self.config_op.getValue(options.cluster_property))

    def sync_node(self, cluster_name):
        """Pull cluster props for cluster_name from zk and watch config changes."""
        data = self.zk_op.read_cluster_info(cluster_name)
        self.config_op.set_value(options.cluster_property, data)
        self.zk_op.init_path()
        self.zk_op.watch_config()

    def init_node(self, param):
        """Register this node's ip in the shared es config and in zk."""
        self._add_ip(param['dataNodeIp'])
        self.config_op.set_value(options.data_node_property, param)
        self.zk_op.write_data_node(param['dataNodeIp'], param)

    def action(self, cmd):
        """Run a shell command; return {'message': ...} describing the result."""
        ret_val = os.system(cmd)
        result = {}
        if ret_val != 0:
            message = "do %s failed" % cmd
            # failure logged at error level (the original had info/error swapped)
            logging.error(message)
            result.setdefault("message", message)
        else:
            message = "do %s successfully" % cmd
            logging.info(message)
            result.setdefault("message", message)
        return result

    def config(self):
        """Write elasticsearch.yml settings derived from local properties."""
        node_info = self.config_op.getValue(options.data_node_property,
                                            ['dataNodeIp', 'dataNodeName'])
        cluster_info = self.config_op.getValue(options.cluster_property,
                                               ['clusterUUID', 'clusterName'])
        total_dic = {}
        total_dic['cluster.name'] = cluster_info['clusterName']
        total_dic['node.name'] = node_info['dataNodeName']
        total_dic['index.number_of_shards'] = 2
        total_dic['path.data'] = '/srv/esdata'
        total_dic['path.work'] = '/var/log/eswork'
        total_dic['bootstrap.mlockall'] = 'true'
        total_dic['network.host'] = node_info['dataNodeIp']
        total_dic['discovery.zen.minimum_master_nodes'] = 3
        total_dic['discovery.zen.ping.timeout'] = '5s'
        total_dic['discovery.zen.ping.multicast.enabled'] = 'false'
        self.config_op.set_value(options.es_config, total_dic, separator=':')

    def sys_config(self, **kargs):
        """Persist upper-cased system settings for elasticsearch."""
        dic = {}
        for k, v in kargs.items():
            dic[k.upper()] = v
        self.config_op.set_value(options.sys_es_config, dic)

    def pull_config(self):
        """Copy the shared es config from zk, excluding this node's own ip."""
        data = self.zk_op.read_es_config()
        if data:
            self_ip = self.config_op.get_value(options.data_node_property,
                                               'dataNodeIp')
            if self_ip in data['discovery.zen.ping.unicast.hosts']:
                data['discovery.zen.ping.unicast.hosts'].remove(self_ip)
            self.config_op.set_value(options.es_config, data, ':')

    def _add_ip(self, ip):
        """Add one ip to the shared unicast host list (under the config lock)."""
        lock = self.zk_op.get_config_lock()
        with lock:
            zk_ip = self.zk_op.read_es_config()
            if zk_ip:
                ips = zk_ip['discovery.zen.ping.unicast.hosts']
                if ip not in ips:
                    ips.append(ip)
            else:
                zk_ip['discovery.zen.ping.unicast.hosts'] = [ip]
            self.zk_op.write_es_config(zk_ip)

    def _remove_ip(self, ip):
        """Remove one ip from the shared unicast host list (under the lock)."""
        lock = self.zk_op.get_config_lock()
        with lock:
            zk_ip = self.zk_op.read_es_config()
            if zk_ip:
                ips = zk_ip['discovery.zen.ping.unicast.hosts']
                if ip in ips:
                    ips.remove(ip)
            self.zk_op.write_es_config(zk_ip)

    def add_ip(self, nodes_ip):
        """Add several ips to the shared unicast host list (under the lock)."""
        lock = self.zk_op.get_config_lock()
        with lock:
            zk_ip = self.zk_op.read_es_config()
            if zk_ip:
                ips = zk_ip['discovery.zen.ping.unicast.hosts']
                # (debug print statements removed)
                for ip in nodes_ip:
                    if ip not in ips:
                        ips.append(ip)
            else:
                zk_ip['discovery.zen.ping.unicast.hosts'] = nodes_ip
            self.zk_op.write_es_config(zk_ip)

    def remove_ip(self, nodes_ip):
        """Remove several ips from the shared unicast host list (under the lock)."""
        lock = self.zk_op.get_config_lock()
        with lock:
            zk_ip = self.zk_op.read_es_config()
            if zk_ip:
                ips = zk_ip['discovery.zen.ping.unicast.hosts']
                for ip in nodes_ip:
                    if ip in ips:
                        ips.remove(ip)
            self.zk_op.write_es_config(zk_ip)

    def start(self):
        return self.action(options.start_elasticsearch)

    def stop(self):
        return self.action(options.stop_elasticsearch)

    def restart(self):
        return self.action(options.restart_elasticsearch)
class ClusterOpers(BaseClusterOpers):
    '''
    classdocs
    '''
    confOpers = ConfigFileOpers()

    def __init__(self):
        super(ClusterOpers, self).__init__()

    def create(self, params):
        """Create a brand-new nginx cluster.

        Validates params, mints a uuid, persists cluster/node property files
        and publishes both to zookeeper. Returns the new clusterUUID.
        """
        if params == {} or params is None:
            raise UserVisiableException("please set the componentNode info!")
        dataNodeInternalPort = params.get('dataNodeInternalPort')
        if dataNodeInternalPort:
            raise UserVisiableException(
                "no need to set the dataNodeInternalPort param!")
        zkOper = Common_ZkOpers()
        existCluster = zkOper.existCluster()
        if existCluster:
            raise UserVisiableException(
                "server has belong to a componentCluster,should be not create new componentCluster!"
            )
        clusterUUID = str(uuid.uuid1())
        params.setdefault("clusterUUID", clusterUUID)
        params.setdefault("dataNodeInternalPort", options.port)
        dataNodeExternalPort = params.get('dataNodeExternalPort')
        if dataNodeExternalPort is None or '' == dataNodeExternalPort:
            params.setdefault("dataNodeExternalPort", options.port)
        self.confOpers.setValue(options.cluster_property, params)
        self.confOpers.setValue(options.data_node_property, params)
        clusterProps = self.confOpers.getValue(options.cluster_property)
        dataNodeProprs = self.confOpers.getValue(options.data_node_property)
        zkOper.writeClusterInfo(clusterUUID, clusterProps)
        zkOper.writeDataNodeInfo(clusterUUID, dataNodeProprs)
        return clusterUUID

    def start(self):
        """Start every nginx node that is not already marked started in zk."""
        zkOper = Common_ZkOpers()
        existCluster = zkOper.existCluster()
        if not existCluster:
            raise UserVisiableException("Nginx componentCluster does't exist")
        total_nginx_nodes = zkOper.retrieve_nginx_node_list()
        started_nodes = zkOper.retrieve_started_nodes()
        if len(total_nginx_nodes) == len(started_nodes):
            raise UserVisiableException(
                "all nginx nodes have started. No need to start them.")
        logging.info("all nginx nodes: %s" % (total_nginx_nodes))
        # only fan out to the nodes that are not started yet
        to_start_nginx_nodes = list(
            set(total_nginx_nodes) - set(started_nodes))
        logging.info("nginx needed to start: " + str(to_start_nginx_nodes))
        node_infos = []
        for node in to_start_nginx_nodes:
            info = zkOper.retrieve_nginx_node_info(node)
            node_infos.append(info)
        self.baseOpers(node_infos, OperType.start)
        result_dict = {
            'message': 'cluster start processing, please wait for a moment!'
        }
        return result_dict

    def stop(self):
        """Stop every node currently marked started in zk."""
        zkOper = Common_ZkOpers()
        node_infos = []
        started_nodes_list = zkOper.retrieve_started_nodes()
        if not started_nodes_list:
            raise UserVisiableException(
                'cluster has been stopped, no need to do this!')
        for nginx_node in started_nodes_list:
            info = zkOper.retrieve_nginx_node_info(nginx_node)
            node_infos.append(info)
        self.baseOpers(node_infos, OperType.stop)
        result_dict = {
            'message': 'cluster stop processing, please wait for a moment!'
        }
        return result_dict

    def reload(self):
        """Ask every registered nginx node to reload its configuration."""
        zkOper = Common_ZkOpers()
        node_infos = []
        nodes_list = zkOper.retrieve_nginx_node_list()
        for nginx_node in nodes_list:
            info = zkOper.retrieve_nginx_node_info(nginx_node)
            node_infos.append(info)
        self.baseOpers(node_infos, OperType.reload)
        result_dict = {
            'message': 'cluster reload processing, please wait for a moment!'
        }
        return result_dict

    def syncExistedCluster(self, params):
        """Adopt an existing cluster by uuid: copy its props from zk locally."""
        if params == {}:
            error_message = "please fill the cluster uuid!"
            raise UserVisiableException(error_message)
        clusterUUID = params['clusterUUID']
        zkOper = Common_ZkOpers()
        existCluster = zkOper.existCluster(clusterUUID)
        if not existCluster:
            error_message = "Nginx componentCluster does't exist(cluster id:%s), \
please specify the right cluster uuid!" % (clusterUUID)
            raise UserVisiableException(error_message)
        data, _ = zkOper.retrieveClusterProp(clusterUUID)
        logging.info("data in zk %s" % (data))
        # zk stores a python-repr'd dict; normalize quotes so json can parse it
        json_str_data = data.replace("'", "\"")
        dict_data = json.loads(json_str_data)
        self.confOpers.setValue(options.cluster_property, dict_data)

    def retrieve_cluster_started_status(self):
        """Classify the cluster as STARTED / STARTED_PART / STOP by node counts.

        NOTE(review): an empty cluster (0 started of 0 total) reports STARTED;
        confirm that is the intended behaviour.
        """
        zkOper = Common_ZkOpers()
        started_nodes = zkOper.retrieve_started_nodes()
        total_nodes = zkOper.retrieve_nginx_node_list()
        started_nodes_count = len(started_nodes)
        total_nodes_count = len(total_nodes)
        if started_nodes_count == total_nodes_count:
            return ClusterStatus.STARTED
        elif 0 != started_nodes_count:
            return ClusterStatus.STARTED_PART
        else:
            return ClusterStatus.STOP

    def config(self, params):
        """Push an upstream configuration to every registered node."""
        zkOper = Common_ZkOpers()
        node_infos = []
        _nodes_list = zkOper.retrieve_nginx_node_list()
        if not _nodes_list:
            raise UserVisiableException(
                "cluster has not node, please check the cluster's node!")
        for _node in _nodes_list:
            info = zkOper.retrieve_nginx_node_info(_node)
            node_infos.append(info)
        self.baseOpers(node_infos, OperType.config)
        result_dict = {
            'message': 'cluster config upstream processing, please wait for a moment!'
        }
        return result_dict

    def enable(self):
        """Enable the proxy on every registered node."""
        zkOper = Common_ZkOpers()
        node_infos = []
        _nodes_list = zkOper.retrieve_nginx_node_list()
        if not _nodes_list:
            raise UserVisiableException(
                "cluster has not node, please check the cluster's node!")
        for _node in _nodes_list:
            info = zkOper.retrieve_nginx_node_info(_node)
            node_infos.append(info)
        self.baseOpers(node_infos, OperType.enable)
        result_dict = {
            'message': 'cluster proxy enable processing, please wait for a moment!'
        }
        return result_dict

    def disable(self):
        """Disable the proxy on every registered node."""
        zkOper = Common_ZkOpers()
        node_infos = []
        _nodes_list = zkOper.retrieve_nginx_node_list()
        if not _nodes_list:
            raise UserVisiableException(
                "cluster has not node, please check the cluster's node!")
        for _node in _nodes_list:
            info = zkOper.retrieve_nginx_node_info(_node)
            node_infos.append(info)
        self.baseOpers(node_infos, OperType.disable)
        result_dict = {
            'message': 'cluster proxy disable processing, please wait for a moment!'
        }
        return result_dict
def __init__(self):
    """Create the property-file accessor used by this instance."""
    self.config_op = ConfigFileOpers()
class NodeOpers(AbstractOpers):
    '''
    classdocs
    '''
    invokeCommand = InvokeCommand()
    confOpers = ConfigFileOpers()

    def __init__(self):
        '''
        Constructor
        '''

    def createNode(self, params):
        """Validate and persist this jetty node's ports, then publish to zk.

        Rejects a caller-supplied dataNodeInternalPort; defaults both internal
        and external port to options.port when absent.
        """
        if params == {} or params is None:
            raise UserVisiableException("please set the componentNode info!")
        dataNodeInternalPort = params.get('dataNodeInternalPort')
        if dataNodeInternalPort is not None:
            raise UserVisiableException(
                "no need to set the dataNodeInternalPort param!")
        zkOper = ZkOpers()
        # always release the zk connection, even on validation errors below
        try:
            local_uuid = getClusterUUID()
            existCluster = zkOper.existCluster(local_uuid)
            if not existCluster:
                raise UserVisiableException(
                    "sync componentCluster info error! please check if sync uuid is right!"
                )
            params.setdefault("dataNodeInternalPort", options.port)
            dataNodeExternalPort = params.get('dataNodeExternalPort')
            if dataNodeExternalPort is None or '' == dataNodeExternalPort:
                params.setdefault("dataNodeExternalPort", options.port)
            self.confOpers.setValue(options.data_node_property, params)
            dataNodeProprs = self.confOpers.getValue(
                options.data_node_property)
            zkOper.writeDataNodeInfo(local_uuid, dataNodeProprs)
        finally:
            zkOper.close()
        result = {}
        result.setdefault(
            "message",
            "Configuration on this componentNode has been done successfully")
        return result

    def startNode(self):
        """Start jetty; on success mark this node as started in zk."""
        _, ret_val = self.invokeCommand._runSysCmd(options.start_jetty)
        result = {}
        if ret_val != 0:
            result.setdefault("message", "start jetty failed")
        else:
            container_name = retrieve_node_name()
            zkOper = ZkOpers()
            try:
                zkOper.write_started_node(container_name)
            finally:
                zkOper.close()
            result.setdefault("message", "start jetty successfully")
        return result

    def stopNode(self):
        """Stop jetty; on success drop this node from zk's started set."""
        _, ret_val = self.invokeCommand._runSysCmd(options.stop_jetty)
        result = {}
        if ret_val != 0:
            result.setdefault("message", "stop jetty failed")
        else:
            container_name = retrieve_node_name()
            zkOper = ZkOpers()
            try:
                zkOper.remove_started_node(container_name)
            finally:
                zkOper.close()
            result.setdefault("message", "stop jetty successfully")
        return result