def safe_get_all(self, sort_order=None, sort_target='key'):
    """Read every entry from the backing etcd client.

    :param sort_order: optional sort order forwarded to the client
    :param sort_target: field to sort on (defaults to 'key')
    :return: Result wrapping the list of all entries
    """
    entries = list(self.client.get_all(sort_order, sort_target))
    return Result(entries)
# NOTE(review): this fragment begins mid-method — the matching ``try:`` and
# the start of the launch_task-style method are outside the visible chunk.
except InternalException, ex:
    # Known internal failure: log with its error id and surface it as a Result.
    self.log("CheckCapacityTask test_strategy_service fail,as[%s]" % (ex.value), ex.errid)
    return Result('InternalException', ex.errid, "CheckCapacityTask launch_task fail,as[%s]" % (ex.value))
except Exception, e:
    # Unexpected failure: dump the stack and report a generic error (code 1).
    PrintStack()
    self.log("launch_task except[%s]" % (str(e)))
    Log(1, "CheckCapacityTask launch_task fail,as[%s]" % (str(e)))
    return Result(self._id, 1, "CheckCapacityTask launch_task fail,as[%s]" % (str(e)))
return Result(self._id)

def snapshot(self):
    # No extra state beyond the base class; delegate entirely.
    snap = super(CheckCapacityTask, self).snapshot()
    return snap

def rollback(self):
    """Rollback is triggered externally; a failure of the task itself does
    not trigger rollback.
    """
    Log(4, "CheckCapacityTask.rollback")
    # Undo the side effect of this task: remove the PV that was created.
    self.workbench.delete_pv()
    self.log("rollback")
    return Result(self._id)
def safe_get_prefix(self, key_prefix, sort_order=None, sort_target='key'):
    """Read every entry under *key_prefix* from etcd.

    A trailing '/' is appended when missing so the prefix matches whole
    path segments only.

    Fix: use ``endswith`` instead of indexing ``key_prefix[-1]``, which
    raised IndexError when the prefix was an empty string.

    :param key_prefix: etcd key prefix to scan
    :param sort_order: optional sort order forwarded to the client
    :param sort_target: field to sort on (defaults to 'key')
    :return: Result wrapping the list of matching entries
    """
    if not key_prefix.endswith("/"):
        key_prefix += "/"
    arr = list(self.client.get_prefix(key_prefix, sort_order, sort_target))
    return Result(arr)
def create_new_cluster(self, cluster_info, passport):
    """Create a new cluster record (and, for the 'add' path, import its nodes).

    :param cluster_info: dict with 'addr', 'cluster_name', 'create_way',
        certificate material and 'creater'
    :param passport: caller's passport; must contain a valid 'licensed' entry
    :return: Result — empty content on success, error code/message otherwise

    NOTE(review): the source was collapsed onto one line; the nesting below
    is a best-effort reconstruction — confirm against the original file.
    """
    master_ip = cluster_info.get('addr', '').split(':')[0]
    host_name = master_ip.replace('.', '-')
    # Check the license first.
    if not passport.get('licensed', ''):
        return Result('', LICENSE_OUT_OF_DATE, 'licensed is out of date', 400)
    # Check whether the cluster already exists.
    cluster_name = cluster_info.get('cluster_name', '')
    if Clusterdb.instance().clu_is_exist(cluster_name):
        return Result(0, CLUSTER_HAS_EXISTED, 'clu is existed', 400)
    masternode_list = []
    nodemonitor_list = []
    clunode_list = []
    if cluster_info.get('create_way', '') == 'add':
        # Check whether this cluster IP was added before.
        if CluNodedb.instance().is_node_exist(cluster_name, host_name):
            return Result(0, msg='', result=CLUSTER_HAS_EXISTED, code=400)
        # Refuse hosts that are already used by ufleet itself.
        ufleet_hosts = GetSysConfig('ufleet_hosts').split(',')
        if master_ip in ufleet_hosts:
            return Result('', msg='the host is used by ufleet.', result=NODE_USED_BY_UFLEET, code=400)
        # Build a kube client from the supplied certificates and probe it.
        client = KubeClient({
            'auth_data': cluster_info.get('cacerts', ''),
            'server': 'https://' + cluster_info.get('addr', ''),
            'cert_data': cluster_info.get('apiservercerts'),
            'client_key': cluster_info.get('apiserverkey'),
            'cluser_name': cluster_name  # NOTE(review): key spelled 'cluser_name' in the original — kept as-is
        })
        rlt = client.connect()
        if not rlt.success:
            Log(3, 'KubeClientMgr.add_cluster[%s]fail, as[%s]' % (cluster_name, rlt.message))
            return rlt
        self.__store[cluster_name] = client
        # Enumerate the existing nodes and classify each as master or node.
        rlt = client.get_all_nodes()
        if not rlt.success:
            return rlt
        for j in rlt.content:
            address = j.get('status', {}).get('addresses', [])
            for add in address:
                if 'InternalIP' == add.get('type', ''):
                    ip = add.get('address')
                    if ip == cluster_info.get('addr', '').split(':')[0]:
                        host_type = 'master'
                    else:
                        host_type = 'node'
                    ip_name = ip.replace('.', '-')
                    node_data = node_struct(cluster_name, add.get('address'), host_type, cluster_info.get('creater'))
                    node_data = self.syn_nodeinfo(node_data, j, [])
                    # cluster node record
                    clunode_list.append({
                        'cluster_name': cluster_name,
                        'data': node_data
                    })
                    # master node record
                    masternode_data = masternode_struct(
                        cluster_info.get('creater'), cluster_name, host_type,
                        add.get('address', ''), '', '', '', '', '', '')
                    masternode_list.append({
                        'master_ip': ip_name,
                        'data': masternode_data
                    })
                    # node monitor list
                    nodemonitor_list.append(ip_name)
    # Ask the launcher to store the cluster authentication data.
    auth_data = auth_info_struct(cluster_info)
    rlt = LauncherClient.instance().load_cluster(auth_data)
    if not rlt.success:
        return Result('', 500, 'load_cluster error:' + rlt.message, 500)
    # Persist everything to etcd: the cluster record, then its nodes/masters.
    new_clu = clu_struct(cluster_info)
    rlt = Clusterdb.instance().create_cluster_full(cluster_name, new_clu)
    if not rlt.success:
        return Result('', rlt.result, rlt.message, 400)
    for i in clunode_list:
        rlt = CluNodedb.instance().save_node(i['cluster_name'], i['data'])
        if not rlt.success:
            return rlt
    for i in masternode_list:
        rlt = Masterdb.instance().save_master(i['master_ip'], i['data'])
        if not rlt.success:
            return rlt
    return Result('')
def list_clusterrolebinding(self, cluster_name):
    """Return the cluster-role bindings of *cluster_name* via its kube client."""
    kube = self.get_cluster_client(cluster_name)
    if not kube:
        return Result('', FAIL, 'get cluster client fail')
    return kube.clusterrolebinding()
def f(*args, **kwargs):
    """Exception-safe wrapper around *actual_do* (from the enclosing scope).

    Calls the wrapped function and converts any uncaught exception into a
    failure Result instead of letting it propagate.

    Fix: use ``str(e)`` instead of the deprecated ``e.message`` attribute,
    which is missing on most exception types (AttributeError inside the
    handler, and removed entirely in Python 3).
    """
    try:
        return actual_do(*args, **kwargs)
    except Exception as e:
        msg = str(e)
        Log(1, "{0} error:{1}".format(actual_do.__name__, msg))
        return Result('', 400, msg=msg, code=400)
# NOTE(review): fragment starts mid-method — the enclosing ``def`` and the
# names (to_addr, msg, subject, emails) are defined outside this chunk.
try:
    smtp = smtplib.SMTP(timeout=30)
    smtp.connect(self.smtphost, self.port)
    #smtp.set_debuglevel(1)
    #smtp.ehlo()
    if self.ssl:
        # Upgrade the connection to TLS before authenticating.
        smtp.starttls()
    smtp.login(self.username, self.password)
    smtp.sendmail(self.from_addr, to_addr, msg)
    smtp.close()
except Exception,e:
    PrintStack()
    return Result('', SEND_EMAIL_FAIL_ERR, 'send_email subject[%s]to[%s]fail,as[%s]'%(subject, emails, str(e)))
else:
    return Result('ok')

def message(self, to_addr, subject, content):
    # Build a plain-text gb2312 MIME message and return it serialized.
    msg = MIMEText(content,_subtype='plain',_charset='gb2312')
    msg['Subject'] = subject
    msg['From'] = self.from_addr
    msg['To'] = to_addr
    return msg.as_string()

class SendTask(object):
    # One queued outgoing mail; status 0 means not yet sent.
    # NOTE(review): the class is cut off at the end of this chunk — the
    # remaining __init__ assignments (subject, content, log_ids) are not visible.
    def __init__(self, email_tool, to_addr, subject, content, log_ids):
        self.status = 0
        self.email_tool = email_tool
        self.to_addr = to_addr
def delete(self, **kwargs):
    """Delete a cluster role; requires 'cluster_name' and 'name' kwargs."""
    cluster_name = kwargs.get('cluster_name')
    role_name = kwargs.get('name')
    if cluster_name and role_name:
        return DeployClient.instance().del_clusterrole(role_name, cluster_name)
    return Result('', 400, 'param error', 400)
def save_vip(self, cluster_id, vip):
    """Persist the cluster VIP under '<cluster_id>/vip'.

    Best-effort: a failed write is only logged; the return value is
    always Result(0), matching the original contract.
    """
    outcome = self.set(cluster_id + '/vip', vip)
    if not outcome.success:
        Log(1, "save_vip error:{}".format(outcome.message))
    return Result(0)
class InitStorageWork(TaskData):
    # Task payload for initialising a storage cluster.

    def __init__(self, work_info):
        """
        work_info = {
            "cluster_name":"",
            "ip0":""
        }
        """
        # Defaults first; TaskData.__init__ then overwrites them from work_info.
        self.cluster_name = None
        self.ip0 = ''
        self.ip1 = ''
        self.ip2 = ''
        self.cluster_id = ''
        self.license_str = ''
        self.username = DEFAULT_USER_NAME
        self.password = DEFAULT_PASSWORD
        self.client = None
        super(InitStorageWork, self).__init__(work_info)

    def snapshot(self):
        # Extend the base snapshot with this task's persistent fields.
        snap = super(InitStorageWork, self).snapshot()
        snap["cluster_name"] = self.cluster_name
        snap["username"] = self.username
        snap["password"] = self.password
        snap["cluster_id"] = self.cluster_id
        snap["license_str"] = self.license_str
        snap["ip0"] = self.ip0
        snap["ip1"] = self.ip1
        snap["ip2"] = self.ip2
        return snap

    def wait_for_ready(self):
        # Poll the schedule status up to 36 times, 5s apart (~3 minutes).
        for _ in range(36):
            rlt = self.schedule_status()
            if not rlt.success:
                Log(4, 'skip current action, as the schedule is failed')
                return rlt
            time.sleep(5)
        return Result('ready')

    def check_valid(self):
        """Validate the task data: ensure a vespace client exists and a
        license string can be fetched.
        """
        try:
            if self.client is None:
                self.client = self.get_vespace_client()
            rlt = SettingMgr.instance().get_vespace_license()
            if rlt.success:
                self.license_str = rlt.content
            else:
                Log(1, 'InitStorageWork.check_valid get_vespace_license fail,as[%s]' % (rlt.message))
                return rlt
        except InternalException, e:
            Log(1, "InitStorageWork.check_valid except[%s]" % (e.value))
            return Result("InitStorageWork", e.errid, e.value)
        except Exception, e:
            PrintStack()
            return Result("InitStorageWork", INTERNAL_EXCEPT_ERR, "InitStorageWork.check_valid except[%s]" % (str(e)))
        # NOTE(review): no explicit success return is visible in this chunk —
        # the method falls through returning None on success; confirm callers.
        # NOTE(review): fragment starts mid-method — the opening ``try:`` of
        # this JSON-parsing handler is outside the visible chunk.
        _filter = json.loads(post_data.replace("'", "\'"))
        arr = []
        for task_id in _filter['id_list']:
            arr.append(int(task_id))
    except Exception,e:
        Log(1,"delete_task.parse data to json fail,input[%s]"%(post_data))
        return Result('',INVALID_JSON_DATA_ERR,str(e))
    user_id = args.get('passport',{}).get('access_uuid','')
    # Drop each requested task and report the per-task outcome.
    result = {}
    for task_id in arr:
        result[task_id] = self.drop_task(user_id, task_id)
    return Result(result)

def create_init_storage_cluster_task(self, task_data, workbench):
    # Attach the workbench and the cluster key, validate via task.test(),
    # replace any existing task for this cluster, then enqueue the new one.
    task_data["workbench"] = workbench
    task_data["task_key"] = task_data['cluster_name']
    task = StorageSchedule(task_data)
    rlt = task.test()
    if not rlt.success:
        return rlt
    self.delete_init_storage_cluster_task(task._id, task_data['cluster_name'])
    return self.create_task(task)

def create_delete_storage_cluster_task(self, task_data, workbench):
    # NOTE(review): body of this method lies beyond the end of this chunk.
class Storage(object):
    '''
    classdocs
    '''

    def __init__(self):
        '''
        Constructor
        '''
        pass

    @ring0
    @ring3
    def set_license(self, license_str, **args):
        # Store the vespace license and write an audit log entry either way.
        username = args.get('passport', {}).get('username', 'unkown')
        rlt = SettingMgr.instance().set_vespace_license(license_str)
        if rlt.success:
            LogMod(3, username, u'用户 [%s] 更新存储模块license 成功.' % (username))
        else:
            LogMod(3, username, u'用户 [%s] 更新存储模块license 失败,as[%s].' % (username, rlt.message))
        return rlt

    @ring0
    @ring3
    def get_license(self, **args):
        # Read back the stored vespace license.
        return SettingMgr.instance().get_vespace_license()

    @ring0
    @ring3
    def add_cluster(self, post_data, **args):
        # Parse the request body; reject anything that is not valid JSON.
        try:
            data = json.loads(post_data.replace("'", "\'"))
        except Exception, e:
            Log(1, "Configure.add load data to json fail,input[%s]" % (post_data))
            return Result('', INVALID_JSON_DATA_ERR, str(e), http.BAD_REQUEST)
        if 'name' not in data or not data['name']:
            return Result('', INVALID_PARAM_ERR, 'cluster name is invalid', http.BAD_REQUEST)
        if 'ip' not in data or not data['ip']:
            return Result('', INVALID_PARAM_ERR, 'ip is invalid', http.BAD_REQUEST)
        name = data['name']
        ip = data['ip']
        # Create the cluster on the vespace side, then license it.
        client = VeSpaceClient(ip, data.get('username', DEFAULT_USER_NAME),
                               data.get('password', DEFAULT_PASSWORD))
        rlt = client.create_cluster(name, ip)
        if not rlt.success:
            Log(1, 'Storage.add_cluster [%s][%s]fail,as[%s]' % (name, ip, rlt.message))
            return rlt
        cluster_id = rlt.content.get('id')
        rlt = SettingMgr.instance().get_vespace_license()
        if not rlt.success:
            Log(1, 'Storage.add_cluster get_vespace_license fail,as[%s]' % (rlt.message))
            return rlt
        license_str = rlt.content
        rlt = client.add_license(cluster_id, license_str)
        if not rlt.success:
            Log(1, 'Storage.add_cluster add_licence[%s][%s]fail,as[%s]' % (name, ip, rlt.message))
            return rlt
        # Persist the cluster record to etcd.
        cluster_info = rlt.content
        cluster_info['ip'] = ip
        cluster_info['cluster_id'] = cluster_id
        rlt = StoregeClusterDB.instance().create_cluster(name, cluster_info)
        if not rlt.success:
            Log(1, 'Storage.add_cluster[%s][%s]to etcd fail,as[%s]' % (name, ip, rlt.message))
            return rlt
        # NOTE(review): the method is truncated here — its success return lies
        # beyond the end of this chunk.
    # NOTE(review): fragment starts mid-method — this is the failure branch of
    # a read_app_node_list call whose context is outside the visible chunk.
    Log(1, 'Storage.nodes read_app_node_list fail,as[%s]' % (rlt.message))
    return rlt

@ring0
@ring3
def add_node(self, post_data, **args):
    # Parse the request body; reject anything that is not valid JSON.
    try:
        data = json.loads(post_data.replace("'", "\'"))
    except Exception, e:
        Log(1, "Configure.add load data to json fail,input[%s]" % (post_data))
        return Result('', INVALID_JSON_DATA_ERR, str(e), http.BAD_REQUEST)
    if 'cluster_name' not in data or not data['cluster_name']:
        return Result('', INVALID_PARAM_ERR, 'cluster name is invalid', http.BAD_REQUEST)
    if 'ip' not in data or not data['ip']:
        return Result('', INVALID_PARAM_ERR, 'ip is invalid', http.BAD_REQUEST)
    cluster_name = data['cluster_name']
    ip = data['ip']
    # The target cluster must already exist.
    rlt = StoregeClusterDB.instance().get_cluster_info(cluster_name)
    if not rlt.success:
        Log(1, 'Storage.add_node get_cluster_info[%s][%s]fail,as[%s]' % (cluster_name, ip, rlt.message))
        return Result('', FAIL, 'The cluster not exist')
    # NOTE(review): the method continues beyond the end of this chunk.
# NOTE(review): fragment starts mid-method — the enclosing ``def`` (a
# delete-group handler) is outside the visible chunk.
try:
    data = json.loads(post_data.replace("'", "\'"))
except Exception, e:
    Log(1, "Cluster.deletegroup load data to json fail,input[%s]" % (post_data))
    return Result('', INVALID_JSON_DATA_ERR, str(e), http.BAD_REQUEST)
operator = args.get('passport', {}).get('username', 'system')
Log(3, '[{}] delete group [{}] in'.format(operator, post_data))
group = data.get('group')
if not group:
    Log(1, 'deletegroup fail,as[group name is invalid]')
    return Result('', PARAME_IS_INVALID_ERR, 'group invalid')
# Remove the group's storage classes, then look up its workspaces.
StorageMgr.instance().delete_group_storage_class(group, operator)
ws = WorkSpacedb.instance().get_ws_by_group(group)
if not ws.success:
    # No workspaces to clean up — treat as done (best-effort semantics).
    Log(1, 'deletegroup get_ws_by_group fail,as[%s]' % (ws.message))
    return Result('ok')
# Group the workspaces per cluster so each cluster client is fetched once.
g_d = {}
for ns in ws.content:
    g_d.setdefault(ns['cluster_name'], []).append(ns['name'])
for cluster_name, workspace_list in g_d.items():
    client = KubeClientMgr.instance().get_cluster_client(cluster_name)
    if client:
        # NOTE(review): the loop body continues beyond the end of this chunk.
def push(self, repository, tag):
    """Push *repository*:*tag* through the docker client.

    The streamed progress lines are collected, logged, and returned
    wrapped in a Result.
    """
    output = list(self.client.push(repository, tag, stream=True))
    Log(3, 'DockerClient.push return[%s]' % (';'.join(output)))
    return Result(output)
def delete_mount_records(self, cluster_name, mount_id_list):
    """Delete every mount record in *mount_id_list* for *cluster_name*.

    Best-effort: individual outcomes are ignored; always returns
    Result('done'), matching the original contract.
    """
    for record_id in mount_id_list:
        self.delete_mount_record(cluster_name, record_id)
    return Result('done')
def tag(self, image, repository, tag):
    """Tag *image* as *repository*:*tag* via the docker client.

    :return: Result('ok') on success, error Result on failure
    """
    tagged = self.client.tag(image, repository, tag)
    if not tagged:
        return Result('', TAG_IMAGE_FAIL_ERR, 'tag fail.')
    return Result('ok')
class AddPVWork(TaskData):
    # Task payload for creating a persistent volume (PV) on a storage cluster.

    def __init__(self, work_info):
        """
        work_info = {
            "cluster_name":"",
            "ip":"",
            "group":"",
            "pv_name":"",
            "capacity":"",
            "read_write_mode":"",
            "recovery_model":"",
            "volume_type":"",
            "creator":"",
            "replica":"",
            "workspace":""
        }
        """
        # Defaults first; TaskData.__init__ then overwrites them from work_info.
        self.cluster_name = ''
        self.ip = ''
        self.group = ''
        self.pv_name = ''
        self.capacity = ''
        self.read_write_mode = ''
        self.recovery_model = ''
        self.volume_type = ''
        self.creator = ''
        self.replica = 2
        self.workspace = ''
        self.volume_status = 0
        self.cluster_id = ''
        self.data_volume_server = ''
        self.data_volume_path = ''
        self.volume_id = ''
        self.storage_access_path = ''
        self.app_node_list = []
        self.veclient = None
        self.kubeclient = None
        self.targetdport = 0
        super(AddPVWork, self).__init__(work_info)

    def snapshot(self):
        # Extend the base snapshot with this task's persistent fields.
        snap = super(AddPVWork, self).snapshot()
        snap["cluster_name"] = self.cluster_name
        snap["cluster_id"] = self.cluster_id
        snap["workspace"] = self.workspace
        snap["ip"] = self.ip
        snap["pv_name"] = self.pv_name
        snap["group"] = self.group
        snap["capacity"] = self.capacity
        snap["read_write_mode"] = self.read_write_mode
        snap["recovery_model"] = self.recovery_model
        snap["volume_type"] = self.volume_type
        snap["volume_id"] = self.volume_id
        snap["storage_access_path"] = self.storage_access_path
        snap["data_volume_server"] = self.data_volume_server
        snap["data_volume_path"] = self.data_volume_path
        snap["creator"] = self.creator
        snap["volume_status"] = self.volume_status
        snap["app_node_list"] = self.app_node_list
        snap["targetdport"] = self.targetdport
        return snap

    def check_valid(self):
        """Validate the task data: the mount host must be a known app node and
        both the vespace client and the kube client must be obtainable.

        NOTE(review): the source was collapsed onto one line; the nesting
        below is a best-effort reconstruction — confirm against the original.
        """
        try:
            if not StorageNodeDB.instance().is_app_node_exist(self.cluster_name, self.ip):
                return Result('', INVALID_PARAM_ERR, 'mount host is invalid')
            if self.veclient is None:
                self.veclient = VespaceMgr.instance().get_cluster_client(self.cluster_name)
            if not self.veclient.test():
                return Result('', INIT_VESPACE_CLIENT_FAILERR, 'init vespace client fail.')
            kube_client = KubeClientMgr.instance().get_cluster_client(self.cluster_name)
            if kube_client is None:
                Log(1, 'AddPVWork.check_valid get_cluster_client[%s]fail'%(self.cluster_name))
                return Result('', INVALID_PARAM_ERR, 'cluster_name is invalid')
            else:
                self.kubeclient = kube_client
            # Normalize an unknown reclaim policy to 'Delete'.
            if self.recovery_model not in ['Retain', 'Recycle', 'Delete']:
                self.recovery_model = 'Delete'
        except InternalException,e:
            Log(1,"AddPVWork.check_valid except[%s]"%(e.value))
            return Result("AddPVWork",e.errid,e.value)
        except Exception,e:
            PrintStack()
            return Result("AddPVWork",INTERNAL_EXCEPT_ERR,"AddPVWork.check_valid except[%s]"%(str(e)))
def instance(cls):
    ''' Limits application to single instance '''
    # NOTE(review): takes ``cls`` — presumably decorated with @classmethod just
    # above this chunk; the decorator and the class header are not visible.
    with LockGuard(cls.__lock):
        if not hasattr(cls, "_instance"):
            cls._instance = cls()
        return cls._instance

def __init__(self):
    # Fixed refresh offset for the base manager (seconds).
    offset = 60
    Log(3, "AuthenMgr.__init__ with offset[%d]" % (offset))
    super(AuthenMgr, self).__init__(offset)

def get_green_passport(self, method):
    # A few methods bypass authentication entirely and get a helper-ring
    # passport; everything else returns False (no green passport).
    if method == "whatTime" or method == "login" or method == "getCCPVMInfo":
        passport = {}
        passport["method"] = method
        passport["ring"] = RING_HELP_ASSIST
        return passport
    return False

def verify_token(self, method, token, *args):
    # Validate a token; returns Result(passport) on success.
    try:
        passport = self.check_token(method, token, *args)
    except Exception, e:
        PrintStack()
        Log(1, "AuthenMgr.verify_token fail as [%s]" % (str(e)))
        # NOTE(review): no return here — the method implicitly returns None
        # when check_token raises; confirm callers handle that.
    else:
        return Result(passport)
def safe_mkdir(self, path, drop_key=False):
    """Create a directory-style key in etcd by writing *path* with a
    trailing '/' and an empty value.

    Fix: the old check ``path.split('/')[-1] != '/'`` was always true —
    ``split('/')`` can never yield a bare '/' element — so a second slash
    was appended even when *path* already ended in '/'. Use ``endswith``
    to append the separator only when it is actually missing.

    :param path: key path to create
    :param drop_key: accepted for interface compatibility; unused here
    :return: Result wrapping the raw client.put() response
    """
    if not path.endswith('/'):
        path += '/'
    s = self.client.put(path, None)
    return Result(s)
        # NOTE(review): fragment starts mid-``try`` — the opening try: and the
        # earlier statements of check_valid are outside the visible chunk.
        rlt = StorageNodeDB.instance().read_node_info(self.cluster_name, self.ip)
        if not rlt.success:
            Log(1, 'DeleteStorageNodeWork.check_valid read_node_info[%s][%s]fail,as[%s]'%(self.cluster_name, self.ip, rlt.message))
            return Result('', STORAGE_NODE_NOT_EXIST_ERR, 'The node is not exist.' )
        # Fall back to the default ports when the record omits them.
        self.store_api_port = rlt.content.get('store_api_port', STOREGE_HOST_PORT)
        self.app_api_port = rlt.content.get('app_api_port', APPLICATION_HOST_PORT)
    except InternalException,e:
        Log(1,"DeleteStorageNodeWork.check_valid except[%s]"%(e.value))
        return Result("DeleteStorageNodeWork",e.errid,e.value)
    except Exception,e:
        PrintStack()
        return Result("DeleteStorageNodeWork",INTERNAL_EXCEPT_ERR,"DeleteStorageNodeWork.check_valid except[%s]"%(str(e)))
    return Result(0)

def ready(self):
    # Persist the task once it is ready to run.
    self.save_to_db()

def is_service_ready(self):
    # True while the node record still exists; otherwise cancel the task.
    if StorageNodeDB.instance().is_node_exist(self.cluster_name, self.ip):
        return True
    else:
        Log(1, 'The host[%s][%s] lost'%(self.cluster_name, self.ip))
        raise InternalException("host deleted.", TASK_CANCEL_ERR)

def get_cluster_id(self):
    # Cached cluster id short-circuit.
    # NOTE(review): the method continues beyond the end of this chunk.
    if self.cluster_id:
        return self.cluster_id
def mkdir(self, path):
    """Create a directory-style key in etcd by writing *path* with a
    trailing '/' and an empty value.

    Fix: the old check ``path.split('/')[-1] != '/'`` was always true —
    ``split('/')`` can never yield a bare '/' element — so a second slash
    was appended even when *path* already ended in '/'. Use ``endswith``
    to append the separator only when it is actually missing.

    :param path: key path to create
    :return: Result wrapping the raw client.put() response
    """
    if not path.endswith('/'):
        path += '/'
    return Result(self.client.put(path, None))
def create_clusterrolebinding(self, cluster_name, name, data):
    """Create a cluster-role binding on *cluster_name* from *data*.

    NOTE(review): *name* is accepted but never used — presumably the
    binding name is carried inside *data*; confirm against callers.
    """
    kube = self.get_cluster_client(cluster_name)
    if not kube:
        return Result('', FAIL, 'get cluster client fail')
    return kube.create_clusterrolebinding(data)
def connect(self):
    """Probe the kube API server using the stored credentials.

    Builds an in-memory kubeconfig document from self.auth_data /
    self.cert_data / self.client_key / self.server, issues a GET against
    the API root with a 5s timeout, and keeps the HTTPClient on success.

    :return: Result('ok') and self.client set on HTTP 200; error Result otherwise
    """
    # Skeleton kubeconfig; the credential fields are filled in below.
    config = {
        "clusters": [{
            "name": "self",
            "cluster": {
                "certificate-authority-data": "",
                "server": ""
            }
        }],
        "users": [{
            "name": "self",
            "user": {
                "client-certificate-data": "",
                "client-key-data": "",
            }
        }],
        "contexts": [{
            "name": "self",
            "context": {
                "cluster": "self",
                "user": "******"
            }
        }],
        "current-context": "self"
    }
    # Kubeconfig carries certificates base64-encoded.
    config['clusters'][0]['cluster']['certificate-authority-data'] = base64.b64encode(self.auth_data)
    config['clusters'][0]['cluster']['server'] = self.server
    config['users'][0]['user']['client-certificate-data'] = base64.b64encode(self.cert_data)
    config['users'][0]['user']['client-key-data'] = base64.b64encode(self.client_key)
    api = pykube.HTTPClient(pykube.KubeConfig(doc=config))
    try:
        # GET on the API root is enough to verify connectivity and TLS auth.
        response = api.request(method='GET', url='', timeout=5)
    except pykube.PyKubeError as e:
        Log(3, 'server:{},ssl error:{}'.format(self.server, e.message))
        return Result(
            '', FAIL,
            'KubeClient connect to server:{} fail,ssl error:{}'.format(self.server, e))
    except Exception as e:
        Log(3, 'server:{},ssl error:{}'.format(self.server, e.message))
        return Result(
            '', FAIL,
            'KubeClient connect to server:{} except,ssl error:{}'.format(self.server, e))
    if response.status_code == 200:
        # Keep the verified client for subsequent requests.
        self.client = api
        return Result('ok')
    else:
        Log(3, msg='server:{} ssl error. text:{}'.format(self.server, response.text))
        return Result('', FAIL, response.text)
def delete_clusterrolebinding(self, cluster_name, name):
    """Delete the cluster-role binding *name* on *cluster_name*."""
    kube = self.get_cluster_client(cluster_name)
    if not kube:
        return Result('', FAIL, 'get cluster client fail')
    return kube.del_clusterrolebinding(name)
            # NOTE(review): fragment starts mid-call — the exec_create()
            # invocation and the enclosing try/def are outside this chunk.
            'bin/registry', 'garbage-collect', '/etc/docker/registry/config.yml'
        ])
        Log(3, 'exec_create return[%s]' % (str(exec_id)))
        # Stream the exec output, then restart the registry container so it
        # picks up the garbage-collected store.
        res = b''
        for chunk in self.client.exec_start(exec_id, stream=True):
            res += chunk
        Log(3, 'garbage_collect return[%s]' % (res))
        self.client.restart(self.registry_ct_id)
    except Exception, e:
        PrintStack()
        return Result('', CALL_DOCKER_INTERFACE_FAIL_ERR, 'garbage_collect except[%s]' % (str(e)))
    else:
        return Result(res)

def net_status(self):
    # Raw docker stats stream for the registry container.
    return self.client.stats(self.registry_ct_id)

def get_host_port(self, container, port):
    # Map a container port to its first published host port; False on failure.
    try:
        return self.client.port(container, port)[0]['HostPort']
    except Exception:
        PrintStack()
        return False

def search(self, key):
    # Registry/hub search wrapped in a Result.
    arr = self.client.search(key)
    return Result(arr)
def safe_get(self, path):
    """Read *path* from etcd and return the raw value wrapped in a Result.

    The per-key metadata returned by the client is discarded.
    """
    value, _meta = self.client.get(path)
    return Result(value)
def search(self, key):
    """Search for *key* via the client and wrap the result list in a Result."""
    matches = self.client.search(key)
    return Result(matches)
def safe_put(self, key, value, lease=None):
    """Write *value* at *key* (optionally bound to *lease*) and report done."""
    self.client.put(key, value, lease)
    return Result('done')
def safe_get(self, path):
    """Read *path* from etcd, returning an error Result for a missing key.

    :return: Result wrapping the value, or ETCD_KEY_NOT_FOUND_ERR when the
        key does not exist (client returns None)
    """
    value, _meta = self.client.get(path)
    if value is None:
        return Result('', ETCD_KEY_NOT_FOUND_ERR, 'value is None')
    return Result(value)