def workspace_update(self, passport, w):
    """
    Update an existing workspace.

    :param passport: caller identity dict ('username', 'ring')
    :param w: new workspace settings dict ('cluster_name', 'workspace_name',
              'resource_cpu', 'resource_mem', ...)
    :return: Result('') on success, failed Result otherwise
    """
    clu_name = w.get('cluster_name')
    ws_name = w.get('workspace_name')
    # Permission check: ring0 callers bypass the per-cluster check.
    if passport.get('ring') != 'ring0':
        rlt = self.__check_permission(passport.get('username'), clu_name)
        if not rlt.success:
            return rlt
        if not rlt.content:
            return Result('', CLU_NOT_AUTH, 'not allowed', 400)
    # Guard clause: fail fast if the workspace record cannot be read
    # (original nested the checks inside if/else).
    old_workspace = WorkSpacedb.instance().read_workspace(ws_name)
    if not old_workspace.success:
        return Result('', old_workspace.result, old_workspace.message, 400)
    # Quotas may only grow: shrinking cpu/mem below the stored value is rejected.
    if (w.get('resource_cpu') < old_workspace.content.get('cpu', 0)
            or w.get('resource_mem') < old_workspace.content.get('mem', 0)):
        # Message fixed: "litter" -> "less".
        return Result('',
                      msg='change value can not be less than old value',
                      result=WORKSPACE_CHANGE_ERROR,
                      code=400)
    # Validate the requested resources against cluster capacity.
    check_res = self.check_resource(clu_name, w,
                                    old_workspace.content.get('cpu', 0),
                                    old_workspace.content.get('mem', 0))
    if not check_res.success:
        return check_res
    # Apply the namespace change through the apiserver.
    u_status = KubeClientMgr.instance().update_cluster_namespace(
        clu_name, ws_name, w)
    if not u_status.success:
        return Result('', u_status.result, u_status.message, 400)
    # Persist the updated workspace record in etcd.
    data = workspace_struce(w, passport.get('username'))
    WorkSpacedb.instance().update_workspace(ws_name, data)
    WebLog(3, u'更新', u"cluster[{}]的workspace[{}]".format(clu_name, ws_name),
           passport.get('username'))
    return Result('')
def get_host_all_pods(self, cluster_name, host_name):
    """Return all pods running on one host of the given cluster."""
    client = self.get_cluster_client(cluster_name)
    if client is None:
        Log(1, 'KubeClientMgr.get_host_pod_list[%s] fail,as[the cluster info invalid]' % (cluster_name))
        return Result('', FAIL, 'get cluster info fail')
    rlt = WorkSpacedb.instance().get_ns_by_cluster(cluster_name)
    if not rlt.success:
        Log(1, 'KubeClientMgr.get_host_pod_list get_ns_by_cluster[%s] fail,as[%s]' % (cluster_name, rlt.message))
        return rlt
    # Collect the namespace names and query the host's pods in one call.
    ns_name_list = [ns.get('name') for ns in rlt.content]
    rlt = client.get_host_pods(ns_name_list, host_name)
    if not rlt.success:
        return Result('', 400, rlt.message, 400)
    return Result(rlt.content)
def create_configmap(self, namespace, data):
    """Create a configmap in the cluster that owns *namespace*."""
    ws = WorkSpacedb.instance().read_workspace(namespace)
    if not ws.success:
        return ws
    client = self.get_cluster_client(ws.content.get('cluster_name'))
    if client is None:
        return Result('', FAIL, 'get cluster client fail')
    return client.create_configmaps(namespace, data)
def group_ws_list(self, group):
    """
    List every workspace that belongs to *group*.

    :return: Result wrapping the matching workspace dicts
    """
    rlt = WorkSpacedb.instance().read_all_workspace()
    if not rlt.success:
        return rlt
    return Result([ws for ws in rlt.content if ws['group'] == group])
def cluster_list(self, group):
    """
    Distinct clusters that host workspaces of *group*.

    :param group: group name
    :return: Result wrapping a de-duplicated list of cluster names
    """
    rlt = WorkSpacedb.instance().read_all_workspace()
    if not rlt.success:
        return rlt
    clusters = {ws['cluster_name'] for ws in rlt.content if ws['group'] == group}
    return Result(list(clusters))
def workspace_num(self, group=None):
    """
    Count workspaces, optionally restricted to one group.

    :param group: group name, or None to count all workspaces
    :return: Result wrapping the count (0 when the read fails)
    """
    ws = WorkSpacedb.instance().read_all_workspace()
    if not ws.success:
        return Result(0)
    if group:
        return Result(sum(1 for item in ws.content if item['group'] == group))
    return Result(len(ws.content))
def workspace_volumes(self, workspace):
    """
    List the volumes of the cluster that hosts *workspace*.

    :param workspace: workspace name
    :return: Result from VolumeDB.read_volume_list, or the failed
             read_workspace Result
    """
    rlt = WorkSpacedb.instance().read_workspace(workspace)
    if not rlt.success:
        Log(1, 'Storage.workspace_volumes read_workspace[%s]fail,as[%s]' % (workspace, rlt.message))
        return rlt
    cluster_name = rlt.content.get('cluster_name')
    rlt = VolumeDB.instance().read_volume_list(cluster_name)
    if not rlt.success:
        Log(1, 'Storage.volumes read_volume_list[%s]fail,as[%s]' % (cluster_name, rlt.message))
    # Bug fix: the success path previously fell through and returned None;
    # always return the Result from read_volume_list.
    return rlt
def get_by_clu(self, cluster_name, group):
    """
    Workspaces that belong to *group* and live on *cluster_name*.

    :param cluster_name: cluster name
    :param group: group name
    :return: Result wrapping the matching workspace dicts
    """
    rlt = WorkSpacedb.instance().read_all_workspace()
    if not rlt.success:
        return rlt
    matched = []
    for ws in rlt.content:
        if ws['cluster_name'] == cluster_name and ws['group'] == group:
            matched.append(ws)
    return Result(matched)
def get_namespace_configmaps(self, namespace):
    """
    Fetch every configmap defined under *namespace*.

    :param namespace: workspace/namespace name
    :return: Result from the cluster client, or a failed Result
    """
    ws = WorkSpacedb.instance().read_workspace(namespace)
    if not ws.success:
        return ws
    client = self.get_cluster_client(ws.content.get('cluster_name'))
    if client is None:
        return Result('', FAIL, 'get cluster client fail')
    return client.get_configmaps(namespace)
def workspace_pvs(self, workspace):
    """
    List the PVs bound to *workspace* in its cluster.

    :param workspace: workspace name
    :return: Result from PVDB.get_pv_by_workspace, or the failed
             read_workspace Result
    """
    rlt = WorkSpacedb.instance().read_workspace(workspace)
    if not rlt.success:
        # Log tag fixed: this is workspace_pvs, not workspace_volumes.
        Log(1, 'Storage.workspace_pvs read_workspace[%s]fail,as[%s]' % (workspace, rlt.message))
        return rlt
    cluster = rlt.content.get('cluster_name')
    rlt = PVDB.instance().get_pv_by_workspace(cluster, workspace)
    if not rlt.success:
        Log(1, 'Storage.workspace_pvs get_pv_by_workspace[%s][%s]fail,as[%s]' % (cluster, workspace, rlt.message))
    # Bug fix: the success path previously fell through and returned None;
    # always return the Result from get_pv_by_workspace.
    return rlt
def workspace_create(self, passport, w):
    """
    Create a new workspace on the target cluster and persist it.

    :param passport: caller identity dict ('username', 'ring')
    :param w: workspace settings dict ('cluster_name', 'workspace_name', ...)
    :return: Result from save_workspace, or a failed Result
    """
    clu_name = w.get('cluster_name')
    ws_name = w.get('workspace_name')
    # Permission: only ring0 callers skip the per-cluster check.
    if passport.get('ring') != 'ring0':
        perm = self.__check_permission(passport.get('username'), clu_name)
        if not perm.success:
            return perm
        if not perm.content:
            return Result('', CLU_NOT_AUTH, 'not allowed', 400)
    # Parameter validation (name collisions, reserved names, cluster exists).
    checked = self.check_param(w)
    if not checked.success:
        return Result('', checked.result, checked.message, checked.code)
    # Resource-quota validation.
    res = self.check_resource(clu_name, w)
    if not res.success:
        return Result('', res.result, res.message, 400)
    # Create the namespace through the apiserver.
    created = KubeClientMgr.instance().create_cluster_namespace(
        clu_name, ws_name, w)
    if not created.success:
        return Result('', created.result, created.message, 400)
    # Persist the workspace record.
    record = workspace_struce(w, passport.get('username'))
    WebLog(3, u'创建', u"cluster[{}]的workspace[{}]".format(clu_name, ws_name),
           passport.get('username'))
    return WorkSpacedb.instance().save_workspace(ws_name, record)
def get_all_pods(self, cluster_name):
    """
    Collect the pods of every namespace on *cluster_name*.

    :param cluster_name: cluster to query
    :return: Result wrapping a flat list of pods; a namespace whose pod
             query fails is logged and skipped
    """
    client = self.get_cluster_client(cluster_name)
    if client is None:
        Log(1, 'KubeClientMgr.get_all_pods[%s] fail,as[the cluster info invalid]' % (cluster_name))
        return Result('', FAIL, 'get cluster info fail')
    rlt = WorkSpacedb.instance().get_ns_by_cluster(cluster_name)
    if not rlt.success:
        # Log tag fixed: messages were copy-pasted from get_host_pod_list.
        Log(1, 'KubeClientMgr.get_all_pods get_ns_by_cluster[%s] fail,as[%s]' % (cluster_name, rlt.message))
        return rlt
    arr = []
    for ns in rlt.content:
        namespace = ns.get('name')
        if not namespace:
            Log(1, 'KubeClientMgr.get_all_pods get_pod_list skip,as[name space invalid]')
            continue
        pods = client.get_pod_list(namespace)
        if pods.success:
            arr.extend(pods.content)
        else:
            Log(1, 'KubeClientMgr.get_all_pods get_pod_list[%s]fail,as[%s]' % (namespace, pods.message))
    return Result(arr)
def check_param(self, w):
    """
    Validate a workspace-creation payload.

    Expected keys:
        cluster_name, workspacegroup_name, workspace_name
        resource_cpu / resource_mem          - workspace quota (default 1 / 2000Mi)
        pod_cpu_min / pod_cpu_max            - per-pod cpu limits (0.1 / 4)
        pod_mem_min / pod_mem_max            - per-pod mem limits (2Mi / 2000Mi)
        c_cpu_default / c_mem_default        - container defaults (0.1 / 500Mi)
        # ------ added in v1.8 ----
        c_cpu_default_min / c_mem_default_min - container default minimums
        c_cpu_min / c_cpu_max                - container global cpu (0.1 / 2)
        c_mem_min / c_mem_max                - container global mem (1Mi / 1000Mi)

    :param w: request payload dict
    :return: Result('') when valid, failed Result otherwise
    """
    if WorkSpacedb.instance().workspace_is_exist(w.get('workspace_name')):
        return Result('', WORKSPACE_IS_EXISTED, '', 400)
    if not Clusterdb.instance().clu_is_exist(w.get('cluster_name')):
        return Result('', CLUSTER_NOT_EXISTED, '', code=400)
    # Reserved kubernetes namespaces can never be used as workspace names.
    if w.get('workspace_name', '') in ('default', 'kube-system', 'kube-public'):
        return Result('', msg='', result=WORKSPACE_IS_EXISTED, code=400)
    return Result('')
def subnet_workspace(self, cluster_name):
    """
    Workspaces on *cluster_name* that can still be assigned a subnet.

    :param cluster_name: cluster name
    :return: Result wrapping the unassigned workspace names
    """
    # All workspaces on the cluster.
    rlt = WorkSpacedb.instance().get_ns_by_cluster(cluster_name)
    if not rlt.success:
        return rlt
    candidates = [ns['name'] for ns in rlt.content]
    # Remove workspaces that already own an ippool assignment.
    rlt = NetworkMgr.instance().get_ippool_clu(cluster_name, 0)
    if not rlt.success:
        Log(1, "workspace subnet_worksapce error:{}".format(rlt.message))
        return rlt
    for pool in rlt.content:
        if pool['workspace'] in candidates:
            candidates.remove(pool['workspace'])
    return Result(candidates)
def workspce_remain(self, cluster_name):
    """
    Remaining schedulable cpu/mem on a cluster: 80% of the running
    nodes' capacity minus what existing workspaces already reserve.

    :param cluster_name: cluster name
    :return: Result({'cpu_remain': ..., 'mem_remain': ...})
    """
    rlt = CluNodedb.instance().read_node_list(cluster_name)
    if not rlt.success:
        return rlt
    Log(4, "workspace_remain:{}".format(rlt.content))
    total_cpu = 0
    total_mem = 0.0
    for node in rlt.content:
        if node.get('status', '') != 'running':
            continue
        cpu = node.get('cpu', '')
        mem = node.get('memory', '')
        if cpu:
            total_cpu += int(cpu)
        if mem:
            # Strips a trailing 2-char unit (presumably 'Mi') — TODO confirm
            # against how CluNodedb stores node memory.
            total_mem += float(mem[:-2])
    # Subtract what the existing workspaces already reserve.
    used_cpu = 0
    used_mem = 0.0
    rlt = WorkSpacedb.instance().clu_used(cluster_name)
    if rlt.success:
        for ws in rlt.content:
            used_cpu += ws.get('cpu', 0)
            used_mem += float(ws.get('mem', 0))
    elif rlt.result != ETCD_KEY_NOT_FOUND_ERR:
        # Missing key just means no workspaces yet; anything else is an error.
        return Result('', msg=rlt.message, result=500, code=500)
    return Result({
        'cpu_remain': round(total_cpu * 0.8 - used_cpu, 2),
        'mem_remain': round(total_mem * 0.8 - used_mem, 3)
    })
def workspace_delete(self, workspace_name, workspacegroup_name, cluster_name, passport): """ 删除workspace 已经实现 :param workspace_name: :param workspacegroup_name: :return: """ # 检查权限 if passport.get('ring') != 'ring0': rlt = self.__check_permission(passport.get('username'), cluster_name) if not rlt.success: return rlt if not rlt.content: return Result('', CLU_NOT_AUTH, 'not allowed', 400) # 删除应用 # /**** deploy 模块自动检测,不需要主动去调用删除应用接口 ***/ # gws = WorkSpacedb.instance().read_group_workspace(cluster_name) # Log(3, "gws:{}".format(gws.content)) # deploy = DeployClient.instance().get_apply_num(cluster_name, gws.content) # if not deploy.success: # Log(1, "get apply num error:{}".format(gws.message)) # if deploy.content > 0: # rlt = DeployClient.instance().delete_apply(cluster_name, workspacegroup_name, workspace_name) # if not rlt.success: # Log(1, "delete apply error:{}".format(rlt.message)) # 通过apiserver删除namespace rlt = KubeClientMgr.instance().delete_cluster_namespace( cluster_name, workspace_name) if not rlt.success: Log( 1, "kubeclient delete workspace:{} error:{}".format( workspace_name, rlt.message)) return rlt # 删除workspace指定的子网 rlt = NetworkMgr.instance().get_subnet_by_ws(workspace_name) if rlt.success: data = rlt.content if data: NetworkMgr.instance().del_subnet_ws({ "cluster_name": cluster_name, 'fa_ip': data.get('fa_ip'), 'key': data.get('key') }) else: Log(1, "networkmgr get_subnet_by_ws error:{}".format(rlt.message)) # 删除etcd中configmap数据 rlt = ConfigMapdb.instance().del_by_ws(workspace_name) if not rlt.success: if rlt.result != ETCD_KEY_NOT_FOUND_ERR: Log(1, "workspace delete configmap error:{}".format(rlt.message)) # 更新etcd中数据 rlt = WorkSpacedb.instance().delete_workspace(workspace_name) if not rlt.success: Log( 1, "workspacedb delete workspace:{} error:{}".format( workspace_name, rlt.message)) WebLog( 3, u'删除', u"cluster[{}]的workspace[{}]".format(cluster_name, workspace_name), passport.get('username')) return Result('')
1, "Cluster.deletegroup load data to json fail,input[%s]" % (post_data)) return Result('', INVALID_JSON_DATA_ERR, str(e), http.BAD_REQUEST) operator = args.get('passport', {}).get('username', 'system') Log(3, '[{}] delete group [{}] in'.format(operator, post_data)) group = data.get('group') if not group: Log(1, 'deletegroup fail,as[group name is invalid]') return Result('', PARAME_IS_INVALID_ERR, 'group invalid') StorageMgr.instance().delete_group_storage_class(group, operator) ws = WorkSpacedb.instance().get_ws_by_group(group) if not ws.success: Log(1, 'deletegroup get_ws_by_group fail,as[%s]' % (ws.message)) return Result('ok') g_d = {} for ns in ws.content: g_d.setdefault(ns['cluster_name'], []).append(ns['name']) for cluster_name, workspace_list in g_d.items(): client = KubeClientMgr.instance().get_cluster_client(cluster_name) if client: for workspace in workspace_list: # 删除pvc StorageMgr.instance().delete_workspace_pv( cluster_name, workspace, operator)
def is_isolated(self, cluster_name, workspace, isolate): """ isolate workspace :param workspace: :return: """ # 检查工作区是否存在 if not WorkSpacedb.instance().workspace_is_exist(workspace): return Result('', ETCD_RECORD_NOT_EXIST_ERR, 'workspace:{} is not existd'.format(workspace), 400) # connect node rlt = CluNodedb.instance().read_node_list(cluster_name) if not rlt.success: return rlt master_ip = '' for i in rlt.content: if i.get('type') == 'master': master_ip = i.get('ip') break rlt = Masterdb.instance().read_master(master_ip.replace('.', '-')) if not rlt.success: return rlt con = rlt.content username = con.get('username', None) passwd = con.get('userpwd', None) prikey = con.get('prikey', None) prikeypwd = con.get('prikeypwd', None) port = int(con.get('port', 22)) remot = RemoteParam(master_ip, port, username, passwd, prikey, prikeypwd) rlt = remot.create_sshclient() if not rlt.success: return rlt # isolate workspace if isolate == '1': policy_content = """ - apiVersion: v1 kind: policy metadata: name: {} spec: selector: calico/k8s_ns == '{}' ingress: - action: allow source: selector: calico/k8s_ns == '{}' - action: deny source: selector: calico/k8s_ns != '{}' - action: allow egress: - action: allow""".format(workspace, workspace, workspace, workspace) else: policy_content = """ - apiVersion: v1 kind: policy metadata: name: {} spec: selector: calico/k8s_ns == '{}' ingress: - action: allow egress: - action: allow""".format(workspace, workspace) policy_command = "cat << EOF | ETCD_ENDPOINTS=http://127.0.0.1:12379 calicoctl apply -f - {}\nEOF".format( policy_content) rlt = remot.exec_command(policy_command) Log(3, "set isolate command:{}, {}".format(policy_command, rlt.content)) remot.close() if not rlt.success or ('Successfully' not in rlt.content[0] and 'resource already exists' not in rlt.content[0]): return Result('', 400, rlt.content, 400) # 更新etcd rlt = WorkSpacedb.instance().update_workspace(workspace, {'isolate': isolate}) if not rlt.success: return rlt return 
Result(0)
def creat_configmap(self, data): """ 创建configmap :param data: :return: """ # 检查版本在workspace下否存在 if ConfigMapdb.instance().is_existed( data.get('workspace'), data.get('name') + data.get('version')): return Result('', CONFIGMAP_EXISTED, 'is existed', 400) # 检查workspace是否存在 rlt = WorkSpacedb.instance().read_all_gws() if not rlt.success: return rlt group_info = rlt.content.get(data.get('group'), []) if data.get('workspace') not in group_info: return Result('', 400, 'the workspace not in the group', 400) try: content = json.loads(data.get('content')) Log(4, "content1:{}".format(content)) except ValueError: content = yaml.load(data.get('content')) Log(4, "content2:{}".format(content)) except Exception as e: return Result('', 400, str(e.message), 400) c_data = { "apiVersion": "v1", "kind": "ConfigMap", "metadata": { "name": data.get('name') + data.get('version') }, "data": content } Log(4, 'content:{}'.format(data.get('content', ''))) rlt = KubeClientMgr.instance().create_configmap( data.get('workspace'), c_data) if not rlt.success: Log(3, "create_configmap error:{}".format(rlt.message)) return rlt # 保存到etcd data['conf_keys'] = content.keys() con = configmap_struct(data) rlt = ConfigMapdb.instance().save_configmap( data.get('workspace'), data.get('name') + data.get('version'), con) if not rlt.success: return rlt WebLog( 3, u'创建', u"configmap[{}]".format( data.get('name', '') + data.get('version', '')), data.get('creater')) self.reload(1) return Result('')
def set_workspace(self, data):
    """
    Assign a subnet to a workspace: validate both sides are unassigned,
    apply the calico ipPool on the cluster master over SSH, then record
    the assignment in etcd.

    :param data: dict with 'workspace', 'cluster_name', 'fa_ip' (CIDR-like
        string, only the part before '/' is used as the key) and 'key'
    :return: Result('') on success, failed Result otherwise
    """
    workspace = data.get('workspace')
    if not workspace:
        return Result('', 400, 'param error', 400)
    # The workspace must exist.
    if not WorkSpacedb.instance().workspace_is_exist(workspace):
        return Result('', ETCD_RECORD_NOT_EXIST_ERR, '', 400)
    # The subnet must exist.
    rlt = self.networkdb.read_subnet(data.get('cluster_name'),
                                     data.get('fa_ip').split('/')[0],
                                     data.get('key'))
    if not rlt.success:
        return rlt
    subnet_info = rlt.content
    # status == 0 marks a subnet that is already assigned.
    if rlt.content.get('status') == 0:
        return Result('', 400, 'the subnet has been assigned')
    # The workspace itself must not be assigned already.
    rlt = self.get_ippool_clu(data.get('cluster_name'))
    if not rlt.success:
        return rlt
    for i in rlt.content:
        if i.get('workspace') == workspace:
            return Result('', 400, 'the workspace has been assigned', 400)
    # Locate the cluster master so calicoctl can be run over SSH.
    rlt = CluNodedb.instance().read_node_list(data['cluster_name'])
    if not rlt.success:
        return rlt
    master_ip = ''
    for i in rlt.content:
        if i.get('type') == 'master':
            master_ip = i.get('ip')
            break
    # Master records are keyed by the dotted IP with dots replaced by dashes.
    rlt = Masterdb.instance().read_master(master_ip.replace('.', '-'))
    if not rlt.success:
        return rlt
    con = rlt.content
    username = con.get('username', None)
    passwd = con.get('userpwd', None)
    prikey = con.get('prikey', None)
    prikeypwd = con.get('prikeypwd', None)
    port = int(con.get('port', 22))
    remot = RemoteParam(master_ip, port, username, passwd, prikey,
                        prikeypwd)
    rlt = remot.create_sshclient()
    if not rlt.success:
        return rlt
    # Build the ipPool manifest and apply it through a calicoctl heredoc.
    cidr = subnet_info['subnet'].split(
        '/')[0] + '/' + subnet_info['subnet'].split('/')[1]
    ipip = 'true' if subnet_info['ipip'] else 'false'
    nat = 'true' if subnet_info['nat'] else 'false'
    ippool_content = """
apiVersion: v1
kind: ipPool
metadata:
  cidr: {}
spec:
  ipip:
    enabled: {}
  nat-outgoing: {}""".format(cidr, ipip, nat)
    ipp_command = "cat << EOF | ETCD_ENDPOINTS=http://127.0.0.1:12379 calicoctl apply -f - {}\nEOF".format(
        ippool_content)
    rlt = remot.exec_command(ipp_command)
    Log(
        3, "set_workspace ssh exec command:{}, {}".format(
            ipp_command, rlt.content))
    remot.close()
    # An already-existing ipPool is treated as success.
    if not rlt.success or ('Successfully' not in rlt.content[0]
                           and 'resource already exists' not in rlt.content[0]):
        return Result('', 400, rlt.content, 400)
    # Record the assignment in etcd: mark the subnet used (status 0) and
    # remember the owning group.
    group_name = ''
    rlt = WorkSpacedb.instance().read_workspace(workspace)
    if rlt.success:
        group_name = rlt.content.get('group', '')
    rlt = self.networkdb.update_subnet(data.get('cluster_name'),
                                       data.get('fa_ip').split('/')[0],
                                       data.get('key'), {
                                           "workspace": workspace,
                                           "status": 0,
                                           "group": group_name
                                       })
    if not rlt.success:
        return rlt
    self.reload(1)
    return Result('')