def get_ippool_clu(self, cluster_name, status=0, offset=0, limit=None):
    """Return the subnets of one cluster filtered by allocation status.

    Per the original notes: status=0 means allocated, status=1 means
    not yet allocated.

    :param cluster_name: cluster whose subnet pools are listed
    :param status: allocation status each subnet's 'status' must match
    :param offset: slice start into the filtered list
    :param limit: slice end; NOTE(review): used as an absolute end index,
        not a page size — confirm callers expect [offset:limit]
    :return: Result with the matching subnet dicts, or an error Result
        when the cluster is unknown
    """
    self.reload()
    if limit:
        limit = int(limit)

    # Unknown clusters are rejected up front.
    if not Clusterdb.instance().clu_is_exist(cluster_name):
        return Result('', CLUSTER_NOT_EXISTED, '', 400)

    # Gather every child subnet that belongs to the target cluster.
    children = []
    for entry in self.__store:
        if entry['cluster_name'] == cluster_name:
            for net in entry['subnet']:
                children.extend(net['children'])

    # Keep only visible subnets whose status matches the request.
    matched = [c for c in children
               if c['is_show'] == 1 and c['status'] == status]
    return Result(matched[offset:limit])
def update_subip(self, data):
    """Update the settings of one subnet.

    Pops 'fa_ip', 'key' and 'cluster_name' off *data*; whatever remains
    is written to the subnet record.

    :param data: dict carrying the addressing keys plus the fields to update
    :return: Result('') on success, an error Result otherwise
    """
    required = ('fa_ip', 'key', 'cluster_name')
    if not all(field in data.keys() for field in required):
        return Result('', 400, 'param error', 400)
    fa_ip = data.pop('fa_ip')
    key = data.pop('key')
    cluster_name = data.pop('cluster_name')

    # A subnet already allocated (status == 0) must not have its
    # is_show flag modified.
    fa_net = fa_ip.split('/')[0]
    rlt = self.networkdb.read_subnet(cluster_name, fa_net, key)
    if not rlt.success:
        return rlt
    if rlt.content.get('status') == 0:
        return Result('', 400, '')

    # The owning cluster must still exist.
    if not Clusterdb.instance().clu_is_exist(cluster_name):
        return Result('', CLUSTER_NOT_EXISTED, '', 400)

    rlt = self.networkdb.update_subnet(cluster_name, fa_net, key, data)
    if not rlt.success:
        return rlt
    self.reload(flush=1)
    return Result('')
def __check_permission(self, username, clu_name):
    """Check whether *username* may operate on cluster *clu_name*.

    A user is permitted when they appear in the cluster's member list
    or when they are the cluster's creator.

    :param username: user to check
    :param clu_name: cluster name
    :return: Result(True)/Result(False), or the failed Result from the
        underlying reads
    """
    members = Clusterdb.instance().read_clu_member(clu_name)
    if not members.success:
        Log(1, "workspacemgr check_permission error:{}".format(members.message))
        return members

    cluster = Clusterdb.instance().read_cluster(clu_name)
    if not cluster.success:
        return cluster

    # 'creater' is the field name used in the stored cluster record (sic).
    allowed = (username in members.content
               or username == cluster.content.get('creater'))
    return Result(allowed)
def is_service_ready(self):
    """Report whether this cluster's API client and strategy services respond.

    :return: the truthy/falsy result of probing the client and each
        configured strategy-service endpoint (ip1/ip2 are optional)
    :raises InternalException: when the cluster record no longer exists
    """
    # Hard failure if the cluster record disappeared while we were working.
    if not Clusterdb.instance().clu_is_exist(self.cluster_name):
        Log(1, 'The cluster[%s]lost' % (self.cluster_name))
        raise InternalException("cluster deleted.", TASK_CANCEL_ERR)

    # Short-circuit chain: stop probing at the first failing check.
    ready = (self.client
             and self.client.test()
             and self.client.test_strategy_service(self.ip0)
             and (self.client.test_strategy_service(self.ip1) if self.ip1 else True)
             and (self.client.test_strategy_service(self.ip2) if self.ip2 else True))
    return ready
def check_param(self, w):
    """Validate workspace-creation parameters.

    Expected keys in *w* (defaults per product spec):
        cluster_name, workspacegroup_name, workspace_name,
        resource_cpu (1), resource_mem (2000Mi),
        pod_cpu_min (0.1), pod_cpu_max (4),
        pod_mem_min (2Mi), pod_mem_max (2000Mi),
        c_cpu_default (0.1), c_mem_default (500Mi),
        # v1.8 additions:
        c_cpu_default_min, c_mem_default_min,
        c_cpu_min (0.1), c_cpu_max (2), c_mem_min (1Mi), c_mem_max (1000Mi)

    :param w: dict of workspace parameters
    :return: Result('') when valid, an error Result otherwise
    """
    if WorkSpacedb.instance().workspace_is_exist(w.get('workspace_name')):
        return Result('', WORKSPACE_IS_EXISTED, '', 400)
    if not Clusterdb.instance().clu_is_exist(w.get('cluster_name')):
        return Result('', CLUSTER_NOT_EXISTED, '', code=400)
    # Kubernetes built-in namespaces cannot be reused as workspace names.
    if w.get('workspace_name', '') in ('default', 'kube-system', 'kube-public'):
        return Result('', msg='', result=WORKSPACE_IS_EXISTED, code=400)
    return Result('')
def create_new_cluster(self, cluster_info, passport):
    """Register a cluster: validate, probe its nodes, and persist records.

    :param cluster_info: dict with at least 'addr' (host:port), 'cluster_name',
        'create_way', 'creater', plus certificate material
        ('cacerts', 'apiservercerts', 'apiserverkey')
    :param passport: dict; 'licensed' must be truthy for the call to proceed
    :return: Result('') on success, an error Result otherwise
    """
    master_ip = cluster_info.get('addr', '').split(':')[0]
    host_name = master_ip.replace('.', '-')
    # License check.
    if not passport.get('licensed', ''):
        return Result('', LICENSE_OUT_OF_DATE, 'licensed is out of date', 400)
    # Check whether the cluster already exists.
    cluster_name = cluster_info.get('cluster_name', '')
    if Clusterdb.instance().clu_is_exist(cluster_name):
        return Result(0, CLUSTER_HAS_EXISTED, 'clu is existed', 400)
    masternode_list = []
    nodemonitor_list = []  # NOTE(review): collected but not consumed below
    clunode_list = []
    if cluster_info.get('create_way', '') == 'add':
        # Check whether this cluster IP was already added.
        if CluNodedb.instance().is_node_exist(cluster_name, host_name):
            return Result(0, msg='', result=CLUSTER_HAS_EXISTED, code=400)
        # Reject hosts that are in use by ufleet itself.
        ufleet_hosts = GetSysConfig('ufleet_hosts').split(',')
        if master_ip in ufleet_hosts:
            return Result('', msg='the host is used by ufleet.',
                          result=NODE_USED_BY_UFLEET, code=400)
        client = KubeClient({
            'auth_data': cluster_info.get('cacerts', ''),
            'server': 'https://' + cluster_info.get('addr', ''),
            'cert_data': cluster_info.get('apiservercerts'),
            'client_key': cluster_info.get('apiserverkey'),
            # NOTE(review): key is spelled 'cluser_name' (sic) — confirm
            # this is what KubeClient expects before renaming.
            'cluser_name': cluster_name
        })
        rlt = client.connect()
        if not rlt.success:
            Log(
                3, 'KubeClientMgr.add_cluster[%s]fail, as[%s]' %
                (cluster_name, rlt.message))
            return rlt
        # Cache the connected client for later use.
        self.__store[cluster_name] = client
        rlt = client.get_all_nodes()
        if not rlt.success:
            return rlt
        # Build node / master records from each node's InternalIP address.
        for j in rlt.content:
            address = j.get('status', {}).get('addresses', [])
            for add in address:
                if 'InternalIP' == add.get('type', ''):
                    ip = add.get('address')
                    # The node matching the API address is the master.
                    if ip == cluster_info.get('addr', '').split(':')[0]:
                        host_type = 'master'
                    else:
                        host_type = 'node'
                    ip_name = ip.replace('.', '-')
                    node_data = node_struct(cluster_name, add.get('address'),
                                            host_type,
                                            cluster_info.get('creater'))
                    node_data = self.syn_nodeinfo(node_data, j, [])
                    # clusternode record
                    clunode_list.append({
                        'cluster_name': cluster_name,
                        'data': node_data
                    })
                    # masternode record
                    masternode_data = masternode_struct(
                        cluster_info.get('creater'), cluster_name, host_type,
                        add.get('address', ''), '', '', '', '', '', '')
                    masternode_list.append({
                        'master_ip': ip_name,
                        'data': masternode_data
                    })
                    # nodemonitor
                    nodemonitor_list.append(ip_name)
    # NOTE(review): indentation of the persistence section below was
    # reconstructed at function level (runs for every create_way) — confirm.
    # Call launcher to store the cluster's auth info.
    auth_data = auth_info_struct(cluster_info)
    rlt = LauncherClient.instance().load_cluster(auth_data)
    if not rlt.success:
        return Result('', 500, 'load_cluster error:' + rlt.message, 500)
    # Persist the cluster record to etcd.
    new_clu = clu_struct(cluster_info)
    rlt = Clusterdb.instance().create_cluster_full(cluster_name, new_clu)
    if not rlt.success:
        return Result('', rlt.result, rlt.message, 400)
    for i in clunode_list:
        rlt = CluNodedb.instance().save_node(i['cluster_name'], i['data'])
        if not rlt.success:
            return rlt
    for i in masternode_list:
        rlt = Masterdb.instance().save_master(i['master_ip'], i['data'])
        if not rlt.success:
            return rlt
    return Result('')
def delelte_subnet(self, kwargs): """ 删除集群的某个子网集合 :param kwargs: subnet: 子网ip cluster_name :return: """ # 检查集群是否存在 cluster_name = kwargs.get('cluster_name') fa_ip = kwargs.get('fa_ip', '') ip = fa_ip.split('/')[0] if not Clusterdb.instance().clu_is_exist(cluster_name): return Result('', CLUSTER_NOT_EXISTED, '', 400) creater = kwargs.get('passport', {}).get('username', '') # 检查子网是否有被使用 all_net = self.networkdb.subnet_value_list(cluster_name + '/' + ip) if not all_net.success: return all_net for i in all_net.content: if i['status'] == 0: return Result('', SUBNET_IPIS_USED, '') # 删除etcd中数据 rlt = self.networkdb.del_net(cluster_name, ip) if not rlt.success: return rlt self.reload(flush=1) WebLog(3, u'删除', u"集群:[{}]的网络池:[{}]".format(cluster_name, fa_ip), creater) # 删除主机上的网络池 rlt = CluNodedb.instance().read_node_list(cluster_name) if not rlt.success: return rlt master_ip = '' for i in rlt.content: if i.get('type') == 'master': master_ip = i.get('ip') break rlt = Masterdb.instance().read_master(master_ip.replace('.', '-')) if not rlt.success: return rlt con = rlt.content username = con.get('username', None) passwd = con.get('userpwd', None) prikey = con.get('prikey', None) prikeypwd = con.get('prikeypwd', None) port = int(con.get('port', 22)) remot = RemoteParam(master_ip, port, username, passwd, prikey, prikeypwd) rlt = remot.create_sshclient() if not rlt.success: return rlt for i in all_net.content: if not i.get('workspace'): continue ippool_command = "ETCD_ENDPOINTS=http://127.0.0.1:12379 calicoctl delete ippool {}".format( i['subnet']) rlt = remot.exec_command(ippool_command) if not rlt.success or 'Successfully' not in rlt.content[0]: continue remot.close() return Result('')
def create_subnet(self, data):
    """Divide a network segment into sub-pools and persist them to etcd.

    :param data: dict with
        cluster_name: target cluster
        subnet: segment to divide, e.g. '10.0.0.0/16'
        subnet_num: requested number of subnets (rounded up to a power of 2)
        creater: acting user
        ipip / nat: calico pool options
    :return: Result('') on success, an error Result otherwise
    """
    cluster_name = data.get('cluster_name', '')
    # The cluster must exist.
    if not Clusterdb.instance().clu_is_exist(cluster_name):
        return Result('', CLUSTER_NOT_EXISTED,
                      'the cluster:{} not existed'.format(cluster_name), 400)
    rlt = self.networkdb.get_subnet_by_clu(cluster_name)
    if not rlt.success:
        return rlt
    clu_all_sub = rlt.content
    subnet = data.get('subnet', '')  # the segment being divided
    subnet_num = data.get('subnet_num')  # requested number of subnets
    subnet_net = subnet.split('/')[0]
    mask_bit = int(subnet.split('/')[-1])  # prefix length of the segment
    # Bits the network part must borrow from the host part.
    net_log = math.log(subnet_num, 2)
    # Refuse to divide the same parent segment twice.
    fa_ip = IP(subnet_net).make_net(mask_bit).strNormal(1)
    if self.networkdb.is_fa_ip_exist(cluster_name, fa_ip.split('/')[0]):
        return Result('', IPIP_EXISTED_CLUSTER,
                      'the subnet has been divided')
    # Round the borrowed bits up when subnet_num is not an exact power of
    # two; this can yield more subnets than requested.
    if 2**int(net_log) == subnet_num:
        net_log = int(net_log)
    else:
        net_log = int(net_log) + 1
    new_mask = mask_bit + net_log  # prefix length of each new subnet
    # Calico IPAM rejects pools smaller than /26.
    if new_mask > 26:
        return Result(
            '', 400,
            'IP pool size is too small (min /26) for use with Calico IPAM',
            400)
    # A /mask segment holds at most 2**(32-mask) addresses, hence subnets.
    subnet_num_max = 2**(32 - mask_bit)
    if subnet_num > subnet_num_max:
        return Result(
            '', 400,
            'the max number that net can be divided to subnet is:{}'.
            format(subnet_num_max))
    sub = SubnetMgr(str(subnet))
    net_list = []
    for i in range(2**net_log):
        new_subnet = sub.assign_new_subnet(32 - new_mask, i)
        if not new_subnet.success:
            return new_subnet
        subnet_id = str(uuid.uuid1())
        net_ = network_pool(cluster_name, new_subnet.content, new_mask,
                            data.get('creater'), 1, data.get('ipip'),
                            data.get('nat'), 1, fa_ip, subnet_id)
        # Only keep subnets that do not already exist for this cluster.
        if net_['subnet'] not in clu_all_sub:
            net_list.append(net_)
    # Empty list means every candidate subnet was already created.
    if not net_list:
        # NOTE(review): argument order differs from the earlier
        # IPIP_EXISTED_CLUSTER return above (result vs message swapped)
        # — confirm which is intended.
        return Result('', 400, IPIP_EXISTED_CLUSTER, 400)
    # Persist all new pools to etcd in one batch, keyed by
    # cluster/parent-net/subnet-id.
    pool_data = {}
    for i in net_list:
        k = data['cluster_name'] + '/' + fa_ip.split(
            '/')[0] + '/' + i['key']
        pool_data[k] = i
    rlt = self.networkdb.save_clu_ippool(pool_data)
    if not rlt.success:
        return rlt
    WebLog(3, u'创建', u"集群[{}]的网络池[{}]".format(data['cluster_name'], fa_ip),
           data['creater'])
    self.reload(flush=1)
    return Result('')