def test_get_existed(self, common_patch):
    """Fetching a namespace that already exists should return its existing record."""
    ns_client = Namespace(
        CtxCluster.create(TEST_CLUSTER_ID, TEST_PROJECT_ID, token='token')
    )
    result = ns_client.get_or_create_cc_namespace(
        'default', 'admin', labels={'test_key': 'test_val'}
    )
    assert result == {'name': 'default', 'namespace_id': 1}
def delete_hpa(request, project_id, cluster_id, ns_name, namespace_id, name):
    """Delete an HPA resource and mark its instance record as removed.

    Raises:
        DeleteResourceError: on shared clusters (deletion forbidden) or on
            any failure talking to the cluster.
    """
    # HPA deletion is not permitted on shared clusters.
    if get_cluster_type(cluster_id) == ClusterType.SHARED:
        raise DeleteResourceError(_("共享集群 HPA 不支持删除"))

    ctx_cluster = CtxCluster.create(
        token=request.user.token.access_token, project_id=project_id, id=cluster_id
    )
    try:
        hpa_client.HPA(ctx_cluster).delete_ignore_nonexistent(name=name, namespace=ns_name)
    except Exception as err:
        logger.error("delete hpa error, namespace: %s, name: %s, error: %s", ns_name, name, err)
        raise DeleteResourceError(_("删除HPA资源失败"))

    # Deletion succeeded: flag the instance config as deleted.
    InstanceConfig.objects.filter(
        namespace=namespace_id, category=K8sResourceName.K8sHPA.value, name=name
    ).update(
        updator=request.user.username,
        oper_type=application_constants.DELETE_INSTANCE,
        deleted_time=timezone.now(),
        is_deleted=True,
        is_bcs_success=True,
    )
def get_cluster_hpa_list(request, project_id, cluster_id, cluster_env, cluster_name, namespace=None):
    """Return the basic HPA list for a cluster; empty list when the query fails."""
    access_token = request.user.token.access_token
    project_code = request.project.english_name
    hpa_list = []
    try:
        if request.project.kind == ProjectKind.MESOS.value:
            mesos_client = mesos.MesosClient(access_token, project_id, cluster_id, env=cluster_env)
            raw_hpa = mesos_client.list_hpa(namespace).get("data") or []
            hpa_list = slz_mesos_hpa_info(raw_hpa, project_code, cluster_name, cluster_env, cluster_id)
        else:
            ctx_cluster = CtxCluster.create(token=access_token, project_id=project_id, id=cluster_id)
            formatter = HPAFormatter(cluster_id, project_code, cluster_name, cluster_env)
            hpa_list = hpa_client.HPA(ctx_cluster).list(formatter=formatter)
    except Exception as err:
        logger.error("get hpa list error, %s", err)
    return hpa_list
async def __call__(self, scope, receive, send):
    """ASGI entry point: resolve the session context and attach it to the scope."""
    query_params = dict(parse.parse_qsl(scope['query_string'].decode('utf8')))
    session_id = query_params.get("session_id")
    if not session_id:
        raise HttpResponseForbidden(_("session_id为空"))

    project_id, cluster_id = self.extract_project_and_cluster_id(scope)
    ctx = session_mgr.create(project_id, cluster_id).get(session_id)
    if not ctx:
        raise HttpResponseForbidden(_("获取ctx为空, session_id不正确或者已经过期"))

    scope["ctx_cluster"] = CtxCluster.create(
        id=ctx['cluster_id'],
        project_id=ctx['project_id'],
        token=ctx['access_token'],
    )
    scope["ctx_session"] = ctx
    return await self.inner(scope, receive, send)
def crd_client(self, project_id, cluster_id):
    """Build a CustomResourceDefinition client bound to the given test cluster."""
    ctx_cluster = CtxCluster.create(token='token', project_id=project_id, id=cluster_id)
    return CustomResourceDefinition(ctx_cluster, api_version=sample_crd["apiVersion"])
def test_failed(self, project_id, cluster_id):
    """A malformed PaaSCC payload should surface as a KeyError."""
    querier = BcsAPIEnvironmentQuerier(
        CtxCluster.create(cluster_id, project_id, token='token')
    )
    with StubPaaSCCClient.get_cluster_by_id.mock(return_value=fake_cc_get_cluster_result_failed):
        with pytest.raises(KeyError):
            assert querier.do()
def get_or_create_namespace(self, request, project_id, cluster_id):
    """Ensure the bcs-system namespace exists: create it when missing, otherwise return its data."""
    ctx_cluster = CtxCluster.create(
        token=request.user.token.access_token, id=cluster_id, project_id=project_id
    )
    ns_client = Namespace(ctx_cluster)
    return ns_client.get_or_create_cc_namespace(K8S_LB_NAMESPACE, request.user.username)
def common_patch(self):
    """Fixture: create a throwaway Deployment for the test, then remove it afterwards."""
    ctx_cluster = CtxCluster.create(TEST_CLUSTER_ID, TEST_PROJECT_ID, token='token')
    Deployment(ctx_cluster).update_or_create(
        namespace=TEST_NAMESPACE,
        name=self.deployment_name,
        body=gen_deployment_body(self.deployment_name),
    )
    yield
    # Teardown: delete the deployment created above.
    Deployment(ctx_cluster).delete(namespace=TEST_NAMESPACE, name=self.deployment_name)
def cobj_client(self, project_id, cluster_id):
    """Build a custom-object client for the sample CRD on the given cluster."""
    ctx_cluster = CtxCluster.create(token='token', project_id=project_id, id=cluster_id)
    return get_cobj_client_by_crd(ctx_cluster, crd_name=getitems(sample_crd, "metadata.name"))
def delete_hpa(request, project_id, cluster_id, ns_name, namespace_id, name):
    """Delete an HPA resource; Mesos projects are delegated to delete_mesos_hpa."""
    if request.project.kind != ProjectKind.K8S.value:
        delete_mesos_hpa(request, project_id, cluster_id, ns_name, namespace_id, name)
        return

    ctx_cluster = CtxCluster.create(
        token=request.user.token.access_token, project_id=project_id, id=cluster_id
    )
    try:
        hpa_client.HPA(ctx_cluster).delete_ignore_nonexistent(name=name, namespace=ns_name)
    except Exception as err:
        logger.error("delete hpa error, namespace: %s, name: %s, error: %s", ns_name, name, err)
        raise hpa_exceptions.DeleteHPAError(_("删除HPA资源失败"))

    # Deletion succeeded: flag the instance config as deleted.
    InstanceConfig.objects.filter(
        namespace=namespace_id, category=K8sResourceName.K8sHPA.value, name=name
    ).update(
        updator=request.user.username,
        oper_type=application_constants.DELETE_INSTANCE,
        deleted_time=timezone.now(),
        is_deleted=True,
        is_bcs_success=True,
    )
def test_normal(self, project_id, cluster_id):
    """A well-formed PaaSCC payload resolves the expected API environment name."""
    querier = BcsAPIEnvironmentQuerier(
        CtxCluster.create(cluster_id, project_id, token='token')
    )
    with StubPaaSCCClient.get_cluster_by_id.mock(return_value=fake_cc_get_cluster_result_ok):
        assert querier.do() == 'my_stag'
def hpa_client(self, project_id, cluster_id):
    """Fixture: an HPA client for the test cluster; skip the test if it can't be built."""
    try:
        ctx_cluster = CtxCluster.create(token='token', project_id=project_id, id=cluster_id)
        return hpa_client.HPA(ctx_cluster)
    except ResourceNotFoundError:
        pytest.skip('Can not initialize HPA client, skip')
def _update_resources(access_token, release_data, namespace_info, manifest):
    """Replace the Deployment described by the release with the new manifest.

    Returns the replaced resource as a plain dict.
    """
    ctx_cluster = CtxCluster.create(
        token=access_token,
        id=namespace_info['cluster_id'],
        project_id=release_data['project_id'],
    )
    replaced = Deployment(ctx_cluster).replace(
        body=manifest, name=release_data['name'], namespace=namespace_info['name']
    )
    return replaced.data.to_dict()
def _set_ctx_project_cluster(self, request, project_id: str, cluster_id: str):
    """Attach stub ctx_project / ctx_cluster objects to the request for tests."""
    from backend.container_service.clusters.base.models import CtxCluster
    from backend.container_service.projects.base.models import CtxProject

    access_token = 'access_token_for_test'
    request.ctx_project = CtxProject.create(token=access_token, id=project_id)
    # No cluster id means the request is not scoped to a cluster.
    request.ctx_cluster = (
        CtxCluster.create(token=access_token, id=cluster_id, project_id=project_id)
        if cluster_id
        else None
    )
def generate_api_client(access_token: str, project_id: str, cluster_id: str) -> ApiClient:
    """Build an ApiClient configured for the given cluster and access token."""
    ctx_cluster = CtxCluster.create(id=cluster_id, project_id=project_id, token=access_token)
    config = BcsKubeConfigurationService(ctx_cluster).make_configuration()
    auth_header = json.dumps({"access_token": access_token})
    return ApiClient(config, header_name='X-BKAPI-AUTHORIZATION', header_value=auth_header)
def test_create_nonexistent(self, common_patch):
    """Fetching a missing namespace should trigger the creation path."""
    ns_client = Namespace(
        CtxCluster.create(TEST_CLUSTER_ID, TEST_PROJECT_ID, token='token')
    )
    result = ns_client.get_or_create_cc_namespace(
        self.namespace_for_test, 'admin', annotations={'test_key': 'test_val'}
    )
    assert result == {'name': self.namespace_for_test, 'namespace_id': 2}
    # Clean up the namespace created by the call above.
    ns_client.delete(name=self.namespace_for_test)
def _set_ctx_project_cluster(self, request, project_id: str, cluster_id: str):
    """Attach ctx_project / ctx_cluster to the request using the caller's access token."""
    access_token = request.user.token.access_token
    request.ctx_project = CtxProject.create(token=access_token, id=project_id)
    # No cluster id means the request is not scoped to a cluster.
    request.ctx_cluster = (
        CtxCluster.create(token=access_token, id=cluster_id, project_id=project_id)
        if cluster_id
        else None
    )
def update_or_create_crd():
    """Fixture: create the sample CRD, then delete it and wait until the deletion finishes."""
    crd_client = CustomResourceDefinition(
        CtxCluster.create(token='token', project_id=TEST_PROJECT_ID, id=TEST_CLUSTER_ID),
        api_version=crd_manifest["apiVersion"],
    )
    crd_name = crd_manifest['metadata']['name']
    crd_client.create(body=crd_manifest, namespace='default', name=crd_name)
    yield
    crd_client.delete_wait_finished(namespace='default', name=crd_name)
def get_access_cluster_context(self):
    """Return the information needed to access the cluster (address, identifier, token)."""
    # Resolve the cluster's API environment first.
    # TODO: fold this logic into the kubeconfig assembly.
    ctx_cluster = CtxCluster.create(
        id=self.cluster_id, project_id=self.project_id, token=self.access_token
    )
    env_name = BcsAPIEnvironmentQuerier(ctx_cluster).do()
    server_address = f"{settings.BCS_APIGW_DOMAIN[env_name]}/clusters/{self.cluster_id}"
    return {
        'server_address': server_address,
        'identifier': self.cluster_id,
        'user_token': settings.BCS_APIGW_TOKEN,
    }
def handler_k8shpa(self, ns, cluster_id, spec):
    """Apply an HPA spec to the cluster; trigger a rollback on failure."""
    ctx_cluster = CtxCluster.create(
        token=self.access_token, project_id=self.project_id, id=cluster_id
    )
    hpa = HPA(ctx_cluster)
    hpa_name = spec["metadata"]["name"]
    spec['apiVersion'] = DEFAULT_HPA_API_VERSION
    try:
        result = hpa.update_or_create(spec, hpa_name, ns)
        logger.debug("deploy hpa success, %s", result)
    except Exception as err:
        logger.exception('deploy hpa error, %s', err)
        raise Rollback({})
def _create_kubernetes_namespace(
    self,
    access_token: str,
    username: str,
    project_id: str,
    cluster_id: str,
    ns_name: str,
) -> Dict:
    """Connect to the cluster and ensure the namespace exists (creating the CC record when needed)."""
    # TODO: migrate to IAM V3 — resolve namespace name / cluster id / project id
    #       from the namespace ID registered with the V0 permission center.
    ctx_cluster = CtxCluster.create(token=access_token, id=cluster_id, project_id=project_id)
    return Namespace(ctx_cluster).get_or_create_cc_namespace(ns_name, username)
def pre_create(self, request, data):
    """Prepare LB creation: ensure the namespace, persist the LB config, label the nodes."""
    # 1. Ensure the bcs-system namespace exists.
    ns_info = self.get_or_create_namespace(request, data["project_id"], data["cluster_id"])
    data["namespace"] = ns_info["name"]
    data["namespace_id"] = ns_info["namespace_id"]
    # 2. Save the LB configuration.
    self.create_lb_conf(data)
    # 3. Label the target nodes (key: value is nodetype: lb).
    ctx_cluster = CtxCluster.create(
        token=request.user.token.access_token,
        id=data["cluster_id"],
        project_id=data["project_id"],
    )
    LBController(ctx_cluster).add_labels(data["ip_list"])
    return {"ns_info": ns_info}
def get_cluster_nodes(access_token, project_id, cluster_id):
    """Fetch node info directly from the cluster to avoid stale data.

    Returns a list of {"inner_ip", "status"} dicts; empty list when the query fails.
    """
    ctx_cluster = CtxCluster.create(id=cluster_id, project_id=project_id, token=access_token)
    try:
        cluster_nodes = Node(ctx_cluster).list(is_format=False)
    except Exception as err:
        logger.error("查询集群内节点数据异常, %s", err)
        return []
    return [
        {"inner_ip": item.inner_ip, "status": item.node_status}
        for item in cluster_nodes.items
    ]
def get_cluster_hpa_list(request, project_id, cluster_id, namespace=None):
    """Return the basic HPA list for a cluster; empty list when the query fails."""
    # HPAs on shared clusters are never shown.
    if get_cluster_type(cluster_id) == ClusterType.SHARED:
        return []

    project_code = request.project.english_name
    hpa_list = []
    try:
        ctx_cluster = CtxCluster.create(
            token=request.user.token.access_token, project_id=project_id, id=cluster_id
        )
        formatter = HPAFormatter(cluster_id, project_code)
        hpa_list = hpa_client.HPA(ctx_cluster).list(formatter=formatter, namespace=namespace)
    except Exception as err:
        logger.error("get hpa list error, %s", err)
    return hpa_list
def destroy(self, request, project_id, pk):
    """Delete an nginx ingress LB.

    1. Mark the LB config as deleted
    2. Remove the nodetype labels from the nodes
    3. Destroy the associated helm release (if any)
    """
    lb_conf = self.get_object()
    # Mark the LB record as deleted.
    self.delete_lb_conf(lb_conf)
    # Remove the node labels.
    access_token = request.user.token.access_token
    ip_used_data = convert_ip_used_data(
        access_token, lb_conf.project_id, lb_conf.cluster_id, json.loads(lb_conf.ip_info)
    )
    ctx_cluster = CtxCluster.create(
        token=access_token, id=lb_conf.cluster_id, project_id=project_id
    )
    LBController(ctx_cluster).delete_labels(ip_used_data)
    # Destroy the helm release; nothing more to do when there is none.
    release = self.get_helm_release(
        lb_conf.cluster_id,
        K8S_LB_CHART_NAME,
        namespace_id=lb_conf.namespace_id,
        namespace=lb_conf.namespace,
    )
    if not release:
        return Response()
    user_log = log_client.ContextActivityLogClient(
        project_id=project_id,
        user=request.user.username,
        resource_type='lb',
        resource="%s:%s" % (lb_conf.cluster_id, lb_conf.namespace_id),
        resource_id=pk,
    )
    release.destroy(username=request.user.username, access_token=access_token)
    user_log.log_delete(activity_status="succeed")
    return Response()
def list(self, request, project_id):
    """List cluster nodes enriched with labels, taints and permissions."""
    cluster_id = self.get_cluster_id(request)
    # Raw node records for the cluster.
    raw = self.get_node_list(request, project_id, cluster_id)
    node_list = raw.get('results') or []
    if not node_list:
        return Response({'code': 0, 'result': []})
    node_id_info_map = self.exclude_removed_status_node(node_list)
    # Labels attached to the remaining nodes.
    node_label_list = self.get_labels_by_node(request, project_id, node_id_info_map.keys())
    # Cluster id / name / environment for rendering.
    cluster_name_env = self.get_cluster_id_info_map(request, project_id)
    # Node taints are read straight from the cluster.
    ctx_cluster = CtxCluster.create(
        token=request.user.token.access_token, id=cluster_id, project_id=project_id
    )
    nodes = query_cluster_nodes(ctx_cluster)
    composed = self.compose_nodes(
        node_id_info_map, node_label_list, request.project['english_name'], cluster_name_env, nodes
    )
    # Attach node permissions before returning.
    nodes_results = bcs_perm.Cluster.hook_perms(request, project_id, composed)
    return Response({'count': len(composed), 'results': nodes_results})
def ctx_cluster(cluster_id, project_id):
    """Fixture: a CtxCluster built with a random 12-char token."""
    token = generate_random_string(12)
    return CtxCluster.create(id=cluster_id, token=token, project_id=project_id)
def list(self, request, project_id, *args, **kwargs):
    """List helm releases for a project, optionally filtered by cluster/namespace.

    Side effects per item: backfills an empty ``version`` from the chart
    snapshot, and marks timed-out transitions as failed in the DB.
    """
    project_cluster = self.get_project_cluster(request, project_id)
    qs = self.get_queryset()
    # Read the filter parameters.
    params = request.query_params
    # Cluster and namespace are the filters that must be passed.
    cluster_id = params.get('cluster_id')
    namespace = params.get("namespace")
    # TODO: write into the DB first so the frontend won't fail when fetching
    # data by ID; later expose this through the helm service API instead.
    if cluster_id:
        try:
            ctx_cluster = CtxCluster.create(
                id=cluster_id, token=request.user.token.access_token, project_id=project_id)
            RecordReleases(ctx_cluster, namespace).record()
        except Exception as e:
            logger.error("获取集群内release数据失败,%s", e)
    if cluster_id:
        qs = qs.filter(cluster_id=cluster_id)
    if namespace:
        # Namespace filtering only makes sense within one cluster.
        if not cluster_id:
            raise ValidationError(_("命名空间作为过滤参数时,需要提供集群ID"))
        qs = qs.filter(namespace=namespace)
    # Serialize the data to return.
    slz = ReleaseListSLZ(qs, many=True)
    data = slz.data
    # do fix on the data which version is emtpy
    iam_ns_ids = []
    app_list = []
    for item in data:
        # Skip releases living in k8s-system / bcs-platform namespaces.
        if item["namespace"] in K8S_PLAT_NAMESPACE:
            continue
        cluster_info = project_cluster.get(item['cluster_id']) or {
            'name': item['cluster_id']
        }
        item['cluster_name'] = cluster_info['name']
        item['iam_ns_id'] = calc_iam_ns_id(item['cluster_id'], item['namespace'])
        iam_ns_ids.append({'iam_ns_id': item['iam_ns_id']})
        item['cluster_env'] = settings.CLUSTER_ENV_FOR_FRONT.get(
            cluster_info.get('environment'))
        item["current_version"] = item.pop("version")
        if not item["current_version"]:
            # Backfill the version from the chart version snapshot.
            version = App.objects.filter(id=item["id"]).values_list(
                "release__chartVersionSnapshot__version", flat=True)[0]
            App.objects.filter(id=item["id"]).update(version=version)
            item["current_version"] = version
        # Detect a timed-out transition and persist the failed state.
        if self._is_transition_timeout(item["updated"], item["transitioning_on"]):
            err_msg = _("Helm操作超时,请重试!")
            App.objects.filter(id=item["id"]).update(
                transitioning_on=False,
                transitioning_result=False,
                transitioning_message=err_msg,
            )
            item["transitioning_result"] = False
            item["transitioning_on"] = False
            item["transitioning_message"] = err_msg
        app_list.append(item)
    result = {
        "count": len(app_list), "next": None, "previous": None, "results": app_list
    }
    # NOTE(review): NamespaceRequest appears to require a cluster_id; the
    # TypeError fallback returns a plain response without perms — confirm.
    try:
        ns_request = NamespaceRequest(project_id=project_id, cluster_id=cluster_id)
    except TypeError:
        return Response(result)
    else:
        return PermsResponse(
            data=result,
            resource_request=ns_request,
            resource_data=iam_ns_ids,
        )
def update(self, request, project_id, pk):
    """
    Update the LB configuration. Covers the following scenarios:
    1. Adding/removing LB protocol types
    2. Adding/removing node count (labels + replica)
    """
    serializer = UpdateK8SLoadBalancerSLZ(data=request.data)
    serializer.is_valid(raise_exception=True)
    data = serializer.data
    username = request.user.username
    data.update({"id": pk, "updator": username})
    lb_conf = self.get_k8s_lb_info(data["id"])
    del_labels_ip_list, add_labels_ip_list = self.get_ip_list(
        request, data, lb_conf)
    ctx_cluster = CtxCluster.create(token=request.user.token.access_token,
                                    id=lb_conf.cluster_id,
                                    project_id=project_id)
    client = LBController(ctx_cluster)
    # Remove labels from nodes that were dropped from the config.
    if del_labels_ip_list:
        client.delete_labels(del_labels_ip_list)
    # Add labels to newly configured nodes.
    if add_labels_ip_list:
        client.add_labels(add_labels_ip_list)
    # Update the LB record itself.
    self.update_lb_conf(lb_conf, data["ip_info"], data["protocol_type"],
                        username)
    release = self.get_helm_release(lb_conf.cluster_id, lb_conf.name,
                                    namespace_id=lb_conf.namespace_id)
    if not release:
        raise error_codes.ResNotFoundError(_("没有查询到对应的release信息"))
    data["namespace_id"] = lb_conf.namespace_id
    user_log = log_client.ContextActivityLogClient(
        project_id=project_id,
        user=request.user.username,
        resource_type='lb',
        resource="%s:%s" % (lb_conf.cluster_id, lb_conf.namespace_id),
        resource_id=pk,
        extra=json.dumps(data),
    )
    # The release version looks like "(current-unchanged) v1.1.2";
    # strip the prefix to get the bare version string.
    version = data["version"].split(
        constants.RELEASE_VERSION_PREFIX)[-1].strip()
    chart_version = self.get_chart_version(project_id, version)
    access_token = request.user.token.access_token
    sys_variables = self.collect_system_variable(access_token, project_id,
                                                 data["namespace_id"])
    updated_instance = release.upgrade_app(
        access_token=access_token,
        chart_version_id=chart_version.id,
        answers=[],
        customs=[],
        valuefile=data["values_content"],
        updator=username,
        sys_variables=sys_variables,
    )
    if updated_instance.transitioning_result:
        user_log.log_modify(activity_status="succeed")
        return Response()
    user_log.log_modify(activity_status="failed")
    raise error_codes.APIError(updated_instance.transitioning_message)
{"address": fake_inner_ip, "type": "InternalIP"}, {"address": fake_node_name, "type": "Hostname"}, ], "conditions": [ { 'lastHeartbeatTime': '2021-06-08T01:55:53Z', 'lastTransitionTime': '2020-08-20T12:14:43Z', 'message': 'kubelet is posting ready status', 'reason': 'KubeletReady', 'status': 'False', 'type': 'Ready', } ], }, } fake_ctx_cluster = CtxCluster.create(token="token", id="BCS-K8S-15091", project_id="test") @patch( "backend.container_service.clusters.tools.node.Node.list", return_value=[NodeResourceData(name=fake_node_name, inner_ip=fake_inner_ip, data=fake_resource_data)], ) def query_cluster_nodes(mock_list): cluster_nodes = node.query_cluster_nodes(fake_ctx_cluster) assert fake_inner_ip in cluster_nodes assert cluster_nodes[fake_inner_ip]["node_name"] == fake_node_name assert cluster_nodes[fake_inner_ip]["status"] == NodeConditionStatus.Ready assert not cluster_nodes[fake_inner_ip]["unschedulable"] @pytest.mark.parametrize(