def test_reap_dead_node(self): node = copy.deepcopy(self.dummy_node) TestInstance = collections.namedtuple('TestInstance', ['launch_time']) instance = TestInstance(datetime.now(pytz.utc)) ready_condition = None for condition in node['status']['conditions']: if condition['type'] == 'Ready': ready_condition = condition break ready_condition['status'] = 'Unknown' ready_condition['lastHeartbeatTime'] = datetime.isoformat( datetime.now(pytz.utc) - timedelta(minutes=30)) kube_node = KubeNode(pykube.Node(self.api, node)) kube_node.delete = mock.Mock(return_value="mocked stuff") self.cluster.maintain([kube_node], {kube_node.instance_id: instance}, {}, [], []) kube_node.delete.assert_not_called() ready_condition['lastHeartbeatTime'] = datetime.isoformat( datetime.now(pytz.utc) - timedelta(hours=2)) kube_node = KubeNode(pykube.Node(self.api, node)) kube_node.delete = mock.Mock(return_value="mocked stuff") self.cluster.maintain([kube_node], {kube_node.instance_id: instance}, {}, [], []) kube_node.delete.assert_called_once_with()
def test_max_scale_in(self): node1 = copy.deepcopy(self.dummy_node) node2 = copy.deepcopy(self.dummy_node) TestInstance = collections.namedtuple('TestInstance', ['launch_time']) instance1 = TestInstance(datetime.now(pytz.utc)) instance2 = TestInstance(datetime.now(pytz.utc)) for node in [node1, node2]: for condition in node['status']['conditions']: if condition['type'] == 'Ready': condition['status'] = 'Unknown' condition['lastHeartbeatTime'] = datetime.isoformat( datetime.now(pytz.utc) - timedelta(hours=2)) break kube_node1 = KubeNode(pykube.Node(self.api, node1)) kube_node1.delete = mock.Mock(return_value="mocked stuff") kube_node2 = KubeNode(pykube.Node(self.api, node2)) kube_node2.delete = mock.Mock(return_value="mocked stuff") self.cluster.maintain([kube_node1, kube_node2], { kube_node1.instance_id: instance1, kube_node2.instance_id: instance2 }, {}, [], []) kube_node1.delete.assert_not_called() kube_node2.delete.assert_not_called()
def put(self, name): if "values" not in flask.request.json: return {"error": "missing config"}, 400 obj = pykube.KlotIOApp.objects(kube()).filter().get(name=name).obj fields = self.fields(obj, flask.request.json["values"]) if not fields.validate(): return {"fields": fields.to_list(), "errors": fields.errors}, 400 obj["settings"] = fields.values for field in fields: if not field.content.get("node") or field.value == field.original: continue label = f"{obj['metadata']['name']}/{field.name}" current = field.value if field.multi else [field.value] original = field.original if field.multi else [field.original] for value in current: if value not in original: node = pykube.Node.objects(kube()).get(name=value).obj node["metadata"]["labels"][label] = field.content["node"] pykube.Node(kube(), node).replace() if obj["status"] == "Installed": obj["status"] = "Installing" for value in original: if value not in current: node = pykube.Node.objects(kube()).get(name=value).obj del node["metadata"]["labels"][label] pykube.Node(kube(), node).replace() if obj["status"] == "NeedSettings": obj["status"] = "Installing" pykube.KlotIOApp(kube(), obj).replace() try: config = pykube.ConfigMap.objects(kube()).filter( namespace=obj["spec"]["namespace"]).get(name="config").obj config["data"] = { "settings.yaml": yaml.safe_dump(flask.request.json["values"]) } pykube.ConfigMap(kube(), config).replace() except pykube.ObjectDoesNotExist: pass return {"values": flask.request.json["values"]}
def delete(self): if self.singular not in flask.request.json: return {"error": "missing %s" % self.singular}, 400 label = flask.request.json[self.singular] errors = [] for field in ["app", "name", "node"]: if field not in label: errors.append("missing %s.%s" % (self.singular, field)) if errors: return {"errors": errors}, 400 app_labels = [] for obj in [app.obj for app in pykube.App.objects(kube()).filter()]: if "labels" in obj["spec"]: for app_label in obj["spec"]["labels"]: app_labels.append("%s/%s" % (obj["metadata"]["name"], app_label["name"])) if "%s/%s" % (label["app"], label["name"]) not in app_labels: return {"error": "invalid label %s/%s" % (label["app"], label["name"])}, 400 obj = pykube.Node.objects(kube()).filter().get(name=flask.request.json[self.singular]["node"]).obj if "labels" in obj["metadata"] and "%s/%s" % (label["app"], label["name"]) in obj["metadata"]["labels"]: del obj["metadata"]["labels"]["%s/%s" % (label["app"], label["name"])] pykube.Node(kube(), obj).replace() return {"message": "%s unlabeled %s/%s" % (label["node"], label["app"], label["name"])}
def create_node(self, pool_name, index): dummy_node = deepcopy(self.dummy_node_ref) node_name = 'k8-{}-16334397-{}'.format(pool_name, index) dummy_node['metadata']['name'] = node_name dummy_node['metadata']['labels']['kubernetes.io/hostname'] = node_name node = KubeNode(pykube.Node(self.api, dummy_node)) return node
def test_get_pending_pods(self): dummy_node = copy.deepcopy(self.dummy_node) dummy_node['metadata']['name'] = 'k8s-agentpool1-16334397-0' node = KubeNode(pykube.Node(self.api, dummy_node)) pod = KubePod(pykube.Pod(self.api, self.dummy_pod)) act = self.cluster.get_pending_pods([pod], [node]) self.assertEqual(len(act), 0) node = KubeNode(pykube.Node(self.api, dummy_node)) pod2 = KubePod(pykube.Pod(self.api, self.dummy_pod)) pod3 = KubePod(pykube.Pod(self.api, self.dummy_pod)) act = self.cluster.get_pending_pods([pod, pod2, pod3], [node]) #only one should fit self.assertEqual(len(act), 2)
def _spin_up_node(self): # spin up dummy ec2 node self.asg_client.set_desired_capacity(AutoScalingGroupName='dummy-asg', DesiredCapacity=1) response = self.asg_client.describe_auto_scaling_groups() instance_id = response['AutoScalingGroups'][0]['Instances'][0]['InstanceId'] self.dummy_node['metadata']['labels']['aws/id'] = instance_id node = KubeNode(pykube.Node(self.api, self.dummy_node)) return node
def delete_kubernetes_node(kube_api, node_name): if node_name is not None and get_node(kube_api, node_name) is not None: obj = { "apiVersion": "v1", "kind": "Node", "metadata": { "name": node_name } } pykube.Node(kube_api, obj).delete()
def get_node(kube_api, name): """ Retrieve Node from cluster :param kube_api: Pykube API Object :param name: Name of Node :return: Pykube Node object from cluster """ node = pykube.Node(kube_api, base_obj("Node", "", name)) node.reload() return node
def create_nodes(self, nb_pool, nb_nodes_per_pool): nodes = [] for pool_idx in range(nb_pool): for node_idx in range(nb_nodes_per_pool): dummy_node = copy.deepcopy(self.dummy_node_ref) node_name = 'k8-agentpool{}-16334397-{}'.format( pool_idx, node_idx) dummy_node['metadata']['name'] = node_name dummy_node['metadata']['labels'][ 'kubernetes.io/hostname'] = node_name node = KubeNode(pykube.Node(self.api, dummy_node)) nodes.append(node) return nodes
def change_label(api, nodename, new_id, aws_region): obj = { "apiVersion": "v1", "kind": "Node", "metadata": { "name": nodename, "labels": { RUN_ID_LABEL: new_id } } } node = pykube.Node(api, obj) node.labels[RUN_ID_LABEL] = new_id node.labels[AWS_REGION_LABEL] = aws_region node.update()
def _spin_up_node(self, launch_time=None): # spin up dummy ec2 node self.asg_client.set_desired_capacity(AutoScalingGroupName='dummy-asg', DesiredCapacity=1) response = self.asg_client.describe_auto_scaling_groups() instance_id = response['AutoScalingGroups'][0]['Instances'][0][ 'InstanceId'] self.dummy_node['metadata']['labels']['aws/id'] = instance_id node = KubeNode(pykube.Node(self.api, self.dummy_node)) node.cordon = mock.Mock(return_value="mocked stuff") node.drain = mock.Mock(return_value="mocked stuff") node.uncordon = mock.Mock(return_value="mocked stuff") node.delete = mock.Mock(return_value="mocked stuff") return node
def delete_kube_node(nodename, run_id, api): if nodename is None: nodes = pykube.Node.objects(api).filter( selector={RUN_ID_LABEL: run_id}) if len(nodes.response['items']) > 0: node = nodes.response['items'][0] nodename = node['metadata']['name'] if nodename is not None: obj = { "apiVersion": "v1", "kind": "Node", "metadata": { "name": nodename, "labels": { "runid": run_id } } } pykube.Node(api, obj).delete()
def _spin_up_nodes(self, count, launch_time=None): assert count <= 256 # spin up dummy ec2 node self.asg_client.set_desired_capacity(AutoScalingGroupName='dummy-asg', DesiredCapacity=count) response = self.asg_client.describe_auto_scaling_groups() nodes = [] for i, instance in enumerate(response['AutoScalingGroups'][0]['Instances']): instance_id = instance['InstanceId'] dummy_node = copy.deepcopy(self.dummy_node) dummy_node['metadata']['labels']['aws/id'] = instance_id dummy_node['metadata']['name'] = '10.0.' + str(i) + '.228' node = KubeNode(pykube.Node(self.api, dummy_node)) node.cordon = mock.Mock(return_value="mocked stuff") node.drain = mock.Mock(return_value="mocked stuff") node.uncordon = mock.Mock(return_value="mocked stuff") node.delete = mock.Mock(return_value="mocked stuff") nodes.append(node) return nodes
def post(self): if self.singular not in flask.request.json: return {"error": "missing %s" % self.singular}, 400 label = flask.request.json[self.singular] errors = [] for field in ["app", "name", "value", "node"]: if field not in label: errors.append("missing %s.%s" % (self.singular, field)) if errors: return {"errors": errors}, 400 app_labels = {} for obj in [app.obj for app in pykube.App.objects(kube()).filter()]: if "labels" in obj["spec"]: for app_label in obj["spec"]["labels"]: app_labels["%s/%s=%s" % (obj["metadata"]["name"], app_label["name"], app_label["value"])] = app_label if "%s/%s=%s" % (label["app"], label["name"], label["value"]) not in app_labels: return {"error": "invalid label %s/%s=%s" % (label["app"], label["name"], label["value"])}, 400 app_label = app_labels["%s/%s=%s" % (label["app"], label["name"], label["value"])] obj = pykube.Node.objects(kube()).filter().get(name=flask.request.json[self.singular]["node"]).obj if obj["metadata"]["name"] == platform.node() and ("master" not in app_label or not app_label["master"]): return {"error": "can't label master with %s/%s=%s" % (label["app"], label["name"], label["value"])}, 400 if "labels" not in obj["metadata"]: obj["metadata"]["labels"] = {} obj["metadata"]["labels"]["%s/%s" % (label["app"], label["name"])] = label["value"] pykube.Node(kube(), obj).replace() return {"message": "%s labeled %s/%s=%s" % (label["node"], label["app"], label["name"], label["value"])}
def label_node(nodename, run_id, api, cluster_name, cluster_role, aws_region): pipe_log('Assigning instance {} to RunID: {}'.format(nodename, run_id)) obj = { "apiVersion": "v1", "kind": "Node", "metadata": { "name": nodename, "labels": { "runid": run_id, "aws_region": aws_region } } } if cluster_name: obj["metadata"]["labels"]["cp-cluster-name"] = cluster_name if cluster_role: obj["metadata"]["labels"]["cp-cluster-role"] = cluster_role pykube.Node(api, obj).update() pipe_log('Instance {} is assigned to RunID: {}\n-'.format( nodename, run_id))
def test_can_fit(self): pod = KubePod(pykube.Pod(self.api, self.dummy_pod)) node = KubeNode(pykube.Node(self.api, self.dummy_node)) assert node.can_fit(pod.resources)