Code example #1
def cordon_node(node_name):
    """
    Cordon a kubernetes node to avoid new pods being scheduled on it
    """

    try:
        config.load_incluster_config()
    except config.ConfigException:
        try:
            config.load_kube_config()
        except config.ConfigException:
            raise Exception("Could not configure kubernetes python client")

    configuration = client.Configuration()
    # create an instance of the API class
    k8s_api = client.CoreV1Api(client.ApiClient(configuration))
    logger.info("Cordoning k8s node {}...".format(node_name))
    try:
        api_call_body = client.V1Node(spec=client.V1NodeSpec(
            unschedulable=True))
        if not app_config['DRY_RUN']:
            k8s_api.patch_node(node_name, api_call_body)
        else:
            # dry_run is a string parameter; the API server only accepts "All"
            k8s_api.patch_node(node_name, api_call_body, dry_run="All")
        logger.info("Node cordoned")
    except ApiException as e:
        logger.info(
            "Exception when calling CoreV1Api->patch_node: {}".format(e))
Code example #2
def testpod():

    # assumes a configured client handle, e.g. apis_api = client.CoreV1Api()
    pod_name = 'nginx-no-split-655b866cfd-54xmg'
    namespace = 'default'
    read_pod = apis_api.read_namespaced_pod(name=pod_name, namespace=namespace)

    # spec.node_selector is a plain dict of label key/value pairs
    node_selector = read_pod.spec.node_selector

    pprint("node_selector type={}".format(type(node_selector)))
    pprint("node_selector data={}".format(node_selector))
    pprint("lifecycle={}".format(node_selector['lifecycle']))

    read_pod.spec.node_selector['lifecycle'] = 'OnDemand1'

    # in a raw dict body the JSON field name is 'nodeSelector', not the
    # Python attribute name 'node_selector'
    body = {"spec": {"nodeSelector": {"lifecycle": "OnDemand"}}}
    response = apis_api.patch_namespaced_pod(name=pod_name,
                                             namespace=namespace,
                                             body=body)

    pprint("response={}".format(response))

    v1_node = client.V1Node()
    pprint("V1Node={}".format(v1_node))
Code example #3
def test_add_label_to_node(cl, client, has_conf):
    fake_node_name = "fake_node.com"

    has_conf.return_value = False
    v1 = MagicMock()

    condition = k8sClient.V1NodeCondition(type="Ready", status="True")
    status = k8sClient.V1NodeStatus(conditions=[condition])
    spec = k8sClient.V1NodeSpec(unschedulable=False)
    metadata = k8sClient.V1ObjectMeta(name=fake_node_name,
                                      labels={"label1": "True"})
    node = k8sClient.V1Node(status=status, spec=spec, metadata=metadata)
    response = k8sClient.V1NodeList(items=[node])

    v1.list_node_with_http_info.return_value = response
    v1.patch_node.return_value = node
    client.CoreV1Api.return_value = v1

    label_selector = 'label_default=true'

    add_label_to_node(label_selector=label_selector,
                      label_name="label1",
                      label_value="value1")

    v1.list_node_with_http_info.assert_called_with(
        label_selector=label_selector,
        _preload_content=True,
        _return_http_data_only=True)
    v1.patch_node.assert_called_with(
        fake_node_name, {'metadata': {
            'labels': {
                'label1': "value1"
            }
        }})
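
Note: the asserted patch body names only metadata.labels.label1; in a
strategic merge patch, map entries are merged into the existing labels rather
than replacing the whole set, so other labels on the node survive the call.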
Code example #4
File: k8s.py Project: kermit832/eks-rolling-update
def taint_node(node_name):
    """
    Taint a kubernetes node to avoid new pods being scheduled on it
    """

    try:
        config.load_incluster_config()
    except config.ConfigException:
        try:
            config.load_kube_config()
        except config.ConfigException:
            raise Exception("Could not configure kubernetes python client")

    configuration = client.Configuration()
    # create an instance of the API class
    k8s_api = client.CoreV1Api(client.ApiClient(configuration))
    logger.info("Adding taint to k8s node {}...".format(node_name))
    try:
        taint = client.V1Taint(effect='NoSchedule', key='eks-rolling-update')
        api_call_body = client.V1Node(spec=client.V1NodeSpec(taints=[taint]))
        if not app_config['DRY_RUN']:
            k8s_api.patch_node(node_name, api_call_body)
        else:
            # dry_run is a string parameter; the API server only accepts "All"
            k8s_api.patch_node(node_name, api_call_body, dry_run="All")
        logger.info("Added taint to the node")
    except ApiException as e:
        logger.info("Exception when calling CoreV1Api->patch_node: {}".format(e))
Code example #5
def create_node(meta: Dict[str, Any] = None, spec: Dict[str, Any] = None,
                secrets: Secrets = None) -> client.V1Node:
    """
    Create one new node in the cluster.

    Due to the way things work on certain cloud providers, you won't be able
    to use this meaningfully on them. For instance on GCE, this will likely
    fail.

    See also: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#idempotency
    """  # noqa: E501
    api = create_k8s_api_client(secrets)

    v1 = client.CoreV1Api(api)
    body = client.V1Node()

    body.metadata = client.V1ObjectMeta(**meta) if meta else None
    body.spec = client.V1NodeSpec(**spec) if spec else None

    try:
        res = v1.create_node(body)
    except ApiException as x:
        raise FailedActivity("Creating new node failed: {}".format(x.body))

    logger.debug("Node '{}' created".format(res.metadata.name))

    return res
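
A possible invocation (the node name and labels here are illustrative):

    node = create_node(meta={"name": "worker-extra-1",
                             "labels": {"role": "worker"}},
                       spec={"unschedulable": False})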
Code example #6
def fake_node_false():
    return client.V1Node(
        api_version='v1',
        kind='Node',
        metadata=client.V1ObjectMeta(name='curry-node-test',
                                     labels={'name': 'curry-node-test'}),
        status=client.V1NodeStatus(
            conditions=[client.V1NodeCondition(status='False', type='Ready')]))
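
Note: this fixture models a NotReady node; its Ready condition carries
status='False', which is what readiness checks key on.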
Code example #7
def create_node_object(name: str = "default",
                       labels: dict = None) -> k8sClient.V1Node:
    condition = k8sClient.V1NodeCondition(type="Ready", status="True")
    status = k8sClient.V1NodeStatus(conditions=[condition])
    spec = k8sClient.V1NodeSpec(unschedulable=False)
    metadata = k8sClient.V1ObjectMeta(name=name, labels=labels)
    node = k8sClient.V1Node(status=status, spec=spec, metadata=metadata)
    return node
Code example #8
def test():
    try:
        # instantiate a few empty client models to inspect their defaults
        V1NamespaceBody = client.V1Namespace()
        pprint("V1NamespaceBody={}".format(V1NamespaceBody))
        
        V1ObjectReferenceBody = client.V1ObjectReference()
        pprint("V1ObjectReferenceBody={}".format(V1ObjectReferenceBody))    
        
        V1ObjectMetaBody = client.V1ObjectMeta()
        pprint("V1ObjectMetaBody={}".format(V1ObjectMetaBody))    
    
    
        #V1BindingBody = client.V1Binding()
        #pprint("V1BindingBody={}".format(V1BindingBody))    
    
    
        V1ConfigMapBody = client.V1ConfigMap()
        pprint("V1ConfigMapBody={}".format(V1ConfigMapBody))    
    
    
        V1Pod = client.V1Pod()
        pprint("V1Pod={}".format(V1Pod))    
    
    
        V1PodTemplate = client.V1PodTemplate()
        pprint("V1PodTemplate={}".format(V1PodTemplate))    
    
    
        V1ReplicationController = client.V1ReplicationController()
        pprint("V1ReplicationController={}".format(V1ReplicationController))    
    
    
        V1Service = client.V1Service()
        pprint("V1Service={}".format(V1Service))    
    
    
        V1Node = client.V1Node()
        pprint("V1Node={}".format(V1Node))   
        
        pod = 'nginx-no-split-655b866cfd-54xmg'
        namespace = 'default'
        read_pod = apis_api.read_namespaced_pod(name=pod, namespace=namespace)
        pprint("read_pod={}".format(read_pod))

        # spec.node_selector is a dict, so entries are accessed by key,
        # not by attribute
        lifecycle = read_pod.spec.node_selector['lifecycle']
        pprint("lifecycle={}".format(lifecycle))
        read_pod.spec.node_selector['lifecycle'] = 'OnDemand'
        pprint("read_pod={}".format(read_pod))
        #metadata = read_pod.metadata
        #pprint("metadata={}".format(metadata))   
        #metadata.cluster_name = 'Ec2SpotEKS4'
        #pprint("metadata={}".format(metadata))   
        
    except Exception as e:
        print("Exception when calling CoreV1Api->create_namespace: %s\n" % e)    
Code example #9
    def set_unschedulable(self, node_name, value=True):
        """Set the spec key 'unschedulable'"""
        scale_logger.debug("Setting %s node's unschedulable property to %r",
                           node_name, value)
        assert node_name not in self.critical_node_names
        new_node = client.V1Node(api_version="v1",
                                 kind="Node",
                                 metadata=client.V1ObjectMeta(name=node_name),
                                 spec=client.V1NodeSpec(unschedulable=value))
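
The excerpt ends after building new_node; presumably the original method goes
on to apply it, along the lines of (a sketch, assuming the class keeps a
CoreV1Api handle, here called self.v1):

    return self.v1.patch_node(node_name, new_node)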
Code example #10
def test_can_select_nodes_by_label(cl, client, has_conf):
    has_conf.return_value = False
    v1 = MagicMock()

    condition = k8sClient.V1NodeCondition(type="Ready", status="True")
    status = k8sClient.V1NodeStatus(conditions=[condition])

    node = k8sClient.V1Node(status=status)

    response = k8sClient.V1NodeList(items=[node])
    v1.list_node.return_value = response
    client.CoreV1Api.return_value = v1

    label_selector = 'beta.kubernetes.io/instance-type=m5.large'
    resp = all_nodes_are_ok(label_selector=label_selector)
    v1.list_node.assert_called_with(
        label_selector=label_selector, _preload_content=False)
    assert resp is True
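
Note: with _preload_content=False the client skips deserialization and
returns the raw urllib3 response, so all_nodes_are_ok is expected to decode
the JSON payload itself.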
Code example #11
File: k8s.py Project: mx51/eks-rolling-update
def cordon_node(node_name):
    """
    Cordon a kubernetes node to avoid new pods being scheduled on it
    """

    ensure_config_loaded()

    # create an instance of the API class
    k8s_api = client.CoreV1Api()
    logger.info("Cordoning k8s node {}...".format(node_name))
    try:
        api_call_body = client.V1Node(spec=client.V1NodeSpec(unschedulable=True))
        if not app_config['DRY_RUN']:
            k8s_api.patch_node(node_name, api_call_body)
        else:
            # dry_run is a string parameter; the API server only accepts "All"
            k8s_api.patch_node(node_name, api_call_body, dry_run="All")
        logger.info("Node cordoned")
    except ApiException as e:
        logger.info("Exception when calling CoreV1Api->patch_node: {}".format(e))
Code example #12
File: k8s.py Project: mx51/eks-rolling-update
def taint_node(node_name):
    """
    Taint a kubernetes node to avoid new pods being scheduled on it
    """

    ensure_config_loaded()

    k8s_api = client.CoreV1Api()
    logger.info("Adding taint to k8s node {}...".format(node_name))
    try:
        taint = client.V1Taint(effect='NoSchedule', key='eks-rolling-update')
        api_call_body = client.V1Node(spec=client.V1NodeSpec(taints=[taint]))
        if not app_config['DRY_RUN']:
            k8s_api.patch_node(node_name, api_call_body)
        else:
            # dry_run is a string parameter; the API server only accepts "All"
            k8s_api.patch_node(node_name, api_call_body, dry_run="All")
        logger.info("Added taint to the node")
    except ApiException as e:
        logger.info("Exception when calling CoreV1Api->patch_node: {}".format(e))
Code example #13
def test_taint_nodes_by_label(gks, cl, client, has_conf):
    fake_node_name = "fake_node.com"
    gks.return_value = {
        'url': 'fake_url.com',
        'token': 'fake_token_towhatever',
        'SLACK_CHANNEL': 'chaos_fanout',
        'SLACK_TOKEN': 'sometoken'
    }
    has_conf.return_value = False
    v1 = MagicMock()

    condition = k8sClient.V1NodeCondition(type="Ready", status="True")
    status = k8sClient.V1NodeStatus(conditions=[condition])
    spec = k8sClient.V1NodeSpec(unschedulable=False)
    metadata = k8sClient.V1ObjectMeta(name=fake_node_name,
                                      labels={"label1": "True"})
    node = k8sClient.V1Node(status=status, spec=spec, metadata=metadata)

    response = k8sClient.V1NodeList(items=[node])

    v1.list_node_with_http_info.return_value = response
    v1.patch_node.return_value = node
    client.CoreV1Api.return_value = v1
    client.V1Taint.return_value = k8sClient.V1Taint(key="",
                                                    value="",
                                                    effect="")

    label_selector = 'label_default=true, label1=True'

    taint_nodes_by_label(label_selector=label_selector,
                         key="key1",
                         value="Apps",
                         effect="NoExec")
    assert v1.patch_node.call_count == 1
    args = v1.patch_node.call_args[0]
    assert args[0] == fake_node_name
    assert args[1]['spec']['taints'][0].key == "key1"
    assert args[1]['spec']['taints'][0].effect == "NoExec"
    assert args[1]['spec']['taints'][0].value == "Apps"
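
Note: 'NoExec' is not an effect a real API server accepts (valid taint
effects are NoSchedule, PreferNoSchedule and NoExecute); the call only
succeeds here because CoreV1Api is mocked.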
Code example #14
def test_remove_label_from_node(gks, cl, client, has_conf):
    fake_node_name = "fake_node.com"
    gks.return_value = {
        'url': 'fake_url.com',
        'token': 'fake_token_towhatever',
        'SLACK_CHANNEL': 'chaos_fanout',
        'SLACK_TOKEN': 'sometoken'
    }
    has_conf.return_value = False
    v1 = MagicMock()

    condition = k8sClient.V1NodeCondition(type="Ready", status="True")
    status = k8sClient.V1NodeStatus(conditions=[condition])
    spec = k8sClient.V1NodeSpec(unschedulable=False)
    metadata = k8sClient.V1ObjectMeta(name=fake_node_name,
                                      labels={"label1": "True"})
    node = k8sClient.V1Node(status=status, spec=spec, metadata=metadata)
    response = k8sClient.V1NodeList(items=[node])

    v1.list_node_with_http_info.return_value = response
    v1.patch_node.return_value = node
    client.CoreV1Api.return_value = v1
    client.V1NodeList.return_value = k8sClient.V1NodeList(items=[])

    label_selector = 'label_default=true, label1=True'

    remove_label_from_node(label_selector, "label1")

    v1.list_node_with_http_info.assert_called_with(
        label_selector=label_selector,
        _preload_content=True,
        _return_http_data_only=True)
    v1.patch_node.assert_called_with(
        fake_node_name, {'metadata': {
            'labels': {
                'label1': None
            }
        }})
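
Note: the asserted body sets the label's value to None, which serializes to
JSON null; in a strategic merge patch a null map value deletes the key, which
is how the helper removes the label.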
Code example #15
    def taint_node(self, name, key, value, effect):
        body = client.V1Node(spec=client.V1NodeSpec(
            taints=[client.V1Taint(key=key, value=value, effect=effect)]))

        return self._v1.patch_node(name, body)
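
A hypothetical call (instance and argument values are illustrative, assuming
an object that stores a CoreV1Api client as self._v1):

    scaler.taint_node('node-1', key='dedicated', value='gpu', effect='NoSchedule')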
Code example #16
    def test_generate_action_plan(self):
        mock_input = {
            'all_nodes': [
                client.V1Node(metadata=client.V1ObjectMeta(
                    name='node-1',
                    creation_timestamp=(now - datetime.timedelta(days=30.1)),
                    annotations={}
                ), spec=client.V1NodeSpec(unschedulable=False)),

                client.V1Node(metadata=client.V1ObjectMeta(
                    name='node-2',
                    creation_timestamp=(now - datetime.timedelta(days=60)),
                    annotations={}
                ), spec=client.V1NodeSpec(unschedulable=False)),

                client.V1Node(metadata=client.V1ObjectMeta(
                    name='node-3',
                    creation_timestamp=(now - datetime.timedelta(days=30.2)),
                    annotations={}
                ), spec=client.V1NodeSpec(unschedulable=True)),

                client.V1Node(metadata=client.V1ObjectMeta(
                    name='node-4',
                    creation_timestamp=(now - datetime.timedelta(days=30.3)),
                    annotations={
                        main.annotation('cordoned'): '',
                    }
                ), spec=client.V1NodeSpec(unschedulable=False)),

                client.V1Node(metadata=client.V1ObjectMeta(
                    name='node-4',
                    creation_timestamp=(now - datetime.timedelta(days=30.4)),
                    annotations={
                        main.annotation('cordoned'): '',
                    }
                ), spec=client.V1NodeSpec(
                    unschedulable=True,
                    taints=[
                        client.V1Taint(
                            key='node.kubernetes.io/unschedulable',
                            effect='NoSchedule',
                            time_added=(now - datetime.timedelta(hours=1)),
                        )
                    ]
                )),

                client.V1Node(metadata=client.V1ObjectMeta(
                    name='node-5',
                    creation_timestamp=(now - datetime.timedelta(days=32.5)),
                    annotations={
                        main.annotation('cordoned'): '',
                    }
                ), spec=client.V1NodeSpec(
                    unschedulable=True,
                    taints=[
                        client.V1Taint(
                            key='node.kubernetes.io/unschedulable',
                            effect='NoSchedule',
                            time_added=(now - datetime.timedelta(days=1.2)),
                        )
                    ]
                )),

                client.V1Node(metadata=client.V1ObjectMeta(
                    name='node-6',
                    creation_timestamp=(now - datetime.timedelta(days=35)),
                    annotations={
                        main.annotation('cordoned'): '',
                    }
                ), spec=client.V1NodeSpec(
                    unschedulable=True,
                    taints=[
                        client.V1Taint(
                            key='node.kubernetes.io/unschedulable',
                            effect='NoSchedule',
                            time_added=(now - datetime.timedelta(days=2)),
                        )
                    ]
                )),

                client.V1Node(metadata=client.V1ObjectMeta(
                    name='node-7',
                    creation_timestamp=(now - datetime.timedelta(days=35)),
                    annotations={
                        main.annotation('cordoned'): '',
                        main.annotation('notifications-sent'): str(int((datetime.datetime.utcnow() - datetime.timedelta(days=2.5)).timestamp())),
                    }
                ), spec=client.V1NodeSpec(
                    unschedulable=True,
                    taints=[
                        client.V1Taint(
                            key='node.kubernetes.io/unschedulable',
                            effect='NoSchedule',
                            time_added=(now - datetime.timedelta(days=4)),
                        )
                    ]
                )),
            ],
            'all_namespaces': [
                client.V1Namespace(metadata=client.V1ObjectMeta(
                    name='ns-1',
                    annotations={
                        'annotation-1': 'bla',
                    })),
                client.V1Namespace(metadata=client.V1ObjectMeta(
                    name='ns-2',
                    annotations={
                        'annotation-2': 'blub',
                    }))
            ],
            'all_pods': [
                client.V1Pod(
                    metadata=client.V1ObjectMeta(namespace='ns-1', name='pod-1', annotations={
                        'annotation-3': '123',
                    }),
                    spec=client.V1PodSpec(node_name='node-5', containers=[])),
                client.V1Pod(
                    metadata=client.V1ObjectMeta(namespace='ns-2', name='pod-2', annotations={
                        'annotation-4': '456',
                    }),
                    spec=client.V1PodSpec(node_name='node-6', containers=[])),
                client.V1Pod(
                    metadata=client.V1ObjectMeta(namespace='ns-2', name='pod-3', annotations={
                        'annotation-5': '789',
                    }),
                    spec=client.V1PodSpec(node_name='node-7', containers=[])),
            ],
            'args': args,
        }
        expected_result = {
            'cordon': {
                'nodes': ['node-1', 'node-2', 'node-4'],
                'affected_pods': []
            },
            'notify': {
                'nodes': ['node-5', 'node-6'],
                'affected_pods': [
                    {
                        'namespace': 'ns-1',
                        'name': 'pod-1',
                        'annotations': {
                            'annotation-1': 'bla',
                            'annotation-3': '123',
                        },
                        'eviction_time': '2 days from now',
                    },
                    {
                        'namespace': 'ns-2',
                        'name': 'pod-2',
                        'annotations': {
                            'annotation-2': 'blub',
                            'annotation-4': '456',
                        },
                        'eviction_time': '2 days from now',
                    },
                ]
            },
            'drain': {
                'nodes': ['node-7'],
                'affected_pods': [
                    {
                        'namespace': 'ns-2',
                        'name': 'pod-3',
                        'annotations': {
                            'annotation-2': 'blub',
                            'annotation-5': '789',
                        },
                        'eviction_time': None,
                    },
                ]
            },
        }
        self.assertEqual(expected_result, main.generate_action_plan(**mock_input))
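
In outline, the expected plan encodes a three-stage lifecycle: nodes past
their age threshold are cordoned first ('node-1', 'node-2', 'node-4'), pods
on nodes that have been tainted for one to two days are notified of upcoming
eviction, and only 'node-7', whose notification grace period has elapsed, is
drained.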
Code example #17
def main():
    ################################
    # Find BIG-IP VTEP MAC Address #
    ################################
    bipMGMT = os.getenv('BIP')
    bipUser = os.getenv('BIPUSER')
    bipPass = os.getenv('BIPPASS')
    bipPodCIDR = os.getenv('BIPPODCIDR')
    bipName = os.getenv('BIPNAME')
    bipFlanPIP = os.getenv('BIPFLANPIP')

    try:
        br = requests.get(
            'https://{}/mgmt/tm/net/tunnels/tunnel/~Common~flannel_vxlan/stats?options=all-properties'
            .format(bipMGMT),
            verify=False,
            auth=HTTPBasicAuth(bipUser, bipPass))
        br.raise_for_status()
    except requests.exceptions.HTTPError as err:
        print(err)
        sys.exit(1)
    except requests.exceptions.RequestException as err:
        print(err)
        sys.exit(1)
    try:
        vtepMAC = br.json()['entries'][
            'https://localhost/mgmt/tm/net/tunnels/tunnel/~Common~flannel_vxlan/~Common~flannel_vxlan/stats'][
                'nestedStats']['entries']['macAddr']['description']
    except json.decoder.JSONDecodeError as err:
        print(err)
        sys.exit(1)

    ###################
    # Create K8s Node #
    ###################
    config.load_incluster_config()
    api_instance = client.CoreV1Api()

    update = True
    patch = False  # set below if the node already exists
    nodes = api_instance.list_node(watch=False)

    for item in nodes.items:
        if item.metadata.name == bipName:
            patch = True
            if item.metadata.annotations[
                    'flannel.alpha.coreos.com/backend-data'] == '{{"VtepMAC": {}}}'.format(
                        vtepMAC):
                update = False

    if update:
        body = client.V1Node()
        body.spec = client.V1NodeSpec(pod_cidr=bipPodCIDR)
        body.metadata = client.V1ObjectMeta(
            name=bipName,
            annotations={
                "flannel.alpha.coreos.com/backend-data":
                '{{"VtepMAC": {}}}'.format(vtepMAC),
                "flannel.alpha.coreos.com/public-ip":
                bipFlanPIP,
                "flannel.alpha.coreos.com/backend-type":
                "vxlan",
                "flannel.alpha.coreos.com/kube-subnet-manager":
                "true"
            })

        if patch:
            try:
                api_response = api_instance.patch_node(bipName,
                                                       body,
                                                       pretty=True)
                pprint(api_response)
                sys.exit(0)
            except ApiException as e:
                print("Exception when calling CoreV1Api->patch_node: %s\n" %
                      e)
                sys.exit(1)
        else:
            try:
                api_response = api_instance.create_node(body, pretty=True)
                pprint(api_response)
                sys.exit(0)
            except ApiException as e:
                print("Exception when calling CoreV1Api->create_node: %s\n" %
                      e)
                sys.exit(1)
    else:
        print("Node {} does not need to be created/updated.".format(bipName))
        sys.exit(0)