def get_pods(k8s_client, ssh_config, label=None, node=None, namespace=None, state='Running'):
    """Return the pods matching the given filters.

    Builds a Kubernetes field selector from the optional `state` (pod
    phase) and `node` filters, plus an optional label selector, then
    lists pods either in one namespace or cluster-wide.
    """
    selectors = []
    if state:
        selectors.append('status.phase={}'.format(state))
    if node:
        # The node filter takes a short name; resolve it first.
        resolved = utils.resolve_hostname(node, ssh_config)
        selectors.append('spec.nodeName={}'.format(resolved))

    query = {}
    if selectors:
        query['field_selector'] = ','.join(selectors)
    if label:
        query['label_selector'] = label

    if namespace:
        result = k8s_client.list_namespaced_pod(namespace=namespace, **query)
    else:
        result = k8s_client.list_pod_for_all_namespaces(**query)
    return result.items
def declare_node(ssh_config, version, k8s_client, node_type, hostname, bootstrap_config):
    """Declare the given node in Kubernetes.

    Resolves the node's canonical name and IP, renders the node
    manifest for `node_type`, and creates the Node object via the API.
    """
    name = utils.resolve_hostname(hostname, ssh_config)
    ip = get_node_ip(hostname, ssh_config, bootstrap_config)
    manifest = get_node_manifest(node_type, version, ip, name)
    k8s_client.create_node(body=node_from_manifest(manifest))
def check_node_is_registered(ssh_config, k8s_client, hostname):
    """Check if the given node is registered in Kubernetes.

    Fails the current test if reading the Node object raises an API
    error (e.g. 404 when the node is absent).
    """
    name = utils.resolve_hostname(hostname, ssh_config)
    try:
        k8s_client.read_node(name)
    except k8s.client.rest.ApiException as exc:
        pytest.fail(str(exc))
def _create(self, body):
    """Create the cluster-scoped custom object described by `body`.

    Mutates `body` in place: the spec's nodeName is rewritten to its
    resolved hostname before submission.
    """
    spec = body['spec']
    # Fixup the hostname.
    spec['nodeName'] = utils.resolve_hostname(spec['nodeName'], self._ssh_config)
    self._client.create_cluster_custom_object(
        group=self._group,
        version=self._version,
        plural=self._plural,
        body=body,
    )
def deploy_node(host, ssh_config, version, name):
    """Deploy the node `name` through the salt-master.

    First accepts the node's SSH key via `salt-ssh test.ping`, then
    runs the `metalk8s.orchestrate.deploy_node` orchestration.
    """
    node = utils.resolve_hostname(name, ssh_config)
    ping_command = [
        'salt-ssh', '-i', node, 'test.ping', '--roster=kubernetes'
    ]
    pillar_json = json.dumps({'orchestrate': {'node_name': node}})
    orchestrate_command = [
        'salt-run',
        'state.orchestrate',
        'metalk8s.orchestrate.deploy_node',
        'saltenv=metalk8s-{}'.format(version),
        "pillar='{}'".format(pillar_json),
    ]
    run_salt_command(host, ping_command, ssh_config)
    run_salt_command(host, orchestrate_command, ssh_config)
def get_pods(
    k8s_client, ssh_config, label,
    node='bootstrap', namespace='default', state='Running'
):
    """Return the pods matching `label` on the specified node.

    Args:
        k8s_client: Kubernetes CoreV1 API client.
        ssh_config: SSH config used to resolve the node's hostname.
        label: label selector string for the pods.
        node: node whose pods to list; falsy to skip the node filter.
        namespace: namespace to search in.
        state: pod phase to filter on; falsy to skip the phase filter.
    """
    field_selector = []
    # Only emit the phase selector for a real state; the previous code
    # produced 'status.phase=None' when state was falsy, unlike the
    # generic get_pods helper which guards this.
    if state:
        field_selector.append('status.phase={}'.format(state))
    if node:
        nodename = utils.resolve_hostname(node, ssh_config)
        field_selector.append('spec.nodeName={}'.format(nodename))
    return k8s_client.list_namespaced_pod(
        namespace,
        field_selector=','.join(field_selector),
        label_selector=label,
    ).items
def check_node_status(ssh_config, k8s_client, hostname, expected_status):
    """Check if the given node has the expected status.

    Reads the node's status conditions and asserts that the `Ready`
    condition maps to `expected_status`. Fails the test on API errors
    or when no `Ready` condition is reported.
    """
    node_name = utils.resolve_hostname(hostname, ssh_config)
    try:
        status = k8s_client.read_node_status(node_name).status
    except k8s.client.rest.ApiException as exn:
        pytest.fail(str(exn))
    # If really not ready, status may not have been pushed yet.
    if status.conditions is None:
        assert expected_status == 'NotReady'
        return
    # See https://kubernetes.io/docs/concepts/architecture/nodes/#condition
    MAP_STATUS = {'True': 'Ready', 'False': 'NotReady', 'Unknown': 'Unknown'}
    # Look up the Ready condition explicitly: the previous loop-and-break
    # silently fell through to the *last* condition when Ready was absent.
    ready_condition = next(
        (cond for cond in status.conditions if cond.type == 'Ready'),
        None,
    )
    if ready_condition is None:
        pytest.fail("node '{}' has no Ready condition".format(node_name))
    assert MAP_STATUS[ready_condition.status] == expected_status
def run_salt_command(host, command, ssh_config):
    """Run a command inside the salt-master container.

    Executes `command` in the `salt-master` container of the bootstrap
    node's salt-master pod and asserts it exits successfully, reporting
    both stdout and stderr on failure.
    """
    pod = 'salt-master-{}'.format(
        utils.resolve_hostname('bootstrap', ssh_config))
    output = kubectl_exec(host, command, pod, container='salt-master',
                          namespace='kube-system')
    # Bug fix: the original format string had a single '{}' so stderr
    # was passed to format() but never rendered in the message.
    assert output.exit_status == 0, \
        'deploy failed with: \nout: {}\nerr: {}'.format(
            output.stdout, output.stderr
        )
def etcdctl(k8s_client, command, ssh_config):
    """Run an etcdctl command inside the etcd container.

    Execs `etcdctl` (with the cluster's TLS credentials) followed by
    `command` in the bootstrap node's etcd pod and returns its output.
    """
    pod_name = 'etcd-{}'.format(
        utils.resolve_hostname('bootstrap', ssh_config)
    )
    base_command = [
        'etcdctl',
        '--endpoints', 'https://localhost:2379',
        '--ca-file', '/etc/kubernetes/pki/etcd/ca.crt',
        '--key-file', '/etc/kubernetes/pki/etcd/server.key',
        '--cert-file', '/etc/kubernetes/pki/etcd/server.crt',
    ]
    return k8s.stream.stream(
        k8s_client.connect_get_namespaced_pod_exec,
        name=pod_name,
        namespace='kube-system',
        command=base_command + command,
        stderr=True,
        stdin=False,
        stdout=True,
        tty=False,
    )
def check_node_status(ssh_config, k8s_client, hostname, expected_status):
    """Check if the given node has the expected status.

    Retries up to 10 times (5s apart) while asserting that the node's
    `Ready` condition maps to `expected_status` via
    `kube_utils.MAP_STATUS`.
    """
    node_name = utils.resolve_hostname(hostname, ssh_config)

    def _check_node_status():
        try:
            status = k8s_client.read_node_status(node_name).status
        except k8s.client.rest.ApiException as exn:
            raise AssertionError(exn)
        # If really not ready, status may not have been pushed yet.
        if status.conditions is None:
            assert expected_status == 'NotReady'
            return
        # Look up the Ready condition explicitly: the previous
        # loop-and-break silently used the *last* condition when no
        # Ready condition was present.
        ready_condition = next(
            (cond for cond in status.conditions if cond.type == 'Ready'),
            None,
        )
        assert ready_condition is not None, \
            "node '{}' has no Ready condition".format(node_name)
        assert kube_utils.MAP_STATUS[ready_condition.status] == expected_status

    utils.retry(
        _check_node_status,
        times=10,
        wait=5,
        name="check node '{}' status".format(node_name)
    )
def check_etcd_role(ssh_config, k8s_client, node_name):
    """Check if the given node is a member of the etcd cluster.

    Resolves the node's hostname and asserts it appears in the output
    of `etcdctl member list`.
    """
    resolved_name = utils.resolve_hostname(node_name, ssh_config)
    member_listing = etcdctl(k8s_client, ['member', 'list'], ssh_config)
    assert resolved_name in member_listing, \
        'node {} is not part of the etcd cluster'.format(resolved_name)