def test_loops_over_initial_namespaces_and_processes_MODIFIED_watch_hits(self):
    """Watch processes the initial namespaces, then only MODIFIED stream events."""
    first = V1Namespace(metadata=V1ObjectMeta(name='mynamespace'))
    second = V1Namespace(metadata=V1ObjectMeta(name='mynamespace2'))
    self.sut.kubeapi.list_namespace = MagicMock(
        return_value=V1NamespaceList(items=[first, second]))

    stream_events = [
        {'type': 'MODIFIED', 'object': second},
        {'type': 'DELETED', 'object': 'should not matter'},
        {'type': 'MODIFIED', 'object': first},
    ]
    watchermock = MagicMock()
    watchermock.stream = MagicMock(return_value=stream_events)
    watch.Watch = MagicMock(return_value=watchermock)
    self.sut.process_namespace = MagicMock()

    self.sut.watch()

    # Two initial namespaces plus the two MODIFIED events; DELETED is skipped.
    self.assertEqual(self.sut.process_namespace.call_count, 4)
    self.sut.process_namespace.assert_has_calls(
        [call(first), call(second), call(second), call(first)])
    watchermock.stream.assert_called_once()
    watchermock.stream.assert_called_with(self.sut.kubeapi.list_namespace)
def test_error_does_not_terminate_watch(self):
    """A failing process_namespace call must not stop the watch loop."""
    first = V1Namespace(metadata=V1ObjectMeta(name='mynamespace'))
    second = V1Namespace(metadata=V1ObjectMeta(name='mynamespace2'))
    self.sut.kubeapi.list_namespace = MagicMock(
        return_value=V1NamespaceList(items=[first, second]))
    error_ns = V1Namespace(metadata=V1ObjectMeta(name='errornamespace'))

    watchermock = MagicMock()
    watchermock.stream = MagicMock(return_value=[
        {'type': 'MODIFIED', 'object': second},
        {'type': 'MODIFIED', 'object': error_ns},
        {'type': 'MODIFIED', 'object': first},
    ])
    watch.Watch = MagicMock(return_value=watchermock)
    self.sut.process_namespace = MagicMock()
    # Raise only for the error namespace; succeed for every other one.
    self.sut.process_namespace.side_effect = lambda ns: exec(
        'raise ValueError') if ns == error_ns else 0

    self.sut.watch()

    # All five calls happen: processing continues past the failing namespace.
    self.assertEqual(self.sut.process_namespace.call_count, 5)
    self.sut.process_namespace.assert_has_calls(
        [call(first), call(second), call(second), call(error_ns), call(first)])
    watchermock.stream.assert_called_once()
    watchermock.stream.assert_called_with(self.sut.kubeapi.list_namespace)
def main():
    """Parse CLI arguments and create a MyJob custom resource.

    Positional args: namespace, name, jobid (int), command.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('namespace')
    parser.add_argument('name')
    parser.add_argument('jobid', type=int)
    parser.add_argument('command')
    args = parser.parse_args()

    # Prefer in-cluster service-account config; fall back to the local
    # kubeconfig when running outside a pod.
    try:
        config.load_incluster_config()
    except Exception:
        config.load_kube_config()

    crd_api = client.CustomObjectsApi()
    meta = V1ObjectMeta(name=args.name, namespace=args.namespace)
    myjob_spec = V1MyJobSpec(job_id=args.jobid, command=args.command)
    myjob = V1MyJob(metadata=meta, spec=myjob_spec)

    # Fixed: was a Python 2 `print` statement, which is a SyntaxError under
    # Python 3 and inconsistent with the rest of the file using print().
    print("About to create MyJob:")
    pprint.pprint(myjob.__dict__)
    crd_api.create_namespaced_custom_object(API_NAMESPACE, API_VERSION,
                                            args.namespace, API_KIND, myjob)
def evict_pod(self, pod):
    '''Trigger the eviction process for a single pod.

    Args:
        - pod: the Kubernetes API pod object to evict

    Returns: the eviction API response, or None if the pod no longer exists

    Raises: CommandExecutionError in case of API error
    '''
    delete_options = V1DeleteOptions()
    if self.grace_period >= 0:
        # Fixed: the V1DeleteOptions field is `grace_period_seconds`;
        # assigning to a non-existent `grace_period` attribute is silently
        # ignored at serialization time, so the configured grace period
        # never reached the API server.
        delete_options.grace_period_seconds = self.grace_period
    object_meta = V1ObjectMeta(name=pod.metadata.name,
                               namespace=pod.metadata.namespace)
    eviction = V1beta1Eviction(
        delete_options=delete_options,
        metadata=object_meta,
    )
    api_call = "CoreV1Api->create_namespaced_pod_eviction"
    api_instance = kubernetes.client.CoreV1Api()
    try:
        return api_instance.create_namespaced_pod_eviction(
            name=eviction.metadata.name,
            namespace=eviction.metadata.namespace,
            body=eviction,
        )
    except (ApiException, HTTPError) as exc:
        # A 404 means the pod is already gone: nothing left to evict.
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        log.exception('Exception when calling %s', api_call)
        raise CommandExecutionError(exc)
def make_pvc(name, storage_class, access_modes, storage, labels):
    """
    Build a k8s PersistentVolumeClaim spec for a user notebook.

    Parameters:
      - name: Name of the claim; must be unique within the target namespace
        and a valid DNS label.
      - storage_class: Name of the k8s StorageClass to request (annotation),
        or a falsy value to skip it.
      - access_modes: List of access modes the pod should have to the claim.
      - storage: The amount of storage requested for the claim.
      - labels: Labels to apply to the claim's metadata.
    """
    metadata = V1ObjectMeta()
    metadata.name = name
    metadata.annotations = {}
    if storage_class:
        metadata.annotations.update(
            {"volume.beta.kubernetes.io/storage-class": storage_class})
    metadata.labels = {}
    metadata.labels.update(labels)

    spec = V1PersistentVolumeClaimSpec()
    spec.access_modes = access_modes
    spec.resources = V1ResourceRequirements()
    spec.resources.requests = {"storage": storage}

    pvc = V1PersistentVolumeClaim()
    pvc.kind = "PersistentVolumeClaim"
    pvc.api_version = "v1"
    pvc.metadata = metadata
    pvc.spec = spec
    return pvc
def main():
    """Scale the commerce-nginx deployment to the global `numpods` replica count."""
    # Load in-cluster (service-account) kubernetes configuration.
    config.load_incluster_config()
    configuration = client.Configuration()
    api_instance = client.AppsV1Api(client.ApiClient(configuration))

    name = 'commerce-nginx'  # name of the Scale object
    namespace = 'default'    # object name and auth scope
    body = client.V1Scale(
        metadata=V1ObjectMeta(name=name, namespace=namespace),
        spec=V1ScaleSpec(replicas=numpods))

    try:
        api_response = api_instance.replace_namespaced_deployment_scale(
            name, namespace, body)
        print("Scaling", name, "to", numpods, "pods.")
        pprint(api_response)  # dump the full API response for debugging
    except ApiException as e:
        print(
            "Exception when calling AppsV1Api->replace_namespaced_deployment_scale: %s\n"
            % e)
def get_object_metadata(self):
    """Return a V1ObjectMeta for this host, named after the local hostname.

    Fixed: `annotations` and `labels` are map<string, string> fields in the
    Kubernetes API, so empty dicts are the correct empty value — the
    original passed empty lists, which serialize to an invalid `[]`.
    """
    return V1ObjectMeta(annotations={},
                        name=socket.gethostname(),
                        uid='',
                        labels={},
                        resource_version='',
                        self_link='')
def test_special_cluster_creates_project_in_special_cluster(self):
    """A cluster-name annotation routes project creation to that cluster."""
    annotations = {
        'project-name-annotation': 'my project',
        'cluster-name-annotation': 'my-other-cluster',
    }
    namespace = V1Namespace(
        metadata=V1ObjectMeta(name='mynamespace', annotations=annotations))
    self.rancherMock.get_project = MagicMock(return_value=None)
    self.rancherMock.create_project = MagicMock(return_value={'id': 'p-123abc'})

    self.sut.process_namespace(namespace)

    self.rancherMock.get_project.assert_called_once()
    self.rancherMock.get_project.assert_called_with('my project')
    self.rancherMock.create_project.assert_called_once()
    self.rancherMock.create_project.assert_called_with('my project',
                                                       'my-other-cluster')
    self.sut.kubeapi.patch_namespace.assert_called_once()
    self.sut.kubeapi.patch_namespace.assert_called_with('mynamespace', namespace)
    self.assertEqual(namespace.metadata.annotations['project-id-annotation'],
                     'p-123abc')
def patch(self, name: str, namespace: str, body, **kwargs):
    """Replace the metadata/spec/status of an existing namespaced resource.

    Raises ApiException(404) when the namespace or named resource is
    missing, and ValueError on a falsy body.
    """
    if namespace not in self._namespaced_items:
        raise ApiException(404, "Not Found")
    if not body:
        raise ValueError
    # Rebuild the body's metadata with the target namespace forced in.
    merged_meta = dict(body.metadata, **{"namespace": namespace})
    body.metadata = V1ObjectMeta(**merged_meta)

    target = None
    for candidate in self._namespaced_items[namespace]:
        if candidate.metadata.name == name:
            target = candidate
            break
    if target is None:
        raise ApiException(404, "Not Found")

    # Drop the stale copy from both indexes before mutating it.
    self._namespaced_items[namespace].remove(target)
    self._items.remove(target)

    for field in ("metadata", "spec", "status"):
        if hasattr(body, field):
            setattr(target, field, getattr(body, field))

    self._namespaced_items[namespace].add(target)
    self._items.add(target)
    return target
def get_object_metadata(self):
    """Return a V1ObjectMeta for this host, named by the HOSTNAME constant.

    Fixed: `annotations` and `labels` are map<string, string> fields in the
    Kubernetes API, so empty dicts are the correct empty value — the
    original passed empty lists, which serialize to an invalid `[]`.
    """
    return V1ObjectMeta(annotations={},
                        name=HOSTNAME,
                        uid='',
                        labels={},
                        resource_version='',
                        self_link='')
def test_no_annotations_noop(self):
    """A namespace without annotations triggers no Rancher or patch calls."""
    namespace = V1Namespace(
        metadata=V1ObjectMeta(name='mynamespace', annotations={}))

    self.sut.process_namespace(namespace)

    self.rancherMock.get_project.assert_not_called()
    self.rancherMock.create_project.assert_not_called()
    self.sut.kubeapi.patch_namespace.assert_not_called()
def test_loops_over_initial_namespaces_and_watches(self):
    """Initial namespaces are each processed once; an empty stream adds nothing."""
    first = V1Namespace(metadata=V1ObjectMeta(name='mynamespace'))
    second = V1Namespace(metadata=V1ObjectMeta(name='mynamespace2'))
    self.sut.kubeapi.list_namespace = MagicMock(
        return_value=V1NamespaceList(items=[first, second]))

    watchermock = MagicMock()
    watchermock.stream = MagicMock(return_value=[])
    watch.Watch = MagicMock(return_value=watchermock)
    self.sut.process_namespace = MagicMock()

    self.sut.watch()

    self.assertEqual(self.sut.process_namespace.call_count, 2)
    self.sut.process_namespace.assert_has_calls([call(first), call(second)])
    watchermock.stream.assert_called_once()
    watchermock.stream.assert_called_with(self.sut.kubeapi.list_namespace)
def test_just_pid_annotation_leaves_it_alone(self):
    """A lone project-id annotation (no project name) means nothing to do."""
    namespace = V1Namespace(metadata=V1ObjectMeta(
        name='mynamespace',
        annotations={'project-id-annotation': 'p-123abc'}))

    self.sut.process_namespace(namespace)

    self.rancherMock.get_project.assert_not_called()
    self.rancherMock.create_project.assert_not_called()
    self.sut.kubeapi.patch_namespace.assert_not_called()
def _mock_v1_pod(jobId, userName, vcName, nodeName):
    """Build a minimal V1Pod labelled as a job pod scheduled on nodeName."""
    metadata = V1ObjectMeta()
    metadata.labels = {
        "jobId": jobId,
        "type": "job",
        "userName": userName,
        "vcName": vcName,
    }
    spec = V1PodSpec(containers=[])
    spec.node_name = nodeName

    pod = V1Pod()
    pod.metadata = metadata
    pod.spec = spec
    return pod
def create(self, namespace: str, body, **kwargs):
    """Insert a new resource into the namespace, enforcing uniqueness.

    Raises ValueError on a falsy body, ApiException(404) for an unknown
    namespace, and ApiException(409) if the resource already exists.
    """
    if not body:
        raise ValueError
    # Force the target namespace into the body's metadata.
    body.metadata = V1ObjectMeta(**dict(body.metadata,
                                        **{"namespace": namespace}))
    if namespace not in self._namespaced_items:
        raise ApiException(404, "Not Found")
    if body in self._namespaced_items[namespace]:
        raise ApiException(409, "AlreadyExists")
    self._items.add(body)
    self._namespaced_items[namespace].add(body)
    return body
def test_no_member_annotations_ignores_existing_members(self):
    """Without member annotations, no project-role handling is attempted."""
    namespace = V1Namespace(metadata=V1ObjectMeta(
        name='mynamespace',
        annotations={
            'project-name-annotation': 'my project',
            'project-id-annotation': 'p-123abc',
        }))
    self.rancherMock.get_project = MagicMock(return_value={'id': 'p-123abc'})
    self.sut.handle_project_role = MagicMock()

    self.sut.process_namespace(namespace)

    self.rancherMock.get_project.assert_called_once()
    self.rancherMock.get_project.assert_called_with('my project')
    self.sut.handle_project_role.assert_not_called()
def test_correct_annotations_just_verifies(self):
    """Matching name+id annotations only verify the project; no create/patch."""
    namespace = V1Namespace(metadata=V1ObjectMeta(
        name='mynamespace',
        annotations={
            'project-name-annotation': 'my project',
            'project-id-annotation': 'p-123abc',
        }))
    self.rancherMock.get_project = MagicMock(return_value={'id': 'p-123abc'})

    self.sut.process_namespace(namespace)

    self.rancherMock.get_project.assert_called_once()
    self.rancherMock.get_project.assert_called_with('my project')
    self.rancherMock.create_project.assert_not_called()
    self.sut.kubeapi.patch_namespace.assert_not_called()
def pvc(self, nameapp, deploy, volspace):
    """Create a PersistentVolumeClaim for an app and return its volume name.

    Returns None (implicitly) when the API call fails.
    """
    idname = nameapp + "-" + deploy
    volumename = 'pv-' + idname
    pretty = 'true'

    meta = V1ObjectMeta()
    meta.namespace = self.namespace
    meta.name = volumename
    meta.labels = {'label': idname}

    # NOTE(review): the selector is built but never attached to the spec
    # (the original assignments were commented out); kept for parity.
    selector = V1LabelSelector()
    selector.match_label = {'label': idname}

    requirements = V1ResourceRequirements()
    requirements.limits = {'storage': volspace}
    requirements.requests = {'storage': volspace}

    spec = V1PersistentVolumeClaimSpec()
    spec.access_modes = ['ReadWriteMany']
    spec.storage_class_name = 'glusterfs-storage'
    spec.resources = requirements

    claim = kubernetes.client.V1PersistentVolumeClaim()
    claim.api_version = 'v1'
    claim.kind = 'PersistentVolumeClaim'
    claim.metadata = meta
    claim.spec = spec

    try:
        api_response = self.k1.create_namespaced_persistent_volume_claim(
            self.namespace, claim, pretty=pretty)
        logging.info(api_response)
        return volumename
    except ApiException as e:
        print(
            "Exception when calling CoreV1Api->create_namespaced_persistent_volume_claim: %s\n"
            % e)
def test_list_resources(self, mock_pod_list, mock_pod_delete):
    """list_resources maps a running pod into a Karbor Pod resource."""
    plugin = K8sPodProtectablePlugin(self._context, cfg.CONF)
    metadata = V1ObjectMeta(name="busybox-test",
                            namespace="default",
                            uid="dd8236e1-8c6c-11e7-9b7a-fa163e18e097")
    pod = V1Pod(api_version="v1",
                kind="Pod",
                metadata=metadata,
                status=V1PodStatus(phase="Running"))
    mock_pod_list.return_value = V1PodList(items=[pod])

    expected = [
        Resource('OS::Kubernetes::Pod',
                 uuid.uuid5(uuid.NAMESPACE_OID, "default:busybox-test"),
                 'default:busybox-test')
    ]
    self.assertEqual(expected, plugin.list_resources(self._context))
def mock_k8s_node(config):
    """Build a V1Node test double from a MockK8sNodeConfig.

    Raises TypeError when given anything other than a MockK8sNodeConfig.
    """
    if not isinstance(config, MockK8sNodeConfig):
        raise TypeError("Wrong config type")

    ready_condition = V1NodeCondition(type="Ready", status=config.ready)
    status = V1NodeStatus(
        addresses=[V1NodeAddress(config.internal_ip, "InternalIP")],
        conditions=[ready_condition],
        capacity=config.capacity,
        allocatable=config.allocatable)

    node = V1Node()
    node.metadata = V1ObjectMeta(name=config.name, labels=config.labels)
    node.spec = V1NodeSpec(unschedulable=config.unschedulable)
    node.status = status
    return node
def _create_config_map_object(name, namespace, data):
    """Create/return a Kubernetes ConfigMap object out of the given data

    :param str name: Name for the config map's metadata
    :param str namespace: Namespace for the config map's metadata
    :param dict data: The data to put into the config map
    :returns: A config map object made from the given data
    :rtype: V1ConfigMap
    """
    LOG.debug("Creating Kubernetes config map object")
    return V1ConfigMap(
        api_version=CONFIG_MAP_DETAILS['api_version'],
        kind=CONFIG_MAP_DETAILS['kind'],
        data=data,
        metadata=V1ObjectMeta(name=name, namespace=namespace),
    )
def test_workloaders_annotations_handles_workloaders(self):
    """The workloaders annotation is split and passed to handle_project_role."""
    namespace = V1Namespace(metadata=V1ObjectMeta(
        name='mynamespace',
        annotations={
            'project-name-annotation': 'my project',
            'project-id-annotation': 'p-123abc',
            'workloaders-annotation': 'jdoe,ssmith',
        }))
    self.rancherMock.get_project = MagicMock(return_value={'id': 'p-123abc'})
    self.sut.handle_project_role = MagicMock()

    self.sut.process_namespace(namespace)

    self.rancherMock.get_project.assert_called_once()
    self.rancherMock.get_project.assert_called_with('my project')
    self.sut.handle_project_role.assert_called_with(
        'mynamespace', 'p-123abc', 'workloads-manage', ['jdoe', 'ssmith'])
def evict_pod(name, namespace='default', grace_period=1, **kwargs):
    '''Trigger the eviction process for a single pod.

    Args:
        - name : the name of the pod to evict
        - namespace : the namespace of the pod to evict
        - grace_period : the time to wait before killing the pod

    Returns: api response (as a dict), or None if the pod does not exist

    Raises: CommandExecutionError in case of API error
    '''
    kind_info = __utils__['metalk8s_kubernetes.get_kind_info']({
        'kind': 'PodEviction',
        'apiVersion': 'v1'
    })
    delete_options = V1DeleteOptions()
    if grace_period >= 0:
        # Fixed: the V1DeleteOptions field is `grace_period_seconds`;
        # assigning to a non-existent `grace_period` attribute was silently
        # dropped during serialization, so the requested grace period
        # never reached the API server.
        delete_options.grace_period_seconds = grace_period
    object_meta = V1ObjectMeta(name=name, namespace=namespace)
    kubeconfig, context = __salt__['metalk8s_kubernetes.get_kubeconfig'](
        **kwargs)
    client = kind_info.client
    client.configure(config_file=kubeconfig, context=context)
    try:
        result = client.create(name=name,
                               namespace=namespace,
                               body=V1beta1Eviction(
                                   delete_options=delete_options,
                                   metadata=object_meta))
    except (ApiException, HTTPError) as exc:
        # A 404 means the pod is already gone: treat eviction as a no-op.
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        raise CommandExecutionError(
            'Failed to evict pod "{}" in namespace "{}": {!s}'.format(
                name, namespace, exc))
    return result.to_dict()
def test_project_exists_write_pid_annotation(self):
    """If the named project exists, only the id annotation is patched onto the namespace."""
    namespace = V1Namespace(metadata=V1ObjectMeta(
        name='mynamespace',
        annotations={'project-name-annotation': 'my project'}))
    self.rancherMock.get_project = MagicMock(return_value={'id': 'p-123abc'})

    self.sut.process_namespace(namespace)

    self.rancherMock.get_project.assert_called_once()
    self.rancherMock.get_project.assert_called_with('my project')
    self.rancherMock.create_project.assert_not_called()
    self.sut.kubeapi.patch_namespace.assert_called_once()
    self.sut.kubeapi.patch_namespace.assert_called_with('mynamespace', namespace)
    self.assertEqual(namespace.metadata.annotations['project-id-annotation'],
                     'p-123abc')
def test_show_resource(self, mock_pod_get):
    """show_resource resolves a pod by name into a Karbor Pod resource."""
    plugin = K8sPodProtectablePlugin(self._context, cfg.CONF)
    pod = V1Pod(api_version="v1",
                kind="Pod",
                metadata=V1ObjectMeta(
                    name="busybox-test",
                    namespace="default",
                    uid="dd8236e1-8c6c-11e7-9b7a-fa163e18e097"),
                status=V1PodStatus(phase="Running"))
    mock_pod_get.return_value = pod

    pod_uuid = uuid.uuid5(uuid.NAMESPACE_OID, "default:busybox-test")
    expected = Resource('OS::Kubernetes::Pod', pod_uuid, 'default:busybox-test')
    self.assertEqual(
        expected,
        plugin.show_resource(self._context, pod_uuid,
                             {'name': 'default:busybox-test'}))
def test_expected_remove_service_event(self, mock_remove_host):  # pylint: disable=invalid-name
    '''
    Receiving a normal DELETED event for the watched service removes its
    shadow-FQDN host entry.
    '''
    service = V1Service(
        api_version='v1',
        kind='Service',
        metadata=V1ObjectMeta(
            name='myregistry',
            namespace='testnamespace',
        ),
        spec=V1ServiceSpec(cluster_ip='10.0.0.1'),
    )
    event = {'type': 'DELETED', 'object': service}

    watcher = hostess.Watcher(env=self.envvars)
    watcher.handle_service_event(event)

    mock_remove_host.assert_called_with(self=watcher,
                                        fqdn=self.envvars['SHADOW_FQDN'])
def test_create_backup(self, mock_k8s_create):
    """The protect operation reads the pod through the mocked k8s client."""
    resource = Resource(id="c88b92a8-e8b4-504c-bad4-343d92061871",
                        type=constants.POD_RESOURCE_TYPE,
                        name='default:busybox-test')
    fake_bank_section.update_object = mock.MagicMock()
    protect_operation = self.plugin.get_protect_operation(resource)
    mock_k8s_create.return_value = self.k8s_client

    pod = V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=V1ObjectMeta(name="busybox-test",
                              namespace="default",
                              uid="dd8236e1-8c6c-11e7-9b7a-fa163e18e097"),
        spec=V1PodSpec(volumes=[], containers=[]),
        status=V1PodStatus(phase="Running"))
    self.k8s_client.read_namespaced_pod = mock.MagicMock(return_value=pod)
    fake_bank_section.update_object = mock.MagicMock()

    call_hooks(protect_operation, self.checkpoint, resource, self.cntxt, {})
def createvolume(self, nameapp, deploy, datadir):
    """Create a Recycle-policy, GlusterFS-backed PersistentVolume for an app."""
    idname = nameapp + "-" + deploy
    pv_name = 'pv-' + idname

    meta = V1ObjectMeta()
    meta.labels = {'label': idname}
    meta.namespace = self.namespace
    meta.name = pv_name

    gluster = V1GlusterfsVolumeSource()
    gluster.endpoints = pv_name
    gluster.path = datadir
    gluster.server = idname  # same "<nameapp>-<deploy>" identifier

    spec = V1PersistentVolumeSpec()
    spec.access_modes = ['ReadWriteMany']
    spec.capacity = {'storage': '1Gi'}
    spec.glusterfs = gluster
    spec.persistent_volume_reclaim_policy = 'Recycle'
    spec.storage_class_name = 'glusterfs-storage'

    body = kubernetes.client.V1PersistentVolume()
    body.api_version = self.apiver
    body.kind = 'PersistentVolume'
    body.metadata = meta
    body.spec = spec

    try:
        api_response = self.k1.create_persistent_volume(body, pretty='true')
        logging.info(api_response)
    except ApiException as e:
        print(
            "Exception when calling CoreV1Api->create_persistent_volume: %s\n"
            % e)
def createroute(self, target, idname, namespace, service, nameapp, l):
    """Create an edge-terminated OpenShift Route pointing at a Service."""
    port = openshift.client.V1RoutePort()
    port.target_port = target

    to_ref = openshift.client.V1RouteTargetReference()
    to_ref.kind = 'Service'
    to_ref.name = idname
    to_ref.weight = 100

    tls = openshift.client.V1TLSConfig()
    tls.termination = 'Edge'
    tls.insecure_edge_termination_policy = 'Redirect'

    spec = openshift.client.V1RouteSpec()
    # Host is "<idname>-<l>" followed by the domain, e.g. '.web.roma2.infn.it'.
    spec.host = idname + '-' + str(l) + str(self.domain)
    spec.port = port
    spec.to = to_ref
    spec.tls = tls

    meta = V1ObjectMeta()
    meta.labels = {"label": idname, "bundle": service + "-" + nameapp}
    meta.name = idname + '-' + str(l)
    meta.namespace = namespace

    body = openshift.client.V1Route()
    body.api_version = 'v1'
    body.kind = 'Route'
    body.metadata = meta
    body.spec = spec

    try:
        self.o1.create_namespaced_route(namespace=namespace, body=body,
                                        pretty='true')
    except ApiException as e:
        print("Exception when calling OapiApi->create_route: %s\n" % e)
def mock_k8s_pod(config):
    """Build a V1Pod test double from a MockK8sPodConfig.

    Raises TypeError when given anything other than a MockK8sPodConfig.
    """
    if not isinstance(config, MockK8sPodConfig):
        raise TypeError("Wrong config type")

    # One container per per-container resource-request dict, named
    # "<pod name><index>".
    containers = [
        V1Container(name=config.name + str(i),
                    resources=V1ResourceRequirements(requests=requests))
        for i, requests in enumerate(config.container_requests)
    ]

    pod = V1Pod()
    pod.metadata = V1ObjectMeta(name=config.name,
                                labels=config.labels,
                                namespace=config.namespace)
    pod.spec = V1PodSpec(node_name=config.node_name,
                         containers=containers,
                         node_selector=config.node_selector)
    pod.status = V1PodStatus(phase=config.phase)
    return pod