def test_handle_resource_deployment_time_both_expired():
    # both creation time + TTL and deployment time + TTL are in the past
    resource = Namespace(
        None,
        {
            "metadata": {
                "name": "foo",
                "annotations": {
                    "janitor/ttl": "1w",
                    "deploymentTimestamp": "2019-03-02T11:13:09Z",
                },
                "creationTimestamp": "2019-03-01T11:13:09Z",
            }
        },
    )
    counter = handle_resource_on_ttl(
        resource, [], 0, deployment_time_annotation="deploymentTimestamp", dry_run=True
    )
    assert counter == {
        "resources-processed": 1,
        "namespaces-with-ttl": 1,
        "namespaces-deleted": 1,
    }

def test_handle_resource_no_expiry():
    resource = Namespace(None, {'metadata': {'name': 'foo'}})
    counter = handle_resource_on_expiry(resource, [], None, dry_run=True, tiller=None)
    assert counter == {}

def test_handle_resource_ttl_annotation_with_notification_not_triggered(
        self, mocked_add_notification_flag, mocked_utcnow):
    # Resource was created: 2019-03-11T11:05:00Z
    # ttl is 10 minutes, so it will expire: 2019-03-11T11:15:00Z
    # Current datetime is: 2019-03-11T11:10:09Z
    # Notification is 3 minutes: 180s. Has to notify after: 2019-03-11T11:12:00Z
    resource = Namespace(
        None,
        {
            "metadata": {
                "name": "foo",
                "annotations": {
                    "janitor/ttl": "10m"
                },
                "creationTimestamp": "2019-03-11T11:05:00Z",
            }
        },
    )
    delete_notification = 180
    handle_resource_on_ttl(
        resource,
        [],
        delete_notification,
        deployment_time_annotation=None,
        dry_run=True,
    )
    mocked_add_notification_flag.assert_not_called()

def test_delete_namespace(caplog):
    caplog.set_level(logging.INFO)
    mock_api = MagicMock()
    foo_ns = Namespace(mock_api, {'metadata': {'name': 'foo'}})
    delete(foo_ns, dry_run=False)
    assert 'Deleting Namespace foo..' in caplog.messages
    mock_api.delete.assert_called_once()

def test_delete_namespace(caplog):
    caplog.set_level(logging.INFO)
    mock_api = MagicMock()
    foo_ns = Namespace(mock_api, {"metadata": {"name": "foo"}})
    delete(foo_ns, wait_after_delete=0, dry_run=False)
    assert "Deleting Namespace foo.." in caplog.messages
    mock_api.delete.assert_called_once()

def test_handle_resource_no_expiry():
    resource = Namespace(None, {"metadata": {"name": "foo"}})
    counter = handle_resource_on_expiry(resource, [], None, wait_after_delete=0, dry_run=True)
    assert counter == {}

def test_handle_resource_ttl_annotation_with_forever_value_not_triggered(
        self, mocked_add_notification_flag, mocked_utcnow):
    # Resource was created: 2019-03-11T11:05:00Z
    # ttl is `forever`, so it will not expire
    # Current datetime is: 2019-03-11T11:13:09Z
    # Notification is 3 minutes: 180s. Has to notify after: 2019-03-11T11:12:00Z
    resource = Namespace(
        None,
        {
            "metadata": {
                "name": "foo",
                "annotations": {
                    "janitor/ttl": "forever"
                },
                "creationTimestamp": "2019-03-11T11:05:00Z",
            }
        },
    )
    delete_notification = 180
    counter = handle_resource_on_ttl(
        resource,
        [],
        delete_notification,
        deployment_time_annotation=None,
        dry_run=True,
    )
    self.assertEqual(1, counter["resources-processed"])
    self.assertEqual(1, len(counter))
    mocked_add_notification_flag.assert_not_called()

def test_create_np(cluster):
    test_id = "np1"
    org = create_org_object(api=cluster.api, organization_name=test_id + ORG_NAME)
    org.obj["spec"]["resources"]["cpu"] = "10"
    org.obj["spec"]["resources"]["memory"] = "10G"
    org.create()
    time.sleep(TIMEOUT)
    tenant = create_tenant_object(api=cluster.api,
                                  organization_name=test_id + ORG_NAME,
                                  tenant_name=test_id + TENANT_NAME)
    tenant.obj["spec"]["resources"]["cpu"] = "5"
    tenant.obj["spec"]["resources"]["memory"] = "5G"
    tenant.create()
    time.sleep(TIMEOUT)
    space = create_space_object(api=cluster.api,
                                organization_name=test_id + ORG_NAME,
                                tenant_name=test_id + TENANT_NAME,
                                space_name=test_id + SPACE_NAME)
    space.obj["spec"]["resources"]["cpu"] = "1"
    space.obj["spec"]["resources"]["memory"] = "1G"
    space.create()
    time.sleep(TIMEOUT)
    namespace = Namespace.objects(cluster.api).get(
        name=space_namespacename_generator(organization_name=test_id + ORG_NAME,
                                           tenant_name=test_id + TENANT_NAME,
                                           space_name=test_id + SPACE_NAME))
    networkPolicies = NetworkPolicy.objects(cluster.api, namespace.name).filter()
    assert len(networkPolicies) == 1

def test_handle_resource_no_ttl():
    resource = Namespace(None, {'metadata': {'name': 'foo'}})
    counter = handle_resource_on_ttl(resource, [], None, dry_run=True, tiller=None)
    assert counter == {'resources-processed': 1}

def uninstall(spec, logger, **kwargs):
    logger.info('uninstall')
    try:
        # remove the Tekton controller, webhook and dashboard deployments
        delete('tekton-pipelines',
               ["tekton-pipelines-controller", "tekton-pipelines-webhook",
                "tekton-triggers-controller", "tekton-triggers-webhook",
                "tekton-dashboard"],
               logger)
        try:
            # delete the Tekton Task resources created at install time
            subprocess.run(
                f"kubectl delete task.tekton.dev/kaniko -n {spec.get('namespace', 'default')}",
                shell=True, check=False, env=osenv)
            subprocess.run(
                f"kubectl delete task.tekton.dev/git-clone -n {spec.get('namespace', 'default')}",
                shell=True, check=False, env=osenv)
        except subprocess.CalledProcessError as e:
            logger.error(e.output)
            raise e
        # delete the namespace referenced in the spec
        api = HTTPClient(KubeConfig.from_file())
        obj = {
            'apiVersion': 'v1',
            'kind': 'Namespace',
            'metadata': {
                'name': spec.get('namespace'),
            }
        }
        Namespace(api, obj).delete()
    except ObjectDoesNotExist:
        pass

def test_handle_resource_no_ttl():
    resource = Namespace(None, {"metadata": {"name": "foo"}})
    counter = handle_resource_on_ttl(resource, [], None, deployment_time_annotation=None, dry_run=True)
    assert counter == {"resources-processed": 1}

def test_matches_resource_filter():
    foo_ns = Namespace(None, {"metadata": {"name": "foo"}})
    assert not matches_resource_filter(foo_ns, [], [], [], [])
    assert not matches_resource_filter(foo_ns, ALL, [], [], [])
    assert matches_resource_filter(foo_ns, ALL, [], ALL, [])
    assert not matches_resource_filter(foo_ns, ALL, [], ALL, ["foo"])
    assert not matches_resource_filter(foo_ns, ALL, ["namespaces"], ALL, [])
    assert matches_resource_filter(foo_ns, ALL, ["deployments"], ALL, ["kube-system"])

def test_create_space_np(cluster):
    test_id = "np2"
    org = create_org_object(api=cluster.api, organization_name=test_id + ORG_NAME)
    org.obj["spec"]["resources"]["cpu"] = "10"
    org.obj["spec"]["resources"]["memory"] = "10G"
    org.create()
    time.sleep(TIMEOUT)
    tenant = create_tenant_object(api=cluster.api,
                                  organization_name=test_id + ORG_NAME,
                                  tenant_name=test_id + TENANT_NAME)
    tenant.obj["spec"]["resources"]["cpu"] = "5"
    tenant.obj["spec"]["resources"]["memory"] = "5G"
    tenant.create()
    time.sleep(TIMEOUT)
    space = create_space_object(api=cluster.api,
                                organization_name=test_id + ORG_NAME,
                                tenant_name=test_id + TENANT_NAME,
                                space_name=test_id + SPACE_NAME)
    space.obj["spec"]["resources"]["cpu"] = "1"
    space.obj["spec"]["resources"]["memory"] = "1G"
    space.obj["spec"]["allowIncomingNetwork"] = {
        "organizations": [{
            "organization_name": "example"
        }],
        "tenants": [
            {
                "organization_name": "example",
                "tenant_name": "crm"
            },
            {
                "organization_name": "example",
                "tenant_name": "crm2"
            },
        ],
        "spaces": [{
            "organization_name": "example",
            "tenant_name": "crm",
            "space_name": "test"
        }]
    }
    space.create()
    time.sleep(TIMEOUT)
    namespace = Namespace.objects(cluster.api).get(
        name=space_namespacename_generator(organization_name=test_id + ORG_NAME,
                                           tenant_name=test_id + TENANT_NAME,
                                           space_name=test_id + SPACE_NAME))
    networkPolicies = list(
        NetworkPolicy.objects(cluster.api, namespace.name).iterator())
    assert len(networkPolicies) == 1
    print(networkPolicies[0])
    assert len(networkPolicies[0].ingress) == 5

def clean_up(api,
             include_resources: frozenset,
             exclude_resources: frozenset,
             include_namespaces: frozenset,
             exclude_namespaces: frozenset,
             rules: list,
             delete_notification: int,
             dry_run: bool):
    counter = Counter()

    for namespace in Namespace.objects(api):
        if matches_resource_filter(namespace, include_resources,
                                   exclude_resources, include_namespaces,
                                   exclude_namespaces):
            counter.update(
                handle_resource_on_ttl(namespace, rules, delete_notification,
                                       dry_run))
            counter.update(
                handle_resource_on_expiry(namespace, rules,
                                          delete_notification, dry_run))
        else:
            logger.debug(f'Skipping {namespace.kind} {namespace}')

    already_seen = set()
    filtered_resources = []

    resource_types = get_namespaced_resource_types(api)
    for _type in resource_types:
        if _type.endpoint not in exclude_resources:
            try:
                for resource in _type.objects(api, namespace=pykube.all):
                    # objects might be available via multiple API versions
                    # (e.g. deployments appear as extensions/v1beta1 and apps/v1)
                    # => process them only once
                    object_id = (resource.kind, resource.namespace, resource.name)
                    if object_id in already_seen:
                        continue
                    already_seen.add(object_id)
                    if matches_resource_filter(resource, include_resources,
                                               exclude_resources,
                                               include_namespaces,
                                               exclude_namespaces):
                        filtered_resources.append(resource)
                    else:
                        logger.debug(
                            f'Skipping {resource.kind} {resource.namespace}/{resource.name}'
                        )
            except Exception as e:
                logger.error(f'Could not list {_type.kind} objects: {e}')

    for resource in filtered_resources:
        counter.update(
            handle_resource_on_ttl(resource, rules, delete_notification,
                                   dry_run))
        counter.update(
            handle_resource_on_expiry(resource, rules, delete_notification,
                                      dry_run))

    stats = ', '.join([f'{k}={v}' for k, v in counter.items()])
    logger.info(f'Clean up run completed: {stats}')
    return counter

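# Usage sketch (not part of the original sources): one way clean_up could be driven
# from a periodic loop. The pykube client construction mirrors the operator handlers
# in this collection; the filter values, the empty rule list and the 60-second
# interval are assumptions made purely for illustration.
import time

from pykube import HTTPClient, KubeConfig


def clean_up_loop_sketch():
    api = HTTPClient(KubeConfig.from_file())
    # assumed to behave like the ALL sentinel used in the matches_resource_filter tests
    match_all = frozenset(["all"])
    while True:
        clean_up(
            api,
            include_resources=match_all,
            exclude_resources=frozenset(["events", "controllerrevisions"]),
            include_namespaces=match_all,
            exclude_namespaces=frozenset(["kube-system"]),
            rules=[],                 # no additional TTL rules in this sketch
            delete_notification=0,
            dry_run=True,             # dry run: only log what would be deleted
        )
        time.sleep(60)                # assumed clean-up interval
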
def test_matches_resource_filter():
    foo_ns = Namespace(None, {'metadata': {'name': 'foo'}})
    assert not matches_resource_filter(foo_ns, [], [], [], [])
    assert not matches_resource_filter(foo_ns, ALL, [], [], [])
    assert matches_resource_filter(foo_ns, ALL, [], ALL, [])
    assert not matches_resource_filter(foo_ns, ALL, [], ALL, ['foo'])
    assert not matches_resource_filter(foo_ns, ALL, ['namespaces'], ALL, [])
    assert matches_resource_filter(foo_ns, ALL, ['deployments'], ALL, ['kube-system'])

def test_handle_resource_ttl_annotation_with_notification_triggered(
        self, mocked_add_notification_flag, mocked_utcnow):
    # Resource was created: 2019-03-11T11:05:00Z
    # ttl is 10 minutes, so it will expire: 2019-03-11T11:15:00Z
    # Current datetime is: 2019-03-11T11:13:09Z
    # Notification is 3 minutes: 180s. Has to notify after: 2019-03-11T11:12:00Z
    resource = Namespace(None, {'metadata': {'name': 'foo',
                                             'annotations': {'janitor/ttl': '10m'},
                                             'creationTimestamp': '2019-03-11T11:05:00Z'}})
    delete_notification = 180
    handle_resource_on_ttl(resource, [], delete_notification, dry_run=True)
    mocked_add_notification_flag.assert_called()

def test_wait_after_delete(monkeypatch):
    mock_sleep = MagicMock()
    monkeypatch.setattr("time.sleep", mock_sleep)
    mock_api = MagicMock()
    foo_ns = Namespace(mock_api, {"metadata": {"name": "foo"}})

    delete(foo_ns, wait_after_delete=123, dry_run=True)
    assert not mock_sleep.called

    delete(foo_ns, wait_after_delete=123, dry_run=False)
    mock_sleep.assert_called_once_with(123)

def test_handle_resource_ttl_annotation_with_forever_value_not_triggered(
        self, mocked_add_notification_flag, mocked_utcnow):
    # Resource was created: 2019-03-11T11:05:00Z
    # ttl is `forever`, so it will not expire
    # Current datetime is: 2019-03-11T11:13:09Z
    # Notification is 3 minutes: 180s. Has to notify after: 2019-03-11T11:12:00Z
    resource = Namespace(None, {'metadata': {'name': 'foo',
                                             'annotations': {'janitor/ttl': 'forever'},
                                             'creationTimestamp': '2019-03-11T11:05:00Z'}})
    delete_notification = 180
    counter = handle_resource_on_ttl(resource, [], delete_notification, dry_run=True)
    self.assertEqual(1, counter['resources-processed'])
    self.assertEqual(1, len(counter))
    mocked_add_notification_flag.assert_not_called()

def test_create_org(cluster):
    test_id = "t1"
    org = create_org_object(api=cluster.api, organization_name=test_id + ORG_NAME)
    org.create()
    time.sleep(TIMEOUT)
    namespace = Namespace.objects(cluster.api).get(
        name=organization_namespacename_generator(organization_name=test_id + ORG_NAME))
    assert namespace.labels["k8spin.cloud/type"] == "organization"
    assert namespace.labels["k8spin.cloud/org"] == test_id + ORG_NAME

def test_handle_resource_expiry_expired():
    resource = Namespace(
        None,
        {
            "metadata": {
                "name": "foo",
                "annotations": {"janitor/expires": "2001-09-26T01:51:42Z"},
            }
        },
    )
    counter = handle_resource_on_expiry(resource, [], None, dry_run=True)
    assert counter == {"namespaces-with-expiry": 1, "namespaces-deleted": 1}

def test_handle_resource_expiry_expired():
    resource = Namespace(
        None, {
            'metadata': {
                'name': 'foo',
                'annotations': {
                    'janitor/expires': '2001-09-26T01:51:42Z'
                }
            }
        })
    counter = handle_resource_on_expiry(resource, [], None, dry_run=True)
    assert counter == {'namespaces-with-expiry': 1, 'namespaces-deleted': 1}

def namespace(spec, old, new, logger, **kwargs):
    logger.info(f'namespace: {old=}, {new=}')
    api = HTTPClient(KubeConfig.from_file())
    if new:
        # the field now has a value: create the corresponding namespace
        obj = {
            'apiVersion': 'v1',
            'kind': 'Namespace',
            'metadata': {
                'name': new,
            }
        }
        Namespace(api, obj).create()
    elif old:
        # the field was cleared: delete the previously created namespace
        obj = {
            'apiVersion': 'v1',
            'kind': 'Namespace',
            'metadata': {
                'name': old,
            }
        }
        Namespace(api, obj).delete()

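# Registration sketch (not part of the original sources): handlers with the
# (spec, old, new, logger, **kwargs) signature above look like kopf-style operator
# handlers. This shows how such handlers are typically wired up with kopf decorators;
# the CRD group, version and plural ("example.dev", "v1", "pipelineconfigs") are
# placeholders, not the project's real identifiers.
import kopf


@kopf.on.field("example.dev", "v1", "pipelineconfigs", field="spec.namespace")
def namespace_sketch(spec, old, new, logger, **kwargs):
    ...  # would delegate to the namespace() handler above


@kopf.on.delete("example.dev", "v1", "pipelineconfigs")
def uninstall_sketch(spec, logger, **kwargs):
    ...  # would delegate to the uninstall() handler above
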
def test_handle_resource_expiry_annotation():
    # TTL is far in the future
    resource = Namespace(
        None,
        {
            "metadata": {
                "name": "foo",
                "annotations": {"janitor/expires": "2050-09-26T01:51:42Z"},
            }
        },
    )
    counter = handle_resource_on_expiry(resource, [], None, dry_run=True)
    assert counter == {"namespaces-with-expiry": 1}

def test_handle_resource_expiry_annotation():
    # TTL is far in the future
    resource = Namespace(
        None, {
            'metadata': {
                'name': 'foo',
                'annotations': {
                    'janitor/expires': '2050-09-26T01:51:42Z'
                }
            }
        })
    counter = handle_resource_on_expiry(resource, [], None, dry_run=True)
    assert counter == {'namespaces-with-expiry': 1}

def test_handle_resource_ttl_annotation():
    # TTL is far in the future
    resource = Namespace(
        None, {
            'metadata': {
                'name': 'foo',
                'annotations': {
                    'janitor/ttl': '999w'
                },
                'creationTimestamp': '2019-01-17T20:59:12Z'
            }
        })
    counter = handle_resource_on_ttl(resource, [], dry_run=True)
    assert counter == {'resources-processed': 1, 'namespaces-with-ttl': 1}

def test_handle_resource_ttl_annotation():
    # TTL is far in the future
    resource = Namespace(
        None,
        {
            "metadata": {
                "name": "foo",
                "annotations": {
                    "janitor/ttl": "999w"
                },
                "creationTimestamp": "2019-01-17T20:59:12Z",
            }
        },
    )
    counter = handle_resource_on_ttl(resource, [], None, dry_run=True)
    assert counter == {"resources-processed": 1, "namespaces-with-ttl": 1}

def test_handle_resource_ttl_annotation():
    # TTL is in the future
    resource = Namespace(
        None,
        {
            "metadata": {
                "name": "foo",
                "annotations": {"janitor/ttl": "2w"},
                "creationTimestamp": "2019-03-01T11:13:09Z",
            }
        },
    )
    counter = handle_resource_on_ttl(
        resource, [], 0, deployment_time_annotation=None, dry_run=True
    )
    assert counter == {"resources-processed": 1, "namespaces-with-ttl": 1}

def test_handle_resource_ttl_expired():
    resource = Namespace(
        None, {
            'metadata': {
                'name': 'foo',
                'annotations': {
                    'janitor/ttl': '1s'
                },
                'creationTimestamp': '2019-01-17T20:59:12Z'
            }
        })
    counter = handle_resource_on_ttl(resource, [], dry_run=True)
    assert counter == {
        'resources-processed': 1,
        'namespaces-with-ttl': 1,
        'namespaces-deleted': 1
    }

def test_handle_resource_ttl_expired():
    resource = Namespace(
        None,
        {
            "metadata": {
                "name": "foo",
                "annotations": {"janitor/ttl": "1s"},
                "creationTimestamp": "2019-01-17T20:59:12Z",
            }
        },
    )
    counter = handle_resource_on_ttl(
        resource, [], None, deployment_time_annotation=None, dry_run=True
    )
    assert counter == {
        "resources-processed": 1,
        "namespaces-with-ttl": 1,
        "namespaces-deleted": 1,
    }

def test_create_spaces(cluster):
    test_id = "t3"
    org = create_org_object(api=cluster.api, organization_name=test_id + ORG_NAME)
    org.create()
    time.sleep(TIMEOUT)
    tenant = create_tenant_object(api=cluster.api,
                                  organization_name=test_id + ORG_NAME,
                                  tenant_name=test_id + TENANT_NAME)
    tenant.create()
    time.sleep(TIMEOUT)
    space = create_space_object(api=cluster.api,
                                organization_name=test_id + ORG_NAME,
                                tenant_name=test_id + TENANT_NAME,
                                space_name=test_id + SPACE_NAME)
    space.create()
    time.sleep(TIMEOUT)
    namespace_name = space_namespacename_generator(
        organization_name=test_id + ORG_NAME,
        tenant_name=test_id + TENANT_NAME,
        space_name=test_id + SPACE_NAME)
    namespace = Namespace.objects(cluster.api).get(name=namespace_name)
    assert namespace.labels["k8spin.cloud/type"] == "space"
    assert namespace.labels["k8spin.cloud/org"] == test_id + ORG_NAME
    assert namespace.labels["k8spin.cloud/tenant"] == test_id + TENANT_NAME
    assert namespace.labels["k8spin.cloud/space"] == test_id + SPACE_NAME
    assert namespace.labels["k8spin.cloud/name"] == namespace_name
    resourceQuotas = pykube.ResourceQuota.objects(
        cluster.api, namespace.name).filter(selector={"k8spin.cloud/type": "quotas"})
    assert len(resourceQuotas) == 1
    limitRanges = pykube.LimitRange.objects(
        cluster.api, namespace.name).filter(selector={"k8spin.cloud/type": "defaults"})
    assert len(limitRanges) == 1