def test_get_gcp_config_json_input(self, *args):
    """get_gcp_config() must parse a JSON-string 'auth' blob into a dict
    and derive 'project' from its project_id."""
    self.ctxmock.node.properties['gcp_config'] = json.loads(
        json.dumps({
            'zone': '3',
            'auth': '''{"type": "sa", "project_id": "1", "private_key_id": "2", "private_key": "abcd", "client_email": "svc@some_email", "client_id": "3", "auth_uri": "https://accounts.google.com/o/oauth2/auth", "token_uri": "https://oauth2.googleapis.com/token", "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", "client_x509_cert_url": "https://www.googleapis.com/.."}''',
        }))
    expected_auth = json.loads(
        self.ctxmock.node.properties['gcp_config']['auth'])
    expected = {
        'zone': 3,
        'project': '1',
        'auth': expected_auth,
    }
    config = utils.get_gcp_config()
    self.assertDictEqual(config['auth'], expected['auth'])
    self.assertEqual(config['project'], expected['project'])
def stop(**kwargs):
    """Delete the cluster recorded in runtime properties, unless it is an
    externally managed resource."""
    config = utils.get_gcp_config()
    cluster_name = ctx.instance.runtime_properties.get('name')
    if not cluster_name:
        return
    cluster = Cluster(config, ctx.logger, name=cluster_name,)
    utils.delete_if_not_external(cluster)
def disable_legacy_abac(**kwargs):
    """Turn legacy ABAC off for the cluster named by the node property."""
    config = utils.get_gcp_config()
    target_cluster = ctx.node.properties.get('cluster_id')
    if not target_cluster:
        return
    # The cluster has to be in a usable state before its legacy ABAC
    # setting can be updated -- presumably enforced downstream; TODO confirm.
    abac = LegacyAbac(config, ctx.logger,
                      enabled=False,
                      cluster_id=target_cluster,)
    utils.delete_if_not_external(abac)
def stop(**kwargs):
    """Delete the node pool recorded in runtime properties, unless it is
    externally managed."""
    config = utils.get_gcp_config()
    pool_name = ctx.instance.runtime_properties.get(constants.NAME)
    pool_cluster = ctx.instance.runtime_properties.get('cluster_id')
    if not pool_name:
        return
    pool = NodePool(config, ctx.logger,
                    name=pool_name,
                    cluster_id=pool_cluster,)
    utils.delete_if_not_external(pool)
def enable_legacy_abac(enabled, cluster_id, additional_settings, **kwargs):
    """Apply the requested legacy ABAC setting to *cluster_id*."""
    config = utils.get_gcp_config()
    abac = LegacyAbac(config, ctx.logger,
                      enabled=enabled,
                      cluster_id=cluster_id,
                      additional_settings=additional_settings)
    # Record the external resource id before issuing the create call.
    utils.set_resource_id_if_use_external(cluster_id)
    utils.create(abac)
def delete(**kwargs):
    """Delete the node pool and let utils.resource_deleted track progress."""
    config = utils.get_gcp_config()
    pool_name = ctx.instance.runtime_properties.get(constants.NAME)
    # NOTE(review): cluster_id is read from node.properties here while the
    # matching create/stop operations use runtime_properties -- verify this
    # is intentional.
    pool_cluster = ctx.node.properties.get('cluster_id')
    if not pool_name:
        return
    pool = NodePool(config, ctx.logger,
                    name=pool_name,
                    cluster_id=pool_cluster,)
    utils.resource_deleted(ctx, pool)
def test_get_gcp_config(self, *args):
    """A config without an explicit network falls back to 'default'."""
    self.ctxmock.node.properties['gcp_config'] = {
        'zone': '3',
        'project': 'plan 9',
        'auth': 'let me in!',
    }
    config = utils.get_gcp_config()
    self.assertEqual('default', config['network'])
def start(**kwargs):
    """Mark the cluster recorded in runtime properties as started."""
    config = utils.get_gcp_config()
    cluster_name = ctx.instance.runtime_properties.get(constants.NAME)
    if not cluster_name:
        return
    cluster = Cluster(config, ctx.logger, name=cluster_name)
    utils.resource_started(ctx, cluster)
def stop(**kwargs):
    """Delete the cluster recorded in runtime properties, unless it is
    externally managed."""
    config = utils.get_gcp_config()
    cluster_name = ctx.instance.runtime_properties.get(constants.NAME)
    if not cluster_name:
        return
    cluster = Cluster(config, ctx.logger, name=cluster_name)
    utils.delete_if_not_external(cluster)
def unset_monitoring_service(**kwargs):
    """Reset the cluster's monitoring service to 'none'."""
    config = utils.get_gcp_config()
    target_cluster = ctx.node.properties.get('cluster_id')
    if not target_cluster:
        return
    # The cluster must be in a usable state before the monitoring service
    # can be updated -- presumably enforced downstream; TODO confirm.
    monitoring = MonitoringService(config, ctx.logger,
                                   service_type='none',
                                   cluster_id=target_cluster,)
    utils.delete_if_not_external(monitoring)
def create_network_policy_config(network_policy_config, additional_settings,
                                 **kwargs):
    """Apply *network_policy_config* to the cluster stored in runtime
    properties."""
    config = utils.get_gcp_config()
    target_cluster = ctx.instance.runtime_properties['cluster_id']
    policy = NetworkPolicy(config, ctx.logger,
                           network_policy_config=network_policy_config,
                           cluster_id=target_cluster,
                           additional_settings=additional_settings)
    utils.create(policy)
def delete_network_policy_config(**kwargs):
    """Disable the cluster's network policy by pushing a disabled config."""
    config = utils.get_gcp_config()
    target_cluster = ctx.instance.runtime_properties['cluster_id']
    disabled_config = {'enabled': False,
                       'provider': 'PROVIDER_UNSPECIFIED'
                       }
    if not target_cluster:
        return
    policy = NetworkPolicy(config, ctx.logger,
                           network_policy_config=disabled_config,
                           cluster_id=target_cluster,)
    utils.delete_if_not_external(policy)
def create(name, additional_settings, **kwargs):
    """Create a GKE cluster and stash its description in runtime
    properties."""
    cluster_name = utils.get_final_resource_name(name)
    config = utils.get_gcp_config()
    cluster = Cluster(config, ctx.logger,
                      name=cluster_name,
                      additional_settings=additional_settings)
    utils.create(cluster)
    # Two get() calls preserved from the original flow: one merges the
    # description into runtime properties, one stores it whole.
    ctx.instance.runtime_properties.update(cluster.get())
    ctx.instance.runtime_properties[constants.KUBERNETES_CLUSTER] = \
        cluster.get()
def set_monitoring_service(monitoring_service, cluster_id,
                           additional_settings, **kwargs):
    """Point the cluster at the requested monitoring service."""
    service_name = utils.get_final_resource_name(monitoring_service)
    config = utils.get_gcp_config()
    monitoring = MonitoringService(config, ctx.logger,
                                   service_type=service_name,
                                   cluster_id=cluster_id,
                                   additional_settings=additional_settings)
    utils.set_resource_id_if_use_external(service_name)
    utils.create(monitoring)
def create(name, cluster_id, additional_settings, **kwargs):
    """Create a node pool inside *cluster_id* and record its identifiers."""
    pool_name = utils.get_final_resource_name(name)
    config = utils.get_gcp_config()
    pool = NodePool(config, ctx.logger,
                    name=pool_name,
                    cluster_id=cluster_id,
                    additional_settings=additional_settings)
    utils.create(pool)
    ctx.instance.runtime_properties['name'] = pool_name
    ctx.instance.runtime_properties['cluster_id'] = cluster_id
def delete(**_):
    """Delete the IAM role, retrying until GCP reports it as deleted."""
    config = utils.get_gcp_config()
    role_name = ctx.instance.runtime_properties.get(constants.NAME)
    if not role_name:
        return
    # Fully qualified names look like 'projects/<p>/roles/<id>'; keep only
    # the short role id for the API call.
    if re.match(PATTERN, role_name):
        role_name = role_name.split('/roles/')[-1]
    role = Role(config, ctx.logger, name=role_name)
    deleted = role.get().get('deleted')
    if not deleted:
        role.delete()
        raise OperationRetry(DELETING_MESSAGE.format(deleted=deleted))
def start(**kwargs):
    """Mark the node pool as started and cache its remote description."""
    pool_name = ctx.instance.runtime_properties[constants.NAME]
    pool_cluster = ctx.instance.runtime_properties['cluster_id']
    config = utils.get_gcp_config()
    pool = NodePool(config, ctx.logger,
                    name=pool_name,
                    cluster_id=pool_cluster,
                    additional_settings={})
    utils.resource_started(ctx, pool)
    ctx.logger.debug('Node pool {0} started successfully'.format(pool_name))
    ctx.instance.runtime_properties[constants.KUBERNETES_NODE_POOL] = \
        get_node(pool)
def enable_legacy_abac(enabled, cluster_id, additional_settings, **kwargs):
    """Apply the requested legacy ABAC setting to *cluster_id*.

    A no-op when the resource was already created (idempotent re-run or
    externally managed resource).
    """
    if utils.resource_created(ctx, 'cluster_id'):
        return
    config = utils.get_gcp_config()
    abac = LegacyAbac(config, ctx.logger,
                      enabled=enabled,
                      cluster_id=cluster_id,
                      additional_settings=additional_settings)
    utils.set_resource_id_if_use_external(cluster_id)
    utils.create(abac)
def delete(**kwargs):
    """Poll node pool deletion for the pool recorded in runtime properties."""
    gcp_config = utils.get_gcp_config()
    name = ctx.instance.runtime_properties.get('name')
    # NOTE(review): cluster_id comes from node.properties here, while the
    # sibling create/stop operations use runtime_properties -- confirm.
    cluster_id = ctx.node.properties.get('cluster_id')
    if name:
        node_pool = NodePool(gcp_config, ctx.logger,
                             name=name,
                             cluster_id=cluster_id,)
        remote_mode = get_node(node_pool)
        # NOTE(review): this retries when the pool is ALREADY absent, with a
        # message that says it was deleted successfully; compare stop(),
        # which retries only while the pool still exists. Looks inverted --
        # verify the intended semantics before relying on this operation.
        if not remote_mode:
            ctx.operation.retry(
                'Node pool {0} deleted successfully'.format(name))
def test_get_gcp_config_json_input_field_missing(self, *args):
    """A JSON 'auth' blob missing a required field must raise
    NonRecoverableError (here 'auth_provider_x509_cert_url' is absent)."""
    self.ctxmock.node.properties['gcp_config'] = json.loads(
        json.dumps({
            'zone': '3',
            'auth': '''{"type": "sa", "project_id": "1", "private_key_id": "2", "private_key": "abcd", "client_email": "svc@some_email", "client_id": "3", "auth_uri": "https://accounts.google.com/o/oauth2/auth", "token_uri":"https://oauth2.googleapis.com/token", "client_x509_cert_url": "https://www.googleapis.com/..."}''',
        }))
    with self.assertRaises(NonRecoverableError):
        utils.get_gcp_config()
def disable_legacy_abac(**kwargs):
    """Turn legacy ABAC off for the cluster named by the node property."""
    config = utils.get_gcp_config()
    target_cluster = ctx.node.properties.get('cluster_id')
    if not target_cluster:
        return
    # The cluster needs to be in a usable state before updating legacy
    # ABAC -- presumably enforced downstream; TODO confirm.
    abac = LegacyAbac(
        config,
        ctx.logger,
        enabled=False,
        cluster_id=target_cluster,
    )
    utils.delete_if_not_external(abac)
def stop(**kwargs):
    """Delete the node pool while it still exists remotely."""
    config = utils.get_gcp_config()
    pool_name = ctx.instance.runtime_properties.get('name')
    pool_cluster = ctx.instance.runtime_properties.get('cluster_id')
    if not pool_name:
        return
    pool = NodePool(config, ctx.logger,
                    name=pool_name,
                    cluster_id=pool_cluster,)
    if get_node(pool):
        utils.delete_if_not_external(pool)
    else:
        # NOTE(review): retrying after the pool is already gone looks odd
        # (the message says 'stopped'); confirm the intended lifecycle.
        ctx.operation.retry(
            'Node pool {0} stopped'.format(pool_name))
def create(name, additional_settings, **kwargs):
    """Create a GKE cluster and stash its description in runtime
    properties.

    A no-op when the resource was already created (idempotent re-run or
    externally managed resource).
    """
    if utils.resource_created(ctx, constants.NAME):
        return
    cluster_name = utils.get_final_resource_name(name)
    config = utils.get_gcp_config()
    cluster = Cluster(config, ctx.logger,
                      name=cluster_name,
                      additional_settings=additional_settings)
    utils.create(cluster)
    # Two get() calls preserved from the original flow: one merges the
    # description into runtime properties, one stores it whole.
    ctx.instance.runtime_properties.update(cluster.get())
    ctx.instance.runtime_properties[constants.KUBERNETES_CLUSTER] = \
        cluster.get()
def create(name, cluster_id, additional_settings, **kwargs):
    """Create a node pool inside *cluster_id* and record its identifiers.

    A no-op when the resource was already created (idempotent re-run or
    externally managed resource).
    """
    if utils.resource_created(ctx, constants.NAME):
        return
    pool_name = utils.get_final_resource_name(name)
    config = utils.get_gcp_config()
    pool = NodePool(config, ctx.logger,
                    name=pool_name,
                    cluster_id=cluster_id,
                    additional_settings=additional_settings)
    utils.create(pool)
    ctx.instance.runtime_properties[constants.NAME] = pool_name
    ctx.instance.runtime_properties['cluster_id'] = cluster_id
def update_network_policy_addon(cluster_id, enabled):
    """Toggle the network-policy addon on *cluster_id*.

    The API expresses the addon state as 'disabled', hence the negation.
    """
    config = utils.get_gcp_config()
    policy = NetworkPolicy(config, ctx.logger, None,
                           cluster_id=cluster_id,
                           additional_settings={})
    request_body = {
        'update': {
            'desiredAddonsConfig': {
                'networkPolicyConfig': {
                    'disabled': not enabled
                }
            }
        }
    }
    policy.update_network_policy_addon(request_body)
def start(**kwargs):
    """Wait until the node pool has a visible node, then cache it.

    Retries (every 15s) while GKE is still provisioning the pool; once a
    node is visible, its description is stored in runtime properties.
    """
    name = ctx.instance.runtime_properties['name']
    cluster_id = ctx.instance.runtime_properties['cluster_id']
    gcp_config = utils.get_gcp_config()
    node_pool = NodePool(gcp_config, ctx.logger,
                         name=name,
                         cluster_id=cluster_id,
                         additional_settings={})
    created_node = get_node(node_pool)
    if not created_node:
        # Bug fix: the original fell through after scheduling the retry,
        # logging 'started successfully' and storing None in runtime
        # properties while the pool was still provisioning.
        return ctx.operation.retry(
            'Kubernetes node pool {0} '
            'is still provisioning'.format(name), 15)
    ctx.logger.debug('Node pool {0} started successfully'.format(name))
    ctx.instance.runtime_properties[
        constants.KUBERNETES_NODE_POOL] = created_node
def create(name, title, description, permissions, stage, **_):
    """Create a custom IAM role and record its description.

    A no-op when the resource was already created (idempotent re-run or
    externally managed resource).
    """
    if utils.resource_created(ctx, constants.NAME):
        return
    role_name = utils.get_final_resource_name(name)
    config = utils.get_gcp_config()
    role = Role(config, ctx.logger,
                name=role_name,
                title=title,
                description=description,
                permissions=permissions,
                stage=stage)
    # Persist the name before the create call so retries can find it.
    ctx.instance.runtime_properties[constants.NAME] = role_name
    utils.create(role)
    ctx.instance.runtime_properties.update(role.get())
def delete(**kwargs):
    """Poll cluster deletion: retry while stopping, fail on error state,
    and treat a 404 as successful deletion."""
    config = utils.get_gcp_config()
    cluster_name = ctx.instance.runtime_properties.get('name')
    if not cluster_name:
        return
    cluster = Cluster(config, ctx.logger,
                      name=cluster_name,
                      )
    try:
        # Double get() kept from the original flow (existence check plus
        # status read).
        state = cluster.get()['status'] if cluster.get() else None
        if state == constants.KUBERNETES_STOPPING_STATUS:
            ctx.operation.retry(
                'Kubernetes cluster is still de-provisioning',
                15)
        elif state == constants.KUBERNETES_ERROR_STATUS:
            raise NonRecoverableError(
                'Kubernetes cluster failed to delete.')
    except HttpError as e:
        if e.resp.status == http_client.NOT_FOUND:
            # Gone from the API: deletion finished.
            ctx.logger.debug('Kubernetes cluster deleted.')
        else:
            raise e
def start(**kwargs):
    """Poll cluster provisioning: succeed when running, retry while
    provisioning, fail on error state, and warn on anything else."""
    config = utils.get_gcp_config()
    cluster_name = ctx.instance.runtime_properties.get('name')
    if not cluster_name:
        return
    cluster = Cluster(config, ctx.logger,
                      name=cluster_name,
                      )
    # Double get() kept from the original flow (existence check plus
    # status read).
    state = cluster.get()['status'] if cluster.get() else None
    if state == constants.KUBERNETES_RUNNING_STATUS:
        ctx.logger.debug('Kubernetes cluster running.')
    elif state == constants.KUBERNETES_PROVISIONING_STATUS:
        ctx.operation.retry(
            'Kubernetes cluster is still provisioning.',
            15)
    elif state == constants.KUBERNETES_ERROR_STATUS:
        raise NonRecoverableError('Kubernetes cluster in error state.')
    else:
        ctx.logger.warn(
            'cluster status is neither {0}, {1}, {2}.'
            ' Unknown Status: {3}'.format(
                constants.KUBERNETES_RUNNING_STATUS,
                constants.KUBERNETES_PROVISIONING_STATUS,
                constants.KUBERNETES_ERROR_STATUS,
                state))