def aro_delete(cmd, client, resource_group_name, resource_name, no_wait=False):
    """Delete an ARO cluster.

    Before issuing the delete, make a best-effort attempt to repair the RP
    service principal's role assignments on the cluster's resources, because
    customers frequently remove those permissions and deletion then fails.

    :param cmd: CLI command context (provides ``cli_ctx``).
    :param client: OpenShiftClusters operations client.
    :param resource_group_name: resource group containing the cluster.
    :param resource_name: cluster resource name.
    :param no_wait: when True, do not wait for the long-running delete.
    :raises ResourceNotFoundError: when the cluster does not exist (404).
    """
    # TODO: clean up rbac
    rp_client_sp = None
    # BUGFIX: previously 'oc' was only bound inside the try; a swallowed
    # non-404 CloudError/HttpOperationError left it unbound and the
    # ensure_resource_permissions call below raised UnboundLocalError.
    oc = None

    try:
        oc = client.get(resource_group_name, resource_name)
    except CloudError as e:
        if e.status_code == 404:
            raise ResourceNotFoundError(e.message) from e
        logger.info(e.message)
    except HttpOperationError as e:
        logger.info(e.message)

    aad = AADManager(cmd.cli_ctx)

    # Best effort - assume the role assignments on the SP exist if exception raised
    try:
        rp_client_sp = aad.get_service_principal(resolve_rp_client_id())
        if not rp_client_sp:
            raise ResourceNotFoundError("RP service principal not found.")
    except GraphErrorException as e:
        logger.info(e.message)

    # Customers frequently remove the Cluster or RP's service principal permissions.
    # Attempt to fix this before performing any action against the cluster.
    # Skip when the cluster lookup failed (oc is None): there is nothing to
    # derive resource scopes from, and delete below remains best-effort.
    if rp_client_sp and oc is not None:
        ensure_resource_permissions(cmd.cli_ctx, oc, False, [rp_client_sp.object_id])

    return sdk_no_wait(no_wait, client.begin_delete,
                       resource_group_name=resource_group_name,
                       resource_name=resource_name)
def aro_delete(cmd, client, resource_group_name, resource_name, no_wait=False):
    """Delete an ARO cluster, first making a best-effort attempt to restore
    the RP service principal's Network Contributor role on the cluster's
    network resources so the deletion does not fail on missing permissions.
    """
    # TODO: clean up rbac
    rp_client_sp = None
    resources = set()

    try:
        oc = client.get(resource_group_name, resource_name)
        # Get cluster resources we need to assign network contributor on
        resources = get_cluster_network_resources(cmd.cli_ctx, oc)
    except (CloudError, HttpOperationError) as e:
        # Best effort: proceed to delete even if the lookup failed.
        logger.info(e.message)

    aad = AADManager(cmd.cli_ctx)

    rp_client_id = (FP_CLIENT_ID if rp_mode_production()
                    else os.environ.get('AZURE_FP_CLIENT_ID', FP_CLIENT_ID))

    # Best effort - assume the role assignments on the SP exist if exception raised
    try:
        rp_client_sp = aad.get_service_principal(rp_client_id)
        if not rp_client_sp:
            raise ResourceNotFoundError("RP service principal not found.")
    except GraphErrorException as e:
        logger.info(e.message)

    # Customers frequently remove the Cluster or RP's service principal permissions.
    # Attempt to fix this before performing any action against the cluster
    if rp_client_sp:
        for res in sorted(resources):
            # Create the role assignment if it doesn't exist.
            # Assume that the role assignment exists if we fail to look it up.
            try:
                already_assigned = has_network_contributor_on_resource(
                    cmd.cli_ctx, res, rp_client_sp.object_id)
            except CloudError as e:
                logger.info(e.message)
                continue

            if not already_assigned:
                assign_network_contributor_to_resource(cmd.cli_ctx, res,
                                                       rp_client_sp.object_id)

    return sdk_no_wait(no_wait, client.delete,
                       resource_group_name=resource_group_name,
                       resource_name=resource_name)
def aro_delete(cmd, client, resource_group_name, resource_name, no_wait=False):
    """Delete an ARO cluster and, when waiting, also delete the managed AAD
    application backing the cluster's service principal.

    A 404 on the initial lookup is treated as "already gone" and returns
    silently; any other CloudError is propagated.
    """
    if not no_wait:
        aad = AADManager(cmd.cli_ctx)
        try:
            oc = client.get(resource_group_name, resource_name)
        except CloudError as err:
            if err.status_code == 404:
                return
            raise err

    sdk_no_wait(no_wait, client.delete,
                resource_group_name=resource_group_name,
                resource_name=resource_name)

    # Only clean up the AAD application when we actually waited for the
    # delete above (oc is only fetched in that case).
    if not no_wait:
        aad.deleteManagedApplication(oc.service_principal_profile.client_id)
def aro_delete(cmd, client, resource_group_name, resource_name, no_wait=False):
    """Delete an ARO cluster.

    Customers frequently remove the RP's permissions, then cannot delete the
    cluster. Where possible, re-grant Contributor on the cluster vnet and
    route tables before attempting deletion; any failure in that repair step
    is logged and the plain delete is attempted regardless.
    """
    # TODO: clean up rbac
    try:
        oc = client.get(resource_group_name, resource_name)

        master_subnet = oc.master_profile.subnet_id
        worker_subnets = {w.subnet_id for w in oc.worker_profiles}

        master_parts = parse_resource_id(master_subnet)
        vnet = resource_id(
            subscription=master_parts['subscription'],
            resource_group=master_parts['resource_group'],
            namespace='Microsoft.Network',
            type='virtualNetworks',
            name=master_parts['name'],
        )

        aad = AADManager(cmd.cli_ctx)
        rp_client_id = os.environ.get('AZURE_FP_CLIENT_ID', FP_CLIENT_ID)
        rp_client_sp = aad.get_service_principal(rp_client_id)

        if rp_client_sp:
            sp_id = rp_client_sp.object_id
            assign_contributor_to_vnet(cmd.cli_ctx, vnet, sp_id)
            assign_contributor_to_routetable(cmd.cli_ctx,
                                             worker_subnets | {master_subnet},
                                             sp_id)
    except (CloudError, HttpOperationError) as e:
        # Default to old deletion behaviour in case operations throw an
        # exception above. Log the error.
        logger.info(e.message)

    return sdk_no_wait(no_wait, client.delete,
                       resource_group_name=resource_group_name,
                       resource_name=resource_name)
def aro_create(cmd,  # pylint: disable=too-many-locals
               client,
               resource_group_name,
               resource_name,
               master_subnet,
               worker_subnet,
               vnet=None,
               vnet_resource_group_name=None,  # pylint: disable=unused-argument
               location=None,
               domain=None,
               cluster_resource_group=None,
               client_id=None,
               client_secret=None,
               pod_cidr=None,
               service_cidr=None,
               master_vm_size=None,
               worker_vm_size=None,
               worker_vm_disk_size_gb=None,
               worker_count=None,
               apiserver_visibility=None,
               ingress_visibility=None,
               tags=None,
               no_wait=False):
    """Create an ARO cluster (2019-12-31-preview API).

    Creates an AAD application/service principal for the cluster when none is
    supplied, grants Contributor on the vnet to both the cluster SP and the
    RP SP, then submits the cluster document.
    """
    vnet = validate_subnets(master_subnet, worker_subnet)

    subscription_id = get_subscription_id(cmd.cli_ctx)

    # Short random suffix used for generated names (app, domain, resource group).
    random_id = ''.join(random.choice('abcdefghijklmnopqrstuvwxyz0123456789')
                        for _ in range(8))

    aad = AADManager(cmd.cli_ctx)

    if client_id is None:
        app, client_secret = aad.create_application('aro-%s' % random_id)
        client_id = app.app_id

    client_sp = aad.get_service_principal(client_id) or aad.create_service_principal(client_id)

    rp_client_id = os.environ['AZURE_FP_CLIENT_ID'] if rp_mode_development() else FP_CLIENT_ID
    rp_client_sp = aad.get_service_principal(rp_client_id)

    for sp_object_id in (client_sp.object_id, rp_client_sp.object_id):
        assign_contributor_to_vnet(cmd.cli_ctx, vnet, sp_object_id)

    oc = v2019_12_31_preview.OpenShiftCluster(
        location=location,
        tags=tags,
        cluster_profile=v2019_12_31_preview.ClusterProfile(
            domain=domain or random_id,
            resource_group_id='/subscriptions/%s/resourceGroups/%s' %
            (subscription_id, cluster_resource_group or "aro-" + random_id),
        ),
        service_principal_profile=v2019_12_31_preview.ServicePrincipalProfile(
            client_id=client_id,
            client_secret=client_secret,
        ),
        network_profile=v2019_12_31_preview.NetworkProfile(
            pod_cidr=pod_cidr or '10.128.0.0/14',
            service_cidr=service_cidr or '172.30.0.0/16',
        ),
        master_profile=v2019_12_31_preview.MasterProfile(
            vm_size=master_vm_size or 'Standard_D8s_v3',
            subnet_id=master_subnet,
        ),
        worker_profiles=[
            v2019_12_31_preview.WorkerProfile(
                name='worker',  # TODO: 'worker' should not be hard-coded
                vm_size=worker_vm_size or 'Standard_D2s_v3',
                disk_size_gb=worker_vm_disk_size_gb or 128,
                subnet_id=worker_subnet,
                count=worker_count or 3,
            )
        ],
        apiserver_profile=v2019_12_31_preview.APIServerProfile(
            visibility=apiserver_visibility or 'Public',
        ),
        ingress_profiles=[
            v2019_12_31_preview.IngressProfile(
                name='default',  # TODO: 'default' should not be hard-coded
                visibility=ingress_visibility or 'Public',
            )
        ],
    )

    return sdk_no_wait(no_wait, client.create_or_update,
                       resource_group_name=resource_group_name,
                       resource_name=resource_name,
                       parameters=oc)
def cluster_application_update(cli_ctx, oc, client_id, client_secret,
                               refresh_cluster_credentials):
    """Update/repair the AAD application and service principals for a cluster.

    When the caller supplies ``client_id``/``client_secret`` or asks for
    ``refresh_cluster_credentials``, they are expected to be able to fix RBAC,
    so AAD lookup failures are fatal; otherwise they are logged and the update
    proceeds best-effort.

    Returns the (possibly regenerated) ``(client_id, client_secret)`` pair.
    """
    # QUESTION: is there possible unification with the create path?

    rp_client_sp = None
    client_sp = None
    random_id = generate_random_id()

    # if any of these are set - we expect users to have access to fix rbac so we fail
    # common for 1 and 2 flows
    fail = client_id or client_secret or refresh_cluster_credentials

    aad = AADManager(cli_ctx)

    # check if we can see if RP service principal exists
    try:
        rp_client_sp = aad.get_service_principal(resolve_rp_client_id())
        if not rp_client_sp:
            raise ResourceNotFoundError("RP service principal not found.")
    except GraphErrorException as e:
        if fail:
            logger.error(e.message)
            raise
        logger.info(e.message)

    # refresh_cluster_credentials refreshes the cluster SP application:
    # first try to re-use an existing application and generate a new password;
    # if the application does not exist, create a new one.
    if refresh_cluster_credentials:
        try:
            app = aad.get_application_by_client_id(
                client_id or oc.service_principal_profile.client_id)
            if app:
                client_secret = aad.refresh_application_credentials(app.object_id)
            else:
                # No application found - create a new one.
                parts = parse_resource_id(oc.cluster_profile.resource_group_id)
                cluster_resource_group = parts['resource_group']
                app, client_secret = aad.create_application(
                    cluster_resource_group or 'aro-' + random_id)
                client_id = app.app_id
        except GraphErrorException as e:
            logger.error(e.message)
            raise

    # attempt to get/create SP if one was not found.
    sp_client_id = client_id or oc.service_principal_profile.client_id
    try:
        client_sp = aad.get_service_principal(sp_client_id)
    except GraphErrorException as e:
        if fail:
            logger.error(e.message)
            raise
        logger.info(e.message)

    if fail and not client_sp:
        client_sp = aad.create_service_principal(sp_client_id)

    sp_obj_ids = [sp.object_id for sp in (rp_client_sp, client_sp) if sp]
    ensure_resource_permissions(cli_ctx, oc, fail, sp_obj_ids)

    return client_id, client_secret
def aro_create(cmd,  # pylint: disable=too-many-locals
               client,
               resource_group_name,
               resource_name,
               master_subnet,
               worker_subnet,
               vnet=None,  # pylint: disable=unused-argument
               vnet_resource_group_name=None,  # pylint: disable=unused-argument
               location=None,
               pull_secret=None,
               domain=None,
               cluster_resource_group=None,
               client_id=None,
               client_secret=None,
               pod_cidr=None,
               service_cidr=None,
               software_defined_network=None,
               disk_encryption_set=None,
               master_encryption_at_host=False,
               master_vm_size=None,
               worker_encryption_at_host=False,
               worker_vm_size=None,
               worker_vm_disk_size_gb=None,
               worker_count=None,
               apiserver_visibility=None,
               ingress_visibility=None,
               tags=None,
               no_wait=False):
    """Create an ARO cluster.

    Validates provider registration and subnets, provisions (or reuses) the
    cluster AAD application/service principal, grants the required resource
    permissions to the cluster and RP service principals, then submits the
    cluster document via the long-running create operation.
    """
    # 'Enabled'/'Disabled' encoding used by encryption-at-host profile fields.
    def _encryption_state(flag):
        return 'Enabled' if flag else 'Disabled'

    if not rp_mode_development():
        resource_client = get_mgmt_service_client(
            cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
        provider = resource_client.providers.get('Microsoft.RedHatOpenShift')
        if provider.registration_state != 'Registered':
            raise UnauthorizedError('Microsoft.RedHatOpenShift provider is not registered.',
                                    'Run `az provider register -n Microsoft.RedHatOpenShift --wait`.')

    validate_subnets(master_subnet, worker_subnet)

    subscription_id = get_subscription_id(cmd.cli_ctx)
    random_id = generate_random_id()

    aad = AADManager(cmd.cli_ctx)
    if client_id is None:
        app, client_secret = aad.create_application(cluster_resource_group or 'aro-' + random_id)
        client_id = app.app_id

    client_sp = aad.get_service_principal(client_id)
    if not client_sp:
        client_sp = aad.create_service_principal(client_id)

    rp_client_sp = aad.get_service_principal(resolve_rp_client_id())
    if not rp_client_sp:
        raise ResourceNotFoundError("RP service principal not found.")

    # Development RP environments use a smaller default worker size.
    default_worker_size = 'Standard_D2s_v3' if rp_mode_development() else 'Standard_D4s_v3'
    worker_vm_size = worker_vm_size or default_worker_size

    if apiserver_visibility is not None:
        apiserver_visibility = apiserver_visibility.capitalize()
    if ingress_visibility is not None:
        ingress_visibility = ingress_visibility.capitalize()

    oc = openshiftcluster.OpenShiftCluster(
        location=location,
        tags=tags,
        cluster_profile=openshiftcluster.ClusterProfile(
            pull_secret=pull_secret or "",
            domain=domain or random_id,
            resource_group_id='/subscriptions/%s/resourceGroups/%s' %
            (subscription_id, cluster_resource_group or "aro-" + random_id),
        ),
        service_principal_profile=openshiftcluster.ServicePrincipalProfile(
            client_id=client_id,
            client_secret=client_secret,
        ),
        network_profile=openshiftcluster.NetworkProfile(
            pod_cidr=pod_cidr or '10.128.0.0/14',
            service_cidr=service_cidr or '172.30.0.0/16',
            software_defined_network=software_defined_network or 'OpenShiftSDN'
        ),
        master_profile=openshiftcluster.MasterProfile(
            vm_size=master_vm_size or 'Standard_D8s_v3',
            subnet_id=master_subnet,
            encryption_at_host=_encryption_state(master_encryption_at_host),
            disk_encryption_set_id=disk_encryption_set,
        ),
        worker_profiles=[
            openshiftcluster.WorkerProfile(
                name='worker',  # TODO: 'worker' should not be hard-coded
                vm_size=worker_vm_size,
                disk_size_gb=worker_vm_disk_size_gb or 128,
                subnet_id=worker_subnet,
                count=worker_count or 3,
                encryption_at_host=_encryption_state(worker_encryption_at_host),
                disk_encryption_set_id=disk_encryption_set,
            )
        ],
        apiserver_profile=openshiftcluster.APIServerProfile(
            visibility=apiserver_visibility or 'Public',
        ),
        ingress_profiles=[
            openshiftcluster.IngressProfile(
                name='default',  # TODO: 'default' should not be hard-coded
                visibility=ingress_visibility or 'Public',
            )
        ],
    )

    sp_obj_ids = [client_sp.object_id, rp_client_sp.object_id]
    ensure_resource_permissions(cmd.cli_ctx, oc, True, sp_obj_ids)

    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name=resource_group_name,
                       resource_name=resource_name,
                       parameters=oc)
def aro_create(cmd,  # pylint: disable=too-many-locals
               client,
               resource_group_name,
               resource_name,
               master_subnet,
               worker_subnet,
               vnet=None,
               vnet_resource_group_name=None,  # pylint: disable=unused-argument
               location=None,
               pull_secret=None,
               domain=None,
               cluster_resource_group=None,
               client_id=None,
               client_secret=None,
               pod_cidr=None,
               service_cidr=None,
               master_vm_size=None,
               worker_vm_size=None,
               worker_vm_disk_size_gb=None,
               worker_count=None,
               apiserver_visibility=None,
               ingress_visibility=None,
               tags=None,
               no_wait=False):
    """Create an ARO cluster (2020-04-30 API).

    Checks the Microsoft.RedHatOpenShift provider registration (outside
    development mode), provisions the cluster service principal if needed,
    grants Contributor on the vnet and route tables to the cluster and RP
    service principals, then submits the cluster document.
    """
    if not rp_mode_development():
        resource_client = get_mgmt_service_client(
            cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
        provider = resource_client.providers.get('Microsoft.RedHatOpenShift')
        if provider.registration_state != 'Registered':
            raise CLIError('Microsoft.RedHatOpenShift provider is not registered. '
                           'Run `az provider register -n Microsoft.RedHatOpenShift --wait`.')

    vnet = validate_subnets(master_subnet, worker_subnet)

    subscription_id = get_subscription_id(cmd.cli_ctx)
    random_id = generate_random_id()

    aad = AADManager(cmd.cli_ctx)
    if client_id is None:
        app, client_secret = aad.create_application(cluster_resource_group or 'aro-' + random_id)
        client_id = app.app_id

    client_sp = aad.get_service_principal(client_id) or aad.create_service_principal(client_id)

    rp_client_id = os.environ['AZURE_FP_CLIENT_ID'] if rp_mode_development() else FP_CLIENT_ID
    rp_client_sp = aad.get_service_principal(rp_client_id)

    for sp_id in [client_sp.object_id, rp_client_sp.object_id]:
        assign_contributor_to_vnet(cmd.cli_ctx, vnet, sp_id)
        assign_contributor_to_routetable(cmd.cli_ctx, master_subnet, worker_subnet, sp_id)

    # Development RP environments use a smaller default worker size.
    worker_vm_size = worker_vm_size or (
        'Standard_D2s_v3' if rp_mode_development() else 'Standard_D4s_v3')

    if apiserver_visibility is not None:
        apiserver_visibility = apiserver_visibility.capitalize()
    if ingress_visibility is not None:
        ingress_visibility = ingress_visibility.capitalize()

    oc = v2020_04_30.OpenShiftCluster(
        location=location,
        tags=tags,
        cluster_profile=v2020_04_30.ClusterProfile(
            pull_secret=pull_secret or "",
            domain=domain or random_id,
            resource_group_id='/subscriptions/%s/resourceGroups/%s' %
            (subscription_id, cluster_resource_group or "aro-" + random_id),
        ),
        service_principal_profile=v2020_04_30.ServicePrincipalProfile(
            client_id=client_id,
            client_secret=client_secret,
        ),
        network_profile=v2020_04_30.NetworkProfile(
            pod_cidr=pod_cidr or '10.128.0.0/14',
            service_cidr=service_cidr or '172.30.0.0/16',
        ),
        master_profile=v2020_04_30.MasterProfile(
            vm_size=master_vm_size or 'Standard_D8s_v3',
            subnet_id=master_subnet,
        ),
        worker_profiles=[
            v2020_04_30.WorkerProfile(
                name='worker',  # TODO: 'worker' should not be hard-coded
                vm_size=worker_vm_size,
                disk_size_gb=worker_vm_disk_size_gb or 128,
                subnet_id=worker_subnet,
                count=worker_count or 3,
            )
        ],
        apiserver_profile=v2020_04_30.APIServerProfile(
            visibility=apiserver_visibility or 'Public',
        ),
        ingress_profiles=[
            v2020_04_30.IngressProfile(
                name='default',  # TODO: 'default' should not be hard-coded
                visibility=ingress_visibility or 'Public',
            )
        ],
    )

    return sdk_no_wait(no_wait, client.create_or_update,
                       resource_group_name=resource_group_name,
                       resource_name=resource_name,
                       parameters=oc)
def aro_create(cmd,  # pylint: disable=too-many-locals
               client,
               resource_group_name,
               resource_name,
               master_subnet,
               worker_subnet,
               vnet=None,
               vnet_resource_group_name=None,  # pylint: disable=unused-argument
               location=None,
               client_id=None,
               client_secret=None,
               pod_cidr=None,
               service_cidr=None,
               master_vm_size=None,
               worker_vm_size=None,
               worker_vm_disk_size_gb=None,
               worker_count=None,
               tags=None,
               no_wait=False):
    """Create an ARO cluster (early preview flow).

    Creates a managed AAD application/service principal for the cluster when
    none is supplied, grants Contributor on the vnet to both the cluster SP
    and the RP SP, then submits the cluster document.
    """
    vnet = validate_subnets(master_subnet, worker_subnet)

    subscription_id = get_subscription_id(cmd.cli_ctx)

    aad = AADManager(cmd.cli_ctx)
    if client_id is None:
        # Managed application name is derived from the cluster's identity.
        app, client_secret = aad.createManagedApplication(
            "aro-%s-%s-%s" % (subscription_id, resource_group_name, resource_name))
        client_id = app.app_id

    client_sp = aad.getServicePrincipal(client_id) or aad.createServicePrincipal(client_id)

    rp_client_id = os.environ['AZURE_FP_CLIENT_ID'] if rp_mode_development() else FP_CLIENT_ID
    rp_client_sp = aad.getServicePrincipal(rp_client_id)

    for sp_object_id in (client_sp.object_id, rp_client_sp.object_id):
        assign_contributor_to_vnet(cmd.cli_ctx, vnet, sp_object_id)

    oc = v2019_12_31_preview.OpenShiftCluster(
        location=location,
        tags=tags,
        service_principal_profile=v2019_12_31_preview.ServicePrincipalProfile(
            client_id=client_id,
            client_secret=client_secret,
        ),
        network_profile=v2019_12_31_preview.NetworkProfile(
            pod_cidr=pod_cidr or "10.128.0.0/14",
            service_cidr=service_cidr or "172.30.0.0/16",
        ),
        master_profile=v2019_12_31_preview.MasterProfile(
            vm_size=master_vm_size or "Standard_D8s_v3",
            subnet_id=master_subnet,
        ),
        worker_profiles=[
            v2019_12_31_preview.WorkerProfile(
                name="worker",  # TODO: "worker" should not be hard-coded
                vm_size=worker_vm_size or "Standard_D2s_v3",
                disk_size_gb=worker_vm_disk_size_gb or 128,
                subnet_id=worker_subnet,
                count=worker_count or 3,
            )
        ])

    return sdk_no_wait(no_wait, client.create_or_update,
                       resource_group_name=resource_group_name,
                       resource_name=resource_name,
                       parameters=oc)