def ensure_resource_permissions(cli_ctx, oc, fail, sp_obj_ids):
    """Ensure each given service principal holds Network Contributor on the
    cluster's network resources.

    :param cli_ctx: Azure CLI context used for management-plane calls.
    :param oc: OpenShift cluster object whose network resources are inspected.
    :param fail: when True, errors are logged at error level and re-raised;
        when False, errors are logged at info level and treated as best-effort.
    :param sp_obj_ids: iterable of service principal object IDs to check.
    """
    try:
        # Resolve the network resources the role assignments must cover.
        resources = get_cluster_network_resources(cli_ctx, oc)
    except (CloudError, HttpOperationError) as ex:
        if fail:
            logger.error(ex.message)
            raise
        logger.info(ex.message)
        return

    for object_id in sp_obj_ids:
        for resource in sorted(resources):
            # Create the role assignment only when we can positively confirm
            # it is missing; if the lookup itself fails we assume it exists
            # rather than risk creating a duplicate assignment.
            try:
                already_assigned = has_network_contributor_on_resource(cli_ctx, resource, object_id)
            except CloudError as ex:
                if fail:
                    logger.error(ex.message)
                    raise
                logger.info(ex.message)
                continue
            if not already_assigned:
                assign_network_contributor_to_resource(cli_ctx, resource, object_id)
def aro_delete(cmd, client, resource_group_name, resource_name, no_wait=False):
    """Delete an Azure Red Hat OpenShift cluster.

    Before issuing the delete, best-effort repair the RP service principal's
    Network Contributor role assignments on the cluster's network resources,
    since customers frequently remove them; every failure along that path is
    logged at info level and ignored so the delete itself still proceeds.

    :param cmd: CLI command context (provides `cli_ctx`).
    :param client: OpenShiftClusters management client.
    :param resource_group_name: resource group containing the cluster.
    :param resource_name: name of the cluster resource.
    :param no_wait: when True, do not wait for the long-running delete.
    :return: result of `sdk_no_wait` wrapping `client.delete`.
    """
    # TODO: clean up rbac
    rp_client_sp = None
    oc = None

    # Best effort - if the cluster cannot be fetched we skip permission repair.
    try:
        oc = client.get(resource_group_name, resource_name)
    except (CloudError, HttpOperationError) as e:
        logger.info(e.message)

    aad = AADManager(cmd.cli_ctx)

    if rp_mode_production():
        rp_client_id = FP_CLIENT_ID
    else:
        rp_client_id = os.environ.get('AZURE_FP_CLIENT_ID', FP_CLIENT_ID)

    # Best effort - assume the role assignments on the SP exist if exception raised
    try:
        rp_client_sp = aad.get_service_principal(rp_client_id)
        if not rp_client_sp:
            raise ResourceNotFoundError("RP service principal not found.")
    except GraphErrorException as e:
        logger.info(e.message)

    # Customers frequently remove the Cluster or RP's service principal permissions.
    # Attempt to fix this before performing any action against the cluster.
    # Delegate to the shared helper (fail=False keeps this best-effort) instead
    # of duplicating its lookup/assign loop inline.
    if rp_client_sp and oc:
        ensure_resource_permissions(cmd.cli_ctx, oc, False, [rp_client_sp.object_id])

    return sdk_no_wait(no_wait, client.delete,
                       resource_group_name=resource_group_name,
                       resource_name=resource_name)
def aro_create(cmd,  # pylint: disable=too-many-locals
               client,
               resource_group_name,
               resource_name,
               master_subnet,
               worker_subnet,
               vnet=None,
               vnet_resource_group_name=None,  # pylint: disable=unused-argument
               location=None,
               pull_secret=None,
               domain=None,
               cluster_resource_group=None,
               client_id=None,
               client_secret=None,
               pod_cidr=None,
               service_cidr=None,
               master_vm_size=None,
               worker_vm_size=None,
               worker_vm_disk_size_gb=None,
               worker_count=None,
               apiserver_visibility=None,
               ingress_visibility=None,
               tags=None,
               no_wait=False):
    """Create an Azure Red Hat OpenShift cluster.

    Validates prerequisites (resource provider registration outside development
    mode), sets up the cluster and RP service principals with Network
    Contributor on the subnets' network resources, fills in defaults for any
    omitted profile values, and submits the create/update request.

    :return: result of `sdk_no_wait` wrapping `client.create_or_update`.
    """
    # Outside development mode the RedHatOpenShift provider must be registered
    # on the subscription before a cluster can be created.
    if not rp_mode_development():
        resource_client = get_mgmt_service_client(
            cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
        provider = resource_client.providers.get('Microsoft.RedHatOpenShift')
        if provider.registration_state != 'Registered':
            raise UnauthorizedError(
                'Microsoft.RedHatOpenShift provider is not registered.',
                'Run `az provider register -n Microsoft.RedHatOpenShift --wait`.')

    # Both subnets must live in the same vnet; derive it from them.
    vnet = validate_subnets(master_subnet, worker_subnet)
    resources = get_network_resources(cmd.cli_ctx, [master_subnet, worker_subnet], vnet)

    subscription_id = get_subscription_id(cmd.cli_ctx)
    random_id = generate_random_id()

    aad = AADManager(cmd.cli_ctx)
    if client_id is None:
        # No cluster SP supplied: create a fresh AAD application for it.
        app, client_secret = aad.create_application(cluster_resource_group or 'aro-' + random_id)
        client_id = app.app_id

    client_sp = aad.get_service_principal(client_id)
    if not client_sp:
        client_sp = aad.create_service_principal(client_id)

    rp_client_id = FP_CLIENT_ID if rp_mode_production() \
        else os.environ.get('AZURE_FP_CLIENT_ID', FP_CLIENT_ID)

    rp_client_sp = aad.get_service_principal(rp_client_id)
    if not rp_client_sp:
        raise ResourceNotFoundError("RP service principal not found.")

    # Grant Network Contributor on every network resource to both the cluster
    # SP and the RP SP; failures here propagate to the caller.
    for sp_id in (client_sp.object_id, rp_client_sp.object_id):
        for resource in sorted(resources):
            if not has_network_contributor_on_resource(cmd.cli_ctx, resource, sp_id):
                assign_network_contributor_to_resource(cmd.cli_ctx, resource, sp_id)

    worker_vm_size = worker_vm_size or \
        ('Standard_D2s_v3' if rp_mode_development() else 'Standard_D4s_v3')

    # The API expects 'Public'/'Private'; normalize user-supplied casing.
    if apiserver_visibility is not None:
        apiserver_visibility = apiserver_visibility.capitalize()
    if ingress_visibility is not None:
        ingress_visibility = ingress_visibility.capitalize()

    oc = openshiftcluster.OpenShiftCluster(
        location=location,
        tags=tags,
        cluster_profile=openshiftcluster.ClusterProfile(
            pull_secret=pull_secret or "",
            domain=domain or random_id,
            resource_group_id='/subscriptions/%s/resourceGroups/%s' %
            (subscription_id, cluster_resource_group or "aro-" + random_id),
        ),
        service_principal_profile=openshiftcluster.ServicePrincipalProfile(
            client_id=client_id,
            client_secret=client_secret,
        ),
        network_profile=openshiftcluster.NetworkProfile(
            pod_cidr=pod_cidr or '10.128.0.0/14',
            service_cidr=service_cidr or '172.30.0.0/16',
        ),
        master_profile=openshiftcluster.MasterProfile(
            vm_size=master_vm_size or 'Standard_D8s_v3',
            subnet_id=master_subnet,
        ),
        worker_profiles=[
            openshiftcluster.WorkerProfile(
                name='worker',  # TODO: 'worker' should not be hard-coded
                vm_size=worker_vm_size,
                disk_size_gb=worker_vm_disk_size_gb or 128,
                subnet_id=worker_subnet,
                count=worker_count or 3,
            )
        ],
        apiserver_profile=openshiftcluster.APIServerProfile(
            visibility=apiserver_visibility or 'Public',
        ),
        ingress_profiles=[
            openshiftcluster.IngressProfile(
                name='default',  # TODO: 'default' should not be hard-coded
                visibility=ingress_visibility or 'Public',
            )
        ],
    )

    return sdk_no_wait(no_wait, client.create_or_update,
                       resource_group_name=resource_group_name,
                       resource_name=resource_name,
                       parameters=oc)