class RbacProvider(object):
    def __init__(self):
        self.cli = EmbeddedCLI()

    def list_assignments(self, dt_scope, include_inherited=False, role_type=None):
        include_inherited_flag = ""
        filter_role_type = ""
        if include_inherited:
            include_inherited_flag = "--include-inherited"
        if role_type:
            filter_role_type = "--role '{}'".format(role_type)

        list_op = self.cli.invoke(
            "role assignment list --scope '{}' {} {}".format(
                dt_scope, filter_role_type, include_inherited_flag))
        if not list_op.success():
            raise CLIError("Unable to determine assignments.")
        return list_op.as_json()

    def assign_role(self, dt_scope, assignee, role_type):
        assign_op = self.cli.invoke(
            "role assignment create --scope '{}' --role '{}' --assignee '{}'".format(
                dt_scope, role_type, assignee))
        if not assign_op.success():
            raise CLIError("Unable to assign role.")
        return assign_op.as_json()

    def remove_role(self, dt_scope, assignee, role_type=None):
        filter_role_type = ""
        if role_type:
            filter_role_type = "--role '{}'".format(role_type)

        delete_op = self.cli.invoke(
            "role assignment delete --scope '{}' --assignee '{}' {}".format(
                dt_scope, assignee, filter_role_type))
        if not delete_op.success():
            raise CLIError("Unable to remove role assignment.")
        return

    def assign_role_flex(self, principal_id, scope, principal_type="ServicePrincipal", role_type="Contributor"):
        assign_op = self.cli.invoke(
            "role assignment create --scope '{}' --role '{}' "
            "--assignee-object-id '{}' --assignee-principal-type '{}'".format(
                scope, role_type, principal_id, principal_type))
        if not assign_op.success():
            raise CLIError("Unable to assign role.")
        return assign_op.as_json()
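A minimal usage sketch of the provider above; the Digital Twins scope and assignee values are hypothetical placeholders, and the class is assumed to be importable alongside EmbeddedCLI and CLIError:

# Hypothetical usage sketch: the scope and assignee below are placeholders, not real resources.
# RbacProvider simply shells out to `az role assignment ...` via EmbeddedCLI.
provider = RbacProvider()
dt_scope = (
    "/subscriptions/<subscription-id>/resourceGroups/<rg>"
    "/providers/Microsoft.DigitalTwins/digitalTwinsInstances/<instance-name>"
)
# List existing assignments, including those inherited from parent scopes.
assignments = provider.list_assignments(dt_scope, include_inherited=True)
# Grant the data-plane owner role to a user principal.
provider.assign_role(
    dt_scope,
    assignee="user@example.com",
    role_type="Azure Digital Twins Data Owner",
)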
def __init__(self, test_scenario):
    assert test_scenario
    super(DTLiveScenarioTest, self).__init__(test_scenario)
    self.settings = DynamoSettings(
        opt_env_set=["azext_iot_testrg", "azext_dt_region"])
    self.embedded_cli = EmbeddedCLI()
    self._bootup_scenario()
def __init__(self, test_scenario):
    assert test_scenario
    os.environ["AZURE_CORE_COLLECT_TELEMETRY"] = "no"
    super(DTLiveScenarioTest, self).__init__(test_scenario)
    self.settings = DynamoSettings(
        opt_env_set=["azext_iot_testrg", "azext_dt_resource_location"])
    self.embedded_cli = EmbeddedCLI()
    self._bootup_scenario()
def __init__(self, _):
    super(TestPnPModelLifecycle, self).__init__(_)
    account_settings = EmbeddedCLI().invoke(
        "account show").as_json()["user"]
    repo_id = (EmbeddedCLI().invoke(
        "iot pnp repo list --pnp-dns-suffix {}".format(
            _pnp_dns_suffix)).as_json()[0]["tenantId"])
    self.kwargs.update({
        "model": "test_model_definition.json",
        "user_id": account_settings["name"],
        "user_type": account_settings["type"],
        "repo_id": repo_id,
        "pnp_dns_suffix": _pnp_dns_suffix,
    })
def create(self, name, resource_group_name, location=None, tags=None, timeout=20):
    if tags:
        tags = validate_key_value_pairs(tags)

    if not location:
        from azext_iot.common.embedded_cli import EmbeddedCLI
        resource_group_meta = EmbeddedCLI().invoke(
            "group show --name {}".format(resource_group_name)).as_json()
        location = resource_group_meta["location"]

    try:
        return self.mgmt_sdk.digital_twins.create_or_update(
            resource_name=name,
            resource_group_name=resource_group_name,
            location=location,
            tags=tags,
            long_running_operation_timeout=timeout,
        )
    except CloudError as e:
        raise e
    except ErrorResponseException as err:
        raise CLIError(unpack_msrest_error(err))
def test_embedded_cli(self, mocked_azclient, command, subscription):
    import shlex
    cli = EmbeddedCLI()
    cli.invoke(command=command, subscription=subscription)

    # Due to forced json output
    command += " -o json"
    if subscription:
        command += " --subscription '{}'".format(subscription)

    expected_args = shlex.split(command)
    call = mocked_azclient().invoke.call_args_list[0]
    actual_args, _ = call
    assert expected_args == actual_args[0]

    success = cli.success()
    if mocked_azclient.test_meta.error_code == 1:
        assert not success
    elif mocked_azclient.test_meta.error_code == 0:
        assert success
        assert cli.output
        assert cli.as_json()
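For reference, a standalone sketch of the argument shaping this test asserts; build_cli_args is a hypothetical helper that only mirrors the expectation that EmbeddedCLI forces JSON output and appends the optional subscription before shlex-splitting:

import shlex

def build_cli_args(command: str, subscription: str = None) -> list:
    # Hypothetical helper mirroring the test's expectation:
    # JSON output is always forced, and a subscription is appended when given.
    command += " -o json"
    if subscription:
        command += " --subscription '{}'".format(subscription)
    return shlex.split(command)

# build_cli_args("account show", "my-sub")
# -> ['account', 'show', '-o', 'json', '--subscription', 'my-sub']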
def setUp(self):
    if self._testMethodName == "test_repo_rbac":
        # check for TenantAdministrator
        try:
            repo_id = (EmbeddedCLI().invoke(
                "iot pnp repo list --pnp-dns-suffix {}".format(
                    _pnp_dns_suffix)).as_json()[0]["tenantId"])
            roles = self.cmd(
                "iot pnp role-assignment list --resource-id {0} --resource-type Tenant --subject-id {1} "
                "--pnp-dns-suffix {2}".format(repo_id, self.user_id, _pnp_dns_suffix))
            roles = roles.get_output_in_json()
            role_assignments = list(
                map(lambda role: role["subject"]["role"], roles))
            if RoleIdentifier.tenantAdmin.value not in role_assignments:
                self.skipTest("Need TenantAdmin role to perform test")
        except CLIError as e:
            self.skipTest(e)
class DTLiveScenarioTest(LiveScenarioTest):
    role_map = {
        "owner": "Azure Digital Twins Data Owner",
        "reader": "Azure Digital Twins Data Reader",
    }

    def __init__(self, test_scenario):
        assert test_scenario
        super(DTLiveScenarioTest, self).__init__(test_scenario)
        self.settings = DynamoSettings(
            opt_env_set=["azext_iot_testrg", "azext_dt_region"])
        self.embedded_cli = EmbeddedCLI()
        self._bootup_scenario()

    def _bootup_scenario(self):
        self._is_provider_registered()
        self._init_basic_env_vars()
        self.tracked_instances = []

    def _is_provider_registered(self):
        result = self.cmd(
            "provider show --namespace 'Microsoft.DigitalTwins' --query 'registrationState'"
        )
        if '"registered"' in result.output.lower():
            return
        pytest.skip(
            "Microsoft.DigitalTwins provider not registered. "
            "Run 'az provider register --namespace Microsoft.DigitalTwins'")

    def _init_basic_env_vars(self):
        self._force_region = self.settings.env.azext_dt_region
        if self._force_region and not self.is_region_available(self._force_region):
            raise RuntimeError(
                "Forced region: {} does not have capacity.".format(
                    self._force_region))
        self.region = (self._force_region
                       if self._force_region else self.get_available_region())

        self.rg = self.settings.env.azext_iot_testrg
        if not self.rg:
            pytest.skip(
                "Digital Twins CLI tests require at least 'azext_iot_testrg' for resource deployment."
            )
        self.rg_region = self.embedded_cli.invoke(
            "group show --name {}".format(self.rg)).as_json()["location"]

    @property
    def current_user(self):
        return self.embedded_cli.invoke(
            "account show").as_json()["user"]["name"]

    @property
    def current_subscription(self):
        return self.embedded_cli.invoke("account show").as_json()["id"]

    def wait_for_capacity(self, region=None, capacity: int = 1,
                          wait_in_sec: int = 10, interval: int = 3):
        from time import sleep
        target_region = region
        if not target_region:
            target_region = self.region

        if self.is_region_available(region=target_region, capacity=capacity):
            return

        while interval >= 1:
            logger.info("Waiting: {} (sec) for capacity.".format(wait_in_sec))
            sleep(wait_in_sec)
            if self.is_region_available(region=target_region, capacity=capacity):
                return
            interval = interval - 1

        raise RuntimeError(
            "Unavailable region DT capacity. wait(sec): {}, interval: {}, region: {}, capacity: {}"
            .format(wait_in_sec, interval, target_region, capacity))

    def is_region_available(self, region, capacity: int = 1):
        region_capacity = self.calculate_region_capacity
        return (region_capacity.get(region, 0) + capacity) <= REGION_RESOURCE_LIMIT

    @property
    def calculate_region_capacity(self) -> dict:
        instances = self.instances = self.embedded_cli.invoke("dt list").as_json()
        capacity_map = {}
        for instance in instances:
            cap_val = capacity_map.get(instance["location"], 0)
            cap_val = cap_val + 1
            capacity_map[instance["location"]] = cap_val

        for region in REGION_LIST:
            if region not in capacity_map:
                capacity_map[region] = 0

        return capacity_map

    def get_available_region(self, capacity: int = 1, skip_regions: list = None) -> str:
        if not skip_regions:
            skip_regions = []
        region_capacity = self.calculate_region_capacity
        while region_capacity:
            region = min(region_capacity, key=region_capacity.get)
            if region not in skip_regions:
                if region_capacity[region] + capacity <= REGION_RESOURCE_LIMIT:
                    return region
            region_capacity.pop(region, None)
        raise RuntimeError(
            "There are no available regions with capacity: {} to provision DT instances in subscription: {}"
            .format(capacity, self.current_subscription))

    def track_instance(self, instance: dict):
        self.tracked_instances.append(
            (instance["name"], instance["resourceGroup"]))

    def tearDown(self):
        for instance in self.tracked_instances:
            self.embedded_cli.invoke(
                "dt delete -n {} -g {} -y --no-wait".format(
                    instance[0], instance[1]))

    # Needed because the DT service will indicate provisioning is finished before it actually is.
    def wait_for_hostname(self, instance: dict, wait_in_sec: int = 10, interval: int = 7):
        from time import sleep
        sleep(wait_in_sec)
        self.embedded_cli.invoke(
            "dt wait -n {} -g {} --custom \"hostName && provisioningState=='Succeeded'\" --interval {} --timeout {}"
            .format(instance["name"], instance["resourceGroup"],
                    wait_in_sec, wait_in_sec * interval))

        refreshed_instance = self.embedded_cli.invoke(
            "dt show -n {} -g {}".format(
                instance["name"], instance["resourceGroup"])).as_json()
        return refreshed_instance if refreshed_instance else instance
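A self-contained sketch of the least-loaded-region selection performed by get_available_region above; the capacity map, resource limit, and printed result are illustrative values, not real subscription data:

# Illustrative sketch of get_available_region's selection strategy.
# The capacity map, limit, and skip list are made-up values for demonstration.
REGION_RESOURCE_LIMIT = 10

def pick_region(region_capacity: dict, capacity: int = 1, skip_regions: list = None) -> str:
    skip_regions = skip_regions or []
    region_capacity = dict(region_capacity)  # work on a copy
    while region_capacity:
        # Always try the least-loaded region first.
        region = min(region_capacity, key=region_capacity.get)
        if region not in skip_regions and region_capacity[region] + capacity <= REGION_RESOURCE_LIMIT:
            return region
        region_capacity.pop(region, None)
    raise RuntimeError("No region has capacity for {} more instance(s).".format(capacity))

print(pick_region({"westus2": 10, "westcentralus": 3, "eastus": 7}, capacity=2))
# -> 'westcentralus'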
class DTLiveScenarioTest(LiveScenarioTest):
    role_map = {
        "owner": "Azure Digital Twins Data Owner",
        "reader": "Azure Digital Twins Data Reader",
    }

    def __init__(self, test_scenario):
        assert test_scenario
        os.environ["AZURE_CORE_COLLECT_TELEMETRY"] = "no"
        super(DTLiveScenarioTest, self).__init__(test_scenario)
        self.settings = DynamoSettings(
            opt_env_set=["azext_iot_testrg", "azext_dt_resource_location"]
        )
        self.embedded_cli = EmbeddedCLI()
        self._bootup_scenario()

    def _bootup_scenario(self):
        self._is_provider_registered()
        self._init_basic_env_vars()

    def _is_provider_registered(self):
        result = self.cmd(
            "provider show --namespace 'Microsoft.DigitalTwins' --query 'registrationState'"
        )
        if '"registered"' in result.output.lower():
            return
        pytest.skip(
            "Microsoft.DigitalTwins provider not registered. "
            "Run 'az provider register --namespace Microsoft.DigitalTwins'"
        )

    def _init_basic_env_vars(self):
        self._location = self.settings.env.azext_dt_resource_location
        if not self._location:
            self._location = "westus2"

        self._rg = self.settings.env.azext_iot_testrg
        if not self._rg:
            pytest.skip(
                "Digital Twins CLI tests require at least 'azext_iot_testrg' for resource deployment."
            )
        self._rg_loc = self.embedded_cli.invoke(
            "group show --name {}".format(self._rg)
        ).as_json()["location"]

    @property
    def current_user(self):
        return self.embedded_cli.invoke("account show").as_json()["user"]["name"]

    @property
    def current_subscription(self):
        return self.embedded_cli.invoke("account show").as_json()["id"]

    @property
    def dt_location(self):
        return self._location

    @dt_location.setter
    def dt_location(self, value):
        self._location = value

    @property
    def dt_resource_group(self):
        return self._rg

    @dt_resource_group.setter
    def dt_resource_group(self, value):
        self._rg = value

    @property
    def dt_resource_group_loc(self):
        return self._rg_loc
def add_endpoint(
    self,
    name,
    endpoint_name,
    endpoint_resource_type,
    endpoint_resource_name,
    endpoint_resource_group,
    endpoint_resource_policy=None,
    endpoint_resource_namespace=None,
    endpoint_subscription=None,
    dead_letter_endpoint=None,
    tags=None,
    resource_group_name=None,
    timeout=20,
):
    from azext_iot.common.embedded_cli import EmbeddedCLI
    from azext_iot.digitaltwins.common import ADTEndpointType

    requires_policy = [ADTEndpointType.eventhub, ADTEndpointType.servicebus]
    if endpoint_resource_type in requires_policy:
        if not endpoint_resource_policy:
            raise CLIError(
                "Endpoint resources of type {} require a policy name.".format(
                    " or ".join(map(str, requires_policy))
                )
            )
        if not endpoint_resource_namespace:
            raise CLIError(
                "Endpoint resources of type {} require a namespace.".format(
                    " or ".join(map(str, requires_policy))
                )
            )

    target_instance = self.find_instance(
        name=name, resource_group_name=resource_group_name
    )
    if not resource_group_name:
        resource_group_name = self.get_rg(target_instance)

    cli = EmbeddedCLI()
    error_prefix = "Could not create ADT instance endpoint. Unable to retrieve"
    properties = {}

    if endpoint_resource_type == ADTEndpointType.eventgridtopic:
        eg_topic_keys_op = cli.invoke(
            "eventgrid topic key list -n {} -g {}".format(
                endpoint_resource_name, endpoint_resource_group
            ),
            subscription=endpoint_subscription,
        )
        if not eg_topic_keys_op.success():
            raise CLIError("{} Event Grid topic keys.".format(error_prefix))
        eg_topic_keys = eg_topic_keys_op.as_json()

        eg_topic_endpoint_op = cli.invoke(
            "eventgrid topic show -n {} -g {}".format(
                endpoint_resource_name, endpoint_resource_group
            ),
            subscription=endpoint_subscription,
        )
        if not eg_topic_endpoint_op.success():
            raise CLIError("{} Event Grid topic endpoint.".format(error_prefix))
        eg_topic_endpoint = eg_topic_endpoint_op.as_json()

        properties = EventGridEndpointProperties(
            access_key1=eg_topic_keys["key1"],
            access_key2=eg_topic_keys["key2"],
            dead_letter_secret=dead_letter_endpoint,
            topic_endpoint=eg_topic_endpoint["endpoint"],
        )
    elif endpoint_resource_type == ADTEndpointType.servicebus:
        sb_topic_keys_op = cli.invoke(
            "servicebus topic authorization-rule keys list -n {} "
            "--namespace-name {} -g {} --topic-name {}".format(
                endpoint_resource_policy,
                endpoint_resource_namespace,
                endpoint_resource_group,
                endpoint_resource_name,
            ),
            subscription=endpoint_subscription,
        )
        if not sb_topic_keys_op.success():
            raise CLIError("{} Service Bus topic keys.".format(error_prefix))
        sb_topic_keys = sb_topic_keys_op.as_json()

        properties = ServiceBusEndpointProperties(
            primary_connection_string=sb_topic_keys["primaryConnectionString"],
            secondary_connection_string=sb_topic_keys["secondaryConnectionString"],
            dead_letter_secret=dead_letter_endpoint,
        )
    elif endpoint_resource_type == ADTEndpointType.eventhub:
        eventhub_topic_keys_op = cli.invoke(
            "eventhubs eventhub authorization-rule keys list -n {} "
            "--namespace-name {} -g {} --eventhub-name {}".format(
                endpoint_resource_policy,
                endpoint_resource_namespace,
                endpoint_resource_group,
                endpoint_resource_name,
            ),
            subscription=endpoint_subscription,
        )
        if not eventhub_topic_keys_op.success():
            raise CLIError("{} Event Hub keys.".format(error_prefix))
        eventhub_topic_keys = eventhub_topic_keys_op.as_json()

        properties = EventHubEndpointProperties(
            connection_string_primary_key=eventhub_topic_keys["primaryConnectionString"],
            connection_string_secondary_key=eventhub_topic_keys["secondaryConnectionString"],
            dead_letter_secret=dead_letter_endpoint,
        )

    try:
        return self.mgmt_sdk.digital_twins_endpoint.create_or_update(
            resource_name=target_instance.name,
            resource_group_name=resource_group_name,
            endpoint_name=endpoint_name,
            properties=properties,
            long_running_operation_timeout=timeout,
        )
    except ErrorResponseException as e:
        raise CLIError(unpack_msrest_error(e))
def __init__(self):
    self.cli = EmbeddedCLI()
def create(
    self,
    name,
    resource_group_name,
    location=None,
    tags=None,
    timeout=60,
    assign_identity=None,
    scopes=None,
    role_type="Contributor",
    public_network_access=ADTPublicNetworkAccessType.enabled.value,
):
    if not location:
        from azext_iot.common.embedded_cli import EmbeddedCLI
        resource_group_meta = (
            EmbeddedCLI()
            .invoke("group show --name {}".format(resource_group_name))
            .as_json()
        )
        location = resource_group_meta["location"]

    try:
        if assign_identity:
            if scopes and not role_type:
                raise CLIError(
                    "Both --scopes and --role values are required when assigning the instance identity."
                )

        digital_twins_create = DigitalTwinsDescription(
            location=location,
            tags=tags,
            identity={"type": "SystemAssigned" if assign_identity else "None"},
            public_network_access=public_network_access,
        )
        create_or_update = self.mgmt_sdk.digital_twins.create_or_update(
            resource_name=name,
            resource_group_name=resource_group_name,
            digital_twins_create=digital_twins_create,
            long_running_operation_timeout=timeout,
        )

        def rbac_handler(lro):
            instance = lro.resource().as_dict()
            identity = instance.get("identity")
            if identity:
                identity_type = identity.get("type")
                principal_id = identity.get("principal_id")

                if (
                    principal_id
                    and scopes
                    and identity_type
                    and identity_type.lower() == "systemassigned"
                ):
                    for scope in scopes:
                        logger.info(
                            "Applying rbac assignment: Principal Id: {}, Scope: {}, Role: {}".format(
                                principal_id, scope, role_type
                            )
                        )
                        self.rbac.assign_role_flex(
                            principal_id=principal_id,
                            scope=scope,
                            role_type=role_type,
                        )

        create_or_update.add_done_callback(rbac_handler)
        return create_or_update
    except CloudError as e:
        raise e
    except ErrorResponseException as err:
        raise CLIError(unpack_msrest_error(err))
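A self-contained sketch of the LRO done-callback pattern used above to defer RBAC assignment until provisioning completes; FakePoller and its payload are invented stand-ins for the SDK poller, not part of the extension:

# Illustrative sketch of attaching post-provisioning work to a long-running operation
# via a done callback. FakePoller stands in for the real msrest LROPoller.
class FakePoller(object):
    def __init__(self, result_dict):
        self._result = result_dict
        self._callbacks = []

    def add_done_callback(self, func):
        self._callbacks.append(func)

    def wait(self):
        # In the real SDK the callbacks fire when polling completes.
        for func in self._callbacks:
            func(self)

    def resource(self):
        return self._result


def rbac_on_done(lro):
    # Only act once the identity's principal id is known.
    instance = lro.resource()
    identity = instance.get("identity") or {}
    if identity.get("principal_id"):
        print("Would assign roles to principal:", identity["principal_id"])


poller = FakePoller({"identity": {"type": "SystemAssigned", "principal_id": "0000-1111"}})
poller.add_done_callback(rbac_on_done)
poller.wait()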
def __init__(self, test_case):
    account = EmbeddedCLI().invoke("account show").as_json()
    self.user_id = account["user"]["name"]
    self.user_type = account["user"]["type"]
    super(TestPNPRepo, self).__init__(test_case)
def add_endpoint(
    self,
    name,
    endpoint_name,
    endpoint_resource_type,
    endpoint_resource_name,
    endpoint_resource_group,
    endpoint_resource_policy=None,
    endpoint_resource_namespace=None,
    tags=None,
    resource_group_name=None,
    timeout=20,
):
    from azext_iot.common.embedded_cli import EmbeddedCLI
    from azext_iot.digitaltwins.common import ADTEndpointType

    requires_policy = [
        ADTEndpointType.eventhub,
        ADTEndpointType.servicebus
    ]
    if endpoint_resource_type in requires_policy:
        if not endpoint_resource_policy:
            raise CLIError(
                "Endpoint resources of type {} require a policy name.".format(
                    " or ".join(map(str, requires_policy))))
        if not endpoint_resource_namespace:
            raise CLIError(
                "Endpoint resources of type {} require a namespace.".format(
                    " or ".join(map(str, requires_policy))))

    if tags:
        tags = validate_key_value_pairs(tags)

    target_instance = self.find_instance(
        name=name, resource_group_name=resource_group_name)
    if not resource_group_name:
        resource_group_name = self.get_rg(target_instance)

    payload = {"tags": tags}
    cli = EmbeddedCLI()
    error_prefix = "Could not create ADT instance endpoint. Unable to retrieve"

    if endpoint_resource_type == ADTEndpointType.eventgridtopic:
        eg_topic_keys_op = cli.invoke(
            "eventgrid topic key list -n {} -g {}".format(
                endpoint_resource_name, endpoint_resource_group))
        if not eg_topic_keys_op.success():
            raise CLIError(
                "{} Event Grid topic keys.".format(error_prefix))
        eg_topic_keys = eg_topic_keys_op.as_json()

        eg_topic_endpoint_op = cli.invoke(
            "eventgrid topic show -n {} -g {}".format(
                endpoint_resource_name, endpoint_resource_group))
        if not eg_topic_endpoint_op.success():
            raise CLIError(
                "{} Event Grid topic endpoint.".format(error_prefix))
        eg_topic_endpoint = eg_topic_endpoint_op.as_json()

        payload["endpointType"] = "EventGrid"
        payload["accessKey1"] = eg_topic_keys["key1"]
        payload["accessKey2"] = eg_topic_keys["key2"]
        payload["TopicEndpoint"] = eg_topic_endpoint["endpoint"]
    elif endpoint_resource_type == ADTEndpointType.servicebus:
        sb_topic_keys_op = cli.invoke(
            "servicebus topic authorization-rule keys list -n {} "
            "--namespace-name {} -g {} --topic-name {}".format(
                endpoint_resource_policy,
                endpoint_resource_namespace,
                endpoint_resource_group,
                endpoint_resource_name,
            ))
        if not sb_topic_keys_op.success():
            raise CLIError(
                "{} Service Bus topic keys.".format(error_prefix))
        sb_topic_keys = sb_topic_keys_op.as_json()

        payload["endpointType"] = "ServiceBus"
        payload["primaryConnectionString"] = sb_topic_keys[
            "primaryConnectionString"]
        payload["secondaryConnectionString"] = sb_topic_keys[
            "secondaryConnectionString"]
    elif endpoint_resource_type == ADTEndpointType.eventhub:
        eventhub_topic_keys_op = cli.invoke(
            "eventhubs eventhub authorization-rule keys list -n {} "
            "--namespace-name {} -g {} --eventhub-name {}".format(
                endpoint_resource_policy,
                endpoint_resource_namespace,
                endpoint_resource_group,
                endpoint_resource_name,
            ))
        if not eventhub_topic_keys_op.success():
            raise CLIError("{} Event Hub keys.".format(error_prefix))
        eventhub_topic_keys = eventhub_topic_keys_op.as_json()

        payload["endpointType"] = "EventHub"
        payload["connectionString-PrimaryKey"] = eventhub_topic_keys[
            "primaryConnectionString"]
        payload["connectionString-SecondaryKey"] = eventhub_topic_keys[
            "secondaryConnectionString"]

    properties = {"properties": payload}

    try:
        return self.mgmt_sdk.digital_twins_endpoint.create_or_update(
            resource_name=target_instance.name,
            resource_group_name=resource_group_name,
            endpoint_name=endpoint_name,
            properties=properties,
            long_running_operation_timeout=timeout,
        )
    except ErrorResponseException as e:
        raise CLIError(unpack_msrest_error(e))