Code Example #1
def delete_func(cmd, resource_group_name, resource_name, item_name, no_wait=False):  # pylint: disable=unused-argument
    # 'prop' is not defined in this snippet: the function appears to be generated
    # inside an enclosing factory that binds 'prop' to the list property to edit.
    client = cf_frontdoor(cmd.cli_ctx, None)
    item = client.get(resource_group_name, resource_name)
    keep_items = [x for x in getattr(item, prop) if x.name.lower() != item_name.lower()]
    with UpdateContext(item) as c:
        c.update_param(prop, keep_items, False)
    if no_wait:
        return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, resource_name, item)
    result = sdk_no_wait(no_wait, client.create_or_update, resource_group_name, resource_name, item).result()
    if next((x for x in getattr(result, prop) if x.name.lower() == item_name.lower()), None):
        from knack.util import CLIError
        raise CLIError("Failed to delete '{}' on '{}'".format(item_name, resource_name))
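Every example on this page funnels its final SDK call through the same helper, sdk_no_wait from azure.cli.core.util. The helper is tiny: when --no-wait is requested it asks the SDK not to poll the long-running operation; otherwise it forwards the call unchanged. A minimal sketch of the idea (close to, though not guaranteed verbatim from, the azure-cli source):

def sdk_no_wait(no_wait, func, *args, **kwargs):
    """Invoke an SDK operation, skipping long-running-operation polling if requested."""
    if no_wait:
        # msrest-based operations accept polling=False, which makes them return
        # right after the initial request instead of waiting for completion.
        kwargs.update({'polling': False})
    return func(*args, **kwargs)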
Code Example #2
File: live_event.py Project: sptramer/azure-cli
def start(cmd, client, resource_group_name, account_name, live_event_name, no_wait=False):
    if no_wait:
        return sdk_no_wait(no_wait, client.start, resource_group_name, account_name, live_event_name)

    LongRunningOperation(cmd.cli_ctx)(client.start(resource_group_name, account_name, live_event_name))

    return client.get(resource_group_name, account_name, live_event_name)
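This start command shows a pattern that recurs in Examples #8 and #9 below: under --no-wait, return the poller immediately; otherwise block on the operation with LongRunningOperation and then re-fetch the resource so the command prints its final state. A generic sketch of the pattern (the helper name and argument layout here are hypothetical):

def _invoke_and_refresh(cmd, client, operation, op_args, get_args, no_wait=False):
    # Hypothetical illustration only: run an LRO-backed operation, optionally
    # without waiting, and return the refreshed resource when we did wait.
    if no_wait:
        return sdk_no_wait(no_wait, operation, *op_args)
    LongRunningOperation(cmd.cli_ctx)(operation(*op_args))
    return client.get(*get_args)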
Code Example #3
File: custom.py Project: yugangw-msft/azure-cli
def cluster_create(cmd,
                   resource_group_name,
                   cluster_name,
                   sku,
                   location=None,
                   capacity=None,
                   custom_headers=None,
                   raw=False,
                   polling=True,
                   no_wait=False,
                   **kwargs):

    from azure.mgmt.kusto.models import Cluster, AzureSku
    from azure.cli.command_modules.kusto._client_factory import cf_cluster

    if location is None:
        location = _get_resource_group_location(cmd.cli_ctx, resource_group_name)

    _client = cf_cluster(cmd.cli_ctx, None)

    _cluster = Cluster(location=location, sku=AzureSku(name=sku, capacity=capacity))

    return sdk_no_wait(no_wait,
                       _client.create_or_update,
                       resource_group_name=resource_group_name,
                       cluster_name=cluster_name,
                       parameters=_cluster,
                       custom_headers=custom_headers,
                       raw=raw,
                       polling=polling,
                       operation_config=kwargs)
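Because the Kusto client is msrest-based, the extra custom_headers, raw, polling, and operation_config arguments pass straight through to the SDK operation. Note the interaction with the helper sketched above: when no_wait is set, sdk_no_wait overwrites whatever polling value the caller supplied.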
Code Example #4
def sqlvm_group_create(client, cmd, sql_virtual_machine_group_name, resource_group_name, location, sql_image_offer,
                       sql_image_sku, domain_fqdn, cluster_operator_account, sql_service_account,
                       storage_account_url, storage_account_key, cluster_bootstrap_account=None,
                       file_share_witness_path=None, ou_path=None, tags=None):

    '''
    Creates a SQL virtual machine group.
    '''
    tags = tags or {}

    # Create the windows server failover cluster domain profile object.
    wsfc_domain_profile_object = WsfcDomainProfile(domain_fqdn=domain_fqdn,
                                                   ou_path=ou_path,
                                                   cluster_bootstrap_account=cluster_bootstrap_account,
                                                   cluster_operator_account=cluster_operator_account,
                                                   sql_service_account=sql_service_account,
                                                   file_share_witness_path=file_share_witness_path,
                                                   storage_account_url=storage_account_url,
                                                   storage_account_primary_key=storage_account_key)

    sqlvm_group_object = SqlVirtualMachineGroup(sql_image_offer=sql_image_offer,
                                                sql_image_sku=sql_image_sku,
                                                wsfc_domain_profile=wsfc_domain_profile_object,
                                                location=location,
                                                tags=tags)

    # Since it's a long-running operation, we do the PUT and then a GET to display the instance.
    LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(False, client.create_or_update, resource_group_name,
                                                  sql_virtual_machine_group_name, sqlvm_group_object))

    return client.get(resource_group_name, sql_virtual_machine_group_name)
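Note the hard-coded False passed to sdk_no_wait above: with no_wait fixed, the helper is a plain passthrough, so this command always blocks, waits for the PUT to finish, and then GETs the group to display it. Examples #19 and #26 below use the same always-blocking pattern.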
Code Example #5
File: custom.py Project: yugangw-msft/azure-cli
def _replica_create(cmd, client, resource_group_name, server_name, source_server, no_wait=False, **kwargs):
    provider = 'Microsoft.DBForMySQL' if isinstance(client, MySqlServersOperations) else 'Microsoft.DBforPostgreSQL'
    # set source server id
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                        resource_group=resource_group_name,
                                        namespace=provider,
                                        type='servers',
                                        name=source_server)
        else:
            raise CLIError('The provided source-server {} is invalid.'.format(source_server))

    source_server_id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(source_server_id_parts['resource_group'], source_server_id_parts['name'])
    except CloudError as e:
        raise CLIError('Unable to get source server: {}.'.format(str(e)))

    parameters = None
    # Only the MySQL branch builds parameters here; for any other provider it stays None.
    if provider == 'Microsoft.DBForMySQL':
        from azure.mgmt.rdbms import mysql
        parameters = mysql.models.ServerForCreate(
            sku=mysql.models.Sku(name=source_server_object.sku.name),
            properties=mysql.models.ServerPropertiesForReplica(source_server_id=source_server),
            location=source_server_object.location)

    return sdk_no_wait(no_wait, client.create, resource_group_name, server_name, parameters)
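Several examples (#5, #10, #14, and #26) expand a bare server name into a full ARM resource ID with resource_id and split IDs back apart with parse_resource_id; both helpers come from msrestazure.tools. A small round-trip sketch with placeholder values:

from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id

# Placeholder subscription and names, for illustration only.
rid = resource_id(subscription='00000000-0000-0000-0000-000000000000',
                  resource_group='my-rg',
                  namespace='Microsoft.DBForMySQL',
                  type='servers',
                  name='my-source-server')
# rid == '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg'
#        '/providers/Microsoft.DBForMySQL/servers/my-source-server'
assert is_valid_resource_id(rid)
parts = parse_resource_id(rid)
assert parts['resource_group'] == 'my-rg'
assert parts['name'] == 'my-source-server'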
Code Example #6
def create_streaming_endpoint(cmd, client, resource_group_name, account_name, streaming_endpoint_name,  # pylint: disable=too-many-locals
                              scale_units, auto_start=None, tags=None, cross_domain_policy=None, ips=None,
                              description=None, availability_set_name=None, max_cache_age=None, cdn_provider=None,
                              cdn_profile=None, custom_host_names=None, client_access_policy=None, no_wait=False):
    from azure.mgmt.media.models import (StreamingEndpoint, IPAccessControl, StreamingEndpointAccessControl)
    from azure.cli.command_modules.ams._client_factory import (get_mediaservices_client)

    allow_list = []
    if ips is not None:
        for ip in ips:
            allow_list.append(create_ip_range(streaming_endpoint_name, ip))

    ams_client = get_mediaservices_client(cmd.cli_ctx)
    ams = ams_client.get(resource_group_name, account_name)
    location = ams.location

    streaming_endpoint_access_control = StreamingEndpointAccessControl()

    if ips is not None:
        streaming_endpoint_access_control.ip = IPAccessControl(allow=allow_list)

    policies = create_cross_site_access_policies(client_access_policy, cross_domain_policy)

    cdn_enabled = cdn_profile is not None or cdn_provider is not None

    streaming_endpoint = StreamingEndpoint(max_cache_age=max_cache_age, tags=tags, location=location,
                                           description=description, custom_host_names=custom_host_names,
                                           scale_units=scale_units, cdn_profile=cdn_profile,
                                           availability_set_name=availability_set_name, cdn_enabled=cdn_enabled,
                                           cdn_provider=cdn_provider, cross_site_access_policies=policies,
                                           access_control=streaming_endpoint_access_control)

    return sdk_no_wait(no_wait, client.create, resource_group_name=resource_group_name, account_name=account_name,
                       auto_start=auto_start, streaming_endpoint_name=streaming_endpoint_name,
                       parameters=streaming_endpoint)
Code Example #7
def _deploy_arm_template_core(cli_ctx, resource_group_name,  # pylint: disable=too-many-arguments
                              template_file=None, template_uri=None, input_yaml_files=None, deployment_name=None,
                              parameters=None, mode=None, validate_only=False,
                              no_wait=False):
    DeploymentProperties, TemplateLink = get_sdk(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                                 'DeploymentProperties', 'TemplateLink', mod='models')
    template = None
    template_link = None
    template_obj = None
    if template_uri:
        template_link = TemplateLink(uri=template_uri)
        template_obj = shell_safe_json_parse(_urlretrieve(template_uri).decode('utf-8'), preserve_order=True)
    elif template_file:
        template = get_file_json(template_file, preserve_order=True)
        template_obj = template
    else:
        output_file_path = _invoke_mergeutil(input_yaml_files, parameters)
        parameters = None
        template = get_file_json(output_file_path, preserve_order=True)
        template_obj = template

    template_param_defs = template_obj.get('parameters', {})
    template_obj['resources'] = template_obj.get('resources', [])

    template = json.loads(json.dumps(template))  # round-trip to normalize OrderedDicts into plain JSON types

    if parameters is not None:
        parameters = _process_parameters(template_param_defs, parameters) or {}
        parameters = _get_missing_parameters(parameters, template_obj, _prompt_for_parameters)
        parameters = json.loads(json.dumps(parameters))

    properties = DeploymentProperties(template=template, template_link=template_link,
                                      parameters=parameters, mode=mode)
    # workaround
    properties.mode = 'incremental'
    smc = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)

    logger.warning("Deploying . . .")
    logger.warning("You can check the state of the deployment with:")
    logger.warning("az group deployment show --name %s --resource-group %s",
                   deployment_name, resource_group_name)
    if validate_only:
        return sdk_no_wait(no_wait, smc.deployments.validate, resource_group_name, deployment_name, properties)

    return sdk_no_wait(no_wait, smc.deployments.create_or_update, resource_group_name, deployment_name, properties)
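Note that the same DeploymentProperties object feeds both deployments.validate and deployments.create_or_update, and that the 'incremental' workaround silently overrides whatever mode the caller passed in.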
Code Example #8
def stop(cmd, client, resource_group_name, account_name,
         streaming_endpoint_name, no_wait=False):
    if no_wait:
        return sdk_no_wait(no_wait, client.stop, resource_group_name, account_name,
                           streaming_endpoint_name)

    LongRunningOperation(cmd.cli_ctx)(client.stop(resource_group_name, account_name,
                                                  streaming_endpoint_name))

    return client.get(resource_group_name, account_name, streaming_endpoint_name)
Code Example #9
File: live_event.py Project: sptramer/azure-cli
def stop(cmd, client, resource_group_name, account_name, live_event_name,
         remove_outputs_on_stop=False, no_wait=False):

    if no_wait:
        return sdk_no_wait(no_wait, client.stop, resource_group_name, account_name, live_event_name,
                           remove_outputs_on_stop)

    LongRunningOperation(cmd.cli_ctx)(client.stop(resource_group_name, account_name, live_event_name,
                                                  remove_outputs_on_stop))

    return client.get(resource_group_name, account_name, live_event_name)
Code Example #10
File: custom.py Project: yugangw-msft/azure-cli
def _server_restore(cmd, client, resource_group_name, server_name, source_server, restore_point_in_time, no_wait=False):
    provider = 'Microsoft.DBforPostgreSQL'
    if isinstance(client, MySqlServersOperations):
        provider = 'Microsoft.DBforMySQL'
    elif isinstance(client, MariaDBServersOperations):
        provider = 'Microsoft.DBforMariaDB'

    parameters = None
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace=provider,
                type='servers',
                name=source_server)
        else:
            raise ValueError('The provided source-server {} is invalid.'.format(source_server))

    if provider == 'Microsoft.DBforMySQL':
        from azure.mgmt.rdbms import mysql
        parameters = mysql.models.ServerForCreate(
            properties=mysql.models.ServerPropertiesForRestore(
                source_server_id=source_server,
                restore_point_in_time=restore_point_in_time),
            location=None)
    elif provider == 'Microsoft.DBforPostgreSQL':
        from azure.mgmt.rdbms import postgresql
        parameters = postgresql.models.ServerForCreate(
            properties=postgresql.models.ServerPropertiesForRestore(
                source_server_id=source_server,
                restore_point_in_time=restore_point_in_time),
            location=None)
    elif provider == 'Microsoft.DBforMariaDB':
        from azure.mgmt.rdbms import mariadb
        parameters = mariadb.models.ServerForCreate(
            properties=mariadb.models.ServerPropertiesForRestore(
                source_server_id=source_server,
                restore_point_in_time=restore_point_in_time),
            location=None)

    # (Redundant: both values were already set on the ServerForCreate properties above.)
    parameters.properties.source_server_id = source_server
    parameters.properties.restore_point_in_time = restore_point_in_time

    # Workaround: cross-region restore is not supported yet, so the location must
    # match the source server (not the resource group).
    id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(id_parts['resource_group'], id_parts['name'])
        parameters.location = source_server_object.location
    except Exception as e:
        raise ValueError('Unable to get source server: {}.'.format(str(e)))

    return sdk_no_wait(no_wait, client.create, resource_group_name, server_name, parameters)
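As a side note, the three provider branches above differ only in which models module they use, so a mapping-based dispatch would collapse them. A hypothetical refactor sketch, reusing the names from the example:

# Hypothetical refactor of the provider branches above.
from azure.mgmt.rdbms import mariadb, mysql, postgresql

RDBMS_MODELS = {
    'Microsoft.DBforMySQL': mysql.models,
    'Microsoft.DBforPostgreSQL': postgresql.models,
    'Microsoft.DBforMariaDB': mariadb.models,
}

models = RDBMS_MODELS[provider]
parameters = models.ServerForCreate(
    properties=models.ServerPropertiesForRestore(
        source_server_id=source_server,
        restore_point_in_time=restore_point_in_time),
    location=None)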
Code Example #11
def update_streaming_endpoint_setter(client, resource_group_name, account_name, streaming_endpoint_name,
                                     parameters, no_wait):
    if (parameters.access_control is not None and
            parameters.access_control.ip is not None and
            parameters.access_control.ip.allow):
        ips = list(map(lambda x: create_ip_range(streaming_endpoint_name, x) if isinstance(x, str) else x,
                       parameters.access_control.ip.allow))
        parameters.access_control.ip.allow = ips

    return sdk_no_wait(no_wait, client.update, resource_group_name=resource_group_name,
                       account_name=account_name, streaming_endpoint_name=streaming_endpoint_name,
                       parameters=parameters)
Code Example #12
File: custom.py Project: yugangw-msft/azure-cli
def rotate_hdi_cluster_key(cmd, client, resource_group_name, cluster_name,
                           encryption_vault_uri, encryption_key_name, encryption_key_version, no_wait=False):
    from azure.mgmt.hdinsight.models import ClusterDiskEncryptionParameters
    rotate_params = ClusterDiskEncryptionParameters(
        vault_uri=encryption_vault_uri,
        key_name=encryption_key_name,
        key_version=encryption_key_version
    )

    # sdk_no_wait forwards the call unchanged when no_wait is False, so this
    # branch is equivalent to a single sdk_no_wait call.
    if no_wait:
        return sdk_no_wait(no_wait, client.rotate_disk_encryption_key, resource_group_name, cluster_name, rotate_params)

    return client.rotate_disk_encryption_key(resource_group_name, cluster_name, rotate_params)
Code Example #13
File: custom.py Project: yugangw-msft/azure-cli
def create_account(client,
                   resource_group_name, account_name, location, tags=None, storage_account=None,
                   keyvault=None, keyvault_url=None, no_wait=False):
    properties = AutoStorageBaseProperties(storage_account_id=storage_account) \
        if storage_account else None
    parameters = BatchAccountCreateParameters(location=location,
                                              tags=tags,
                                              auto_storage=properties)
    if keyvault:
        parameters.key_vault_reference = {'id': keyvault, 'url': keyvault_url}
        parameters.pool_allocation_mode = 'UserSubscription'

    return sdk_no_wait(no_wait, client.create, resource_group_name=resource_group_name,
                       account_name=account_name, parameters=parameters)
Code Example #14
File: custom.py Project: jiayexie/azure-cli
def _server_georestore(cmd, client, resource_group_name, server_name, sku_name, location, source_server,
                       backup_retention=None, geo_redundant_backup=None, no_wait=False, **kwargs):
    provider = 'Microsoft.DBForMySQL' if isinstance(client, ServersOperations) else 'Microsoft.DBforPostgreSQL'
    parameters = None

    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                        resource_group=resource_group_name,
                                        namespace=provider,
                                        type='servers',
                                        name=source_server)
        else:
            raise ValueError('The provided source-server {} is invalid.'.format(source_server))

    if provider == 'Microsoft.DBForMySQL':
        from azure.mgmt.rdbms import mysql
        parameters = mysql.models.ServerForCreate(
            sku=mysql.models.Sku(name=sku_name),
            properties=mysql.models.ServerPropertiesForGeoRestore(
                storage_profile=mysql.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup),
                source_server_id=source_server),
            location=location)
    elif provider == 'Microsoft.DBforPostgreSQL':
        from azure.mgmt.rdbms import postgresql
        parameters = postgresql.models.ServerForCreate(
            sku=postgresql.models.Sku(name=sku_name),
            properties=postgresql.models.ServerPropertiesForGeoRestore(
                storage_profile=postgresql.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup),
                source_server_id=source_server),
            location=location)

    parameters.properties.source_server_id = source_server  # (redundant: already set above)

    source_server_id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(source_server_id_parts['resource_group'], source_server_id_parts['name'])
        if parameters.sku.name is None:
            parameters.sku.name = source_server_object.sku.name
    except Exception as e:
        raise ValueError('Unable to get source server: {}.'.format(str(e)))

    return sdk_no_wait(no_wait, client.create, resource_group_name, server_name, parameters)
Code Example #15
File: custom.py Project: derekbekoe/azure-cli
def iot_device_receive_message(client, hub_name, device_id, resource_group_name=None, lock_timeout=60):
    device_client = _get_device_client(client, resource_group_name, hub_name, device_id)
    # no_wait is hard-coded to True: the receive call is made without LRO polling
    # and the raw response is inspected directly.
    result = sdk_no_wait(True, device_client.receive_message, device_id, lock_timeout)
    # A non-200 status (e.g. no message currently queued) falls through and returns None.
    if result is not None and result.response.status_code == 200:
        return {
            'ack': result.headers['iothub-ack'],
            'correlationId': result.headers['iothub-correlationid'],
            'data': result.response.content,
            'deliveryCount': result.headers['iothub-deliverycount'],
            'enqueuedTime': result.headers['iothub-enqueuedtime'],
            'expiry': result.headers['iothub-expiry'],
            'lockToken': result.headers['ETag'].strip('"'),
            'messageId': result.headers['iothub-messageid'],
            'sequenceNumber': result.headers['iothub-sequencenumber'],
            'to': result.headers['iothub-to'],
            'userId': result.headers['iothub-userid']
        }
Code Example #16
File: custom.py Project: derekbekoe/azure-cli
def create_service(client,
                   service_name,
                   resource_group_name,
                   location,
                   subnet,
                   sku_name,
                   tags=None,
                   no_wait=False):
    parameters = DataMigrationService(location=location,
                                      virtual_subnet_id=subnet,
                                      sku=ServiceSku(name=sku_name),
                                      tags=tags)

    return sdk_no_wait(no_wait,
                       client.create_or_update,
                       parameters=parameters,
                       group_name=resource_group_name,
                       service_name=service_name)
Code Example #17
File: custom.py Project: derekbekoe/azure-cli
def _db_dw_create(
        cli_ctx,
        client,
        db_id,
        no_wait,
        kwargs):

    # 'kwargs' is a plain dict of database properties assembled by the command
    # layer (note: it is not **kwargs); it is forwarded wholesale as 'parameters'.

    # Determine server location
    kwargs['location'] = _get_server_location(
        cli_ctx,
        server_name=db_id.server_name,
        resource_group_name=db_id.resource_group_name)

    # Create
    return sdk_no_wait(no_wait, client.create_or_update,
                       server_name=db_id.server_name,
                       resource_group_name=db_id.resource_group_name,
                       database_name=db_id.database_name,
                       parameters=kwargs)
Code Example #18
File: custom.py Project: johanste/azure-cli
def _create_update_from_file(cli_ctx, resource_group_name, name, location, file, no_wait):
    resource_client = cf_resource(cli_ctx)
    container_group_client = cf_container_groups(cli_ctx)

    cg_definition = None

    try:
        with open(file, 'r') as f:
            cg_definition = yaml.safe_load(f)  # safe_load: the definition is plain YAML, no Python objects needed
    except FileNotFoundError:
        raise CLIError("No such file or directory: " + file)
    except yaml.YAMLError as e:
        raise CLIError("Error while parsing yaml file:\n\n" + str(e))

    # Validate names match if both are provided
    if name and cg_definition.get('name', None):
        if name != cg_definition.get('name', None):
            raise CLIError("The name parameter and name from yaml definition must match.")
    else:
        # Validate at least one name is provided
        name = name or cg_definition.get('name', None)
        if cg_definition.get('name', None) is None and not name:
            raise CLIError("The name of the container group is required")

    cg_definition['name'] = name

    location = location or cg_definition.get('location', None)
    if not location:
        location = resource_client.resource_groups.get(resource_group_name).location
    cg_definition['location'] = location

    api_version = cg_definition.get('apiVersion', None) or container_group_client.api_version

    return sdk_no_wait(no_wait,
                       resource_client.resources.create_or_update,
                       resource_group_name,
                       "Microsoft.ContainerInstance",
                       '',
                       "containerGroups",
                       name,
                       api_version,
                       cg_definition)
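For reference, the positional arguments to resources.create_or_update here follow the generic-resource signature of that SDK generation: resource group, provider namespace (Microsoft.ContainerInstance), parent resource path (empty), resource type (containerGroups), resource name, API version, and the request body.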
Code Example #19
def sqlvm_aglistener_create(client, cmd, availability_group_listener_name, sql_virtual_machine_group_name,
                            resource_group_name, availability_group_name, ip_address, subnet_resource_id,
                            load_balancer_resource_id, probe_port, sql_virtual_machine_instances, port=1433,
                            public_ip_address_resource_id=None):
    '''
    Creates an availability group listener
    '''

    if not is_valid_resource_id(subnet_resource_id):
        raise CLIError("Invalid subnet resource id.")
    if not is_valid_resource_id(load_balancer_resource_id):
        raise CLIError("Invalid load balancer resource id.")
    if public_ip_address_resource_id and not is_valid_resource_id(public_ip_address_resource_id):
        raise CLIError("Invalid public IP address resource id.")
    for sqlvm in sql_virtual_machine_instances:
        if not is_valid_resource_id(sqlvm):
            raise CLIError("Invalid SQL virtual machine resource id.")

    # Create the private ip address
    private_ip_object = PrivateIPAddress(ip_address=ip_address,
                                         subnet_resource_id=subnet_resource_id
                                         if is_valid_resource_id(subnet_resource_id) else None)

    # Create the load balancer configurations
    load_balancer_object = LoadBalancerConfiguration(private_ip_address=private_ip_object,
                                                     public_ip_address_resource_id=public_ip_address_resource_id,
                                                     load_balancer_resource_id=load_balancer_resource_id,
                                                     probe_port=probe_port,
                                                     sql_virtual_machine_instances=sql_virtual_machine_instances)

    # Create the availability group listener object
    ag_listener_object = AvailabilityGroupListener(availability_group_name=availability_group_name,
                                                   load_balancer_configurations=load_balancer_object,
                                                   port=port)

    LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(False, client.create_or_update, resource_group_name,
                                                  sql_virtual_machine_group_name, availability_group_listener_name,
                                                  ag_listener_object))

    return client.get(resource_group_name, sql_virtual_machine_group_name, availability_group_listener_name)
Code Example #20
File: live_event.py Project: sptramer/azure-cli
def create(cmd, client, resource_group_name, account_name, live_event_name, streaming_protocol, ips,  # pylint: disable=too-many-locals
           auto_start=False, encoding_type=None, preset_name=None, tags=None, description=None,
           key_frame_interval_duration=None, access_token=None, no_wait=False, preview_ips=None,
           preview_locator=None, streaming_policy_name=None, alternative_media_id=None,
           vanity_url=False, client_access_policy=None, cross_domain_policy=None, stream_options=None):
    from azure.mgmt.media.models import (LiveEventInputProtocol, LiveEventInput, LiveEvent,
                                         LiveEventEncoding, LiveEventInputAccessControl, IPAccessControl)
    from azure.cli.command_modules.ams._client_factory import (get_mediaservices_client)

    # Map the CLI-facing 'Standard' value to the API's 'Basic' encoding type.
    encoding_type = 'Basic' if encoding_type == 'Standard' else encoding_type
    allowed_ips = []
    if ips[0] == 'AllowAll':
        ips = ['0.0.0.0/0']
    for ip in ips:
        allowed_ips.append(create_ip_range(live_event_name, ip))

    live_event_input_access_control = LiveEventInputAccessControl(ip=IPAccessControl(allow=allowed_ips))

    live_event_input = LiveEventInput(streaming_protocol=LiveEventInputProtocol(streaming_protocol),
                                      access_token=access_token,
                                      key_frame_interval_duration=key_frame_interval_duration,
                                      access_control=live_event_input_access_control)

    ams_client = get_mediaservices_client(cmd.cli_ctx)
    ams = ams_client.get(resource_group_name, account_name)
    location = ams.location

    live_event_preview = create_live_event_preview(preview_locator, streaming_policy_name, alternative_media_id,
                                                   preview_ips, live_event_name)

    policies = create_cross_site_access_policies(client_access_policy, cross_domain_policy)

    live_event = LiveEvent(input=live_event_input, location=location, preview=live_event_preview,
                           encoding=LiveEventEncoding(encoding_type=encoding_type, preset_name=preset_name),
                           tags=tags, vanity_url=vanity_url, stream_options=stream_options,
                           cross_site_access_policies=policies, description=description)

    return sdk_no_wait(no_wait, client.create, resource_group_name=resource_group_name, account_name=account_name,
                       live_event_name=live_event_name, parameters=live_event, auto_start=auto_start)
Code Example #21
File: custom.py Project: derekbekoe/azure-cli
def _db_create_special(
        cli_ctx,
        client,
        source_db,
        dest_db,
        no_wait,
        kwargs):

    # Determine server location
    kwargs['location'] = _get_server_location(
        cli_ctx,
        server_name=dest_db.server_name,
        resource_group_name=dest_db.resource_group_name)

    # Set create mode properties
    kwargs['source_database_id'] = source_db.id()

    # Create
    return sdk_no_wait(no_wait, client.create_or_update,
                       server_name=dest_db.server_name,
                       resource_group_name=dest_db.resource_group_name,
                       database_name=dest_db.database_name,
                       parameters=kwargs)
Code Example #22
File: custom.py Project: yugangw-msft/azure-cli
def database_create(cmd,
                    resource_group_name,
                    cluster_name,
                    database_name,
                    soft_delete_period=None,
                    hot_cache_period=None,
                    custom_headers=None,
                    raw=False,
                    polling=True,
                    no_wait=False,
                    **kwargs):

    from azure.mgmt.kusto.models import Database
    from azure.cli.command_modules.kusto._client_factory import cf_database

    _client = cf_database(cmd.cli_ctx, None)
    _cluster = _cluster_get(cmd, resource_group_name, cluster_name, custom_headers, raw, **kwargs)

    if no_wait:
        location = _cluster.output.location
    else:
        location = _cluster.location

    _database = Database(location=location,
                         soft_delete_period=soft_delete_period,
                         hot_cache_period=hot_cache_period)

    return sdk_no_wait(no_wait,
                       _client.create_or_update,
                       resource_group_name=resource_group_name,
                       cluster_name=cluster_name,
                       database_name=database_name,
                       parameters=_database,
                       custom_headers=custom_headers,
                       raw=raw,
                       polling=polling,
                       operation_config=kwargs)
Code Example #23
def create_express_route(cmd, circuit_name, resource_group_name, bandwidth_in_mbps, peering_location=None,
                         service_provider_name=None, location=None, tags=None, no_wait=False,
                         sku_family=None, sku_tier=None, allow_global_reach=None, express_route_port=None):
    ExpressRouteCircuit, ExpressRouteCircuitSku, ExpressRouteCircuitServiceProviderProperties, SubResource = \
        cmd.get_models(
            'ExpressRouteCircuit', 'ExpressRouteCircuitSku', 'ExpressRouteCircuitServiceProviderProperties',
            'SubResource')
    from azure.cli.core.util import sdk_no_wait
    client = network_client_factory(cmd.cli_ctx).express_route_circuits
    sku_name = '{}_{}'.format(sku_tier, sku_family)
    circuit = ExpressRouteCircuit(
        location=location, tags=tags,
        service_provider_properties=ExpressRouteCircuitServiceProviderProperties(
            service_provider_name=service_provider_name,
            peering_location=peering_location,
            bandwidth_in_mbps=bandwidth_in_mbps if not express_route_port else None),
        sku=ExpressRouteCircuitSku(name=sku_name, tier=sku_tier, family=sku_family),
        allow_global_reach=allow_global_reach,
        express_route_port=SubResource(id=express_route_port) if express_route_port else None,
        bandwidth_in_gbps=(int(bandwidth_in_mbps) / 1000) if express_route_port else None
    )
    if express_route_port:
        circuit.service_provider_properties = None
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, circuit_name, circuit)
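The circuit SKU name is simply the tier and family joined with an underscore (for example, Standard_MeteredData). When --express-route-port is supplied, the service-provider properties are cleared and the bandwidth is carried in Gbps on the port reference instead.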
Code Example #24
def create_cluster(cmd, client, cluster_name, resource_group_name, cluster_type,
                   location=None, tags=None, no_wait=False, cluster_version='default', cluster_tier=None,
                   cluster_configurations=None, component_version=None,
                   headnode_size='large', workernode_size='large', zookeepernode_size=None, edgenode_size=None,
                   workernode_count=3, workernode_data_disks_per_node=None,
                   workernode_data_disk_storage_account_type=None, workernode_data_disk_size=None,
                   http_username=None, http_password=None,
                   ssh_username='******', ssh_password=None, ssh_public_key=None,
                   storage_account=None, storage_account_key=None,
                   storage_default_container=None, storage_default_filesystem=None,
                   storage_account_managed_identity=None,
                   vnet_name=None, subnet=None,
                   domain=None, ldaps_urls=None,
                   cluster_admin_account=None, cluster_admin_password=None,
                   cluster_users_group_dns=None,
                   assign_identity=None,
                   encryption_vault_uri=None, encryption_key_name=None, encryption_key_version=None,
                   encryption_algorithm='RSA-OAEP', esp=False, no_validation_timeout=False):
    from .util import build_identities_info, build_virtual_network_profile, parse_domain_name, \
        get_storage_account_endpoint, validate_esp_cluster_create_params
    from azure.mgmt.hdinsight.models import ClusterCreateParametersExtended, ClusterCreateProperties, OSType, \
        ClusterDefinition, ComputeProfile, HardwareProfile, Role, OsProfile, LinuxOperatingSystemProfile, \
        StorageProfile, StorageAccount, DataDisksGroups, SecurityProfile, \
        DirectoryType, DiskEncryptionProperties, Tier, SshProfile, SshPublicKey

    validate_esp_cluster_create_params(esp, cluster_name, resource_group_name, cluster_type,
                                       subnet, domain, cluster_admin_account, assign_identity,
                                       ldaps_urls, cluster_admin_password, cluster_users_group_dns)

    if esp:
        if cluster_tier == Tier.standard:
            raise CLIError('Cluster tier cannot be {} when --esp is specified. '
                           'Please use default value or specify {} explicitly.'.format(Tier.standard, Tier.premium))
        if not cluster_tier:
            cluster_tier = Tier.premium

    # Update optional parameters with defaults
    location = location or _get_rg_location(cmd.cli_ctx, resource_group_name)

    # Format dictionary/free-form arguments
    if cluster_configurations:
        import json
        try:
            cluster_configurations = json.loads(cluster_configurations)
        except ValueError as ex:
            raise CLIError('The cluster_configurations argument must be valid JSON. Error: {}'.format(str(ex)))
    else:
        cluster_configurations = dict()
    if component_version:
        # See validator
        component_version = {c: v for c, v in [version.split('=') for version in component_version]}

    # Validate whether HTTP credentials were provided
    if 'gateway' in cluster_configurations:
        gateway_config = cluster_configurations['gateway']
    else:
        gateway_config = dict()
    if http_username and 'restAuthCredential.username' in gateway_config:
        raise CLIError('An HTTP username must be specified either as a command-line parameter '
                       'or in the cluster configuration, but not both.')
    if not http_username:
        http_username = '******'  # Implement default logic here, in case a user specifies the username in configurations

    if not http_password:
        try:
            http_password = prompt_pass('HTTP password for the cluster:', confirm=True)
        except NoTTYException:
            raise CLIError('Please specify --http-password in non-interactive mode.')

    # Update the cluster config with the HTTP credentials
    gateway_config['restAuthCredential.isEnabled'] = 'true'  # HTTP credentials are required
    http_username = http_username or gateway_config['restAuthCredential.username']
    gateway_config['restAuthCredential.username'] = http_username
    gateway_config['restAuthCredential.password'] = http_password
    cluster_configurations['gateway'] = gateway_config

    # Validate whether SSH credentials were provided
    if not (ssh_password or ssh_public_key):
        logger.warning("SSH credentials not specified. Using the HTTP password as the SSH password.")
        ssh_password = http_password

    # Validate storage arguments from the user
    if storage_default_container and storage_default_filesystem:
        raise CLIError('Either the default container or the default filesystem can be specified, but not both.')

    # Retrieve primary blob service endpoint
    is_wasb = not storage_account_managed_identity
    storage_account_endpoint = None
    if storage_account:
        storage_account_endpoint = get_storage_account_endpoint(cmd, storage_account, is_wasb)

    # Attempt to infer the storage account key from the endpoint
    if not storage_account_key and storage_account and is_wasb:
        from .util import get_key_for_storage_account
        logger.info('Storage account key not specified. Attempting to retrieve key...')
        key = get_key_for_storage_account(cmd, storage_account)
        if not key:
            raise CLIError('Storage account key could not be inferred from storage account.')
        storage_account_key = key

    # Attempt to provide a default container for WASB storage accounts
    if not storage_default_container and is_wasb:
        storage_default_container = cluster_name.lower()
        logger.warning('Default WASB container not specified, using "%s".', storage_default_container)
    elif not storage_default_filesystem and not is_wasb:
        storage_default_filesystem = cluster_name.lower()
        logger.warning('Default ADLS file system not specified, using "%s".', storage_default_filesystem)

    # Validate storage info parameters
    if is_wasb and not _all_or_none(storage_account, storage_account_key, storage_default_container):
        raise CLIError('If storage details are specified, the storage account, storage account key, '
                       'and the default container must be specified.')
    if not is_wasb and not _all_or_none(storage_account, storage_default_filesystem):
        raise CLIError('If storage details are specified, the storage account, '
                       'and the default filesystem must be specified.')

    # Validate disk encryption parameters
    if not _all_or_none(encryption_vault_uri, encryption_key_name, encryption_key_version):
        raise CLIError('Either the encryption vault URI, key name and key version should be specified, '
                       'or none of them should be.')

    # Specify virtual network profile only when network arguments are provided
    virtual_network_profile = subnet and build_virtual_network_profile(subnet)

    # Validate data disk parameters
    if not workernode_data_disks_per_node and workernode_data_disk_storage_account_type:
        raise CLIError("Cannot define data disk storage account type unless disks per node is defined.")
    if not workernode_data_disks_per_node and workernode_data_disk_size:
        raise CLIError("Cannot define data disk size unless disks per node is defined.")
    # Specify data disk groups only when disk arguments are provided
    workernode_data_disk_groups = workernode_data_disks_per_node and [
        DataDisksGroups(
            disks_per_node=workernode_data_disks_per_node,
            storage_account_type=workernode_data_disk_storage_account_type,
            disk_size_gb=workernode_data_disk_size
        )
    ]

    os_profile = OsProfile(
        linux_operating_system_profile=LinuxOperatingSystemProfile(
            username=ssh_username,
            password=ssh_password,
            ssh_profile=ssh_public_key and SshProfile(
                public_keys=[SshPublicKey(
                    certificate_data=ssh_public_key
                )]
            )
        )
    )

    roles = [
        # Required roles
        Role(
            name="headnode",
            target_instance_count=2,
            hardware_profile=HardwareProfile(vm_size=headnode_size),
            os_profile=os_profile,
            virtual_network_profile=virtual_network_profile
        ),
        Role(
            name="workernode",
            target_instance_count=workernode_count,
            hardware_profile=HardwareProfile(vm_size=workernode_size),
            os_profile=os_profile,
            virtual_network_profile=virtual_network_profile,
            data_disks_groups=workernode_data_disk_groups
        )
    ]
    if zookeepernode_size:
        roles.append(
            Role(
                name="zookeepernode",
                target_instance_count=3,
                hardware_profile=HardwareProfile(vm_size=zookeepernode_size),
                os_profile=os_profile,
                virtual_network_profile=virtual_network_profile
            ))
    if edgenode_size:
        roles.append(
            Role(
                name="edgenode",
                target_instance_count=1,
                hardware_profile=HardwareProfile(vm_size=edgenode_size),
                os_profile=os_profile,
                virtual_network_profile=virtual_network_profile
            ))

    storage_accounts = []
    if storage_account:
        # Specify storage account details only when storage arguments are provided
        storage_accounts.append(
            StorageAccount(
                name=storage_account_endpoint,
                key=storage_account_key,
                container=storage_default_container,
                file_system=storage_default_filesystem,
                resource_id=None if is_wasb else storage_account,
                msi_resource_id=storage_account_managed_identity,
                is_default=True
            )
        )

    additional_storage_accounts = []  # TODO: Add support for additional storage accounts
    if additional_storage_accounts:
        storage_accounts += [
            StorageAccount(
                name=s.storage_account_endpoint,
                key=s.storage_account_key,
                container=s.container,
                is_default=False
            )
            for s in additional_storage_accounts
        ]

    assign_identities = []
    if assign_identity:
        assign_identities.append(assign_identity)

    if storage_account_managed_identity:
        assign_identities.append(storage_account_managed_identity)

    cluster_identity = build_identities_info(assign_identities) if assign_identities else None

    domain_name = domain and parse_domain_name(domain)
    if not ldaps_urls and domain_name:
        ldaps_urls = ['ldaps://{}:636'.format(domain_name)]

    security_profile = domain and SecurityProfile(
        directory_type=DirectoryType.active_directory,
        domain=domain_name,
        ldaps_urls=ldaps_urls,
        domain_username=cluster_admin_account,
        domain_user_password=cluster_admin_password,
        cluster_users_group_dns=cluster_users_group_dns,
        aadds_resource_id=domain,
        msi_resource_id=assign_identity
    )

    disk_encryption_properties = encryption_vault_uri and DiskEncryptionProperties(
        vault_uri=encryption_vault_uri,
        key_name=encryption_key_name,
        key_version=encryption_key_version,
        encryption_algorithm=encryption_algorithm,
        msi_resource_id=assign_identity
    )

    create_params = ClusterCreateParametersExtended(
        location=location,
        tags=tags,
        properties=ClusterCreateProperties(
            cluster_version=cluster_version,
            os_type=OSType.linux,
            tier=cluster_tier,
            cluster_definition=ClusterDefinition(
                kind=cluster_type,
                configurations=cluster_configurations,
                component_version=component_version
            ),
            compute_profile=ComputeProfile(
                roles=roles
            ),
            storage_profile=StorageProfile(
                storageaccounts=storage_accounts
            ),
            security_profile=security_profile,
            disk_encryption_properties=disk_encryption_properties
        ),
        identity=cluster_identity
    )

    # sdk_no_wait is a passthrough when no_wait is False, so this branch could
    # collapse to a single sdk_no_wait call.
    if no_wait:
        return sdk_no_wait(no_wait, client.create, resource_group_name, cluster_name, create_params)

    return client.create(resource_group_name, cluster_name, create_params)
Code Example #25
def aks_create(
        cmd,
        client,
        resource_group_name,
        name,
        ssh_key_value,  # pylint: disable=too-many-locals
        dns_name_prefix=None,
        location=None,
        admin_username="******",
        kubernetes_version='',
        node_vm_size="Standard_DS2_v2",
        node_osdisk_size=0,
        node_count=3,
        service_principal=None,
        client_secret=None,
        no_ssh_key=False,
        disable_rbac=None,
        enable_rbac=None,
        enable_vmss=None,
        skip_subnet_role_assignment=False,
        enable_cluster_autoscaler=False,
        network_plugin=None,
        pod_cidr=None,
        service_cidr=None,
        dns_service_ip=None,
        docker_bridge_address=None,
        enable_addons=None,
        workspace_resource_id=None,
        min_count=None,
        max_count=None,
        vnet_subnet_id=None,
        max_pods=0,
        aad_client_app_id=None,
        aad_server_app_id=None,
        aad_server_app_secret=None,
        aad_tenant_id=None,
        tags=None,
        generate_ssh_keys=False,  # pylint: disable=unused-argument
        no_wait=False):
    if not no_ssh_key:
        try:
            if not ssh_key_value or not is_valid_ssh_rsa_public_key(
                    ssh_key_value):
                raise ValueError()
        except (TypeError, ValueError):
            shortened_key = truncate_text(ssh_key_value)
            raise CLIError(
                'Provided ssh key ({}) is invalid or non-existent'.format(
                    shortened_key))

    subscription_id = _get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name,
                                                  subscription_id)

    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location

    agent_pool_profile = ManagedClusterAgentPoolProfile(
        name='nodepool1',  # Must be 12 chars or less before ACS RP adds to it
        count=int(node_count),
        vm_size=node_vm_size,
        os_type="Linux",
        vnet_subnet_id=vnet_subnet_id,
        max_pods=int(max_pods) if max_pods else None)

    if enable_vmss:
        agent_pool_profile.type = "VirtualMachineScaleSets"
    if node_osdisk_size:
        agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)

    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count,
                                   max_count, node_count, agent_pool_profile)

    linux_profile = None
    # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
    if not no_ssh_key:
        ssh_config = ContainerServiceSshConfiguration(
            public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
        linux_profile = ContainerServiceLinuxProfile(
            admin_username=admin_username, ssh=ssh_config)

    principal_obj = _ensure_aks_service_principal(
        cmd.cli_ctx,
        service_principal=service_principal,
        client_secret=client_secret,
        subscription_id=subscription_id,
        dns_name_prefix=dns_name_prefix,
        location=location,
        name=name)
    service_principal_profile = ManagedClusterServicePrincipalProfile(
        client_id=principal_obj.get("service_principal"),
        secret=principal_obj.get("client_secret"))

    if (vnet_subnet_id and not skip_subnet_role_assignment and
            not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
        scope = vnet_subnet_id
        if not _add_role_assignment(
                cmd.cli_ctx, 'Network Contributor', service_principal,
                scope=scope):
            logger.warning('Could not create a role assignment for subnet. '
                           'Are you an Owner on this subscription?')

    network_profile = None
    if any([
            network_plugin, pod_cidr, service_cidr, dns_service_ip,
            docker_bridge_address
    ]):
        network_profile = ContainerServiceNetworkProfile(
            network_plugin=network_plugin,
            pod_cidr=pod_cidr,
            service_cidr=service_cidr,
            dns_service_ip=dns_service_ip,
            docker_bridge_cidr=docker_bridge_address)

    addon_profiles = _handle_addons_args(cmd, enable_addons, subscription_id,
                                         resource_group_name, {},
                                         workspace_resource_id)
    if 'omsagent' in addon_profiles:
        _ensure_container_insights_for_monitoring(cmd,
                                                  addon_profiles['omsagent'])
    aad_profile = None
    if any([
            aad_client_app_id, aad_server_app_id, aad_server_app_secret,
            aad_tenant_id
    ]):
        aad_profile = ManagedClusterAADProfile(
            client_app_id=aad_client_app_id,
            server_app_id=aad_server_app_id,
            server_app_secret=aad_server_app_secret,
            tenant_id=aad_tenant_id)

    # Check that both --disable-rbac and --enable-rbac weren't provided
    if all([disable_rbac, enable_rbac]):
        raise CLIError(
            'specify either "--disable-rbac" or "--enable-rbac", not both.')

    mc = ManagedCluster(location=location,
                        tags=tags,
                        dns_prefix=dns_name_prefix,
                        kubernetes_version=kubernetes_version,
                        enable_rbac=False if disable_rbac else True,
                        agent_pool_profiles=[agent_pool_profile],
                        linux_profile=linux_profile,
                        service_principal_profile=service_principal_profile,
                        network_profile=network_profile,
                        addon_profiles=addon_profiles,
                        aad_profile=aad_profile)

    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            return sdk_no_wait(no_wait,
                               client.managed_clusters.create_or_update,
                               resource_group_name=resource_group_name,
                               resource_name=name,
                               parameters=mc)
        except CloudError as ex:
            retry_exception = ex
            if 'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
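The retry loop above exists because a freshly created service principal may not have replicated through the AAD tenant yet: only CloudErrors containing 'not found in Active Directory tenant' are retried (with a 3-second sleep, up to 30 attempts); any other error is re-raised immediately.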
Code Example #26
def sqlvm_create(client,
                 cmd,
                 location,
                 sql_virtual_machine_name,
                 resource_group_name,
                 sql_server_license_type='PAYG',
                 sql_virtual_machine_group_resource_id=None,
                 cluster_bootstrap_account_password=None,
                 cluster_operator_account_password=None,
                 sql_service_account_password=None,
                 enable_auto_patching=None,
                 day_of_week=None,
                 maintenance_window_starting_hour=None,
                 maintenance_window_duration=None,
                 enable_auto_backup=None,
                 enable_encryption=False,
                 retention_period=None,
                 storage_account_url=None,
                 storage_access_key=None,
                 backup_password=None,
                 backup_system_dbs=False,
                 backup_schedule_type=None,
                 full_backup_frequency=None,
                 full_backup_start_time=None,
                 full_backup_window_hours=None,
                 log_backup_frequency=None,
                 enable_key_vault_credential=None,
                 credential_name=None,
                 azure_key_vault_url=None,
                 service_principal_name=None,
                 service_principal_secret=None,
                 connectivity_type=None,
                 port=None,
                 sql_auth_update_username=None,
                 sql_auth_update_password=None,
                 sql_workload_type=None,
                 enable_r_services=None,
                 tags=None):
    '''
    Creates a SQL virtual machine.
    '''
    from azure.cli.core.commands.client_factory import get_subscription_id

    subscription_id = get_subscription_id(cmd.cli_ctx)

    virtual_machine_resource_id = resource_id(
        subscription=subscription_id,
        resource_group=resource_group_name,
        namespace='Microsoft.Compute',
        type='virtualMachines',
        name=sql_virtual_machine_name)

    if sql_virtual_machine_group_resource_id and not is_valid_resource_id(
            sql_virtual_machine_group_resource_id):
        raise CLIError("Invalid SQL virtual machine group resource id.")

    tags = tags or {}

    wsfc_domain_credentials_object = WsfcDomainCredentials(
        cluster_bootstrap_account_password=cluster_bootstrap_account_password,
        cluster_operator_account_password=cluster_operator_account_password,
        sql_service_account_password=sql_service_account_password)

    # If the customer provided any auto-patching settings, the plugin should be enabled.
    if (day_of_week or maintenance_window_duration
            or maintenance_window_starting_hour):
        enable_auto_patching = True

    auto_patching_object = AutoPatchingSettings(
        enable=enable_auto_patching,
        day_of_week=day_of_week,
        maintenance_window_starting_hour=maintenance_window_starting_hour,
        maintenance_window_duration=maintenance_window_duration)

    # If the customer provided any auto-backup settings, the plugin should be enabled.
    if (enable_encryption or retention_period or storage_account_url
            or storage_access_key or backup_password or backup_system_dbs
            or backup_schedule_type or full_backup_frequency
            or full_backup_start_time or full_backup_window_hours
            or log_backup_frequency):
        enable_auto_backup = True

    auto_backup_object = AutoBackupSettings(
        enable=enable_auto_backup,
        enable_encryption=enable_encryption if enable_auto_backup else None,
        retention_period=retention_period,
        storage_account_url=storage_account_url,
        storage_access_key=storage_access_key,
        password=backup_password,
        backup_system_dbs=backup_system_dbs if enable_auto_backup else None,
        backup_schedule_type=backup_schedule_type,
        full_backup_frequency=full_backup_frequency,
        full_backup_start_time=full_backup_start_time,
        full_backup_window_hours=full_backup_window_hours,
        log_backup_frequency=log_backup_frequency)

    # If the customer provided any key vault credential settings, the plugin should be enabled.
    if (credential_name or azure_key_vault_url or service_principal_name
            or service_principal_secret):
        enable_key_vault_credential = True

    keyvault_object = KeyVaultCredentialSettings(
        enable=enable_key_vault_credential,
        credential_name=credential_name,
        azure_key_vault_url=azure_key_vault_url,
        service_principal_name=service_principal_name,
        service_principal_secret=service_principal_secret)

    connectivity_object = SqlConnectivityUpdateSettings(
        port=port,
        connectivity_type=connectivity_type,
        sql_auth_update_user_name=sql_auth_update_username,
        sql_auth_update_password=sql_auth_update_password)

    workload_type_object = SqlWorkloadTypeUpdateSettings(
        sql_workload_type=sql_workload_type)

    additional_features_object = AdditionalFeaturesServerConfigurations(
        is_rservices_enabled=enable_r_services)

    server_configuration_object = ServerConfigurationsManagementSettings(
        sql_connectivity_update_settings=connectivity_object,
        sql_workload_type_update_settings=workload_type_object,
        additional_features_server_configurations=additional_features_object)

    sqlvm_object = SqlVirtualMachine(
        location=location,
        virtual_machine_resource_id=virtual_machine_resource_id,
        sql_server_license_type=sql_server_license_type,
        sql_virtual_machine_group_resource_id=sql_virtual_machine_group_resource_id,
        wsfc_domain_credentials=wsfc_domain_credentials_object,
        auto_patching_settings=auto_patching_object,
        auto_backup_settings=auto_backup_object,
        key_vault_credential_settings=keyvault_object,
        server_configurations_management_settings=server_configuration_object,
        tags=tags)

    # Since it's a long-running operation, do the PUT and then a GET to display the created instance.
    LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(False,
                                                  client.create_or_update,
                                                  resource_group_name,
                                                  sql_virtual_machine_name,
                                                  sqlvm_object))

    return client.get(resource_group_name, sql_virtual_machine_name)
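
The snippets in this collection all delegate to the same `sdk_no_wait` helper from `azure.cli.core.util`. For orientation, here is a minimal sketch of the behavior these examples assume (the shipped helper may differ in detail):

def sdk_no_wait(no_wait, func, *args, **kwargs):
    # Minimal sketch of azure.cli.core.util.sdk_no_wait: when no_wait is
    # truthy, disable polling and return the raw response immediately;
    # otherwise invoke the SDK method as usual (typically returning a
    # poller for long-running operations).
    if no_wait:
        kwargs.update({'raw': True, 'polling': False})
    return func(*args, **kwargs)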
Code example #27
def update_k8s_extension(
    cmd,
    client,
    resource_group_name,
    cluster_name,
    name,
    cluster_type,
    cluster_resource_provider=None,
    auto_upgrade_minor_version=None,
    release_train=None,
    version=None,
    configuration_settings=None,
    configuration_protected_settings=None,
    configuration_settings_file=None,
    configuration_protected_settings_file=None,
    no_wait=False,
    yes=False,
):
    """Patch an existing Extension Instance."""

    if (configuration_settings or configuration_protected_settings
            or configuration_settings_file
            or configuration_protected_settings_file):
        msg = (
            "Updating properties in --configuration-settings or --configuration-protected-settings may lead to an"
            " undesirable state if the cluster extension type does not support it. Please refer to the documentation"
            " of the cluster extension service to check whether updates to these properties are supported."
            " Do you wish to proceed?")
        user_confirmation_factory(cmd, yes, msg)

    # Determine ClusterRP
    cluster_rp, _ = get_cluster_rp_api_version(
        cluster_type=cluster_type, cluster_rp=cluster_resource_provider)

    # We need to determine the ExtensionType to call ExtensionFactory and create Extension class
    extension = show_k8s_extension(client, resource_group_name, cluster_name,
                                   name, cluster_type, cluster_rp)
    extension_type_lower = extension.extension_type.lower()

    config_settings = {}
    config_protected_settings = {}
    # Get Configuration Settings from file
    if configuration_settings_file is not None:
        config_settings = read_config_settings_file(
            configuration_settings_file)

    if configuration_settings is not None:
        for dicts in configuration_settings:
            for key, value in dicts.items():
                config_settings[key] = value

    # Get Configuration Protected Settings from file
    if configuration_protected_settings_file is not None:
        config_protected_settings = read_config_settings_file(
            configuration_protected_settings_file)

    if configuration_protected_settings is not None:
        for dicts in configuration_protected_settings:
            for key, value in dicts.items():
                config_protected_settings[key] = value

    # Get the extension class based on the extension type
    extension_class = ExtensionFactory(extension_type_lower)

    upd_extension = extension_class.Update(
        cmd,
        resource_group_name,
        cluster_name,
        auto_upgrade_minor_version,
        release_train,
        version,
        config_settings,
        config_protected_settings,
        extension,
        yes,
    )

    return sdk_no_wait(
        no_wait,
        client.begin_update,
        resource_group_name,
        cluster_rp,
        cluster_type,
        cluster_name,
        name,
        upd_extension,
    )
Code example #28
def create_appserviceenvironment_arm(cmd,
                                     resource_group_name,
                                     name,
                                     subnet,
                                     kind='ASEv2',
                                     vnet_name=None,
                                     ignore_route_table=False,
                                     ignore_network_security_group=False,
                                     virtual_ip_type='Internal',
                                     front_end_scale_factor=None,
                                     front_end_sku=None,
                                     force_route_table=False,
                                     force_network_security_group=False,
                                     ignore_subnet_size_validation=False,
                                     location=None,
                                     no_wait=False,
                                     os_preference=None,
                                     zone_redundant=None):
    # The current SDK has a couple of challenges creating an ASE. The swagger version currently in use
    # does not list 201 as a valid response code, so polling operations will fail.
    # The Load Balancer Type is an enum flag that is expressed as a simple string enum in swagger,
    # and therefore does not allow defining an Internal ASE (combining the web and publishing flags).
    # Hence this method deploys directly through ARM.
    location = location or _get_location_from_resource_group(
        cmd.cli_ctx, resource_group_name)
    subnet_id = _validate_subnet_id(cmd.cli_ctx, subnet, vnet_name,
                                    resource_group_name)
    deployment_name = _get_unique_deployment_name('cli_ase_deploy_')
    _validate_subnet_empty(cmd.cli_ctx, subnet_id)
    if not ignore_subnet_size_validation:
        _validate_subnet_size(cmd.cli_ctx, subnet_id)

    if kind == 'ASEv2':
        if not ignore_route_table:
            _ensure_route_table(cmd.cli_ctx, resource_group_name, name,
                                location, subnet_id, force_route_table)
        if not ignore_network_security_group:
            _ensure_network_security_group(cmd.cli_ctx, resource_group_name,
                                           name, location, subnet_id,
                                           force_network_security_group)
        ase_deployment_properties = _build_ase_deployment_properties(
            name=name,
            location=location,
            subnet_id=subnet_id,
            virtual_ip_type=virtual_ip_type,
            front_end_scale_factor=front_end_scale_factor,
            front_end_sku=front_end_sku,
            os_preference=os_preference)

    elif kind == 'ASEv3':
        _ensure_subnet_delegation(cmd.cli_ctx, subnet_id,
                                  'Microsoft.Web/hostingEnvironments')
        ase_deployment_properties = _build_ase_deployment_properties(
            name=name,
            location=location,
            subnet_id=subnet_id,
            kind='ASEv3',
            virtual_ip_type=virtual_ip_type,
            zone_redundant=zone_redundant)
    logger.info('Creating App Service Environment...')
    deployment_client = _get_resource_client_factory(cmd.cli_ctx).deployments
    return sdk_no_wait(no_wait, deployment_client.begin_create_or_update,
                       resource_group_name, deployment_name,
                       ase_deployment_properties)
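
Since the function bypasses the typed websites SDK for the reasons given in its opening comment, `_build_ase_deployment_properties` (not shown in this listing) must produce whatever `deployments.begin_create_or_update` accepts. A hypothetical sketch of the outer shape, assuming the `azure-mgmt-resource` models and an inline ARM template:

# Hypothetical sketch: the real helper embeds a full
# Microsoft.Web/hostingEnvironments template; only the envelope is shown.
from azure.mgmt.resource.resources.models import (Deployment, DeploymentMode,
                                                  DeploymentProperties)

def _build_minimal_deployment_properties(template, parameters):
    return Deployment(
        properties=DeploymentProperties(
            template=template,      # inline ARM template as a dict
            parameters=parameters,  # {'name': {'value': ...}} per ARM convention
            mode=DeploymentMode.incremental))  # enum member naming varies by SDK version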
Code example #29
File: artifacts.py Project: jaysterp/azure-cli
def stop_trigger(cmd, workspace_name, trigger_name, no_wait=False):
    client = cf_synapse_trigger(cmd.cli_ctx, workspace_name)
    return sdk_no_wait(no_wait,
                       client.begin_stop_trigger,
                       trigger_name,
                       polling=True)
Code example #30
def enable_protection_for_vm(cmd,
                             client,
                             resource_group_name,
                             vault_name,
                             vm,
                             policy_name,
                             diskslist=None,
                             disk_list_setting=None,
                             exclude_all_data_disks=None):
    vm_name, vm_rg = _get_resource_name_and_rg(resource_group_name, vm)
    vm = virtual_machines_cf(cmd.cli_ctx).get(vm_rg, vm_name)
    vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
    policy = show_policy(protection_policies_cf(cmd.cli_ctx),
                         resource_group_name, vault_name, policy_name)

    if vm.location.lower() != vault.location.lower():
        raise CLIError("""
            The VM should be in the same location as that of the Recovery Services vault to enable protection.
            """)

    if policy.properties.backup_management_type != BackupManagementType.azure_iaas_vm.value:
        raise CLIError("""
            The policy type should match with the workload being protected.
            Use the relevant get-default policy command and use it to protect the workload.
            """)

    # Get protectable item.
    protectable_item = _get_protectable_item_for_vm(cmd.cli_ctx, vault_name,
                                                    resource_group_name,
                                                    vm_name, vm_rg)
    if protectable_item is None:
        raise CLIError("""
            The specified Azure Virtual Machine Not Found. Possible causes are
               1. VM does not exist
               2. The VM name or the Service name needs to be case sensitive
               3. VM is already Protected with same or other Vault.
                  Please Unprotect VM first and then try to protect it again.

            Please contact Microsoft for further assistance.
            """)

    # Construct enable protection request object
    container_uri = _get_protection_container_uri_from_id(protectable_item.id)
    item_uri = _get_protectable_item_uri_from_id(protectable_item.id)
    vm_item_properties = _get_vm_item_properties_from_vm_type(vm.type)
    vm_item_properties.policy_id = policy.id
    vm_item_properties.source_resource_id = protectable_item.properties.virtual_machine_id

    if disk_list_setting is not None:
        if diskslist is None:
            raise CLIError(
                "Please provide LUNs of disks that will be included or excluded."
            )
        is_inclusion_list = False
        if disk_list_setting == "include":
            is_inclusion_list = True
        disk_exclusion_properties = DiskExclusionProperties(
            disk_lun_list=diskslist, is_inclusion_list=is_inclusion_list)
        extended_properties = ExtendedProperties(
            disk_exclusion_properties=disk_exclusion_properties)
        vm_item_properties.extended_properties = extended_properties
    elif exclude_all_data_disks:
        disk_exclusion_properties = DiskExclusionProperties(
            disk_lun_list=[], is_inclusion_list=True)
        extended_properties = ExtendedProperties(
            disk_exclusion_properties=disk_exclusion_properties)
        vm_item_properties.extended_properties = extended_properties

    vm_item = ProtectedItemResource(properties=vm_item_properties)

    # Trigger enable protection; completion is tracked below as a backup job
    result = sdk_no_wait(True, client.create_or_update, vault_name,
                         resource_group_name, fabric_name, container_uri,
                         item_uri, vm_item)
    return _track_backup_job(cmd.cli_ctx, result, vault_name,
                             resource_group_name)
Code example #31
def aro_create(
        cmd,  # pylint: disable=too-many-locals
        client,
        resource_group_name,
        resource_name,
        master_subnet,
        worker_subnet,
        vnet=None,
        vnet_resource_group_name=None,  # pylint: disable=unused-argument
        location=None,
        domain=None,
        cluster_resource_group=None,
        client_id=None,
        client_secret=None,
        pod_cidr=None,
        service_cidr=None,
        master_vm_size=None,
        worker_vm_size=None,
        worker_vm_disk_size_gb=None,
        worker_count=None,
        apiserver_visibility=None,
        ingress_visibility=None,
        tags=None,
        no_wait=False):
    vnet = validate_subnets(master_subnet, worker_subnet)

    subscription_id = get_subscription_id(cmd.cli_ctx)

    random_id = ''.join(
        random.choice('abcdefghijklmnopqrstuvwxyz0123456789')
        for _ in range(8))

    aad = AADManager(cmd.cli_ctx)
    if client_id is None:
        app, client_secret = aad.create_application('aro-%s' % random_id)
        client_id = app.app_id

    client_sp = aad.get_service_principal(client_id)
    if not client_sp:
        client_sp = aad.create_service_principal(client_id)

    rp_client_id = FP_CLIENT_ID
    if rp_mode_development():
        rp_client_id = os.environ['AZURE_FP_CLIENT_ID']

    rp_client_sp = aad.get_service_principal(rp_client_id)

    assign_contributor_to_vnet(cmd.cli_ctx, vnet, client_sp.object_id)
    assign_contributor_to_vnet(cmd.cli_ctx, vnet, rp_client_sp.object_id)

    oc = v2019_12_31_preview.OpenShiftCluster(
        location=location,
        tags=tags,
        cluster_profile=v2019_12_31_preview.ClusterProfile(
            domain=domain or random_id,
            resource_group_id='/subscriptions/%s/resourceGroups/%s' %
            (subscription_id, cluster_resource_group or "aro-" + random_id),
        ),
        service_principal_profile=v2019_12_31_preview.ServicePrincipalProfile(
            client_id=client_id,
            client_secret=client_secret,
        ),
        network_profile=v2019_12_31_preview.NetworkProfile(
            pod_cidr=pod_cidr or '10.128.0.0/14',
            service_cidr=service_cidr or '172.30.0.0/16',
        ),
        master_profile=v2019_12_31_preview.MasterProfile(
            vm_size=master_vm_size or 'Standard_D8s_v3',
            subnet_id=master_subnet,
        ),
        worker_profiles=[
            v2019_12_31_preview.WorkerProfile(
                name='worker',  # TODO: 'worker' should not be hard-coded
                vm_size=worker_vm_size or 'Standard_D2s_v3',
                disk_size_gb=worker_vm_disk_size_gb or 128,
                subnet_id=worker_subnet,
                count=worker_count or 3,
            )
        ],
        apiserver_profile=v2019_12_31_preview.APIServerProfile(
            visibility=apiserver_visibility or 'Public', ),
        ingress_profiles=[
            v2019_12_31_preview.IngressProfile(
                name='default',  # TODO: 'default' should not be hard-coded
                visibility=ingress_visibility or 'Public',
            )
        ],
    )

    return sdk_no_wait(no_wait,
                       client.create_or_update,
                       resource_group_name=resource_group_name,
                       resource_name=resource_name,
                       parameters=oc)
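
`validate_subnets` is not included in this listing; judging from its call site, it checks that the master and worker subnets belong to the same virtual network and returns that vnet's resource ID for the role assignments that follow. A hedged sketch of that behavior:

# Hypothetical sketch of validate_subnets, inferred from its usage above.
from knack.util import CLIError
from msrestazure.tools import parse_resource_id, resource_id

def validate_subnets(master_subnet, worker_subnet):
    master = parse_resource_id(master_subnet)
    worker = parse_resource_id(worker_subnet)
    # Both subnet IDs must point into the same virtual network.
    if (master.get('resource_group', '').lower() != worker.get('resource_group', '').lower()
            or master.get('name', '').lower() != worker.get('name', '').lower()):
        raise CLIError('--master-subnet and --worker-subnet must be in the same vnet.')
    return resource_id(
        subscription=master['subscription'],
        resource_group=master['resource_group'],
        namespace='Microsoft.Network',
        type='virtualNetworks',
        name=master['name'])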
Code example #32
File: custom.py Project: sptramer/azure-cli
def create_container(cmd,
                     resource_group_name,
                     name=None,
                     image=None,
                     location=None,
                     cpu=1,
                     memory=1.5,
                     restart_policy='Always',
                     ports=None,
                     protocol=None,
                     os_type='Linux',
                     ip_address=None,
                     dns_name_label=None,
                     command_line=None,
                     environment_variables=None,
                     secure_environment_variables=None,
                     registry_login_server=None,
                     registry_username=None,
                     registry_password=None,
                     azure_file_volume_share_name=None,
                     azure_file_volume_account_name=None,
                     azure_file_volume_account_key=None,
                     azure_file_volume_mount_path=None,
                     log_analytics_workspace=None,
                     log_analytics_workspace_key=None,
                     vnet=None,
                     vnet_name=None,
                     vnet_address_prefix='10.0.0.0/16',
                     subnet=None,
                     subnet_address_prefix='10.0.0.0/24',
                     network_profile=None,
                     gitrepo_url=None,
                     gitrepo_dir='.',
                     gitrepo_revision=None,
                     gitrepo_mount_path=None,
                     secrets=None,
                     secrets_mount_path=None,
                     file=None,
                     assign_identity=None,
                     identity_scope=None,
                     identity_role='Contributor',
                     no_wait=False):
    """Create a container group. """
    if file:
        return _create_update_from_file(cmd.cli_ctx, resource_group_name, name, location, file, no_wait)

    if not name:
        raise CLIError("error: the --name/-n argument is required unless specified with a passed in file.")

    if not image:
        raise CLIError("error: the --image argument is required unless specified with a passed in file.")

    ports = ports or [80]
    protocol = protocol or ContainerGroupNetworkProtocol.tcp

    container_resource_requirements = _create_resource_requirements(cpu=cpu, memory=memory)

    image_registry_credentials = _create_image_registry_credentials(registry_login_server=registry_login_server,
                                                                    registry_username=registry_username,
                                                                    registry_password=registry_password,
                                                                    image=image)

    command = shlex.split(command_line) if command_line else None

    volumes = []
    mounts = []

    azure_file_volume = _create_azure_file_volume(azure_file_volume_share_name=azure_file_volume_share_name,
                                                  azure_file_volume_account_name=azure_file_volume_account_name,
                                                  azure_file_volume_account_key=azure_file_volume_account_key)
    azure_file_volume_mount = _create_azure_file_volume_mount(azure_file_volume=azure_file_volume,
                                                              azure_file_volume_mount_path=azure_file_volume_mount_path)

    if azure_file_volume:
        volumes.append(azure_file_volume)
        mounts.append(azure_file_volume_mount)

    secrets_volume = _create_secrets_volume(secrets)
    secrets_volume_mount = _create_secrets_volume_mount(secrets_volume=secrets_volume,
                                                        secrets_mount_path=secrets_mount_path)

    if secrets_volume:
        volumes.append(secrets_volume)
        mounts.append(secrets_volume_mount)

    diagnostics = None
    tags = {}
    if log_analytics_workspace and log_analytics_workspace_key:
        log_analytics = LogAnalytics(
            workspace_id=log_analytics_workspace, workspace_key=log_analytics_workspace_key)

        diagnostics = ContainerGroupDiagnostics(
            log_analytics=log_analytics
        )
    elif log_analytics_workspace and not log_analytics_workspace_key:
        diagnostics, tags = _get_diagnostics_from_workspace(
            cmd.cli_ctx, log_analytics_workspace)
        if not diagnostics:
            raise CLIError('Log Analytics workspace "' + log_analytics_workspace + '" not found.')
    elif not log_analytics_workspace and log_analytics_workspace_key:
        raise CLIError('"--log-analytics-workspace-key" requires "--log-analytics-workspace".')

    gitrepo_volume = _create_gitrepo_volume(gitrepo_url=gitrepo_url, gitrepo_dir=gitrepo_dir, gitrepo_revision=gitrepo_revision)
    gitrepo_volume_mount = _create_gitrepo_volume_mount(gitrepo_volume=gitrepo_volume, gitrepo_mount_path=gitrepo_mount_path)

    if gitrepo_volume:
        volumes.append(gitrepo_volume)
        mounts.append(gitrepo_volume_mount)

    # Concatenate secure and standard environment variables
    if environment_variables and secure_environment_variables:
        environment_variables = environment_variables + secure_environment_variables
    else:
        environment_variables = environment_variables or secure_environment_variables

    identity = None
    if assign_identity is not None:
        identity = _build_identities_info(assign_identity)

    # Set up VNET, subnet and network profile if needed
    if subnet and not network_profile:
        network_profile = _get_vnet_network_profile(cmd, location, resource_group_name, vnet, vnet_address_prefix, subnet, subnet_address_prefix)

    cg_network_profile = None
    if network_profile:
        cg_network_profile = ContainerGroupNetworkProfile(id=network_profile)

    cgroup_ip_address = _create_ip_address(ip_address, ports, protocol, dns_name_label, network_profile)

    container = Container(name=name,
                          image=image,
                          resources=container_resource_requirements,
                          command=command,
                          ports=[ContainerPort(
                              port=p, protocol=protocol) for p in ports] if cgroup_ip_address else None,
                          environment_variables=environment_variables,
                          volume_mounts=mounts or None)

    cgroup = ContainerGroup(location=location,
                            identity=identity,
                            containers=[container],
                            os_type=os_type,
                            restart_policy=restart_policy,
                            ip_address=cgroup_ip_address,
                            image_registry_credentials=image_registry_credentials,
                            volumes=volumes or None,
                            network_profile=cg_network_profile,
                            diagnostics=diagnostics,
                            tags=tags)

    container_group_client = cf_container_groups(cmd.cli_ctx)

    lro = sdk_no_wait(no_wait, container_group_client.create_or_update, resource_group_name,
                      name, cgroup)

    if assign_identity is not None and identity_scope:
        from azure.cli.core.commands.arm import assign_identity
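        # NB: this local import deliberately shadows the 'assign_identity'
        # parameter, which the enclosing check has already consumed.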
        cg = container_group_client.get(resource_group_name, name)
        assign_identity(cmd.cli_ctx, lambda: cg, lambda cg: cg, identity_role, identity_scope)

    return lro
Code example #33
File: custom.py Project: weizeng-msft/azure-cli
def restore_disks(cmd,
                  client,
                  resource_group_name,
                  vault_name,
                  container_name,
                  item_name,
                  rp_name,
                  storage_account,
                  target_resource_group=None,
                  restore_to_staging_storage_account=None):
    item = show_item(cmd, backup_protected_items_cf(cmd.cli_ctx),
                     resource_group_name, vault_name, container_name,
                     item_name, "AzureIaasVM", "VM")
    _validate_item(item)
    recovery_point = show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx),
                                         resource_group_name, vault_name,
                                         container_name, item_name, rp_name,
                                         "AzureIaasVM", "VM")
    vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
    vault_location = vault.location

    # Get container and item URIs
    container_uri = _get_protection_container_uri_from_id(item.id)
    item_uri = _get_protected_item_uri_from_id(item.id)

    # Original Storage Account Restore Logic
    use_original_storage_account = _should_use_original_storage_account(
        recovery_point, restore_to_staging_storage_account)
    if use_original_storage_account:
        logger.warning("""
            The disks will be restored to their original storage accounts. The VM config file will be uploaded to given
            storage account.
            """)

    # Construct trigger restore request object
    sa_name, sa_rg = _get_resource_name_and_rg(resource_group_name,
                                               storage_account)
    _storage_account_id = _get_storage_account_id(cmd.cli_ctx, sa_name, sa_rg)
    _source_resource_id = item.properties.source_resource_id
    target_rg_id = None
    if recovery_point.properties.is_managed_virtual_machine and target_resource_group is not None:
        target_rg_id = '/'.join(
            _source_resource_id.split('/')[:4]) + "/" + target_resource_group
    trigger_restore_properties = IaasVMRestoreRequest(
        create_new_cloud_service=True,
        recovery_point_id=rp_name,
        recovery_type='RestoreDisks',
        region=vault_location,
        storage_account_id=_storage_account_id,
        source_resource_id=_source_resource_id,
        target_resource_group_id=target_rg_id,
        original_storage_account_option=use_original_storage_account)
    trigger_restore_request = RestoreRequestResource(
        properties=trigger_restore_properties)

    # Trigger restore
    result = sdk_no_wait(True, client.trigger, vault_name, resource_group_name,
                         fabric_name, container_uri, item_uri, rp_name,
                         trigger_restore_request)
    return _track_backup_job(cmd.cli_ctx, result, vault_name,
                             resource_group_name)
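
`_should_use_original_storage_account` is referenced but not shown. From its inputs, it plausibly honors an explicit `--restore-to-staging-storage-account` choice and otherwise falls back to what the recovery point allows; a hedged sketch (the attribute name is assumed):

# Hypothetical sketch, inferred from the call site in restore_disks.
def _should_use_original_storage_account(recovery_point, restore_to_staging_storage_account):
    if restore_to_staging_storage_account is None:
        # No explicit choice: follow the recovery point's own option (attribute name assumed).
        return bool(recovery_point.properties.original_storage_account_option)
    # Explicitly restoring to a staging account means not using the original accounts.
    return not restore_to_staging_storage_account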
Code example #34
File: custom.py Project: jiayexie/azure-cli
def create_container(cmd,
                     resource_group_name,
                     name=None,
                     image=None,
                     location=None,
                     cpu=1,
                     memory=1.5,
                     restart_policy='Always',
                     ports=None,
                     protocol=None,
                     os_type='Linux',
                     ip_address=None,
                     dns_name_label=None,
                     command_line=None,
                     environment_variables=None,
                     registry_login_server=None,
                     registry_username=None,
                     registry_password=None,
                     azure_file_volume_share_name=None,
                     azure_file_volume_account_name=None,
                     azure_file_volume_account_key=None,
                     azure_file_volume_mount_path=None,
                     log_analytics_workspace=None,
                     log_analytics_workspace_key=None,
                     gitrepo_url=None,
                     gitrepo_dir='.',
                     gitrepo_revision=None,
                     gitrepo_mount_path=None,
                     secrets=None,
                     secrets_mount_path=None,
                     file=None,
                     no_wait=False):
    """Create a container group. """

    if file:
        return _create_update_from_file(cmd.cli_ctx, resource_group_name, name, location, file, no_wait)

    if not name:
        raise CLIError("error: the --name/-n argument is required unless specified with a passed in file.")

    if not image:
        raise CLIError("error: the --image argument is required unless specified with a passed in file.")

    ports = ports or [80]
    protocol = protocol or ContainerGroupNetworkProtocol.tcp

    container_resource_requirements = _create_resource_requirements(cpu=cpu, memory=memory)

    image_registry_credentials = _create_image_registry_credentials(registry_login_server=registry_login_server,
                                                                    registry_username=registry_username,
                                                                    registry_password=registry_password,
                                                                    image=image)

    command = shlex.split(command_line) if command_line else None

    volumes = []
    mounts = []

    azure_file_volume = _create_azure_file_volume(azure_file_volume_share_name=azure_file_volume_share_name,
                                                  azure_file_volume_account_name=azure_file_volume_account_name,
                                                  azure_file_volume_account_key=azure_file_volume_account_key)
    azure_file_volume_mount = _create_azure_file_volume_mount(azure_file_volume=azure_file_volume,
                                                              azure_file_volume_mount_path=azure_file_volume_mount_path)

    if azure_file_volume:
        volumes.append(azure_file_volume)
        mounts.append(azure_file_volume_mount)

    secrets_volume = _create_secrets_volume(secrets)
    secrets_volume_mount = _create_secrets_volume_mount(secrets_volume=secrets_volume,
                                                        secrets_mount_path=secrets_mount_path)

    if secrets_volume:
        volumes.append(secrets_volume)
        mounts.append(secrets_volume_mount)

    diagnostics = None
    tags = {}
    if log_analytics_workspace and log_analytics_workspace_key:
        log_analytics = LogAnalytics(
            workspace_id=log_analytics_workspace, workspace_key=log_analytics_workspace_key)

        diagnostics = ContainerGroupDiagnostics(
            log_analytics=log_analytics
        )
    elif log_analytics_workspace and not log_analytics_workspace_key:
        diagnostics, tags = _get_diagnostics_from_workspace(
            cmd.cli_ctx, log_analytics_workspace)
        if not diagnostics:
            raise CLIError('Log Analytics workspace "' + log_analytics_workspace + '" not found.')
    elif not log_analytics_workspace and log_analytics_workspace_key:
        raise CLIError('"--log-analytics-workspace-key" requires "--log-analytics-workspace".')

    gitrepo_volume = _create_gitrepo_volume(gitrepo_url=gitrepo_url, gitrepo_dir=gitrepo_dir, gitrepo_revision=gitrepo_revision)
    gitrepo_volume_mount = _create_gitrepo_volume_mount(gitrepo_volume=gitrepo_volume, gitrepo_mount_path=gitrepo_mount_path)

    if gitrepo_volume:
        volumes.append(gitrepo_volume)
        mounts.append(gitrepo_volume_mount)

    cgroup_ip_address = _create_ip_address(ip_address, ports, protocol, dns_name_label)

    container = Container(name=name,
                          image=image,
                          resources=container_resource_requirements,
                          command=command,
                          ports=[ContainerPort(
                              port=p, protocol=protocol) for p in ports] if cgroup_ip_address else None,
                          environment_variables=environment_variables,
                          volume_mounts=mounts or None)

    cgroup = ContainerGroup(location=location,
                            containers=[container],
                            os_type=os_type,
                            restart_policy=restart_policy,
                            ip_address=cgroup_ip_address,
                            image_registry_credentials=image_registry_credentials,
                            volumes=volumes or None,
                            diagnostics=diagnostics,
                            tags=tags)

    container_group_client = cf_container_groups(cmd.cli_ctx)
    return sdk_no_wait(no_wait, container_group_client.create_or_update, resource_group_name, name, cgroup)
Code example #35
File: custom.py Project: derekbekoe/azure-cli
def delete_service(client, service_name, resource_group_name, delete_running_tasks=None, no_wait=False):
    return sdk_no_wait(no_wait,
                       client.delete,
                       group_name=resource_group_name,
                       service_name=service_name,
                       delete_running_tasks=delete_running_tasks)
Code example #36
File: custom.py Project: derekbekoe/azure-cli
def stop_service(client, service_name, resource_group_name, no_wait=False):
    return sdk_no_wait(no_wait,
                       client.stop,
                       group_name=resource_group_name,
                       service_name=service_name)
Code example #37
def create_front_door(cmd, resource_group_name, front_door_name, backend_address,
                      friendly_name=None, tags=None, disabled=None, no_wait=False,
                      backend_host_header=None, frontend_host_name=None,
                      probe_path='/', probe_protocol='Https', probe_interval=30,
                      accepted_protocols=None, patterns_to_match=None, forwarding_protocol='MatchRequest',
                      enforce_certificate_name_check='Enabled'):
    from azext_front_door.vendored_sdks.models import (
        FrontDoor, FrontendEndpoint, BackendPool, Backend, HealthProbeSettingsModel, LoadBalancingSettingsModel,
        RoutingRule, ForwardingConfiguration, BackendPoolsSettings)

    # set the default names (consider making user-settable)
    backend_pool_name = 'DefaultBackendPool'
    frontend_endpoint_name = 'DefaultFrontendEndpoint'
    probe_setting_name = 'DefaultProbeSettings'
    load_balancing_settings_name = 'DefaultLoadBalancingSettings'
    routing_rule_name = 'DefaultRoutingRule'

    # get the IDs to fill the references
    backend_pool_id = _front_door_subresource_id(
        cmd, resource_group_name, front_door_name, 'backendPools', backend_pool_name)
    frontend_endpoint_id = _front_door_subresource_id(
        cmd, resource_group_name, front_door_name, 'frontendEndpoints', frontend_endpoint_name)
    probe_settings_id = _front_door_subresource_id(
        cmd, resource_group_name, front_door_name, 'healthProbeSettings', probe_setting_name)
    load_balancing_settings_id = _front_door_subresource_id(
        cmd, resource_group_name, front_door_name, 'loadBalancingSettings', load_balancing_settings_name)

    front_door = FrontDoor(
        tags=tags,
        location='global',
        friendly_name=friendly_name or front_door_name,
        enabled_state='Enabled' if not disabled else 'Disabled',
        backend_pools=[
            BackendPool(
                name=backend_pool_name,
                backends=[
                    Backend(
                        address=backend_address,
                        http_port=80,
                        https_port=443,
                        priority=1,
                        weight=50,
                        backend_host_header=backend_host_header or backend_address,
                        enabled_state='Enabled')
                ],
                health_probe_settings={'id': probe_settings_id},
                load_balancing_settings={'id': load_balancing_settings_id},
                resource_state='Enabled'
            )
        ],
        health_probe_settings=[
            HealthProbeSettingsModel(
                name=probe_setting_name,
                interval_in_seconds=probe_interval,
                path=probe_path,
                protocol=probe_protocol,
                resource_state='Enabled'
            )
        ],
        frontend_endpoints=[
            FrontendEndpoint(
                name=frontend_endpoint_name,
                host_name=frontend_host_name if frontend_host_name else '{}.azurefd.net'.format(front_door_name),
                session_affinity_enabled_state='Disabled',
                resource_state='Enabled'
            )
        ],
        load_balancing_settings=[
            LoadBalancingSettingsModel(
                name=load_balancing_settings_name,
                additional_latency_milliseconds=0,
                sample_size=4,
                successful_samples_required=2,
                resource_state='Enabled'
            )
        ],
        routing_rules=[
            RoutingRule(
                name=routing_rule_name,
                frontend_endpoints=[{'id': frontend_endpoint_id}],
                accepted_protocols=accepted_protocols or ['Http'],
                patterns_to_match=patterns_to_match or ['/*'],
                route_configuration=ForwardingConfiguration(forwarding_protocol=forwarding_protocol,
                                                            backend_pool={'id': backend_pool_id}),
                enabled_state='Enabled',
                resource_state='Enabled'
            )
        ],
        backend_pools_settings=BackendPoolsSettings(enforce_certificate_name_check=enforce_certificate_name_check)
    )
    return sdk_no_wait(no_wait, cf_frontdoor(cmd.cli_ctx, None).create_or_update,
                       resource_group_name, front_door_name, front_door)
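
The four `_front_door_subresource_id` calls above construct the self-referential IDs that let the routing rule, backend pool, probe, and load-balancing settings point at one another inside a Front Door that does not exist yet. A plausible sketch of that helper, assuming the standard `msrestazure` ID builder:

# Hypothetical sketch of _front_door_subresource_id, inferred from its usage above.
from azure.cli.core.commands.client_factory import get_subscription_id
from msrestazure.tools import resource_id

def _front_door_subresource_id(cmd, resource_group_name, front_door_name,
                               child_type, child_name):
    return resource_id(
        subscription=get_subscription_id(cmd.cli_ctx),
        resource_group=resource_group_name,
        namespace='Microsoft.Network',
        type='frontDoors',
        name=front_door_name,
        child_type_1=child_type,   # e.g. 'backendPools'
        child_name_1=child_name)   # e.g. 'DefaultBackendPool'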
Code example #39
File: custom.py Project: yugangw-msft/azure-cli
def create_cluster(cmd, client, cluster_name, resource_group_name, cluster_type, location=None, tags=None,
                   no_wait=False, cluster_version='default', cluster_tier=None,
                   cluster_configurations=None, component_version=None,
                   headnode_size='large', workernode_size='large', zookeepernode_size=None, edgenode_size=None,
                   workernode_count=3, workernode_data_disks_per_node=None,
                   workernode_data_disk_storage_account_type=None, workernode_data_disk_size=None,
                   http_username=None, http_password=None,
                   ssh_username='******', ssh_password=None, ssh_public_key=None,
                   storage_account=None, storage_account_key=None,
                   storage_default_container=None, storage_default_filesystem=None,
                   storage_account_managed_identity=None,
                   vnet_name=None, subnet=None,
                   domain=None, ldaps_urls=None,
                   cluster_admin_account=None, cluster_admin_password=None,
                   cluster_users_group_dns=None,
                   assign_identity=None,
                   encryption_vault_uri=None, encryption_key_name=None, encryption_key_version=None,
                   encryption_algorithm='RSA-OAEP', esp=False):
    from .util import build_identities_info, build_virtual_network_profile, parse_domain_name, \
        get_storage_account_endpoint, validate_esp_cluster_create_params
    from azure.mgmt.hdinsight.models import ClusterCreateParametersExtended, ClusterCreateProperties, OSType, \
        ClusterDefinition, ComputeProfile, HardwareProfile, Role, OsProfile, LinuxOperatingSystemProfile, \
        StorageProfile, StorageAccount, DataDisksGroups, SecurityProfile, \
        DirectoryType, DiskEncryptionProperties, Tier

    validate_esp_cluster_create_params(esp, cluster_name, resource_group_name, cluster_type,
                                       subnet, domain, cluster_admin_account, assign_identity,
                                       ldaps_urls, cluster_admin_password, cluster_users_group_dns)

    if esp:
        if cluster_tier == Tier.standard:
            raise CLIError('Cluster tier cannot be {} when --esp is specified. '
                           'Please use the default value or specify {} explicitly.'.format(Tier.standard, Tier.premium))
        if not cluster_tier:
            cluster_tier = Tier.premium

    # Update optional parameters with defaults
    location = location or _get_rg_location(cmd.cli_ctx, resource_group_name)

    # Format dictionary/free-form arguments
    if cluster_configurations:
        import json
        try:
            cluster_configurations = json.loads(cluster_configurations)
        except ValueError as ex:
            raise CLIError('The cluster_configurations argument must be valid JSON. Error: {}'.format(str(ex)))
    else:
        cluster_configurations = dict()
    if component_version:
        # See validator
        component_version = {c: v for c, v in [version.split('=') for version in component_version]}

    # Validate whether HTTP credentials were provided
    if 'gateway' in cluster_configurations:
        gateway_config = cluster_configurations['gateway']
    else:
        gateway_config = dict()
    if http_username and 'restAuthCredential.username' in gateway_config:
        raise CLIError('An HTTP username must be specified either as a command-line parameter '
                       'or in the cluster configuration, but not both.')
    else:
        http_username = '******'  # Implement default logic here, in case a user specifies the username in configurations
    is_password_in_cluster_config = 'restAuthCredential.password' in gateway_config
    if http_password and is_password_in_cluster_config:
        raise CLIError('An HTTP password must be specified either as a command-line parameter '
                       'or in the cluster configuration, but not both.')
    if not (http_password or is_password_in_cluster_config):
        raise CLIError('An HTTP password is required.')

    # Update the cluster config with the HTTP credentials
    gateway_config['restAuthCredential.isEnabled'] = 'true'  # HTTP credentials are required
    http_username = http_username or gateway_config['restAuthCredential.username']
    gateway_config['restAuthCredential.username'] = http_username
    http_password = http_password or gateway_config['restAuthCredential.password']
    gateway_config['restAuthCredential.password'] = http_password
    cluster_configurations['gateway'] = gateway_config

    # Validate whether SSH credentials were provided
    if not (ssh_password or ssh_public_key):
        logger.warning("SSH credentials not specified. Using the HTTP password as the SSH password.")
        ssh_password = http_password

    # Validate storage arguments from the user
    if storage_default_container and storage_default_filesystem:
        raise CLIError('Either the default container or the default filesystem can be specified, but not both.')

    # Retrieve primary blob service endpoint
    is_wasb = not storage_account_managed_identity
    storage_account_endpoint = None
    if storage_account:
        storage_account_endpoint = get_storage_account_endpoint(cmd, storage_account, is_wasb)

    # Attempt to infer the storage account key from the endpoint
    if not storage_account_key and storage_account and is_wasb:
        from .util import get_key_for_storage_account
        logger.info('Storage account key not specified. Attempting to retrieve key...')
        key = get_key_for_storage_account(cmd, storage_account)
        if not key:
            raise CLIError('Storage account key could not be inferred from storage account.')
        else:
            storage_account_key = key

    # Attempt to provide a default container for WASB storage accounts
    if not storage_default_container and is_wasb:
        storage_default_container = cluster_name
        logger.warning('Default WASB container not specified, using "%s".', storage_default_container)
    elif not storage_default_filesystem and not is_wasb:
        storage_default_filesystem = cluster_name
        logger.warning('Default ADLS file system not specified, using "%s".', storage_default_filesystem)

    # Validate storage info parameters
    if is_wasb and not _all_or_none(storage_account, storage_account_key, storage_default_container):
        raise CLIError('If storage details are specified, the storage account, storage account key, '
                       'and the default container must be specified.')
    elif not is_wasb and not _all_or_none(storage_account, storage_default_filesystem):
        raise CLIError('If storage details are specified, the storage account, '
                       'and the default filesystem must be specified.')

    # Validate disk encryption parameters
    if not _all_or_none(encryption_vault_uri, encryption_key_name, encryption_key_version):
        raise CLIError('Either the encryption vault URI, key name and key version should be specified, '
                       'or none of them should be.')

    # Specify virtual network profile only when network arguments are provided
    virtual_network_profile = subnet and build_virtual_network_profile(subnet)

    # Validate data disk parameters
    if not workernode_data_disks_per_node and workernode_data_disk_storage_account_type:
        raise CLIError("Cannot define data disk storage account type unless disks per node is defined.")
    if not workernode_data_disks_per_node and workernode_data_disk_size:
        raise CLIError("Cannot define data disk size unless disks per node is defined.")
    # Specify data disk groups only when disk arguments are provided
    workernode_data_disk_groups = workernode_data_disks_per_node and [
        DataDisksGroups(
            disks_per_node=workernode_data_disks_per_node,
            storage_account_type=workernode_data_disk_storage_account_type,
            disk_size_gb=workernode_data_disk_size
        )
    ]

    os_profile = OsProfile(
        linux_operating_system_profile=LinuxOperatingSystemProfile(
            username=ssh_username,
            password=ssh_password,
            ssh_public_key=ssh_public_key
        )
    )

    roles = [
        # Required roles
        Role(
            name="headnode",
            target_instance_count=2,
            hardware_profile=HardwareProfile(vm_size=headnode_size),
            os_profile=os_profile,
            virtual_network_profile=virtual_network_profile
        ),
        Role(
            name="workernode",
            target_instance_count=workernode_count,
            hardware_profile=HardwareProfile(vm_size=workernode_size),
            os_profile=os_profile,
            virtual_network_profile=virtual_network_profile,
            data_disks_groups=workernode_data_disk_groups
        )
    ]
    if zookeepernode_size:
        roles.append(
            Role(
                name="zookeepernode",
                target_instance_count=3,
                hardware_profile=HardwareProfile(vm_size=zookeepernode_size),
                os_profile=os_profile,
                virtual_network_profile=virtual_network_profile
            ))
    if edgenode_size:
        roles.append(
            Role(
                name="edgenode",
                target_instance_count=1,
                hardware_profile=HardwareProfile(vm_size=edgenode_size),
                os_profile=os_profile,
                virtual_network_profile=virtual_network_profile
            ))

    storage_accounts = []
    if storage_account:
        # Specify storage account details only when storage arguments are provided
        storage_accounts.append(
            StorageAccount(
                name=storage_account_endpoint,
                key=storage_account_key,
                container=storage_default_container,
                file_system=storage_default_filesystem,
                resource_id=None if is_wasb else storage_account,
                msi_resource_id=storage_account_managed_identity,
                is_default=True
            )
        )

    additional_storage_accounts = []  # TODO: Add support for additional storage accounts
    if additional_storage_accounts:
        storage_accounts += [
            StorageAccount(
                name=s.storage_account_endpoint,
                key=s.storage_account_key,
                container=s.container,
                is_default=False
            )
            for s in additional_storage_accounts
        ]

    assign_identities = []
    if assign_identity:
        assign_identities.append(assign_identity)

    if storage_account_managed_identity:
        assign_identities.append(storage_account_managed_identity)

    cluster_identity = build_identities_info(assign_identities) if assign_identities else None

    domain_name = domain and parse_domain_name(domain)
    if not ldaps_urls and domain_name:
        ldaps_urls = ['ldaps://{}:636'.format(domain_name)]

    security_profile = domain and SecurityProfile(
        directory_type=DirectoryType.active_directory,
        domain=domain_name,
        ldaps_urls=ldaps_urls,
        domain_username=cluster_admin_account,
        domain_user_password=cluster_admin_password,
        cluster_users_group_dns=cluster_users_group_dns,
        aadds_resource_id=domain,
        msi_resource_id=assign_identity
    )

    disk_encryption_properties = encryption_vault_uri and DiskEncryptionProperties(
        vault_uri=encryption_vault_uri,
        key_name=encryption_key_name,
        key_version=encryption_key_version,
        encryption_algorithm=encryption_algorithm,
        msi_resource_id=assign_identity
    )

    create_params = ClusterCreateParametersExtended(
        location=location,
        tags=tags,
        properties=ClusterCreateProperties(
            cluster_version=cluster_version,
            os_type=OSType.linux,
            tier=cluster_tier,
            cluster_definition=ClusterDefinition(
                kind=cluster_type,
                configurations=cluster_configurations,
                component_version=component_version
            ),
            compute_profile=ComputeProfile(
                roles=roles
            ),
            storage_profile=StorageProfile(
                storageaccounts=storage_accounts
            ),
            security_profile=security_profile,
            disk_encryption_properties=disk_encryption_properties
        ),
        identity=cluster_identity
    )

    return sdk_no_wait(no_wait, client.create, resource_group_name, cluster_name, create_params)
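
The storage and disk-encryption checks above both lean on an `_all_or_none` helper that is absent from this listing; its contract is that a group of related parameters must be supplied together or not at all. A minimal sketch consistent with those call sites, together with a worked example of the `component_version` parsing from earlier in the function:

# Minimal sketch of the assumed _all_or_none contract: all values set, or none.
def _all_or_none(*params):
    return all(p is not None for p in params) or all(p is None for p in params)

assert _all_or_none('account', 'key', 'container')
assert _all_or_none(None, None, None)
assert not _all_or_none('account', None, 'container')

# component_version arrives as ['Spark=2.4', ...] and becomes {'Spark': '2.4', ...}.
component_version = {c: v for c, v in (item.split('=') for item in ['Spark=2.4', 'HBase=1.1'])}
assert component_version == {'Spark': '2.4', 'HBase': '1.1'}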
Code example #40
File: custom.py Project: bennerv/azure-cli
def create_container(cmd,
                     resource_group_name,
                     name=None,
                     image=None,
                     location=None,
                     cpu=1,
                     memory=1.5,
                     restart_policy='Always',
                     ports=None,
                     protocol=None,
                     os_type='Linux',
                     ip_address=None,
                     dns_name_label=None,
                     command_line=None,
                     environment_variables=None,
                     secure_environment_variables=None,
                     registry_login_server=None,
                     registry_username=None,
                     registry_password=None,
                     azure_file_volume_share_name=None,
                     azure_file_volume_account_name=None,
                     azure_file_volume_account_key=None,
                     azure_file_volume_mount_path=None,
                     log_analytics_workspace=None,
                     log_analytics_workspace_key=None,
                     vnet=None,
                     vnet_name=None,
                     vnet_address_prefix='10.0.0.0/16',
                     subnet=None,
                     subnet_address_prefix='10.0.0.0/24',
                     network_profile=None,
                     gitrepo_url=None,
                     gitrepo_dir='.',
                     gitrepo_revision=None,
                     gitrepo_mount_path=None,
                     secrets=None,
                     secrets_mount_path=None,
                     file=None,
                     assign_identity=None,
                     identity_scope=None,
                     identity_role='Contributor',
                     no_wait=False):
    """Create a container group. """
    if file:
        return _create_update_from_file(cmd.cli_ctx, resource_group_name, name,
                                        location, file, no_wait)

    if not name:
        raise CLIError(
            "error: the --name/-n argument is required unless specified with a passed in file."
        )

    if not image:
        raise CLIError(
            "error: the --image argument is required unless specified with a passed in file."
        )

    ports = ports or [80]
    protocol = protocol or ContainerGroupNetworkProtocol.tcp

    container_resource_requirements = _create_resource_requirements(
        cpu=cpu, memory=memory)

    image_registry_credentials = _create_image_registry_credentials(
        registry_login_server=registry_login_server,
        registry_username=registry_username,
        registry_password=registry_password,
        image=image)

    command = shlex.split(command_line) if command_line else None

    volumes = []
    mounts = []

    azure_file_volume = _create_azure_file_volume(
        azure_file_volume_share_name=azure_file_volume_share_name,
        azure_file_volume_account_name=azure_file_volume_account_name,
        azure_file_volume_account_key=azure_file_volume_account_key)
    azure_file_volume_mount = _create_azure_file_volume_mount(
        azure_file_volume=azure_file_volume,
        azure_file_volume_mount_path=azure_file_volume_mount_path)

    if azure_file_volume:
        volumes.append(azure_file_volume)
        mounts.append(azure_file_volume_mount)

    secrets_volume = _create_secrets_volume(secrets)
    secrets_volume_mount = _create_secrets_volume_mount(
        secrets_volume=secrets_volume, secrets_mount_path=secrets_mount_path)

    if secrets_volume:
        volumes.append(secrets_volume)
        mounts.append(secrets_volume_mount)

    diagnostics = None
    tags = {}
    if log_analytics_workspace and log_analytics_workspace_key:
        log_analytics = LogAnalytics(workspace_id=log_analytics_workspace,
                                     workspace_key=log_analytics_workspace_key)

        diagnostics = ContainerGroupDiagnostics(log_analytics=log_analytics)
    elif log_analytics_workspace and not log_analytics_workspace_key:
        diagnostics, tags = _get_diagnostics_from_workspace(
            cmd.cli_ctx, log_analytics_workspace)
        if not diagnostics:
            raise CLIError('Log Analytics workspace "' +
                           log_analytics_workspace + '" not found.')
    elif not log_analytics_workspace and log_analytics_workspace_key:
        raise CLIError(
            '"--log-analytics-workspace-key" requires "--log-analytics-workspace".'
        )

    gitrepo_volume = _create_gitrepo_volume(gitrepo_url=gitrepo_url,
                                            gitrepo_dir=gitrepo_dir,
                                            gitrepo_revision=gitrepo_revision)
    gitrepo_volume_mount = _create_gitrepo_volume_mount(
        gitrepo_volume=gitrepo_volume, gitrepo_mount_path=gitrepo_mount_path)

    if gitrepo_volume:
        volumes.append(gitrepo_volume)
        mounts.append(gitrepo_volume_mount)

    # Concatenate secure and standard environment variables
    if environment_variables and secure_environment_variables:
        environment_variables = environment_variables + secure_environment_variables
    else:
        environment_variables = environment_variables or secure_environment_variables

    identity = None
    if assign_identity is not None:
        identity = _build_identities_info(assign_identity)

    # Set up VNET, subnet and network profile if needed
    if subnet and not network_profile:
        network_profile = _get_vnet_network_profile(cmd, location,
                                                    resource_group_name, vnet,
                                                    vnet_address_prefix,
                                                    subnet,
                                                    subnet_address_prefix)

    cg_network_profile = None
    if network_profile:
        cg_network_profile = ContainerGroupNetworkProfile(id=network_profile)

    cgroup_ip_address = _create_ip_address(ip_address, ports, protocol,
                                           dns_name_label, network_profile)

    container = Container(
        name=name,
        image=image,
        resources=container_resource_requirements,
        command=command,
        ports=[ContainerPort(port=p, protocol=protocol)
               for p in ports] if cgroup_ip_address else None,
        environment_variables=environment_variables,
        volume_mounts=mounts or None)

    cgroup = ContainerGroup(
        location=location,
        identity=identity,
        containers=[container],
        os_type=os_type,
        restart_policy=restart_policy,
        ip_address=cgroup_ip_address,
        image_registry_credentials=image_registry_credentials,
        volumes=volumes or None,
        network_profile=cg_network_profile,
        diagnostics=diagnostics,
        tags=tags)

    container_group_client = cf_container_groups(cmd.cli_ctx)

    lro = sdk_no_wait(no_wait, container_group_client.create_or_update,
                      resource_group_name, name, cgroup)

    if assign_identity is not None and identity_scope:
        from azure.cli.core.commands.arm import assign_identity
        cg = container_group_client.get(resource_group_name, name)
        assign_identity(cmd.cli_ctx, lambda: cg, lambda cg: cg, identity_role,
                        identity_scope)

    return lro
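
A private helper such as _create_resource_requirements above typically just wraps the Azure Container Instances request models. A hedged sketch (the model names come from azure.mgmt.containerinstance; the helper body is an assumption, not the actual azure-cli source):

from azure.mgmt.containerinstance.models import (ResourceRequests,
                                                 ResourceRequirements)

def _create_resource_requirements(cpu, memory):
    # create_container defaults these to cpu=1 and memory=1.5 (GB).
    if cpu or memory:
        return ResourceRequirements(
            requests=ResourceRequests(cpu=cpu, memory_in_gb=memory))
    return None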
Code example #41
def restore_disks(cmd,
                  client,
                  resource_group_name,
                  vault_name,
                  container_name,
                  item_name,
                  rp_name,
                  storage_account,
                  target_resource_group=None,
                  restore_to_staging_storage_account=None,
                  restore_only_osdisk=None,
                  diskslist=None,
                  restore_as_unmanaged_disks=None):
    item = show_item(cmd, backup_protected_items_cf(cmd.cli_ctx),
                     resource_group_name, vault_name, container_name,
                     item_name, "AzureIaasVM", "VM")
    _validate_item(item)
    recovery_point = show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx),
                                         resource_group_name, vault_name,
                                         container_name, item_name, rp_name,
                                         "AzureIaasVM", "VM")
    vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
    vault_location = vault.location

    # Get container and item URIs
    container_uri = _get_protection_container_uri_from_id(item.id)
    item_uri = _get_protected_item_uri_from_id(item.id)

    # Original Storage Account Restore Logic
    use_original_storage_account = _should_use_original_storage_account(
        recovery_point, restore_to_staging_storage_account)
    if use_original_storage_account:
        logger.warning("""
            The disks will be restored to their original storage accounts. The VM config file will be uploaded to the
            given storage account.
            """)

    # Construct trigger restore request object
    sa_name, sa_rg = _get_resource_name_and_rg(resource_group_name,
                                               storage_account)
    _storage_account_id = _get_storage_account_id(cmd.cli_ctx, sa_name, sa_rg)
    _source_resource_id = item.properties.source_resource_id
    target_rg_id = None

    if restore_as_unmanaged_disks and target_resource_group is not None:
        raise CLIError("""
            restore_as_unmanaged_disks and target_resource_group can't both be specified.
            Please give only one parameter and retry.
            """)

    if recovery_point.properties.is_managed_virtual_machine:
        if target_resource_group is not None:
            target_rg_id = '/'.join(_source_resource_id.split('/')
                                    [:4]) + "/" + target_resource_group
        if not restore_as_unmanaged_disks:
            logger.warning("""
                The disks of the managed VM will be restored as unmanaged since the targetRG parameter is not provided.
                This will NOT leverage the instant restore functionality.
                Hence it can be significantly slower, depending on the given storage account.
                To leverage instant restore, provide the target RG parameter.
                Otherwise, state the intent explicitly next time by passing the --restore-as-unmanaged-disks parameter.
                """)

    _validate_restore_disk_parameters(restore_only_osdisk, diskslist)
    restore_disk_lun_list = None
    if restore_only_osdisk:
        restore_disk_lun_list = []

    if diskslist:
        restore_disk_lun_list = diskslist

    trigger_restore_properties = IaasVMRestoreRequest(
        create_new_cloud_service=True,
        recovery_point_id=rp_name,
        recovery_type='RestoreDisks',
        region=vault_location,
        storage_account_id=_storage_account_id,
        source_resource_id=_source_resource_id,
        target_resource_group_id=target_rg_id,
        original_storage_account_option=use_original_storage_account,
        restore_disk_lun_list=restore_disk_lun_list)
    trigger_restore_request = RestoreRequestResource(
        properties=trigger_restore_properties)

    # Trigger restore
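    # no_wait is hard-coded to True here on purpose: the raw response is handed
    # straight to _track_backup_job below, which polls the backup job itself.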
    result = sdk_no_wait(True, client.trigger, vault_name, resource_group_name,
                         fabric_name, container_uri, item_uri, rp_name,
                         trigger_restore_request)
    return _track_backup_job(cmd.cli_ctx, result, vault_name,
                             resource_group_name)
Code example #42
File: custom.py Project: woakesd/azure-cli
def apim_api_schema_delete(client, resource_group_name, service_name, api_id, schema_id, if_match=None, no_wait=False):
    """Deletes an API Schema. """
    return sdk_no_wait(no_wait, client.api_schema.delete,
                       resource_group_name=resource_group_name,
                       service_name=service_name, api_id=api_id, schema_id=schema_id,
                       if_match="*" if if_match is None else if_match)
Code example #43
File: artifacts.py Project: jaysterp/azure-cli
def delete_dataset(cmd, workspace_name, dataset_name, no_wait=False):
    client = cf_synapse_dataset(cmd.cli_ctx, workspace_name)
    return sdk_no_wait(no_wait,
                       client.begin_delete_dataset,
                       dataset_name,
                       polling=True)
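
Note that when no_wait is True, sdk_no_wait (as sketched earlier) overrides the explicit polling=True with polling=False, which is what lets the call return without waiting for the delete to finish.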
Code example #44
File: custom.py Project: woakesd/azure-cli
def apim_api_import(
        client, resource_group_name, service_name, path, specification_format, description=None,
        subscription_key_header_name=None, subscription_key_query_param_name=None, api_id=None, api_revision=None,
        api_version=None, api_version_set_id=None, display_name=None, service_url=None, protocols=None,
        specification_path=None, specification_url=None, api_type=None, subscription_required=None, soap_api_type=None,
        wsdl_endpoint_name=None, wsdl_service_name=None, no_wait=False):
    """Import a new API"""
    cms = client.api

    # api_type: Type of API. Possible values include: 'http', 'soap'
    # possible parameter format is 'wadl-xml', 'wadl-link-json', 'swagger-json', 'swagger-link-json',
    #   'wsdl', 'wsdl-link', 'openapi', 'openapi+json', 'openapi-link'
    # possible parameter specificationFormat is 'Wadl', 'Swagger', 'OpenApi', 'OpenApiJson', 'Wsdl'

    parameters = ApiCreateOrUpdateParameter(
        path=path,
        protocols=protocols,
        service_url=service_url,
        display_name=display_name,
        description=description,
        subscription_required=subscription_required,
        subscription_key_parameter_names=_get_subscription_key_parameter_names(
            subscription_key_query_param_name,
            subscription_key_header_name),
        api_version=api_version,
        api_version_set_id=_get_vs_fullpath(api_version_set_id)
    )

    if api_revision is not None and api_id is not None:
        api_id = api_id + ";rev=" + api_revision
    elif api_revision is not None:
        api_id = uuid.uuid4().hex + ";rev=" + api_revision
    elif api_id is None:
        api_id = uuid.uuid4().hex

    if specification_path is not None and specification_url is None:
        # Use a context manager so the file handle is closed after reading.
        with open(specification_path, 'r') as api_file:
            parameters.value = api_file.read()
    elif specification_url is not None and specification_path is None:
        parameters.value = specification_url
    elif specification_path is not None and specification_url is not None:
        raise MutuallyExclusiveArgumentError(
            "Can't specify specification-url and specification-path at the same time.")
    else:
        raise RequiredArgumentMissingError(
            "Please either specify specification-url or specification-path.")

    FORMAT_MAPPINGS = {
        ImportFormat.Wadl.value: {
            # specification_path is not none
            True: ContentFormat.WADL_XML.value,
            # specification_url is not none
            False: ContentFormat.WADL_LINK_JSON.value
        },
        ImportFormat.Swagger.value: {
            True: ContentFormat.SWAGGER_JSON.value,
            False: ContentFormat.SWAGGER_LINK_JSON.value
        },
        ImportFormat.OpenApi.value: {
            True: ContentFormat.OPENAPI.value,
            False: ContentFormat.OPENAPI_LINK.value
        },
        ImportFormat.OpenApiJson.value: {
            True: ContentFormat.OPENAPI_JSON.value,
            False: ContentFormat.OPENAPI_JSON_LINK.value
        },
        ImportFormat.Wsdl.value: {
            True: ContentFormat.WSDL.value,
            False: ContentFormat.WSDL_LINK.value
        }
    }

    if specification_format in FORMAT_MAPPINGS:
        parameters.format = FORMAT_MAPPINGS[specification_format][specification_path is not None]
    else:
        raise InvalidArgumentValueError(
            "SpecificationFormat: " + specification_format + "is not supported.")

    if specification_format == ImportFormat.Wsdl.value:
        if api_type == ApiType.http.value:
            soap_api_type = SoapApiType.soap_to_rest.value
        else:
            soap_api_type = SoapApiType.soap_pass_through.value

        parameters.soap_api_type = soap_api_type

        if wsdl_service_name is not None and wsdl_endpoint_name is not None:
            parameters.wsdl_selector = ApiCreateOrUpdatePropertiesWsdlSelector(
                wsdl_service_name=wsdl_service_name,
                wsdl_endpoint_name=wsdl_endpoint_name
            )

    return sdk_no_wait(
        no_wait,
        cms.begin_create_or_update,
        resource_group_name=resource_group_name,
        service_name=service_name,
        api_id=api_id,
        parameters=parameters)
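
The FORMAT_MAPPINGS table above is keyed first by the import format and then by a boolean (whether a local file was supplied), so the content format resolves in a single lookup. The same pattern in isolation, with a hypothetical path for illustration:

# True -> file-based content format, False -> link-based content format.
mapping = {True: 'openapi', False: 'openapi-link'}
specification_path = '/tmp/api.yaml'  # hypothetical value
print(mapping[specification_path is not None])  # prints: openapi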
Code example #45
def create_streaming_endpoint(  # pylint: disable=too-many-locals
        cmd,
        client,
        resource_group_name,
        account_name,
        streaming_endpoint_name,
        scale_units,
        auto_start=None,
        tags=None,
        cross_domain_policy=None,
        ips=None,
        description=None,
        availability_set_name=None,
        max_cache_age=None,
        cdn_provider=None,
        cdn_profile=None,
        custom_host_names=None,
        client_access_policy=None,
        no_wait=False):
    from azure.cli.command_modules.ams._client_factory import (
        get_mediaservices_client)

    allow_list = []
    if ips is not None:
        for ip in ips:
            allow_list.append(create_ip_range(streaming_endpoint_name, ip))

    ams_client = get_mediaservices_client(cmd.cli_ctx)
    ams = ams_client.get(resource_group_name, account_name)
    location = ams.location

    streaming_endpoint_access_control = StreamingEndpointAccessControl()

    if ips is not None:
        streaming_endpoint_access_control.ip = IPAccessControl(
            allow=allow_list)

    policies = create_cross_site_access_policies(client_access_policy,
                                                 cross_domain_policy)

    cdn_enabled = cdn_profile is not None or cdn_provider is not None

    streaming_endpoint = StreamingEndpoint(
        max_cache_age=max_cache_age,
        tags=tags,
        location=location,
        description=description,
        custom_host_names=custom_host_names,
        scale_units=scale_units,
        cdn_profile=cdn_profile,
        availability_set_name=availability_set_name,
        cdn_enabled=cdn_enabled,
        cdn_provider=cdn_provider,
        cross_site_access_policies=policies,
        access_control=streaming_endpoint_access_control)

    return sdk_no_wait(no_wait,
                       client.create,
                       resource_group_name=resource_group_name,
                       account_name=account_name,
                       auto_start=auto_start,
                       streaming_endpoint_name=streaming_endpoint_name,
                       parameters=streaming_endpoint)
Code example #46
def flexible_server_restore(cmd, client,
                            resource_group_name, server_name,
                            source_server, restore_point_in_time=None, zone=None, no_wait=False,
                            subnet=None, subnet_address_prefix=None, vnet=None, vnet_address_prefix=None,
                            private_dns_zone_arguments=None, yes=False):

    server_name = server_name.lower()
    db_context = DbContext(
        cmd=cmd, azure_sdk=postgresql_flexibleservers, cf_firewall=cf_postgres_flexible_firewall_rules, cf_db=cf_postgres_flexible_db,
        cf_availability=cf_postgres_check_resource_availability, cf_private_dns_zone_suffix=cf_postgres_flexible_private_dns_zone_suffix_operations, logging_name='PostgreSQL', command_group='postgres', server_client=client)
    validate_server_name(db_context, server_name, 'Microsoft.DBforPostgreSQL/flexibleServers')

    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server_id = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace=RESOURCE_PROVIDER,
                type='flexibleServers',
                name=source_server)
        else:
            raise ValueError('The provided source server {} is invalid.'.format(source_server))
    else:
        source_server_id = source_server

    restore_point_in_time = validate_and_format_restore_point_in_time(restore_point_in_time)

    try:
        id_parts = parse_resource_id(source_server_id)
        source_server_object = client.get(id_parts['resource_group'], id_parts['name'])

        location = ''.join(source_server_object.location.lower().split())
        parameters = postgresql_flexibleservers.models.Server(
            location=location,
            point_in_time_utc=restore_point_in_time,
            source_server_resource_id=source_server_id,  # full ARM resource ID of the source server
            create_mode="PointInTimeRestore",
            availability_zone=zone
        )

        if source_server_object.network.public_network_access == 'Disabled':
            network = postgresql_flexibleservers.models.Network()
            if subnet is not None or vnet is not None:
                subnet_id = prepare_private_network(cmd,
                                                    resource_group_name,
                                                    server_name,
                                                    vnet=vnet,
                                                    subnet=subnet,
                                                    location=location,
                                                    delegation_service_name=DELEGATION_SERVICE_NAME,
                                                    vnet_address_pref=vnet_address_prefix,
                                                    subnet_address_pref=subnet_address_prefix,
                                                    yes=yes)
            else:
                subnet_id = source_server_object.network.delegated_subnet_resource_id

            if private_dns_zone_arguments is not None:
                private_dns_zone_id = prepare_private_dns_zone(db_context,
                                                               'PostgreSQL',
                                                               resource_group_name,
                                                               server_name,
                                                               private_dns_zone=private_dns_zone_arguments,
                                                               subnet_id=subnet_id,
                                                               location=location,
                                                               yes=yes)
            else:
                private_dns_zone_id = source_server_object.network.private_dns_zone_arm_resource_id

            network.delegated_subnet_resource_id = subnet_id
            network.private_dns_zone_arm_resource_id = private_dns_zone_id
            parameters.network = network

    except Exception as e:
        raise ResourceNotFoundError(e)

    return sdk_no_wait(no_wait, client.begin_create, resource_group_name, server_name, parameters)
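
The bare-name branch above relies on resource_id from msrestazure.tools to expand a plain server name into a full ARM resource ID. A quick illustration (the subscription and names are placeholders):

from msrestazure.tools import resource_id

rid = resource_id(
    subscription='00000000-0000-0000-0000-000000000000',  # placeholder
    resource_group='myResourceGroup',
    namespace='Microsoft.DBforPostgreSQL',
    type='flexibleServers',
    name='my-source-server')
# /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/
#     myResourceGroup/providers/Microsoft.DBforPostgreSQL/flexibleServers/my-source-server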
Code example #47
def create_k8s_extension(
    cmd,
    client,
    resource_group_name,
    cluster_name,
    name,
    cluster_type,
    extension_type,
    cluster_resource_provider=None,
    scope=None,
    auto_upgrade_minor_version=None,
    release_train=None,
    version=None,
    target_namespace=None,
    release_namespace=None,
    configuration_settings=None,
    configuration_protected_settings=None,
    configuration_settings_file=None,
    configuration_protected_settings_file=None,
    no_wait=False,
):
    """Create a new Extension Instance."""

    extension_type_lower = extension_type.lower()
    cluster_rp, _ = get_cluster_rp_api_version(
        cluster_type=cluster_type, cluster_rp=cluster_resource_provider)

    # Configuration Settings & Configuration Protected Settings
    if configuration_settings is not None and configuration_settings_file is not None:
        raise MutuallyExclusiveArgumentError(
            "Error! Both configuration-settings and configuration-settings-file cannot be provided."
        )

    if (configuration_protected_settings is not None
            and configuration_protected_settings_file is not None):
        raise MutuallyExclusiveArgumentError(
            "Error! Both configuration-protected-settings and configuration-protected-settings-file "
            "cannot be provided.")

    config_settings = {}
    config_protected_settings = {}
    # Get Configuration Settings from file
    if configuration_settings_file is not None:
        config_settings = read_config_settings_file(
            configuration_settings_file)

    if configuration_settings is not None:
        for dicts in configuration_settings:
            for key, value in dicts.items():
                config_settings[key] = value

    # Get Configuration Protected Settings from file
    if configuration_protected_settings_file is not None:
        config_protected_settings = read_config_settings_file(
            configuration_protected_settings_file)

    if configuration_protected_settings is not None:
        for dicts in configuration_protected_settings:
            for key, value in dicts.items():
                config_protected_settings[key] = value

    # Identity is not created by default.  Extension type must specify if identity is required.
    create_identity = False
    extension_instance = None

    # Scope & Namespace validation - common to all extension-types
    __validate_scope_and_namespace(scope, release_namespace, target_namespace)

    # Give partners a chance to run their extension-type-specific validations and to set value overrides.

    # Get the extension class based on the extension name
    extension_class = ExtensionFactory(extension_type_lower)
    extension_instance, name, create_identity = extension_class.Create(
        cmd,
        client,
        resource_group_name,
        cluster_name,
        name,
        cluster_type,
        cluster_rp,
        extension_type_lower,
        scope,
        auto_upgrade_minor_version,
        release_train,
        version,
        target_namespace,
        release_namespace,
        config_settings,
        config_protected_settings,
        configuration_settings_file,
        configuration_protected_settings_file,
    )

    # Common validations
    __validate_version_and_auto_upgrade(
        extension_instance.version,
        extension_instance.auto_upgrade_minor_version)
    __validate_scope_after_customization(extension_instance.scope)

    # Check that registration has been done on Microsoft.KubernetesConfiguration for the subscription
    validate_cc_registration(cmd)

    # Create identity, if required
    # We don't create the identity if we are in a dogfood (DF) environment
    if create_identity and not is_dogfood_cluster(cmd):
        identity_object, location = __create_identity(cmd, resource_group_name,
                                                      cluster_name,
                                                      cluster_type, cluster_rp)
        if identity_object is not None and location is not None:
            extension_instance.identity, extension_instance.location = (
                identity_object,
                location,
            )

    # Try to create the resource
    return sdk_no_wait(
        no_wait,
        client.begin_create,
        resource_group_name,
        cluster_rp,
        cluster_type,
        cluster_name,
        name,
        extension_instance,
    )
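
The settings-merge loops above fold a list of dicts (one dict per repeated key=value argument, as the CLI typically parses them) into a single dict, with later values overriding earlier ones. A standalone illustration with made-up keys:

parsed_args = [{'replicas': '2'}, {'logLevel': 'debug'}, {'replicas': '3'}]
config_settings = {}
for d in parsed_args:
    config_settings.update(d)  # equivalent to the per-key assignment above
# config_settings == {'replicas': '3', 'logLevel': 'debug'}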
Code example #48
File: custom.py Project: nilsanderselde/ARO-RP
def aro_create(cmd,  # pylint: disable=too-many-locals
               client,
               resource_group_name,
               resource_name,
               master_subnet,
               worker_subnet,
               vnet=None,  # pylint: disable=unused-argument
               vnet_resource_group_name=None,  # pylint: disable=unused-argument
               location=None,
               pull_secret=None,
               domain=None,
               cluster_resource_group=None,
               client_id=None,
               client_secret=None,
               pod_cidr=None,
               service_cidr=None,
               software_defined_network=None,
               disk_encryption_set=None,
               master_encryption_at_host=False,
               master_vm_size=None,
               worker_encryption_at_host=False,
               worker_vm_size=None,
               worker_vm_disk_size_gb=None,
               worker_count=None,
               apiserver_visibility=None,
               ingress_visibility=None,
               tags=None,
               no_wait=False):
    if not rp_mode_development():
        resource_client = get_mgmt_service_client(
            cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
        provider = resource_client.providers.get('Microsoft.RedHatOpenShift')
        if provider.registration_state != 'Registered':
            raise UnauthorizedError('Microsoft.RedHatOpenShift provider is not registered.',
                                    'Run `az provider register -n Microsoft.RedHatOpenShift --wait`.')

    validate_subnets(master_subnet, worker_subnet)

    subscription_id = get_subscription_id(cmd.cli_ctx)

    random_id = generate_random_id()

    aad = AADManager(cmd.cli_ctx)
    if client_id is None:
        app, client_secret = aad.create_application(cluster_resource_group or 'aro-' + random_id)
        client_id = app.app_id

    client_sp = aad.get_service_principal(client_id)
    if not client_sp:
        client_sp = aad.create_service_principal(client_id)

    rp_client_sp = aad.get_service_principal(resolve_rp_client_id())
    if not rp_client_sp:
        raise ResourceNotFoundError("RP service principal not found.")

    if rp_mode_development():
        worker_vm_size = worker_vm_size or 'Standard_D2s_v3'
    else:
        worker_vm_size = worker_vm_size or 'Standard_D4s_v3'

    if apiserver_visibility is not None:
        apiserver_visibility = apiserver_visibility.capitalize()

    if ingress_visibility is not None:
        ingress_visibility = ingress_visibility.capitalize()

    oc = openshiftcluster.OpenShiftCluster(
        location=location,
        tags=tags,
        cluster_profile=openshiftcluster.ClusterProfile(
            pull_secret=pull_secret or "",
            domain=domain or random_id,
            resource_group_id='/subscriptions/%s/resourceGroups/%s' %
            (subscription_id, cluster_resource_group or "aro-" + random_id),
        ),
        service_principal_profile=openshiftcluster.ServicePrincipalProfile(
            client_id=client_id,
            client_secret=client_secret,
        ),
        network_profile=openshiftcluster.NetworkProfile(
            pod_cidr=pod_cidr or '10.128.0.0/14',
            service_cidr=service_cidr or '172.30.0.0/16',
            software_defined_network=software_defined_network or 'OpenShiftSDN'
        ),
        master_profile=openshiftcluster.MasterProfile(
            vm_size=master_vm_size or 'Standard_D8s_v3',
            subnet_id=master_subnet,
            encryption_at_host='Enabled' if master_encryption_at_host else 'Disabled',
            disk_encryption_set_id=disk_encryption_set,
        ),
        worker_profiles=[
            openshiftcluster.WorkerProfile(
                name='worker',  # TODO: 'worker' should not be hard-coded
                vm_size=worker_vm_size,
                disk_size_gb=worker_vm_disk_size_gb or 128,
                subnet_id=worker_subnet,
                count=worker_count or 3,
                encryption_at_host='Enabled' if worker_encryption_at_host else 'Disabled',
                disk_encryption_set_id=disk_encryption_set,
            )
        ],
        apiserver_profile=openshiftcluster.APIServerProfile(
            visibility=apiserver_visibility or 'Public',
        ),
        ingress_profiles=[
            openshiftcluster.IngressProfile(
                name='default',  # TODO: 'default' should not be hard-coded
                visibility=ingress_visibility or 'Public',
            )
        ],
    )

    sp_obj_ids = [client_sp.object_id, rp_client_sp.object_id]
    ensure_resource_permissions(cmd.cli_ctx, oc, True, sp_obj_ids)

    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name=resource_group_name,
                       resource_name=resource_name,
                       parameters=oc)
Code example #49
File: custom.py Project: ranisha2/azure-cli-1
def aro_create(
        cmd,  # pylint: disable=too-many-locals
        client,
        resource_group_name,
        resource_name,
        master_subnet,
        worker_subnet,
        vnet=None,
        vnet_resource_group_name=None,  # pylint: disable=unused-argument
        location=None,
        pull_secret=None,
        domain=None,
        cluster_resource_group=None,
        client_id=None,
        client_secret=None,
        pod_cidr=None,
        service_cidr=None,
        master_vm_size=None,
        worker_vm_size=None,
        worker_vm_disk_size_gb=None,
        worker_count=None,
        apiserver_visibility=None,
        ingress_visibility=None,
        tags=None,
        no_wait=False):
    resource_client = get_mgmt_service_client(
        cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    provider = resource_client.providers.get('Microsoft.RedHatOpenShift')
    if provider.registration_state != 'Registered':
        raise CLIError(
            'Microsoft.RedHatOpenShift provider is not registered.  Run `az provider '
            + 'register -n Microsoft.RedHatOpenShift --wait`.')

    vnet = validate_subnets(master_subnet, worker_subnet)

    subscription_id = get_subscription_id(cmd.cli_ctx)

    random_id = generate_random_id()

    aad = AADManager(cmd.cli_ctx)
    if client_id is None:
        app, client_secret = aad.create_application(cluster_resource_group
                                                    or 'aro-' + random_id)
        client_id = app.app_id

    client_sp = aad.get_service_principal(client_id)
    if not client_sp:
        client_sp = aad.create_service_principal(client_id)

    rp_client_id = FP_CLIENT_ID

    rp_client_sp = aad.get_service_principal(rp_client_id)

    for sp_id in [client_sp.object_id, rp_client_sp.object_id]:
        assign_contributor_to_vnet(cmd.cli_ctx, vnet, sp_id)
        assign_contributor_to_routetable(cmd.cli_ctx, master_subnet,
                                         worker_subnet, sp_id)

    if rp_mode_development():
        worker_vm_size = worker_vm_size or 'Standard_D2s_v3'
    else:
        worker_vm_size = worker_vm_size or 'Standard_D4s_v3'

    if apiserver_visibility is not None:
        apiserver_visibility = apiserver_visibility.capitalize()

    if ingress_visibility is not None:
        ingress_visibility = ingress_visibility.capitalize()

    oc = v2020_04_30.OpenShiftCluster(
        location=location,
        tags=tags,
        cluster_profile=v2020_04_30.ClusterProfile(
            pull_secret=pull_secret or "",
            domain=domain or random_id,
            resource_group_id='/subscriptions/%s/resourceGroups/%s' %
            (subscription_id, cluster_resource_group or "aro-" + random_id),
        ),
        service_principal_profile=v2020_04_30.ServicePrincipalProfile(
            client_id=client_id,
            client_secret=client_secret,
        ),
        network_profile=v2020_04_30.NetworkProfile(
            pod_cidr=pod_cidr or '10.128.0.0/14',
            service_cidr=service_cidr or '172.30.0.0/16',
        ),
        master_profile=v2020_04_30.MasterProfile(
            vm_size=master_vm_size or 'Standard_D8s_v3',
            subnet_id=master_subnet,
        ),
        worker_profiles=[
            v2020_04_30.WorkerProfile(
                name='worker',  # TODO: 'worker' should not be hard-coded
                vm_size=worker_vm_size,
                disk_size_gb=worker_vm_disk_size_gb or 128,
                subnet_id=worker_subnet,
                count=worker_count or 3,
            )
        ],
        apiserver_profile=v2020_04_30.APIServerProfile(
            visibility=apiserver_visibility or 'Public',
        ),
        ingress_profiles=[
            v2020_04_30.IngressProfile(
                name='default',  # TODO: 'default' should not be hard-coded
                visibility=ingress_visibility or 'Public',
            )
        ],
    )

    return sdk_no_wait(no_wait,
                       client.create_or_update,
                       resource_group_name=resource_group_name,
                       resource_name=resource_name,
                       parameters=oc)
Code example #50
File: custom.py Project: ykimura0725/azure-cli
def _server_restore(cmd,
                    client,
                    resource_group_name,
                    server_name,
                    source_server,
                    restore_point_in_time,
                    no_wait=False):
    provider = 'Microsoft.DBforPostgreSQL'
    if isinstance(client, MySqlServersOperations):
        provider = 'Microsoft.DBforMySQL'
    elif isinstance(client, MariaDBServersOperations):
        provider = 'Microsoft.DBforMariaDB'

    parameters = None
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(subscription=get_subscription_id(
                cmd.cli_ctx),
                                        resource_group=resource_group_name,
                                        namespace=provider,
                                        type='servers',
                                        name=source_server)
        else:
            raise ValueError(
                'The provided source-server {} is invalid.'.format(
                    source_server))

    if provider == 'Microsoft.DBforMySQL':
        from azure.mgmt.rdbms import mysql
        parameters = mysql.models.ServerForCreate(
            properties=mysql.models.ServerPropertiesForRestore(
                source_server_id=source_server,
                restore_point_in_time=restore_point_in_time),
            location=None)
    elif provider == 'Microsoft.DBforPostgreSQL':
        from azure.mgmt.rdbms import postgresql
        parameters = postgresql.models.ServerForCreate(
            properties=postgresql.models.ServerPropertiesForRestore(
                source_server_id=source_server,
                restore_point_in_time=restore_point_in_time),
            location=None)
    elif provider == 'Microsoft.DBforMariaDB':
        from azure.mgmt.rdbms import mariadb
        parameters = mariadb.models.ServerForCreate(
            properties=mariadb.models.ServerPropertiesForRestore(
                source_server_id=source_server,
                restore_point_in_time=restore_point_in_time),
            location=None)

    parameters.properties.source_server_id = source_server
    parameters.properties.restore_point_in_time = restore_point_in_time

    # Workaround: cross-region restore is not supported currently,
    # so the location must be set to that of the source server (not the resource group)
    id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(id_parts['resource_group'],
                                          id_parts['name'])
        parameters.location = source_server_object.location
    except Exception as e:
        raise ValueError('Unable to get source server: {}.'.format(str(e)))

    return sdk_no_wait(no_wait, client.create, resource_group_name,
                       server_name, parameters)
Code example #51
def aks_update(cmd,
               client,
               resource_group_name,
               name,
               enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               min_count=None,
               max_count=None,
               no_wait=False):
    update_flags = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
    if update_flags != 1:
        raise CLIError('Please specify "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler".')

    # TODO: change this approach when we support multiple agent pools.
    instance = client.managed_clusters.get(resource_group_name, name)
    node_count = instance.agent_pool_profiles[0].count

    if min_count is None or max_count is None:
        if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError(
                'Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                '--update-cluster-autoscaler is set.')
    if min_count is not None and max_count is not None:
        if int(min_count) > int(max_count):
            raise CLIError(
                'value of min-count should be less than or equal to value of max-count.'
            )
        if int(node_count) < int(min_count) or int(node_count) > int(
                max_count):
            raise CLIError(
                "current node count '{}' is not in the range of min-count and max-count."
                .format(node_count))

    if enable_cluster_autoscaler:
        if instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning(
                'Cluster autoscaler is already enabled for this managed cluster.\n'
                'Please run "az aks update --update-cluster-autoscaler" '
                'if you want to update min-count or max-count.')
            return None
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
        instance.agent_pool_profiles[0].enable_auto_scaling = True

    if update_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError(
                'Cluster autoscaler is not enabled for this managed cluster.\n'
                'Run "az aks update --enable-cluster-autoscaler" '
                'to enable cluster with min-count and max-count.')
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)

    if disable_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning(
                'Cluster autoscaler is already disabled for this managed cluster.'
            )
            return None
        instance.agent_pool_profiles[0].enable_auto_scaling = False
        instance.agent_pool_profiles[0].min_count = None
        instance.agent_pool_profiles[0].max_count = None

    return sdk_no_wait(no_wait, client.managed_clusters.create_or_update,
                       resource_group_name, name, instance)
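
The update_flags check at the top of aks_update works because Python booleans are integers, so summing them counts how many of the three mutually exclusive flags were passed:

# Exactly one of the three mode flags must be set.
enable, disable, update = True, False, False
assert enable + disable + update == 1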
Code example #52
File: custom.py Project: ykimura0725/azure-cli
def _server_georestore(cmd,
                       client,
                       resource_group_name,
                       server_name,
                       sku_name,
                       location,
                       source_server,
                       backup_retention=None,
                       geo_redundant_backup=None,
                       no_wait=False,
                       **kwargs):
    provider = 'Microsoft.DBforPostgreSQL'
    if isinstance(client, MySqlServersOperations):
        provider = 'Microsoft.DBforMySQL'
    elif isinstance(client, MariaDBServersOperations):
        provider = 'Microsoft.DBforMariaDB'

    parameters = None

    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(subscription=get_subscription_id(
                cmd.cli_ctx),
                                        resource_group=resource_group_name,
                                        namespace=provider,
                                        type='servers',
                                        name=source_server)
        else:
            raise ValueError(
                'The provided source-server {} is invalid.'.format(
                    source_server))

    if provider == 'Microsoft.DBforMySQL':
        from azure.mgmt.rdbms import mysql
        parameters = mysql.models.ServerForCreate(
            sku=mysql.models.Sku(name=sku_name),
            properties=mysql.models.ServerPropertiesForGeoRestore(
                storage_profile=mysql.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup),
                source_server_id=source_server),
            location=location)
    elif provider == 'Microsoft.DBforPostgreSQL':
        from azure.mgmt.rdbms import postgresql
        parameters = postgresql.models.ServerForCreate(
            sku=postgresql.models.Sku(name=sku_name),
            properties=postgresql.models.ServerPropertiesForGeoRestore(
                storage_profile=postgresql.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup),
                source_server_id=source_server),
            location=location)
    elif provider == 'Microsoft.DBforMariaDB':
        from azure.mgmt.rdbms import mariadb
        parameters = mariadb.models.ServerForCreate(
            sku=mariadb.models.Sku(name=sku_name),
            properties=mariadb.models.ServerPropertiesForGeoRestore(
                storage_profile=mariadb.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup),
                source_server_id=source_server),
            location=location)

    parameters.properties.source_server_id = source_server

    source_server_id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(
            source_server_id_parts['resource_group'],
            source_server_id_parts['name'])
        if parameters.sku.name is None:
            parameters.sku.name = source_server_object.sku.name
    except Exception as e:
        raise ValueError('Unable to get source server: {}.'.format(str(e)))

    return sdk_no_wait(no_wait, client.create, resource_group_name,
                       server_name, parameters)
Code example #53
def connectedmachine_extension_create(client,
                                      resource_group_name,
                                      machine_name,
                                      name,
                                      location,
                                      tags=None,
                                      force_update_tag=None,
                                      publisher=None,
                                      type_=None,
                                      type_handler_version=None,
                                      enable_auto_upgrade=None,
                                      auto_upgrade_minor=None,
                                      settings=None,
                                      protected_settings=None,
                                      instance_view_type=None,
                                      inst_handler_version=None,
                                      status=None,
                                      no_wait=False):
    extension_parameters = {}
    if tags is not None:
        extension_parameters['tags'] = tags
    extension_parameters['location'] = location
    extension_parameters['properties'] = {}
    if force_update_tag is not None:
        extension_parameters['properties'][
            'force_update_tag'] = force_update_tag
    if publisher is not None:
        extension_parameters['properties']['publisher'] = publisher
    if type_ is not None:
        extension_parameters['properties']['type'] = type_
    if type_handler_version is not None:
        extension_parameters['properties'][
            'type_handler_version'] = type_handler_version
    if enable_auto_upgrade is not None:
        extension_parameters['properties'][
            'enable_automatic_upgrade'] = enable_auto_upgrade
    if auto_upgrade_minor is not None:
        extension_parameters['properties'][
            'auto_upgrade_minor_version'] = auto_upgrade_minor
    if settings is not None:
        extension_parameters['properties']['settings'] = settings
    if protected_settings is not None:
        extension_parameters['properties'][
            'protected_settings'] = protected_settings
    extension_parameters['properties']['instance_view'] = {}
    extension_parameters['properties']['instance_view']['name'] = name
    if instance_view_type is not None:
        extension_parameters['properties']['instance_view'][
            'type'] = instance_view_type
    if inst_handler_version is not None:
        extension_parameters['properties']['instance_view'][
            'type_handler_version'] = inst_handler_version
    if status is not None:
        extension_parameters['properties']['instance_view']['status'] = status
    if len(extension_parameters['properties']['instance_view']) == 0:
        del extension_parameters['properties']['instance_view']
    return sdk_no_wait(no_wait,
                       client.begin_create_or_update,
                       resource_group_name=resource_group_name,
                       machine_name=machine_name,
                       extension_name=name,
                       extension_parameters=extension_parameters)
Code example #54
File: custom.py Project: ykimura0725/azure-cli
def _replica_create(cmd,
                    client,
                    resource_group_name,
                    server_name,
                    source_server,
                    no_wait=False,
                    location=None,
                    sku_name=None,
                    **kwargs):
    provider = 'Microsoft.DBforPostgreSQL'
    if isinstance(client, MySqlServersOperations):
        provider = 'Microsoft.DBforMySQL'
    elif isinstance(client, MariaDBServersOperations):
        provider = 'Microsoft.DBforMariaDB'
    # set source server id
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(subscription=get_subscription_id(
                cmd.cli_ctx),
                                        resource_group=resource_group_name,
                                        namespace=provider,
                                        type='servers',
                                        name=source_server)
        else:
            raise CLIError('The provided source-server {} is invalid.'.format(
                source_server))

    source_server_id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(
            source_server_id_parts['resource_group'],
            source_server_id_parts['name'])
    except CloudError as e:
        raise CLIError('Unable to get source server: {}.'.format(str(e)))

    if location is None:
        location = source_server_object.location

    if sku_name is None:
        sku_name = source_server_object.sku.name

    parameters = None
    if provider == 'Microsoft.DBforMySQL':
        from azure.mgmt.rdbms import mysql
        parameters = mysql.models.ServerForCreate(
            sku=mysql.models.Sku(name=sku_name),
            properties=mysql.models.ServerPropertiesForReplica(
                source_server_id=source_server),
            location=location)
    elif provider == 'Microsoft.DBforPostgreSQL':
        from azure.mgmt.rdbms import postgresql
        parameters = postgresql.models.ServerForCreate(
            sku=postgresql.models.Sku(name=sku_name),
            properties=postgresql.models.ServerPropertiesForReplica(
                source_server_id=source_server),
            location=location)
    elif provider == 'Microsoft.DBforMariaDB':
        from azure.mgmt.rdbms import mariadb
        parameters = mariadb.models.ServerForCreate(
            sku=mariadb.models.Sku(name=sku_name),
            properties=mariadb.models.ServerPropertiesForReplica(
                source_server_id=source_server),
            location=location)

    return sdk_no_wait(no_wait, client.create, resource_group_name,
                       server_name, parameters)
Code example #55
def delete_sql_script(cmd, workspace_name, sql_script_name, no_wait=False):
    client = cf_synapse_sql_script(cmd.cli_ctx, workspace_name)
    return sdk_no_wait(no_wait,
                       client.begin_delete_sql_script,
                       sql_script_name,
                       polling=True)
Code example #56
def sqlvm_create(client, cmd, location, sql_virtual_machine_name, resource_group_name,
                 sql_server_license_type='PAYG', sql_virtual_machine_group_resource_id=None, cluster_bootstrap_account_password=None,
                 cluster_operator_account_password=None, sql_service_account_password=None, enable_auto_patching=None,
                 day_of_week=None, maintenance_window_starting_hour=None, maintenance_window_duration=None,
                 enable_auto_backup=None, enable_encryption=False, retention_period=None, storage_account_url=None,
                 storage_access_key=None, backup_password=None, backup_system_dbs=False, backup_schedule_type=None,
                 full_backup_frequency=None, full_backup_start_time=None, full_backup_window_hours=None, log_backup_frequency=None,
                 enable_key_vault_credential=None, credential_name=None, azure_key_vault_url=None, service_principal_name=None,
                 service_principal_secret=None, connectivity_type=None, port=None, sql_auth_update_username=None,
                 sql_auth_update_password=None, sql_workload_type=None, enable_r_services=None, tags=None):
    '''
    Creates a SQL virtual machine.
    '''
    from azure.cli.core.commands.client_factory import get_subscription_id

    subscription_id = get_subscription_id(cmd.cli_ctx)

    virtual_machine_resource_id = resource_id(
        subscription=subscription_id, resource_group=resource_group_name,
        namespace='Microsoft.Compute', type='virtualMachines', name=sql_virtual_machine_name)

    if sql_virtual_machine_group_resource_id and not is_valid_resource_id(sql_virtual_machine_group_resource_id):
        raise CLIError("Invalid SQL virtual machine group resource id.")

    tags = tags or {}

    wsfc_domain_credentials_object = WsfcDomainCredentials(cluster_bootstrap_account_password=cluster_bootstrap_account_password,
                                                           cluster_operator_account_password=cluster_operator_account_password,
                                                           sql_service_account_password=sql_service_account_password)

    # If the customer provided any auto_patching settings, the plugin's enable flag should be True
    if (day_of_week or maintenance_window_duration or maintenance_window_starting_hour):
        enable_auto_patching = True

    auto_patching_object = AutoPatchingSettings(enable=enable_auto_patching,
                                                day_of_week=day_of_week,
                                                maintenance_window_starting_hour=maintenance_window_starting_hour,
                                                maintenance_window_duration=maintenance_window_duration)

    # If the customer provided any auto_backup settings, the plugin's enable flag should be True
    if (enable_encryption or retention_period or storage_account_url or storage_access_key or backup_password or
            backup_system_dbs or backup_schedule_type or full_backup_frequency or full_backup_start_time or
            full_backup_window_hours or log_backup_frequency):
        enable_auto_backup = True

    auto_backup_object = AutoBackupSettings(enable=enable_auto_backup,
                                            enable_encryption=enable_encryption if enable_auto_backup else None,
                                            retention_period=retention_period,
                                            storage_account_url=storage_account_url,
                                            storage_access_key=storage_access_key,
                                            password=backup_password,
                                            backup_system_dbs=backup_system_dbs if enable_auto_backup else None,
                                            backup_schedule_type=backup_schedule_type,
                                            full_backup_frequency=full_backup_frequency,
                                            full_backup_start_time=full_backup_start_time,
                                            full_backup_window_hours=full_backup_window_hours,
                                            log_backup_frequency=log_backup_frequency)

    # If the customer provided any key_vault_credential settings, the plugin's enable flag should be True
    if (credential_name or azure_key_vault_url or service_principal_name or service_principal_secret):
        enable_key_vault_credential = True

    keyvault_object = KeyVaultCredentialSettings(enable=enable_key_vault_credential,
                                                 credential_name=credential_name,
                                                 azure_key_vault_url=azure_key_vault_url,
                                                 service_principal_name=service_principal_name,
                                                 service_principal_secret=service_principal_secret)

    connectivity_object = SqlConnectivityUpdateSettings(port=port,
                                                        connectivity_type=connectivity_type,
                                                        sql_auth_update_user_name=sql_auth_update_username,
                                                        sql_auth_update_password=sql_auth_update_password)

    workload_type_object = SqlWorkloadTypeUpdateSettings(sql_workload_type=sql_workload_type)

    additional_features_object = AdditionalFeaturesServerConfigurations(is_rservices_enabled=enable_r_services)

    server_configuration_object = ServerConfigurationsManagementSettings(sql_connectivity_update_settings=connectivity_object,
                                                                         sql_workload_type_update_settings=workload_type_object,
                                                                         additional_features_server_configurations=additional_features_object)

    sqlvm_object = SqlVirtualMachine(location=location,
                                     virtual_machine_resource_id=virtual_machine_resource_id,
                                     sql_server_license_type=sql_server_license_type,
                                     sql_virtual_machine_group_resource_id=sql_virtual_machine_group_resource_id,
                                     wsfc_domain_credentials=wsfc_domain_credentials_object,
                                     auto_patching_settings=auto_patching_object,
                                     auto_backup_settings=auto_backup_object,
                                     key_vault_credential_settings=keyvault_object,
                                     server_configurations_management_settings=server_configuration_object,
                                     tags=tags)

    # Since it's a long-running operation, do the PUT and then a GET to display the resulting instance.
    LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(False, client.create_or_update,
                                                  resource_group_name, sql_virtual_machine_name, sqlvm_object))

    return client.get(resource_group_name, sql_virtual_machine_name)
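
The three plugin blocks above share one rule: supplying any setting of a feature group turns that feature on. A minimal sketch of the pattern as a helper; the name _any_provided and the call shown in the comment are illustrative only, not part of azure-cli:

def _any_provided(*settings):
    # True when the caller supplied at least one setting of the feature group.
    return any(settings)

# Usage sketch, e.g. for auto-patching:
# enable_auto_patching = enable_auto_patching or _any_provided(
#     day_of_week, maintenance_window_duration, maintenance_window_starting_hour)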
Code example #57
def create_front_door(cmd,
                      resource_group_name,
                      front_door_name,
                      backend_address,
                      friendly_name=None,
                      tags=None,
                      disabled=None,
                      no_wait=False,
                      backend_host_header=None,
                      frontend_host_name=None,
                      probe_path='/',
                      probe_protocol='Https',
                      probe_interval=30,
                      accepted_protocols=None,
                      patterns_to_match=None,
                      forwarding_protocol='MatchRequest'):
    from azext_front_door.vendored_sdks.models import (
        FrontDoor, FrontendEndpoint, BackendPool, Backend,
        HealthProbeSettingsModel, LoadBalancingSettingsModel, RoutingRule)

    # set the default names (consider making user-settable)
    backend_pool_name = 'DefaultBackendPool'
    frontend_endpoint_name = 'DefaultFrontendEndpoint'
    probe_setting_name = 'DefaultProbeSettings'
    load_balancing_settings_name = 'DefaultLoadBalancingSettings'
    routing_rule_name = 'DefaultRoutingRule'

    # get the IDs to fill the references
    backend_pool_id = _front_door_subresource_id(cmd, resource_group_name,
                                                 front_door_name,
                                                 'backendPools',
                                                 backend_pool_name)
    frontend_endpoint_id = _front_door_subresource_id(cmd, resource_group_name,
                                                      front_door_name,
                                                      'frontendEndpoints',
                                                      frontend_endpoint_name)
    probe_settings_id = _front_door_subresource_id(cmd, resource_group_name,
                                                   front_door_name,
                                                   'healthProbeSettings',
                                                   probe_setting_name)
    load_balancing_settings_id = _front_door_subresource_id(
        cmd, resource_group_name, front_door_name, 'loadBalancingSettings',
        load_balancing_settings_name)

    front_door = FrontDoor(
        tags=tags,
        location='global',
        friendly_name=friendly_name or front_door_name,
        enabled_state='Enabled' if not disabled else 'Disabled',
        backend_pools=[
            BackendPool(
                name=backend_pool_name,
                backends=[
                    Backend(address=backend_address,
                            http_port=80,
                            https_port=443,
                            priority=1,
                            weight=50,
                            backend_host_header=backend_host_header
                            or backend_address,
                            enabled_state='Enabled')
                ],
                health_probe_settings={'id': probe_settings_id},
                load_balancing_settings={'id': load_balancing_settings_id},
                resource_state='Enabled')
        ],
        health_probe_settings=[
            HealthProbeSettingsModel(name=probe_setting_name,
                                     interval_in_seconds=probe_interval,
                                     path=probe_path,
                                     protocol=probe_protocol,
                                     resource_state='Enabled')
        ],
        frontend_endpoints=[
            FrontendEndpoint(name=frontend_endpoint_name,
                             host_name=frontend_host_name if frontend_host_name
                             else '{}.azurefd.net'.format(front_door_name),
                             session_affinity_enabled_state='Disabled',
                             resource_state='Enabled')
        ],
        load_balancing_settings=[
            LoadBalancingSettingsModel(name=load_balancing_settings_name,
                                       additional_latency_milliseconds=0,
                                       sample_size=4,
                                       successful_samples_required=2,
                                       resource_state='Enabled')
        ],
        routing_rules=[
            RoutingRule(name=routing_rule_name,
                        frontend_endpoints=[{
                            'id': frontend_endpoint_id
                        }],
                        accepted_protocols=accepted_protocols or ['Http'],
                        patterns_to_match=patterns_to_match or ['/*'],
                        forwarding_protocol=forwarding_protocol,
                        backend_pool={'id': backend_pool_id},
                        enabled_state='Enabled',
                        resource_state='Enabled')
        ])
    return sdk_no_wait(no_wait,
                       cf_frontdoor(cmd.cli_ctx, None).create_or_update,
                       resource_group_name, front_door_name, front_door)
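
_front_door_subresource_id is used above but not shown in this excerpt. A plausible sketch, assuming it only assembles the ARM ID of a Front Door child resource via msrestazure; the real helper in azext_front_door may differ:

from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id

def _front_door_subresource_id(cmd, resource_group_name, front_door_name,
                               child_type, child_name):
    # Builds e.g. .../Microsoft.Network/frontDoors/<fd>/backendPools/<child>.
    return resource_id(
        subscription=get_subscription_id(cmd.cli_ctx),
        resource_group=resource_group_name,
        namespace='Microsoft.Network',
        type='frontDoors',
        name=front_door_name,
        child_type_1=child_type,
        child_name_1=child_name)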
Code example #58
def gateway_update(cmd,
                   client,
                   resource_group,
                   service,
                   cpu=None,
                   memory=None,
                   instance_count=None,
                   assign_endpoint=None,
                   https_only=None,
                   scope=None,
                   client_id=None,
                   client_secret=None,
                   issuer_uri=None,
                   api_title=None,
                   api_description=None,
                   api_doc_location=None,
                   api_version=None,
                   server_url=None,
                   allowed_origins=None,
                   allowed_methods=None,
                   allowed_headers=None,
                   max_age=None,
                   allow_credentials=None,
                   exposed_headers=None,
                   no_wait=False):
    gateway = client.gateways.get(resource_group, service, DEFAULT_NAME)

    sso_properties = gateway.properties.sso_properties
    if scope and client_id and client_secret and issuer_uri:
        sso_properties = models.SsoProperties(
            scope=scope,
            client_id=client_id,
            client_secret=client_secret,
            issuer_uri=issuer_uri,
        )

    api_metadata_properties = _update_api_metadata(
        gateway.properties.api_metadata_properties, api_title, api_description,
        api_doc_location, api_version, server_url)

    cors_properties = _update_cors(gateway.properties.cors_properties,
                                   allowed_origins, allowed_methods,
                                   allowed_headers, max_age, allow_credentials,
                                   exposed_headers)

    resource_requests = models.GatewayResourceRequests(
        cpu=cpu or gateway.properties.resource_requests.cpu,
        memory=memory or gateway.properties.resource_requests.memory)

    properties = models.GatewayProperties(
        public=assign_endpoint
        if assign_endpoint is not None else gateway.properties.public,
        https_only=https_only
        if https_only is not None else gateway.properties.https_only,
        sso_properties=sso_properties,
        api_metadata_properties=api_metadata_properties,
        cors_properties=cors_properties,
        resource_requests=resource_requests)

    sku = models.Sku(name=gateway.sku.name,
                     tier=gateway.sku.tier,
                     capacity=instance_count or gateway.sku.capacity)

    gateway_resource = models.GatewayResource(properties=properties, sku=sku)

    logger.warning(LOG_RUNNING_PROMPT)
    return sdk_no_wait(no_wait, client.gateways.begin_create_or_update,
                       resource_group, service, DEFAULT_NAME, gateway_resource)
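
_update_cors (and, analogously, _update_api_metadata) is referenced above without its body. A hedged sketch of the likely contract, assuming the appplatform models expose a GatewayCorsProperties type with these fields; when no CORS argument was passed, the gateway's existing settings are kept:

def _update_cors(existing, allowed_origins, allowed_methods, allowed_headers,
                 max_age, allow_credentials, exposed_headers):
    args = (allowed_origins, allowed_methods, allowed_headers,
            max_age, allow_credentials, exposed_headers)
    if all(x is None for x in args):
        # Nothing supplied: keep the gateway's current CORS settings.
        return existing
    # Otherwise rebuild the CORS block from the supplied arguments.
    return models.GatewayCorsProperties(
        allowed_origins=allowed_origins,
        allowed_methods=allowed_methods,
        allowed_headers=allowed_headers,
        max_age=max_age,
        allow_credentials=allow_credentials,
        exposed_headers=exposed_headers)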
Code example #59
File: custom.py Project: tjegbejimba/azure-cli
def stop_service(client, service_name, resource_group_name, no_wait=False):
    return sdk_no_wait(no_wait,
                       client.begin_stop,
                       group_name=resource_group_name,
                       service_name=service_name)
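
Every example here funnels through sdk_no_wait. For reference, the helper in azure.cli.core.util is essentially the following (paraphrased; check your installed azure-cli for the exact code):

def sdk_no_wait(no_wait, func, *args, **kwargs):
    # With no_wait, disable polling so the SDK call returns as soon as the
    # request is accepted; otherwise hand back the poller/result unchanged.
    if no_wait:
        kwargs.update({'polling': False})
    return func(*args, **kwargs)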
Code example #60
File: custom.py Project: mabenedi/azure-cli
def create_cluster(cmd,
                   client,
                   cluster_name,
                   resource_group_name,
                   cluster_type,
                   location=None,
                   tags=None,
                   no_wait=False,
                   cluster_version='default',
                   cluster_tier=None,
                   cluster_configurations=None,
                   component_version=None,
                   headnode_size=None,
                   workernode_size=None,
                   zookeepernode_size=None,
                   edgenode_size=None,
                   kafka_management_node_size=None,
                   kafka_management_node_count=2,
                   kafka_client_group_id=None,
                   kafka_client_group_name=None,
                   workernode_count=3,
                   workernode_data_disks_per_node=None,
                   workernode_data_disk_storage_account_type=None,
                   workernode_data_disk_size=None,
                   http_username=None,
                   http_password=None,
                   ssh_username='sshuser',
                   ssh_password=None,
                   ssh_public_key=None,
                   storage_account=None,
                   storage_account_key=None,
                   storage_default_container=None,
                   storage_default_filesystem=None,
                   storage_account_managed_identity=None,
                   vnet_name=None,
                   subnet=None,
                   domain=None,
                   ldaps_urls=None,
                   cluster_admin_account=None,
                   cluster_admin_password=None,
                   cluster_users_group_dns=None,
                   assign_identity=None,
                   minimal_tls_version=None,
                   encryption_vault_uri=None,
                   encryption_key_name=None,
                   encryption_key_version=None,
                   encryption_algorithm='RSA-OAEP',
                   encryption_in_transit=None,
                   autoscale_type=None,
                   autoscale_min_workernode_count=None,
                   autoscale_max_workernode_count=None,
                   timezone=None,
                   days=None,
                   time=None,
                   autoscale_workernode_count=None,
                   encryption_at_host=None,
                   esp=False,
                   idbroker=False,
                   resource_provider_connection=None,
                   enable_private_link=None,
                   enable_compute_isolation=None,
                   host_sku=None,
                   no_validation_timeout=False):
    from .util import build_identities_info, build_virtual_network_profile, parse_domain_name, \
        get_storage_account_endpoint, validate_esp_cluster_create_params, set_vm_size
    from azure.mgmt.hdinsight.models import ClusterCreateParametersExtended, ClusterCreateProperties, OSType, \
        ClusterDefinition, ComputeProfile, HardwareProfile, Role, OsProfile, LinuxOperatingSystemProfile, \
        StorageProfile, StorageAccount, DataDisksGroups, SecurityProfile, \
        DirectoryType, DiskEncryptionProperties, Tier, SshProfile, SshPublicKey, \
        KafkaRestProperties, ClientGroupInfo, EncryptionInTransitProperties, \
        Autoscale, AutoscaleCapacity, AutoscaleRecurrence, AutoscaleSchedule, AutoscaleTimeAndCapacity, \
        NetworkProperties, PrivateLink, ComputeIsolationProperties

    validate_esp_cluster_create_params(esp, cluster_name, resource_group_name,
                                       cluster_type, subnet, domain,
                                       cluster_admin_account, assign_identity,
                                       ldaps_urls, cluster_admin_password,
                                       cluster_users_group_dns)

    if esp:
        if cluster_tier == Tier.standard:
            raise CLIError(
                'Cluster tier cannot be {} when --esp is specified. '
                'Please use default value or specify {} explicitly.'.format(
                    Tier.standard, Tier.premium))
        if not cluster_tier:
            cluster_tier = Tier.premium

    # Update optional parameters with defaults
    location = location or _get_rg_location(cmd.cli_ctx, resource_group_name)

    # Format dictionary/free-form arguments
    if not cluster_configurations:
        cluster_configurations = dict()

    if component_version:
        # Each entry is a 'component=version' string (format enforced by the
        # validator); convert the list to a dict, e.g. ['Spark=2.4'] -> {'Spark': '2.4'}.
        component_version = {c: v for c, v in
                             [version.split('=') for version in component_version]}

    # Validate whether HTTP credentials were provided
    if 'gateway' in cluster_configurations:
        gateway_config = cluster_configurations['gateway']
    else:
        gateway_config = dict()
    if http_username and 'restAuthCredential.username' in gateway_config:
        raise CLIError(
            'An HTTP username must be specified either as a command-line parameter '
            'or in the cluster configuration, but not both.')
    if not http_username:
        http_username = 'admin'  # Implement default logic here, in case a user specifies the username in configurations

    if not http_password:
        try:
            http_password = prompt_pass('HTTP password for the cluster:',
                                        confirm=True)
        except NoTTYException:
            raise CLIError(
                'Please specify --http-password in non-interactive mode.')

    # Update the cluster config with the HTTP credentials
    gateway_config['restAuthCredential.isEnabled'] = 'true'  # HTTP credentials are required
    http_username = http_username or gateway_config['restAuthCredential.username']
    gateway_config['restAuthCredential.username'] = http_username
    gateway_config['restAuthCredential.password'] = http_password
    cluster_configurations['gateway'] = gateway_config

    # Validate whether SSH credentials were provided
    if not (ssh_password or ssh_public_key):
        logger.warning(
            "SSH credentials not specified. Using the HTTP password as the SSH password."
        )
        ssh_password = http_password

    # Validate storage arguments from the user
    if storage_default_container and storage_default_filesystem:
        raise CLIError(
            'Either the default container or the default filesystem can be specified, but not both.'
        )

    # Retrieve primary blob service endpoint
    is_wasb = not storage_account_managed_identity
    storage_account_endpoint = None
    if storage_account:
        storage_account_endpoint = get_storage_account_endpoint(
            cmd, storage_account, is_wasb)

    # Attempt to infer the storage account key from the endpoint
    if not storage_account_key and storage_account and is_wasb:
        from .util import get_key_for_storage_account
        logger.info(
            'Storage account key not specified. Attempting to retrieve key...')
        key = get_key_for_storage_account(cmd, storage_account)
        if not key:
            raise CLIError(
                'Storage account key could not be inferred from storage account.'
            )
        storage_account_key = key

    # Attempt to provide a default container for WASB storage accounts
    if not storage_default_container and storage_account and is_wasb:
        storage_default_container = cluster_name.lower()
        logger.warning('Default WASB container not specified, using "%s".',
                       storage_default_container)
    elif not storage_default_filesystem and not is_wasb:
        storage_default_filesystem = cluster_name.lower()
        logger.warning('Default ADLS file system not specified, using "%s".',
                       storage_default_filesystem)

    # Validate storage info parameters
    if is_wasb and not _all_or_none(storage_account, storage_account_key,
                                    storage_default_container):
        raise CLIError(
            'If storage details are specified, the storage account, storage account key, '
            'and the default container must be specified.')
    if not is_wasb and not _all_or_none(storage_account,
                                        storage_default_filesystem):
        raise CLIError(
            'If storage details are specified, the storage account, '
            'and the default filesystem must be specified.')

    # Validate disk encryption parameters
    if not _all_or_none(encryption_vault_uri, encryption_key_name,
                        encryption_key_version):
        raise CLIError(
            'Either the encryption vault URI, key name and key version should be specified, '
            'or none of them should be.')

    # Validate kafka rest proxy parameters
    if not _all_or_none(kafka_client_group_id, kafka_client_group_name):
        raise CLIError(
            'Either both the Kafka client group id and the Kafka client group name should be specified, '
            'or neither should be.')

    # Validate and initialize autoscale setting
    autoscale_configuration = None
    load_based_type = "Load"
    schedule_based_type = "Schedule"
    if autoscale_type and autoscale_type.lower() == load_based_type.lower():
        if not all(
            [autoscale_min_workernode_count, autoscale_max_workernode_count]):
            raise CLIError(
                'When the --autoscale-type is Load, '
                'both --autoscale-min-workernode-count and --autoscale-max-workernode-count should be specified.'
            )

        autoscale_configuration = Autoscale(capacity=AutoscaleCapacity(
            min_instance_count=autoscale_min_workernode_count,
            max_instance_count=autoscale_max_workernode_count))
    elif autoscale_type and autoscale_type.lower() == schedule_based_type.lower():
        if not all([timezone, days, time, autoscale_workernode_count]):
            raise CLIError(
                'When the --autoscale-type is Schedule, all of the --timezone, --days, --time, '
                '--autoscale-workernode-count should be specified.')

        autoscale_configuration = Autoscale(recurrence=AutoscaleRecurrence(
            time_zone=timezone,
            schedule=[
                AutoscaleSchedule(
                    days=days,
                    time_and_capacity=AutoscaleTimeAndCapacity(
                        time=time,
                        min_instance_count=autoscale_workernode_count,
                        max_instance_count=autoscale_workernode_count))
            ]))

    # Specify virtual network profile only when network arguments are provided
    virtual_network_profile = subnet and build_virtual_network_profile(subnet)

    # Validate data disk parameters
    if not workernode_data_disks_per_node and workernode_data_disk_storage_account_type:
        raise CLIError(
            "Cannot define data disk storage account type unless disks per node is defined."
        )
    if not workernode_data_disks_per_node and workernode_data_disk_size:
        raise CLIError(
            "Cannot define data disk size unless disks per node is defined.")
    # Specify data disk groups only when disk arguments are provided
    workernode_data_disk_groups = workernode_data_disks_per_node and [
        DataDisksGroups(
            disks_per_node=workernode_data_disks_per_node,
            storage_account_type=workernode_data_disk_storage_account_type,
            disk_size_gb=workernode_data_disk_size)
    ]

    # Call the default-VM-size API to fill in any sizes the customer did not provide
    if not (workernode_size and headnode_size):
        headnode_size, workernode_size = set_vm_size(cmd.cli_ctx, location,
                                                     cluster_type,
                                                     headnode_size,
                                                     workernode_size)

    if not headnode_size:
        raise RequiredArgumentMissingError(
            'Please specify --headnode-size explicitly.')
    if not workernode_size:
        raise RequiredArgumentMissingError(
            'Please specify --workernode-size explicitly.')

    os_profile = OsProfile(
        linux_operating_system_profile=LinuxOperatingSystemProfile(
            username=ssh_username,
            password=ssh_password,
            ssh_profile=ssh_public_key and SshProfile(
                public_keys=[SshPublicKey(certificate_data=ssh_public_key)])))

    roles = [
        # Required roles
        Role(name="headnode",
             target_instance_count=2,
             hardware_profile=HardwareProfile(vm_size=headnode_size),
             os_profile=os_profile,
             virtual_network_profile=virtual_network_profile),
        Role(name="workernode",
             target_instance_count=workernode_count,
             hardware_profile=HardwareProfile(vm_size=workernode_size),
             os_profile=os_profile,
             virtual_network_profile=virtual_network_profile,
             data_disks_groups=workernode_data_disk_groups,
             autoscale_configuration=autoscale_configuration)
    ]

    if zookeepernode_size:
        roles.append(
            Role(name="zookeepernode",
                 target_instance_count=3,
                 hardware_profile=HardwareProfile(vm_size=zookeepernode_size),
                 os_profile=os_profile,
                 virtual_network_profile=virtual_network_profile))
    if edgenode_size:
        roles.append(
            Role(name="edgenode",
                 target_instance_count=1,
                 hardware_profile=HardwareProfile(vm_size=edgenode_size),
                 os_profile=os_profile,
                 virtual_network_profile=virtual_network_profile))
    if kafka_management_node_size:
        # generate kafkaRestProperties
        roles.append(
            Role(name="kafkamanagementnode",
                 target_instance_count=kafka_management_node_count,
                 hardware_profile=HardwareProfile(
                     vm_size=kafka_management_node_size),
                 os_profile=os_profile,
                 virtual_network_profile=virtual_network_profile))

    if esp and idbroker:
        roles.append(
            Role(name="idbrokernode",
                 target_instance_count=2,
                 virtual_network_profile=virtual_network_profile))

    storage_accounts = []
    if storage_account:
        # Specify storage account details only when storage arguments are provided
        storage_accounts.append(
            StorageAccount(name=storage_account_endpoint,
                           key=storage_account_key,
                           container=storage_default_container,
                           file_system=storage_default_filesystem,
                           resource_id=storage_account,
                           msi_resource_id=storage_account_managed_identity,
                           is_default=True))

    additional_storage_accounts = []  # TODO: Add support for additional storage accounts
    if additional_storage_accounts:
        storage_accounts += [
            StorageAccount(name=s.storage_account_endpoint,
                           key=s.storage_account_key,
                           container=s.container,
                           is_default=False)
            for s in additional_storage_accounts
        ]

    assign_identities = []
    if assign_identity:
        assign_identities.append(assign_identity)

    if storage_account_managed_identity:
        assign_identities.append(storage_account_managed_identity)

    cluster_identity = build_identities_info(
        assign_identities) if assign_identities else None

    domain_name = domain and parse_domain_name(domain)
    if not ldaps_urls and domain_name:
        ldaps_urls = ['ldaps://{}:636'.format(domain_name)]

    security_profile = domain and SecurityProfile(
        directory_type=DirectoryType.active_directory,
        domain=domain_name,
        ldaps_urls=ldaps_urls,
        domain_username=cluster_admin_account,
        domain_user_password=cluster_admin_password,
        cluster_users_group_dns=cluster_users_group_dns,
        aadds_resource_id=domain,
        msi_resource_id=assign_identity)

    disk_encryption_properties = encryption_vault_uri and DiskEncryptionProperties(
        vault_uri=encryption_vault_uri,
        key_name=encryption_key_name,
        key_version=encryption_key_version,
        encryption_algorithm=encryption_algorithm,
        msi_resource_id=assign_identity)

    if encryption_at_host:
        if disk_encryption_properties:
            disk_encryption_properties.encryption_at_host = encryption_at_host
        else:
            disk_encryption_properties = DiskEncryptionProperties(
                encryption_at_host=encryption_at_host)

    kafka_rest_properties = (
        kafka_client_group_id and kafka_client_group_name
    ) and KafkaRestProperties(client_group_info=ClientGroupInfo(
        group_id=kafka_client_group_id, group_name=kafka_client_group_name))

    encryption_in_transit_properties = encryption_in_transit and EncryptionInTransitProperties(
        is_encryption_in_transit_enabled=encryption_in_transit)

    # relay outbound and private link
    network_properties = (
        resource_provider_connection
        or enable_private_link) and NetworkProperties(
            resource_provider_connection=resource_provider_connection,
            private_link=PrivateLink.enabled
            if enable_private_link is True else PrivateLink.disabled)

    # compute isolation
    compute_isolation_properties = enable_compute_isolation and ComputeIsolationProperties(
        enable_compute_isolation=enable_compute_isolation, host_sku=host_sku)

    create_params = ClusterCreateParametersExtended(
        location=location,
        tags=tags,
        properties=ClusterCreateProperties(
            cluster_version=cluster_version,
            os_type=OSType.linux,
            tier=cluster_tier,
            cluster_definition=ClusterDefinition(
                kind=cluster_type,
                configurations=cluster_configurations,
                component_version=component_version),
            compute_profile=ComputeProfile(roles=roles),
            storage_profile=StorageProfile(storageaccounts=storage_accounts),
            security_profile=security_profile,
            disk_encryption_properties=disk_encryption_properties,
            kafka_rest_properties=kafka_rest_properties,
            min_supported_tls_version=minimal_tls_version,
            encryption_in_transit_properties=encryption_in_transit_properties,
            network_properties=network_properties,
            compute_isolation_properties=compute_isolation_properties),
        identity=cluster_identity)

    if no_wait:
        return sdk_no_wait(no_wait, client.create, resource_group_name,
                           cluster_name, create_params)

    return client.create(resource_group_name, cluster_name, create_params)
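
_all_or_none, used above for the storage, disk-encryption, and Kafka REST proxy checks, is not shown in this excerpt. A one-line sketch matching its apparent contract (every parameter supplied, or none at all):

def _all_or_none(*params):
    # Valid when all parameters are set, or when none of them are.
    return all(params) or not any(params)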