# Service Principal
import os

import adal
from msrestazure.azure_active_directory import AdalAuthentication
from azure.mgmt.resource import ResourceManagementClient

tenant = os.environ['AZURE_TENANT_ID']
client_id = os.environ['AZURE_CLIENT_ID']
password = os.environ['AZURE_CLIENT_SECRET']

# Public Azure - default values
authentication_endpoint = 'https://login.microsoftonline.com/'
azure_endpoint = 'https://management.azure.com/'

context = adal.AuthenticationContext(authentication_endpoint + tenant)
credentials = AdalAuthentication(context.acquire_token_with_client_credentials,
                                 azure_endpoint, client_id, password)
subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']

client = ResourceManagementClient(credentials,
                                  subscription_id,
                                  base_url=azure_endpoint)

resource_group_params = {'location': 'southindia'}

# Create (or update) the resource group

try:
    client.resource_groups.create_or_update('azure-sample-group',
                                            resource_group_params)
except Exception as ex:
    print('Failed to create resource group: {}'.format(ex))
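
# The adal/AdalAuthentication flow above is the legacy (track 1) pattern. As a
# minimal sketch, and assuming the same AZURE_* environment variables, the
# equivalent with the newer azure-identity package would look roughly like this
# (illustrative, not part of the original example):
from azure.identity import ClientSecretCredential

credential = ClientSecretCredential(
    tenant_id=os.environ['AZURE_TENANT_ID'],
    client_id=os.environ['AZURE_CLIENT_ID'],
    client_secret=os.environ['AZURE_CLIENT_SECRET'])
client_v2 = ResourceManagementClient(credential,
                                     os.environ['AZURE_SUBSCRIPTION_ID'])
client_v2.resource_groups.create_or_update('azure-sample-group',
                                           resource_group_params)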

# Example 2

    def run(self, SUBSCRIPTION_ID, GROUP_NAME, LOCATION, VM_NAME, client_id, secret, tenant):
        credentials = get_credentials()
        resource_group_client = ResourceManagementClient(credentials, SUBSCRIPTION_ID)
        network_client = NetworkManagementClient(credentials, SUBSCRIPTION_ID)
        compute_client = ComputeManagementClient(credentials, SUBSCRIPTION_ID)
        stop_vm(compute_client)
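
# The run() fragment above relies on get_credentials() and stop_vm(), which are
# not included in this snippet. A minimal sketch of what they could look like,
# assuming the legacy ServicePrincipalCredentials flow and that GROUP_NAME and
# VM_NAME name an existing VM (both helpers are hypothetical reconstructions,
# not the original code):
def get_credentials():
    # In the original these values would come from run()'s client_id, secret
    # and tenant parameters (e.g. via a closure or module globals).
    return ServicePrincipalCredentials(client_id=client_id,
                                       secret=secret,
                                       tenant=tenant)

def stop_vm(compute_client):
    # Power off the VM named by GROUP_NAME/VM_NAME and block until done.
    # Newer azure-mgmt-compute releases expose begin_power_off() instead.
    async_stop = compute_client.virtual_machines.power_off(GROUP_NAME, VM_NAME)
    async_stop.wait()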
    def __init__(self, name=None, configuration="~/.cloudmesh/cloudmesh.yaml"):
        """
        Initializes the provider. The default parameters are read from the
        configuration file that is defined in yaml format.

        :param name: The name of the provider as defined in the yaml file
        :param configuration: The location of the yaml configuration file
        """

        conf = Config(configuration)["cloudmesh"]

        self.user = Config()["cloudmesh"]["profile"]["user"]

        self.spec = conf["cloud"][name]
        self.cloud = name

        cred = self.spec["credentials"]
        self.default = self.spec["default"]
        self.cloudtype = self.spec["cm"]["kind"]
        super().__init__(name, conf)

        VERBOSE(cred, verbose=10)

        if self.cloudtype != 'azure':
            Console.error("This class is meant for azure cloud")

        # ServicePrincipalCredentials related Variables to configure in
        # cloudmesh.yaml file

        # AZURE_APPLICATION_ID = '<Application ID from Azure Active Directory
        # App Registration Process>'

        # AZURE_SECRET_KEY = '<Secret Key from Application configured in
        # Azure>'

        # AZURE_TENANT_ID = '<Directory ID from Azure Active Directory
        # section>'

        credentials = ServicePrincipalCredentials(
            client_id=cred['AZURE_APPLICATION_ID'],
            secret=cred['AZURE_SECRET_KEY'],
            tenant=cred['AZURE_TENANT_ID']
        )

        subscription = cred['AZURE_SUBSCRIPTION_ID']

        # Management Clients
        self.resource_client = ResourceManagementClient(
            credentials, subscription)
        self.compute_client = ComputeManagementClient(
            credentials, subscription)
        self.network_client = NetworkManagementClient(
            credentials, subscription)

        # VMs abbreviation
        self.vms = self.compute_client.virtual_machines
        self.imgs = self.compute_client.virtual_machine_images

        # Azure Resource Group
        self.GROUP_NAME = self.default["resource_group"]

        # Azure Datacenter Region
        self.LOCATION = cred["AZURE_REGION"]

        # NetworkManagementClient related Variables
        self.VNET_NAME = self.default["network"]
        self.SUBNET_NAME = self.default["subnet"]
        self.IP_CONFIG_NAME = self.default["AZURE_VM_IP_CONFIG"]
        self.NIC_NAME = self.default["AZURE_VM_NIC"]

        # Azure VM Storage details
        self.OS_DISK_NAME = self.default["AZURE_VM_DISK_NAME"]
        self.USERNAME = self.default["AZURE_VM_USER"]
        self.PASSWORD = self.default["AZURE_VM_PASSWORD"]
        self.VM_NAME = self.default["AZURE_VM_NAME"]

        # Create or Update Resource group
        self.get_resource_group()
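
    # get_resource_group() is implemented elsewhere in the provider class. A
    # minimal sketch of what it could look like, assuming the resource_client,
    # GROUP_NAME and LOCATION attributes set up above (hypothetical
    # reconstruction, not the cloudmesh implementation):
    def get_resource_group(self):
        # Reuse the group if it already exists, otherwise create it in LOCATION.
        if self.resource_client.resource_groups.check_existence(self.GROUP_NAME):
            return self.resource_client.resource_groups.get(self.GROUP_NAME)
        return self.resource_client.resource_groups.create_or_update(
            self.GROUP_NAME, {'location': self.LOCATION})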

# Example 4

def main():
    # # Loading input values
    # print("::debug::Loading input values")
    template_file = os.environ.get("INPUT_ARMTEMPLATE_FILE",
                                   default="arm_deploy.json")
    template_params_file = os.environ.get("INPUT_ARMTEMPLATEPARAMS_FILE",
                                          default="")
    azure_credentials = os.environ.get("INPUT_AZURE_CREDENTIALS", default="{}")
    resource_group = os.environ.get("INPUT_RESOURCE_GROUP", default=None)
    mapped_params = os.environ.get("INPUT_MAPPED_PARAMS", default="{}")
    deployment_mode = os.environ.get("INPUT_DEPLOYMENT_MODE",
                                     default="Incremental")

    repo_name = os.environ.get("GITHUB_REPOSITORY")
    print(repo_name)
    repo_name = repo_name.split('/')
    print(repo_name)
    repo_name = repo_name[0] + '_' + repo_name[1]
    print(repo_name)
    deploy_enum = get_deploy_mode_obj(deployment_mode)
    try:
        azure_credentials = json.loads(azure_credentials)
    except JSONDecodeError:
        print(
            "::error::Please paste output of `az ad sp create-for-rbac --name <your-sp-name> --role contributor --scopes /subscriptions/<your-subscriptionId>/resourceGroups/<your-rg> --sdk-auth` as value of secret variable: AZURE_CREDENTIALS"
        )
        raise AMLConfigurationException(
            f"Incorrect or poorly formed output from azure credentials saved in AZURE_CREDENTIALS secret. See setup in https://github.com/Azure/aml-workspace/blob/master/README.md"
        )

    try:
        mapped_params = json.loads(mapped_params)
    except JSONDecodeError:
        print(
            "::error::Incorrect mapped parameters Format , please put mapped parameters strings like this {\"patToken\":\"${{secrets.PAT_TOKEN}}\", .... }"
        )
        raise AMLConfigurationException(
            f"Incorrect or poorly formed mapped params. See setup in https://github.com/Azure/aml_configure/blob/master/README.md"
        )

    if not resource_group:
        raise AMLConfigurationException(f"A resource group must be provided")
    # Checking provided parameters
    print("::debug::Checking provided parameters")
    required_parameters_provided(
        parameters=azure_credentials,
        keys=["tenantId", "clientId", "clientSecret"],
        message=
        "Required parameter(s) not found in your azure credentials saved in AZURE_CREDENTIALS secret for logging in to the workspace. Please provide a value for the following key(s): "
    )

    # # Loading parameters file
    # print("::debug::Loading parameters file")
    template_file_file_path = os.path.join(".cloud", ".azure", template_file)

    # Mask values
    print("::debug::Masking parameters")
    mask_parameter(parameter=azure_credentials.get("tenantId", ""))
    mask_parameter(parameter=azure_credentials.get("clientId", ""))
    mask_parameter(parameter=azure_credentials.get("clientSecret", ""))
    #mask_parameter(parameter=azure_credentials.get("subscriptionId", ""))

    # Login User on CLI
    tenant_id = azure_credentials.get("tenantId", "")
    service_principal_id = azure_credentials.get("clientId", "")
    service_principal_password = azure_credentials.get("clientSecret", "")
    subscriptionId = azure_credentials.get("subscriptionId", "")

    parameters = get_template_parameters(template_params_file, mapped_params)
    credentials = None
    try:
        credentials = ServicePrincipalCredentials(
            client_id=service_principal_id,
            secret=service_principal_password,
            tenant=tenant_id)
    except Exception as ex:
        raise CredentialsVerificationError(ex)

    client = None
    try:
        client = ResourceManagementClient(credentials, subscriptionId)
    except Exception as ex:
        raise ResourceManagementError(ex)

    template = None
    with open(template_file_file_path, 'r') as template_file_fd:
        template = json.load(template_file_fd)

    deployment_properties = {
        'properties': {
            'mode': deploy_enum,
            'template': template,
            'parameters': parameters
        }
    }

    try:
        validate = client.deployments.validate(resource_group, repo_name,
                                               deployment_properties)
        validate.wait()
    except Exception as ex:
        raise ActionDeploymentError(ex)
    deployment_async_operation = None
    try:
        deployment_async_operation = client.deployments.create_or_update(
            resource_group, repo_name, deployment_properties)
        deployment_async_operation.wait()
    except Exception as ex:
        raise ActionDeploymentError(ex)

    deploy_result = deployment_async_operation.result()
    print(
        f"::set-output name=deployment_parameters::{deploy_result.properties.parameters}"
    )
    print(
        f"::set-output name=deployment_output::{deploy_result.properties.outputs}"
    )
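
# get_deploy_mode_obj() and the other helpers used above come from the action's
# utility module and are not shown here. A minimal sketch of
# get_deploy_mode_obj(), assuming the SDK's DeploymentMode enum (hypothetical
# reconstruction):
from azure.mgmt.resource.resources.models import DeploymentMode

def get_deploy_mode_obj(deployment_mode):
    # Map the INPUT_DEPLOYMENT_MODE string onto the SDK enum, falling back to
    # Incremental for unrecognized values.
    modes = {
        "incremental": DeploymentMode.incremental,
        "complete": DeploymentMode.complete,
    }
    return modes.get(str(deployment_mode).lower(), DeploymentMode.incremental)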

# Example 5

import argparse
import time

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.datafactory import DataFactoryManagementClient
from azure.mgmt.datafactory.models import Factory

parser = argparse.ArgumentParser()
parser.add_argument("--sub", help="SubscriptionID.", type=str)
parser.add_argument("--cliid", help="ClientID or AppID.", type=str)
parser.add_argument("--pwd", help="Password.", type=str)
parser.add_argument("--tntid", help="TenantID.", type=str)
parser.add_argument("--basename", help="Prefix for all names.", type=str)
parser.add_argument("--loc", help="Location.", type=str)
args = parser.parse_args()

print('SubscriptionID: ' + args.sub)

# Create credentials
credentials = ServicePrincipalCredentials(client_id=args.cliid,
                                          secret=args.pwd,
                                          tenant=args.tntid)
resource_client = ResourceManagementClient(credentials, args.sub)
adf_client = DataFactoryManagementClient(credentials, args.sub)

# Create resources group
#resource_client.resource_groups.create_or_update(args.basename + '-RG', {'location': args.loc})

# Create a data factory
df_resource = Factory(location=args.loc)
df = adf_client.factories.create_or_update(args.basename + '-RG',
                                           args.basename + '-DF', df_resource)
while df.provisioning_state != 'Succeeded':
    df = adf_client.factories.get(args.basename + '-RG', args.basename + '-DF')
    time.sleep(1)
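
# The provisioning loop above polls forever if the factory never reaches
# 'Succeeded'. A minimal sketch of a bounded variant, assuming the same
# adf_client and naming scheme (hypothetical helper, not part of the original):
def wait_for_factory(adf_client, rg_name, df_name, timeout_seconds=300):
    # Poll the factory until it is provisioned or the timeout elapses.
    deadline = time.time() + timeout_seconds
    factory = adf_client.factories.get(rg_name, df_name)
    while factory.provisioning_state != 'Succeeded':
        if time.time() > deadline:
            raise TimeoutError('Factory {} was not provisioned in time'.format(df_name))
        time.sleep(1)
        factory = adf_client.factories.get(rg_name, df_name)
    return factory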

# Create a machine learning workspace
#ws = Workspace.create(
def get_resource_client(credentials, subscription_id):
    resource_client = ResourceManagementClient(credentials, subscription_id)
    return resource_client
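
# A short usage example for get_resource_client(), assuming azure-identity is
# installed and AZURE_SUBSCRIPTION_ID is set (illustrative only):
import os
from azure.identity import DefaultAzureCredential

resource_client = get_resource_client(DefaultAzureCredential(),
                                      os.environ['AZURE_SUBSCRIPTION_ID'])
for group in resource_client.resource_groups.list():
    print(group.name, group.location)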

# Example 7

import logging
import os
from collections import Counter

from azure.identity import ClientSecretCredential
from azure.mgmt.resource import ResourceManagementClient, SubscriptionClient

client_id = os.environ["ARM_CLIENT_ID"]
secret = os.environ["ARM_CLIENT_SECRET"]
tenant = os.environ["TENANT_ID"]

CREDENTIALS = ClientSecretCredential(
    client_id=client_id,
    client_secret=secret,
    tenant_id=tenant,
)

client = SubscriptionClient(CREDENTIALS)

az_services = []

for sub in client.subscriptions.list():
    if sub.state == "Enabled":
        try:
            resource_group_client = ResourceManagementClient(
                CREDENTIALS, sub.subscription_id)

            resources = resource_group_client.resources.list()
            for i in resources:
                if "databases/master" not in i.id:
                    az_services.append(i.type.lower())
        except Exception as e:
            logging.error(f"Unable to get data - {sub.subscription_id}. {e}")

print(len(az_services))

for i in Counter(az_services).most_common():
    print(i)

# Example 8

def get_resource_client(credential, subscription_id):
    return ResourceManagementClient(credentials=credential, subscription_id=subscription_id)

# Example 9

def run_example():
    """Web Site management example."""
    #
    # Create the Resource Manager Client with an Application (service principal) token provider
    #
    subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']

    credentials = ServicePrincipalCredentials(
        client_id=os.environ['AZURE_CLIENT_ID'],
        secret=os.environ['AZURE_CLIENT_SECRET'],
        tenant=os.environ['AZURE_TENANT_ID'])
    resource_client = ResourceManagementClient(credentials, subscription_id)
    web_client = WebSiteManagementClient(credentials, subscription_id)

    # Register for required namespace
    resource_client.providers.register('Microsoft.Web')

    # Create Resource group
    print('Create Resource Group')
    resource_group_params = {'location': 'westus'}
    print_item(
        resource_client.resource_groups.create_or_update(
            GROUP_NAME, resource_group_params))

    #
    # Create an App Service plan for your WebApp
    #
    print('Create an App Service plan for your WebApp')

    service_plan_async_operation = web_client.app_service_plans.create_or_update(
        GROUP_NAME, SERVER_FARM_NAME,
        AppServicePlan(location=WEST_US,
                       sku=SkuDescription(name='S1',
                                          capacity=1,
                                          tier='Standard')))
    service_plan = service_plan_async_operation.result()
    print_item(service_plan)

    #
    # Create a Site to be hosted on the App Service plan
    #
    print('Create a Site to be hosted on the App Service plan')
    site_async_operation = web_client.web_apps.create_or_update(
        GROUP_NAME, SITE_NAME,
        Site(location=WEST_US, server_farm_id=service_plan.id))
    site = site_async_operation.result()
    print_item(site)

    #
    # List Sites by Resource Group
    #
    print('List Sites by Resource Group')
    for site in web_client.web_apps.list_by_resource_group(GROUP_NAME):
        print_item(site)

    #
    # Get a single Site
    #
    print('Get a single Site')
    site = web_client.web_apps.get(GROUP_NAME, SITE_NAME)
    print_item(site)

    print("Your site and server farm have been created. " \
      "You can now go and visit at http://{}/".format(site.default_host_name))
    input("Press enter to delete the site and server farm.")

    #
    # Delete a Site
    #
    print('Deleting the Site')
    web_client.web_apps.delete(GROUP_NAME, SITE_NAME)

    #
    # Delete the Resource Group
    #
    print('Deleting the resource group')
    delete_async_operation = resource_client.resource_groups.delete(GROUP_NAME)
    delete_async_operation.wait()
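
# print_item(), GROUP_NAME, SERVER_FARM_NAME, SITE_NAME and WEST_US are defined
# elsewhere in the original sample. A minimal sketch of print_item(), assuming
# the usual ARM resource attributes (hypothetical reconstruction):
def print_item(resource):
    # Print the common fields exposed by tracked ARM resources.
    print("\tName: {}".format(resource.name))
    print("\tId: {}".format(resource.id))
    print("\tLocation: {}".format(resource.location))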

# Example 10

import os,traceback,sys,threading,time
from modules import credential_set
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient

print ('Delete Azure Resource-Group\ntype the Resource-Group NAME')
GROUP_NAME = input('>>>  ')

credentials, subscription_id = credential_set.get_credentials()
resource_client = ResourceManagementClient(credentials, subscription_id)

def delete_rg():
    print('\nDelete the Resource Group ...')
    delete_async_operation = resource_client.resource_groups.delete(GROUP_NAME)
    delete_async_operation.wait()

deleting = threading.Thread(target=delete_rg)
deleting.start()

while True:
    sys.stdout.write('.')
    sys.stdout.flush()
    time.sleep(1)
    if not deleting.is_alive():
        break
print('\nDeleted!: {}'.format(GROUP_NAME))
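
# A thread-free variant of the same progress loop: the poller returned by
# resource_groups.delete() exposes done(), so the background thread is not
# strictly needed (a sketch, not part of the original script):
def delete_rg_with_progress():
    poller = resource_client.resource_groups.delete(GROUP_NAME)
    while not poller.done():
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(1)
    print('\nDeleted!: {}'.format(GROUP_NAME))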

# Example 11

    def run(self, Subcription_id, Group_Name, Location, VM_Name, Client_Id,
            Secret, Tenant_Id):

        SUBSCRIPTION_ID = Subcription_id
        GROUP_NAME = Group_Name
        LOCATION = Location
        VM_NAME = VM_Name

        def create_availability_set(compute_client):
            avset_params = {
                'location': LOCATION,
                'sku': {
                    'name': 'Aligned'
                },
                'platform_fault_domain_count': 2
            }
            availability_set_result = compute_client.availability_sets.create_or_update(
                GROUP_NAME, 'myAVSet', avset_params)

        def create_resource_group(resource_group_client):
            resource_group_params = {'location': LOCATION}
            resource_group_result = resource_group_client.resource_groups.create_or_update(
                GROUP_NAME, resource_group_params)

        def get_credentials():
            credentials = ServicePrincipalCredentials(client_id=Client_Id,
                                                      secret=Secret,
                                                      tenant=Tenant_Id)
            return credentials

        def create_public_ip_address(network_client):
            public_ip_address_params = {
                'location': LOCATION,
                'public_ip_allocation_method': 'Dynamic'
            }
            creation_result = network_client.public_ip_addresses.create_or_update(
                GROUP_NAME, 'myIPAddress', public_ip_address_params)
            return creation_result.result()

        def create_vnet(network_client):
            vnet_params = {
                'location': LOCATION,
                'address_space': {
                    'address_prefixes': ['10.0.0.0/16']
                }
            }
            creation_result = network_client.virtual_networks.create_or_update(
                GROUP_NAME, 'myVNet', vnet_params)
            return creation_result.result()

        def create_subnet(network_client):
            subnet_params = {'address_prefix': '10.0.0.0/24'}
            creation_result = network_client.subnets.create_or_update(
                GROUP_NAME, 'myVNet', 'mySubnet', subnet_params)
            return creation_result.result()

        def create_nic(network_client):
            subnet_info = network_client.subnets.get(GROUP_NAME, 'myVNet',
                                                     'mySubnet')
            publicIPAddress = network_client.public_ip_addresses.get(
                GROUP_NAME, 'myIPAddress')
            nic_params = {
                'location':
                LOCATION,
                'ip_configurations': [{
                    'name': 'myIPConfig',
                    'public_ip_address': publicIPAddress,
                    'subnet': {
                        'id': subnet_info.id
                    }
                }]
            }
            creation_result = network_client.network_interfaces.create_or_update(
                GROUP_NAME, 'myNic', nic_params)
            return creation_result.result()

        def create_vm(network_client, compute_client):
            nic = network_client.network_interfaces.get(GROUP_NAME, 'myNic')
            avset = compute_client.availability_sets.get(GROUP_NAME, 'myAVSet')
            vm_parameters = {
                'location': LOCATION,
                'os_profile': {
                    'computer_name': VM_NAME,
                    'admin_username': '******',
                    'admin_password': '******'
                },
                'hardware_profile': {
                    'vm_size': 'Standard_A0'
                },
                'storage_profile': {
                    'image_reference': {
                        'publisher': 'MicrosoftWindowsServer',
                        'offer': 'WindowsServer',
                        'sku': '2012-R2-Datacenter',
                        'version': 'latest'
                    }
                },
                'network_profile': {
                    'network_interfaces': [{
                        'id': nic.id
                    }]
                },
                'availability_set': {
                    'id': avset.id
                }
            }
            creation_result = compute_client.virtual_machines.create_or_update(
                GROUP_NAME, VM_NAME, vm_parameters)
            return creation_result.result()

        credentials = get_credentials()
        resource_group_client = ResourceManagementClient(
            credentials, SUBSCRIPTION_ID)
        network_client = NetworkManagementClient(credentials, SUBSCRIPTION_ID)
        compute_client = ComputeManagementClient(credentials, SUBSCRIPTION_ID)

        create_resource_group(resource_group_client)
        create_availability_set(compute_client)
        creation_result = create_public_ip_address(network_client)
        creation_result = create_vnet(network_client)
        creation_result = create_subnet(network_client)
        creation_result = create_nic(network_client)
        creation_result = create_vm(network_client, compute_client)
        print("VM created Successfully")

# Example 12

import os

from dateutil import parser as date_parse
from azure.identity import DefaultAzureCredential
from azure.keyvault.keys import KeyClient
from azure.mgmt.keyvault import KeyVaultManagementClient
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.sql import SqlManagementClient


def main():

    SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
    PASSWORD = os.environ.get("PASSWORD", None)
    TENANT_ID = os.environ.get("TENANT_ID", None)
    CLIENT_OID = os.environ.get("CLIENT_OID", None)
    GROUP_NAME = "testgroupx"
    SERVER_KEY = "server_keyxxyyzz"
    SERVER = "serverxxy"
    VAULT = "vaultxxy"

    # Create client
    # # For other authentication approaches, please see: https://pypi.org/project/azure-identity/
    resource_client = ResourceManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )
    sql_client = SqlManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )
    # - init depended client -
    keyvault_client = KeyVaultManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )
    # - end -

    # Create resource group
    resource_client.resource_groups.create_or_update(
        GROUP_NAME,
        {"location": "eastus"}
    )

    # - init depended resources -
    # Create Server
    server = sql_client.servers.begin_create_or_update(
        GROUP_NAME,
        SERVER,
        {
          "location": "eastus",
          "identity": {
            "type": "SystemAssigned"
          },
          "administrator_login": "******",
          "administrator_login_password": PASSWORD,
          "version": "12.0",
          "public_network_access":"Enabled"
        }
    ).result()
    print("Create server:\n{}".format(server))

    # Create vault
    vault = keyvault_client.vaults.begin_create_or_update(
        GROUP_NAME,
        VAULT,
        {
            'location': "eastus",
            'properties': {
                'sku': {
                    'family': 'A',
                    'name': 'standard'
                },
                'tenant_id': TENANT_ID,
                "access_policies": [
                    {
                    "tenant_id": TENANT_ID,
                    "object_id": CLIENT_OID,
                    "permissions": {
                        "keys": [
                        "get",
                        "create",
                        "delete",
                        "list",
                        "update",
                        "import",
                        "backup",
                        "restore",
                        "recover"
                        ],
                        "secrets": [
                        "get",
                        "list",
                        "set",
                        "delete",
                        "backup",
                        "restore",
                        "recover"
                        ],
                        "certificates": [
                        "get",
                        "list",
                        "delete",
                        "create",
                        "import",
                        "update",
                        "managecontacts",
                        "getissuers",
                        "listissuers",
                        "setissuers",
                        "deleteissuers",
                        "manageissuers",
                        "recover"
                        ],
                        "storage": [
                        "get",
                        "list",
                        "delete",
                        "set",
                        "update",
                        "regeneratekey",
                        "setsas",
                        "listsas",
                        "getsas",
                        "deletesas"
                        ]
                    }
                    },
                    {
                    "tenant_id": TENANT_ID,
                    "object_id": server.identity.principal_id,
                    "permissions": {
                        "keys": [
                        "unwrapKey",
                        "get",
                        "wrapKey",
                        "list"
                        ]
                    }
                    }
                ],
                'enabled_for_disk_encryption': True,
                'enable_soft_delete': True,
                'soft_delete_retention_in_days': 90,
                'network_acls': {
                    'bypass': '******',
                    'default_action': "Allow",
                    'ip_rules': [],
                    'virtual_network_rules': []
                }
            }
        }
    ).result()

    key_client = KeyClient(
        vault.properties.vault_uri,
        DefaultAzureCredential()
    )

    key = key_client.create_key(
        "testkey",
        "RSA",
        size=2048,
        expires_on=date_parse.parse("2050-02-02T08:00:00.000Z")
    )
    SERVER_KEY = VAULT + "_testkey_" + key.id.split("/")[-1]
    # - end -

    # Create server key
    server_key = sql_client.server_keys.begin_create_or_update(
        GROUP_NAME,
        SERVER,
        SERVER_KEY,
        {
            # TODO: init resource body
            "server_key_type": "AzureKeyVault",
            "uri": key.id
        }
    ).result()
    print("Create server key:\n{}".format(server_key))

    # Get server key
    server_key = sql_client.server_keys.get(
        GROUP_NAME,
        SERVER,
        SERVER_KEY
    )
    print("Get server key:\n{}".format(server_key))

    # Delete server key
    server_key = sql_client.server_keys.begin_delete(
        GROUP_NAME,
        SERVER,
        SERVER_KEY
    ).result()
    print("Delete server key.\n")

    # Delete Group
    resource_client.resource_groups.begin_delete(
        GROUP_NAME
    ).result()

# Example 13

import os

from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource import ResourceManagementClient


def main():

    SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
    GROUP_NAME = "testgroupx"
    ROUTE_FILTER_RULE = "route_filter_rulexxyyzz"
    ROUTE_FILTER = "route_filterxxyyzz"

    # Create client
    # For other authentication approaches, please see: https://pypi.org/project/azure-identity/
    resource_client = ResourceManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )
    network_client = NetworkManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )

    # Create resource group
    resource_client.resource_groups.create_or_update(
        GROUP_NAME,
        {"location": "eastus"}
    )

    # - init depended resources -
    # Create route filter
    network_client.route_filters.begin_create_or_update(
        GROUP_NAME,
        ROUTE_FILTER,
        {
          "location": "eastus",
          "tags": {
            "key1": "value1"
          },
          "rules": []
        }
    ).result()
    # - end -

    # Create route filter rule
    route_filter_rule = network_client.route_filter_rules.begin_create_or_update(
        GROUP_NAME,
        ROUTE_FILTER,
        ROUTE_FILTER_RULE,
        {
          "access": "Allow",
          "route_filter_rule_type": "Community",
          "communities": [
            "12076:51004"
          ]
        }
    ).result()
    print("Create route filter rule:\n{}".format(route_filter_rule))

    # Get route filter rule
    route_filter_rule = network_client.route_filter_rules.get(
        GROUP_NAME,
        ROUTE_FILTER,
        ROUTE_FILTER_RULE
    )
    print("Get route filter rule:\n{}".format(route_filter_rule))

    # Delete route filter rule
    route_filter_rule = network_client.route_filter_rules.begin_delete(
        GROUP_NAME,
        ROUTE_FILTER,
        ROUTE_FILTER_RULE
    ).result()
    print("Delete route filter rule.\n")

    # Delete Group
    resource_client.resource_groups.begin_delete(
        GROUP_NAME
    ).result()

# Example 14

import os

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource import ResourceManagementClient

# CLIENT_ID, CLIENT_SECRET, TENANT_ID and SUBSCRIPTION_ID are not defined in
# this fragment; here they are assumed to come from the environment.
SUBSCRIPTION_ID = os.environ['AZURE_SUBSCRIPTION_ID']
CLIENT_ID = os.environ['AZURE_CLIENT_ID']
CLIENT_SECRET = os.environ['AZURE_CLIENT_SECRET']
TENANT_ID = os.environ['AZURE_TENANT_ID']

#--------------------------------------------------------------------------
AZURE_LOCATION = 'eastus'
RESOURCE_GROUP = "myResourceGroup"
NAT_GATEWAY_NAME = "myNatGateway"
PUBLIC_IP_ADDRESS_NAME = "myPublicIpAddress"
PUBLIC_IPPREFIX_NAME = "myPublicIpprefix"
PUBLIC_IP_PREFIX_NAME = "myPublicIpPrefix"

#--------------------------------------------------------------------------
# management clients
#--------------------------------------------------------------------------
credentials = ServicePrincipalCredentials(client_id=CLIENT_ID,
                                          secret=CLIENT_SECRET,
                                          tenant=TENANT_ID)
mgmt_client = NetworkManagementClient(credentials, SUBSCRIPTION_ID)
resource_client = ResourceManagementClient(credentials, SUBSCRIPTION_ID)

#--------------------------------------------------------------------------
# resource group (prerequisite)
#--------------------------------------------------------------------------
print("Creating Resource Group")
resource_client.resource_groups.create_or_update(
    resource_group_name=RESOURCE_GROUP,
    parameters={'location': AZURE_LOCATION})

#--------------------------------------------------------------------------
# /PublicIPAddresses/put/Create public IP address defaults[put]
#--------------------------------------------------------------------------
print("Create public IP address defaults")
BODY = {
    "location": AZURE_LOCATION
    # ... (the rest of this example is truncated in the source)
}


def main():

    SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
    GROUP_NAME = "testgroupx"
    HOST_GROUP_NAME = "hostgroupx"
    HOST_NAME = "hostx"

    # Create client
    # For other authentication approaches, please see: https://pypi.org/project/azure-identity/
    resource_client = ResourceManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )
    compute_client = ComputeManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )

    # Create resource group
    resource_client.resource_groups.create_or_update(
        GROUP_NAME,
        {"location": "eastus"}
    )

    # Create dedicated host group
    host_group = compute_client.dedicated_host_groups.create_or_update(
        GROUP_NAME,
        HOST_GROUP_NAME,
        {
          "location": "eastus",
          "tags": {
            "department": "finance"
          },
          "zones": [
            "1"
          ],
          "platform_fault_domain_count": "3"
        }
    )
    print("Create dedicated host group:\n{}".format(host_group))

    # Create dedicated host
    host = compute_client.dedicated_hosts.begin_create_or_update(
        GROUP_NAME,
        HOST_GROUP_NAME,
        HOST_NAME,
        {
          "location": "eastus",
          "tags": {
            "department": "HR"
          },
          "platform_fault_domain": "1",
          "sku": {
            "name": "DSv3-Type1"
          }
        }
    ).result()
    print("Create dedicated host:\n{}".format(host))

    # Get dedicated host group
    host_group = compute_client.dedicated_host_groups.get(
        GROUP_NAME,
        HOST_GROUP_NAME
    )
    print("Get dedicated host group:\n{}".format(host_group))

    # Get dedicated host
    host = compute_client.dedicated_hosts.get(
        GROUP_NAME,
        HOST_GROUP_NAME,
        HOST_NAME
    )
    print("Get dedicated host:\n{}".format(host))

    # Update dedicated host group
    host_group = compute_client.dedicated_host_groups.update(
        GROUP_NAME,
        HOST_GROUP_NAME,
        {
          "tags": {
            "department": "finance"
          },
          "platform_fault_domain_count": "3"
        }
    )
    print("Update dedicated host group:\n{}".format(host_group))

    # Update dedicated host
    host = compute_client.dedicated_hosts.begin_update(
        GROUP_NAME,
        HOST_GROUP_NAME,
        HOST_NAME,
        {
          "tags": {
            "department": "HR"
          }
        }
    ).result()
    print("Update dedicated host:\n{}".format(host))

    # Delete dedicated host
    compute_client.dedicated_hosts.begin_delete(
        GROUP_NAME,
        HOST_GROUP_NAME,
        HOST_NAME
    ).result()
    print("Delete dedicated host.\n")

    # Delete dedicated host group
    compute_client.dedicated_host_groups.delete(
        GROUP_NAME,
        HOST_GROUP_NAME
    )
    print("Delete dedicated host group.\n")

    # Delete Group
    resource_client.resource_groups.begin_delete(
        GROUP_NAME
    ).result()


import time
from datetime import datetime, timedelta

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.datafactory import DataFactoryManagementClient
from azure.mgmt.datafactory.models import (
    Factory, SecureString, AzureStorageLinkedService, AzureBlobDataset,
    LinkedServiceReference, DatasetReference, BlobSource, BlobSink,
    CopyActivity, PipelineResource)


def main():

    # Azure subscription ID
    subscription_id = '<subscription ID>'

    # This program creates this resource group. If it's an existing resource group, comment out the code that creates the resource group
    rg_name = 'ADFTutorialResourceGroup'

    # The data factory name. It must be globally unique.
    df_name = 'myadf-17'

    # Specify your Active Directory client ID, client secret, and tenant ID
    credentials = ServicePrincipalCredentials(
        client_id='<Active Directory application/client ID>',
        secret='<client secret>',
        tenant='<Active Directory tenant ID>')
    resource_client = ResourceManagementClient(credentials, subscription_id)
    adf_client = DataFactoryManagementClient(credentials, subscription_id)

    rg_params = {'location': 'eastus'}
    df_params = {'location': 'eastus'}

    # create the resource group
    # comment out if the resource group already exits
    resource_client.resource_groups.create_or_update(rg_name, rg_params)

    #Create a data factory
    df_resource = Factory(location='eastus')
    df = adf_client.factories.create_or_update(rg_name, df_name, df_resource)
    print_item(df)
    while df.provisioning_state != 'Succeeded':
        df = adf_client.factories.get(rg_name, df_name)
        time.sleep(1)

    # Create an Azure Storage linked service
    ls_name = 'storageLinkedService'

    # IMPORTANT: specify the name and key of your Azure Storage account.
    storage_string = SecureString(
        value='DefaultEndpointsProtocol=https;AccountName=<storage account name>;'
              'AccountKey=<storage account key>')

    ls_azure_storage = AzureStorageLinkedService(
        connection_string=storage_string)
    ls = adf_client.linked_services.create_or_update(rg_name, df_name, ls_name,
                                                     ls_azure_storage)
    print_item(ls)

    # Create an Azure blob dataset (input)
    ds_name = 'ds_in'
    ds_ls = LinkedServiceReference(reference_name=ls_name)
    blob_path = 'adfv2tutorial/input'
    blob_filename = 'input.txt'
    ds_azure_blob = AzureBlobDataset(linked_service_name=ds_ls,
                                     folder_path=blob_path,
                                     file_name=blob_filename)
    ds = adf_client.datasets.create_or_update(rg_name, df_name, ds_name,
                                              ds_azure_blob)
    print_item(ds)

    # Create an Azure blob dataset (output)
    dsOut_name = 'ds_out'
    output_blobpath = 'adfv2tutorial/output'
    dsOut_azure_blob = AzureBlobDataset(linked_service_name=ds_ls,
                                        folder_path=output_blobpath)
    dsOut = adf_client.datasets.create_or_update(rg_name, df_name, dsOut_name,
                                                 dsOut_azure_blob)
    print_item(dsOut)

    # Create a copy activity
    act_name = 'copyBlobtoBlob'
    blob_source = BlobSource()
    blob_sink = BlobSink()
    dsin_ref = DatasetReference(reference_name=ds_name)
    dsOut_ref = DatasetReference(reference_name=dsOut_name)
    copy_activity = CopyActivity(name=act_name,
                                 inputs=[dsin_ref],
                                 outputs=[dsOut_ref],
                                 source=blob_source,
                                 sink=blob_sink)

    #Create a pipeline with the copy activity
    p_name = 'copyPipeline'
    params_for_pipeline = {}
    p_obj = PipelineResource(activities=[copy_activity],
                             parameters=params_for_pipeline)
    p = adf_client.pipelines.create_or_update(rg_name, df_name, p_name, p_obj)
    print_item(p)

    #Create a pipeline run.
    run_response = adf_client.pipelines.create_run(rg_name, df_name, p_name,
                                                   {})

    #Monitor the pipeline run
    time.sleep(30)
    pipeline_run = adf_client.pipeline_runs.get(rg_name, df_name,
                                                run_response.run_id)
    print("\n\tPipeline run status: {}".format(pipeline_run.status))
    activity_runs_paged = list(
        adf_client.activity_runs.list_by_pipeline_run(
            rg_name, df_name, pipeline_run.run_id,
            datetime.now() - timedelta(1),
            datetime.now() + timedelta(1)))
    print_activity_run_details(activity_runs_paged[0])
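
# print_item() here is analogous to the sketch shown after Example 9;
# print_activity_run_details() also comes from the original ADF tutorial and is
# not shown. A minimal sketch, assuming the activity run exposes status, output
# and error as in that tutorial (hypothetical reconstruction):
def print_activity_run_details(activity_run):
    # Summarize a single copy-activity run, including bytes copied or the error.
    print("\n\tActivity run details\n")
    print("\tActivity run status: {}".format(activity_run.status))
    if activity_run.status == 'Succeeded':
        print("\tNumber of bytes read: {}".format(activity_run.output['dataRead']))
        print("\tNumber of bytes written: {}".format(activity_run.output['dataWritten']))
    else:
        print("\tErrors: {}".format(activity_run.error['message']))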