Example #1
import logging
import os
import json

from azure.common.credentials import ServicePrincipalCredentials

import azure.functions as func
from azure.mgmt.resource import ResourceManagementClient

credentials = ServicePrincipalCredentials(
    client_id='5d08da8e-d80e-4d63-a215-562dd40464b7',  # os.environ['AZURE_CLIENT_ID']
    secret='u4O4CAwwNMnz/m?.g3Yuctg]BiPkY3fe',  # os.environ['AZURE_CLIENT_SECRET']
    tenant='2dfb2f0b-4d21-4268-9559-72926144c918'  # os.environ['AZURE_TENANT_ID']
)


def main(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')

    try:
        req_body = req.get_json()
    except ValueError:
        return func.HttpResponse(
            "Request body must be valid JSON.", status_code=400)

    subscription_id = req_body['SubscriptionId']
    resource_id = req_body['ResourceId']

    client = ResourceManagementClient(credentials=credentials,
                                      subscription_id=subscription_id)
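    # The original example ends with the client construction above. What follows
    # is a minimal, hedged sketch of how such a handler might continue, assuming
    # the intent is to act on the resource named in ResourceId (here it is
    # deleted purely as an illustration); the api_version value is an assumption
    # and not taken from the original.
    api_version = '2021-04-01'  # assumed API version for the target resource type
    delete_poller = client.resources.delete_by_id(resource_id, api_version)
    delete_poller.wait()

    return func.HttpResponse(
        json.dumps({'deleted': resource_id}),
        mimetype='application/json',
        status_code=200)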
Example #2
def main():
    # # Loading input values
    # print("::debug::Loading input values")
    template_file = os.environ.get("INPUT_ARMTEMPLATE_FILE",
                                   default="arm_deploy.json")
    template_params_file = os.environ.get("INPUT_ARMTEMPLATEPARAMS_FILE",
                                          default="")
    azure_credentials = os.environ.get("INPUT_AZURE_CREDENTIALS", default="{}")
    resource_group = os.environ.get("INPUT_RESOURCE_GROUP", default=None)
    mapped_params = os.environ.get("INPUT_MAPPED_PARAMS", default="{}")
    deployment_mode = os.environ.get("INPUT_DEPLOYMENT_MODE",
                                     default="Incremental")

    repo_name = os.environ.get("GITHUB_REPOSITORY")
    print(repo_name)
    repo_name = repo_name.split('/')
    print(repo_name)
    repo_name = repo_name[0] + '_' + repo_name[1]
    print(repo_name)
    deploy_enum = get_deploy_mode_obj(deployment_mode)
    try:
        azure_credentials = json.loads(azure_credentials)
    except JSONDecodeError:
        print(
            "::error::Please paste output of `az ad sp create-for-rbac --name <your-sp-name> --role contributor --scopes /subscriptions/<your-subscriptionId>/resourceGroups/<your-rg> --sdk-auth` as value of secret variable: AZURE_CREDENTIALS"
        )
        raise AMLConfigurationException(
            f"Incorrect or poorly formed output from azure credentials saved in AZURE_CREDENTIALS secret. See setup in https://github.com/Azure/aml-workspace/blob/master/README.md"
        )

    try:
        mapped_params = json.loads(mapped_params)
    except JSONDecodeError:
        print(
            "::error::Incorrect mapped parameters Format , please put mapped parameters strings like this {\"patToken\":\"${{secrets.PAT_TOKEN}}\", .... }"
        )
        raise AMLConfigurationException(
            f"Incorrect or poorly formed mapped params. See setup in https://github.com/Azure/aml_configure/blob/master/README.md"
        )

    if not resource_group:
        raise AMLConfigurationException(f"A resource group must be provided")
    # Checking provided parameters
    print("::debug::Checking provided parameters")
    required_parameters_provided(
        parameters=azure_credentials,
        keys=["tenantId", "clientId", "clientSecret"],
        message=
        "Required parameter(s) not found in your azure credentials saved in AZURE_CREDENTIALS secret for logging in to the workspace. Please provide a value for the following key(s): "
    )

    # # Loading parameters file
    # print("::debug::Loading parameters file")
    template_file_file_path = os.path.join(".cloud", ".azure", template_file)

    # Mask values
    print("::debug::Masking parameters")
    mask_parameter(parameter=azure_credentials.get("tenantId", ""))
    mask_parameter(parameter=azure_credentials.get("clientId", ""))
    mask_parameter(parameter=azure_credentials.get("clientSecret", ""))
    #mask_parameter(parameter=azure_credentials.get("subscriptionId", ""))

    # Login User on CLI
    tenant_id = azure_credentials.get("tenantId", "")
    service_principal_id = azure_credentials.get("clientId", "")
    service_principal_password = azure_credentials.get("clientSecret", "")
    subscriptionId = azure_credentials.get("subscriptionId", "")

    parameters = get_template_parameters(template_params_file, mapped_params)
    credentials = None
    try:
        credentials = ServicePrincipalCredentials(
            client_id=service_principal_id,
            secret=service_principal_password,
            tenant=tenant_id)
    except Exception as ex:
        raise CredentialsVerificationError(ex)

    client = None
    try:
        client = ResourceManagementClient(credentials, subscriptionId)
    except Exception as ex:
        raise ResourceManagementError(ex)

    template = None
    with open(template_file_file_path, 'r') as template_file_fd:
        template = json.load(template_file_fd)

    deployment_properties = {
        'properties': {
            'mode': deploy_enum,
            'template': template,
            'parameters': parameters
        }
    }

    try:
        validate = client.deployments.validate(resource_group, repo_name,
                                               deployment_properties)
        validate.wait()
    except Exception as ex:
        raise ActionDeploymentError(ex)
    deployment_async_operation = None
    try:
        deployment_async_operation = client.deployments.create_or_update(
            resource_group, repo_name, deployment_properties)
        deployment_async_operation.wait()
    except Exception as ex:
        raise ActionDeploymentError(ex)

    deploy_result = deployment_async_operation.result()
    print(
        f"::set-output name=deployment_parameters::{deploy_result.properties.parameters}"
    )
    print(
        f"::set-output name=deployment_output::{deploy_result.properties.outputs}"
    )
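
A possible shape for the get_deploy_mode_obj helper used above, shown as a sketch only; it assumes the standard DeploymentMode enum from azure-mgmt-resource, and the helper actually shipped with the action may differ:

def get_deploy_mode_obj(deployment_mode):
    # Map the string input ("Incremental" / "Complete") onto the SDK enum,
    # falling back to Incremental for unrecognised values.
    from azure.mgmt.resource.resources.models import DeploymentMode
    modes = {
        "incremental": DeploymentMode.incremental,
        "complete": DeploymentMode.complete,
    }
    return modes.get(deployment_mode.lower(), DeploymentMode.incremental)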
Example #3
    def __init__(self, name=None, configuration="~/.cloudmesh/cloudmesh.yaml"):
        """
        Initializes the provider. The default parameters are read from the
        configuration file that is defined in yaml format.

        :param name: The name of the provider as defined in the yaml file
        :param configuration: The location of the yaml configuration file
        """

        conf = Config(configuration)["cloudmesh"]

        self.user = Config()["cloudmesh"]["profile"]["user"]

        self.spec = conf["cloud"][name]
        self.cloud = name

        cred = self.spec["credentials"]
        self.default = self.spec["default"]
        self.cloudtype = self.spec["cm"]["kind"]
        super().__init__(name, conf)

        VERBOSE(cred, verbose=10)

        if self.cloudtype != 'azure':
            Console.error("This class is meant for azure cloud")

        # ServicePrincipalCredentials related Variables to configure in
        # cloudmesh.yaml file

        # AZURE_APPLICATION_ID = '<Application ID from Azure Active Directory
        # App Registration Process>'

        # AZURE_SECRET_KEY = '<Secret Key from Application configured in
        # Azure>'

        # AZURE_TENANT_ID = '<Directory ID from Azure Active Directory
        # section>'

        credentials = ServicePrincipalCredentials(
            client_id=cred['AZURE_APPLICATION_ID'],
            secret=cred['AZURE_SECRET_KEY'],
            tenant=cred['AZURE_TENANT_ID']
        )

        subscription = cred['AZURE_SUBSCRIPTION_ID']

        # Management Clients
        self.resource_client = ResourceManagementClient(
            credentials, subscription)
        self.compute_client = ComputeManagementClient(
            credentials, subscription)
        self.network_client = NetworkManagementClient(
            credentials, subscription)

        # VMs abbreviation
        self.vms = self.compute_client.virtual_machines
        self.imgs = self.compute_client.virtual_machine_images

        # Azure Resource Group
        self.GROUP_NAME = self.default["resource_group"]

        # Azure Datacenter Region
        self.LOCATION = cred["AZURE_REGION"]

        # NetworkManagementClient related Variables
        self.VNET_NAME = self.default["network"]
        self.SUBNET_NAME = self.default["subnet"]
        self.IP_CONFIG_NAME = self.default["AZURE_VM_IP_CONFIG"]
        self.NIC_NAME = self.default["AZURE_VM_NIC"]

        # Azure VM Storage details
        self.OS_DISK_NAME = self.default["AZURE_VM_DISK_NAME"]
        self.USERNAME = self.default["AZURE_VM_USER"]
        self.PASSWORD = self.default["AZURE_VM_PASSWORD"]
        self.VM_NAME = self.default["AZURE_VM_NAME"]

        # Create or Update Resource group
        self.get_resource_group()
Example #4
class Session(object):

    def __init__(self, subscription_id=None, authorization_file=None,
                 resource=AZURE_PUBLIC_CLOUD.endpoints.active_directory_resource_id):
        """
        :param subscription_id: If provided overrides environment variables.

        """

        self.log = logging.getLogger('custodian.azure.session')
        self._provider_cache = {}
        self.subscription_id_override = subscription_id
        self.credentials = None
        self.subscription_id = None
        self.tenant_id = None
        self.resource_namespace = resource
        self._is_token_auth = False
        self._is_cli_auth = False
        self.authorization_file = authorization_file

    def _initialize_session(self):
        """
        Creates a session using available authentication type.

        Auth priority:
        1. Token Auth
        2. Tenant Auth
        3. Azure CLI Auth

        """

        # Only run once
        if self.credentials is not None:
            return

        tenant_auth_variables = [
            'AZURE_TENANT_ID', 'AZURE_SUBSCRIPTION_ID',
            'AZURE_CLIENT_ID', 'AZURE_CLIENT_SECRET'
        ]
        token_auth_variables = ['AZURE_ACCESS_TOKEN', 'AZURE_SUBSCRIPTION_ID']

        if self.authorization_file:
            self.credentials, self.subscription_id = self.load_auth_file(self.authorization_file)
            self.log.info("Creating session with authorization file")

        elif all(k in os.environ for k in token_auth_variables):
            # Token authentication
            self.credentials = BasicTokenAuthentication(
                token={
                    'access_token': os.environ['AZURE_ACCESS_TOKEN']
                })
            self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']
            self.log.info("Creating session with Token Authentication")
            self._is_token_auth = True

        elif all(k in os.environ for k in tenant_auth_variables):
            # Tenant (service principal) authentication
            self.credentials = ServicePrincipalCredentials(
                client_id=os.environ['AZURE_CLIENT_ID'],
                secret=os.environ['AZURE_CLIENT_SECRET'],
                tenant=os.environ['AZURE_TENANT_ID'],
                resource=self.resource_namespace)
            self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']
            self.tenant_id = os.environ['AZURE_TENANT_ID']
            self.log.info("Creating session with Service Principal Authentication")

        else:
            # Azure CLI authentication
            self._is_cli_auth = True
            (self.credentials,
             self.subscription_id,
             self.tenant_id) = Profile().get_login_credentials(
                resource=self.resource_namespace)
            self.log.info("Creating session with Azure CLI Authentication")

        # Let provided id parameter override everything else
        if self.subscription_id_override is not None:
            self.subscription_id = self.subscription_id_override

        self.log.info("Session using Subscription ID: %s" % self.subscription_id)

        if self.credentials is None:
            self.log.error('Unable to locate credentials for Azure session.')

    def client(self, client):
        self._initialize_session()
        service_name, client_name = client.rsplit('.', 1)
        svc_module = importlib.import_module(service_name)
        klass = getattr(svc_module, client_name)
        return klass(self.credentials, self.subscription_id)

    def get_credentials(self):
        self._initialize_session()
        return self.credentials

    def resource_api_version(self, resource_id):
        """ latest non-preview api version for resource """

        namespace = ResourceIdParser.get_namespace(resource_id)
        resource_type = ResourceIdParser.get_resource_type(resource_id)

        if resource_type in self._provider_cache:
            return self._provider_cache[resource_type]

        resource_client = self.client('azure.mgmt.resource.ResourceManagementClient')
        provider = resource_client.providers.get(namespace)

        rt = next((t for t in provider.resource_types
            if t.resource_type == str(resource_type).split('/')[-1]), None)
        if rt and rt.api_versions:
            versions = [v for v in rt.api_versions if 'preview' not in v.lower()]
            api_version = versions[0] if versions else rt.api_versions[0]
            self._provider_cache[resource_type] = api_version
            return api_version

    def get_tenant_id(self):
        self._initialize_session()
        if self._is_token_auth:
            decoded = jwt.decode(self.credentials['token']['access_token'], verify=False)
            return decoded['tid']

        return self.tenant_id

    def get_bearer_token(self):
        self._initialize_session()
        if self._is_cli_auth:
            return self.credentials._token_retriever()[1]
        return self.credentials.token['access_token']

    def load_auth_file(self, path):
        with open(path) as json_file:
            data = json.load(json_file)
            return (ServicePrincipalCredentials(
                client_id=data['credentials']['client_id'],
                secret=data['credentials']['secret'],
                tenant=data['credentials']['tenant']
            ), data['subscription'])

    def get_auth_string(self):
        if type(self.credentials) is not ServicePrincipalCredentials:
            raise NotImplementedError(
                "Writing auth file only supported for Service Principal credentials.")

        auth = {
            'credentials':
                {
                    'client_id': os.environ['AZURE_CLIENT_ID'],
                    'secret': os.environ['AZURE_CLIENT_SECRET'],
                    'tenant': os.environ['AZURE_TENANT_ID']
                },
            'subscription': self.subscription_id
        }

        return json.dumps(auth)
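
A short usage sketch for the Session class above; the resource ID below is a made-up placeholder, and the client path string mirrors the one already used in resource_api_version:

session = Session()
resource_client = session.client('azure.mgmt.resource.ResourceManagementClient')
for group in resource_client.resource_groups.list():
    print(group.name)

example_resource_id = ('/subscriptions/00000000-0000-0000-0000-000000000000'
                       '/resourceGroups/example-rg/providers/Microsoft.Storage'
                       '/storageAccounts/examplestorage')
print(session.resource_api_version(example_resource_id))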
Example #5
def get_credentials():
    credentials = ServicePrincipalCredentials(
        client_id=os.environ.get("AZURE_CLIENT_ID"),
        secret=os.environ.get("AZURE_CLIENT_SECRET"),
        tenant=os.environ.get("AZURE_TENANT_ID"))
    return credentials
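
A minimal usage sketch for the helper above; pairing it with ResourceManagementClient and reading the subscription ID from AZURE_SUBSCRIPTION_ID are assumptions for illustration, not part of the original snippet:

from azure.mgmt.resource import ResourceManagementClient

credentials = get_credentials()
client = ResourceManagementClient(credentials,
                                  os.environ.get("AZURE_SUBSCRIPTION_ID"))
for group in client.resource_groups.list():
    print(group.name)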
Example #6
    def __init__(self,
                 derived_arg_spec,
                 bypass_checks=False,
                 no_log=False,
                 check_invalid_arguments=None,
                 mutually_exclusive=None,
                 required_together=None,
                 required_one_of=None,
                 add_file_common_args=False,
                 supports_check_mode=False,
                 required_if=None,
                 supports_tags=True,
                 facts_module=False,
                 skip_exec=False):

        merged_arg_spec = dict()
        merged_arg_spec.update(AZURE_COMMON_ARGS)
        if supports_tags:
            merged_arg_spec.update(AZURE_TAG_ARGS)

        if derived_arg_spec:
            merged_arg_spec.update(derived_arg_spec)

        merged_required_if = list(AZURE_COMMON_REQUIRED_IF)
        if required_if:
            merged_required_if += required_if

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            bypass_checks=bypass_checks,
            no_log=no_log,
            check_invalid_arguments=check_invalid_arguments,
            mutually_exclusive=mutually_exclusive,
            required_together=required_together,
            required_one_of=required_one_of,
            add_file_common_args=add_file_common_args,
            supports_check_mode=supports_check_mode,
            required_if=merged_required_if)

        if not HAS_PACKAGING_VERSION:
            self.fail(
                "Do you have packaging installed? Try `pip install packaging`"
                "- {0}".format(HAS_PACKAGING_VERSION_EXC))

        if not HAS_MSRESTAZURE:
            self.fail(
                "Do you have msrestazure installed? Try `pip install msrestazure`"
                "- {0}".format(HAS_MSRESTAZURE_EXC))

        if not HAS_AZURE:
            self.fail(
                "Do you have azure>={1} installed? Try `pip install ansible[azure]`"
                "- {0}".format(HAS_AZURE_EXC, AZURE_MIN_RELEASE))

        self._cloud_environment = None
        self._network_client = None
        self._storage_client = None
        self._resource_client = None
        self._compute_client = None
        self._dns_client = None
        self._web_client = None
        self._containerservice_client = None

        self.check_mode = self.module.check_mode
        self.api_profile = self.module.params.get('api_profile')
        self.facts_module = facts_module
        # self.debug = self.module.params.get('debug')

        # authenticate
        self.credentials = self._get_credentials(self.module.params)
        if not self.credentials:
            if HAS_AZURE_CLI_CORE:
                self.fail(
                    "Failed to get credentials. Either pass as parameters, set environment variables, "
                    "define a profile in ~/.azure/credentials, or log in with Azure CLI (`az login`)."
                )
            else:
                self.fail(
                    "Failed to get credentials. Either pass as parameters, set environment variables, "
                    "define a profile in ~/.azure/credentials, or install Azure CLI and log in (`az login`)."
                )

        # cert validation mode precedence: module-arg, credential profile, env, "validate"
        self._cert_validation_mode = self.module.params['cert_validation_mode'] or self.credentials.get('cert_validation_mode') or \
            os.environ.get('AZURE_CERT_VALIDATION_MODE') or 'validate'

        if self._cert_validation_mode not in ['validate', 'ignore']:
            self.fail('invalid cert_validation_mode: {0}'.format(
                self._cert_validation_mode))

        # if cloud_environment specified, look up/build Cloud object
        raw_cloud_env = self.credentials.get('cloud_environment')
        if self.credentials.get(
                'credentials') is not None and raw_cloud_env is not None:
            self._cloud_environment = raw_cloud_env
        elif not raw_cloud_env:
            self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD  # SDK default
        else:
            # try to look up "well-known" values via the name attribute on azure_cloud members
            all_clouds = [
                x[1] for x in inspect.getmembers(azure_cloud)
                if isinstance(x[1], azure_cloud.Cloud)
            ]
            matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
            if len(matched_clouds) == 1:
                self._cloud_environment = matched_clouds[0]
            elif len(matched_clouds) > 1:
                self.fail(
                    "Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'"
                    .format(raw_cloud_env))
            else:
                if not urlparse.urlparse(raw_cloud_env).scheme:
                    self.fail(
                        "cloud_environment must be an endpoint discovery URL or one of {0}"
                        .format([x.name for x in all_clouds]))
                try:
                    self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(
                        raw_cloud_env)
                except Exception as e:
                    self.fail(
                        "cloud_environment {0} could not be resolved: {1}".
                        format(raw_cloud_env, e.message),
                        exception=traceback.format_exc(e))

        if self.credentials.get(
                'subscription_id',
                None) is None and self.credentials.get('credentials') is None:
            self.fail("Credentials did not include a subscription_id value.")
        self.log("setting subscription_id")
        self.subscription_id = self.credentials['subscription_id']

        if self.credentials.get('credentials') is not None:
            # AzureCLI credentials
            self.azure_credentials = self.credentials['credentials']
        elif self.credentials.get('client_id') is not None and \
                self.credentials.get('secret') is not None and \
                self.credentials.get('tenant') is not None:
            self.azure_credentials = ServicePrincipalCredentials(
                client_id=self.credentials['client_id'],
                secret=self.credentials['secret'],
                tenant=self.credentials['tenant'],
                cloud_environment=self._cloud_environment,
                verify=self._cert_validation_mode == 'validate')

        elif self.credentials.get(
                'ad_user') is not None and self.credentials.get(
                    'password') is not None:
            tenant = self.credentials.get('tenant')
            if not tenant:
                tenant = 'common'  # SDK default

            self.azure_credentials = UserPassCredentials(
                self.credentials['ad_user'],
                self.credentials['password'],
                tenant=tenant,
                cloud_environment=self._cloud_environment,
                verify=self._cert_validation_mode == 'validate')
        else:
            self.fail(
                "Failed to authenticate with provided credentials. Some attributes were missing. "
                "Credentials must include client_id, secret and tenant or ad_user and password or "
                "be logged using AzureCLI.")

        # common parameter validation
        if self.module.params.get('tags'):
            self.validate_tags(self.module.params['tags'])

        if not skip_exec:
            res = self.exec_module(**self.module.params)
            self.module.exit_json(**res)
Example #7
    def _authenticate(self):
        try:
            keyvault_client_id = self._auth_params.get('keyvault_client_id')
            keyvault_secret_id = self._auth_params.get('keyvault_secret_id')

            # If user provided KeyVault secret, we will pull auth params information from it
            if keyvault_secret_id:
                self._auth_params.update(
                    json.loads(
                        get_keyvault_secret(keyvault_client_id,
                                            keyvault_secret_id)))

            client_id = self._auth_params.get('client_id')
            client_secret = self._auth_params.get('client_secret')
            access_token = self._auth_params.get('access_token')
            tenant_id = self._auth_params.get('tenant_id')
            use_msi = self._auth_params.get('use_msi')
            subscription_id = self._auth_params.get('subscription_id')

            if access_token and subscription_id:
                log.info("Creating session with Token Authentication")
                self.subscription_id = subscription_id
                self.credentials = BasicTokenAuthentication(
                    token={'access_token': access_token})
                self._is_token_auth = True

            elif client_id and client_secret and tenant_id and subscription_id:
                log.info(
                    "Creating session with Service Principal Authentication")
                self.subscription_id = subscription_id
                self.credentials = ServicePrincipalCredentials(
                    client_id=client_id,
                    secret=client_secret,
                    tenant=tenant_id,
                    resource=self.resource_namespace)
                self.tenant_id = tenant_id

            elif use_msi and subscription_id:
                log.info("Creating session with MSI Authentication")
                self.subscription_id = subscription_id
                if client_id:
                    self.credentials = MSIAuthentication(
                        client_id=client_id, resource=self.resource_namespace)
                else:
                    self.credentials = MSIAuthentication(
                        resource=self.resource_namespace)

            elif self._auth_params.get('enable_cli_auth'):
                log.info("Creating session with Azure CLI Authentication")
                self._is_cli_auth = True
                (self.credentials, self.subscription_id,
                 self.tenant_id) = Profile().get_login_credentials(
                     resource=self.resource_namespace)
            log.info("Session using Subscription ID: %s" %
                     self.subscription_id)

        except AuthenticationError as e:
            log.error('Azure Authentication Failure\n'
                      'Error: {0}'.format(
                          json.dumps(e.inner_exception.error_response,
                                     indent=2)))
            sys.exit(1)
        except HTTPError as e:
            if keyvault_client_id and keyvault_secret_id:
                log.error(
                    'Azure Authentication Failure\n'
                    'Error: Cannot retrieve SP credentials from the Key Vault '
                    '(KV uses MSI to access) with client id: {0}'.format(
                        keyvault_client_id))
            elif use_msi:
                log.error(
                    'Azure Authentication Failure\n'
                    'Error: Could not authenticate using managed service identity {0}'
                    .format(client_id if client_id else '(system identity)'))
            else:
                log.error('Azure Authentication Failure: %s' % e.response)
            sys.exit(1)
        except CLIError as e:
            log.error(
                'Azure Authentication Failure\n'
                'Error: Could not authenticate with Azure CLI credentials: {0}'
                .format(e))
            sys.exit(1)
        except Exception as e:
            log.error('Azure Authentication Failure\n' 'Error: {0}'.format(e))
            sys.exit(1)
Example #8
import time
import requests
import json

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.datafactory import DataFactoryManagementClient

### Variables
subID = 'xxxxxxxx-9999-xxxx-9999-xxxxxxxxxxxx'
tenant = 'xxxxxxxx-9999-xxxx-9999-xxxxxxxxxxxx'
clientId = '99999999-xxxx-9999-xxxx-999999999999'
clientSecret = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'

resourceGroup = 'xxxxxxxxxxxx'
dataFactoryName = 'xxxxxxxxxxxxxxxxxxx'
LinkedServiceName = 'xxxxxxxxxxxx'

### Linked Service
credentials = ServicePrincipalCredentials(client_id=clientId, secret=clientSecret, tenant=tenant)
adf_client = DataFactoryManagementClient(credentials, subID)

#adf_client.linked_services.get(resourceGroup, dataFactoryName, LinkedServiceName)
#adf_client.linked_services.get('IronedgeData', 'IronEdgeDataFactory', 'LabtechMySql')
#adf_client.linked_services.create_or_update(resourceGroup, dataFactoryName, LinkedServiceName, )
#adf_client.pipelines.get(resourceGroup, dataFactoryName, 'LabtechComputersCopyToAzure')

### REST API Method
### Get Linked Service Information
getHeader = {
        'Authorization': 'Bearer '+credentials.token['access_token'], 
        'Content-Type': 'application/json',
        'x-ms-client-request-id': '01'
        }
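
### Example GET call (a hedged sketch, not part of the original script): the ARM
### resource path below follows the documented Microsoft.DataFactory
### "Linked Services - Get" route, but the api-version value is an assumption.
getUrl = ('https://management.azure.com/subscriptions/' + subID +
          '/resourceGroups/' + resourceGroup +
          '/providers/Microsoft.DataFactory/factories/' + dataFactoryName +
          '/linkedservices/' + LinkedServiceName +
          '?api-version=2018-06-01')
response = requests.get(getUrl, headers=getHeader)
print(json.dumps(response.json(), indent=2))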
Example #9
import os

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource import ResourceManagementClient

#--------------------------------------------------------------------------
# variables
#--------------------------------------------------------------------------
AZURE_LOCATION = 'eastus'
RESOURCE_GROUP = "myResourceGroup"
NAT_GATEWAY_NAME = "myNatGateway"
PUBLIC_IP_ADDRESS_NAME = "myPublicIpAddress"
PUBLIC_IPPREFIX_NAME = "myPublicIpprefix"
PUBLIC_IP_PREFIX_NAME = "myPublicIpPrefix"

# The service principal and subscription values are assumed to be supplied via
# environment variables in this excerpt.
CLIENT_ID = os.environ.get('AZURE_CLIENT_ID')
CLIENT_SECRET = os.environ.get('AZURE_CLIENT_SECRET')
TENANT_ID = os.environ.get('AZURE_TENANT_ID')
SUBSCRIPTION_ID = os.environ.get('AZURE_SUBSCRIPTION_ID')

#--------------------------------------------------------------------------
# management clients
#--------------------------------------------------------------------------
credentials = ServicePrincipalCredentials(client_id=CLIENT_ID,
                                          secret=CLIENT_SECRET,
                                          tenant=TENANT_ID)
mgmt_client = NetworkManagementClient(credentials, SUBSCRIPTION_ID)
resource_client = ResourceManagementClient(credentials, SUBSCRIPTION_ID)

#--------------------------------------------------------------------------
# resource group (prerequisite)
#--------------------------------------------------------------------------
print("Creating Resource Group")
resource_client.resource_groups.create_or_update(
    resource_group_name=RESOURCE_GROUP,
    parameters={'location': AZURE_LOCATION})

#--------------------------------------------------------------------------
# /PublicIPAddresses/put/Create public IP address defaults[put]
#--------------------------------------------------------------------------
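# A hedged sketch of the step announced above (not part of the original
# excerpt): with the pre-track-2 azure-mgmt-network SDK implied by
# ServicePrincipalCredentials, create_or_update returns a poller whose result()
# is the created resource; the minimal location-only body is an assumption.
result = mgmt_client.public_ip_addresses.create_or_update(
    resource_group_name=RESOURCE_GROUP,
    public_ip_address_name=PUBLIC_IP_ADDRESS_NAME,
    parameters={'location': AZURE_LOCATION})
print(result.result())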
Example #10
def get_credentials():
    subscription_id = cred.AZURE_SUBSCRIPTION_ID
    credentials = ServicePrincipalCredentials(client_id=cred.AZURE_CLIENT_ID,
                                              secret=cred.AZURE_CLIENT_SECRET,
                                              tenant=cred.AZURE_TENANT_ID)
    return credentials, subscription_id
Example #11
def az_key_vault_connection(az_client_id, az_secret, az_tenant_id):
    credentials = ServicePrincipalCredentials(client_id=az_client_id,
                                              secret=az_secret,
                                              tenant=az_tenant_id)
    client = KeyVaultClient(credentials)
    return client
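
A brief usage sketch for the helper above; the vault URL, secret name, and credential arguments are placeholders, and get_secret with an empty version string (meaning "latest") is the data-plane call exposed by the older azure-keyvault KeyVaultClient assumed here:

client = az_key_vault_connection(az_client_id, az_secret, az_tenant_id)
secret_bundle = client.get_secret('https://myvault.vault.azure.net/',
                                  'my-secret-name', '')
print(secret_bundle.value)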
Example #12
def main(argv):
    # azure python sdk https://github.com/Azure/azure-sdk-for-python/blob/master/doc/batch.rst
    # or https://docs.microsoft.com/en-us/python/api/azure-batch/index?view=azure-python

    input_container_name = _INPUT_FOLDER
    output_container_name = _OUTPUT_FOLDER

    try:
        opts, args = getopt.getopt(argv[1:], 'hi:o:', ['ifolder=', 'ofolder='])
    except getopt.GetoptError:
        print('n4t1-azBatch.py [-h] -i <input folder> -o <output folder>')
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print('n4t1-azBatch.py -i <input folder> -o <output folder>')
            sys.exit(2)
        elif opt in ("-i", "--ifolder"):
            input_container_name = str(arg)
        elif opt in ("-o", "--ofolder"):
            output_container_name = str(arg)

    start_time = datetime.datetime.now().replace(microsecond=0)
    print('Azure Batch start: {}\n'.format(start_time))

    blob_client = azureblob.BlockBlobService(
        account_name=_STORAGE_ACCOUNT_NAME, account_key=_STORAGE_ACCOUNT_KEY)

    input_container_created = blob_client.create_container(
        input_container_name, fail_on_exist=True)

    if not input_container_created:
        print('Error creating input container [{}]. Exiting'.format(
            input_container_name))
        sys.exit(2)

    output_container_created = blob_client.create_container(
        output_container_name, fail_on_exist=True)
    if not output_container_created:
        blob_client.delete_container(input_container_name)
        print(
            'Error creating output container [{}]. Has this job already been run?'
            .format(output_container_name))
        print('Deleted input container [{}] and exiting.'.format(
            input_container_name))
        sys.exit(2)

    # Create a list of all files in the input_container_name directory.
    input_file_paths = []
    script_path = []
    template_path = []

    for folder, subs, files in os.walk('./' + input_container_name + '/'):
        for filename in files:
            if filename.startswith('1'):
                input_file_paths.append(
                    os.path.abspath(os.path.join(folder, filename)))
            elif filename.endswith('.sh'):
                script_path.append(
                    os.path.abspath(os.path.join(folder, filename)))
            elif filename.startswith('t'):
                template_path.append(
                    os.path.abspath(os.path.join(folder, filename)))
            else:
                print(
                    'Error finding upload files. Deleting containers [{}] & [{}] and exiting.'
                    .format(input_container_name, output_container_name))
                sys.exit(2)

    print(
        'Uploading file(s) to container [{}] ...'.format(input_container_name),
        end=' ')
    input_files = [
        upload_file_to_container(blob_client, input_container_name, file_path)
        for file_path in input_file_paths
    ]
    input_files.append(
        upload_file_to_container(blob_client, input_container_name,
                                 script_path[0]))
    input_files.append(
        upload_file_to_container(blob_client, input_container_name,
                                 template_path[0]))
    print('Done\n')

    # Obtain a shared access signature URL that provides write access to the output
    # container to which the tasks will upload their output.
    output_container_sas_url = get_container_sas_url(blob_client,
                                                     output_container_name)

    # Create a Batch service client. We'll now be interacting with the Batch service in addition to Storage
    credentials = ServicePrincipalCredentials(
        client_id=_APPLICATION_ID,
        secret=_APPLICATION_SECRET,
        tenant=_TENANT_ID,
        resource='https://batch.core.windows.net/')
    batch_client = batch.BatchServiceClient(credentials,
                                            batch_url=_BATCH_ACCOUNT_URL)

    try:
        # Create the pool that will contain the compute nodes that will execute the tasks.
        dedicated_node_count = ceil(len(input_file_paths) / _MAX_TASK_PER_NODE)
        create_pool(batch_client, _POOL_ID, _SCALE_INT, _AUTO_SCALE_EVAL_INT,
                    dedicated_node_count)
    except batchmodels.BatchErrorException as err:
        print_batch_exception(err)
        print('Error creating pool [{}]'.format(_POOL_ID))
        blob_client.delete_container(input_container_name)
        blob_client.delete_container(output_container_name)
        print('Deleted containers [{}] & [{}] and exiting'.format(
            input_container_name, output_container_name))
        sys.exit(2)

    try:
        # Create the job that will run the tasks.
        create_job(batch_client, _JOB_ID, _POOL_ID)
    except batchmodels.BatchErrorException as err:
        print_batch_exception(err)
        print('Error creating jobs [{}]'.format(_JOB_ID))
        batch_client.pool.delete(_POOL_ID)
        blob_client.delete_container(input_container_name)
        blob_client.delete_container(output_container_name)
        print('Deleted pool [{}] ...'.format(_POOL_ID))
        print('Deleted containers [{}] & [{}] and exiting'.format(
            input_container_name, output_container_name))
        sys.exit(2)

    try:
        # Add the tasks to the job. Pass the input files and a SAS URL to the storage container for output files.
        add_tasks(batch_client, _JOB_ID, input_files, output_container_sas_url)

        # Add ctrl-c handling
        def signal_handler(sig, frame):
            print()
            print('Ctrl+C pressed!')

            if query_yes_no('Delete storage container [{}]?'.format(
                    input_container_name)) == 'yes':
                blob_client.delete_container(input_container_name)

            if query_yes_no('Delete storage container [{}]?'.format(
                    output_container_name)) == 'yes':
                blob_client.delete_container(output_container_name)

            if query_yes_no('Delete job [{}]?'.format(_JOB_ID)) == 'yes':
                batch_client.job.delete(_JOB_ID)

            if query_yes_no('Delete pool [{}]?'.format(_POOL_ID)) == 'yes':
                batch_client.pool.delete(_POOL_ID)

            sys.exit(0)

        signal.signal(signal.SIGINT, signal_handler)

        # Pause execution until tasks reach Completed state.
        wait_for_tasks_to_complete(batch_client, _JOB_ID,
                                   datetime.timedelta(hours=_TIMEOUT_HR),
                                   blob_client, output_container_name)
    except batchmodels.BatchErrorException as err:
        print_batch_exception(err)
        raise

    # Print out some timing info
    end_time = datetime.datetime.now().replace(microsecond=0)
    print('Batch end: {}'.format(end_time))
    print('Elapsed time: {}'.format(end_time - start_time))

    # Clean up Batch resources (if the user so chooses)
    # if query_yes_no('Delete storage container [{}]?'.format(input_container_name)) == 'yes':
    blob_client.delete_container(input_container_name)

    # if query_yes_no('Delete storage container [{}]?'.format(output_container_name)) == 'yes':
    blob_client.delete_container(output_container_name)

    # if query_yes_no('Delete job [{}]?'.format(_JOB_ID)) == 'yes':
    batch_client.job.delete(_JOB_ID)

    # if query_yes_no('Delete pool [{}]?'.format(_POOL_ID)) == 'yes':
    batch_client.pool.delete(_POOL_ID)
Example #13
def main():

    # Azure subscription ID
    subscription_id = 'fb3980ef-51e7-4f04-9aa0-9f4dae9250d8'

    # This program creates this resource group. If it's an existing resource group, comment out the code that creates the resource group
    rg_name = 'ADFTutorialResourceGroup'

    # The data factory name. It must be globally unique.
    df_name = 'myadf-17'

    # Specify your Active Directory client ID, client secret, and tenant ID
    credentials = ServicePrincipalCredentials(
        client_id='0e7776ac-e1de-418c-a2e9-386e4cd8d9a9',
        secret='DZVuXInE?w1Ft/re.uB5Fr.Y9tTi4sBK',
        tenant='98df3faa-19c6-40a2-8a75-7a046f001fde')
    resource_client = ResourceManagementClient(credentials, subscription_id)
    adf_client = DataFactoryManagementClient(credentials, subscription_id)

    rg_params = {'location': 'eastus'}
    df_params = {'location': 'eastus'}

    # create the resource group
    # comment out if the resource group already exists
    resource_client.resource_groups.create_or_update(rg_name, rg_params)

    #Create a data factory
    df_resource = Factory(location='eastus')
    df = adf_client.factories.create_or_update(rg_name, df_name, df_resource)
    print_item(df)
    while df.provisioning_state != 'Succeeded':
        df = adf_client.factories.get(rg_name, df_name)
        time.sleep(1)

    # Create an Azure Storage linked service
    ls_name = 'storageLinkedService'

    # IMPORTANT: specify the name and key of your Azure Storage account.
    storage_string = SecureString(
        'DefaultEndpointsProtocol=https;AccountName=stacc17;AccountKey=aoulemZGHxahJ+RFGsR/WvWYHBIplL2ecQ4LHfPBjZAPfypphUNCYvZoXqtD7ENIPoZ5v0BfkYwCOvOw5MtS/w=='
    )

    ls_azure_storage = AzureStorageLinkedService(
        connection_string=storage_string)
    ls = adf_client.linked_services.create_or_update(rg_name, df_name, ls_name,
                                                     ls_azure_storage)
    print_item(ls)

    # Create an Azure blob dataset (input)
    ds_name = 'ds_in'
    ds_ls = LinkedServiceReference(reference_name=ls_name)
    blob_path = 'adfv2tutorial/input'
    blob_filename = 'input.txt'
    ds_azure_blob = AzureBlobDataset(linked_service_name=ds_ls,
                                     folder_path=blob_path,
                                     file_name=blob_filename)
    ds = adf_client.datasets.create_or_update(rg_name, df_name, ds_name,
                                              ds_azure_blob)
    print_item(ds)

    # Create an Azure blob dataset (output)
    dsOut_name = 'ds_out'
    output_blobpath = 'adfv2tutorial/output'
    dsOut_azure_blob = AzureBlobDataset(linked_service_name=ds_ls,
                                        folder_path=output_blobpath)
    dsOut = adf_client.datasets.create_or_update(rg_name, df_name, dsOut_name,
                                                 dsOut_azure_blob)
    print_item(dsOut)

    # Create a copy activity
    act_name = 'copyBlobtoBlob'
    blob_source = BlobSource()
    blob_sink = BlobSink()
    dsin_ref = DatasetReference(reference_name=ds_name)
    dsOut_ref = DatasetReference(reference_name=dsOut_name)
    copy_activity = CopyActivity(name=act_name,
                                 inputs=[dsin_ref],
                                 outputs=[dsOut_ref],
                                 source=blob_source,
                                 sink=blob_sink)

    #Create a pipeline with the copy activity
    p_name = 'copyPipeline'
    params_for_pipeline = {}
    p_obj = PipelineResource(activities=[copy_activity],
                             parameters=params_for_pipeline)
    p = adf_client.pipelines.create_or_update(rg_name, df_name, p_name, p_obj)
    print_item(p)

    #Create a pipeline run.
    run_response = adf_client.pipelines.create_run(rg_name, df_name, p_name,
                                                   {})

    #Monitor the pipeline run
    time.sleep(30)
    pipeline_run = adf_client.pipeline_runs.get(rg_name, df_name,
                                                run_response.run_id)
    print("\n\tPipeline run status: {}".format(pipeline_run.status))
    activity_runs_paged = list(
        adf_client.activity_runs.list_by_pipeline_run(
            rg_name, df_name, pipeline_run.run_id,
            datetime.now() - timedelta(1),
            datetime.now() + timedelta(1)))
    print_activity_run_details(activity_runs_paged[0])
Example #14
def setup_get_request(provider, aggregation, mode):
    # check if hostaddress is an IP address.  If it is, resolve to
    # hostname and strip off domain as MS RestAPI only knows names
    hostaddress = args.hostaddress
    if args.debug:
        print("hostaddress is " + hostaddress)
    if isgoodipv4(hostaddress):
        if args.debug:
            print("found IP address")
        hostaddress = socket.getfqdn(hostaddress)

    if args.debug:
        print("hostaddress after any ip lookup " + hostaddress)

    # if hostaddress is a FQDN, strip down to just the hostname
    hostaddress = hostaddress.partition('.')[0]

    if args.debug:
        print("hostaddress is now " + hostaddress)
    """Setup the credentials to access the azure service"""
    credentials = ServicePrincipalCredentials(client_id=args.client,
                                              secret=args.secret,
                                              tenant=args.tenant)

    client = MonitorClient(credentials, args.subscription)

    resource_client = ResourceManagementClient(credentials, args.subscription)

    resource_client.providers.register('Microsoft.Insights')

    # Creating the resource ID of the system also acts as an endpoint
    resource_id = ('subscriptions/{0}/'
                   'resourceGroups/{1}/'
                   'providers/{2}/{3}').format(args.subscription,
                                               args.resource, provider,
                                               hostaddress)

    if args.debug:
        sys.stderr.write("Available Resource Groups:\n")
        for item in resource_client.resource_groups.list():
            print_item(item)

        sys.stderr.write("Available VMs:\n")
        compute_client = ComputeManagementClient(credentials,
                                                 args.subscription)
        for vm in compute_client.virtual_machines.list_all():
            sys.stderr.write("\t{}\n".format(vm.name))

        sys.stderr.write("Available Metric Definitions\n")
        for metric in client.metric_definitions.list(resource_id):
            sys.stderr.write("\t{}: id={}, unit={}\n".format(
                metric.name.localized_value, metric.name.value, metric.unit))


# listing available metrics is not useful as without a filter it only shows
# the first available and not all (as per the docs)
#        print "Available Metrics"
#        for metric in client.metrics.list(resource_id):
#            # azure.monitor.models.MetricDefinition
#            print("\t{}: id={}, unit={}".format(
#                metric.name.localized_value,
#                metric.name.value,
#                metric.unit
#            ))

    end_time = datetime.datetime.utcnow()
    start_time = update_time_state(end_time)
    period = end_time - start_time

    # Setup the call for the data we want
    filter = " and ".join([
        "name.value eq '{}'".format(mode),
        "aggregationType eq '{}'".format(aggregation),
        "startTime eq {}".format(start_time.strftime('%Y-%m-%dT%H:%M:%SZ')),
        "endTime eq {}".format(end_time.strftime('%Y-%m-%dT%H:%M:%SZ')),
        "timeGrain eq duration'PT{}M'".format(
            int(period.total_seconds() / MINUTE_IN_SECONDS))
    ])

    # if we output the info here then we need to make another call to get the data
    # else the iterator uses up all the info and returns nothing to the caller
    if args.debug:
        metrics_data = client.metrics.list(resource_id, filter=filter)
        sys.stderr.write("Metric filter: " + filter + "\n")
        sys.stderr.write("Metric data returned:\n")
        for metric in metrics_data:
            for data in metric.data:
                sys.stderr.write("\t{}: {}\n".format(data.time_stamp,
                                                     data.total))

    metrics_data = client.metrics.list(resource_id, filter=filter)

    return metrics_data
Example #15
class microsoft_azure:
    # This script expects that the following environment vars are set:
    # Tenant ID for your Azure Subscription
    TENANT_ID = 'xxxxxxxxxxxxxxx'  
    my_resource_group = 'xxxxx-xxxx-DataPlatform'            # the resource group for deployment

    # Your Service Principal App ID
    CLIENT = 'xxx-xxxx-xxxx-xxxx-xxxxxxxxx'

    # Your Service Principal Password
    KEY = 'xxxxxxxxxx/xxxxxxxxxxxxxx'
    subscription_id = "xxxxxx-xxxxx-xxxxx-xxxx-xxxxx"

    credentials = ServicePrincipalCredentials(
            client_id = CLIENT,
            secret = KEY,
            tenant = TENANT_ID
            )


    def __init__(self, service):
        self.service = service
    
    """
    Azure Event Hubs is a Big Data streaming platform and event ingestion 
    service, capable of receiving and processing millions of events per second. 
    Event Hubs can process and store events, data, or telemetry produced by 
    distributed software and devices. 

    """
    def eventhubService(self):
        
        sbs = ServiceBusService(service_namespace='xxxxxxxx', 
                        shared_access_key_name='RootManageSharedAccessKey', 
                        shared_access_key_value='xxxxxx+xxxxxx+xxxxxxxxx=')
        
        
    def eventhubSend(test_queue_url):
        try:
            sbs.send_event('samplequeue', json.dumps(data))
        
        except Exception as e:
            print("Failed SQS Send Record {}".format(str(e))  )
            
    
    def eventhubReceiveToFile(test_queue_url):
        # next, we dequeue these messages - 10 messages at a time 
        # (SQS max limit) till the queue is exhausted.
        # in production/real setup, I suggest using long polling as 
        # you get billed for each request, regardless of an empty response
        counter = 0
        filepath = ''

        CONSUMER_GROUP = "$Default"
        OFFSET = Offset("0")
        PARTITION = "0"

        client = EventHubClient('amqps://xxxxx.servicebus.windows.net/txxxxxqueue', 
                                debug=True, 
                                username='******', 
                                password='******')
        receiver = client.add_receiver(CONSUMER_GROUP, 
                                       PARTITION, prefetch=300, offset=OFFSET)
        try:    
            client.run()
            while True:
                for event_data in receiver.receive(timeout=100):
                    rcv_msg = str(event_data.message)
                    #print((rcv_msg))
                    if len(rcv_msg)>=5:
                        if counter!=0 and counter <= 50000:
                            #print(message['Body'])
                            file = open(filepath,'a')
                            file.write(rcv_msg)
                            file.write('\n')
                            # next, we delete the message from the queue so no one else will process it again
                        elif counter == 0:
                            filepath = createfile()
                            # print(filepath)
                            file = open(filepath,'w') 
                        else:
                            filepath = createfile()
                            #print(filepath)
                            counter = 1
                            file = open(filepath,'w') 
                        file.close() 
                        counter = counter + 1
        except Exception as e:
            print("Failed Receiving Record {}".format(str(e)) ) 
        finally:
            client.stop()
    
    
            
    """
    Azure CosmosDB, which is a global distributed and horizontally scalable 
    database with multi-API support and provides low latency responses at 
    potentially massive scale.
    """    
    def cosmosDBServiceToCosmosDB(self):
        
        database_link = 'dbs/' + DATABASE_ID
        collection_link = database_link + '/colls/' + COLLECTION_ID

        counter = 0
        filepath = ''

        CONSUMER_GROUP = "$Default"
        OFFSET = Offset("0")
        PARTITION = "0"
        eh_client = EventHubClient('amqps://xxxxx.servicebus.windows.net/txxxxqueue', 
                                   debug=True, 
                                   username='******', 
                                   password='******')
        receiver = eh_client.add_receiver(CONSUMER_GROUP, PARTITION, 
                                          prefetch=300, offset=OFFSET)
        try:    
            eh_client.run()
            while True:
                for event_data in receiver.receive(timeout=100):
                    rcv_msg = str(event_data.message)
                    # Filter the Null messages
                    if len(rcv_msg)>5:
                        # Load the messages in CosmosDB
                        cosmos_client.CreateDocument(collection_link, 
                                                     json.loads(str(event_data.message)))
   
            eh_client.stop()
        except Exception as e:
            print("Failed Receiving Record {}".format(str(e)) ) 
        finally:
            eh_client.stop()
         
    
    def cosmosDBquery():
        from pydocumentdb import document_client

        uri = 'https://xxxxxxx.documents.azure.com:443/'
        key = 'xxxxxxxxxxxxxx=='

        client = document_client.DocumentClient(uri, {'masterKey': key})
        print(client)

        db_id = 'xxxxStreamDB'
        db_query = "select * from r where r.id = '{0}'".format(db_id)
        db = list(client.QueryDatabases(db_query))[0]
        db_link = db['_self']
        print(db_link)

        coll_id = 'testxxxx'
        coll_query = "select * from r where r.id = '{0}'".format(coll_id)
        coll = list(client.QueryCollections(db_link, coll_query))[0]
        coll_link = coll['_self']
        print(coll_link)

        options = {} 
        options['enableCrossPartitionQuery'] = True
        options['maxItemCount'] = -1
        options['MaxDegreeOfParallelism'] = -1

        #query = { 'query': 'SELECT value count(1) FROM  s' }   
        query = { 'query': 'select value count(1) from s'} 
        docs = client.QueryDocuments(coll_link, query, options)
        print(list(docs))
            
    ## Use this only for Azure AD service-to-service authentication
    from azure.common.credentials import ServicePrincipalCredentials

    ## Use this only for Azure AD end-user authentication
    from azure.common.credentials import UserPassCredentials

    ## Use this only for Azure AD multi-factor authentication
    from msrestazure.azure_active_directory import AADTokenCredentials

    ## Required for Azure Data Lake Store account management
    from azure.mgmt.datalake.store import DataLakeStoreAccountManagementClient
    from azure.mgmt.datalake.store.models import DataLakeStoreAccount

    ## Required for Azure Data Lake Store filesystem management
    from azure.datalake.store import core, lib, multithread

    import os
    from azure.common.credentials import ServicePrincipalCredentials
    from azure.mgmt.resource import ResourceManagementClient
    import azure.mgmt.compute.models


    # Common Azure imports
    from azure.mgmt.resource.resources import ResourceManagementClient
    from azure.mgmt.resource.resources.models import ResourceGroup

    ## Use these as needed for your application
    import logging, getpass, pprint, uuid, time

    import os
    from azure.common.credentials import ServicePrincipalCredentials
    from azure.mgmt.resource import ResourceManagementClient
    import azure.mgmt.compute.models

    # This script expects that the following environment vars are set:
    # Tenant ID for your Azure Subscription
    TENANT_ID = 'xxxxxxxxx'  
    my_resource_group = 'RG-xxxxxxxxx-DataPlatform'            # the resource group for deployment

    # Your Service Principal App ID
    CLIENT = 'xxxxxxxxx-xxxxxxxxx'

    # Your Service Principal Password
    KEY = 'xxxxxxxxx/xxxxxxxxx='
    subscription_id = "xxxxxxxxx-xxxxxxxxx"

    RESOURCE = 'https://datalake.azure.net/'

    credentials = ServicePrincipalCredentials(
            client_id = CLIENT,
            secret = KEY,
            tenant = TENANT_ID
            )

    def dataLake(self):
        adlCreds = lib.auth(tenant_id = TENANT_ID,
                client_secret = KEY,
                client_id = CLIENT,
                resource = RESOURCE)
        ## Declare variables
        subscriptionId = subscription_id
        adlsAccountName = 'datalakearm'

        ## Create a filesystem client object
        adlsFileSystemClient = core.AzureDLFileSystem(adlCreds, 
                                                      store_name=adlsAccountName)
        ## List the existing Data Lake Store accounts
        result_list_response = adlsFileSystemClient.listdir()
        result_list = list(result_list_response)
        for items in result_list:
            print(items)
        
    def dataLakeUpload(bucket_name):
        myfilepath = "DIRECTORY/TO/MY/FILES"
        allfiles = [files for files in sorted(listdir(myfilepath)) 
                    if isfile(join(myfilepath, files))]
        # upload all expect the working one
        for i in range(len(allfiles)-2): 
            #print(myfilepath + allfiles[i])
            fullpath =  myfilepath + allfiles[i]

            datalakepath = '/dataDump/' + allfiles[i]
            print(datalakepath)
            
            try: 
                multithread.ADLUploader(adlsFileSystemClient, 
                        lpath=fullpath, 
                        rpath=datalakepath, nthreads=64, 
                        overwrite=True, buffersize=4194304, blocksize=4194304)
                print("Uploaded: "+ allfiles[i])
                os.unlink(fullpath)
                pass
            except BaseException as e:
                print(e)
        
    def DataLaketoSQLDW(self):
        import pyodbc
        server = 'xxxx.database.windows.net'
        dbname = 'xxxxDW'
        user = '******'
        password = '******'
        port = '1433'
        driver='{ODBC Driver 17 for SQL Server}'

        table = 'xxxx_stream_data'
        file_path = 'adl://xxxxx.azuredatalakestore.net//xxxxx/xxx-'
        cnxn = pyodbc.connect('DRIVER=' + driver + ';SERVER=' + server +
                              ';PORT=' + port + ';DATABASE=' + dbname +
                              ';UID=' + user + ';PWD=' + password)
        cursor = cnxn.cursor()
        sql = "select lang, count(*) from xxx_stream_data;"
        sql = "SELECT lang, COUNT(*) FROM xxxx_stream_data;"
        cursor.execute(sql)
    
        for res in cursor.fetchall():
            print(res)
        
   
    def dataFactory(self):
        ds_name = 'data_in'
        dsOut_name = 'data_out'
        # Create a copy activity
        act_name =  'copydata'
        data_source = AzureDataLakeStoreSource(recursive=True)
        data_sink = SqlDWSink(write_batch_size=1000, write_batch_timeout=None, 
                              sink_retry_count=1, sink_retry_wait=10, 
                              allow_poly_base=False)
        dsin_ref = DatasetReference(ds_name)
        dsOut_ref = DatasetReference(dsOut_name)
        copy_activity = CopyActivity(act_name,inputs=[dsin_ref], 
                                     outputs=[dsOut_ref], source=data_source, 
                                     sink=data_sink)
        #Create a pipeline with the copy activity
        p_name =  'copyPipeline'
        params_for_pipeline = {}
        p_obj = PipelineResource(activities=[copy_activity], parameters=params_for_pipeline)
        p = adf_client.pipelines.create_or_update(rg_name, df_name, p_name, p_obj)
        print_item(p)

        #Create a pipeline run.
        run_response = adf_client.pipelines.create_run(rg_name, df_name, p_name,{})
        pipeline_run = adf_client.pipeline_runs.get(rg_name, 
                                                   df_name, run_response.run_id)
        print("\n\tPipeline run status: {}".format(pipeline_run.status))
        activity_runs_paged = list(adf_client.activity_runs.list_by_pipeline_run(rg_name, 
                                    df_name, pipeline_run.run_id, 
                                    datetime.now() - timedelta(1),  
                                    datetime.now() + timedelta(1)))
        print_activity_run_details(activity_runs_paged[0])
Example #16
0
    def __init__(self, args):
        self._args = args
        self._cloud_environment = None
        self._compute_client = None
        self._resource_client = None
        self._network_client = None
        self._adfs_authority_url = None
        self._resource = None

        self.debug = False
        if args.debug:
            self.debug = True

        self.credentials = self._get_credentials(args)
        if not self.credentials:
            self.fail(
                "Failed to get credentials. Either pass as parameters, set environment variables, "
                "or define a profile in ~/.azure/credentials.")

        # if cloud_environment specified, look up/build Cloud object
        raw_cloud_env = self.credentials.get('cloud_environment')
        if not raw_cloud_env:
            self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD  # SDK default
        else:
            # try to look up "well-known" values via the name attribute on azure_cloud members
            all_clouds = [
                x[1] for x in inspect.getmembers(azure_cloud)
                if isinstance(x[1], azure_cloud.Cloud)
            ]
            matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
            if len(matched_clouds) == 1:
                self._cloud_environment = matched_clouds[0]
            elif len(matched_clouds) > 1:
                self.fail(
                    "Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'"
                    .format(raw_cloud_env))
            else:
                if not urlparse.urlparse(raw_cloud_env).scheme:
                    self.fail(
                        "cloud_environment must be an endpoint discovery URL or one of {0}"
                        .format([x.name for x in all_clouds]))
                try:
                    self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(
                        raw_cloud_env)
                except Exception as e:
                    self.fail(
                        "cloud_environment {0} could not be resolved: {1}".
                        format(raw_cloud_env, str(e)))

        if self.credentials.get('subscription_id', None) is None:
            self.fail("Credentials did not include a subscription_id value.")
        self.log("setting subscription_id")
        self.subscription_id = self.credentials['subscription_id']

        # get authentication authority
        # for adfs, user could pass in authority or not.
        # for others, use default authority from cloud environment
        if self.credentials.get('adfs_authority_url'):
            self._adfs_authority_url = self.credentials.get(
                'adfs_authority_url')
        else:
            self._adfs_authority_url = self._cloud_environment.endpoints.active_directory

        # get resource from cloud environment
        self._resource = self._cloud_environment.endpoints.active_directory_resource_id

        if self.credentials.get('credentials'):
            self.azure_credentials = self.credentials.get('credentials')
        elif self.credentials.get('client_id') and self.credentials.get(
                'secret') and self.credentials.get('tenant'):
            self.azure_credentials = ServicePrincipalCredentials(
                client_id=self.credentials['client_id'],
                secret=self.credentials['secret'],
                tenant=self.credentials['tenant'],
                cloud_environment=self._cloud_environment)

        elif self.credentials.get('ad_user') is not None and \
                self.credentials.get('password') is not None and \
                self.credentials.get('client_id') is not None and \
                self.credentials.get('tenant') is not None:

            self.azure_credentials = self.acquire_token_with_username_password(
                self._adfs_authority_url, self._resource,
                self.credentials['ad_user'], self.credentials['password'],
                self.credentials['client_id'], self.credentials['tenant'])

        elif self.credentials.get(
                'ad_user') is not None and self.credentials.get(
                    'password') is not None:
            tenant = self.credentials.get('tenant')
            if not tenant:
                tenant = 'common'
            self.azure_credentials = UserPassCredentials(
                self.credentials['ad_user'],
                self.credentials['password'],
                tenant=tenant,
                cloud_environment=self._cloud_environment)

        else:
            self.fail(
                "Failed to authenticate with provided credentials. Some attributes were missing. "
                "Credentials must include client_id, secret and tenant or ad_user and password, or "
                "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or "
                "be logged in using AzureCLI.")
Example #17
0
    def __init__(self,
                 aws_regions,
                 aws_access_key,
                 aws_secret_key,
                 ignore_system_pods,
                 azure_client_id,
                 azure_client_secret,
                 azure_subscription_id,
                 azure_tenant_id,
                 azure_resource_group_names,
                 azure_slow_scale_classes,
                 kubeconfig,
                 idle_threshold,
                 type_idle_threshold,
                 pod_namespace,
                 instance_init_time,
                 cluster_name,
                 notifier,
                 scaling_policy_obj,
                 use_aws_iam_role=False,
                 drain_utilization_below=0.0,
                 max_scale_in_fraction=0.1,
                 scale_up=True,
                 maintainance=True,
                 datadog_api_key=None,
                 over_provision=5,
                 dry_run=False):
        if kubeconfig:
            # for using locally
            logger.debug('Using kubeconfig %s', kubeconfig)
            self.api = pykube.HTTPClient(
                pykube.KubeConfig.from_file(kubeconfig))
        else:
            # for using on kube
            logger.debug('Using kube service account')
            self.api = pykube.HTTPClient(
                pykube.KubeConfig.from_service_account())
        if pod_namespace is None:
            self.pod_namespace = pykube.all
        else:
            self.pod_namespace = pod_namespace
        self.ignore_system_pods = ignore_system_pods
        self.drain_utilization_below = drain_utilization_below
        self.max_scale_in_fraction = max_scale_in_fraction
        self.scaling_policy_obj = scaling_policy_obj
        self._drained = {}
        self.session = None
        if aws_access_key and aws_secret_key:
            self.session = boto3.session.Session(
                aws_access_key_id=aws_access_key,
                aws_secret_access_key=aws_secret_key,
                region_name=aws_regions[0])  # provide a default region
        elif use_aws_iam_role is True:
            self.session = boto3.session.Session(
                region_name=aws_regions[0])  # provide a default region
        self.autoscaling_groups = autoscaling_groups.AutoScalingGroups(
            session=self.session,
            regions=aws_regions,
            cluster_name=cluster_name)
        self.autoscaling_timeouts = autoscaling_groups.AutoScalingTimeouts(
            self.session)

        azure_regions = []
        resource_groups = []
        self.azure_client = None
        if azure_client_id:
            azure_credentials = ServicePrincipalCredentials(
                client_id=azure_client_id,
                secret=azure_client_secret,
                tenant=azure_tenant_id)

            # Setup the Azure client
            resource_client = ResourceManagementClient(azure_credentials,
                                                       azure_subscription_id)
            resource_client.providers.register('Microsoft.Compute')
            resource_client.providers.register('Microsoft.Network')
            resource_client.providers.register('Microsoft.Insights')

            region_map = {}
            for resource_group_name in azure_resource_group_names:
                resource_group = resource_client.resource_groups.get(
                    resource_group_name)
                location = resource_group.location
                if location in region_map:
                    logger.fatal(
                        "{} and {} are both in {}. May only have one resource group per region"
                        .format(resource_group_name, region_map[location],
                                location))
                region_map[location] = resource_group_name
                azure_regions.append(location)
                resource_groups.append(resource_group)

            compute_client = ComputeManagementClient(azure_credentials,
                                                     azure_subscription_id)
            compute_client.config.retry_policy.policy = azure.AzureBoundedRetry.from_retry(
                compute_client.config.retry_policy.policy)

            monitor_client = MonitorClient(azure_credentials,
                                           azure_subscription_id)
            monitor_client.config.retry_policy.policy = azure.AzureBoundedRetry.from_retry(
                monitor_client.config.retry_policy.policy)
            self.azure_client = AzureWriteThroughCachedApi(
                AzureWrapper(compute_client, monitor_client, resource_client))

        self.azure_groups = azure.AzureGroups(resource_groups,
                                              azure_slow_scale_classes,
                                              self.azure_client)

        # config
        self.azure_resource_group_names = azure_resource_group_names
        self.azure_regions = azure_regions
        self.aws_regions = aws_regions
        self.idle_threshold = idle_threshold
        self.instance_init_time = instance_init_time
        self.type_idle_threshold = type_idle_threshold
        self.over_provision = over_provision

        self.scale_up = scale_up
        self.maintainance = maintainance

        self.notifier = notifier

        if datadog_api_key:
            datadog.initialize(api_key=datadog_api_key)
            logger.info('Datadog initialized')
        self.stats = datadog.ThreadStats()
        self.stats.start()

        self.dry_run = dry_run
Example #18
0
import argparse
import time

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.datafactory import DataFactoryManagementClient
from azure.mgmt.datafactory.models import Factory

parser = argparse.ArgumentParser()
parser.add_argument("--sub", help="SubscriptionID.", type=str)
parser.add_argument("--cliid", help="ClientID ou AppID.", type=str)
parser.add_argument("--pwd", help="Password.", type=str)
parser.add_argument("--tntid", help="TenantID.", type=str)
parser.add_argument("--basename", help="Prefix for all names.", type=str)
parser.add_argument("--loc", help="Location.", type=str)
args = parser.parse_args()

print('SubscriptionID :' + args.sub)

# Create credentials
credentials = ServicePrincipalCredentials(client_id=args.cliid,
                                          secret=args.pwd,
                                          tenant=args.tntid)
resource_client = ResourceManagementClient(credentials, args.sub)
adf_client = DataFactoryManagementClient(credentials, args.sub)

# Create resources group
#resource_client.resource_groups.create_or_update(args.basename + '-RG', {'location': args.loc})

# Create a data factory
df_resource = Factory(location=args.loc)
df = adf_client.factories.create_or_update(args.basename + '-RG',
                                           args.basename + '-DF', df_resource)
while df.provisioning_state != 'Succeeded':
    df = adf_client.factories.get(args.basename + '-RG', args.basename + '-DF')
    time.sleep(1)
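# Hedged usage sketch (not part of the original example): the script file name is
# hypothetical, the flags match the argparse definitions above.
#
#     python create_data_factory.py --sub <subscription-id> --cliid <app-id> \
#         --pwd <client-secret> --tntid <tenant-id> --basename demo --loc westeurope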
Example #19
0
def get_credentials():
    credentials = ServicePrincipalCredentials(client_id=Client_Id,
                                              secret=Secret,
                                              tenant=Tenant_Id)
    return credentials
Example #20
0
def get_connection():
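    # Hedged note (not in the original snippet): subscriptionid, client_id,
    # client_secret and tenant_id are assumed to be defined elsewhere (for
    # example, loaded from configuration) before this helper is called.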
    subscription_id = subscriptionid
    credentials = ServicePrincipalCredentials(client_id=client_id,
                                              secret=client_secret,
                                              tenant=tenant_id)
    return credentials, subscription_id
Example #21
0
# What it does: Enables connection throttling on an Azure PostgreSQL server to help prevent DoS attacks
# Corresponds with rule D9.AZU.LOG.05
# Usage: AUTO: postgres_enable_connection_throttling
# Limitations: None

from azure.common.credentials import ServicePrincipalCredentials
import logging
import os
from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.rdbms.postgresql.models import Configuration

# Set Azure AD credentials from the environment variables
credentials = ServicePrincipalCredentials(client_id=os.environ['CLIENT_ID'],
                                          secret=os.environ['SECRET'],
                                          tenant=os.environ['TENANT'])


def raise_credentials_error():
    msg = 'Error! Subscription id or credentials are missing.'
    logging.info(f'{__file__} - {msg}')
    return msg


def run_action(credentials, rule, entity, params):
    logging.info(f'{__file__} - {run_action.__name__} started')
    server_name = entity['name']
    subscription_id = entity['accountNumber']
    group_name = entity['resourceGroup']
    logging.info(
        f'{__file__} - subscription_id : {subscription_id} - group_name : {group_name} - server_name : {server_name}')
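    # Hedged continuation sketch: the original example is truncated here. The
    # call below assumes the older azure-mgmt-rdbms ConfigurationsOperations
    # signature (configuration name plus value/source keywords); treat it as
    # illustrative, not as the source's exact code.
    #
    #     client = PostgreSQLManagementClient(credentials, subscription_id)
    #     try:
    #         poller = client.configurations.create_or_update(
    #             group_name, server_name, 'connection_throttling',
    #             value='on', source='user-override')
    #         poller.wait()
    #         msg = f'Connection throttling enabled on server {server_name}'
    #     except CloudError as e:
    #         msg = f'Failed to enable connection throttling: {e}'
    #     logging.info(f'{__file__} - {msg}')
    #     return msg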
Example #22
0
def get_credentials():
    credentials = ServicePrincipalCredentials(client_id=client_id,
                                              secret=secret,
                                              tenant=tenant)
    return credentials
Example #23
0
def run_example():
    """Web Site management example."""
    #
    # Create the Resource Manager Client with an Application (service principal) token provider
    #
    subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']

    credentials = ServicePrincipalCredentials(
        client_id=os.environ['AZURE_CLIENT_ID'],
        secret=os.environ['AZURE_CLIENT_SECRET'],
        tenant=os.environ['AZURE_TENANT_ID'])
    resource_client = ResourceManagementClient(credentials, subscription_id)
    web_client = WebSiteManagementClient(credentials, subscription_id)

    # Register for required namespace
    resource_client.providers.register('Microsoft.Web')

    # Create Resource group
    print('Create Resource Group')
    resource_group_params = {'location': 'westus'}
    print_item(
        resource_client.resource_groups.create_or_update(
            GROUP_NAME, resource_group_params))

    #
    # Create an App Service plan for your WebApp
    #
    print('Create an App Service plan for your WebApp')

    service_plan_async_operation = web_client.app_service_plans.create_or_update(
        GROUP_NAME, SERVER_FARM_NAME,
        AppServicePlan(location=WEST_US,
                       sku=SkuDescription(name='S1',
                                          capacity=1,
                                          tier='Standard')))
    service_plan = service_plan_async_operation.result()
    print_item(service_plan)

    #
    # Create a Site to be hosted on the App Service plan
    #
    print('Create a Site to be hosted on the App Service plan')
    site_async_operation = web_client.web_apps.create_or_update(
        GROUP_NAME, SITE_NAME,
        Site(location=WEST_US, server_farm_id=service_plan.id))
    site = site_async_operation.result()
    print_item(site)

    #
    # List Sites by Resource Group
    #
    print('List Sites by Resource Group')
    for site in web_client.web_apps.list_by_resource_group(GROUP_NAME):
        print_item(site)

    #
    # Get a single Site
    #
    print('Get a single Site')
    site = web_client.web_apps.get(GROUP_NAME, SITE_NAME)
    print_item(site)

    print("Your site and server farm have been created. " \
      "You can now go and visit at http://{}/".format(site.default_host_name))
    input("Press enter to delete the site and server farm.")

    #
    # Delete a Site
    #
    print('Deleting the Site')
    web_client.web_apps.delete(GROUP_NAME, SITE_NAME)

    #
    # Delete the Resource Group
    #
    print('Deleting the resource group')
    delete_async_operation = resource_client.resource_groups.delete(GROUP_NAME)
    delete_async_operation.wait()
Example #24
0
def get_credentials():
    credentials = ServicePrincipalCredentials(
        client_id=os.environ['AZURE_CLIENT_ID'],
        secret=os.environ['AZURE_CLIENT_SECRET'],
        tenant=os.environ['AZURE_TENANT_ID'])
    return credentials
Example #25
0
def run(job, *args, **kwargs):
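    # Hedged note (not in the original snippet): this function appears to run as a
    # CloudBolt-style plugin, so Environment, ARMResourceGroup, set_progress,
    # storage, BlockBlobService and PublicAccess are assumed to be imported by the
    # surrounding module, and the "{{ ... }}" strings are template variables that
    # the platform substitutes before execution.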
    create_custom_fields_as_needed()
    resource = kwargs.get('resource')
    env_id = '{{ env_id }}'
    resource_group = "{{ resource_group }}"
    storage_account = "{{ storage_accounts }}"
    permission = "{{ permissions }}"
    container_name = "{{container_name}}"

    env = Environment.objects.get(id=env_id)
    rh = env.resource_handler.cast()
    location = env.node_location
    set_progress('Location: %s' % location)

    credentials = ServicePrincipalCredentials(
        client_id=rh.client_id,
        secret=rh.secret,
        tenant=rh.tenant_id,
    )
    client = storage.StorageManagementClient(credentials, rh.serviceaccount)

    resource_g = ARMResourceGroup.objects.get(id=resource_group)
    resource_g_name = resource_g.name

    resource.name = container_name
    resource.azure_account_name = storage_account
    resource.azure_container_name = container_name
    resource.resource_group_name = resource_g_name
    resource.azure_location = location
    resource.lifecycle = "ACTIVE"
    resource.azure_rh_id = rh.id
    # Get and save accountkey
    res = client.storage_accounts.list_keys(resource_g_name, storage_account)
    keys = res.keys

    resource.azure_account_key = keys[0].value
    resource.save()

    azure_account_key = resource.azure_account_key

    if azure_account_key:
        block_blob_service = BlockBlobService(account_name=storage_account,
                                              account_key=azure_account_key)
        set_progress(f"Creating container named '{container_name}' ...")

        result = block_blob_service.create_container(container_name.lower())

        if result:
            # PublicAccess.OFF is the default, so only act if a different permission was selected.
            if permission != PublicAccess.OFF:
                set_progress(
                    f"Setting access permissions for '{container_name}'")
                set_progress(permission)
                block_blob_service.set_container_acl(container_name,
                                                     public_access=permission)

            return "SUCCESS", f"'{container_name}' created successfuly", ""
        else:
            return "FAILURE", f"'{container_name}' already exists.", ""

    return "FAILURE", f"You don't have the account key for '{storage_account}'.", ""
Example #26
0
    def _initialize_session(self):
        """
        Creates a session using available authentication type.

        Auth priority:
        1. Token Auth
        2. Tenant Auth
        3. Azure CLI Auth

        """

        # Only run once
        if self.credentials is not None:
            return

        tenant_auth_variables = [
            'AZURE_TENANT_ID', 'AZURE_SUBSCRIPTION_ID',
            'AZURE_CLIENT_ID', 'AZURE_CLIENT_SECRET'
        ]
        token_auth_variables = ['AZURE_ACCESS_TOKEN', 'AZURE_SUBSCRIPTION_ID']

        if self.authorization_file:
            self.credentials, self.subscription_id = self.load_auth_file(self.authorization_file)
            self.log.info("Creating session with authorization file")

        elif all(k in os.environ for k in token_auth_variables):
            # Token authentication
            self.credentials = BasicTokenAuthentication(
                token={
                    'access_token': os.environ['AZURE_ACCESS_TOKEN']
                })
            self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']
            self.log.info("Creating session with Token Authentication")
            self._is_token_auth = True

        elif all(k in os.environ for k in tenant_auth_variables):
            # Tenant (service principal) authentication
            self.credentials = ServicePrincipalCredentials(
                client_id=os.environ['AZURE_CLIENT_ID'],
                secret=os.environ['AZURE_CLIENT_SECRET'],
                tenant=os.environ['AZURE_TENANT_ID'],
                resource=self.resource_namespace)
            self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']
            self.tenant_id = os.environ['AZURE_TENANT_ID']
            self.log.info("Creating session with Service Principal Authentication")

        else:
            # Azure CLI authentication
            self._is_cli_auth = True
            (self.credentials,
             self.subscription_id,
             self.tenant_id) = Profile().get_login_credentials(
                resource=self.resource_namespace)
            self.log.info("Creating session with Azure CLI Authentication")

        # Let provided id parameter override everything else
        if self.subscription_id_override is not None:
            self.subscription_id = self.subscription_id_override

        self.log.info("Session using Subscription ID: %s" % self.subscription_id)

        if self.credentials is None:
            self.log.error('Unable to locate credentials for Azure session.')
Example #27
0
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.containerservice import ContainerServiceClient
from azure.mgmt.containerinstance import ContainerInstanceManagementClient
import os

tenant_id = os.environ.get('TENANT')
application_id = os.environ.get('CLIENT_ID_JENKINS')   
application_secret = os.environ.get('CLIENT_SECRET_JENKINS') 
subscription_id = 'b72ab7b7-723f-4b18-b6f6-03b0f2c6a1bb' # os.environ.get('SUBSCRIPTION_ID')

credentials = ServicePrincipalCredentials(
    client_id = application_id,
    secret = application_secret,
    tenant = tenant_id,
)

container_client = ContainerServiceClient(credentials, subscription_id)
instance_client = ContainerInstanceManagementClient(credentials, subscription_id)

clusters = container_client.managed_clusters.list()

for cluster in clusters:
    print(cluster)