Example #1
 def rm_models(self):
     self.log("Getting resource manager models")
     return ResourceManagementClient.models("2017-05-10")
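 # A hedged usage sketch: the value returned above is the versioned models
 # module for the 2017-05-10 resource API, so a caller would typically do
 # something like the following, assuming that profile exposes ResourceGroup
 # as in the examples further down this page (names here are illustrative):
 #
 #     models = self.rm_models()
 #     params = models.ResourceGroup(location='westus')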
Example #2
def main():
    argument_spec = dict(azure_url=dict(default=AZURE_URL),
                         subscription_id=dict(required=False),
                         client_secret=dict(no_log=True),
                         client_id=dict(),
                         tenant_or_domain=dict(),
                         security_token=dict(aliases=['access_token'],
                                             no_log=True),
                         resource_group_name=dict(required=True),
                         state=dict(default='present',
                                    choices=['present', 'absent']),
                         template=dict(default=None, type='dict'),
                         parameters=dict(default=None, type='dict'),
                         template_link=dict(default=None),
                         parameters_link=dict(default=None),
                         location=dict(default="West US"))

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_link', 'template'],
                            ['parameters_link', 'parameters']],
    )

    if not HAS_DEPS:
        module.fail_json(msg='requests and azure are required for this module')

    conn_info = get_azure_connection_info(module)

    if conn_info['security_token'] is None and \
            (conn_info['client_id'] is None or conn_info['client_secret'] is None or conn_info[
                'tenant_or_domain'] is None):
        module.fail_json(
            msg=
            'security token or client_id, client_secret and tenant_or_domain is required'
        )

    if conn_info['security_token'] is None:
        conn_info['security_token'] = get_token(conn_info['tenant_or_domain'],
                                                conn_info['client_id'],
                                                conn_info['client_secret'])

    if conn_info['security_token'] is None:
        module.fail_json(
            msg=
            'failed to retrieve a security token from Azure Active Directory')

    credentials = BasicTokenAuthentication(
        token={'access_token': conn_info['security_token']})
    subscription_id = module.params.get('subscription_id')
    resource_client = ResourceManagementClient(
        ResourceManagementClientConfiguration(credentials, subscription_id))
    network_client = NetworkManagementClient(
        NetworkManagementClientConfiguration(credentials, subscription_id))
    conn_info['deployment_name'] = 'ansible-arm'

    if module.params.get('state') == 'present':
        deployment = deploy_template(module, resource_client, conn_info)
        data = dict(name=deployment.name,
                    group_name=conn_info['resource_group_name'],
                    id=deployment.id,
                    outputs=deployment.properties.outputs,
                    instances=get_instances(network_client,
                                            conn_info['resource_group_name'],
                                            deployment),
                    changed=True,
                    msg='deployment created')
        module.exit_json(**data)
    else:
        destroy_resource_group(module, resource_client, conn_info)
        module.exit_json(changed=True, msg='deployment deleted')
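# Note: this example uses the older SDK calling convention that wraps the
# credentials in ResourceManagementClientConfiguration /
# NetworkManagementClientConfiguration. Most of the later examples on this page
# pass the credentials and subscription id to the client directly, for example:
#
#     resource_client = ResourceManagementClient(credentials, subscription_id)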
Example #3
def setup_get_request(provider, aggregation, mode):
    """Setup the credentials to access the azure service"""
    credentials = ServicePrincipalCredentials(client_id=args.client,
                                              secret=args.secret,
                                              tenant=args.tenant)

    client = MonitorClient(credentials, args.subscription)

    resource_client = ResourceManagementClient(credentials, args.subscription)

    resource_client.providers.register('Microsoft.Insights')

    # Build the resource ID of the system; it also acts as the endpoint for the metric queries
    resource_id = ('subscriptions/{0}/'
                   'resourceGroups/{1}/'
                   'providers/{2}/{3}').format(args.subscription,
                                               args.resource, provider,
                                               args.hostaddress)

    if args.debug:
        sys.stderr.write("Available Resource Groups:\n")
        for item in resource_client.resource_groups.list():
            print_item(item)

        sys.stderr.write("Available VMs:\n")
        compute_client = ComputeManagementClient(credentials,
                                                 args.subscription)
        for vm in compute_client.virtual_machines.list_all():
            sys.stderr.write("\t{}\n".format(vm.name))

        sys.stderr.write("Available Metric Definitions\n")
        for metric in client.metric_definitions.list(resource_id):
            sys.stderr.write("\t{}: id={}, unit={}\n".format(
                metric.name.localized_value, metric.name.value, metric.unit))


# listing available metrics is not useful as without a filter it only shows
# the first available and not all (as per the docs)
#        print "Available Metrics"
#        for metric in client.metrics.list(resource_id):
#            # azure.monitor.models.MetricDefinition
#            print("\t{}: id={}, unit={}".format(
#                metric.name.localized_value,
#                metric.name.value,
#                metric.unit
#            ))

    end_time = datetime.datetime.utcnow()
    start_time = update_time_state(end_time)
    period = end_time - start_time

    # Setup the call for the data we want
    filter = " and ".join([
        "name.value eq '{}'".format(mode),
        "aggregationType eq '{}'".format(aggregation),
        "startTime eq {}".format(start_time.strftime('%Y-%m-%dT%H:%M:%SZ')),
        "endTime eq {}".format(end_time.strftime('%Y-%m-%dT%H:%M:%SZ')),
        "timeGrain eq duration'PT{}M'".format(
            int(period.total_seconds() / MINUTE_IN_SECONDS))
    ])

    # If we consume the result here for debug output, we have to make another
    # call to fetch the data again; otherwise the iterator is exhausted and
    # nothing is returned to the caller.
    if args.debug:
        metrics_data = client.metrics.list(resource_id, filter=filter)
        sys.stderr.write("Metric filter: " + filter + "\n")
        sys.stderr.write("Metric data returned:\n")
        for metric in metrics_data:
            for data in metric.data:
                sys.stderr.write("\t{}: {}\n".format(data.time_stamp,
                                                     data.total))
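    # A lighter alternative (a sketch, assuming the paged metrics result is an
    # ordinary iterable): materialize it once with list() so it can be both
    # logged and returned, instead of calling the service a second time below:
    #
    #     metrics_data = list(client.metrics.list(resource_id, filter=filter))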

    metrics_data = client.metrics.list(resource_id, filter=filter)

    return metrics_data
Example #4
# Write content to file
def writeCSV(name, content):
    try:
        # Build the Windows-style path and write the content; the context
        # manager closes the file even if the write fails.
        with open("%s\\%s.csv" % (Directory, name), 'w') as csv_file:
            csv_file.write(content)
    except OSError:
        pass


#---------------------------------------------------
#   Resource Groups
#---------------------------------------------------
csvResourceGroup = ""
client = ResourceManagementClient(credentials, subscription_id)
for item in client.resource_groups.list():
    oResourceGroupName = item.name
    oResourceGroupLocation = item.location
    csvResourceGroup += str("%s,%s\n" %
                            (oResourceGroupName, oResourceGroupLocation))
# Write to file
writeCSV('ResourceGroup', csvResourceGroup)

#---------------------------------------------------
#   Network
#---------------------------------------------------
csvNetwork = ""
network_client = NetworkManagementClient(credentials, subscription_id)

for virtualNetwork in network_client.virtual_networks.list_all():
    # The source listing is truncated here; a minimal body following the same
    # pattern as the resource-group loop above would append name and location:
    csvNetwork += str("%s,%s\n" %
                      (virtualNetwork.name, virtualNetwork.location))
Example #5
        *get_package('app'),
        *get_package('HowLong'),
        *get_package('static'),
        *get_package('wheelhouse'),
        (DEPLOY_ROOT / 'requirements.txt', 'requirements.txt'),
        (DEPLOY_ROOT / 'manage.py', 'manage.py'),
        (DEPLOY_ROOT / 'create_test_data.py', 'create_test_data.py'),
        (DEPLOY_ROOT / 'web.config', 'web.config'),
        (DEPLOY_ROOT / 'static.web.config', 'static\\web.config'),
    ] if '__pycache__' not in s.parts]


#################################################
# Create management clients

rc = ResourceManagementClient(credentials=CREDENTIALS,
                              subscription_id=SUBSCRIPTION_ID)

#################################################
# Create a resource group
#
# A resource group contains our entire deployment
# and makes it easy to manage related services.

print("Creating resource group:", RESOURCE_GROUP)

rc.resource_groups.create_or_update(RESOURCE_GROUP,
                                    ResourceGroup(location=LOCATION))

try:

    #################################################
Example #6
def main():
    argument_spec = dict(azure_url=dict(default=AZURE_URL),
                         subscription_id=dict(),
                         client_secret=dict(no_log=True),
                         client_id=dict(required=True),
                         tenant_id=dict(required=True),
                         resource_group_name=dict(required=True),
                         state=dict(default='present',
                                    choices=['present', 'absent']),
                         template=dict(default=None, type='dict'),
                         parameters=dict(default=None, type='dict'),
                         template_link=dict(default=None),
                         parameters_link=dict(default=None),
                         location=dict(default="West US"),
                         deployment_mode=dict(
                             default='Complete',
                             choices=['Complete', 'Incremental']),
                         deployment_name=dict(default="ansible-arm"),
                         wait_for_deployment_completion=dict(default=True),
                         wait_for_deployment_polling_period=dict(default=30))

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_link', 'template'],
                            ['parameters_link', 'parameters']],
    )

    if not HAS_DEPS:
        module.fail_json(msg='requests and azure are required for this module')

    conn_info = get_azure_connection_info(module)

    credentials = ServicePrincipalCredentials(
        client_id=conn_info['client_id'],
        secret=conn_info['client_secret'],
        tenant=conn_info['tenant_id'])

    subscription_id = conn_info['subscription_id']
    resource_configuration = ResourceManagementClientConfiguration(
        credentials, subscription_id)
    resource_configuration.add_user_agent('Ansible-Deploy')
    resource_client = ResourceManagementClient(resource_configuration)
    network_configuration = NetworkManagementClientConfiguration(
        credentials, subscription_id)
    network_configuration.add_user_agent('Ansible-Deploy')
    network_client = NetworkManagementClient(network_configuration)
    conn_info['deployment_name'] = module.params.get('deployment_name')

    if module.params.get('state') == 'present':
        deployment = deploy_template(module, resource_client, conn_info)
        data = dict(name=deployment.name,
                    group_name=conn_info['resource_group_name'],
                    id=deployment.id,
                    outputs=deployment.properties.outputs,
                    instances=get_instances(network_client,
                                            conn_info['resource_group_name'],
                                            deployment),
                    changed=True,
                    msg='deployment created')
        module.exit_json(**data)
    else:
        destroy_resource_group(module, resource_client, conn_info)
        module.exit_json(changed=True, msg='deployment deleted')
Example #7
    def __init__(self,
                 aws_regions,
                 aws_access_key,
                 aws_secret_key,
                 azure_client_id,
                 azure_client_secret,
                 azure_subscription_id,
                 azure_tenant_id,
                 azure_resource_group_names,
                 azure_slow_scale_classes,
                 kubeconfig,
                 idle_threshold,
                 type_idle_threshold,
                 pod_namespace,
                 instance_init_time,
                 cluster_name,
                 notifier,
                 drain_utilization_below=0.0,
                 max_scale_in_fraction=0.1,
                 scale_up=True,
                 maintainance=True,
                 datadog_api_key=None,
                 over_provision=5,
                 dry_run=False):
        if kubeconfig:
            # for using locally
            logger.debug('Using kubeconfig %s', kubeconfig)
            self.api = pykube.HTTPClient(
                pykube.KubeConfig.from_file(kubeconfig))
        else:
            # for using on kube
            logger.debug('Using kube service account')
            self.api = pykube.HTTPClient(
                pykube.KubeConfig.from_service_account())
        if pod_namespace is None:
            self.pod_namespace = pykube.all
        else:
            self.pod_namespace = pod_namespace

        self.drain_utilization_below = drain_utilization_below
        self.max_scale_in_fraction = max_scale_in_fraction
        self._drained = {}
        self.session = None
        if aws_access_key and aws_secret_key:
            self.session = boto3.session.Session(
                aws_access_key_id=aws_access_key,
                aws_secret_access_key=aws_secret_key,
                region_name=aws_regions[0])  # provide a default region
        self.autoscaling_groups = autoscaling_groups.AutoScalingGroups(
            session=self.session,
            regions=aws_regions,
            cluster_name=cluster_name)
        self.autoscaling_timeouts = autoscaling_groups.AutoScalingTimeouts(
            self.session)

        azure_regions = []
        resource_groups = []
        self.azure_client = None
        if azure_client_id:
            azure_credentials = ServicePrincipalCredentials(
                client_id=azure_client_id,
                secret=azure_client_secret,
                tenant=azure_tenant_id)

            # Setup the Azure client
            resource_client = ResourceManagementClient(azure_credentials,
                                                       azure_subscription_id)
            resource_client.providers.register('Microsoft.Compute')
            resource_client.providers.register('Microsoft.Network')
            resource_client.providers.register('Microsoft.Insights')

            region_map = {}
            for resource_group_name in azure_resource_group_names:
                resource_group = resource_client.resource_groups.get(
                    resource_group_name)
                location = resource_group.location
                if location in region_map:
                    logger.fatal(
                        "{} and {} are both in {}. May only have one resource group per region"
                        .format(resource_group_name, region_map[location],
                                location))
                region_map[location] = resource_group_name
                azure_regions.append(location)
                resource_groups.append(resource_group)

            compute_client = ComputeManagementClient(azure_credentials,
                                                     azure_subscription_id)
            compute_client.config.retry_policy.policy = azure.AzureBoundedRetry.from_retry(
                compute_client.config.retry_policy.policy)

            monitor_client = MonitorClient(azure_credentials,
                                           azure_subscription_id)
            monitor_client.config.retry_policy.policy = azure.AzureBoundedRetry.from_retry(
                monitor_client.config.retry_policy.policy)
            self.azure_client = AzureWriteThroughCachedApi(
                AzureWrapper(compute_client, monitor_client, resource_client))

        self.azure_groups = azure.AzureGroups(resource_groups,
                                              azure_slow_scale_classes,
                                              self.azure_client)

        # config
        self.azure_resource_group_names = azure_resource_group_names
        self.azure_regions = azure_regions
        self.aws_regions = aws_regions
        self.idle_threshold = idle_threshold
        self.instance_init_time = instance_init_time
        self.type_idle_threshold = type_idle_threshold
        self.over_provision = over_provision

        self.scale_up = scale_up
        self.maintainance = maintainance

        self.notifier = notifier

        if datadog_api_key:
            datadog.initialize(api_key=datadog_api_key)
            logger.info('Datadog initialized')
        self.stats = datadog.ThreadStats()
        self.stats.start()

        self.dry_run = dry_run
Example #8
def main():
    module = AnsibleModule(
        argument_spec=dict(
            client_id=dict(required=False),
            client_secret=dict(required=False),
            tenant_id=dict(required=False),
            subscription_id=dict(required=False),
            profile=dict(required=False),
            ad_user=dict(required=False),
            password=dict(required=False),
            resource_group_name=dict(required=False),
            resource_url=dict(required=False),
            raw_url=dict(required=False),
            delete=dict(required=False, default=False, type='bool'),
        ),
        # Implementing check-mode using HEAD is impossible, since size/date is not 100% reliable
        supports_check_mode=False,
    )

    creds_params = {}

    if not HAS_ARM:
        module.fail_json(msg='azure python sdk required for this module')

    if module.params['client_id']:
        creds_params['client_id'] = module.params.get('client_id')
    if module.params['client_secret']:
        creds_params['client_secret'] = module.params.get('client_secret')
    if module.params['tenant_id']:
        creds_params['tenant_id'] = module.params.get('tenant_id')
    if module.params['subscription_id']:
        creds_params['subscription_id'] = module.params.get('subscription_id')
    if module.params['profile']:
        profile = module.params.get('profile')
    else:
        profile = None
    if module.params['ad_user']:
        creds_params['ad_user'] = module.params.get('ad_user')
    if module.params['password']:
        creds_params['password'] = module.params.get('password')
    if module.params['resource_group_name']:
        resource_group_name = module.params.get('resource_group_name')
    else:
        resource_group_name = None
    resource_url = module.params.get('resource_url')
    if module.params['raw_url']:
        raw_url = module.params.get('raw_url')
    else:
        raw_url = None
    delete = module.params['delete']
    url_method = 'get'
    #try:

    creds = None

    #authenticate to azure
    if profile:
        path = expanduser("~/.azure/credentials")
        try:
            config = ConfigParser.SafeConfigParser()
            config.read(path)
        except Exception as exc:
            module.fail_json(msg="Failed to access profile " + str(path))
        if not config.has_section(profile):
            module.fail_json(msg="section not found in profile")
        for key, val in config.items(profile):
            creds_params[key] = val

    if 'client_id' in creds_params and 'client_secret' in creds_params:
        endpoint = 'https://login.microsoftonline.com/' + creds_params[
            'tenant_id'] + '/oauth2/token'
        auth_token = get_token_from_client_credentials(
            endpoint=endpoint,
            client_id=creds_params['client_id'],
            client_secret=creds_params['client_secret'],
        )
        creds = ServicePrincipalCredentials(
            client_id=creds_params['client_id'],
            secret=creds_params['client_secret'],
            tenant=creds_params['tenant_id'])

    elif 'ad_user' in creds_params and 'password' in creds_params:
        creds = UserPassCredentials(creds_params['ad_user'],
                                    creds_params['password'])
        auth_token = creds.token['access_token']

    #at this point, we should have creds and a subscription id
    if not creds:
        module.fail_json(
            msg="Unable to login to Azure with the current parameters/options")
    if not creds_params['subscription_id']:
        module.fail_json(
            msg=
            "Unable to select a working Azure subscription given the current parameters/options"
        )

    #construct resource client
    #config = ResourceManagementClientConfiguration(creds, creds_params['subscription_id'])

    resource_client = ResourceManagementClient(
        credentials=creds, subscription_id=creds_params['subscription_id'])

    if resource_url and not resource_group_name:
        module.fail_json(
            msg="resource url was specified but resource_group_name was not")

    #Check rg
    if resource_group_name:
        try:
            rg_list_result = resource_client.resource_groups.get(
                resource_group_name)
            rg_does_exist = 'True'
        except:
            rg_does_exist = 'False'

    #Create RG if necessary
    if resource_url:
        if (rg_does_exist == 'False'):
            module.fail_json(msg="Resoruce group does not exist")

    if raw_url:
        url = "https://management.azure.com" + raw_url
    else:
        url = "https://management.azure.com/subscriptions/" + creds_params[
            'subscription_id'] + "/resourceGroups/" + resource_group_name + "/" + resource_url
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
        'Authorization': 'Bearer ' + auth_token
    }

    class Object(object):
        pass

    returnobj = Object()

    #Check if the resource exists
    result = None
    if delete is False:
        does_exist_request = requests.get(url, headers=headers)
    elif delete is True:
        does_exist_request = requests.delete(url, headers=headers)
    if does_exist_request.status_code in (400, 404):
        does_exist = False
    else:
        does_exist = True

    if (does_exist == False):
        module.exit_json(changed=False,
                         status_code=None,
                         url=url,
                         content=None)

    if (does_exist == True):
        module.exit_json(changed=False,
                         status_code=does_exist_request.status_code,
                         url=url,
                         content=does_exist_request.json())
Example #9
def getResourceClient(spCred, subscription_id):
    return ResourceManagementClient(
        spCred,
        subscription_id
    )
Example #10
    azure.mgmt.network.NetworkManagementClientConfiguration(
        credentials,
        subscription_id
    )
)

storage_client = azure.mgmt.storage.StorageManagementClient(
    azure.mgmt.storage.StorageManagementClientConfiguration(
        credentials,
        subscription_id
    )
)

resource_client = ResourceManagementClient(
    ResourceManagementClientConfiguration(
        credentials,
        subscription_id
    )
)


def create_network_interface(network_client, region, group_name, interface_name,
                             network_name, subnet_name, ip_name):

    result_vnet = network_client.virtual_networks.create_or_update(
        group_name,
        network_name,
        azure.mgmt.network.models.VirtualNetwork(
            location=region,
            address_space=azure.mgmt.network.models.AddressSpace(
                address_prefixes=[
                    '10.1.0.0/16',
Example #11
def register_provider(subscription_id, creds, provider_namespace):
    resource_client = ResourceManagementClient(ResourceManagementClientConfiguration(creds, subscription_id))
    resource_client.providers.register(provider_namespace)
Example #12
#Get azure subscription Id
azure_subscription_command = os.popen('az account list').read()
azure_subscription_list = json.loads(azure_subscription_command)
azure_subscription = azure_subscription_list[0]
azure_subscription_id = azure_subscription["id"]

#input from user
LOCATION = input("Enter Location: ")
GROUP_NAME = input("Enter Resource Group Name: ")
VM_NAME = input("Enter VM Name: ")

#create connection
compute_client = ComputeManagementClient(ca.Get_credentials(),
                                         azure_subscription_id)
resource_client = ResourceManagementClient(ca.Get_credentials(),
                                           azure_subscription_id)
network_client = NetworkManagementClient(ca.Get_credentials(),
                                         azure_subscription_id)

resource_group_list = []
count = 0
for rg in resource_client.resource_groups.list():
    resource_group_list.append(rg.name)

for name in resource_group_list:
    if GROUP_NAME == name:
        count = count + 1
        print(
            "\nResource group name exists. Please provide a different name.\n")
        break
Example #13
 def __init__(self, client_data, location, group_name):
     self.location = location
     self.group_name = group_name
     self.resource_client = ResourceManagementClient(*client_data)
     self._resource_group = None
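 # A sketch of the lazy accessor such a cached attribute typically backs; the
 # property name and the assumption that the group already exists are
 # illustrative, not taken from the original snippet:
 #
 #     @property
 #     def resource_group(self):
 #         if self._resource_group is None:
 #             self._resource_group = self.resource_client.resource_groups.get(
 #                 self.group_name)
 #         return self._resource_group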
Example #14
def main():
    validate_env()
    location = os.getenv('AZURE_LOCATION', 'East US')
    credentials = azure.common.credentials.ServicePrincipalCredentials(
        client_id=os.environ['AZURE_CLIENT_ID'],
        secret=os.environ['AZURE_CLIENT_SECRET'],
        tenant=os.environ['AZURE_TENANT_ID'])
    subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']
    template = TemplateLink(uri=os.environ['AZURE_TEMPLATE_URL'])
    # tenant_id = os.environ.get('AZURE_TENANT_ID')
    # client_id = os.environ.get('AZURE_CLIENT_ID')
    # client_secret = os.environ.get('AZURE_CLIENT_SECRET')
    group_name = 'testing' + ''.join(
        random.choice('01234567890abcdef') for n in range(10))
    deployment_name = 'deployment{}'.format(uuid.uuid4().hex)

    rmc = ResourceManagementClient(credentials, subscription_id)

    template_parameters = get_env_params()

    # Output resource group
    print("Resource group name: {}".format(group_name))
    print("Deployment name: {}".format(deployment_name))

    azure_cluster = {
        'resource_group_name': group_name,
        'deployment_name': deployment_name
    }
    pkgpanda.util.write_json('azure-cluster.json', azure_cluster)

    # Create a new resource group
    print("Creating new resource group in location: {}".format(location))
    if rmc.resource_groups.check_existence(group_name):
        print(
            "ERROR: Group name already exists / taken: {}".format(group_name))
    rmc.resource_groups.create_or_update(group_name,
                                         ResourceGroup(location=location))

    test_successful = False

    try:
        deployment_properties = DeploymentProperties(
            template_link=template,
            mode=DeploymentMode.incremental,
            parameters=template_parameters)

        # Use RPC against azure to validate the ARM template is well-formed
        result = rmc.deployments.validate(group_name,
                                          deployment_name,
                                          properties=deployment_properties)
        if result.error:
            print("Template verification failed\n{}".format(result.error),
                  file=sys.stderr)
            sys.exit(1)

        # Actually create a template deployment
        print("Creating template deployment ...")
        deploy_poller = rmc.deployments.create_or_update(
            group_name, deployment_name, deployment_properties)

        # Stop after 45 attempts (each one takes up to one minute)
        @retry(stop_max_attempt_number=45)
        def poll_deploy():
            res = deploy_poller.result(timeout=60)
            print("Current deploy state: {}".format(
                res.properties.provisioning_state))
            assert deploy_poller.done(), "Not done deploying."

        print("Waiting for template to deploy ...")
        try:
            poll_deploy()
        except:
            print("Current deploy status:\n{}".format(deploy_poller.result(0)))
            raise
        print("Template deployed successfully")

        assert deploy_poller.done(
        ), "Deployment failed / polling didn't reach deployment done."
        deployment_result = deploy_poller.result()
        print(deployment_result.properties.outputs)
        master_lb = deployment_result.properties.outputs['masterFQDN']['value']

        print(
            "Template deployed using SSH private key: https://mesosphere.onelogin.com/notes/18444"
        )
        print(
            "For troubleshooting, master0 can be reached using: ssh -p 2200 {}@{}"
            .format(get_value('linuxAdminUsername'), master_lb))

        # Run test now, so grab IPs
        nmc = NetworkManagementClient(credentials, subscription_id)
        ip_buckets = {'master': [], 'private': [], 'public': []}

        for resource in rmc.resource_groups.list_resources(
                group_name,
                filter=
            ("resourceType eq 'Microsoft.Network/networkInterfaces' or "
             "resourceType eq 'Microsoft.Compute/virtualMachineScaleSets'")):
            if resource.type == 'Microsoft.Network/networkInterfaces':
                nics = [nmc.network_interfaces.get(group_name, resource.name)]
            elif resource.type == 'Microsoft.Compute/virtualMachineScaleSets':
                nics = list(
                    nmc.network_interfaces.
                    list_virtual_machine_scale_set_network_interfaces(
                        virtual_machine_scale_set_name=resource.name,
                        resource_group_name=group_name))
            else:
                raise Exception('Unexpected resourceType: {}'.format(resource.type))

            for bucket_name in ip_buckets.keys():
                if bucket_name in resource.name:
                    for n in nics:
                        for config in n.ip_configurations:
                            ip_buckets[bucket_name].append(
                                config.private_ip_address)

        print('Detected IP configuration: {}'.format(ip_buckets))

        with SSHTunnel(get_value('linuxAdminUsername'),
                       'ssh_key',
                       master_lb,
                       port=2200) as t:
            integration_test(
                tunnel=t,
                test_dir='/home/{}'.format(get_value('linuxAdminUsername')),
                dcos_dns=ip_buckets['master'][0],
                master_list=ip_buckets['master'],
                agent_list=ip_buckets['private'],
                public_agent_list=ip_buckets['public'],
                provider='azure',
                test_dns_search=False,
                add_env=get_test_config(),
                pytest_dir=os.getenv(
                    'DCOS_PYTEST_DIR',
                    '/opt/mesosphere/active/dcos-integration-test'),
                pytest_cmd=os.getenv('DCOS_PYTEST_CMD',
                                     "py.test -rs -vv -m 'not ccm' ") +
                os.getenv('CI_FLAGS', ''))
        test_successful = True
    except Exception as ex:
        traceback.print_exc()
        print("ERROR: exception {}".format(ex))
        raise
    finally:
        if os.getenv('AZURE_CLEANUP') == 'false':
            print("Cluster must be cleaned up manually")
            print("Cluster details: {}".format(azure_cluster))
        else:
            # Send a delete request
            # TODO(cmaloney): The old code had a retry around this:
            # @retry(wait_exponential_multiplier=1000, wait_exponential_max=60*1000, stop_max_delay=(30*60*1000))
            poller = rmc.resource_groups.delete(group_name)

            # poll for the delete to complete
            print("Deleting resource group: {} ...".format(group_name))

            @retry(wait_fixed=(5 * 1000), stop_max_delay=(60 * 60 * 1000))
            def wait_for_delete():
                assert poller.done(), "Timed out waiting for delete"

            print("Waiting for delete ...")
            wait_for_delete()

            print("Clean up successful")

    if test_successful:
        print("Azure test deployment succeeded")
    else:
        print("ERROR: Azure test deployment failed", file=sys.stderr)
        sys.exit(2)
Example #15
    def __init__(self, azure_config, skip_setup=False):

        self.config = azure_config

        subscription_id = azure_config.subscription_id
        username = azure_config.username
        password = azure_config.password
        group_name = azure_config.group_name
        storage_name = azure_config.storage_name
        virtual_network_name = azure_config.virtual_network_name
        subnet_name = azure_config.subnet_name
        region = azure_config.region

        self.vms = {}

        # 0. Authentication
        credentials = UserPassCredentials(username, password)

        self.resource_client = ResourceManagementClient(
            credentials, subscription_id)
        self.storage_client = StorageManagementClient(credentials,
                                                      subscription_id)
        self.network_client = NetworkManagementClient(credentials,
                                                      subscription_id)
        self.compute_client = ComputeManagementClient(credentials,
                                                      subscription_id)

        if not skip_setup:
            # 1. Create a resource group
            result = self.resource_client.resource_groups.create_or_update(
                group_name,
                ResourceGroup(location=region),
            )

            # 2. Create a storage account
            result = self.storage_client.storage_accounts.create(
                group_name,
                storage_name,
                azure.mgmt.storage.models.StorageAccountCreateParameters(
                    location=region,
                    account_type=azure.mgmt.storage.models.AccountType.
                    standard_lrs,
                ),
            )
            result.wait()

            # 3. Create a virtual network
            result = self.network_client.virtual_networks.create_or_update(
                group_name,
                virtual_network_name,
                azure.mgmt.network.models.VirtualNetwork(
                    location=region,
                    address_space=azure.mgmt.network.models.AddressSpace(
                        address_prefixes=[
                            '10.0.0.0/16',
                        ], ),
                    subnets=[
                        azure.mgmt.network.models.Subnet(
                            name=subnet_name,
                            address_prefix='10.0.0.0/24',
                        ),
                    ],
                ),
            )
            result.wait()
Example #16
def run():
    location = os.getenv('AZURE_LOCATION', 'East US')
    credentials = azure.common.credentials.ServicePrincipalCredentials(
        client_id=os.environ['AZURE_CLIENT_ID'],
        secret=os.environ['AZURE_CLIENT_SECRET'],
        tenant=os.environ['AZURE_TENANT_ID'])
    subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']
    template = TemplateLink(uri=os.environ['AZURE_TEMPLATE_URL'])
    # tenant_id = os.environ.get('AZURE_TENANT_ID')
    # client_id = os.environ.get('AZURE_CLIENT_ID')
    # client_secret = os.environ.get('AZURE_CLIENT_SECRET')
    group_name = 'testing' + ''.join(
        random.choice('01234567890abcdef') for n in range(10))
    deployment_name = 'deployment{}'.format(uuid.uuid4().hex)

    rmc = ResourceManagementClient(credentials, subscription_id)

    template_parameters = get_env_params()
    if template_parameters.get('numberOfPrivateSlaves'):
        assert template_parameters['numberOfPrivateSlaves'][
            'value'] >= 2, 'Test requires at least 2 private slaves!'
    else:
        template_parameters['numberOfPrivateSlaves'] = {'value': 2}
    if template_parameters.get('numberOfPublicSlaves'):
        assert template_parameters['numberOfPublicSlaves'][
            'value'] >= 1, 'Test requires at least 1 public slave!'
    else:
        template_parameters['numberOfPublicSlaves'] = {'value': 1}

    # Output resource group
    print("Resource group name: {}".format(group_name))
    print("Deployment name: {}".format(deployment_name))

    azure_cluster = {
        'resource_group_name': group_name,
        'deployment_name': deployment_name
    }
    pkgpanda.util.write_json('azure-cluster.json', azure_cluster)

    # Create a new resource group
    print("Creating new resource group in location: {}".format(location))
    if rmc.resource_groups.check_existence(group_name):
        print(
            "ERROR: Group name already exists / taken: {}".format(group_name))
    rmc.resource_groups.create_or_update(group_name,
                                         ResourceGroup(location=location))

    test_successful = False

    try:
        deployment_properties = DeploymentProperties(
            template_link=template,
            mode=DeploymentMode.incremental,
            parameters=template_parameters)

        # Use RPC against azure to validate the ARM template is well-formed
        result = rmc.deployments.validate(group_name,
                                          deployment_name,
                                          properties=deployment_properties)
        if result.error:
            print("Template verification failed\n{}".format(result.error),
                  file=sys.stderr)
            sys.exit(1)

        # Actually create a template deployment
        print("Creating template deployment ...")
        deploy_poller = rmc.deployments.create_or_update(
            group_name, deployment_name, deployment_properties)

        # Stop after 45 attempts (each one takes up to one minute)
        @retry(stop_max_attempt_number=45)
        def poll_deploy():
            res = deploy_poller.result(timeout=60)
            print("Current deploy state: {}".format(
                res.properties.provisioning_state))
            assert deploy_poller.done(), "Not done deploying."

        print("Waiting for template to deploy ...")
        try:
            poll_deploy()
        except:
            print("Current deploy status:\n{}".format(deploy_poller.result(0)))
            raise
        print("Template deployed successfully")

        assert deploy_poller.done(
        ), "Deployment failed / polling didn't reach deployment done."
        deployment_result = deploy_poller.result()
        print(deployment_result.properties.outputs)
        master_lb = deployment_result.properties.outputs['dnsAddress']['value']
        master_url = "http://{}".format(master_lb)

        print(
            "Template deployed using SSH private key: https://mesosphere.onelogin.com/notes/18444"
        )
        print(
            "For troubleshooting, master0 can be reached using: ssh -p 2200 core@{}"
            .format(master_lb))

        @retry(wait_fixed=(5 * 1000), stop_max_delay=(15 * 60 * 1000))
        def poll_on_dcos_ui_up():
            r = get_dcos_ui(master_url)
            assert r is not None and r.status_code == requests.codes.ok, \
                "Unable to reach DC/OS UI: {}".format(master_url)

        print("Waiting for DC/OS UI at: {} ...".format(master_url))
        poll_on_dcos_ui_up()

        # Run test now, so grab IPs
        nmc = NetworkManagementClient(credentials, subscription_id)
        ip_buckets = {
            'masterNodeNic': [],
            'slavePrivateNic': [],
            'slavePublicNic': []
        }

        for resource in rmc.resource_groups.list_resources(group_name):
            for bucket_name, bucket in ip_buckets.items():
                if resource.name.startswith(bucket_name):
                    nic = nmc.network_interfaces.get(group_name, resource.name)
                    all_ips = []
                    for config in nic.ip_configurations:
                        all_ips.append(config.private_ip_address)
                    bucket.extend(all_ips)

        with closing(SSHTunnel('core', 'ssh_key', master_lb, port=2200)) as t:
            integration_test(
                tunnel=t,
                test_dir='/home/core',
                dcos_dns=master_lb,
                master_list=ip_buckets['masterNodeNic'],
                agent_list=ip_buckets['slavePrivateNic'],
                public_agent_list=ip_buckets['slavePublicNic'],
                provider='azure',
                test_dns_search=False,
                pytest_dir=os.getenv(
                    'DCOS_PYTEST_DIR',
                    '/opt/mesosphere/active/dcos-integration-test'),
                pytest_cmd=os.getenv('DCOS_PYTEST_CMD',
                                     "py.test -vv -m 'not ccm' ") +
                os.getenv('CI_FLAGS', ''))
        test_successful = True
    except Exception as ex:
        print("ERROR: exception {}".format(ex))
        raise
    finally:
        # Send a delete request
        # TODO(cmaloney): The old code had a retry around this:
        # @retry(wait_exponential_multiplier=1000, wait_exponential_max=60*1000, stop_max_delay=(30*60*1000))
        poller = rmc.resource_groups.delete(group_name)

        # poll for the delete to complete
        print("Deleting resource group: {} ...".format(group_name))

        @retry(wait_fixed=(5 * 1000), stop_max_delay=(60 * 60 * 1000))
        def wait_for_delete():
            assert poller.done(), "Timed out waiting for delete"

        print("Waiting for delete ...")
        wait_for_delete()

        print("Clean up successful")

    if test_successful:
        print("Azure test deployment succeeded")
    else:
        print("ERROR: Azure test deployment failed", file=sys.stderr)
        sys.exit(2)
Example #17
 def rm_models(self):
     self.log("Getting resource manager models")
     return ResourceManagementClient.models("2017-05-10")