def setUp(self):
        self.sms = ServiceManagementService(
            credentials.getSubscriptionId(),
            credentials.getManagementCertFile())
        set_service_options(self.sms)

        self.storage_account_name = getUniqueName('utstor')
Example #2
    def __init__(self):
        """Main execution path."""
        # Inventory grouped by display group
        self.inventory = {}
        # Index of deployment name -> host
        self.index = {}

        # Read settings and parse CLI arguments
        self.read_settings()
        self.read_environment()
        self.parse_cli_args()

        # Initialize Azure ServiceManagementService
        self.sms = ServiceManagementService(self.subscription_id,
                                            self.cert_path)

        # Cache
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()

        if self.args.list_images:
            data_to_print = self.json_format_dict(self.get_images(), True)
        elif self.args.list:
            # Display list of nodes for inventory
            if len(self.inventory) == 0:
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)

        print(data_to_print)
def deprovision(instance_id):
    """
    Deprovision an existing instance of this service

    DELETE /v2/service_instances/<instance_id>:
        <instance_id> is the Cloud Controller provided
          value used to provision the instance

    return:
        As of API 2.3, an empty JSON document
        is expected
    """
    global subscription_id
    global cert
    global account_name
    global account_key

    if account_name and account_key:
        blob_service = BlobService(account_name, account_key)
        container_name = '{0}-{1}'.format(CONTAINER_NAME_PREFIX, instance_id)
        blob_service.delete_container(container_name)

        if account_name.startswith(STORAGE_ACCOUNT_NAME_PREFIX):
            sms = ServiceManagementService(subscription_id, cert)
            sms.delete_storage_account(account_name)

    return jsonify({})
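For context, here is a minimal client-side sketch of how a Cloud Controller-style caller might exercise the deprovision route above, assuming the broker is a Flask app reachable on localhost:5000; the port, instance id, and absence of authentication are illustrative assumptions, not details from the original source.

import requests

instance_id = 'example-instance-id'  # hypothetical instance id
url = 'http://localhost:5000/v2/service_instances/{0}'.format(instance_id)
resp = requests.delete(url)
print(resp.status_code, resp.json())  # API 2.3 expects an empty JSON document, i.e. {}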
Example #4
def main(argv):
    config = ConfigParser.ConfigParser()
    # config.read([splunkHome + '/etc/apps/oovoo/config/app.conf'])
    config.read('/root/oovoo/config/app.conf')
    sms = ServiceManagementService(config.get('Azure', 'subscription_id'),
                                   config.get('Azure', 'certificate'))
    services = sms.list_hosted_services()
    for oneService in services:
        print('Service name:' + oneService.service_name)

    print('Finish...')
Example #5
    def delete_machine(self, instance_ids, deployment_name=None,
                       cleanup_service=None, **kwargs):
        sms = ServiceManagementService(self.profile.username,
                                       self.cert_path)
        for inst in instance_ids:
            try:
                sms.delete_deployment(service_name=inst,
                                      deployment_name=deployment_name)
                if cleanup_service:
                    sms.delete_hosted_service(service_name=inst)

            except Exception as ex:
                self.log.exception(ex)
                return self.FAIL
Example #6
    def __init__(self):
        """Main execution path."""
        # Inventory grouped by display group
        self.inventory = {}
        # Index of deployment name -> host
        self.index = {}

        # Read settings and parse CLI arguments
        self.read_settings()
        self.read_environment()
        self.parse_cli_args()

        # Initialize Azure ServiceManagementService
        self.sms = ServiceManagementService(self.subscription_id, self.cert_path)

        # Cache
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()

        if self.args.list_images:
            data_to_print = self.json_format_dict(self.get_images(), True)
        elif self.args.list:
            # Display list of nodes for inventory
            if len(self.inventory) == 0:
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)

        print(data_to_print)
Example #7
 def __init__(self, access_key):
     """
     :param access_key: the Azure subscription id
     The management certificate path is set below and passed to the client.
     """
     self.certificate_path = os.path.join('../conf/', 'mycert.cer')
     self.sms = ServiceManagementService(access_key, self.certificate_path)
Example #8
 def generate_azure_service(self, azure_key_id):
     azure_key = self.db.get_object(AzureKey, azure_key_id)
     if self.azure_adapter is not None and self.azure_adapter.subscription_id == azure_key.subscription_id:
         return self.azure_adapter
     self.azure_adapter = ServiceManagementService(
         azure_key.subscription_id, azure_key.pem_url,
         azure_key.management_host)
     return self.azure_adapter
    def setUp(self):
        self.sms = ServiceManagementService(credentials.getSubscriptionId(),
                                            credentials.getManagementCertFile())
        set_service_options(self.sms)

        self.affinity_group_name = getUniqueName('utaffgrp')
        self.hosted_service_name = None
        self.storage_account_name = None
    def setUp(self):
        proxy_host = credentials.getProxyHost()
        proxy_port = credentials.getProxyPort()

        self.sms = ServiceManagementService(credentials.getSubscriptionId(), credentials.getManagementCertFile())
        if proxy_host:
            self.sms.set_proxy(proxy_host, proxy_port)

        self.management_certificate_name = getUniqueNameBasedOnCurrentTime('utmgmtcert')
    def setUp(self):
        self.sms = ServiceManagementService(credentials.getSubscriptionId(),
                                            credentials.getManagementCertFile())

        self.sms.set_proxy(credentials.getProxyHost(),
                           credentials.getProxyPort(),
                           credentials.getProxyUser(),
                           credentials.getProxyPassword())

        self.certificate_thumbprints = []
    def setUp(self):
        self.sms = ServiceManagementService(credentials.getSubscriptionId(),
                                            credentials.getManagementCertFile())

        self.sms.set_proxy(credentials.getProxyHost(),
                           credentials.getProxyPort(),
                           credentials.getProxyUser(),
                           credentials.getProxyPassword())

        self.storage_account_name = getUniqueNameBasedOnCurrentTime('utstorage')
    def setUp(self):
        proxy_host = credentials.getProxyHost()
        proxy_port = credentials.getProxyPort()

        self.sms = ServiceManagementService(credentials.getSubscriptionId(), credentials.getManagementCertFile())
        if proxy_host:
            self.sms.set_proxy(proxy_host, proxy_port)

        self.affinity_group_name = getUniqueNameBasedOnCurrentTime('utaffgrp')
        self.hosted_service_name = None
        self.storage_account_name = None
Example #14
def azure_add_endpoints(name, portConfigs):
    sms = ServiceManagementService(AZURE_SUBSCRIPTION_ID, AZURE_CERTIFICATE)
    role = sms.get_role(name, name, name)

    network_config = role.configuration_sets[0]
    for i, portConfig in enumerate(portConfigs):
        network_config.input_endpoints.input_endpoints.append(
            ConfigurationSetInputEndpoint(
                name=portConfig["service"],
                protocol=portConfig["protocol"],
                port=portConfig["port"],
                local_port=portConfig["local_port"],
                load_balanced_endpoint_set_name=None,
                enable_direct_server_return=True if portConfig["protocol"] == "udp" else False,
                idle_timeout_in_minutes=None if portConfig["protocol"] == "udp" else 4)
        )
    try:
        sms.update_role(name, name, name, network_config=network_config)
    except AzureHttpError as e:
        debug.warn("Exception opening ports for %s: %r" % (name, e))
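A hedged sketch of the portConfigs argument the function above iterates over; the dictionary keys mirror the lookups in the loop body, while the endpoint names and port numbers are illustrative assumptions.

port_configs = [
    {"service": "http", "protocol": "tcp", "port": 80, "local_port": 80},
    {"service": "dns", "protocol": "udp", "port": 53, "local_port": 53},
]
# azure_add_endpoints('my-cloud-service', port_configs)  # assumes the function above is in scope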
 def get_management_service(service, config):
     if service is ServiceManagementService:
         return ServiceManagementService(config['subscription_id'],
                                         config['key_file'])
     else:
         credential_service = ServicePrincipalCredentials(
             client_id=config['app_client_id'],
             secret=config['app_secret'],
             tenant=config['app_tenant'])
         return service(credentials=credential_service,
                        subscription_id=config['subscription_id'])
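A minimal usage sketch for the helper above; the dictionary keys mirror the lookups in the function body, every value is a placeholder rather than a real credential, and the commented ARM-style call is hypothetical.

config = {
    'subscription_id': '<subscription-guid>',
    'key_file': '/path/to/management-cert.pem',
    'app_client_id': '<aad-application-client-id>',
    'app_secret': '<aad-application-secret>',
    'app_tenant': '<aad-tenant-id>',
}
sms = get_management_service(ServiceManagementService, config)  # legacy ASM client
# compute = get_management_service(ComputeManagementClient, config)  # ARM-style client (hypothetical)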
Example #16
 def __build_management_service_instance(self):
     subscription_id = self.subscription_id()
     certificate_filename = self.certificate_filename()
     management_url = self.get_management_url()
     try:
         return ServiceManagementService(subscription_id,
                                         certificate_filename,
                                         management_url)
     except Exception as e:
         raise AzureServiceManagementError('%s: %s' %
                                           (type(e).__name__, format(e)))
Example #17
def azure_add_endpoints(name, portConfigs):
    sms = ServiceManagementService(AZURE_SUBSCRIPTION_ID, AZURE_CERTIFICATE)
    role = sms.get_role(name, name, name)

    network_config = role.configuration_sets[0]
    for i, portConfig in enumerate(portConfigs):
        network_config.input_endpoints.input_endpoints.append(
            ConfigurationSetInputEndpoint(
                name=portConfig["service"],
                protocol=portConfig["protocol"],
                port=portConfig["port"],
                local_port=portConfig["local_port"],
                load_balanced_endpoint_set_name=None,
                enable_direct_server_return=True
                if portConfig["protocol"] == "udp" else False,
                idle_timeout_in_minutes=None
                if portConfig["protocol"] == "udp" else 4))
    try:
        sms.update_role(name, name, name, network_config=network_config)
    except AzureHttpError as e:
        debug.warn("Exception opening ports for %s: %r" % (name, e))
Example #18
    def _deleteVirtualMachines(self, service_name):
        """
        Deletes the VMs in the given cloud service.
        """
        if not self._resource_exists(lambda: self.sms.get_deployment_by_name(service_name, service_name)):
            logger.warn("Deployment %s not found: no VMs to delete.", service_name)
        else:
            logger.info("Attempting to delete deployment %s.", service_name)
            # Get set of role instances before we remove them
            role_instances = self._getRoleInstances(service_name)

            def update_request(request):
                """
                A filter to intercept the HTTP request sent by the ServiceManagementService
                so we can take advantage of a newer feature ('comp=media') in the delete deployment API
                (see http://msdn.microsoft.com/en-us/library/windowsazure/ee460812.aspx)
                """
                hdrs = []
                for name, value in request.headers:
                    if 'x-ms-version' == name:
                        value = '2013-08-01'
                    hdrs.append((name, value))
                request.headers = hdrs
                request.path = request.path + '?comp=media'
                #pylint: disable=W0212
                response = self.sms._filter(request)
                return response

            svc = ServiceManagementService(self.sms.subscription_id, self.sms.cert_file)
            #pylint: disable=W0212
            svc._filter = update_request
            result = svc.delete_deployment(service_name, service_name)
            logger.info("Deployment %s deletion in progress: waiting for delete_deployment operation.", service_name)
            self._wait_for_operation_success(result.request_id)
            logger.info("Deployment %s deletion in progress: waiting for VM disks to be removed.", service_name)
            # Now wait for the disks to disappear
            for role_instance_name in role_instances.keys():
                disk_name = "{0}.vhd".format(role_instance_name)
                self._wait_for_disk_deletion(disk_name)
            logger.info("Deployment %s deleted.", service_name)
Example #19
    def __init__(self, **azure_config):
        """
        :param ServiceManagement azure_client: an instance of the Azure
            service management API client.
        :param String service_name: The name of the cloud service
        :param
            names of Azure volumes to identify cluster
        :returns: A ``BlockDeviceVolume``.
        """
        self._instance_id = self.compute_instance_id()
        self._azure_service_client = ServiceManagementService(
            azure_config['subscription_id'],
            azure_config['management_certificate_path'])
        self._service_name = azure_config['service_name']
        self._azure_storage_client = BlobService(
            azure_config['storage_account_name'],
            azure_config['storage_account_key'])
        self._storage_account_name = azure_config['storage_account_name']
        self._disk_container_name = azure_config['disk_container_name']

        if azure_config['debug']:
            to_file(sys.stdout)
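A sketch of the azure_config mapping the constructor above reads; the keys are taken directly from the dictionary accesses in the code, and all values are placeholders.

azure_config = {
    'subscription_id': '<subscription-guid>',
    'management_certificate_path': '/path/to/management-cert.pem',
    'service_name': 'my-cloud-service',
    'storage_account_name': 'mystorageaccount',
    'storage_account_key': '<storage-account-key>',
    'disk_container_name': 'vhds',
    'debug': False,
}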
Example #20
    def __init__(self):
        """Main execution path."""
        # Inventory grouped by display group
        self.inventory = {}
        # Index of deployment name -> host
        self.index = {}
        self.host_metadata = {}

        # Cache setting defaults.
        # These can be overridden in settings (see `read_settings`).
        cache_dir = os.path.expanduser('~')
        self.cache_path_cache = os.path.join(cache_dir, '.ansible-azure.cache')
        self.cache_path_index = os.path.join(cache_dir, '.ansible-azure.index')
        self.cache_max_age = 0

        # Read settings and parse CLI arguments
        self.read_settings()
        self.read_environment()
        self.parse_cli_args()

        # Initialize Azure ServiceManagementService
        self.sms = ServiceManagementService(self.subscription_id, self.cert_path)

        # Cache
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()

        if self.args.list_images:
            data_to_print = self.json_format_dict(self.get_images(), True)
        elif self.args.list or self.args.host:
            # Display list of nodes for inventory
            if len(self.inventory) == 0:
                data = json.loads(self.get_inventory_from_cache())
            else:
                data = self.inventory

            if self.args.host:
                data_to_print = self.get_host(self.args.host)
            else:
                # Add the `['_meta']['hostvars']` information.
                hostvars = {}
                if len(data) > 0:
                    for host in set([h for hosts in data.values() for h in hosts if h]):
                        hostvars[host] = self.get_host(host, jsonify=False)
                data['_meta'] = {'hostvars': hostvars}

                # JSONify the data.
                data_to_print = self.json_format_dict(data, pretty=True)
        print(data_to_print)
Example #21
def main(argv):
    args = parse_args(argv)
    sms = ServiceManagementService(args.subscription_id, args.certificate_path)
    if args.command == 'delete-unused-disks':
        delete_unused_disks(sms, dry_run=args.dry_run, verbose=args.verbose)
    elif args.command == 'list-services':
        list_services(sms, glob=args.filter, verbose=args.verbose)
    elif args.command == 'delete-services':
        delete_services(sms,
                        glob=args.filter,
                        old_age=args.old_age,
                        dry_run=args.dry_run,
                        verbose=args.verbose)
    return 0
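The argument parser behind main() is not shown; below is a hedged reconstruction inferred from the attributes main() reads (subscription_id, certificate_path, command, filter, old_age, dry_run, verbose). Flag names and default values are assumptions.

import argparse

def parse_args(argv=None):
    parser = argparse.ArgumentParser(description='Azure maintenance commands (sketch).')
    parser.add_argument('command',
                        choices=['delete-unused-disks', 'list-services', 'delete-services'])
    parser.add_argument('subscription_id', help='Azure subscription GUID.')
    parser.add_argument('certificate_path', help='Path to the management certificate (.pem).')
    parser.add_argument('--filter', default='*', help='Glob matched against service names.')
    parser.add_argument('--old-age', dest='old_age', type=int, default=14,
                        help='Age threshold for delete-services (default is an assumption).')
    parser.add_argument('--dry-run', dest='dry_run', action='store_true')
    parser.add_argument('--verbose', action='store_true')
    return parser.parse_args(argv)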
Example #22
def launch_node(node_id, creds, params, init, ssh_username, ssh_private_key):
    # "pip install azure"
    sms = ServiceManagementService(
        subscription_id='69581868-8a08-4d98-a5b0-1d111c616fc3',
        cert_file='/Users/dgriffin/certs/iOSWAToolkit.pem')
    for i in sms.list_os_images():
        print('I is %s -- %s -- %s -- %s' % (i.name, i.label, i.location, i.media_link))
    media_link = \
        'http://opdemandstorage.blob.core.windows.net/communityimages/' + \
        'b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-' + \
        'precise-12_04_2-LTS-amd64-server-20130702-en-us-30GB.vhd'
    config = LinuxConfigurationSet(user_name="ubuntu", user_password="******")
    hard_disk = OSVirtualHardDisk(
        'b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-' +
        'precise-12_04_2-LTS-amd64-server-20130702-en-us-30GB',
        media_link, disk_label='opdemandservice')
    ret = sms.create_virtual_machine_deployment(
        'opdemandservice', 'deploy1', 'production', 'opdemandservice2',
        'opdemandservice3', config, hard_disk)
    # service_name, deployment_name, deployment_slot, label, role_name,
    # system_config, os_virtual_hard_disk
    print('Ret %s' % ret)
    return sms
Example #23
    def from_boot_config(cls, boot_config):
        """A context manager for a AzureAccount.

        It writes the certificate to a temp file because the Azure client
        library requires it, then deletes the temp file when done.
        """
        from azure.servicemanagement import ServiceManagementService
        config = get_config(boot_config)
        with temp_dir() as cert_dir:
            cert_file = os.path.join(cert_dir, 'azure.pem')
            open(cert_file, 'w').write(config['management-certificate'])
            service_client = ServiceManagementService(
                config['management-subscription-id'], cert_file)
            yield cls(service_client)
Example #24
    def create_machine(self, name, region='West US',
                       image=None, role_size='Small',
                       min_count=1, max_count=1,
                       media='storage_url_blob_cloudrunner',
                       username='', password='', ssh_pub_key='',
                       server=CR_SERVER,
                       cleanup=None, **kwargs):
        self.log.info("Registering Azure machine [%s::%s] for [%s]" %
                      (name, image, CR_SERVER))
        try:
            sms = ServiceManagementService(self.profile.username,
                                           self._cert_path)
            server_config = LinuxConfigurationSet('myhostname', 'myuser',
                                                  'mypassword', True)
            media_link = "%s__%s" % (media, name)
            os_hd = OSVirtualHardDisk(image, media_link)

            res = sms.create_virtual_machine_deployment(
                service_name=name,
                deployment_name=name,
                deployment_slot='production',
                label=name,
                role_name=name,
                system_config=server_config,
                os_virtual_hard_disk=os_hd,
                role_size='Small')

            instance_ids = []
            meta = {}
            if not res:
                return self.FAIL, [], {}
            meta['deployment_name'] = name
            meta['cleanup_service'] = cleanup in ['1', 'True', 'true']
            return self.OK, instance_ids, meta
        except Exception as ex:
            self.log.exception(ex)
            return self.FAIL, [], {}
Example #25
    def __init__(self, subscription_id=None, key_file=None, **kwargs):
        """
        subscription_id contains the Azure subscription id
        in the form of a GUID; key_file contains
        the Azure X509 certificate in .pem form.
        """
        self.subscription_id = subscription_id
        self.key_file = key_file
        self.sms = ServiceManagementService(subscription_id, key_file)

        super(AzureNodeDriver, self).__init__(
            self.subscription_id,
            self.key_file,
            secure=True,
            **kwargs)
Example #26
    def __init__(self, subscription_id, pem_filepath):
        raise_if_none(subscription_id, 'subscription_id')
        raise_if_none(pem_filepath, 'pem_filepath')
        raise_if_path_not_exists(pem_filepath)

        self.name = "Azure"
        """Name of the cloud provider"""
        self.azure_subscription_id = subscription_id
        """Subscription ID in which we can consume Azure resources"""
        self.azure_pem_file = pem_filepath
        """Path to PEM file associated with the Azure subscription"""
        self.sms = ServiceManagementService(
            subscription_id=self.azure_subscription_id, cert_file=pem_filepath)
        """ServiceManagementService object for managing all services"""
        self.url = 'https://storage.googleapis.com/sweeper/configs/azure/configs.json'
        """URL to get config data"""
Example #27
    def __get_sms_object(self, hackathon_id):
        """
        Get ServiceManagementService object by Azure account which is related to hackathon_id

        :param hackathon_id: the id of hackathon
        :type hackathon_id: integer

        :return: ServiceManagementService object
        :rtype: class 'azure.servicemanagement.servicemanagementservice.ServiceManagementService'
        """
        hackathon_azure_key = self.db.find_first_object_by(HackathonAzureKey, hackathon_id=hackathon_id)

        if hackathon_azure_key is None:
            self.log.error('Found no azure key with Hackathon:%d' % hackathon_id)
            return None
        sms = ServiceManagementService(hackathon_azure_key.azure_key.subscription_id,
                                       hackathon_azure_key.azure_key.pem_url,
                                       host=hackathon_azure_key.azure_key.management_host)
        return sms
    def __init__(self, **azure_config):
        """
        :param ServiceManagement azure_client: an instance of the Azure
            service management API client.
        :param String service_name: The name of the cloud service
        :param
            names of Azure volumes to identify cluster
        :returns: A ``BlockDeviceVolume``.
        """
        self._instance_id = self.compute_instance_id()
        self._azure_service_client = ServiceManagementService(
            azure_config['subscription_id'],
            azure_config['management_certificate_path'])
        self._service_name = azure_config['service_name']
        self._azure_storage_client = BlobService(
            azure_config['storage_account_name'],
            azure_config['storage_account_key'])
        self._storage_account_name = azure_config['storage_account_name']
        self._disk_container_name = azure_config['disk_container_name']

        if azure_config['debug']:
            to_file(sys.stdout)
Example #29
    def __get_sms_object(self, hackathon_id):
        """
        Get ServiceManagementService object by Azure account which is related to hackathon_id

        :param hackathon_id: the id of hackathon
        :type hackathon_id: integer

        :return: ServiceManagementService object
        :rtype: class 'azure.servicemanagement.servicemanagementservice.ServiceManagementService'
        """
        hackathon_azure_keys = Hackathon.objects(
            id=hackathon_id).first().azure_keys

        if len(hackathon_azure_keys) == 0:
            self.log.error('Found no azure key with Hackathon:%d' %
                           hackathon_id)
            return None

        hackathon_azure_key = hackathon_azure_keys[0]
        sms = ServiceManagementService(
            hackathon_azure_key.subscription_id,
            hackathon_azure_key.get_local_pem_url(),
            host=hackathon_azure_key.management_host)
        return sms
class ManagementCertificateManagementServiceTest(AzureTestCase):

    def setUp(self):
        proxy_host = credentials.getProxyHost()
        proxy_port = credentials.getProxyPort()

        self.sms = ServiceManagementService(credentials.getSubscriptionId(), credentials.getManagementCertFile())
        if proxy_host:
            self.sms.set_proxy(proxy_host, proxy_port)

        self.management_certificate_name = getUniqueNameBasedOnCurrentTime('utmgmtcert')

    def tearDown(self):
        try:
            self.sms.delete_management_certificate(self.management_certificate_name)
        except: pass

    #--Helpers-----------------------------------------------------------------
    def _create_management_certificate(self, thumbprint):
        result = self.sms.add_management_certificate(MANAGEMENT_CERT_PUBLICKEY, thumbprint, MANAGEMENT_CERT_DATA)
        self.assertIsNone(result)

    def _management_certificate_exists(self, thumbprint):
        try:
            props = self.sms.get_management_certificate(thumbprint)
            return props is not None
        except:
            return False

    #--Test cases for management certificates ----------------------------
    def test_list_management_certificates(self):
        # Arrange
        self._create_management_certificate(self.management_certificate_name)

        # Act
        result = self.sms.list_management_certificates()

        # Assert
        self.assertIsNotNone(result)
        self.assertTrue(len(result) > 0)
        
        cert = None
        for temp in result:
            if temp.subscription_certificate_thumbprint == self.management_certificate_name:
                cert = temp
                break

        self.assertIsNotNone(cert)
        self.assertIsNotNone(cert.created)
        self.assertEqual(cert.subscription_certificate_public_key, MANAGEMENT_CERT_PUBLICKEY)
        self.assertEqual(cert.subscription_certificate_data, MANAGEMENT_CERT_DATA)
        self.assertEqual(cert.subscription_certificate_thumbprint, self.management_certificate_name)

    def test_get_management_certificate(self):
        # Arrange
        self._create_management_certificate(self.management_certificate_name)

        # Act
        result = self.sms.get_management_certificate(self.management_certificate_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertIsNotNone(result.created)
        self.assertEqual(result.subscription_certificate_public_key, MANAGEMENT_CERT_PUBLICKEY)
        self.assertEqual(result.subscription_certificate_data, MANAGEMENT_CERT_DATA)
        self.assertEqual(result.subscription_certificate_thumbprint, self.management_certificate_name)

    def test_add_management_certificate(self):
        # Arrange
        public_key = MANAGEMENT_CERT_PUBLICKEY
        data = MANAGEMENT_CERT_DATA

        # Act
        result = self.sms.add_management_certificate(public_key, self.management_certificate_name, data)

        # Assert
        self.assertIsNone(result)
        self.assertTrue(self._management_certificate_exists(self.management_certificate_name))

    def test_delete_management_certificate(self):
        # Arrange
        self._create_management_certificate(self.management_certificate_name)

        # Act
        result = self.sms.delete_management_certificate(self.management_certificate_name)

        # Assert
        self.assertIsNone(result)
        self.assertFalse(self._management_certificate_exists(self.management_certificate_name))
Example #31
# The MIT License (MIT)
#
# Copyright (c) 2015 Taio Jia (jiasir) <*****@*****.**>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from azure.servicemanagement import ServiceManagementService

subscription_id = '7f32b7c7-8622-4070-84d0-1ec5bc64dd8f'
cert_file = '/Users/Taio/Downloads/Microsoft_Azure_credentials.pem'
sms = ServiceManagementService(subscription_id, cert_file)

locations = sms.list_locations()
for location in locations:
    print(location.name)
Example #32
class AzureInventory(object):
    def __init__(self):
        """Main execution path."""
        # Inventory grouped by display group
        self.inventory = {}
        # Index of deployment name -> host
        self.index = {}
        self.host_metadata = {}

        # Cache setting defaults.
        # These can be overridden in settings (see `read_settings`).
        cache_dir = os.path.expanduser('~')
        self.cache_path_cache = os.path.join(cache_dir, '.ansible-azure.cache')
        self.cache_path_index = os.path.join(cache_dir, '.ansible-azure.index')
        self.cache_max_age = 0

        # Read settings and parse CLI arguments
        self.read_settings()
        self.read_environment()
        self.parse_cli_args()

        # Initialize Azure ServiceManagementService
        self.sms = ServiceManagementService(self.subscription_id, self.cert_path)

        # Cache
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()

        if self.args.list_images:
            data_to_print = self.json_format_dict(self.get_images(), True)
        elif self.args.list or self.args.host:
            # Display list of nodes for inventory
            if len(self.inventory) == 0:
                data = json.loads(self.get_inventory_from_cache())
            else:
                data = self.inventory

            if self.args.host:
                data_to_print = self.get_host(self.args.host)
            else:
                # Add the `['_meta']['hostvars']` information.
                hostvars = {}
                if len(data) > 0:
                    for host in set([h for hosts in data.values() for h in hosts if h]):
                        hostvars[host] = self.get_host(host, jsonify=False)
                data['_meta'] = {'hostvars': hostvars}

                # JSONify the data.
                data_to_print = self.json_format_dict(data, pretty=True)
        print(data_to_print)

    def get_host(self, hostname, jsonify=True):
        """Return information about the given hostname, based on what
        the Windows Azure API provides.
        """
        if hostname not in self.host_metadata:
            return "No host found: %s" % json.dumps(self.host_metadata)
        if jsonify:
            return json.dumps(self.host_metadata[hostname])
        return self.host_metadata[hostname]

    def get_images(self):
        images = []
        for image in self.sms.list_os_images():
            if str(image.label).lower().find(self.args.list_images.lower()) >= 0:
                images.append(vars(image))
        return json.loads(json.dumps(images, default=lambda o: o.__dict__))

    def is_cache_valid(self):
        """Determines if the cache file has expired, or if it is still valid."""
        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                if os.path.isfile(self.cache_path_index):
                    return True
        return False

    def read_settings(self):
        """Reads the settings from the .ini file."""
        config = ConfigParser.SafeConfigParser()
        config.read(os.path.dirname(os.path.realpath(__file__)) + '/windows_azure.ini')

        # Credentials related
        if config.has_option('azure', 'subscription_id'):
            self.subscription_id = config.get('azure', 'subscription_id')
        if config.has_option('azure', 'cert_path'):
            self.cert_path = config.get('azure', 'cert_path')

        # Cache related
        if config.has_option('azure', 'cache_path'):
            cache_path = os.path.expandvars(os.path.expanduser(config.get('azure', 'cache_path')))
            self.cache_path_cache = os.path.join(cache_path, 'ansible-azure.cache')
            self.cache_path_index = os.path.join(cache_path, 'ansible-azure.index')
        if config.has_option('azure', 'cache_max_age'):
            self.cache_max_age = config.getint('azure', 'cache_max_age')

    def read_environment(self):
        ''' Reads the settings from environment variables '''
        # Credentials
        if os.getenv("AZURE_SUBSCRIPTION_ID"):
            self.subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
        if os.getenv("AZURE_CERT_PATH"):
            self.cert_path = os.getenv("AZURE_CERT_PATH")

    def parse_cli_args(self):
        """Command line argument processing"""
        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory file based on Azure',
        )
        parser.add_argument('--list', action='store_true', default=True,
                            help='List nodes (default: True)')
        parser.add_argument('--list-images', action='store',
                            help='Get all available images.')
        parser.add_argument('--refresh-cache',
            action='store_true', default=False,
            help='Force refresh of the cache by making API requests to Azure '
                 '(default: False - use cache files)',
        )
        parser.add_argument('--host', action='store',
                            help='Get all information about an instance.')
        self.args = parser.parse_args()

    def do_api_calls_update_cache(self):
        """Do API calls, and save data in cache files."""
        self.add_cloud_services()
        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)

    def add_cloud_services(self):
        """Makes an Azure API call to get the list of cloud services."""
        try:
            for cloud_service in self.sms.list_hosted_services():
                self.add_deployments(cloud_service)
        except WindowsAzureError as e:
            print("Looks like Azure's API is down:")
            print("")
            print(e)
            sys.exit(1)

    def add_deployments(self, cloud_service):
        """Makes an Azure API call to get the list of virtual machines
        associated with a cloud service.
        """
        try:
            for deployment in self.sms.get_hosted_service_properties(cloud_service.service_name,embed_detail=True).deployments.deployments:
                self.add_deployment(cloud_service, deployment)
        except WindowsAzureError as e:
            print("Looks like Azure's API is down:")
            print("")
            print(e)
            sys.exit(1)

    def add_deployment(self, cloud_service, deployment):
        """Adds a deployment to the inventory and index"""
        for role in deployment.role_instance_list.role_instances:
            try:
                # Default port 22 unless port found with name 'SSH'
                port = '22'
                for ie in role.instance_endpoints.instance_endpoints:
                    if ie.name == 'SSH':
                        port = ie.public_port
                        break
            except AttributeError as e:
                pass
            finally:
                self.add_instance(role.instance_name, deployment, port, cloud_service, role.instance_status)

    def add_instance(self, hostname, deployment, ssh_port, cloud_service, status):
        """Adds an instance to the inventory and index"""

        dest = urlparse(deployment.url).hostname

        # Add to index
        self.index[hostname] = deployment.name

        self.host_metadata[hostname] = dict(ansible_ssh_host=dest,
                                            ansible_ssh_port=int(ssh_port),
                                            instance_status=status,
                                            private_id=deployment.private_id)

        # List of all azure deployments
        self.push(self.inventory, "azure", hostname)

        # Inventory: Group by service name
        self.push(self.inventory, self.to_safe(cloud_service.service_name), hostname)

        if int(ssh_port) == 22:
            self.push(self.inventory, "Cloud_services", hostname)

        # Inventory: Group by region
        self.push(self.inventory, self.to_safe(cloud_service.hosted_service_properties.location), hostname)

    def push(self, my_dict, key, element):
        """Pushed an element onto an array that may not have been defined in the dict."""
        if key in my_dict:
            my_dict[key].append(element);
        else:
            my_dict[key] = [element]

    def get_inventory_from_cache(self):
        """Reads the inventory from the cache file and returns it as a JSON object."""
        cache = open(self.cache_path_cache, 'r')
        json_inventory = cache.read()
        return json_inventory

    def load_index_from_cache(self):
        """Reads the index from the cache file and sets self.index."""
        cache = open(self.cache_path_index, 'r')
        json_index = cache.read()
        self.index = json.loads(json_index)

    def write_to_cache(self, data, filename):
        """Writes data in JSON format to a file."""
        json_data = self.json_format_dict(data, True)
        cache = open(filename, 'w')
        cache.write(json_data)
        cache.close()

    def to_safe(self, word):
        """Escapes any characters that would be invalid in an ansible group name."""
        return re.sub(r"[^A-Za-z0-9\-]", "_", word)

    def json_format_dict(self, data, pretty=False):
        """Converts a dict to a JSON object and dumps it as a formatted string."""
        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)
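The inventory script above is normally run as an executable; a module-level guard along these lines is assumed but not part of the snippet shown.

if __name__ == '__main__':
    AzureInventory()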
    def setUp(self):
        self.sms = ServiceManagementService(credentials.getSubscriptionId(),
                                            credentials.getManagementCertFile())
        set_service_options(self.sms)

        self.storage_account_name = getUniqueName('utstor')
Example #34
 def __init__(self, subscription_id, cert_file):
     self.subscription_id = subscription_id
     self.cert_file = cert_file
     self.sms = ServiceManagementService(self.subscription_id, self.cert_file)
Example #35
def main():
    '''
        New version: simulates a simple interactive shell.
    '''
       
    config = __import__('config')
    
    subscription_id = get_certificate_from_publish_settings(
        publish_settings_path=config.publish_settings_path,
        path_to_write_certificate=config.path_to_write_certificate,
    )
    
    cert_file = config.path_to_write_certificate
    sms = ServiceManagementService(subscription_id, cert_file)
    
    if len(sys.argv) < 2:
        print("format should be python inspector.py <url of the vhd>")
        sys.exit(1)
    url = sys.argv[1]
    storage_name = url[8:url.find('.')]
    
    storage_account_key = sms.get_storage_account_keys(storage_name).storage_service_keys.primary.encode('ascii','ignore')
    
    nowpath = "/"
    
    def get_sentence(s) :
        st = s.find(' ')
        while st < len(s) and s[st] ==  ' ' :
            st += 1
        ed = len(s)
        for i in range(st, len(s)) :
            if s[i] == ' ' and s[i-1] != '\\' :
                ed = i
                break
        while ed>0 and s[ed-1] == '/' :
            ed -= 1
        return s[st:ed].replace("//", "/")
    
    global last_query_files_num
    while True :
        cmd = raw_input(nowpath+" $ ")
        if cmd.split(' ')[0] == "quit" :
            break
        elif cmd.split(' ')[0] == "ls" :
            old_main(url=url, account_key=storage_account_key, path=nowpath, ls=True)
        elif cmd.startswith("cd ") :
            sentence = get_sentence(cmd)
            if sentence != "" :
                if sentence == ".." :
                    if nowpath != "/" :
                        nowpath = nowpath[:nowpath[:-1].rfind('/')+1]
                elif sentence[0] == '/' :
                    old_main(url=url, account_key=storage_account_key, path=sentence, ls=True)
                    if last_query_files_num == 0 :
                        print "no such directory"
                    else :
                        nowpath = sentence + "/"
                elif sentence != "" :
                    old_main(url=url, account_key=storage_account_key, path=(nowpath+sentence), ls=True)
                    if last_query_files_num == 0 :
                        print "no such directory"
                    else :
                        nowpath += sentence + "/"
        elif cmd.startswith("download ") :
            sentence = get_sentence(cmd)
            tmp = sentence.rfind('/')
            if sentence != "" :
                old_main(url=url, account_key=storage_account_key, path=(nowpath+sentence[:tmp]), filename=sentence[(tmp+1):])
        else :
            print "invalid command"
Example #36
class AzureNodeDriver(NodeDriver):
    name = "Azure Node Provider"
    type = Provider.AZURE
    website = 'http://windowsazure.com'
    sms = None

    rolesizes = None

    NODE_STATE_MAP = {
        'RoleStateUnknown': NodeState.UNKNOWN,
        'CreatingVM': NodeState.PENDING,
        'StartingVM': NodeState.PENDING,
        'CreatingRole': NodeState.PENDING,
        'StartingRole': NodeState.PENDING,
        'ReadyRole': NodeState.RUNNING,
        'BusyRole': NodeState.PENDING,
        'StoppingRole': NodeState.PENDING,
        'StoppingVM': NodeState.PENDING,
        'DeletingVM': NodeState.PENDING,
        'StoppedVM': NodeState.STOPPED,
        'RestartingRole': NodeState.REBOOTING,
        'CyclingRole': NodeState.TERMINATED,
        'FailedStartingRole': NodeState.TERMINATED,
        'FailedStartingVM': NodeState.TERMINATED,
        'UnresponsiveRole': NodeState.TERMINATED,
        'StoppedDeallocated': NodeState.TERMINATED,
    }

    def __init__(self, subscription_id=None, key_file=None, **kwargs):
        """
        subscription_id contains the Azure subscription id
        in the form of a GUID; key_file contains
        the Azure X509 certificate in .pem form.
        """
        self.subscription_id = subscription_id
        self.key_file = key_file
        self.sms = ServiceManagementService(subscription_id, key_file)

        super(AzureNodeDriver, self).__init__(
            self.subscription_id,
            self.key_file,
            secure=True,
            **kwargs)

    def list_sizes(self):
        """
        Lists all sizes from azure

        :rtype: ``list`` of :class:`NodeSize`
        """
        if self.rolesizes is None:
            # refresh rolesizes
            data = self.sms.list_role_sizes()
            self.rolesizes = [self._to_node_size(i) for i in data]
        return self.rolesizes

    def list_images(self, location=None):
        """
        Lists all images from Azure

        :rtype: ``list`` of :class:`NodeImage`
        """
        data = self.sms.list_os_images()
        images = [self._to_image(i) for i in data]

        if location is not None:
            images = [image for image in images
                      if location in image.extra["location"]]
        return images

    def list_locations(self):
        """
        Lists all locations from Azure

        :rtype: ``list`` of :class:`NodeLocation`
        """
        data = self.sms.list_locations()
        locations = [self._to_location(i) for i in data]
        return locations

    def list_virtual_net(self):
        """
        List all VirtualNetworkSites

        :rtype: ``list`` of :class:`VirtualNetwork`
        """
        data = self.sms.list_virtual_network_sites()
        virtualnets = [self._to_virtual_network(i) for i in data]
        return virtualnets

    def create_node(self,
                    name,
                    image,
                    size,
                    storage,
                    service_name,
                    vm_user,
                    vm_password,
                    location=None,
                    affinity_group=None,
                    virtual_network=None):
        """
        Create a VM deployment request

        :rtype:  :class:`.Node`
        :return: ``Node`` Node instance on success.
        """

        try:
            self.sms.get_hosted_service_properties(service_name)
            pass
        except WindowsAzureMissingResourceError:
            # create cloud service
            if (location is not None) == (affinity_group is not None):
                raise ValueError(
                    "For resource creation, set exactly one of location"
                    " or affinity_group, not both and not neither")
            if location is not None:
                try:
                    self.sms.create_hosted_service(
                        service_name=service_name,
                        label=service_name,
                        location=location)
                    pass
                except Exception as e:
                    raise e
            else:
                try:
                    self.sms.create_hosted_service(
                        service_name=service_name,
                        label=service_name,
                        affinity_group=affinity_group)
                except Exception as e:
                    raise e
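A hedged usage sketch for the AzureNodeDriver defined above (its create_node body is truncated in this listing); the credentials are placeholders and the printed attributes follow the standard libcloud NodeLocation and NodeSize fields.

driver = AzureNodeDriver(subscription_id='<subscription-guid>',
                         key_file='/path/to/management-cert.pem')
for location in driver.list_locations():
    print(location.name)
for size in driver.list_sizes():
    print(size.id, size.ram)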
Example #37
import random
import string
import base64
from azure.servicemanagement import ServiceManagementService
from customparser import ConfigReader

config_reader = ConfigReader('config.ini')
config_params = config_reader.get_config_section_map("azure")

# from http://stackoverflow.com/a/2257449
def name_generator(size=10, chars=string.ascii_lowercase + string.digits):
    return ''.join(random.choice(chars) for _ in range(size))


subscription_id = config_params["subscription_id"]
certificate_path = config_params["mgmt_cert_path"]

sms = ServiceManagementService(subscription_id, certificate_path)

# Because the name has to be unique across Azure
hosted_service_name = name_generator()
label = 'devOps test'
desc = 'Service for basic nginx server'
location = 'Central US'

# image_list = sms.list_os_images()

result = sms.create_hosted_service(hosted_service_name, label, desc, location)
operation_result = sms.get_operation_status(result.request_id)

storage_acc_name = name_generator()
label = 'mystorageaccount'
location = 'Central US'
Example #38
 def __init__(self, config):
     self.config = config
     self.sms = ServiceManagementService(config.getAzureSubscriptionId(), config.getAzureCertificatePath())
     self.sbms = ServiceBusManagementService(config.getAzureSubscriptionId(), config.getAzureCertificatePath())
Example #39
class Deployment(object):
    """
    Helper class to handle deployment of the web site.
    """
    def __init__(self, config):
        self.config = config
        self.sms = ServiceManagementService(config.getAzureSubscriptionId(), config.getAzureCertificatePath())
        self.sbms = ServiceBusManagementService(config.getAzureSubscriptionId(), config.getAzureCertificatePath())

    @staticmethod
    def _resource_exists(get_resource):
        """
        Helper to check for the existence of a resource in Azure.

        get_resource: Parameter-less function to invoke in order to get the resource. The resource
            is assumed to exist when the call to get_resource() returns a value that is not None.
            If the call to get_resource() returns None or throws a WindowsAzureMissingResourceError
            exception, then it is assumed that the resource does not exist.

        Returns: A boolean value which is True if the resource exists.
        """
        resource = None
        try:
            resource = get_resource()
        except WindowsAzureMissingResourceError:
            pass
        return resource is not None

    def _wait_for_operation_success(self, request_id, timeout=600, wait=5):
        """
        Waits for an asynchronous Azure operation to finish.

        request_id: The ID of the request to track.
        timeout: Maximum duration (in seconds) allowed for the operation to complete.
        wait: Wait time (in seconds) between consecutive calls to fetch the latest operation status.
        """
        result = self.sms.get_operation_status(request_id)
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        while result.status == 'InProgress':
            if now >= max_time:
                raise Exception("Operation did not finish within the expected timeout")
            logger.info('Waiting for operation to finish (last_status=%s wait_so_far=%s)',
                        result.status, round(now - start_time, 1))
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            result = self.sms.get_operation_status(request_id)
            now = time.time()
        if result.status != 'Succeeded':
            raise Exception("Operation terminated but it did not succeed.")

    def _wait_for_role_instance_status(self, role_instance_name, service_name, expected_status, timeout=600, wait=5):
        """
        Waits for a role instance within the web site's cloud service to reach the status specified.

        role_instance_name: Name of the role instance.
        service_name: Name of service in which to find the role instance.
        expected_status: Expected instance status.
        timeout: Maximum duration (in seconds) allowed for the operation to complete.
        wait: Wait time (in seconds) between consecutive calls to fetch the latest role status.
        """
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        while True:
            status = None
            deployment = self.sms.get_deployment_by_name(service_name, service_name)
            for role_instance in deployment.role_instance_list:
                if role_instance.instance_name == role_instance_name:
                    status = role_instance.instance_status
            if status == expected_status:
                break
            if now >= max_time:
                raise Exception("Operation did not finish within the expected timeout")
            logger.info('Waiting for deployment status: expecting %s but got %s (wait_so_far=%s)',
                        expected_status, status, round(now - start_time, 1))
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            now = time.time()

    def _wait_for_disk_deletion(self, disk_name, timeout=600, wait=5):
        """
        Waits for a VM disk to disappear when it is being deleted.

        disk_name: Name of the VHD.
        timeout: Maximum duration (in seconds) allowed for the operation to complete.
        wait: Wait time (in seconds) between consecutive calls to check for the existence of the disk.
        """
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        logger.info("Checking that disk %s has been deleted.", disk_name)
        while self._resource_exists(lambda: self.sms.get_disk(disk_name)):
            if now >= max_time:
                raise Exception("Disk %s was not deleted within the expected timeout.".format(disk_name))
            logger.info("Waiting for disk %s to disappear (wait_so_far=%s).", disk_name, round(now - start_time, 1))
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            now = time.time()
        logger.info("Disk %s has been deleted.", disk_name)

    def _wait_for_namespace_active(self, name, timeout=600, wait=5):
        """
        Waits for a service bus namespace to become Active.

        name: Namespace name.
        timeout: Maximum duration (in seconds) allowed for the operation to complete.
        wait: Wait time (in seconds) between consecutive calls to check for the existence of the disk.
        """
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        while True:
            status = None
            props = self.sbms.get_namespace(name)
            status = props.status
            if status == 'Active':
                break
            if now >= max_time:
                raise Exception("Operation did not finish within the expected timeout")
            logger.info('Waiting for namespace status: expecting Active but got %s (wait_so_far=%s)',
                        status, round(now - start_time, 1))
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            now = time.time()

    def _getRoleInstances(self, service_name):
        """
        Returns the role instances in the given cloud service deployment. The results are provided as
        a dictionary where keys are role instance names and values are RoleInstance objects.
        """
        role_instances = {}
        if self._resource_exists(lambda: self.sms.get_deployment_by_name(service_name, service_name)):
            deployment = self.sms.get_deployment_by_name(service_name, service_name)
            for role_instance in deployment.role_instance_list:
                role_instances[role_instance.instance_name] = role_instance
        return role_instances

    def _ensureAffinityGroupExists(self):
        """
        Creates the affinity group if it does not exist.
        """
        name = self.config.getAffinityGroupName()
        location = self.config.getServiceLocation()
        logger.info("Checking for existence of affinity group (name=%s; location=%s).", name, location)
        if self._resource_exists(lambda: self.sms.get_affinity_group_properties(name)):
            logger.warn("An affinity group named %s already exists.", name)
        else:
            self.sms.create_affinity_group(name, name, location)
            logger.info("Created affinity group %s.", name)

    def _ensureStorageAccountExists(self, name):
        """
        Creates the storage account if it does not exist.
        """
        logger.info("Checking for existence of storage account (name=%s).", name)
        if self._resource_exists(lambda: self.sms.get_storage_account_properties(name)):
            logger.warn("A storage account named %s already exists.", name)
        else:
            result = self.sms.create_storage_account(name, "", name, affinity_group=self.config.getAffinityGroupName())
            self._wait_for_operation_success(result.request_id, timeout=self.config.getAzureOperationTimeout())
            logger.info("Created storage account %s.", name)

    def _getStorageAccountKey(self, account_name):
        """
        Gets the storage account key (primary key) for the given storage account.
        """
        storage_props = self.sms.get_storage_account_keys(account_name)
        return storage_props.storage_service_keys.primary

    def _ensureStorageContainersExist(self):
        """
        Creates Blob storage containers required by the service.
        """
        logger.info("Checking for existence of Blob containers.")
        account_name = self.config.getServiceStorageAccountName()
        account_key = self._getStorageAccountKey(account_name)
        blob_service = BlobService(account_name, account_key)
        name_and_access_list = [(self.config.getServicePublicStorageContainer(), 'blob'),
                                (self.config.getServiceBundleStorageContainer(), None)]
        for name, access in name_and_access_list:
            logger.info("Checking for existence of Blob container %s.", name)
            blob_service.create_container(name, x_ms_blob_public_access=access, fail_on_exist=False)
            access_info = 'private' if access is None else 'public {0}'.format(access)
            logger.info("Blob container %s is ready (access: %s).", name, access_info)

    def ensureStorageHasCorsConfiguration(self):
        """
        Ensures Blob storage container for bundles is configured to allow cross-origin resource sharing.
        """
        logger.info("Setting CORS rules.")
        account_name = self.config.getServiceStorageAccountName()
        account_key = self._getStorageAccountKey(account_name)

        cors_rule = CorsRule()
        cors_rule.allowed_origins = self.config.getServiceStorageCorsAllowedOrigins()
        cors_rule.allowed_methods = 'PUT'
        cors_rule.exposed_headers = '*'
        cors_rule.allowed_headers = '*'
        cors_rule.max_age_in_seconds = 1800
        cors_rules = Cors()
        cors_rules.cors_rule.append(cors_rule)
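        # set_storage_service_cors_properties is a helper defined or imported elsewhere in this module (not shown here).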
        set_storage_service_cors_properties(account_name, account_key, cors_rules)

    def _ensureServiceExists(self, service_name, affinity_group_name):
        """
        Creates the specified cloud service host if it does not exist.

        service_name: Name of the cloud service.
        affinity_group_name: Name of the affinity group (which should exist).
        """
        logger.info("Checking for existence of cloud service (name=%s).", service_name)
        if self._resource_exists(lambda: self.sms.get_hosted_service_properties(service_name)):
            logger.warn("A cloud service named %s already exists.", service_name)
        else:
            self.sms.create_hosted_service(service_name, service_name, affinity_group=affinity_group_name)
            logger.info("Created cloud service %s.", service_name)

    def _ensureServiceCertificateExists(self, service_name):
        """
        Adds certificate to the specified cloud service.

        service_name: Name of the target cloud service (which should exist).
        """
        cert_format = self.config.getServiceCertificateFormat()
        cert_algorithm = self.config.getServiceCertificateAlgorithm()
        cert_thumbprint = self.config.getServiceCertificateThumbprint()
        cert_path = self.config.getServiceCertificateFilename()
        cert_password = self.config.getServiceCertificatePassword()
        logger.info("Checking for existence of cloud service certificate for service %s.", service_name)
        get_cert = lambda: self.sms.get_service_certificate(service_name, cert_algorithm, cert_thumbprint)
        if self._resource_exists(get_cert):
            logger.info("Found expected cloud service certificate.")
        else:
            with open(cert_path, 'rb') as f:
                cert_data = base64.b64encode(f.read())
            if len(cert_data) <= 0:
                raise Exception("Detected invalid certificate data.")
            result = self.sms.add_service_certificate(service_name, cert_data, cert_format, cert_password)
            self._wait_for_operation_success(result.request_id, timeout=self.config.getAzureOperationTimeout())
            logger.info("Added service certificate.")

    def _assertOsImageExists(self, os_image_name):
        """
        Asserts that the named OS image exists.
        """
        logger.info("Checking for availability of OS image (name=%s).", os_image_name)
        if self.sms.get_os_image(os_image_name) is None:
            raise Exception("Unable to find OS Image '{0}'.".format(os_image_name))

    def _ensureVirtualMachinesExist(self):
        """
        Creates the VMs for the web site.
        """
        service_name = self.config.getServiceName()
        cert_thumbprint = self.config.getServiceCertificateThumbprint()
        vm_username = self.config.getVirtualMachineLogonUsername()
        vm_password = self.config.getVirtualMachineLogonPassword()
        vm_role_size = self.config.getServiceInstanceRoleSize()
        vm_numbers = self.config.getServiceInstanceCount()
        if vm_numbers < 1:
            raise Exception("Detected an invalid number of instances: {0}.".format(vm_numbers))

        self._assertOsImageExists(self.config.getServiceOSImageName())

        role_instances = self._getRoleInstances(service_name)
        for vm_number in range(1, vm_numbers+1):
            vm_hostname = '{0}-{1}'.format(service_name, vm_number)
            if vm_hostname in role_instances:
                logger.warn("Role instance %s already exists: skipping creation.", vm_hostname)
                continue

            logger.info("Role instance %s provisioning begins.", vm_hostname)
            vm_diskname = '{0}.vhd'.format(vm_hostname)
            vm_disk_media_link = 'http://{0}.blob.core.windows.net/vhds/{1}'.format(
                self.config.getServiceStorageAccountName(), vm_diskname
            )
            ssh_port = str(self.config.getServiceInstanceSshPort() + vm_number)

            os_hd = OSVirtualHardDisk(self.config.getServiceOSImageName(),
                                      vm_disk_media_link,
                                      disk_name=vm_diskname,
                                      disk_label=vm_diskname)
            linux_config = LinuxConfigurationSet(vm_hostname, vm_username, vm_password, True)
            linux_config.ssh.public_keys.public_keys.append(
                PublicKey(cert_thumbprint, u'/home/{0}/.ssh/authorized_keys'.format(vm_username))
            )
            linux_config.ssh.key_pairs.key_pairs.append(
                KeyPair(cert_thumbprint, u'/home/{0}/.ssh/id_rsa'.format(vm_username))
            )
            network_config = ConfigurationSet()
            network_config.configuration_set_type = 'NetworkConfiguration'
            ssh_endpoint = ConfigurationSetInputEndpoint(name='SSH',
                                                         protocol='TCP',
                                                         port=ssh_port,
                                                         local_port=u'22')
            network_config.input_endpoints.input_endpoints.append(ssh_endpoint)
            http_endpoint = ConfigurationSetInputEndpoint(name='HTTP',
                                                          protocol='TCP',
                                                          port=u'80',
                                                          local_port=u'80',
                                                          load_balanced_endpoint_set_name=service_name)
            http_endpoint.load_balancer_probe.port = '80'
            http_endpoint.load_balancer_probe.protocol = 'TCP'
            network_config.input_endpoints.input_endpoints.append(http_endpoint)

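            # The first VM also creates the deployment itself; subsequent VMs are added to it with add_role.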
            if vm_number == 1:
                result = self.sms.create_virtual_machine_deployment(service_name=service_name,
                                                                    deployment_name=service_name,
                                                                    deployment_slot='Production',
                                                                    label=vm_hostname,
                                                                    role_name=vm_hostname,
                                                                    system_config=linux_config,
                                                                    os_virtual_hard_disk=os_hd,
                                                                    network_config=network_config,
                                                                    availability_set_name=service_name,
                                                                    data_virtual_hard_disks=None,
                                                                    role_size=vm_role_size)
                self._wait_for_operation_success(result.request_id,
                                                 timeout=self.config.getAzureOperationTimeout())
                self._wait_for_role_instance_status(vm_hostname, service_name, 'ReadyRole',
                                                    self.config.getAzureOperationTimeout())
            else:
                result = self.sms.add_role(service_name=service_name,
                                           deployment_name=service_name,
                                           role_name=vm_hostname,
                                           system_config=linux_config,
                                           os_virtual_hard_disk=os_hd,
                                           network_config=network_config,
                                           availability_set_name=service_name,
                                           role_size=vm_role_size)
                self._wait_for_operation_success(result.request_id,
                                                 timeout=self.config.getAzureOperationTimeout())
                self._wait_for_role_instance_status(vm_hostname, service_name, 'ReadyRole',
                                                    self.config.getAzureOperationTimeout())

            logger.info("Role instance %s has been created.", vm_hostname)

    def _deleteVirtualMachines(self, service_name):
        """
        Deletes the VMs in the given cloud service.
        """
        if not self._resource_exists(lambda: self.sms.get_deployment_by_name(service_name, service_name)):
            logger.warn("Deployment %s not found: no VMs to delete.", service_name)
        else:
            logger.info("Attempting to delete deployment %s.", service_name)
            # Get set of role instances before we remove them
            role_instances = self._getRoleInstances(service_name)

            def update_request(request):
                """
                A filter to intercept the HTTP request sent by the ServiceManagementService
                so we can take advantage of a newer feature ('comp=media') in the delete deployment API
                (see http://msdn.microsoft.com/en-us/library/windowsazure/ee460812.aspx)
                """
                hdrs = []
                for name, value in request.headers:
                    if 'x-ms-version' == name:
                        value = '2013-08-01'
                    hdrs.append((name, value))
                request.headers = hdrs
                request.path = request.path + '?comp=media'
                #pylint: disable=W0212
                response = self.sms._filter(request)
                return response

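            # Use a separate ServiceManagementService instance so the _filter override does not affect self.sms.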
            svc = ServiceManagementService(self.sms.subscription_id, self.sms.cert_file)
            #pylint: disable=W0212
            svc._filter = update_request
            result = svc.delete_deployment(service_name, service_name)
            logger.info("Deployment %s deletion in progress: waiting for delete_deployment operation.", service_name)
            self._wait_for_operation_success(result.request_id)
            logger.info("Deployment %s deletion in progress: waiting for VM disks to be removed.", service_name)
            # Now wait for the disks to disappear
            for role_instance_name in role_instances.keys():
                disk_name = "{0}.vhd".format(role_instance_name)
                self._wait_for_disk_deletion(disk_name)
            logger.info("Deployment %s deleted.", service_name)

    def _ensureBuildMachineExists(self):
        """
        Creates the VM for the build server.
        """
        service_name = self.config.getBuildServiceName()
        service_storage_name = self.config.getStorageAccountName()
        cert_thumbprint = self.config.getServiceCertificateThumbprint()
        vm_username = self.config.getVirtualMachineLogonUsername()
        vm_password = self.config.getVirtualMachineLogonPassword()
        vm_hostname = service_name

        role_instances = self._getRoleInstances(service_name)
        if vm_hostname in role_instances:
            logger.warn("Role instance %s already exists: skipping creation.", vm_hostname)
        else:
            logger.info("Role instance %s provisioning begins.", vm_hostname)
            self._assertOsImageExists(self.config.getBuildOSImageName())

            vm_diskname = '{0}.vhd'.format(vm_hostname)
            vm_disk_media_link = 'http://{0}.blob.core.windows.net/vhds/{1}'.format(service_storage_name, vm_diskname)
            os_hd = OSVirtualHardDisk(self.config.getBuildOSImageName(),
                                      vm_disk_media_link,
                                      disk_name=vm_diskname,
                                      disk_label=vm_diskname)
            linux_config = LinuxConfigurationSet(vm_hostname, vm_username, vm_password, True)
            linux_config.ssh.public_keys.public_keys.append(
                PublicKey(cert_thumbprint, u'/home/{0}/.ssh/authorized_keys'.format(vm_username))
            )
            linux_config.ssh.key_pairs.key_pairs.append(
                KeyPair(cert_thumbprint, u'/home/{0}/.ssh/id_rsa'.format(vm_username))
            )
            network_config = ConfigurationSet()
            network_config.configuration_set_type = 'NetworkConfiguration'
            ssh_endpoint = ConfigurationSetInputEndpoint(name='SSH',
                                                         protocol='TCP',
                                                         port=u'22',
                                                         local_port=u'22')
            network_config.input_endpoints.input_endpoints.append(ssh_endpoint)

            result = self.sms.create_virtual_machine_deployment(service_name=service_name,
                                                                deployment_name=service_name,
                                                                deployment_slot='Production',
                                                                label=vm_hostname,
                                                                role_name=vm_hostname,
                                                                system_config=linux_config,
                                                                os_virtual_hard_disk=os_hd,
                                                                network_config=network_config,
                                                                availability_set_name=None,
                                                                data_virtual_hard_disks=None,
                                                                role_size=self.config.getBuildInstanceRoleSize())
            self._wait_for_operation_success(result.request_id, timeout=self.config.getAzureOperationTimeout())
            self._wait_for_role_instance_status(vm_hostname, service_name, 'ReadyRole',
                                                self.config.getAzureOperationTimeout())
            logger.info("Role instance %s has been created.", vm_hostname)

    def _deleteStorageAccount(self, name):
        """
        Deletes the storage account for the web site.
        """
        logger.info("Attempting to delete storage account %s.", name)
        if not self._resource_exists(lambda: self.sms.get_storage_account_properties(name)):
            logger.warn("Storage account %s not found: nothing to delete.", name)
        else:
            self.sms.delete_storage_account(name)
            logger.info("Storage account %s deleted.", name)

    def _deleteService(self, name):
        """
        Deletes the specified cloud service.
        """
        logger.info("Attempting to delete cloud service %s.", name)
        if not self._resource_exists(lambda: self.sms.get_hosted_service_properties(name)):
            logger.warn("Cloud service %s not found: nothing to delete.", name)
        else:
            self.sms.delete_hosted_service(name)
            logger.info("Cloud service %s deleted.", name)

    def _deleteAffinityGroup(self):
        """
        Deletes the affinity group for the web site.
        """
        name = self.config.getAffinityGroupName()
        logger.info("Attempting to delete affinity group %s.", name)
        if not self._resource_exists(lambda: self.sms.get_affinity_group_properties(name)):
            logger.warn("Affinity group %s not found: nothing to delete.", name)
        else:
            self.sms.delete_affinity_group(name)
            logger.info("Affinity group %s deleted.", name)

    def _ensureServiceBusNamespaceExists(self):
        """
        Creates the Azure Service Bus Namespace if it does not exist.
        """
        name = self.config.getServiceBusNamespace()
        logger.info("Checking for existence of service bus namespace (name=%s).", name)
        if self._resource_exists(lambda: self.sbms.get_namespace(name)):
            logger.warn("A namespace named %s already exists.", name)
        else:
            self.sbms.create_namespace(name, self.config.getServiceLocation())
            self._wait_for_namespace_active(name)
            logger.info("Created namespace %s.", name)

    def _ensureServiceBusQueuesExist(self):
        """
        Creates Azure service bus queues required by the service.
        """
        logger.info("Checking for existence of Service Bus Queues.")
        namespace = self.sbms.get_namespace(self.config.getServiceBusNamespace())
        sbs = ServiceBusService(namespace.name, namespace.default_key, issuer='owner')
        queue_names = ['jobresponsequeue', 'windowscomputequeue', 'linuxcomputequeue']
        for name in queue_names:
            logger.info("Checking for existence of Queue %s.", name)
            sbs.create_queue(name, fail_on_exist=False)
            logger.info("Queue %s is ready.", name)

    def _deleteServiceBusNamespace(self):
        """
        Deletes the Azure Service Bus Namespace.
        """
        name = self.config.getServiceBusNamespace()
        logger.info("Attempting to delete service bus namespace %s.", name)
        if not self._resource_exists(lambda: self.sbms.get_namespace(name)):
            logger.warn("Namespace %s not found: nothing to delete.", name)
        else:
            self.sbms.delete_namespace(name)
            logger.info("Namespace %s deleted.", name)

    def Deploy(self, assets):
        """
        Creates a deployment.

        assets: The set of assets to create. The full set is: {'build', 'web'}.
        """
        if len(assets) == 0:
            raise ValueError("Set of assets to deploy is not specified.")
        logger.info("Starting deployment operation.")
        self._ensureAffinityGroupExists()
        self._ensureStorageAccountExists(self.config.getStorageAccountName())
        ## Build instance
        if 'build' in assets:
            self._ensureServiceExists(self.config.getBuildServiceName(), self.config.getAffinityGroupName())
            self._ensureServiceCertificateExists(self.config.getBuildServiceName())
            self._ensureBuildMachineExists()
        # Web instances
        if 'web' in assets:
            self._ensureStorageAccountExists(self.config.getServiceStorageAccountName())
            self._ensureStorageContainersExist()
            self.ensureStorageHasCorsConfiguration()
            self._ensureServiceBusNamespaceExists()
            self._ensureServiceBusQueuesExist()
            self._ensureServiceExists(self.config.getServiceName(), self.config.getAffinityGroupName())
            self._ensureServiceCertificateExists(self.config.getServiceName())
            self._ensureVirtualMachinesExist()
        #queues
        logger.info("Deployment operation is complete.")

    def Teardown(self, assets):
        """
        Deletes a deployment.

        assets: The set of assets to delete. The full set is: {'web', 'build'}.
        """
        if len(assets) == 0:
            raise ValueError("Set of assets to teardown is not specified.")
        logger.info("Starting teardown operation.")
        if 'web' in assets:
            self._deleteVirtualMachines(self.config.getServiceName())
            self._deleteService(self.config.getServiceName())
            self._deleteStorageAccount(self.config.getServiceStorageAccountName())
        if 'build' in assets:
            self._deleteVirtualMachines(self.config.getBuildServiceName())
            self._deleteService(self.config.getBuildServiceName())
            self._deleteStorageAccount(self.config.getStorageAccountName())
        if ('web' in assets) and ('build' in assets):
            self._deleteServiceBusNamespace()
            self._deleteAffinityGroup()
        logger.info("Teardown operation is complete.")

    def getSettingsFileContent(self):
        """
        Generates the content of the local Django settings file.
        """
        allowed_hosts = ['{0}.cloudapp.net'.format(self.config.getServiceName())]
        allowed_hosts.extend(self.config.getWebHostnames())
        allowed_hosts.extend(['www.codalab.org', 'codalab.org'])
        ssl_allowed_hosts = self.config.getSslRewriteHosts()
        if len(ssl_allowed_hosts) == 0:
            ssl_allowed_hosts = allowed_hosts

        storage_key = self._getStorageAccountKey(self.config.getServiceStorageAccountName())
        namespace = self.sbms.get_namespace(self.config.getServiceBusNamespace())

        if len(self.config.getSslCertificateInstalledPath()) > 0:
            bundle_auth_scheme = "https"
        else:
            bundle_auth_scheme = "http"
        if len(ssl_allowed_hosts) == 0:
            bundle_auth_host = '{0}.cloudapp.net'.format(self.config.getServiceName())
        else:
            bundle_auth_host = ssl_allowed_hosts[0]
        bundle_auth_url = "{0}://{1}".format(bundle_auth_scheme, bundle_auth_host)

        lines = [
            "from base import Base",
            "from default import *",
            "from configurations import Settings",
            "",
            "import sys",
            "from os.path import dirname, abspath, join",
            "from pkgutil import extend_path",
            "import codalab",
            "",
            "class {0}(Base):".format(self.config.getDjangoConfiguration()),
            "",
            "    DEBUG=False",
            "",
            "    ALLOWED_HOSTS = {0}".format(allowed_hosts),
            "",
            "    SSL_PORT = '443'",
            "    SSL_CERTIFICATE = '{0}'".format(self.config.getSslCertificateInstalledPath()),
            "    SSL_CERTIFICATE_KEY = '{0}'".format(self.config.getSslCertificateKeyInstalledPath()),
            "    SSL_ALLOWED_HOSTS = {0}".format(ssl_allowed_hosts),
            "",
            "    DEFAULT_FILE_STORAGE = 'codalab.azure_storage.AzureStorage'",
            "    AZURE_ACCOUNT_NAME = '{0}'".format(self.config.getServiceStorageAccountName()),
            "    AZURE_ACCOUNT_KEY = '{0}'".format(storage_key),
            "    AZURE_CONTAINER = '{0}'".format(self.config.getServicePublicStorageContainer()),
            "    BUNDLE_AZURE_ACCOUNT_NAME = AZURE_ACCOUNT_NAME",
            "    BUNDLE_AZURE_ACCOUNT_KEY = AZURE_ACCOUNT_KEY",
            "    BUNDLE_AZURE_CONTAINER = '{0}'".format(self.config.getServiceBundleStorageContainer()),
            "",
            "    SBS_NAMESPACE = '{0}'".format(self.config.getServiceBusNamespace()),
            "    SBS_ISSUER = 'owner'",
            "    SBS_ACCOUNT_KEY = '{0}'".format(namespace.default_key),
            "    SBS_RESPONSE_QUEUE = 'jobresponsequeue'",
            "    SBS_COMPUTE_QUEUE = 'windowscomputequeue'",
            "",
            "    EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'",
            "    EMAIL_HOST = '{0}'".format(self.config.getEmailHost()),
            "    EMAIL_HOST_USER = '******'".format(self.config.getEmailUser()),
            "    EMAIL_HOST_PASSWORD = '******'".format(self.config.getEmailPassword()),
            "    EMAIL_PORT = 587",
            "    EMAIL_USE_TLS = True",
            "    DEFAULT_FROM_EMAIL = 'CodaLab <*****@*****.**>'",
            "    SERVER_EMAIL = '*****@*****.**'",
            "",
            "    # Django secret",
            "    SECRET_KEY = '{0}'".format(self.config.getDjangoSecretKey()),
            "",
            "    ADMINS = (('CodaLab', '*****@*****.**'),)",
            "    MANAGERS = ADMINS",
            "",
            "    DATABASES = {",
            "        'default': {",
            "            'ENGINE': '{0}',".format(self.config.getDatabaseEngine()),
            "            'NAME': '{0}',".format(self.config.getDatabaseName()),
            "            'USER': '******',".format(self.config.getDatabaseUser()),
            "            'PASSWORD': '******',".format(self.config.getDatabasePassword()),
            "            'HOST': '{0}',".format(self.config.getDatabaseHost()),
            "            'PORT': '{0}', ".format(self.config.getDatabasePort()),
            "            'OPTIONS' : {",
            "                'init_command': 'SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED',",
            "                'read_timeout': 5",
            "            }",
            "        }",
            "    }",
            "",
            "    BUNDLE_DB_NAME = '{0}'".format(self.config.getBundleServiceDatabaseName()),
            "    BUNDLE_DB_USER = '******'".format(self.config.getBundleServiceDatabaseUser()),
            "    BUNDLE_DB_PASSWORD = '******'".format(self.config.getBundleServiceDatabasePassword()),
            "    BUNDLE_APP_ID = '{0}'".format(self.config.getBundleServiceAppId()),
            "    BUNDLE_APP_KEY = '{0}'".format(self.config.getBundleServiceAppKey()),
            "    BUNDLE_AUTH_URL = '{0}'".format(bundle_auth_url),
            "",
            "    BUNDLE_SERVICE_URL = '{0}'".format(self.config.getBundleServiceUrl()),
            "    BUNDLE_SERVICE_CODE_PATH = '/home/{0}/deploy/bundles'".format(self.config.getVirtualMachineLogonUsername()),
            "    sys.path.append(BUNDLE_SERVICE_CODE_PATH)",
            "    codalab.__path__ = extend_path(codalab.__path__, codalab.__name__)",
            "",
        ]
        preview = self.config.getShowPreviewFeatures()
        if preview >= 1:
            if preview == 1:
                lines.append("    PREVIEW_WORKSHEETS = True")
            if preview > 1:
                lines.append("    SHOW_BETA_FEATURES = True")
            lines.append("")
        return '\n'.join(lines)
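# A minimal usage sketch for the deployment helper methods above, assuming they live on a
# wrapper class constructed from a configuration object; the names "Deployment" and
# "DeploymentConfig" below are placeholders for illustration, while Deploy(), Teardown()
# and getSettingsFileContent() are the methods defined above.
#
#     config = DeploymentConfig('deployment.config')   # hypothetical config loader
#     d = Deployment(config)                           # hypothetical wrapper class
#     d.Deploy({'build', 'web'})                       # provision the build and web assets
#     print d.getSettingsFileContent()                 # emit the generated Django settings
#     d.Teardown({'web', 'build'})                     # tear everything down when finished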
Exemplo n.º 40
class AzureServicesManager:
    # Storage
    container = 'vhds'
    windows_blob_url = 'blob.core.windows.net'

    # Linux
    linux_user = '******'
    linux_pass = '******'
    location = 'West US'
    # SSH Keys

    def __init__(self, subscription_id, cert_file):
        self.subscription_id = subscription_id
        self.cert_file = cert_file
        # Keep the client in a private attribute so it does not collide with the property below.
        self._sms = ServiceManagementService(self.subscription_id, self.cert_file)

    @property
    def sms(self):
        return self._sms

    def list_locations(self):
        locations = self.sms.list_locations()
        for location in locations:
            print location

    def list_images(self):
        return self.sms.list_os_images()

    @utils.resource_not_found_handler
    def get_hosted_service(self, service_name):
        resp = self.sms.get_hosted_service_properties(service_name)
        properties = resp.hosted_service_properties
        return properties.__dict__

    def delete_hosted_service(self, service_name):
        # check_hosted_service_name_availability returns result=True when the name is free,
        # i.e. the service does not exist and there is nothing to delete.
        res = self.sms.check_hosted_service_name_availability(service_name)
        if res.result:
            return

        self.sms.delete_hosted_service(service_name)

    def create_hosted_service(self, os_user, service_name=None, random=False):
        if not service_name:
            service_name = self.generate_cloud_service_name(os_user, random)

        available = False

        while not available:
            res = self.sms.check_hosted_service_name_availability(service_name)
            if not res.result:
                service_name = self.generate_cloud_service_name(os_user,
                                                                random)
            else:
                available = True

        self.sms.create_hosted_service(service_name=service_name,
                                       label=service_name,
                                       location='West US')

        return service_name

    def create_virtual_machine(self, service_name, vm_name, image_name, role_size):
        media_link = self._get_media_link(vm_name)
        # Linux VM configuration
        hostname = '-'.join((vm_name, 'host'))
        linux_config = LinuxConfigurationSet(hostname,
                                             self.linux_user,
                                             self.linux_pass,
                                             True)

        # Hard disk for the OS
        os_hd = OSVirtualHardDisk(image_name, media_link)

        # Create vm
        result = self.sms.create_virtual_machine_deployment(
            service_name=service_name,  deployment_name=vm_name,
            deployment_slot='production', label=vm_name,
            role_name=vm_name, system_config=linux_config,
            os_virtual_hard_disk=os_hd,
            role_size=role_size
        )
        request_id = result.request_id

        return {
            'request_id': request_id,
            'media_link': media_link
        }

    def delete_virtual_machine(self, service_name, vm_name):
        resp = self.sms.delete_deployment(service_name, vm_name, True)
        self.sms.wait_for_operation_status(resp.request_id)
        result = self.sms.delete_hosted_service(service_name)
        return result

    def generate_cloud_service_name(self, os_user=None, random=False):
        if random:
            return utils.generate_random_name(10)

        return '-'.join((os_user, utils.generate_random_name(6)))

    @utils.resource_not_found_handler
    def get_virtual_machine_info(self, service_name, vm_name):
        vm_info = {}
        deploy_info = self.sms.get_deployment_by_name(service_name, vm_name)

        if deploy_info and deploy_info.role_instance_list:
            vm_info = deploy_info.role_instance_list[0].__dict__

        return vm_info

    def list_virtual_machines(self):
        vm_list = []
        services = self.sms.list_hosted_services()
        for service in services:
            deploys = service.deployments
            if deploys and deploys.role_instance_list:
                vm_name = deploys.role_instance_list[0].instance_name
                vm_list.append(vm_name)

        return vm_list

    def power_on(self, service_name, vm_name):
        resp = self.sms.start_role(service_name, vm_name, vm_name)
        return resp.request_id

    def power_off(self, service_name, vm_name):
        resp = self.sms.shutdown_role(service_name, vm_name, vm_name)
        return resp.request_id

    def soft_reboot(self, service_name, vm_name):
        resp = self.sms.restart_role(service_name, vm_name, vm_name)
        return resp.request_id

    def hard_reboot(self, service_name, vm_name):
        resp = self.sms.reboot_role_instance(service_name, vm_name, vm_name)
        return resp.request_id

    def attach_volume(self, service_name, vm_name, size, lun):
        disk_name = utils.generate_random_name(5, vm_name)
        media_link = self._get_media_link(vm_name, disk_name)

        self.sms.add_data_disk(service_name,
                               vm_name,
                               vm_name,
                               lun,
                               host_caching='ReadWrite',
                               media_link=media_link,
                               disk_name=disk_name,
                               logical_disk_size_in_gb=size)

    def detach_volume(self, service_name, vm_name, lun):
        self.sms.delete_data_disk(service_name, vm_name, vm_name, lun, True)

    def get_available_lun(self, service_name, vm_name):
        try:
            role = self.sms.get_role(service_name, vm_name, vm_name)
        except Exception:
            return 0

        disks = role.data_virtual_hard_disks
        luns = sorted(disk.lun for disk in disks)

        for i in range(1, 16):
            if i not in luns:
                return i

        return None

    def snapshot(self, service_name, vm_name, image_id, snapshot_name):
        image_desc = 'Snapshot for image %s' % vm_name
        image = CaptureRoleAsVMImage('Specialized', snapshot_name,
                                     image_id, image_desc, 'english')

        resp = self.sms.capture_vm_image(service_name, vm_name, vm_name, image)

        self.sms.wait_for_operation_status(resp.request_id)

    def _get_media_link(self, vm_name, filename=None, storage_account=None):
        """ The MediaLink should be constructed as:
        http://<storageAccount>.<blobLink>/<blobContainer>/<filename>.vhd
        """
        if not storage_account:
            storage_account = self._get_or_create_storage_account()

        container = self.container
        filename = vm_name if filename is None else filename
        blob = vm_name + '-' + filename + '.vhd'
        media_link = "http://%s.%s/%s/%s" % (storage_account,
                                             self.windows_blob_url,
                                             container, blob)

        return media_link

    def _get_or_create_storage_account(self):
        account_list = self.sms.list_storage_accounts()

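        # Reuse the most recently listed storage account if one exists; otherwise create a new one.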
        if account_list:
            return account_list[-1].service_name

        storage_account = utils.generate_random_name(10)
        description = "Storage account %s description" % storage_account
        label = storage_account + 'label'
        self.sms.create_storage_account(storage_account,
                                        description,
                                        label,
                                        location=self.location)

        return storage_account

    def _wait_for_operation(self, request_id, timeout=3000,
                            failure_callback=None,
                            failure_callback_kwargs=None):
        try:
            self.sms.wait_for_operation_status(request_id, timeout=timeout)
        except Exception as ex:
            if failure_callback and failure_callback_kwargs:
                failure_callback(**failure_callback_kwargs)

            raise ex
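# A short usage sketch for AzureServicesManager above. The subscription id, certificate path,
# OS image name and role size below are placeholder assumptions; the methods are the ones
# defined by the class.
if __name__ == '__main__':
    manager = AzureServicesManager('00000000-0000-0000-0000-000000000000',
                                   '/path/to/management.pem')
    service_name = manager.create_hosted_service(os_user='azureuser')
    result = manager.create_virtual_machine(service_name, 'vm01',
                                            'SOME-LINUX-OS-IMAGE-NAME', 'Small')
    print result['request_id'], result['media_link']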
class StorageManagementServiceTest(AzureTestCase):

    def setUp(self):
        proxy_host = credentials.getProxyHost()
        proxy_port = credentials.getProxyPort()

        self.sms = ServiceManagementService(credentials.getSubscriptionId(), credentials.getManagementCertFile())
        if proxy_host:
            self.sms.set_proxy(proxy_host, proxy_port)

        self.storage_account_name = getUniqueNameBasedOnCurrentTime('utstorage')

    def tearDown(self):
        try:
            self.sms.delete_storage_account(self.storage_account_name)
        except: pass

    #--Helpers-----------------------------------------------------------------
    def _wait_for_async(self, request_id):
        count = 0
        result = self.sms.get_operation_status(request_id)
        while result.status == 'InProgress':
            count = count + 1
            if count > 120:
                self.assertTrue(False, 'Timed out waiting for async operation to complete.')
            time.sleep(5)
            result = self.sms.get_operation_status(request_id)
        self.assertEqual(result.status, 'Succeeded')

    def _create_storage_account(self, name):
        result = self.sms.create_storage_account(name, name + 'description', name + 'label', None, 'West US', False, {'ext1':'val1', 'ext2':42})
        self._wait_for_async(result.request_id)

    def _storage_account_exists(self, name):
        try:
            props = self.sms.get_storage_account_properties(name)
            return props is not None
        except:
            return False

    #--Test cases for storage accounts -----------------------------------
    def test_list_storage_accounts(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.list_storage_accounts()
        
        # Assert
        self.assertIsNotNone(result)
        self.assertTrue(len(result) > 0)

        storage = None
        for temp in result:
            if temp.service_name == self.storage_account_name:
                storage = temp
                break
        
        self.assertIsNotNone(storage)
        self.assertIsNotNone(storage.service_name)
        self.assertIsNone(storage.storage_service_keys)
        self.assertIsNotNone(storage.storage_service_properties)
        self.assertIsNotNone(storage.storage_service_properties.affinity_group)
        self.assertIsNotNone(storage.storage_service_properties.description)
        self.assertIsNotNone(storage.storage_service_properties.geo_primary_region)
        self.assertIsNotNone(storage.storage_service_properties.geo_replication_enabled)
        self.assertIsNotNone(storage.storage_service_properties.geo_secondary_region)
        self.assertIsNotNone(storage.storage_service_properties.label)
        self.assertIsNotNone(storage.storage_service_properties.last_geo_failover_time)
        self.assertIsNotNone(storage.storage_service_properties.location)
        self.assertIsNotNone(storage.storage_service_properties.status)
        self.assertIsNotNone(storage.storage_service_properties.status_of_primary)
        self.assertIsNotNone(storage.storage_service_properties.status_of_secondary)
        self.assertIsNotNone(storage.storage_service_properties.endpoints)
        self.assertTrue(len(storage.storage_service_properties.endpoints) > 0)
        self.assertIsNotNone(storage.extended_properties)
        self.assertTrue(len(storage.extended_properties) > 0)

    def test_get_storage_account_properties(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.get_storage_account_properties(self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertEqual(result.service_name, self.storage_account_name)
        self.assertIsNotNone(result.url)
        self.assertIsNone(result.storage_service_keys)
        self.assertIsNotNone(result.storage_service_properties)
        self.assertIsNotNone(result.storage_service_properties.affinity_group)
        self.assertIsNotNone(result.storage_service_properties.description)
        self.assertIsNotNone(result.storage_service_properties.geo_primary_region)
        self.assertIsNotNone(result.storage_service_properties.geo_replication_enabled)
        self.assertIsNotNone(result.storage_service_properties.geo_secondary_region)
        self.assertIsNotNone(result.storage_service_properties.label)
        self.assertIsNotNone(result.storage_service_properties.last_geo_failover_time)
        self.assertIsNotNone(result.storage_service_properties.location)
        self.assertIsNotNone(result.storage_service_properties.status)
        self.assertIsNotNone(result.storage_service_properties.status_of_primary)
        self.assertIsNotNone(result.storage_service_properties.status_of_secondary)
        self.assertIsNotNone(result.storage_service_properties.endpoints)
        self.assertTrue(len(result.storage_service_properties.endpoints) > 0)
        self.assertIsNotNone(result.extended_properties)
        self.assertTrue(len(result.extended_properties) > 0)
        self.assertIsNotNone(result.capabilities)
        self.assertTrue(len(result.capabilities) > 0)

    def test_get_storage_account_keys(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.get_storage_account_keys(self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertIsNotNone(result.url)
        self.assertIsNotNone(result.service_name)
        self.assertIsNotNone(result.storage_service_keys.primary)
        self.assertIsNotNone(result.storage_service_keys.secondary)
        self.assertIsNone(result.storage_service_properties)

    def test_regenerate_storage_account_keys(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)
        previous = self.sms.get_storage_account_keys(self.storage_account_name)

        # Act
        result = self.sms.regenerate_storage_account_keys(self.storage_account_name, 'Secondary')

        # Assert
        self.assertIsNotNone(result)
        self.assertIsNotNone(result.url)
        self.assertIsNotNone(result.service_name)
        self.assertIsNotNone(result.storage_service_keys.primary)
        self.assertIsNotNone(result.storage_service_keys.secondary)
        self.assertIsNone(result.storage_service_properties)
        self.assertEqual(result.storage_service_keys.primary, previous.storage_service_keys.primary)
        self.assertNotEqual(result.storage_service_keys.secondary, previous.storage_service_keys.secondary)

    def test_create_storage_account(self):
        # Arrange
        description = self.storage_account_name + 'description'
        label = self.storage_account_name + 'label'

        # Act
        result = self.sms.create_storage_account(self.storage_account_name, description, label, None, 'West US', True, {'ext1':'val1', 'ext2':42})
        self._wait_for_async(result.request_id)

        # Assert
        self.assertTrue(self._storage_account_exists(self.storage_account_name))

    def test_update_storage_account(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)
        description = self.storage_account_name + 'descriptionupdate'
        label = self.storage_account_name + 'labelupdate'

        # Act
        result = self.sms.update_storage_account(self.storage_account_name, description, label, False, {'ext1':'val1update', 'ext2':53, 'ext3':'brandnew'})

        # Assert
        self.assertIsNone(result)
        props = self.sms.get_storage_account_properties(self.storage_account_name)
        self.assertEqual(props.storage_service_properties.description, description)
        self.assertEqual(props.storage_service_properties.label, label)
        self.assertEqual(props.extended_properties['ext1'], 'val1update')
        self.assertEqual(props.extended_properties['ext2'], '53')
        self.assertEqual(props.extended_properties['ext3'], 'brandnew')

    def test_delete_storage_account(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.delete_storage_account(self.storage_account_name)

        # Assert
        self.assertIsNone(result)
        self.assertFalse(self._storage_account_exists(self.storage_account_name))

    def test_check_storage_account_name_availability_not_available(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.check_storage_account_name_availability(self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertFalse(result.result)

    def test_check_storage_account_name_availability_available(self):
        # Arrange

        # Act
        result = self.sms.check_storage_account_name_availability(self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertTrue(result.result)

    def test_unicode_create_storage_account_unicode_name(self):
        # Arrange
        self.storage_account_name = unicode(self.storage_account_name) + u'啊齄丂狛狜'
        description = 'description'
        label = 'label'

        # Act
        with self.assertRaises(WindowsAzureError):
            # not supported - queue name must be alphanumeric, lowercase
            result = self.sms.create_storage_account(self.storage_account_name, description, label, None, 'West US', True, {'ext1':'val1', 'ext2':42})
            self._wait_for_async(result.request_id)

        # Assert

    def test_unicode_create_storage_account_unicode_description_label(self):
        # Arrange
        description = u'啊齄丂狛狜'
        label = u'丂狛狜'

        # Act
        result = self.sms.create_storage_account(self.storage_account_name, description, label, None, 'West US', True, {'ext1':'val1', 'ext2':42})
        self._wait_for_async(result.request_id)

        # Assert
        result = self.sms.get_storage_account_properties(self.storage_account_name)
        self.assertEqual(result.storage_service_properties.description, description)
        self.assertEqual(result.storage_service_properties.label, label)

    def test_unicode_create_storage_account_unicode_property_value(self):
        # Arrange
        description = 'description'
        label = 'label'

        # Act
        result = self.sms.create_storage_account(self.storage_account_name, description, label, None, 'West US', True, {'ext1':u'丂狛狜', 'ext2':42})
        self._wait_for_async(result.request_id)

        # Assert
        result = self.sms.get_storage_account_properties(self.storage_account_name)
        self.assertEqual(result.storage_service_properties.description, description)
        self.assertEqual(result.storage_service_properties.label, label)
        self.assertEqual(result.extended_properties['ext1'], u'丂狛狜')
class AffinityGroupManagementServiceTest(AzureTestCase):

    def setUp(self):
        self.sms = ServiceManagementService(credentials.getSubscriptionId(), 
                                            credentials.getManagementCertFile())

        self.sms.set_proxy(credentials.getProxyHost(), 
                           credentials.getProxyPort(), 
                           credentials.getProxyUser(), 
                           credentials.getProxyPassword())

        self.affinity_group_name = getUniqueNameBasedOnCurrentTime('utaffgrp')
        self.hosted_service_name = None
        self.storage_account_name = None

    def tearDown(self):
        try:
            if self.hosted_service_name is not None:
                self.sms.delete_hosted_service(self.hosted_service_name)
        except: pass

        try:
            if self.storage_account_name is not None:
                self.sms.delete_storage_account(self.storage_account_name)
        except: pass

        try:
            self.sms.delete_affinity_group(self.affinity_group_name)
        except: pass

    #--Helpers-----------------------------------------------------------------
    def _create_affinity_group(self, name):
        result = self.sms.create_affinity_group(name, 'tstmgmtaffgrp', 'West US', 'tstmgmt affinity group')
        self.assertIsNone(result)

    def _affinity_group_exists(self, name):
        try:
            props = self.sms.get_affinity_group_properties(name)
            return props is not None
        except:
            return False

    #--Test cases for affinity groups ------------------------------------
    def test_list_affinity_groups(self):
        # Arrange
        self._create_affinity_group(self.affinity_group_name)

        # Act
        result = self.sms.list_affinity_groups()
        
        # Assert
        self.assertIsNotNone(result)
        self.assertTrue(len(result) > 0)

        group = None
        for temp in result:
            if temp.name == self.affinity_group_name:
                group = temp
                break

        self.assertIsNotNone(group)
        self.assertIsNotNone(group.name)
        self.assertIsNotNone(group.label)
        self.assertIsNotNone(group.description)
        self.assertIsNotNone(group.location)
        self.assertIsNotNone(group.capabilities)
        self.assertTrue(len(group.capabilities) > 0)

    def test_get_affinity_group_properties(self):
        # Arrange
        self.hosted_service_name = getUniqueNameBasedOnCurrentTime('utsvc')
        self.storage_account_name = getUniqueNameBasedOnCurrentTime('utstorage')
        self._create_affinity_group(self.affinity_group_name)
        self.sms.create_hosted_service(self.hosted_service_name, 'affgrptestlabel', 'affgrptestdesc', None, self.affinity_group_name)
        self.sms.create_storage_account(self.storage_account_name, self.storage_account_name + 'desc', self.storage_account_name + 'label', self.affinity_group_name)

        # Act
        result = self.sms.get_affinity_group_properties(self.affinity_group_name)
        
        # Assert
        self.assertIsNotNone(result)
        self.assertEqual(result.name, self.affinity_group_name)
        self.assertIsNotNone(result.label)
        self.assertIsNotNone(result.description)
        self.assertIsNotNone(result.location)
        self.assertIsNotNone(result.hosted_services[0])
        self.assertEqual(result.hosted_services[0].service_name, self.hosted_service_name)
        self.assertEqual(result.hosted_services[0].hosted_service_properties.affinity_group, self.affinity_group_name)
        # not sure why azure does not return any storage service
        self.assertTrue(len(result.capabilities) > 0)

    def test_create_affinity_group(self):
        # Arrange
        label = 'tstmgmtaffgrp'
        description = 'tstmgmt affinity group'

        # Act
        result = self.sms.create_affinity_group(self.affinity_group_name, label, 'West US', description)

        # Assert
        self.assertIsNone(result)
        self.assertTrue(self._affinity_group_exists(self.affinity_group_name))

    def test_update_affinity_group(self):
        # Arrange
        self._create_affinity_group(self.affinity_group_name)
        label = 'tstlabelupdate'
        description = 'testmgmt affinity group update'

        # Act
        result = self.sms.update_affinity_group(self.affinity_group_name, label, description)

        # Assert
        self.assertIsNone(result)
        props = self.sms.get_affinity_group_properties(self.affinity_group_name)
        self.assertEqual(props.label, label)
        self.assertEqual(props.description, description)

    def test_delete_affinity_group(self):
        # Arrange
        self._create_affinity_group(self.affinity_group_name)

        # Act
        result = self.sms.delete_affinity_group(self.affinity_group_name)

        # Assert
        self.assertIsNone(result)
        self.assertFalse(self._affinity_group_exists(self.affinity_group_name))

    #--Test cases for locations ------------------------------------------
    def test_list_locations(self):
        # Arrange

        # Act
        result = self.sms.list_locations()
        
        # Assert
        self.assertIsNotNone(result)
        self.assertTrue(len(result) > 0)
        self.assertIsNotNone(result[0].name)
        self.assertIsNotNone(result[0].display_name)
        self.assertIsNotNone(result[0].available_services)
        self.assertTrue(len(result[0].available_services) > 0)
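# A minimal sketch of how the two management test classes above could be run, assuming the
# surrounding harness provides the `credentials` helper, `getUniqueNameBasedOnCurrentTime`
# and the `AzureTestCase` base class used in setUp(); this runner is illustrative only.
import unittest

if __name__ == '__main__':
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(StorageManagementServiceTest))
    suite.addTest(unittest.makeSuite(AffinityGroupManagementServiceTest))
    unittest.TextTestRunner(verbosity=2).run(suite)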
Exemplo n.º 43
class AzureInventory(object):
    def __init__(self):
        """Main execution path."""
        # Inventory grouped by display group
        self.inventory = {}
        # Index of deployment name -> host
        self.index = {}

        # Read settings and parse CLI arguments
        self.read_settings()
        self.read_environment()
        self.parse_cli_args()

        # Initialize Azure ServiceManagementService
        self.sms = ServiceManagementService(self.subscription_id, self.cert_path)

        # Cache
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()

        if self.args.list_images:
            data_to_print = self.json_format_dict(self.get_images(), True)
        elif self.args.list:
            # Display list of nodes for inventory
            if len(self.inventory) == 0:
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)

        print data_to_print

    def get_images(self):
        images = []
        for image in self.sms.list_os_images():
            if str(image.label).lower().find(self.args.list_images.lower()) >= 0:
                images.append(vars(image))
        return json.loads(json.dumps(images, default=lambda o: o.__dict__))

    def is_cache_valid(self):
        """Determines if the cache file has expired, or if it is still valid."""
        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                if os.path.isfile(self.cache_path_index):
                    return True
        return False

    def read_settings(self):
        """Reads the settings from the .ini file."""
        config = ConfigParser.SafeConfigParser()
        config.read(os.path.dirname(os.path.realpath(__file__)) + '/windows_azure.ini')

        # Credentials related
        if config.has_option('azure', 'subscription_id'):
            self.subscription_id = config.get('azure', 'subscription_id')
        if config.has_option('azure', 'cert_path'):
            self.cert_path = config.get('azure', 'cert_path')

        # Cache related
        if config.has_option('azure', 'cache_path'):
            cache_path = config.get('azure', 'cache_path')
            self.cache_path_cache = cache_path + "/ansible-azure.cache"
            self.cache_path_index = cache_path + "/ansible-azure.index"
        if config.has_option('azure', 'cache_max_age'):
            self.cache_max_age = config.getint('azure', 'cache_max_age')

    def read_environment(self):
        ''' Reads the settings from environment variables '''
        # Credentials
        if os.getenv("AZURE_SUBSCRIPTION_ID"): self.subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
        if os.getenv("AZURE_CERT_PATH"):       self.cert_path = os.getenv("AZURE_CERT_PATH")


    def parse_cli_args(self):
        """Command line argument processing"""
        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Azure')
        parser.add_argument('--list', action='store_true', default=True,
                           help='List nodes (default: True)')
        parser.add_argument('--list-images', action='store',
                           help='Get all available images.')
        parser.add_argument('--refresh-cache', action='store_true', default=False,
                           help='Force refresh of cache by making API requests to Azure (default: False - use cache files)')
        self.args = parser.parse_args()

    def do_api_calls_update_cache(self):
        """Do API calls, and save data in cache files."""
        self.add_cloud_services()
        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)

    def add_cloud_services(self):
        """Makes an Azure API call to get the list of cloud services."""
        try:
            for cloud_service in self.sms.list_hosted_services():
                self.add_deployments(cloud_service)
        except WindowsAzureError as e:
            print "Looks like Azure's API is down:"
            print
            print e
            sys.exit(1)

    def add_deployments(self, cloud_service):
        """Makes an Azure API call to get the list of virtual machines associated with a cloud service"""
        try:
            for deployment in self.sms.get_hosted_service_properties(cloud_service.service_name,embed_detail=True).deployments.deployments:
                if deployment.deployment_slot == "Production":
                    self.add_deployment(cloud_service, deployment)
        except WindowsAzureError as e:
            print "Looks like Azure's API is down:"
            print
            print e
            sys.exit(1)

    def add_deployment(self, cloud_service, deployment):
        """Adds a deployment to the inventory and index"""

        dest = urlparse(deployment.url).hostname

        # Add to index
        self.index[dest] = deployment.name

        # List of all azure deployments
        self.push(self.inventory, "azure", dest)

        # Inventory: Group by service name
        self.push(self.inventory, self.to_safe(cloud_service.service_name), dest)

        # Inventory: Group by region
        self.push(self.inventory, self.to_safe(cloud_service.hosted_service_properties.location), dest)

    def push(self, my_dict, key, element):
        """Pushed an element onto an array that may not have been defined in the dict."""
        if key in my_dict:
            my_dict[key].append(element);
        else:
            my_dict[key] = [element]

    def get_inventory_from_cache(self):
        """Reads the inventory from the cache file and returns it as a JSON object."""
        cache = open(self.cache_path_cache, 'r')
        json_inventory = cache.read()
        return json_inventory

    def load_index_from_cache(self):
        """Reads the index from the cache file and sets self.index."""
        cache = open(self.cache_path_index, 'r')
        json_index = cache.read()
        self.index = json.loads(json_index)

    def write_to_cache(self, data, filename):
        """Writes data in JSON format to a file."""
        json_data = self.json_format_dict(data, True)
        cache = open(filename, 'w')
        cache.write(json_data)
        cache.close()

    def to_safe(self, word):
        """Escapes any characters that would be invalid in an ansible group name."""
        return re.sub(r"[^A-Za-z0-9\-]", "_", word)

    def json_format_dict(self, data, pretty=False):
        """Converts a dict to a JSON object and dumps it as a formatted string."""
        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)
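# Example windows_azure.ini consumed by read_settings() above (all values are placeholders):
#
#     [azure]
#     subscription_id = 00000000-0000-0000-0000-000000000000
#     cert_path = /path/to/management.pem
#     cache_path = /tmp
#     cache_max_age = 300
#
# Typical invocations, assuming the script is saved as windows_azure.py next to the ini file:
#
#     python windows_azure.py --list
#     python windows_azure.py --list-images ubuntu
#     python windows_azure.py --refresh-cache --list
#
# The class does all of its work from __init__, so running the script is just:
if __name__ == '__main__':
    AzureInventory()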