class StorageManagementServiceTest(AzureTestCase):
    """Live tests for the storage-account APIs of ServiceManagementService.

    setUp provisions a client (honoring any configured proxy) and picks a
    unique storage account name; tearDown deletes that account on a
    best-effort basis so tests can create it freely.
    """

    def setUp(self):
        proxy_host = credentials.getProxyHost()
        proxy_port = credentials.getProxyPort()

        self.sms = ServiceManagementService(credentials.getSubscriptionId(), credentials.getManagementCertFile())
        if proxy_host:
            self.sms.set_proxy(proxy_host, proxy_port)

        self.storage_account_name = getUniqueNameBasedOnCurrentTime('utstorage')

    def tearDown(self):
        # Best-effort cleanup: the account may never have been created (e.g.
        # availability tests), so deletion failures are deliberately ignored.
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit escape.
        try:
            self.sms.delete_storage_account(self.storage_account_name)
        except Exception:
            pass

    #--Helpers-----------------------------------------------------------------
    def _wait_for_async(self, request_id):
        """Poll the async operation until it completes (at most 120 polls,
        ~10 minutes) and assert that it succeeded."""
        count = 0
        result = self.sms.get_operation_status(request_id)
        while result.status == 'InProgress':
            count = count + 1
            if count > 120:
                # self.fail states the intent directly (was assertTrue(False)).
                self.fail('Timed out waiting for async operation to complete.')
            time.sleep(5)
            result = self.sms.get_operation_status(request_id)
        self.assertEqual(result.status, 'Succeeded')

    def _create_storage_account(self, name):
        """Create a storage account in 'West US' and wait for completion."""
        result = self.sms.create_storage_account(name, name + 'description', name + 'label', None, 'West US', False, {'ext1':'val1', 'ext2':42})
        self._wait_for_async(result.request_id)

    def _storage_account_exists(self, name):
        """Return True when the storage account's properties can be fetched."""
        try:
            props = self.sms.get_storage_account_properties(name)
            return props is not None
        except Exception:
            # Narrowed from a bare except; a missing account surfaces as a
            # service error (presumably WindowsAzureMissingResourceError —
            # TODO confirm), which means "does not exist" here.
            return False

    #--Test cases for storage accounts -----------------------------------
    def test_list_storage_accounts(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.list_storage_accounts()

        # Assert
        self.assertIsNotNone(result)
        self.assertTrue(len(result) > 0)

        storage = None
        for temp in result:
            if temp.service_name == self.storage_account_name:
                storage = temp
                break

        self.assertIsNotNone(storage)
        self.assertIsNotNone(storage.service_name)
        self.assertIsNone(storage.storage_service_keys)
        self.assertIsNotNone(storage.storage_service_properties)
        self.assertIsNotNone(storage.storage_service_properties.affinity_group)
        self.assertIsNotNone(storage.storage_service_properties.description)
        self.assertIsNotNone(storage.storage_service_properties.geo_primary_region)
        self.assertIsNotNone(storage.storage_service_properties.geo_replication_enabled)
        self.assertIsNotNone(storage.storage_service_properties.geo_secondary_region)
        self.assertIsNotNone(storage.storage_service_properties.label)
        self.assertIsNotNone(storage.storage_service_properties.last_geo_failover_time)
        self.assertIsNotNone(storage.storage_service_properties.location)
        self.assertIsNotNone(storage.storage_service_properties.status)
        self.assertIsNotNone(storage.storage_service_properties.status_of_primary)
        self.assertIsNotNone(storage.storage_service_properties.status_of_secondary)
        self.assertIsNotNone(storage.storage_service_properties.endpoints)
        self.assertTrue(len(storage.storage_service_properties.endpoints) > 0)
        self.assertIsNotNone(storage.extended_properties)
        self.assertTrue(len(storage.extended_properties) > 0)

    def test_get_storage_account_properties(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.get_storage_account_properties(self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertEqual(result.service_name, self.storage_account_name)
        self.assertIsNotNone(result.url)
        self.assertIsNone(result.storage_service_keys)
        self.assertIsNotNone(result.storage_service_properties)
        self.assertIsNotNone(result.storage_service_properties.affinity_group)
        self.assertIsNotNone(result.storage_service_properties.description)
        self.assertIsNotNone(result.storage_service_properties.geo_primary_region)
        self.assertIsNotNone(result.storage_service_properties.geo_replication_enabled)
        self.assertIsNotNone(result.storage_service_properties.geo_secondary_region)
        self.assertIsNotNone(result.storage_service_properties.label)
        self.assertIsNotNone(result.storage_service_properties.last_geo_failover_time)
        self.assertIsNotNone(result.storage_service_properties.location)
        self.assertIsNotNone(result.storage_service_properties.status)
        self.assertIsNotNone(result.storage_service_properties.status_of_primary)
        self.assertIsNotNone(result.storage_service_properties.status_of_secondary)
        self.assertIsNotNone(result.storage_service_properties.endpoints)
        self.assertTrue(len(result.storage_service_properties.endpoints) > 0)
        self.assertIsNotNone(result.extended_properties)
        self.assertTrue(len(result.extended_properties) > 0)
        self.assertIsNotNone(result.capabilities)
        self.assertTrue(len(result.capabilities) > 0)

    def test_get_storage_account_keys(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.get_storage_account_keys(self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertIsNotNone(result.url)
        self.assertIsNotNone(result.service_name)
        self.assertIsNotNone(result.storage_service_keys.primary)
        self.assertIsNotNone(result.storage_service_keys.secondary)
        self.assertIsNone(result.storage_service_properties)

    def test_regenerate_storage_account_keys(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)
        previous = self.sms.get_storage_account_keys(self.storage_account_name)

        # Act
        result = self.sms.regenerate_storage_account_keys(self.storage_account_name, 'Secondary')

        # Assert
        self.assertIsNotNone(result)
        self.assertIsNotNone(result.url)
        self.assertIsNotNone(result.service_name)
        self.assertIsNotNone(result.storage_service_keys.primary)
        self.assertIsNotNone(result.storage_service_keys.secondary)
        self.assertIsNone(result.storage_service_properties)
        # Only the secondary key was regenerated; primary must be untouched.
        self.assertEqual(result.storage_service_keys.primary, previous.storage_service_keys.primary)
        self.assertNotEqual(result.storage_service_keys.secondary, previous.storage_service_keys.secondary)

    def test_create_storage_account(self):
        # Arrange
        description = self.storage_account_name + 'description'
        label = self.storage_account_name + 'label'

        # Act
        result = self.sms.create_storage_account(self.storage_account_name, description, label, None, 'West US', True, {'ext1':'val1', 'ext2':42})
        self._wait_for_async(result.request_id)

        # Assert
        self.assertTrue(self._storage_account_exists(self.storage_account_name))

    def test_update_storage_account(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)
        description = self.storage_account_name + 'descriptionupdate'
        label = self.storage_account_name + 'labelupdate'

        # Act
        result = self.sms.update_storage_account(self.storage_account_name, description, label, False, {'ext1':'val1update', 'ext2':53, 'ext3':'brandnew'})

        # Assert
        self.assertIsNone(result)
        props = self.sms.get_storage_account_properties(self.storage_account_name)
        self.assertEqual(props.storage_service_properties.description, description)
        self.assertEqual(props.storage_service_properties.label, label)
        self.assertEqual(props.extended_properties['ext1'], 'val1update')
        # Extended property values round-trip as strings.
        self.assertEqual(props.extended_properties['ext2'], '53')
        self.assertEqual(props.extended_properties['ext3'], 'brandnew')

    def test_delete_storage_account(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.delete_storage_account(self.storage_account_name)

        # Assert
        self.assertIsNone(result)
        self.assertFalse(self._storage_account_exists(self.storage_account_name))

    def test_check_storage_account_name_availability_not_available(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.check_storage_account_name_availability(self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertFalse(result.result)

    def test_check_storage_account_name_availability_available(self):
        # Arrange

        # Act
        result = self.sms.check_storage_account_name_availability(self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertTrue(result.result)

    def test_unicode_create_storage_account_unicode_name(self):
        # Arrange
        self.storage_account_name = unicode(self.storage_account_name) + u'啊齄丂狛狜'
        description = 'description'
        label = 'label'

        # Act
        with self.assertRaises(WindowsAzureError):
            # not supported - queue name must be alphanumeric, lowercase
            result = self.sms.create_storage_account(self.storage_account_name, description, label, None, 'West US', True, {'ext1':'val1', 'ext2':42})
            self._wait_for_async(result.request_id)

        # Assert

    def test_unicode_create_storage_account_unicode_description_label(self):
        # Arrange
        description = u'啊齄丂狛狜'
        label = u'丂狛狜'

        # Act
        result = self.sms.create_storage_account(self.storage_account_name, description, label, None, 'West US', True, {'ext1':'val1', 'ext2':42})
        self._wait_for_async(result.request_id)

        # Assert
        result = self.sms.get_storage_account_properties(self.storage_account_name)
        self.assertEqual(result.storage_service_properties.description, description)
        self.assertEqual(result.storage_service_properties.label, label)

    def test_unicode_create_storage_account_unicode_property_value(self):
        # Arrange
        description = 'description'
        label = 'label'

        # Act
        result = self.sms.create_storage_account(self.storage_account_name, description, label, None, 'West US', True, {'ext1':u'丂狛狜', 'ext2':42})
        self._wait_for_async(result.request_id)

        # Assert
        result = self.sms.get_storage_account_properties(self.storage_account_name)
        self.assertEqual(result.storage_service_properties.description, description)
        self.assertEqual(result.storage_service_properties.label, label)
        self.assertEqual(result.extended_properties['ext1'], u'丂狛狜')
# --- Example 2: deployment helper ---
class Deployment(object):
    """
    Helper class to handle deployment of the web site.
    """
    def __init__(self, config):
        """Create management clients from the given deployment config.

        config: Configuration object providing the Azure subscription ID
            and management certificate path.
        """
        self.config = config
        self.sms = ServiceManagementService(
            config.getAzureSubscriptionId(),
            config.getAzureCertificatePath())
        self.sbms = ServiceBusManagementService(
            config.getAzureSubscriptionId(),
            config.getAzureCertificatePath())

    @staticmethod
    def _resource_exists(get_resource):
        """
        Helper to check for the existence of a resource in Azure.

        get_resource: Parameter-less function to invoke in order to get the resource. The resource
            is assumed to exist when the call to get_resource() returns a value that is not None.
            If the call to get_resource() returns None or throws a WindowsAzureMissingResourceError
            exception, then it is assumed that the resource does not exist.

        Returns: A boolean value which is True if the resource exists.
        """
        resource = None
        try:
            resource = get_resource()
        except WindowsAzureMissingResourceError:
            pass
        return resource is not None

    def _wait_for_operation_success(self, request_id, timeout=600, wait=5):
        """
        Waits for an asynchronous Azure operation to finish.

        request_id: The ID of the request to track.
        timeout: Maximum duration (in seconds) allowed for the operation to complete.
        wait: Wait time (in seconds) between consecutive calls to fetch the latest operation status.
        """
        result = self.sms.get_operation_status(request_id)
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        while result.status == 'InProgress':
            if now >= max_time:
                raise Exception(
                    "Operation did not finish within the expected timeout")
            logger.info(
                'Waiting for operation to finish (last_status=%s wait_so_far=%s)',
                result.status, round(now - start_time, 1))
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            result = self.sms.get_operation_status(request_id)
            now = time.time()
        if result.status != 'Succeeded':
            raise Exception("Operation terminated but it did not succeed.")

    def _wait_for_role_instance_status(self,
                                       role_instance_name,
                                       service_name,
                                       expected_status,
                                       timeout=600,
                                       wait=5):
        """
        Waits for a role instance within the web site's cloud service to reach the status specified.

        role_instance_name: Name of the role instance.
        service_name: Name of service in which to find the role instance.
        expected_status: Expected instance status.
        timeout: Maximum duration (in seconds) allowed for the operation to complete.
        wait: Wait time (in seconds) between consecutive calls to fetch the latest role status.
        """
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        while True:
            status = None
            deployment = self.sms.get_deployment_by_name(
                service_name, service_name)
            for role_instance in deployment.role_instance_list:
                if role_instance.instance_name == role_instance_name:
                    status = role_instance.instance_status
            if status == expected_status:
                break
            if now >= max_time:
                raise Exception(
                    "Operation did not finish within the expected timeout")
            logger.info(
                'Waiting for deployment status: expecting %s but got %s (wait_so_far=%s)',
                expected_status, status, round(now - start_time, 1))
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            now = time.time()

    def _wait_for_disk_deletion(self, disk_name, timeout=600, wait=5):
        """
        Waits for a VM disk to disappear when it is being deleted.

        disk_name: Name of the VHD.
        timeout: Maximum duration (in seconds) allowed for the deletion.
        wait: Wait time (in seconds) between consecutive existence checks.

        Raises an Exception when the disk still exists after the timeout.
        """
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        logger.info("Checking that disk %s has been deleted.", disk_name)
        while self._resource_exists(lambda: self.sms.get_disk(disk_name)):
            if now >= max_time:
                # Bug fix: the original used a '%s' placeholder with
                # str.format(), so the disk name was never substituted.
                raise Exception(
                    "Disk {0} was not deleted within the expected timeout.".
                    format(disk_name))
            logger.info("Waiting for disk %s to disappear (wait_so_far=%s).",
                        disk_name, round(now - start_time, 1))
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            now = time.time()
        logger.info("Disk %s has been deleted.", disk_name)

    def _wait_for_namespace_active(self, name, timeout=600, wait=5):
        """
        Waits for a service bus namespace to become Active.

        name: Namespace name.
        timeout: Maximum duration (in seconds) allowed for the operation to complete.
        wait: Wait time (in seconds) between consecutive calls to check for the existence of the disk.
        """
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        while True:
            status = None
            props = self.sbms.get_namespace(name)
            status = props.status
            if status == 'Active':
                break
            if now >= max_time:
                raise Exception(
                    "Operation did not finish within the expected timeout")
            logger.info(
                'Waiting for namepsace status: expecting Active but got %s (wait_so_far=%s)',
                status, round(now - start_time, 1))
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            now = time.time()

    def _getRoleInstances(self, service_name):
        """
        Returns the role instances in the given cloud service deployment. The results are provided as
        a dictionary where keys are role instance names and values are RoleInstance objects.
        """
        role_instances = {}
        if self._resource_exists(lambda: self.sms.get_deployment_by_name(
                service_name, service_name)):
            deployment = self.sms.get_deployment_by_name(
                service_name, service_name)
            for role_instance in deployment.role_instance_list:
                role_instances[role_instance.instance_name] = role_instance
        return role_instances

    def _ensureAffinityGroupExists(self):
        """Create the configured affinity group unless it already exists."""
        group_name = self.config.getAffinityGroupName()
        group_location = self.config.getServiceLocation()
        logger.info(
            "Checking for existence of affinity group (name=%s; location=%s).",
            group_name, group_location)
        found = self._resource_exists(
            lambda: self.sms.get_affinity_group_properties(group_name))
        if found:
            logger.warn("An affinity group named %s already exists.",
                        group_name)
        else:
            self.sms.create_affinity_group(group_name, group_name,
                                           group_location)
            logger.info("Created affinity group %s.", group_name)

    def _ensureStorageAccountExists(self, name):
        """Create the named storage account unless it already exists.

        name: Storage account name; also used as its label. The account is
            placed in the configured affinity group.
        """
        logger.info("Checking for existence of storage account (name=%s).",
                    name)
        already_there = self._resource_exists(
            lambda: self.sms.get_storage_account_properties(name))
        if already_there:
            logger.warn("A storage account named %s already exists.", name)
        else:
            creation = self.sms.create_storage_account(
                name,
                "",
                name,
                affinity_group=self.config.getAffinityGroupName())
            self._wait_for_operation_success(
                creation.request_id,
                timeout=self.config.getAzureOperationTimeout())
            logger.info("Created storage account %s.", name)

    def _getStorageAccountKey(self, account_name):
        """
        Gets the storage account key (primary key) for the given storage account.
        """
        storage_props = self.sms.get_storage_account_keys(account_name)
        return storage_props.storage_service_keys.primary

    def _ensureStorageContainersExist(self):
        """Create the Blob storage containers required by the service.

        The public container is readable at blob level; the bundle container
        stays private. Creation is idempotent (fail_on_exist=False).
        """
        logger.info("Checking for existence of Blob containers.")
        account_name = self.config.getServiceStorageAccountName()
        account_key = self._getStorageAccountKey(account_name)
        blob_service = BlobService(account_name, account_key)
        containers = [
            (self.config.getServicePublicStorageContainer(), 'blob'),
            (self.config.getServiceBundleStorageContainer(), None)
        ]
        for container_name, access in containers:
            logger.info("Checking for existence of Blob container %s.",
                        container_name)
            blob_service.create_container(container_name,
                                          x_ms_blob_public_access=access,
                                          fail_on_exist=False)
            if access is None:
                access_info = 'private'
            else:
                access_info = 'public {0}'.format(access)
            logger.info("Blob container %s is ready (access: %s).",
                        container_name, access_info)

    def ensureStorageHasCorsConfiguration(self):
        """
        Configure the service's storage account to allow cross-origin
        resource sharing (CORS) for PUT requests from the configured origins.
        """
        logger.info("Setting CORS rules.")
        account_name = self.config.getServiceStorageAccountName()
        account_key = self._getStorageAccountKey(account_name)

        rule = CorsRule()
        rule.allowed_origins = (
            self.config.getServiceStorageCorsAllowedOrigins())
        rule.allowed_methods = 'PUT'
        rule.exposed_headers = '*'
        rule.allowed_headers = '*'
        rule.max_age_in_seconds = 1800
        rules = Cors()
        rules.cors_rule.append(rule)
        set_storage_service_cors_properties(account_name, account_key, rules)

    def _ensureServiceExists(self, service_name, affinity_group_name):
        """
        Create the cloud service unless it already exists.

        service_name: Name of the cloud service (also used as its label).
        affinity_group_name: Name of the affinity group (must already exist).
        """
        logger.info("Checking for existence of cloud service (name=%s).",
                    service_name)
        found = self._resource_exists(
            lambda: self.sms.get_hosted_service_properties(service_name))
        if found:
            logger.warn("A cloud service named %s already exists.",
                        service_name)
        else:
            self.sms.create_hosted_service(service_name,
                                           service_name,
                                           affinity_group=affinity_group_name)
            logger.info("Created cloud service %s.", service_name)

    def _ensureServiceCertificateExists(self, service_name):
        """
        Upload the configured certificate to the cloud service unless it is
        already present.

        service_name: Name of the target cloud service (must already exist).

        Raises an Exception when the certificate file yields no data.
        """
        cert_format = self.config.getServiceCertificateFormat()
        cert_algorithm = self.config.getServiceCertificateAlgorithm()
        cert_thumbprint = self.config.getServiceCertificateThumbprint()
        cert_path = self.config.getServiceCertificateFilename()
        cert_password = self.config.getServiceCertificatePassword()
        logger.info(
            "Checking for existence of cloud service certificate for service %s.",
            service_name)
        if self._resource_exists(lambda: self.sms.get_service_certificate(
                service_name, cert_algorithm, cert_thumbprint)):
            logger.info("Found expected cloud service certificate.")
            return
        with open(cert_path, 'rb') as cert_file:
            cert_data = base64.b64encode(cert_file.read())
        if not cert_data:
            raise Exception("Detected invalid certificate data.")
        result = self.sms.add_service_certificate(service_name, cert_data,
                                                  cert_format, cert_password)
        self._wait_for_operation_success(
            result.request_id,
            timeout=self.config.getAzureOperationTimeout())
        logger.info("Added service certificate.")

    def _assertOsImageExists(self, os_image_name):
        """Raise an Exception if the named OS image is not available."""
        logger.info("Checking for availability of OS image (name=%s).",
                    os_image_name)
        image = self.sms.get_os_image(os_image_name)
        if image is None:
            raise Exception(
                "Unable to find OS Image '{0}'.".format(os_image_name))

    def _ensureVirtualMachinesExist(self):
        """
        Creates the VMs for the web site.

        One role instance is created per configured instance number; instances
        that already exist are skipped. The first VM also creates the
        deployment itself; subsequent VMs are added as roles to it.

        Raises: Exception when the configured instance count is less than 1.
        """
        service_name = self.config.getServiceName()
        cert_thumbprint = self.config.getServiceCertificateThumbprint()
        vm_username = self.config.getVirtualMachineLogonUsername()
        vm_password = self.config.getVirtualMachineLogonPassword()
        vm_role_size = self.config.getServiceInstanceRoleSize()
        vm_numbers = self.config.getServiceInstanceCount()
        if vm_numbers < 1:
            raise Exception(
                "Detected an invalid number of instances: {0}.".format(
                    vm_numbers))

        self._assertOsImageExists(self.config.getServiceOSImageName())

        role_instances = self._getRoleInstances(service_name)
        for vm_number in range(1, vm_numbers + 1):
            vm_hostname = '{0}-{1}'.format(service_name, vm_number)
            if vm_hostname in role_instances:
                logger.warn(
                    "Role instance %s already exists: skipping creation.",
                    vm_hostname)
                continue

            logger.info("Role instance %s provisioning begins.", vm_hostname)
            vm_diskname = '{0}.vhd'.format(vm_hostname)
            vm_disk_media_link = 'http://{0}.blob.core.windows.net/vhds/{1}'.format(
                self.config.getServiceStorageAccountName(), vm_diskname)
            # Each VM gets a distinct public SSH port derived from its number.
            ssh_port = str(self.config.getServiceInstanceSshPort() + vm_number)

            os_hd = OSVirtualHardDisk(self.config.getServiceOSImageName(),
                                      vm_disk_media_link,
                                      disk_name=vm_diskname,
                                      disk_label=vm_diskname)
            linux_config = LinuxConfigurationSet(vm_hostname, vm_username,
                                                 vm_password, True)
            linux_config.ssh.public_keys.public_keys.append(
                PublicKey(
                    cert_thumbprint,
                    u'/home/{0}/.ssh/authorized_keys'.format(vm_username)))
            linux_config.ssh.key_pairs.key_pairs.append(
                KeyPair(cert_thumbprint,
                        u'/home/{0}/.ssh/id_rsa'.format(vm_username)))
            network_config = ConfigurationSet()
            network_config.configuration_set_type = 'NetworkConfiguration'
            ssh_endpoint = ConfigurationSetInputEndpoint(name='SSH',
                                                         protocol='TCP',
                                                         port=ssh_port,
                                                         local_port=u'22')
            network_config.input_endpoints.input_endpoints.append(ssh_endpoint)
            # HTTP traffic is load-balanced across all instances of the set.
            http_endpoint = ConfigurationSetInputEndpoint(
                name='HTTP',
                protocol='TCP',
                port=u'80',
                local_port=u'80',
                load_balanced_endpoint_set_name=service_name)
            http_endpoint.load_balancer_probe.port = '80'
            http_endpoint.load_balancer_probe.protocol = 'TCP'
            network_config.input_endpoints.input_endpoints.append(
                http_endpoint)

            if vm_number == 1:
                # The first VM creates the deployment itself.
                result = self.sms.create_virtual_machine_deployment(
                    service_name=service_name,
                    deployment_name=service_name,
                    deployment_slot='Production',
                    label=vm_hostname,
                    role_name=vm_hostname,
                    system_config=linux_config,
                    os_virtual_hard_disk=os_hd,
                    network_config=network_config,
                    availability_set_name=service_name,
                    data_virtual_hard_disks=None,
                    role_size=vm_role_size)
            else:
                result = self.sms.add_role(service_name=service_name,
                                           deployment_name=service_name,
                                           role_name=vm_hostname,
                                           system_config=linux_config,
                                           os_virtual_hard_disk=os_hd,
                                           network_config=network_config,
                                           availability_set_name=service_name,
                                           role_size=vm_role_size)
            # The wait logic below was duplicated verbatim in both branches
            # of the original if/else; hoisted here once.
            self._wait_for_operation_success(
                result.request_id,
                timeout=self.config.getAzureOperationTimeout())
            self._wait_for_role_instance_status(
                vm_hostname, service_name, 'ReadyRole',
                self.config.getAzureOperationTimeout())

            logger.info("Role instance %s has been created.", vm_hostname)

    def _deleteVirtualMachines(self, service_name):
        """
        Deletes the VMs in the given cloud service.

        The deployment (named after the service) is deleted with the
        'comp=media' option so the attached VHDs are removed as well, and the
        method waits for each role instance's disk to disappear.

        service_name: Name of the cloud service hosting the deployment.
        """
        # Idiom fix: use `not ...` instead of comparing to False.
        if not self._resource_exists(lambda: self.sms.get_deployment_by_name(
                service_name, service_name)):
            logger.warn("Deployment %s not found: no VMs to delete.",
                        service_name)
        else:
            logger.info("Attempting to delete deployment %s.", service_name)
            # Get set of role instances before we remove them
            role_instances = self._getRoleInstances(service_name)

            def update_request(request):
                """
                A filter to intercept the HTTP request sent by the ServiceManagementService
                so we can take advantage of a newer feature ('comp=media') in the delete deployment API
                (see http://msdn.microsoft.com/en-us/library/windowsazure/ee460812.aspx)
                """
                hdrs = []
                for name, value in request.headers:
                    if 'x-ms-version' == name:
                        # 'comp=media' requires at least this API version.
                        value = '2013-08-01'
                    hdrs.append((name, value))
                request.headers = hdrs
                request.path = request.path + '?comp=media'
                #pylint: disable=W0212
                response = self.sms._filter(request)
                return response

            svc = ServiceManagementService(self.sms.subscription_id,
                                           self.sms.cert_file)
            #pylint: disable=W0212
            svc._filter = update_request
            result = svc.delete_deployment(service_name, service_name)
            logger.info(
                "Deployment %s deletion in progress: waiting for delete_deployment operation.",
                service_name)
            self._wait_for_operation_success(result.request_id)
            logger.info(
                "Deployment %s deletion in progress: waiting for VM disks to be removed.",
                service_name)
            # Now wait for the disks to disappear
            for role_instance_name in role_instances.keys():
                disk_name = "{0}.vhd".format(role_instance_name)
                self._wait_for_disk_deletion(disk_name)
            logger.info("Deployment %s deleted.", service_name)

    def _ensureBuildMachineExists(self):
        """
        Creates the VM for the build server.

        If a role instance with the expected hostname already exists in the
        build service deployment, creation is skipped with a warning.
        Otherwise a single Linux VM is provisioned with SSH access on port
        22, and the call blocks until the asynchronous create operation
        completes and the role instance reaches 'ReadyRole'.
        """
        service_name = self.config.getBuildServiceName()
        service_storage_name = self.config.getStorageAccountName()
        cert_thumbprint = self.config.getServiceCertificateThumbprint()
        vm_username = self.config.getVirtualMachineLogonUsername()
        vm_password = self.config.getVirtualMachineLogonPassword()
        # The role instance is expected to carry the same name as the service.
        vm_hostname = service_name

        role_instances = self._getRoleInstances(service_name)
        if vm_hostname in role_instances:
            logger.warn("Role instance %s already exists: skipping creation.",
                        vm_hostname)
        else:
            logger.info("Role instance %s provisioning begins.", vm_hostname)
            # Fail early if the configured OS image is not available.
            self._assertOsImageExists(self.config.getBuildOSImageName())

            # OS disk VHD lives in the 'vhds' container of the build storage
            # account and is named after the VM.
            vm_diskname = '{0}.vhd'.format(vm_hostname)
            vm_disk_media_link = 'http://{0}.blob.core.windows.net/vhds/{1}'.format(
                service_storage_name, vm_diskname)
            os_hd = OSVirtualHardDisk(self.config.getBuildOSImageName(),
                                      vm_disk_media_link,
                                      disk_name=vm_diskname,
                                      disk_label=vm_diskname)
            # Password logon plus SSH key access derived from the service
            # certificate thumbprint.
            linux_config = LinuxConfigurationSet(vm_hostname, vm_username,
                                                 vm_password, True)
            linux_config.ssh.public_keys.public_keys.append(
                PublicKey(
                    cert_thumbprint,
                    u'/home/{0}/.ssh/authorized_keys'.format(vm_username)))
            linux_config.ssh.key_pairs.key_pairs.append(
                KeyPair(cert_thumbprint,
                        u'/home/{0}/.ssh/id_rsa'.format(vm_username)))
            # Expose SSH (22 -> 22) as the only input endpoint.
            network_config = ConfigurationSet()
            network_config.configuration_set_type = 'NetworkConfiguration'
            ssh_endpoint = ConfigurationSetInputEndpoint(name='SSH',
                                                         protocol='TCP',
                                                         port=u'22',
                                                         local_port=u'22')
            network_config.input_endpoints.input_endpoints.append(ssh_endpoint)

            result = self.sms.create_virtual_machine_deployment(
                service_name=service_name,
                deployment_name=service_name,
                deployment_slot='Production',
                label=vm_hostname,
                role_name=vm_hostname,
                system_config=linux_config,
                os_virtual_hard_disk=os_hd,
                network_config=network_config,
                availability_set_name=None,
                data_virtual_hard_disks=None,
                role_size=self.config.getBuildInstanceRoleSize())
            # Block until the operation succeeds, then until the VM is ready.
            self._wait_for_operation_success(
                result.request_id,
                timeout=self.config.getAzureOperationTimeout())
            self._wait_for_role_instance_status(
                vm_hostname, service_name, 'ReadyRole',
                self.config.getAzureOperationTimeout())
            logger.info("Role instance %s has been created.", vm_hostname)

    def _deleteStorageAccount(self, name):
        """
        Deletes the storage account for the web site.

        name: Name of the storage account to delete.

        Deletion is skipped (with a warning) when the account does not
        exist, making this safe to call during partial teardowns.
        """
        logger.info("Attempting to delete storage account %s.", name)
        # 'not ...' instead of '== False' (PEP 8: never compare to booleans).
        if not self._resource_exists(
                lambda: self.sms.get_storage_account_properties(name)):
            # Logger.warn is a deprecated alias of Logger.warning.
            logger.warning("Storage account %s not found: nothing to delete.",
                           name)
        else:
            self.sms.delete_storage_account(name)
            logger.info("Storage account %s deleted.", name)

    def _deleteService(self, name):
        """
        Deletes the specified cloud service.

        name: Name of the cloud service to delete.

        Deletion is skipped (with a warning) when the service does not exist.
        """
        logger.info("Attempting to delete cloud service %s.", name)
        # 'not ...' instead of '== False' (PEP 8: never compare to booleans).
        if not self._resource_exists(
                lambda: self.sms.get_hosted_service_properties(name)):
            # Logger.warn is a deprecated alias of Logger.warning.
            logger.warning("Cloud service %s not found: nothing to delete.",
                           name)
        else:
            self.sms.delete_hosted_service(name)
            logger.info("Cloud service %s deleted.", name)

    def _deleteAffinityGroup(self):
        """
        Deletes the affinity group for the web site.

        The group name comes from the configuration. Deletion is skipped
        (with a warning) when the group does not exist.
        """
        name = self.config.getAffinityGroupName()
        logger.info("Attempting to delete affinity group %s.", name)
        # 'not ...' instead of '== False' (PEP 8: never compare to booleans).
        if not self._resource_exists(
                lambda: self.sms.get_affinity_group_properties(name)):
            # Logger.warn is a deprecated alias of Logger.warning.
            logger.warning("Affinity group %s not found: nothing to delete.",
                           name)
        else:
            self.sms.delete_affinity_group(name)
            logger.info("Affinity group %s deleted.", name)

    def _ensureServiceBusNamespaceExists(self):
        """
        Creates the Azure Service Bus Namespace if it does not exist.

        The namespace name and location come from the configuration.
        Creation is asynchronous, so this blocks until the namespace
        reaches the 'Active' state.
        """
        name = self.config.getServiceBusNamespace()
        logger.info(
            "Checking for existence of service bus namespace (name=%s).", name)
        if self._resource_exists(lambda: self.sbms.get_namespace(name)):
            # Logger.warn is a deprecated alias of Logger.warning.
            logger.warning("A namespace named %s already exists.", name)
        else:
            self.sbms.create_namespace(name, self.config.getServiceLocation())
            # Namespace creation is asynchronous: wait until it is usable.
            self._wait_for_namespace_active(name)
            logger.info("Created namespace %s.", name)

    def _ensureServiceBusQueuesExist(self):
        """
        Creates Azure service bus queues required by the service.

        Connects to the configured namespace with its default key and makes
        sure each required queue exists.
        """
        logger.info("Checking for existence of Service Bus Queues.")
        namespace = self.sbms.get_namespace(
            self.config.getServiceBusNamespace())
        sbs = ServiceBusService(namespace.name,
                                namespace.default_key,
                                issuer='owner')
        for queue_name in ('jobresponsequeue',
                           'windowscomputequeue',
                           'linuxcomputequeue'):
            logger.info("Checking for existence of Queue %s.", queue_name)
            # fail_on_exist=False makes this a no-op for existing queues.
            sbs.create_queue(queue_name, fail_on_exist=False)
            logger.info("Queue %s is ready.", queue_name)

    def _deleteServiceBusNamespace(self):
        """
        Deletes the Azure Service Bus Namespace.

        The namespace name comes from the configuration. Deletion is
        skipped (with a warning) when the namespace does not exist.
        """
        name = self.config.getServiceBusNamespace()
        logger.info("Attempting to delete service bus namespace %s.", name)
        # 'not ...' instead of '== False' (PEP 8: never compare to booleans).
        if not self._resource_exists(lambda: self.sbms.get_namespace(name)):
            # Logger.warn is a deprecated alias of Logger.warning.
            logger.warning("Namespace %s not found: nothing to delete.", name)
        else:
            self.sbms.delete_namespace(name)
            logger.info("Namespace %s deleted.", name)

    def Deploy(self, assets):
        """
        Creates a deployment.

        assets: The set of assets to create. The full set is: {'build', 'web'}.

        Raises ValueError when the asset set is empty.
        """
        if not assets:
            raise ValueError("Set of assets to deploy is not specified.")
        logger.info("Starting deployment operation.")
        # Shared prerequisites for every asset group.
        self._ensureAffinityGroupExists()
        self._ensureStorageAccountExists(self.config.getStorageAccountName())

        # Build instance
        if 'build' in assets:
            build_service = self.config.getBuildServiceName()
            self._ensureServiceExists(build_service,
                                      self.config.getAffinityGroupName())
            self._ensureServiceCertificateExists(build_service)
            self._ensureBuildMachineExists()

        # Web instances
        if 'web' in assets:
            web_service = self.config.getServiceName()
            self._ensureStorageAccountExists(
                self.config.getServiceStorageAccountName())
            self._ensureStorageContainersExist()
            self.ensureStorageHasCorsConfiguration()
            self._ensureServiceBusNamespaceExists()
            self._ensureServiceBusQueuesExist()
            self._ensureServiceExists(web_service,
                                      self.config.getAffinityGroupName())
            self._ensureServiceCertificateExists(web_service)
            self._ensureVirtualMachinesExist()

        logger.info("Deployment operation is complete.")

    def Teardown(self, assets):
        """
        Deletes a deployment.

        assets: The set of assets to delete. The full set is: {'web', 'build'}.

        Raises ValueError when the asset set is empty.
        """
        if not assets:
            raise ValueError("Set of assets to teardown is not specified.")
        logger.info("Starting teardown operation.")
        teardown_web = 'web' in assets
        teardown_build = 'build' in assets
        if teardown_web:
            web_service = self.config.getServiceName()
            self._deleteVirtualMachines(web_service)
            self._deleteService(web_service)
            self._deleteStorageAccount(
                self.config.getServiceStorageAccountName())
        if teardown_build:
            build_service = self.config.getBuildServiceName()
            self._deleteVirtualMachines(build_service)
            self._deleteService(build_service)
            self._deleteStorageAccount(self.config.getStorageAccountName())
        if teardown_web and teardown_build:
            # Shared resources go last, once both asset groups are gone.
            self._deleteServiceBusNamespace()
            self._deleteAffinityGroup()
        logger.info("Teardown operation is complete.")

    def getSettingsFileContent(self):
        """
        Generates the content of the local Django settings file.

        Returns: The settings module source as a single newline-joined string.
        """
        allowed_hosts = [
            '{0}.cloudapp.net'.format(self.config.getServiceName())
        ]
        allowed_hosts.extend(self.config.getWebHostnames())
        allowed_hosts.extend(['www.codalab.org', 'codalab.org'])
        ssl_allowed_hosts = self.config.getSslRewriteHosts()
        if len(ssl_allowed_hosts) == 0:
            ssl_allowed_hosts = allowed_hosts

        storage_key = self._getStorageAccountKey(
            self.config.getServiceStorageAccountName())
        namespace = self.sbms.get_namespace(
            self.config.getServiceBusNamespace())

        if len(self.config.getSslCertificateInstalledPath()) > 0:
            bundle_auth_scheme = "https"
        else:
            bundle_auth_scheme = "http"
        # NOTE(review): ssl_allowed_hosts can no longer be empty here (it
        # falls back to allowed_hosts above, which always has one element),
        # so the first branch below is effectively dead code.
        if len(ssl_allowed_hosts) == 0:
            bundle_auth_host = '{0}.cloudapp.net'.format(
                self.config.getServiceName())
        else:
            bundle_auth_host = ssl_allowed_hosts[0]
        bundle_auth_url = "{0}://{1}".format(bundle_auth_scheme,
                                             bundle_auth_host)

        lines = [
            "from base import Base",
            "from default import *",
            "from configurations import Settings",
            "",
            "import sys",
            "from os.path import dirname, abspath, join",
            "from pkgutil import extend_path",
            "import codalab",
            "",
            "class {0}(Base):".format(self.config.getDjangoConfiguration()),
            "",
            "    DEBUG=False",
            "",
            "    ALLOWED_HOSTS = {0}".format(allowed_hosts),
            "",
            "    SSL_PORT = '443'",
            "    SSL_CERTIFICATE = '{0}'".format(
                self.config.getSslCertificateInstalledPath()),
            "    SSL_CERTIFICATE_KEY = '{0}'".format(
                self.config.getSslCertificateKeyInstalledPath()),
            "    SSL_ALLOWED_HOSTS = {0}".format(ssl_allowed_hosts),
            "",
            "    DEFAULT_FILE_STORAGE = 'codalab.azure_storage.AzureStorage'",
            "    AZURE_ACCOUNT_NAME = '{0}'".format(
                self.config.getServiceStorageAccountName()),
            "    AZURE_ACCOUNT_KEY = '{0}'".format(storage_key),
            "    AZURE_CONTAINER = '{0}'".format(
                self.config.getServicePublicStorageContainer()),
            "    BUNDLE_AZURE_ACCOUNT_NAME = AZURE_ACCOUNT_NAME",
            "    BUNDLE_AZURE_ACCOUNT_KEY = AZURE_ACCOUNT_KEY",
            "    BUNDLE_AZURE_CONTAINER = '{0}'".format(
                self.config.getServiceBundleStorageContainer()),
            "",
            "    SBS_NAMESPACE = '{0}'".format(
                self.config.getServiceBusNamespace()),
            "    SBS_ISSUER = 'owner'",
            "    SBS_ACCOUNT_KEY = '{0}'".format(namespace.default_key),
            "    SBS_RESPONSE_QUEUE = 'jobresponsequeue'",
            "    SBS_COMPUTE_QUEUE = 'windowscomputequeue'",
            "",
            "    EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'",
            "    EMAIL_HOST = '{0}'".format(self.config.getEmailHost()),
            # Bug fix: these templates contained a redacted literal
            # ('******') instead of a '{0}' placeholder, so str.format
            # silently discarded the configured values.
            "    EMAIL_HOST_USER = '{0}'".format(self.config.getEmailUser()),
            "    EMAIL_HOST_PASSWORD = '{0}'".format(
                self.config.getEmailPassword()),
            "    EMAIL_PORT = 587",
            "    EMAIL_USE_TLS = True",
            # NOTE(review): these addresses look redacted
            # ('*****@*****.**'); restore real values before deploying.
            "    DEFAULT_FROM_EMAIL = '*****@*****.**'",
            "    SERVER_EMAIL = '*****@*****.**'",
            "",
            "    # Django secret",
            "    SECRET_KEY = '{0}'".format(self.config.getDjangoSecretKey()),
            "",
            "    ADMINS = (('CodaLab', '*****@*****.**'),)",
            "    MANAGERS = ADMINS",
            "",
            "    DATABASES = {",
            "        'default': {",
            "            'ENGINE': '{0}',".format(
                self.config.getDatabaseEngine()),
            "            'NAME': '{0}',".format(self.config.getDatabaseName()),
            # Bug fix: same redaction issue as the e-mail credentials above.
            "            'USER': '{0}',".format(self.config.getDatabaseUser()),
            "            'PASSWORD': '{0}',".format(
                self.config.getDatabasePassword()),
            "            'HOST': '{0}',".format(self.config.getDatabaseHost()),
            "            'PORT': '{0}', ".format(
                self.config.getDatabasePort()),
            "            'OPTIONS' : {",
            "                'init_command': 'SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED',",
            "                'read_timeout': 5",
            "            }",
            "        }",
            "    }",
            "",
            "    BUNDLE_DB_NAME = '{0}'".format(
                self.config.getBundleServiceDatabaseName()),
            # Bug fix: same redaction issue as above.
            "    BUNDLE_DB_USER = '{0}'".format(
                self.config.getBundleServiceDatabaseUser()),
            "    BUNDLE_DB_PASSWORD = '{0}'".format(
                self.config.getBundleServiceDatabasePassword()),
            "    BUNDLE_APP_ID = '{0}'".format(
                self.config.getBundleServiceAppId()),
            "    BUNDLE_APP_KEY = '{0}'".format(
                self.config.getBundleServiceAppKey()),
            "    BUNDLE_AUTH_URL = '{0}'".format(bundle_auth_url),
            "",
            "    BUNDLE_SERVICE_URL = '{0}'".format(
                self.config.getBundleServiceUrl()),
            "    BUNDLE_SERVICE_CODE_PATH = '/home/{0}/deploy/bundles'".format(
                self.config.getVirtualMachineLogonUsername()),
            "    sys.path.append(BUNDLE_SERVICE_CODE_PATH)",
            "    codalab.__path__ = extend_path(codalab.__path__, codalab.__name__)",
            "",
        ]
        # Preview level 1 enables worksheets; anything above also enables
        # beta features.
        preview = self.config.getShowPreviewFeatures()
        if preview >= 1:
            if preview == 1:
                lines.append("    PREVIEW_WORKSHEETS = True")
            if preview > 1:
                lines.append("    SHOW_BETA_FEATURES = True")
            lines.append("")
        return '\n'.join(lines)
# Ejemplo n.º 3
# 0
class Deployment(object):
    """
    Helper class to handle deployment of the web site.
    """
    def __init__(self, config):
        """
        Initializes the deployment helper.

        config: Configuration object providing the Azure subscription ID and
            the path to the management certificate.
        """
        self.config = config
        subscription_id = config.getAzureSubscriptionId()
        cert_path = config.getAzureCertificatePath()
        self.sms = ServiceManagementService(subscription_id, cert_path)
        self.sbms = ServiceBusManagementService(subscription_id, cert_path)

    @staticmethod
    def _resource_exists(get_resource):
        """
        Helper to check for the existence of a resource in Azure.

        get_resource: Parameter-less function to invoke in order to get the resource. The resource
            is assumed to exist when the call to get_resource() returns a value that is not None.
            If the call to get_resource() returns None or throws a WindowsAzureMissingResourceError
            exception, then it is assumed that the resource does not exist.

        Returns: A boolean value which is True if the resource exists.
        """
        resource = None
        try:
            resource = get_resource()
        except WindowsAzureMissingResourceError:
            pass
        return resource is not None

    def _wait_for_operation_success(self, request_id, timeout=600, wait=5):
        """
        Waits for an asynchronous Azure operation to finish.

        request_id: The ID of the request to track.
        timeout: Maximum duration (in seconds) allowed for the operation to complete.
        wait: Wait time (in seconds) between consecutive calls to fetch the latest operation status.

        Raises an Exception when the operation is still 'InProgress' after
        the timeout, or when it finishes with a status other than
        'Succeeded'.
        """
        result = self.sms.get_operation_status(request_id)
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        while result.status == 'InProgress':
            if now >= max_time:
                raise Exception("Operation did not finish within the expected timeout")
            logger.info('Waiting for operation to finish (last_status=%s wait_so_far=%s)',
                        result.status, round(now - start_time, 1))
            # Cap the sleep so we never wait past the deadline, and never
            # sleep a negative duration.
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            result = self.sms.get_operation_status(request_id)
            now = time.time()
        if result.status != 'Succeeded':
            raise Exception("Operation terminated but it did not succeed.")

    def _wait_for_role_instance_status(self, role_instance_name, service_name, expected_status, timeout=600, wait=5):
        """
        Waits for a role instance within the web site's cloud service to reach the status specified.

        role_instance_name: Name of the role instance.
        service_name: Name of service in which to find the role instance.
        expected_status: Expected instance status.
        timeout: Maximum duration (in seconds) allowed for the operation to complete.
        wait: Wait time (in seconds) between consecutive calls to fetch the latest role status.

        Raises an Exception when the expected status is not observed before
        the timeout elapses.
        """
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        while True:
            status = None
            # The deployment is assumed to share its name with the service.
            deployment = self.sms.get_deployment_by_name(service_name, service_name)
            for role_instance in deployment.role_instance_list:
                if role_instance.instance_name == role_instance_name:
                    status = role_instance.instance_status
            if status == expected_status:
                break
            if now >= max_time:
                raise Exception("Operation did not finish within the expected timeout")
            logger.info('Waiting for deployment status: expecting %s but got %s (wait_so_far=%s)',
                        expected_status, status, round(now - start_time, 1))
            # Cap the sleep so we never wait past the deadline.
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            now = time.time()

    def _wait_for_disk_deletion(self, disk_name, timeout=600, wait=5):
        """
        Waits for a VM disk to disappear when it is being deleted.

        disk_name: Name of the VHD.
        timeout: Maximum duration (in seconds) allowed for the operation to complete.
        wait: Wait time (in seconds) between consecutive calls to check for the existence of the disk.

        Raises an Exception when the disk still exists after the timeout.
        """
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        logger.info("Checking that disk %s has been deleted.", disk_name)
        while self._resource_exists(lambda: self.sms.get_disk(disk_name)):
            if now >= max_time:
                # Bug fix: the message used a '%s' placeholder with
                # str.format, so the disk name was never substituted.
                raise Exception("Disk {0} was not deleted within the expected timeout.".format(disk_name))
            logger.info("Waiting for disk %s to disappear (wait_so_far=%s).", disk_name, round(now - start_time, 1))
            # Cap the sleep so we never wait past the deadline.
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            now = time.time()
        logger.info("Disk %s has been deleted.", disk_name)

    def _wait_for_namespace_active(self, name, timeout=600, wait=5):
        """
        Waits for a service bus namespace to become Active.

        name: Namespace name.
        timeout: Maximum duration (in seconds) allowed for the operation to complete.
        wait: Wait time (in seconds) between consecutive checks of the namespace status.

        Raises an Exception when the namespace is not Active before the
        timeout elapses.
        """
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        while True:
            # Re-read the namespace properties on every iteration (the
            # original also dead-stored status = None first).
            status = self.sbms.get_namespace(name).status
            if status == 'Active':
                break
            if now >= max_time:
                raise Exception("Operation did not finish within the expected timeout")
            logger.info('Waiting for namespace status: expecting Active but got %s (wait_so_far=%s)',
                        status, round(now - start_time, 1))
            # Cap the sleep so we never wait past the deadline.
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            now = time.time()

    def _getRoleInstances(self, service_name):
        """
        Returns the role instances in the given cloud service deployment.

        service_name: Cloud service name; the deployment is assumed to share it.

        Returns: Dictionary mapping role instance names to RoleInstance
            objects; empty when the deployment does not exist.
        """
        # Fetch the deployment once instead of probing for existence and
        # then fetching again (the original issued the same API call twice).
        try:
            deployment = self.sms.get_deployment_by_name(service_name, service_name)
        except WindowsAzureMissingResourceError:
            deployment = None
        role_instances = {}
        if deployment is not None:
            for role_instance in deployment.role_instance_list:
                role_instances[role_instance.instance_name] = role_instance
        return role_instances

    def _ensureAffinityGroupExists(self):
        """
        Creates the affinity group if it does not exist.

        The group name and location come from the configuration.
        """
        name = self.config.getAffinityGroupName()
        location = self.config.getServiceLocation()
        logger.info("Checking for existence of affinity group (name=%s; location=%s).", name, location)
        if self._resource_exists(lambda: self.sms.get_affinity_group_properties(name)):
            # Logger.warn is a deprecated alias of Logger.warning.
            logger.warning("An affinity group named %s already exists.", name)
        else:
            self.sms.create_affinity_group(name, name, location)
            logger.info("Created affinity group %s.", name)

    def _ensureStorageAccountExists(self, name):
        """
        Creates the storage account if it does not exist.

        name: Name of the storage account.

        Blocks until the asynchronous create operation succeeds.
        """
        logger.info("Checking for existence of storage account (name=%s).", name)
        if self._resource_exists(lambda: self.sms.get_storage_account_properties(name)):
            # Logger.warn is a deprecated alias of Logger.warning.
            logger.warning("A storage account named %s already exists.", name)
        else:
            result = self.sms.create_storage_account(name, "", name, affinity_group=self.config.getAffinityGroupName())
            self._wait_for_operation_success(result.request_id, timeout=self.config.getAzureOperationTimeout())
            logger.info("Created storage account %s.", name)

    def _getStorageAccountKey(self, account_name):
        """
        Gets the storage account key (primary key) for the given storage account.

        account_name: Name of the storage account.
        """
        keys = self.sms.get_storage_account_keys(account_name)
        return keys.storage_service_keys.primary

    def _ensureStorageContainersExist(self):
        """
        Creates Blob storage containers required by the service.

        The public container allows blob-level anonymous access; the bundle
        container stays private.
        """
        logger.info("Checking for existence of Blob containers.")
        account_name = self.config.getServiceStorageAccountName()
        blob_service = BlobService(account_name,
                                   self._getStorageAccountKey(account_name))
        containers = (
            (self.config.getServicePublicStorageContainer(), 'blob'),
            (self.config.getServiceBundleStorageContainer(), None),
        )
        for container_name, access in containers:
            logger.info("Checking for existence of Blob container %s.", container_name)
            # fail_on_exist=False makes this a no-op for existing containers.
            blob_service.create_container(container_name,
                                          x_ms_blob_public_access=access,
                                          fail_on_exist=False)
            if access is None:
                access_info = 'private'
            else:
                access_info = 'public {0}'.format(access)
            logger.info("Blob container %s is ready (access: %s).", container_name, access_info)

    def ensureStorageHasCorsConfiguration(self):
        """
        Ensures Blob storage container for bundles is configured to allow cross-origin resource sharing.

        Installs a single CORS rule allowing PUT requests from the
        configured origins, with a 30-minute preflight cache.
        """
        logger.info("Setting CORS rules.")
        account_name = self.config.getServiceStorageAccountName()
        account_key = self._getStorageAccountKey(account_name)

        rule = CorsRule()
        rule.allowed_origins = self.config.getServiceStorageCorsAllowedOrigins()
        rule.allowed_methods = 'PUT'
        rule.exposed_headers = '*'
        rule.allowed_headers = '*'
        rule.max_age_in_seconds = 1800

        rules = Cors()
        rules.cors_rule.append(rule)
        set_storage_service_cors_properties(account_name, account_key, rules)

    def _ensureServiceExists(self, service_name, affinity_group_name):
        """
        Creates the specified cloud service host if it does not exist.

        service_name: Name of the cloud service.
        affinity_group_name: Name of the affinity group (which should exists).
        """
        logger.info("Checking for existence of cloud service (name=%s).", service_name)
        if self._resource_exists(lambda: self.sms.get_hosted_service_properties(service_name)):
            # Logger.warn is a deprecated alias of Logger.warning.
            logger.warning("A cloud service named %s already exists.", service_name)
        else:
            self.sms.create_hosted_service(service_name, service_name, affinity_group=affinity_group_name)
            logger.info("Created cloud service %s.", service_name)

    def _ensureServiceCertificateExists(self, service_name):
        """
        Adds certificate to the specified cloud service.

        service_name: Name of the target cloud service (which should exist).

        Raises an Exception when the certificate file yields no data.
        """
        cert_format = self.config.getServiceCertificateFormat()
        cert_algorithm = self.config.getServiceCertificateAlgorithm()
        cert_thumbprint = self.config.getServiceCertificateThumbprint()
        cert_path = self.config.getServiceCertificateFilename()
        cert_password = self.config.getServiceCertificatePassword()
        logger.info("Checking for existence of cloud service certificate for service %s.", service_name)
        # PEP 8 (E731): pass the probe inline rather than binding a lambda
        # to a name.
        if self._resource_exists(
                lambda: self.sms.get_service_certificate(
                    service_name, cert_algorithm, cert_thumbprint)):
            logger.info("Found expected cloud service certificate.")
        else:
            with open(cert_path, 'rb') as f:
                cert_data = base64.b64encode(f.read())
            if not cert_data:
                raise Exception("Detected invalid certificate data.")
            result = self.sms.add_service_certificate(service_name, cert_data, cert_format, cert_password)
            self._wait_for_operation_success(result.request_id, timeout=self.config.getAzureOperationTimeout())
            logger.info("Added service certificate.")

    def _assertOsImageExists(self, os_image_name):
        """
        Asserts that the named OS image exists.

        os_image_name: Name of the OS image to look up.

        Raises an Exception when the image cannot be found.
        """
        logger.info("Checking for availability of OS image (name=%s).", os_image_name)
        image = self.sms.get_os_image(os_image_name)
        if image is None:
            raise Exception("Unable to find OS Image '{0}'.".format(os_image_name))

    def _ensureVirtualMachinesExist(self):
        """
        Creates the VMs for the web site.

        One Linux VM is provisioned per configured instance. The first VM
        creates the deployment itself; each subsequent VM is added as an
        extra role. All VMs share a load-balanced HTTP endpoint on port 80,
        and each exposes SSH on a distinct public port. Existing role
        instances are skipped.
        """
        service_name = self.config.getServiceName()
        cert_thumbprint = self.config.getServiceCertificateThumbprint()
        vm_username = self.config.getVirtualMachineLogonUsername()
        vm_password = self.config.getVirtualMachineLogonPassword()
        vm_role_size = self.config.getServiceInstanceRoleSize()
        vm_numbers = self.config.getServiceInstanceCount()
        if vm_numbers < 1:
            raise Exception("Detected an invalid number of instances: {0}.".format(vm_numbers))

        # Fail early if the configured OS image is not available.
        self._assertOsImageExists(self.config.getServiceOSImageName())

        role_instances = self._getRoleInstances(service_name)
        for vm_number in range(1, vm_numbers+1):
            # VM names follow the pattern '<service>-1', '<service>-2', ...
            vm_hostname = '{0}-{1}'.format(service_name, vm_number)
            if vm_hostname in role_instances:
                logger.warn("Role instance %s already exists: skipping creation.", vm_hostname)
                continue

            logger.info("Role instance %s provisioning begins.", vm_hostname)
            # OS disk VHD lives in the 'vhds' container of the service
            # storage account and is named after the VM.
            vm_diskname = '{0}.vhd'.format(vm_hostname)
            vm_disk_media_link = 'http://{0}.blob.core.windows.net/vhds/{1}'.format(
                self.config.getServiceStorageAccountName(), vm_diskname
            )
            # Each VM gets a unique public SSH port (base port + instance
            # number) mapped to its local port 22.
            ssh_port = str(self.config.getServiceInstanceSshPort() + vm_number)

            os_hd = OSVirtualHardDisk(self.config.getServiceOSImageName(),
                                      vm_disk_media_link,
                                      disk_name=vm_diskname,
                                      disk_label=vm_diskname)
            # Password logon plus SSH key access derived from the service
            # certificate thumbprint.
            linux_config = LinuxConfigurationSet(vm_hostname, vm_username, vm_password, True)
            linux_config.ssh.public_keys.public_keys.append(
                PublicKey(cert_thumbprint, u'/home/{0}/.ssh/authorized_keys'.format(vm_username))
            )
            linux_config.ssh.key_pairs.key_pairs.append(
                KeyPair(cert_thumbprint, u'/home/{0}/.ssh/id_rsa'.format(vm_username))
            )
            network_config = ConfigurationSet()
            network_config.configuration_set_type = 'NetworkConfiguration'
            ssh_endpoint = ConfigurationSetInputEndpoint(name='SSH',
                                                         protocol='TCP',
                                                         port=ssh_port,
                                                         local_port=u'22')
            network_config.input_endpoints.input_endpoints.append(ssh_endpoint)
            # HTTP traffic is load-balanced across all instances via a
            # shared endpoint set named after the service.
            http_endpoint = ConfigurationSetInputEndpoint(name='HTTP',
                                                          protocol='TCP',
                                                          port=u'80',
                                                          local_port=u'80',
                                                          load_balanced_endpoint_set_name=service_name)
            http_endpoint.load_balancer_probe.port = '80'
            http_endpoint.load_balancer_probe.protocol = 'TCP'
            network_config.input_endpoints.input_endpoints.append(http_endpoint)

            # The first VM creates the deployment; subsequent VMs are added
            # as roles to that deployment.
            if vm_number == 1:
                result = self.sms.create_virtual_machine_deployment(service_name=service_name,
                                                                    deployment_name=service_name,
                                                                    deployment_slot='Production',
                                                                    label=vm_hostname,
                                                                    role_name=vm_hostname,
                                                                    system_config=linux_config,
                                                                    os_virtual_hard_disk=os_hd,
                                                                    network_config=network_config,
                                                                    availability_set_name=service_name,
                                                                    data_virtual_hard_disks=None,
                                                                    role_size=vm_role_size)
                self._wait_for_operation_success(result.request_id,
                                                 timeout=self.config.getAzureOperationTimeout())
                self._wait_for_role_instance_status(vm_hostname, service_name, 'ReadyRole',
                                                    self.config.getAzureOperationTimeout())
            else:
                result = self.sms.add_role(service_name=service_name,
                                           deployment_name=service_name,
                                           role_name=vm_hostname,
                                           system_config=linux_config,
                                           os_virtual_hard_disk=os_hd,
                                           network_config=network_config,
                                           availability_set_name=service_name,
                                           role_size=vm_role_size)
                self._wait_for_operation_success(result.request_id,
                                                 timeout=self.config.getAzureOperationTimeout())
                self._wait_for_role_instance_status(vm_hostname, service_name, 'ReadyRole',
                                                    self.config.getAzureOperationTimeout())

            logger.info("Role instance %s has been created.", vm_hostname)

    def _deleteVirtualMachines(self, service_name):
        """
        Deletes the VMs in the given cloud service.

        The deployment (named after the service) is deleted with the
        'comp=media' option so the VM OS disk blobs are removed together
        with the deployment, then we wait until each disk has actually
        disappeared.

        :param str service_name: Name of the cloud service (also used as
            the deployment name).
        """
        if self._resource_exists(lambda: self.sms.get_deployment_by_name(service_name, service_name)) == False:
            logger.warn("Deployment %s not found: no VMs to delete.", service_name)
        else:
            logger.info("Attempting to delete deployment %s.", service_name)
            # Get set of role instances before we remove them
            role_instances = self._getRoleInstances(service_name)

            def update_request(request):
                """
                A filter to intercept the HTTP request sent by the ServiceManagementService
                so we can take advantage of a newer feature ('comp=media') in the delete deployment API
                (see http://msdn.microsoft.com/en-us/library/windowsazure/ee460812.aspx)
                """
                # Force the newer API version that understands comp=media.
                hdrs = []
                for name, value in request.headers:
                    if 'x-ms-version' == name:
                        value = '2013-08-01'
                    hdrs.append((name, value))
                request.headers = hdrs
                request.path = request.path + '?comp=media'
                #pylint: disable=W0212
                response = self.sms._filter(request)
                return response

            # Use a throwaway client so patching _filter does not affect self.sms.
            svc = ServiceManagementService(self.sms.subscription_id, self.sms.cert_file)
            #pylint: disable=W0212
            svc._filter = update_request
            result = svc.delete_deployment(service_name, service_name)
            logger.info("Deployment %s deletion in progress: waiting for delete_deployment operation.", service_name)
            self._wait_for_operation_success(result.request_id)
            logger.info("Deployment %s deletion in progress: waiting for VM disks to be removed.", service_name)
            # Now wait for the disks to disappear
            for role_instance_name in role_instances.keys():
                # OS disk blobs are named "<role instance>.vhd".
                disk_name = "{0}.vhd".format(role_instance_name)
                self._wait_for_disk_deletion(disk_name)
            logger.info("Deployment %s deleted.", service_name)

    def _ensureBuildMachineExists(self):
        """
        Provisions the VM for the build server, unless a role instance
        with the expected hostname is already present in the service.
        """
        service_name = self.config.getBuildServiceName()
        storage_name = self.config.getStorageAccountName()
        cert_thumbprint = self.config.getServiceCertificateThumbprint()
        username = self.config.getVirtualMachineLogonUsername()
        password = self.config.getVirtualMachineLogonPassword()
        vm_hostname = service_name

        if vm_hostname in self._getRoleInstances(service_name):
            logger.warn("Role instance %s already exists: skipping creation.", vm_hostname)
            return

        logger.info("Role instance %s provisioning begins.", vm_hostname)
        self._assertOsImageExists(self.config.getBuildOSImageName())

        disk_name = '{0}.vhd'.format(vm_hostname)
        media_link = 'http://{0}.blob.core.windows.net/vhds/{1}'.format(storage_name, disk_name)
        os_disk = OSVirtualHardDisk(self.config.getBuildOSImageName(),
                                    media_link,
                                    disk_name=disk_name,
                                    disk_label=disk_name)

        # Linux provisioning: password logon plus SSH key material tied to
        # the service certificate thumbprint.
        linux_config = LinuxConfigurationSet(vm_hostname, username, password, True)
        linux_config.ssh.public_keys.public_keys.append(
            PublicKey(cert_thumbprint, u'/home/{0}/.ssh/authorized_keys'.format(username)))
        linux_config.ssh.key_pairs.key_pairs.append(
            KeyPair(cert_thumbprint, u'/home/{0}/.ssh/id_rsa'.format(username)))

        # Only an SSH endpoint is exposed on the build machine.
        network_config = ConfigurationSet()
        network_config.configuration_set_type = 'NetworkConfiguration'
        network_config.input_endpoints.input_endpoints.append(
            ConfigurationSetInputEndpoint(name='SSH',
                                          protocol='TCP',
                                          port=u'22',
                                          local_port=u'22'))

        result = self.sms.create_virtual_machine_deployment(
            service_name=service_name,
            deployment_name=service_name,
            deployment_slot='Production',
            label=vm_hostname,
            role_name=vm_hostname,
            system_config=linux_config,
            os_virtual_hard_disk=os_disk,
            network_config=network_config,
            availability_set_name=None,
            data_virtual_hard_disks=None,
            role_size=self.config.getBuildInstanceRoleSize())
        self._wait_for_operation_success(result.request_id,
                                         timeout=self.config.getAzureOperationTimeout())
        self._wait_for_role_instance_status(vm_hostname, service_name, 'ReadyRole',
                                            self.config.getAzureOperationTimeout())
        logger.info("Role instance %s has been created.", vm_hostname)

    def _deleteStorageAccount(self, name):
        """
        Deletes the storage account for the web site, if it exists.

        :param str name: Name of the Azure storage account to remove.
        """
        logger.info("Attempting to delete storage account %s.", name)
        # PEP 8: never compare to False with ==; use `not`.
        if not self._resource_exists(lambda: self.sms.get_storage_account_properties(name)):
            logger.warn("Storage account %s not found: nothing to delete.", name)
        else:
            self.sms.delete_storage_account(name)
            logger.info("Storage account %s deleted.", name)

    def _deleteService(self, name):
        """
        Deletes the specified cloud service, if it exists.

        :param str name: Name of the hosted (cloud) service to remove.
        """
        logger.info("Attempting to delete cloud service %s.", name)
        # PEP 8: never compare to False with ==; use `not`.
        if not self._resource_exists(lambda: self.sms.get_hosted_service_properties(name)):
            logger.warn("Cloud service %s not found: nothing to delete.", name)
        else:
            self.sms.delete_hosted_service(name)
            logger.info("Cloud service %s deleted.", name)

    def _deleteAffinityGroup(self):
        """
        Deletes the affinity group for the web site, if it exists.

        The group name is read from the deployment configuration.
        """
        name = self.config.getAffinityGroupName()
        logger.info("Attempting to delete affinity group %s.", name)
        # PEP 8: never compare to False with ==; use `not`.
        if not self._resource_exists(lambda: self.sms.get_affinity_group_properties(name)):
            logger.warn("Affinity group %s not found: nothing to delete.", name)
        else:
            self.sms.delete_affinity_group(name)
            logger.info("Affinity group %s deleted.", name)

    def _ensureServiceBusNamespaceExists(self):
        """
        Creates the Azure Service Bus namespace unless it already exists.
        """
        name = self.config.getServiceBusNamespace()
        logger.info("Checking for existence of service bus namespace (name=%s).", name)
        already_there = self._resource_exists(lambda: self.sbms.get_namespace(name))
        if already_there:
            logger.warn("A namespace named %s already exists.", name)
            return
        # Create it, then block until the namespace reports Active.
        self.sbms.create_namespace(name, self.config.getServiceLocation())
        self._wait_for_namespace_active(name)
        logger.info("Created namespace %s.", name)

    def _ensureServiceBusQueuesExist(self):
        """
        Idempotently creates the Azure service bus queues the service needs.
        """
        logger.info("Checking for existence of Service Bus Queues.")
        namespace = self.sbms.get_namespace(self.config.getServiceBusNamespace())
        bus = ServiceBusService(namespace.name, namespace.default_key, issuer='owner')
        # fail_on_exist=False makes each create a no-op when the queue exists.
        for queue_name in ('jobresponsequeue', 'windowscomputequeue', 'linuxcomputequeue'):
            logger.info("Checking for existence of Queue %s.", queue_name)
            bus.create_queue(queue_name, fail_on_exist=False)
            logger.info("Queue %s is ready.", queue_name)

    def _deleteServiceBusNamespace(self):
        """
        Deletes the Azure Service Bus namespace, if it exists.

        The namespace name is read from the deployment configuration.
        """
        name = self.config.getServiceBusNamespace()
        logger.info("Attempting to delete service bus namespace %s.", name)
        # PEP 8: never compare to False with ==; use `not`.
        if not self._resource_exists(lambda: self.sbms.get_namespace(name)):
            logger.warn("Namespace %s not found: nothing to delete.", name)
        else:
            self.sbms.delete_namespace(name)
            logger.info("Namespace %s deleted.", name)

    def Deploy(self, assets):
        """
        Creates a deployment.

        assets: The set of assets to create. The full set is: {'build', 'web'}.

        :raises ValueError: If ``assets`` is empty.
        """
        if not assets:
            raise ValueError("Set of assets to deploy is not specified.")
        logger.info("Starting deployment operation.")
        # Shared infrastructure needed by every asset type.
        self._ensureAffinityGroupExists()
        self._ensureStorageAccountExists(self.config.getStorageAccountName())
        # Build instance
        if 'build' in assets:
            self._ensureServiceExists(self.config.getBuildServiceName(), self.config.getAffinityGroupName())
            self._ensureServiceCertificateExists(self.config.getBuildServiceName())
            self._ensureBuildMachineExists()
        # Web instances
        if 'web' in assets:
            self._ensureStorageAccountExists(self.config.getServiceStorageAccountName())
            self._ensureStorageContainersExist()
            self.ensureStorageHasCorsConfiguration()
            self._ensureServiceBusNamespaceExists()
            self._ensureServiceBusQueuesExist()
            self._ensureServiceExists(self.config.getServiceName(), self.config.getAffinityGroupName())
            self._ensureServiceCertificateExists(self.config.getServiceName())
            self._ensureVirtualMachinesExist()
        logger.info("Deployment operation is complete.")

    def Teardown(self, assets):
        """
        Deletes a deployment.

        assets: The set of assets to delete. The full set is: {'web', 'build'}.

        :raises ValueError: If ``assets`` is empty.
        """
        if not assets:
            raise ValueError("Set of assets to teardown is not specified.")
        logger.info("Starting teardown operation.")
        if 'web' in assets:
            self._deleteVirtualMachines(self.config.getServiceName())
            self._deleteService(self.config.getServiceName())
            self._deleteStorageAccount(self.config.getServiceStorageAccountName())
        if 'build' in assets:
            self._deleteVirtualMachines(self.config.getBuildServiceName())
            self._deleteService(self.config.getBuildServiceName())
            self._deleteStorageAccount(self.config.getStorageAccountName())
        # Shared assets are removed only on a full teardown of both kinds.
        if ('web' in assets) and ('build' in assets):
            self._deleteServiceBusNamespace()
            self._deleteAffinityGroup()
        logger.info("Teardown operation is complete.")

    def getSettingsFileContent(self):
        """
        Generates the content of the local Django settings file.

        Fix: several generated lines called ``.format()`` on strings whose
        ``{0}`` placeholder had been replaced by literal asterisks, so the
        configured credentials were silently discarded; the placeholders
        are restored.

        :returns: The settings module source as a single string.
        """
        allowed_hosts = ['{0}.cloudapp.net'.format(self.config.getServiceName())]
        allowed_hosts.extend(self.config.getWebHostnames())
        allowed_hosts.extend(['www.codalab.org', 'codalab.org'])
        ssl_allowed_hosts = self.config.getSslRewriteHosts()
        if len(ssl_allowed_hosts) == 0:
            ssl_allowed_hosts = allowed_hosts

        storage_key = self._getStorageAccountKey(self.config.getServiceStorageAccountName())
        namespace = self.sbms.get_namespace(self.config.getServiceBusNamespace())

        # HTTPS only when an SSL certificate is installed on the service.
        if len(self.config.getSslCertificateInstalledPath()) > 0:
            bundle_auth_scheme = "https"
        else:
            bundle_auth_scheme = "http"
        if len(ssl_allowed_hosts) == 0:
            bundle_auth_host = '{0}.cloudapp.net'.format(self.config.getServiceName())
        else:
            bundle_auth_host = ssl_allowed_hosts[0]
        bundle_auth_url = "{0}://{1}".format(bundle_auth_scheme, bundle_auth_host)

        lines = [
            "from base import Base",
            "from default import *",
            "from configurations import Settings",
            "",
            "import sys",
            "from os.path import dirname, abspath, join",
            "from pkgutil import extend_path",
            "import codalab",
            "",
            "class {0}(Base):".format(self.config.getDjangoConfiguration()),
            "",
            "    DEBUG=False",
            "",
            "    ALLOWED_HOSTS = {0}".format(allowed_hosts),
            "",
            "    SSL_PORT = '443'",
            "    SSL_CERTIFICATE = '{0}'".format(self.config.getSslCertificateInstalledPath()),
            "    SSL_CERTIFICATE_KEY = '{0}'".format(self.config.getSslCertificateKeyInstalledPath()),
            "    SSL_ALLOWED_HOSTS = {0}".format(ssl_allowed_hosts),
            "",
            "    DEFAULT_FILE_STORAGE = 'codalab.azure_storage.AzureStorage'",
            "    AZURE_ACCOUNT_NAME = '{0}'".format(self.config.getServiceStorageAccountName()),
            "    AZURE_ACCOUNT_KEY = '{0}'".format(storage_key),
            "    AZURE_CONTAINER = '{0}'".format(self.config.getServicePublicStorageContainer()),
            "    BUNDLE_AZURE_ACCOUNT_NAME = AZURE_ACCOUNT_NAME",
            "    BUNDLE_AZURE_ACCOUNT_KEY = AZURE_ACCOUNT_KEY",
            "    BUNDLE_AZURE_CONTAINER = '{0}'".format(self.config.getServiceBundleStorageContainer()),
            "",
            "    SBS_NAMESPACE = '{0}'".format(self.config.getServiceBusNamespace()),
            "    SBS_ISSUER = 'owner'",
            "    SBS_ACCOUNT_KEY = '{0}'".format(namespace.default_key),
            "    SBS_RESPONSE_QUEUE = 'jobresponsequeue'",
            "    SBS_COMPUTE_QUEUE = 'windowscomputequeue'",
            "",
            "    EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'",
            "    EMAIL_HOST = '{0}'".format(self.config.getEmailHost()),
            "    EMAIL_HOST_USER = '{0}'".format(self.config.getEmailUser()),
            "    EMAIL_HOST_PASSWORD = '{0}'".format(self.config.getEmailPassword()),
            "    EMAIL_PORT = 587",
            "    EMAIL_USE_TLS = True",
            "    DEFAULT_FROM_EMAIL = 'CodaLab <*****@*****.**>'",
            "    SERVER_EMAIL = '*****@*****.**'",
            "",
            "    # Django secret",
            "    SECRET_KEY = '{0}'".format(self.config.getDjangoSecretKey()),
            "",
            "    ADMINS = (('CodaLab', '*****@*****.**'),)",
            "    MANAGERS = ADMINS",
            "",
            "    DATABASES = {",
            "        'default': {",
            "            'ENGINE': '{0}',".format(self.config.getDatabaseEngine()),
            "            'NAME': '{0}',".format(self.config.getDatabaseName()),
            "            'USER': '{0}',".format(self.config.getDatabaseUser()),
            "            'PASSWORD': '{0}',".format(self.config.getDatabasePassword()),
            "            'HOST': '{0}',".format(self.config.getDatabaseHost()),
            "            'PORT': '{0}', ".format(self.config.getDatabasePort()),
            "            'OPTIONS' : {",
            "                'init_command': 'SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED',",
            "                'read_timeout': 5",
            "            }",
            "        }",
            "    }",
            "",
            "    BUNDLE_DB_NAME = '{0}'".format(self.config.getBundleServiceDatabaseName()),
            "    BUNDLE_DB_USER = '{0}'".format(self.config.getBundleServiceDatabaseUser()),
            "    BUNDLE_DB_PASSWORD = '{0}'".format(self.config.getBundleServiceDatabasePassword()),
            "    BUNDLE_APP_ID = '{0}'".format(self.config.getBundleServiceAppId()),
            "    BUNDLE_APP_KEY = '{0}'".format(self.config.getBundleServiceAppKey()),
            "    BUNDLE_AUTH_URL = '{0}'".format(bundle_auth_url),
            "",
            "    BUNDLE_SERVICE_URL = '{0}'".format(self.config.getBundleServiceUrl()),
            "    BUNDLE_SERVICE_CODE_PATH = '/home/{0}/deploy/bundles'".format(self.config.getVirtualMachineLogonUsername()),
            "    sys.path.append(BUNDLE_SERVICE_CODE_PATH)",
            "    codalab.__path__ = extend_path(codalab.__path__, codalab.__name__)",
            "",
        ]
        # Preview level 1 enables worksheets only; >1 also enables beta features.
        preview = self.config.getShowPreviewFeatures()
        if preview >= 1:
            if preview == 1:
                lines.append("    PREVIEW_WORKSHEETS = True")
            if preview > 1:
                lines.append("    SHOW_BETA_FEATURES = True")
            lines.append("")
        return '\n'.join(lines)
# Ejemplo n.º 4
# 0
# Connect to the Azure Service Management API with the configured
# subscription and management certificate.
subscription_id = config_params["subscription_id"]
certificate_path = config_params["mgmt_cert_path"]

sms = ServiceManagementService(subscription_id, certificate_path)

# Because the name has to be unique in Their cloud :/
hosted_service_name = name_generator()
label = 'devOps test'
desc = 'Service for basic nginx server'
location = 'Central US'

# image_list = sms.list_os_images()

result = sms.create_hosted_service(hosted_service_name, label, desc, location)
operation_result = sms.get_operation_status(result.request_id)

# Provision a storage account alongside the hosted service.
storage_acc_name = name_generator()
label = 'mystorageaccount'
location = 'Central US'
desc = 'My storage account description.'

result = sms.create_storage_account(storage_acc_name,
                                    desc,
                                    label,
                                    location=location)

operation_result = sms.get_operation_status(result.request_id)
print('Operation status: ' + operation_result.status)

# Fix: Python 2 print statement was mixed with print() calls above,
# which is a SyntaxError under Python 3.
print("The following services are now up:")
# Ejemplo n.º 5
# 0
# Connect to the Azure Service Management API with the configured
# subscription and management certificate.
subscription_id = config_params["subscription_id"]
certificate_path = config_params["mgmt_cert_path"]

sms = ServiceManagementService(subscription_id, certificate_path)

# Because the name has to be unique in Their cloud :/
hosted_service_name = name_generator()
label = 'devOps test'
desc = 'Service for basic nginx server'
location = 'Central US'

# image_list = sms.list_os_images()

result = sms.create_hosted_service(hosted_service_name, label, desc, location)
operation_result = sms.get_operation_status(result.request_id)

# Provision a storage account alongside the hosted service.
storage_acc_name = name_generator()
label = 'mystorageaccount'
location = 'Central US'
desc = 'My storage account description.'

result = sms.create_storage_account(storage_acc_name, desc, label,
                                    location=location)

operation_result = sms.get_operation_status(result.request_id)
print('Operation status: ' + operation_result.status)

# Fix: Python 2 print statement was mixed with print() calls above,
# which is a SyntaxError under Python 3.
print("The following services are now up:")

result = sms.list_hosted_services()
class AzureStorageBlockDeviceAPI(object):
    """
    An ``IBlockDeviceAsyncAPI`` which uses Azure Storage Backed Block Devices
    Current Support: Azure SMS API
    """

    def __init__(self, **azure_config):
        """
        Configure the Azure service-management and blob-storage clients.

        Expected keys in ``azure_config``:

        :param subscription_id: Azure subscription to operate in.
        :param management_certificate_path: Path to the management
            certificate used to authenticate SMS calls.
        :param service_name: Name of the cloud service hosting the nodes.
        :param storage_account_name: Storage account that holds the VHDs.
        :param storage_account_key: Access key for that storage account.
        :param disk_container_name: Blob container where disk VHDs live.
        :param debug: When truthy, route log output to stdout via
            ``to_file``.
        """
        # Identify this node (host name) before building the clients.
        self._instance_id = self.compute_instance_id()
        self._azure_service_client = ServiceManagementService(
            azure_config['subscription_id'],
            azure_config['management_certificate_path'])
        self._service_name = azure_config['service_name']
        self._azure_storage_client = BlobService(
            azure_config['storage_account_name'],
            azure_config['storage_account_key'])
        self._storage_account_name = azure_config['storage_account_name']
        self._disk_container_name = azure_config['disk_container_name']

        if azure_config['debug']:
            to_file(sys.stdout)

    def allocation_unit(self):
        """
        Return the minimum allocation unit for Azure disks, in bytes.

        :returns int: The number of bytes in 1 GiB.
        """
        one_gib = GiB(1)
        return int(one_gib.to_Byte().value)

    def compute_instance_id(self):
        """
        Identify this node by its host name.
        """
        # Node host names should be unique within a vnet
        hostname = socket.gethostname()
        return unicode(hostname)

    def create_volume(self, dataset_id, size):
        """
        Create a new volume backed by a blank page blob.
        :param UUID dataset_id: The Flocker dataset ID of the dataset on this
            volume.
        :param int size: The size of the new volume in bytes; must be a
            whole number of GiB.
        :returns: A ``Deferred`` that fires with a ``BlockDeviceVolume`` when
            the volume has been created.
        :raises UnsupportedVolumeSize: If ``size`` is not a whole GiB.
        """
        # Azure disks are allocated in whole GiB only.
        if Byte(size).to_GiB().value % 1 != 0:
            raise UnsupportedVolumeSize(dataset_id)

        self._create_volume_blob(size, dataset_id)

        label = self._disk_label_for_dataset_id(str(dataset_id))
        return BlockDeviceVolume(
            blockdevice_id=unicode(label),
            size=size,
            attached_to=None,
            dataset_id=self._dataset_id_for_disk_label(label))

    def destroy_volume(self, blockdevice_id):
        """
        Destroy an existing volume.
        :param unicode blockdevice_id: The unique identifier for the volume to
            destroy.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :return: ``None``
        """
        # Fix: log message typo ('Destorying' -> 'Destroying').
        log_info('Destroying block device: ' + str(blockdevice_id))
        (target_disk, role_name, lun) = \
            self._get_disk_vmname_lun(blockdevice_id)

        if target_disk is None:
            raise UnknownVolume(blockdevice_id)

        request = None

        if lun is not None:
            # Attached disk: detach and delete the backing VHD in one call.
            request = \
                self._azure_service_client.delete_data_disk(
                    service_name=self._service_name,
                    deployment_name=self._service_name,
                    role_name=target_disk.attached_to.role_name,
                    lun=lun,
                    delete_vhd=True)
        else:
            if target_disk.__class__.__name__ == 'Blob':
                # unregistered disk: only the blob needs removing
                self._azure_storage_client.delete_blob(
                    self._disk_container_name, target_disk.name)
            else:
                # Registered but unattached disk; True also deletes the VHD.
                request = self._azure_service_client.delete_disk(
                    target_disk.name, True)

        if request is not None:
            self._wait_for_async(request.request_id, 5000)
            self._wait_for_detach(blockdevice_id)

    def attach_volume(self, blockdevice_id, attach_to):
        """
        Attach ``blockdevice_id`` to ``host``.
        :param unicode blockdevice_id: The unique identifier for the block
            device being attached.
        :param unicode attach_to: An identifier like the one returned by the
            ``compute_instance_id`` method indicating the node to which to
            attach the volume.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :raises AlreadyAttachedVolume: If the supplied ``blockdevice_id`` is
            already attached.
        :returns: A ``BlockDeviceVolume`` with a ``host`` attribute set to
            ``host``.
        """
        disk, role_name, lun = self._get_disk_vmname_lun(blockdevice_id)

        if disk is None:
            raise UnknownVolume(blockdevice_id)
        # A LUN is only present when the disk is already mounted somewhere.
        if lun is not None:
            raise AlreadyAttachedVolume(blockdevice_id)

        log_info('Attempting to attach ' + str(blockdevice_id)
                 + ' to ' + str(attach_to))

        size = self._attach_disk(blockdevice_id, disk, attach_to)
        self._wait_for_attach(blockdevice_id)
        log_info('disk attached')

        return self._blockdevicevolume_from_azure_volume(
            blockdevice_id, size, attach_to)

    def detach_volume(self, blockdevice_id):
        """
        Detach ``blockdevice_id`` from whatever host it is attached to.
        :param unicode blockdevice_id: The unique identifier for the block
            device being detached.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :raises UnattachedVolume: If the supplied ``blockdevice_id`` is
            not attached to anything.
        :returns: ``None``
        """
        disk, role_name, lun = self._get_disk_vmname_lun(blockdevice_id)

        if disk is None:
            raise UnknownVolume(blockdevice_id)
        if lun is None:
            raise UnattachedVolume(blockdevice_id)

        # Despite its name, delete_data_disk without delete_vhd=True only
        # detaches the disk; the VHD itself is preserved.
        pending = self._azure_service_client.delete_data_disk(
            service_name=self._service_name,
            deployment_name=self._service_name,
            role_name=role_name, lun=lun)

        self._wait_for_async(pending.request_id, 5000)
        self._wait_for_detach(blockdevice_id)

    def get_device_path(self, blockdevice_id):
        """
        Return the device path that has been allocated to the block device on
        the host to which it is currently attached.
        :param unicode blockdevice_id: The unique identifier for the block
            device.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :raises UnattachedVolume: If the supplied ``blockdevice_id`` is
            not attached to a host.
        :returns: A ``FilePath`` for the device.
        """
        disk_or_blob, _, lun = self._get_disk_vmname_lun(blockdevice_id)

        if disk_or_blob is None:
            raise UnknownVolume(blockdevice_id)
        if lun is None:
            raise UnattachedVolume(blockdevice_id)

        # The device path is determined entirely by the LUN.
        return Lun.get_device_path_for_lun(lun)

    def list_volumes(self):
        """
        List all the block devices available via the back end API.
        :returns: A ``list`` of ``BlockDeviceVolume``s.
        """
        url_prefix = 'https://' + self._storage_account_name \
            + '.blob.core.windows.net/' + self._disk_container_name
        blobs = self._get_flocker_blobs()
        volumes = []

        for disk in self._azure_service_client.list_disks():
            # Only disks in our container with a flocker label are ours.
            if url_prefix not in disk.media_link or \
                    'flocker-' not in disk.label:
                continue

            role_name = None
            attached = disk.attached_to
            if attached is not None and attached.role_name is not None:
                role_name = attached.role_name

            volumes.append(self._blockdevicevolume_from_azure_volume(
                disk.label,
                self._gibytes_to_bytes(disk.logical_disk_size_in_gb),
                role_name))

            # Registered disks should not be double-counted as blobs.
            blobs.pop(disk.label, None)

        # Whatever remains are unregistered 'disk' blobs; include them too.
        for blob in blobs.values():
            volumes.append(self._blockdevicevolume_from_azure_volume(
                blob.name,
                blob.properties.content_length,
                None))

        return volumes

    def _attach_disk(
            self,
            blockdevice_id,
            target_disk,
            attach_to):

        """
        Attaches disk to specified VM
        :param string blockdevice_id: The identifier of the disk
        :param DataVirtualHardDisk/Blob target_disk: The Blob
               or Disk to be attached
        :param attach_to: Role name of the VM to attach the disk to.
        :returns int: The size of the attached disk
        """

        # Pick the next free LUN on the target VM.
        lun = Lun.compute_next_lun(
            self._azure_service_client,
            self._service_name,
            str(attach_to))
        common_params = {
            'service_name': self._service_name,
            'deployment_name': self._service_name,
            'role_name': attach_to,
            'lun': lun
        }
        disk_size = None

        if target_disk.__class__.__name__ == 'Blob':
            # exclude 512 byte footer
            # NOTE(review): the comment above claims the footer is excluded,
            # but content_length is used unadjusted — confirm whether 512
            # bytes should be subtracted here.
            disk_size = target_disk.properties.content_length

            # Unregistered blob: attach by source media link.
            common_params['source_media_link'] = \
                'https://' + self._storage_account_name \
                + '.blob.core.windows.net/' + self._disk_container_name \
                + '/' + blockdevice_id

            common_params['disk_label'] = blockdevice_id

        else:

            # Registered disk: attach by disk name.
            disk_size = self._gibytes_to_bytes(
                target_disk.logical_disk_size_in_gb)

            common_params['disk_name'] = target_disk.name

        request = self._azure_service_client.add_data_disk(**common_params)
        self._wait_for_async(request.request_id, 5000)

        return disk_size

    def _create_volume_blob(self, size, dataset_id):
        """
        Create a blank page blob of ``size`` bytes and write a VHD footer
        into its last 512 bytes so Azure will recognise it as a disk.

        :param int size: Total size of the blob/disk in bytes.
        :param dataset_id: Dataset the new volume belongs to; used to
            derive the blob name via ``_disk_label_for_dataset_id``.
        """
        # Create a new page blob as a blank disk
        self._azure_storage_client.put_blob(
            container_name=self._disk_container_name,
            blob_name=self._disk_label_for_dataset_id(dataset_id),
            blob=None,
            x_ms_blob_type='PageBlob',
            x_ms_blob_content_type='application/octet-stream',
            x_ms_blob_content_length=size)

        # for disk to be a valid vhd it requires a vhd footer
        # on the last 512 bytes
        vhd_footer = Vhd.generate_vhd_footer(size)

        # The byte range targets exactly the final 512-byte page of the blob.
        self._azure_storage_client.put_page(
            container_name=self._disk_container_name,
            blob_name=self._disk_label_for_dataset_id(dataset_id),
            page=vhd_footer,
            x_ms_page_write='update',
            x_ms_range='bytes=' + str((size - 512)) + '-' + str(size - 1))

    def _disk_label_for_dataset_id(self, dataset_id):
        """
        Returns a disk label for a given Dataset ID
        :param unicode dataset_id: The identifier of the dataset
        :returns string: A string representing the disk label
        """

        label = 'flocker-' + str(dataset_id)
        return label

    def _dataset_id_for_disk_label(self, disk_label):
        """
        Returns a UUID representing the Dataset ID for the given disk
        label
        :param string disk_label: The disk label
        :returns UUID: The UUID of the dataset
        """
        return UUID(disk_label.replace('flocker-', ''))

    def _get_disk_vmname_lun(self, blockdevice_id):
        """
        Locate the Azure disk (or unregistered blob) for a volume and
        report where it is attached.

        :param blockdevice_id: Volume identifier; compared against the
            Azure disk label.
        :returns tuple: ``(disk_or_blob_or_None, role_name_or_None,
            lun_or_None)``; ``role_name`` and ``lun`` are ``None``
            whenever the disk is not attached to a VM.
        """
        target_disk = None
        target_lun = None
        role_name = None
        disk_list = self._azure_service_client.list_disks()

        for d in disk_list:

            # only disks carrying the 'flocker-' label belong to us
            if 'flocker-' not in d.label:
                continue
            if d.label == str(blockdevice_id):
                target_disk = d
                break

        if target_disk is None:
            # check for unregistered disk (a blob with no disk object)
            blobs = self._get_flocker_blobs()
            blob = None

            if str(blockdevice_id) in blobs:
                blob = blobs[str(blockdevice_id)]

            return blob, None, None

        vm_info = None

        if hasattr(target_disk.attached_to, 'role_name'):
            # disk is attached: look up which LUN it occupies on the role
            vm_info = self._azure_service_client.get_role(
                self._service_name, self._service_name,
                target_disk.attached_to.role_name)

            for d in vm_info.data_virtual_hard_disks:
                if d.disk_name == target_disk.name:
                    target_lun = d.lun
                    break

            role_name = target_disk.attached_to.role_name

        return (target_disk, role_name, target_lun)

    def _get_flocker_blobs(self):
        all_blobs = {}

        blobs = self._azure_storage_client.list_blobs(
            self._disk_container_name,
            prefix='flocker-')

        for b in blobs:
            # todo - this could be big!
            all_blobs[b.name] = b

        return all_blobs

    def _wait_for_detach(self, blockdevice_id):
        """
        Block until Azure no longer reports the disk as attached.

        :param blockdevice_id: Volume identifier to poll.
        :raises AsynchronousTimeout: After 5000 one-second polls.
        """
        # seed with non-None values so the loop body runs at least once
        role_name = ''
        lun = -1

        timeout_count = 0

        log_info('waiting for azure to ' + 'report disk as detached...')

        while role_name is not None or lun is not None:
            (target_disk, role_name, lun) = \
                self._get_disk_vmname_lun(blockdevice_id)
            time.sleep(1)
            timeout_count += 1

            if timeout_count > 5000:
                raise AsynchronousTimeout()

        log_info('Disk Detached')

    def _wait_for_attach(self, blockdevice_id):
        """
        Block until Azure reports a LUN assigned to the disk, i.e. the
        attach completed.

        :param blockdevice_id: Volume identifier to poll.
        :raises AsynchronousTimeout: After 5000 polls.
        """
        timeout_count = 0
        lun = None

        log_info('waiting for azure to report disk as attached...')

        while lun is None:
            (target_disk, role_name, lun) = \
                self._get_disk_vmname_lun(blockdevice_id)
            time.sleep(.001)
            timeout_count += 1

            if timeout_count > 5000:
                raise AsynchronousTimeout()

    def _wait_for_async(self, request_id, timeout):
        """
        Poll Azure until the asynchronous operation identified by
        ``request_id`` leaves the 'InProgress' state.

        :param request_id: Azure management request identifier.
        :param int timeout: Maximum number of polls before giving up.
        :raises AsynchronousTimeout: If the operation is still in
            progress after ``timeout`` polls.
        """
        count = 0
        result = self._azure_service_client.get_operation_status(request_id)
        while result.status == 'InProgress':
            count = count + 1
            if count > timeout:
                log_error('Timed out waiting for async operation to complete.')
                raise AsynchronousTimeout()
            time.sleep(.001)
            log_info('.')
            result = self._azure_service_client.get_operation_status(
                request_id)
            if result.error:
                log_error(result.error.code)
                log_error(str(result.error.message))

        # Bug fix: the final status was logged through log_error even on
        # success, and the message claimed 'count * 5' seconds although
        # the poll interval above is 0.001s (stale copy from a variant
        # that slept 5s per poll).  Report the poll count instead.
        log_info(result.status + ' after ' + str(count) + ' polls')

    def _gibytes_to_bytes(self, size):
        """
        Convert a size expressed in GiB to bytes.

        :param size: Size in gibibytes.
        :returns int: The equivalent number of bytes.
        """
        in_bytes = GiB(size).to_Byte()
        return int(in_bytes.value)

    def _blockdevicevolume_from_azure_volume(self, label, size,
                                             attached_to_name):
        """
        Build a Flocker ``BlockDeviceVolume`` from Azure disk metadata.

        :param label: Disk label of the form ``flocker-<dataset_id>``.
        :param size: Size in bytes reported to Flocker.
        :param attached_to_name: Node the disk is attached to, or
            ``None`` when detached.
        :returns: A ``BlockDeviceVolume``.
        """
        # azure will report the disk size excluding the 512 byte footer
        # however flocker expects the exact value it requested for disk size
        # so offset the reported size to flocker by 512 bytes
        # NOTE(review): despite the comment above, no 512-byte offset is
        # applied here -- ``size`` is passed through unchanged.  Confirm
        # whether callers pre-adjust or whether the offset was lost.
        return BlockDeviceVolume(
            blockdevice_id=unicode(label),
            size=int(size),
            attached_to=attached_to_name,
            dataset_id=self._dataset_id_for_disk_label(label)
        )  # disk labels are formatted as flocker-<data_set_id>
class StorageManagementServiceTest(AzureTestCase):
    """
    Integration tests for the Azure storage-account management API
    (create/read/update/delete accounts, key management, name
    availability) using a uniquely named throwaway account.
    """

    def setUp(self):
        """Create a management client (honouring proxy settings) and a
        unique storage account name for this test run."""
        proxy_host = credentials.getProxyHost()
        proxy_port = credentials.getProxyPort()

        self.sms = ServiceManagementService(
            credentials.getSubscriptionId(),
            credentials.getManagementCertFile())
        if proxy_host:
            self.sms.set_proxy(proxy_host, proxy_port)

        self.storage_account_name = getUniqueNameBasedOnCurrentTime(
            'utstorage')

    def tearDown(self):
        """Best-effort removal of the account created by the test."""
        try:
            self.sms.delete_storage_account(self.storage_account_name)
        except Exception:
            # Bug fix: was a bare 'except:', which also swallows
            # SystemExit/KeyboardInterrupt.  Cleanup stays best-effort.
            pass

    #--Helpers-----------------------------------------------------------------
    def _wait_for_async(self, request_id):
        """Poll the async operation every 5s until it completes;
        fail the test after 120 polls or on non-success."""
        count = 0
        result = self.sms.get_operation_status(request_id)
        while result.status == 'InProgress':
            count = count + 1
            if count > 120:
                self.assertTrue(
                    False,
                    'Timed out waiting for async operation to complete.')
            time.sleep(5)
            result = self.sms.get_operation_status(request_id)
        self.assertEqual(result.status, 'Succeeded')

    def _create_storage_account(self, name):
        """Create a storage account named ``name`` and wait for the
        asynchronous creation to finish."""
        result = self.sms.create_storage_account(name, name + 'description',
                                                 name + 'label', None,
                                                 'West US', False, {
                                                     'ext1': 'val1',
                                                     'ext2': 42
                                                 })
        self._wait_for_async(result.request_id)

    def _storage_account_exists(self, name):
        """Return True when the account's properties can be fetched."""
        try:
            props = self.sms.get_storage_account_properties(name)
            return props is not None
        except Exception:
            # Bug fix: narrowed from a bare 'except:'; a missing account
            # raises a service error, which we map to False.
            return False

    #--Test cases for storage accounts -----------------------------------
    def test_list_storage_accounts(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.list_storage_accounts()

        # Assert
        self.assertIsNotNone(result)
        self.assertTrue(len(result) > 0)

        storage = None
        for temp in result:
            if temp.service_name == self.storage_account_name:
                storage = temp
                break

        self.assertIsNotNone(storage)
        self.assertIsNotNone(storage.service_name)
        self.assertIsNone(storage.storage_service_keys)
        self.assertIsNotNone(storage.storage_service_properties)
        self.assertIsNotNone(storage.storage_service_properties.affinity_group)
        self.assertIsNotNone(storage.storage_service_properties.description)
        self.assertIsNotNone(
            storage.storage_service_properties.geo_primary_region)
        self.assertIsNotNone(
            storage.storage_service_properties.geo_replication_enabled)
        self.assertIsNotNone(
            storage.storage_service_properties.geo_secondary_region)
        self.assertIsNotNone(storage.storage_service_properties.label)
        self.assertIsNotNone(
            storage.storage_service_properties.last_geo_failover_time)
        self.assertIsNotNone(storage.storage_service_properties.location)
        self.assertIsNotNone(storage.storage_service_properties.status)
        self.assertIsNotNone(
            storage.storage_service_properties.status_of_primary)
        self.assertIsNotNone(
            storage.storage_service_properties.status_of_secondary)
        self.assertIsNotNone(storage.storage_service_properties.endpoints)
        self.assertTrue(len(storage.storage_service_properties.endpoints) > 0)
        self.assertIsNotNone(storage.extended_properties)
        self.assertTrue(len(storage.extended_properties) > 0)

    def test_get_storage_account_properties(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.get_storage_account_properties(
            self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertEqual(result.service_name, self.storage_account_name)
        self.assertIsNotNone(result.url)
        self.assertIsNone(result.storage_service_keys)
        self.assertIsNotNone(result.storage_service_properties)
        self.assertIsNotNone(result.storage_service_properties.affinity_group)
        self.assertIsNotNone(result.storage_service_properties.description)
        self.assertIsNotNone(
            result.storage_service_properties.geo_primary_region)
        self.assertIsNotNone(
            result.storage_service_properties.geo_replication_enabled)
        self.assertIsNotNone(
            result.storage_service_properties.geo_secondary_region)
        self.assertIsNotNone(result.storage_service_properties.label)
        self.assertIsNotNone(
            result.storage_service_properties.last_geo_failover_time)
        self.assertIsNotNone(result.storage_service_properties.location)
        self.assertIsNotNone(result.storage_service_properties.status)
        self.assertIsNotNone(
            result.storage_service_properties.status_of_primary)
        self.assertIsNotNone(
            result.storage_service_properties.status_of_secondary)
        self.assertIsNotNone(result.storage_service_properties.endpoints)
        self.assertTrue(len(result.storage_service_properties.endpoints) > 0)
        self.assertIsNotNone(result.extended_properties)
        self.assertTrue(len(result.extended_properties) > 0)
        self.assertIsNotNone(result.capabilities)
        self.assertTrue(len(result.capabilities) > 0)

    def test_get_storage_account_keys(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.get_storage_account_keys(self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertIsNotNone(result.url)
        self.assertIsNotNone(result.service_name)
        self.assertIsNotNone(result.storage_service_keys.primary)
        self.assertIsNotNone(result.storage_service_keys.secondary)
        self.assertIsNone(result.storage_service_properties)

    def test_regenerate_storage_account_keys(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)
        previous = self.sms.get_storage_account_keys(self.storage_account_name)

        # Act
        result = self.sms.regenerate_storage_account_keys(
            self.storage_account_name, 'Secondary')

        # Assert
        self.assertIsNotNone(result)
        self.assertIsNotNone(result.url)
        self.assertIsNotNone(result.service_name)
        self.assertIsNotNone(result.storage_service_keys.primary)
        self.assertIsNotNone(result.storage_service_keys.secondary)
        self.assertIsNone(result.storage_service_properties)
        # only the secondary key was regenerated; primary is untouched
        self.assertEqual(result.storage_service_keys.primary,
                         previous.storage_service_keys.primary)
        self.assertNotEqual(result.storage_service_keys.secondary,
                            previous.storage_service_keys.secondary)

    def test_create_storage_account(self):
        # Arrange
        description = self.storage_account_name + 'description'
        label = self.storage_account_name + 'label'

        # Act
        result = self.sms.create_storage_account(self.storage_account_name,
                                                 description, label, None,
                                                 'West US', True, {
                                                     'ext1': 'val1',
                                                     'ext2': 42
                                                 })
        self._wait_for_async(result.request_id)

        # Assert
        self.assertTrue(self._storage_account_exists(
            self.storage_account_name))

    def test_update_storage_account(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)
        description = self.storage_account_name + 'descriptionupdate'
        label = self.storage_account_name + 'labelupdate'

        # Act
        result = self.sms.update_storage_account(self.storage_account_name,
                                                 description, label, False, {
                                                     'ext1': 'val1update',
                                                     'ext2': 53,
                                                     'ext3': 'brandnew'
                                                 })

        # Assert
        self.assertIsNone(result)
        props = self.sms.get_storage_account_properties(
            self.storage_account_name)
        self.assertEqual(props.storage_service_properties.description,
                         description)
        self.assertEqual(props.storage_service_properties.label, label)
        self.assertEqual(props.extended_properties['ext1'], 'val1update')
        # extended property values round-trip as strings
        self.assertEqual(props.extended_properties['ext2'], '53')
        self.assertEqual(props.extended_properties['ext3'], 'brandnew')

    def test_delete_storage_account(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.delete_storage_account(self.storage_account_name)

        # Assert
        self.assertIsNone(result)
        self.assertFalse(
            self._storage_account_exists(self.storage_account_name))

    def test_check_storage_account_name_availability_not_available(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.check_storage_account_name_availability(
            self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertFalse(result.result)

    def test_check_storage_account_name_availability_available(self):
        # Arrange

        # Act
        result = self.sms.check_storage_account_name_availability(
            self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertTrue(result.result)

    def test_unicode_create_storage_account_unicode_name(self):
        # Arrange
        self.storage_account_name = unicode(
            self.storage_account_name) + u'啊齄丂狛狜'
        description = 'description'
        label = 'label'

        # Act
        with self.assertRaises(WindowsAzureError):
            # not supported - queue name must be alphanumeric, lowercase
            result = self.sms.create_storage_account(self.storage_account_name,
                                                     description, label, None,
                                                     'West US', True, {
                                                         'ext1': 'val1',
                                                         'ext2': 42
                                                     })
            self._wait_for_async(result.request_id)

        # Assert

    def test_unicode_create_storage_account_unicode_description_label(self):
        # Arrange
        description = u'啊齄丂狛狜'
        label = u'丂狛狜'

        # Act
        result = self.sms.create_storage_account(self.storage_account_name,
                                                 description, label, None,
                                                 'West US', True, {
                                                     'ext1': 'val1',
                                                     'ext2': 42
                                                 })
        self._wait_for_async(result.request_id)

        # Assert
        result = self.sms.get_storage_account_properties(
            self.storage_account_name)
        self.assertEqual(result.storage_service_properties.description,
                         description)
        self.assertEqual(result.storage_service_properties.label, label)

    def test_unicode_create_storage_account_unicode_property_value(self):
        # Arrange
        description = 'description'
        label = 'label'

        # Act
        result = self.sms.create_storage_account(self.storage_account_name,
                                                 description, label, None,
                                                 'West US', True, {
                                                     'ext1': u'丂狛狜',
                                                     'ext2': 42
                                                 })
        self._wait_for_async(result.request_id)

        # Assert
        result = self.sms.get_storage_account_properties(
            self.storage_account_name)
        self.assertEqual(result.storage_service_properties.description,
                         description)
        self.assertEqual(result.storage_service_properties.label, label)
        self.assertEqual(result.extended_properties['ext1'], u'丂狛狜')
def provision(instance_id):
    """
    Provision an instance of this service
    for the given org and space

    PUT /v2/service_instances/<instance_id>:
        <instance_id> is provided by the Cloud
          Controller and will be used for future
          requests to bind, unbind and deprovision

    BODY:
        {
          "service_id":        "<service-guid>",
          "plan_id":           "<plan-guid>",
          "organization_guid": "<org-guid>",
          "space_guid":        "<space-guid>"
        }

    return:
        JSON document with details about the
        services offered through this broker
    """
    if 'application/json' not in request.content_type:
        abort(415, 'Unsupported Content-Type: expecting application/json, '
                   'actual {0}'.format(request.content_type))

    # module-level cache of credentials and the lazily created account
    global subscription_id
    global cert
    global cert_file
    global account_name
    global account_key

    # Create the storage account on first use: credentials configured
    # but no account recorded yet.
    if subscription_id and cert and (not account_name):
        sms = ServiceManagementService(subscription_id, cert_file)
        name = '{0}{1}'.format(
            STORAGE_ACCOUNT_NAME_PREFIX, instance_id.split('-')[0])
        desc = name
        label = name
        location = 'West US'
        result = None
        try:
            result = sms.create_storage_account(
                name, desc, label, location=location)
        except WindowsAzureConflictError:
            # account already exists - fall through and reuse it
            pass
        if result:
            req_id = result.request_id
            operation = sms.get_operation_status(req_id)
            while operation.status == 'InProgress':
                time.sleep(5)
                operation = sms.get_operation_status(req_id)
                app.logger.info(
                    'Request ID: {0}, Operation Status: {1}'.format(
                        req_id, operation.status))
            if operation.status == 'Succeeded':
                app.logger.info(
                    'Request ID: {0}, Operation Status: {1}'.format(
                        req_id, operation.status))
                account_name = name
                account_key = sms.get_storage_account_keys(
                    account_name).storage_service_keys.primary
                # NOTE(review): this logs the storage account key, i.e.
                # a secret, into the application log - consider redacting
                app.logger.info('Account Name: {0}, Account key: {1}'.format(
                    account_name, account_key))

    if account_name:
        blob_service = BlobService(account_name, account_key)
        container_name = '{0}-{1}'.format(CONTAINER_NAME_PREFIX, instance_id)
        app.logger.info('Container Name: {0}'.format(container_name))
        request_body = request.get_json()
        # Strip 'parameters' so only tag metadata remains.  Bug fix:
        # dict.has_key() is deprecated (removed in Python 3); use 'in'.
        # The popped value was never used, so it is not bound.
        if 'parameters' in request_body:
            request_body.pop('parameters')
        container_tags = request_body
        container_tags['instance_id'] = instance_id
        blob_service.create_container(
            container_name=container_name,
            x_ms_meta_name_values=container_tags)

    return jsonify({})
# Ejemplo n.º 9
# 0
class AzureStorageBlockDeviceAPI(object):
    """
    An ``IBlockDeviceAsyncAPI`` which uses Azure Storage Backed Block Devices
    Current Support: Azure SMS API
    """
    def __init__(self, **azure_config):
        """
        Initialise the driver from an ``azure_config`` mapping.

        Required keys: ``subscription_id``,
        ``management_certificate_path``, ``service_name``,
        ``storage_account_name``, ``storage_account_key``,
        ``disk_container_name`` and ``debug``.
        """
        self._instance_id = self.compute_instance_id()
        # management client for VM/disk operations
        self._azure_service_client = ServiceManagementService(
            azure_config['subscription_id'],
            azure_config['management_certificate_path'])
        self._service_name = azure_config['service_name']
        # blob client for the page blobs backing the disks
        self._azure_storage_client = BlobService(
            azure_config['storage_account_name'],
            azure_config['storage_account_key'])
        self._storage_account_name = azure_config['storage_account_name']
        self._disk_container_name = azure_config['disk_container_name']

        # presumably routes structured logging to stdout for debugging;
        # confirm which logging library provides to_file
        if azure_config['debug']:
            to_file(sys.stdout)

    def allocation_unit(self):
        """
        Return the minimum allocation unit for Azure disks, in bytes.

        Azure disks are sized in whole GiB, so the unit is 1 GiB.
        """
        one_gib = GiB(1)
        return int(one_gib.to_Byte().value)

    def compute_instance_id(self):
        """
        Return this node's identifier within the cluster.

        Node host names should be unique within a vnet, so the host
        name doubles as the instance id.
        """
        hostname = socket.gethostname()
        return unicode(hostname)

    def create_volume(self, dataset_id, size):
        """
        Create a new volume.
        :param UUID dataset_id: The Flocker dataset ID of the dataset on this
            volume.
        :param int size: The size of the new volume in bytes.
        :raises UnsupportedVolumeSize: If ``size`` is not a whole number
            of GiB.
        :returns: A ``Deferred`` that fires with a ``BlockDeviceVolume`` when
            the volume has been created.
        """

        size_in_gb = Byte(size).to_GiB().value

        # Azure disks must be whole GiB; reject fractional sizes
        if size_in_gb % 1 != 0:
            raise UnsupportedVolumeSize(dataset_id)

        # back the volume with a page blob carrying a VHD footer
        self._create_volume_blob(size, dataset_id)

        label = self._disk_label_for_dataset_id(str(dataset_id))
        return BlockDeviceVolume(
            blockdevice_id=unicode(label),
            size=size,
            attached_to=None,
            dataset_id=self._dataset_id_for_disk_label(label))

    def destroy_volume(self, blockdevice_id):
        """
        Destroy an existing volume.
        :param unicode blockdevice_id: The unique identifier for the volume to
            destroy.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :return: ``None``
        """
        # Bug fix: log message previously misspelled 'Destorying'.
        log_info('Destroying block device: ' + str(blockdevice_id))
        (target_disk, role_name, lun) = \
            self._get_disk_vmname_lun(blockdevice_id)

        if target_disk is None:

            raise UnknownVolume(blockdevice_id)

        request = None

        if lun is not None:
            # attached disk: detach it and delete the backing VHD in one call
            request = \
                self._azure_service_client.delete_data_disk(
                    service_name=self._service_name,
                    deployment_name=self._service_name,
                    role_name=target_disk.attached_to.role_name,
                    lun=lun,
                    delete_vhd=True)
        else:
            if target_disk.__class__.__name__ == 'Blob':
                # unregistered disk: only the blob exists, remove it directly
                self._azure_storage_client.delete_blob(
                    self._disk_container_name, target_disk.name)
            else:
                # registered but unattached: delete disk object and VHD
                request = self._azure_service_client.delete_disk(
                    target_disk.name, True)

        if request is not None:
            self._wait_for_async(request.request_id, 5000)
            self._wait_for_detach(blockdevice_id)

    def attach_volume(self, blockdevice_id, attach_to):
        """
        Attach ``blockdevice_id`` to ``host``.
        :param unicode blockdevice_id: The unique identifier for the block
            device being attached.
        :param unicode attach_to: An identifier like the one returned by the
            ``compute_instance_id`` method indicating the node to which to
            attach the volume.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :raises AlreadyAttachedVolume: If the supplied ``blockdevice_id`` is
            already attached.
        :returns: A ``BlockDeviceVolume`` with a ``host`` attribute set to
            ``host``.
        """

        (target_disk, role_name, lun) = \
            self._get_disk_vmname_lun(blockdevice_id)

        if target_disk is None:
            raise UnknownVolume(blockdevice_id)

        # a LUN is only assigned while the disk is attached to a role
        if lun is not None:
            raise AlreadyAttachedVolume(blockdevice_id)

        log_info('Attempting to attach ' + str(blockdevice_id) + ' to ' +
                 str(attach_to))

        disk_size = self._attach_disk(blockdevice_id, target_disk, attach_to)

        # block until Azure reports the attachment as complete
        self._wait_for_attach(blockdevice_id)

        log_info('disk attached')

        return self._blockdevicevolume_from_azure_volume(
            blockdevice_id, disk_size, attach_to)

    def detach_volume(self, blockdevice_id):
        """
        Detach ``blockdevice_id`` from whatever host it is attached to.
        :param unicode blockdevice_id: The unique identifier for the block
            device being detached.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :raises UnattachedVolume: If the supplied ``blockdevice_id`` is
            not attached to anything.
        :returns: ``None``
        """

        (target_disk, role_name, lun) = \
            self._get_disk_vmname_lun(blockdevice_id)

        if target_disk is None:
            raise UnknownVolume(blockdevice_id)

        if lun is None:
            raise UnattachedVolume(blockdevice_id)

        # contrary to the function name, delete_data_disk does not delete
        # by default (delete_vhd is not passed) - it just detaches

        request = \
            self._azure_service_client.delete_data_disk(
                service_name=self._service_name,
                deployment_name=self._service_name,
                role_name=role_name, lun=lun)

        self._wait_for_async(request.request_id, 5000)

        self._wait_for_detach(blockdevice_id)

    def get_device_path(self, blockdevice_id):
        """
        Return the device path that has been allocated to the block device on
        the host to which it is currently attached.
        :param unicode blockdevice_id: The unique identifier for the block
            device.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :raises UnattachedVolume: If the supplied ``blockdevice_id`` is
            not attached to a host.
        :returns: A ``FilePath`` for the device.
        """
        disk_or_blob, vm_name, lun = \
            self._get_disk_vmname_lun(blockdevice_id)

        # guard clauses: the volume must exist and be attached
        if disk_or_blob is None:
            raise UnknownVolume(blockdevice_id)
        if lun is None:
            raise UnattachedVolume(blockdevice_id)

        return Lun.get_device_path_for_lun(lun)

    def list_volumes(self):
        """
        List all the block devices available via the back end API.

        Includes registered Azure disks whose media link lives in our
        storage container and carries the 'flocker-' label, plus
        unregistered 'flocker-' blobs that have no disk object yet.
        :returns: A ``list`` of ``BlockDeviceVolume``s.
        """
        # only disks backed by blobs in our container belong to us
        media_url_prefix = 'https://' + self._storage_account_name \
            + '.blob.core.windows.net/' + self._disk_container_name
        disks = self._azure_service_client.list_disks()
        disk_list = []
        all_blobs = self._get_flocker_blobs()
        for d in disks:

            if media_url_prefix not in d.media_link or \
                    'flocker-' not in d.label:
                continue

            role_name = None

            if d.attached_to is not None \
                    and d.attached_to.role_name is not None:

                role_name = d.attached_to.role_name

            disk_list.append(
                self._blockdevicevolume_from_azure_volume(
                    d.label, self._gibytes_to_bytes(d.logical_disk_size_in_gb),
                    role_name))

            # a registered disk supersedes its backing blob - drop the
            # blob so it is not double-counted below
            if d.label in all_blobs:
                del all_blobs[d.label]

        for key in all_blobs:
            # include unregistered 'disk' blobs
            disk_list.append(
                self._blockdevicevolume_from_azure_volume(
                    all_blobs[key].name,
                    all_blobs[key].properties.content_length, None))

        return disk_list

    def _attach_disk(self, blockdevice_id, target_disk, attach_to):
        """
        Attaches disk to specified VM
        :param string blockdevice_id: The identifier of the disk
        :param DataVirtualHardDisk/Blob target_disk: The Blob
               or Disk to be attached
        :param attach_to: Name of the role (VM) to attach the disk to
        :returns int: The size of the attached disk
        """

        # find the first free LUN on the target role
        lun = Lun.compute_next_lun(self._azure_service_client,
                                   self._service_name, str(attach_to))
        common_params = {
            'service_name': self._service_name,
            'deployment_name': self._service_name,
            'role_name': attach_to,
            'lun': lun
        }
        disk_size = None

        if target_disk.__class__.__name__ == 'Blob':
            # exclude 512 byte footer
            # NOTE(review): despite the comment above, content_length is
            # used unmodified - no 512 bytes are subtracted here; confirm
            # whether callers compensate elsewhere.
            disk_size = target_disk.properties.content_length

            # unregistered blob: attach by source media link so Azure
            # registers it as a data disk
            common_params['source_media_link'] = \
                'https://' + self._storage_account_name \
                + '.blob.core.windows.net/' + self._disk_container_name \
                + '/' + blockdevice_id

            common_params['disk_label'] = blockdevice_id

        else:

            # registered disk: attach by its existing disk name
            disk_size = self._gibytes_to_bytes(
                target_disk.logical_disk_size_in_gb)

            common_params['disk_name'] = target_disk.name

        request = self._azure_service_client.add_data_disk(**common_params)
        self._wait_for_async(request.request_id, 5000)

        return disk_size

    def _create_volume_blob(self, size, dataset_id):
        """
        Create a blank page blob of ``size`` bytes to back a new volume.

        :param int size: Total blob size in bytes; the last 512 bytes
            are overwritten with a VHD footer below.
        :param unicode dataset_id: Dataset identifier used to derive
            the blob name.
        """
        # Create a new page blob as a blank disk
        self._azure_storage_client.put_blob(
            container_name=self._disk_container_name,
            blob_name=self._disk_label_for_dataset_id(dataset_id),
            blob=None,
            x_ms_blob_type='PageBlob',
            x_ms_blob_content_type='application/octet-stream',
            x_ms_blob_content_length=size)

        # for disk to be a valid vhd it requires a vhd footer
        # on the last 512 bytes
        vhd_footer = Vhd.generate_vhd_footer(size)

        self._azure_storage_client.put_page(
            container_name=self._disk_container_name,
            blob_name=self._disk_label_for_dataset_id(dataset_id),
            page=vhd_footer,
            x_ms_page_write='update',
            x_ms_range='bytes=' + str((size - 512)) + '-' + str(size - 1))

    def _disk_label_for_dataset_id(self, dataset_id):
        """
        Returns a disk label for a given Dataset ID
        :param unicode dataset_id: The identifier of the dataset
        :returns string: A string representing the disk label
        """

        label = 'flocker-' + str(dataset_id)
        return label

    def _dataset_id_for_disk_label(self, disk_label):
        """
        Returns a UUID representing the Dataset ID for the given disk
        label
        :param string disk_label: The disk label
        :returns UUID: The UUID of the dataset
        """
        return UUID(disk_label.replace('flocker-', ''))

    def _get_disk_vmname_lun(self, blockdevice_id):
        """
        Look up the registered disk, owning VM role name, and LUN for a
        block device.

        :param unicode blockdevice_id: The block device identifier; it
            matches the label of a registered flocker disk.
        :returns tuple: ``(disk, role_name, lun)``. ``role_name`` and
            ``lun`` are ``None`` when the disk is not attached to a VM.
            When no registered disk matches, returns the backing blob
            (or ``None``) in the first slot and ``None`` for the rest.
        """
        target_disk = None

        # Find the registered flocker disk whose label matches.
        for disk in self._azure_service_client.list_disks():
            if 'flocker-' not in disk.label:
                continue
            if disk.label == str(blockdevice_id):
                target_disk = disk
                break

        if target_disk is None:
            # Not registered as a disk yet; fall back to the raw blob,
            # which may also be absent.
            blobs = self._get_flocker_blobs()
            return blobs.get(str(blockdevice_id)), None, None

        role_name = None
        target_lun = None

        if hasattr(target_disk.attached_to, 'role_name'):
            role_name = target_disk.attached_to.role_name
            vm_info = self._azure_service_client.get_role(
                self._service_name, self._service_name, role_name)

            # Match the attached data disk by name to discover its LUN.
            for hard_disk in vm_info.data_virtual_hard_disks:
                if hard_disk.disk_name == target_disk.name:
                    target_lun = hard_disk.lun
                    break

        return (target_disk, role_name, target_lun)

    def _get_flocker_blobs(self):
        """
        Fetch every blob in the disk container whose name starts with
        ``'flocker-'``.

        :returns dict: Mapping of blob name to blob object.
        """
        flocker_blobs = self._azure_storage_client.list_blobs(
            self._disk_container_name, prefix='flocker-')

        # todo - this could be big!
        return dict((blob.name, blob) for blob in flocker_blobs)

    def _wait_for_detach(self, blockdevice_id):
        """
        Poll Azure until the disk is reported as detached from its VM
        (no role name and no LUN).

        :param unicode blockdevice_id: The block device identifier.
        :raises AsynchronousTimeout: If the disk is still attached after
            the polling limit is reached.
        """
        log_info('waiting for azure to report disk as detached...')

        # Non-None sentinels so the loop body runs at least once.
        role_name = ''
        lun = -1
        attempts = 0

        while not (role_name is None and lun is None):
            (target_disk, role_name, lun) = \
                self._get_disk_vmname_lun(blockdevice_id)
            time.sleep(1)
            attempts += 1

            if attempts > 5000:
                raise AsynchronousTimeout()

        log_info('Disk Detached')

    def _wait_for_attach(self, blockdevice_id):
        """
        Poll Azure until the disk is reported as attached (i.e. it has
        been assigned a LUN).

        :param unicode blockdevice_id: The block device identifier.
        :raises AsynchronousTimeout: If no LUN is reported after the
            polling limit is reached.
        """
        log_info('waiting for azure to report disk as attached...')

        attempts = 0
        lun = None

        while lun is None:
            _, _, lun = self._get_disk_vmname_lun(blockdevice_id)
            time.sleep(.001)
            attempts += 1

            if attempts > 5000:
                raise AsynchronousTimeout()

    def _wait_for_async(self, request_id, timeout):
        """
        Block until an Azure asynchronous operation leaves the
        ``InProgress`` state.

        :param request_id: The id of the async request to poll.
        :param int timeout: Maximum number of polls before giving up.
        :raises AsynchronousTimeout: If the operation is still
            ``InProgress`` after ``timeout`` polls.
        """
        count = 0
        result = self._azure_service_client.get_operation_status(request_id)
        while result.status == 'InProgress':
            count = count + 1
            if count > timeout:
                log_error('Timed out waiting for async operation to complete.')
                raise AsynchronousTimeout()
            time.sleep(.001)
            log_info('.')
            result = self._azure_service_client.get_operation_status(
                request_id)
            if result.error:
                log_error(result.error.code)
                log_error(str(result.error.message))

        # Bug fixes: the terminal status (usually 'Succeeded') is not an
        # error, so log at info level; and the old 'count * 5' seconds
        # figure was stale — the poll interval here is 1ms, not 5s — so
        # report the actual number of polls instead.
        log_info(result.status + ' after ' + str(count) + ' polls')

    def _gibytes_to_bytes(self, size):
        """
        Convert a size in GiB to a whole number of bytes.

        :param size: Size in gibibytes.
        :returns int: The equivalent size in bytes.
        """
        in_bytes = GiB(size).to_Byte()
        return int(in_bytes.value)

    def _blockdevicevolume_from_azure_volume(self, label, size,
                                             attached_to_name):
        """
        Build a ``BlockDeviceVolume`` from Azure disk metadata.

        :param label: The disk label, formatted as
            ``flocker-<dataset_id>``.
        :param size: The volume size in bytes. Azure reports disk sizes
            excluding the 512 byte VHD footer, while flocker expects the
            exact size it requested, so callers offset the reported
            size by 512 bytes.
        :param attached_to_name: Name of the node the disk is attached
            to, or ``None``.
        :returns BlockDeviceVolume: The corresponding flocker volume.
        """
        dataset_id = self._dataset_id_for_disk_label(label)
        return BlockDeviceVolume(
            blockdevice_id=unicode(label),
            size=int(size),
            attached_to=attached_to_name,
            dataset_id=dataset_id)