class Deployment(object):
    """
    Helper class to handle deployment of the web site.

    Wraps the Azure Service Management and Service Bus Management APIs to
    create/delete the affinity group, storage accounts, cloud services, VMs
    and service bus resources that make up the site.
    """

    def __init__(self, config):
        """
        config: Configuration object exposing the Azure subscription ID,
            management certificate path and all deployment settings.
        """
        self.config = config
        self.sms = ServiceManagementService(config.getAzureSubscriptionId(),
                                            config.getAzureCertificatePath())
        self.sbms = ServiceBusManagementService(
            config.getAzureSubscriptionId(), config.getAzureCertificatePath())

    @staticmethod
    def _resource_exists(get_resource):
        """
        Helper to check for the existence of a resource in Azure.

        get_resource: Parameter-less function to invoke in order to get the
            resource. The resource is assumed to exist when the call to
            get_resource() returns a value that is not None. If the call to
            get_resource() returns None or throws a
            WindowsAzureMissingResourceError exception, then it is assumed
            that the resource does not exist.

        Returns: A boolean value which is True if the resource exists.
        """
        resource = None
        try:
            resource = get_resource()
        except WindowsAzureMissingResourceError:
            pass
        return resource is not None

    def _wait_for_operation_success(self, request_id, timeout=600, wait=5):
        """
        Waits for an asynchronous Azure operation to finish.

        request_id: The ID of the request to track.
        timeout: Maximum duration (in seconds) allowed for the operation to
            complete.
        wait: Wait time (in seconds) between consecutive calls to fetch the
            latest operation status.

        Raises an Exception when the operation times out or terminates with
        any status other than 'Succeeded'.
        """
        result = self.sms.get_operation_status(request_id)
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        while result.status == 'InProgress':
            if now >= max_time:
                raise Exception(
                    "Operation did not finish within the expected timeout")
            logger.info(
                'Waiting for operation to finish (last_status=%s wait_so_far=%s)',
                result.status, round(now - start_time, 1))
            # Never sleep past the deadline, and never a negative duration.
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            result = self.sms.get_operation_status(request_id)
            now = time.time()
        if result.status != 'Succeeded':
            raise Exception("Operation terminated but it did not succeed.")

    def _wait_for_role_instance_status(self, role_instance_name, service_name,
                                       expected_status, timeout=600, wait=5):
        """
        Waits for a role instance within the web site's cloud service to
        reach the status specified.

        role_instance_name: Name of the role instance.
        service_name: Name of service in which to find the role instance.
        expected_status: Expected instance status.
        timeout: Maximum duration (in seconds) allowed for the operation to
            complete.
        wait: Wait time (in seconds) between consecutive calls to fetch the
            latest role status.
        """
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        while True:
            status = None
            deployment = self.sms.get_deployment_by_name(
                service_name, service_name)
            for role_instance in deployment.role_instance_list:
                if role_instance.instance_name == role_instance_name:
                    status = role_instance.instance_status
            if status == expected_status:
                break
            if now >= max_time:
                raise Exception(
                    "Operation did not finish within the expected timeout")
            logger.info(
                'Waiting for deployment status: expecting %s but got %s (wait_so_far=%s)',
                expected_status, status, round(now - start_time, 1))
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            now = time.time()

    def _wait_for_disk_deletion(self, disk_name, timeout=600, wait=5):
        """
        Waits for a VM disk to disappear when it is being deleted.

        disk_name: Name of the VHD.
        timeout: Maximum duration (in seconds) allowed for the operation to
            complete.
        wait: Wait time (in seconds) between consecutive calls to check for
            the existence of the disk.
        """
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        logger.info("Checking that disk %s has been deleted.", disk_name)
        while self._resource_exists(lambda: self.sms.get_disk(disk_name)):
            if now >= max_time:
                # BUGFIX: the message used a '%s' placeholder with
                # str.format(), so the disk name was never interpolated.
                raise Exception(
                    "Disk {0} was not deleted within the expected timeout.".
                    format(disk_name))
            logger.info("Waiting for disk %s to disappear (wait_so_far=%s).",
                        disk_name, round(now - start_time, 1))
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            now = time.time()
        logger.info("Disk %s has been deleted.", disk_name)

    def _wait_for_namespace_active(self, name, timeout=600, wait=5):
        """
        Waits for a service bus namespace to become Active.

        name: Namespace name.
        timeout: Maximum duration (in seconds) allowed for the operation to
            complete.
        wait: Wait time (in seconds) between consecutive calls to check the
            namespace status.
        """
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        while True:
            props = self.sbms.get_namespace(name)
            status = props.status
            if status == 'Active':
                break
            if now >= max_time:
                raise Exception(
                    "Operation did not finish within the expected timeout")
            # BUGFIX: 'namepsace' typo in the log message.
            logger.info(
                'Waiting for namespace status: expecting Active but got %s (wait_so_far=%s)',
                status, round(now - start_time, 1))
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            now = time.time()

    def _getRoleInstances(self, service_name):
        """
        Returns the role instances in the given cloud service deployment. The
        results are provided as a dictionary where keys are role instance
        names and values are RoleInstance objects.
        """
        role_instances = {}
        if self._resource_exists(lambda: self.sms.get_deployment_by_name(
                service_name, service_name)):
            deployment = self.sms.get_deployment_by_name(
                service_name, service_name)
            for role_instance in deployment.role_instance_list:
                role_instances[role_instance.instance_name] = role_instance
        return role_instances

    def _ensureAffinityGroupExists(self):
        """
        Creates the affinity group if it does not exist.
        """
        name = self.config.getAffinityGroupName()
        location = self.config.getServiceLocation()
        logger.info(
            "Checking for existence of affinity group (name=%s; location=%s).",
            name, location)
        if self._resource_exists(
                lambda: self.sms.get_affinity_group_properties(name)):
            logger.warning("An affinity group named %s already exists.", name)
        else:
            self.sms.create_affinity_group(name, name, location)
            logger.info("Created affinity group %s.", name)

    def _ensureStorageAccountExists(self, name):
        """
        Creates the storage account if it does not exist.
        """
        logger.info("Checking for existence of storage account (name=%s).",
                    name)
        if self._resource_exists(
                lambda: self.sms.get_storage_account_properties(name)):
            logger.warning("A storage account named %s already exists.", name)
        else:
            result = self.sms.create_storage_account(
                name, "", name,
                affinity_group=self.config.getAffinityGroupName())
            self._wait_for_operation_success(
                result.request_id,
                timeout=self.config.getAzureOperationTimeout())
            logger.info("Created storage account %s.", name)

    def _getStorageAccountKey(self, account_name):
        """
        Gets the storage account key (primary key) for the given storage
        account.
        """
        storage_props = self.sms.get_storage_account_keys(account_name)
        return storage_props.storage_service_keys.primary

    def _ensureStorageContainersExist(self):
        """
        Creates Blob storage containers required by the service.
        """
        logger.info("Checking for existence of Blob containers.")
        account_name = self.config.getServiceStorageAccountName()
        account_key = self._getStorageAccountKey(account_name)
        blob_service = BlobService(account_name, account_key)
        name_and_access_list = [
            (self.config.getServicePublicStorageContainer(), 'blob'),
            (self.config.getServiceBundleStorageContainer(), None)
        ]
        for name, access in name_and_access_list:
            logger.info("Checking for existence of Blob container %s.", name)
            # fail_on_exist=False makes this call idempotent.
            blob_service.create_container(name,
                                          x_ms_blob_public_access=access,
                                          fail_on_exist=False)
            access_info = 'private' if access is None else 'public {0}'.format(
                access)
            logger.info("Blob container %s is ready (access: %s).", name,
                        access_info)

    def ensureStorageHasCorsConfiguration(self):
        """
        Ensures Blob storage container for bundles is configured to allow
        cross-origin resource sharing.
        """
        logger.info("Setting CORS rules.")
        account_name = self.config.getServiceStorageAccountName()
        account_key = self._getStorageAccountKey(account_name)
        cors_rule = CorsRule()
        cors_rule.allowed_origins = self.config.getServiceStorageCorsAllowedOrigins(
        )
        cors_rule.allowed_methods = 'PUT'
        cors_rule.exposed_headers = '*'
        cors_rule.allowed_headers = '*'
        cors_rule.max_age_in_seconds = 1800
        cors_rules = Cors()
        cors_rules.cors_rule.append(cors_rule)
        set_storage_service_cors_properties(account_name, account_key,
                                            cors_rules)

    def _ensureServiceExists(self, service_name, affinity_group_name):
        """
        Creates the specified cloud service host if it does not exist.

        service_name: Name of the cloud service.
        affinity_group_name: Name of the affinity group (which should exist).
        """
        logger.info("Checking for existence of cloud service (name=%s).",
                    service_name)
        if self._resource_exists(
                lambda: self.sms.get_hosted_service_properties(service_name)):
            logger.warning("A cloud service named %s already exists.",
                           service_name)
        else:
            self.sms.create_hosted_service(
                service_name, service_name,
                affinity_group=affinity_group_name)
            logger.info("Created cloud service %s.", service_name)

    def _ensureServiceCertificateExists(self, service_name):
        """
        Adds certificate to the specified cloud service.

        service_name: Name of the target cloud service (which should exist).
        """
        cert_format = self.config.getServiceCertificateFormat()
        cert_algorithm = self.config.getServiceCertificateAlgorithm()
        cert_thumbprint = self.config.getServiceCertificateThumbprint()
        cert_path = self.config.getServiceCertificateFilename()
        cert_password = self.config.getServiceCertificatePassword()
        logger.info(
            "Checking for existence of cloud service certificate for service %s.",
            service_name)
        get_cert = lambda: self.sms.get_service_certificate(
            service_name, cert_algorithm, cert_thumbprint)
        if self._resource_exists(get_cert):
            logger.info("Found expected cloud service certificate.")
        else:
            with open(cert_path, 'rb') as f:
                # The management API expects base64-encoded certificate data.
                cert_data = base64.b64encode(f.read())
            if len(cert_data) <= 0:
                raise Exception("Detected invalid certificate data.")
            result = self.sms.add_service_certificate(service_name, cert_data,
                                                      cert_format,
                                                      cert_password)
            self._wait_for_operation_success(
                result.request_id,
                timeout=self.config.getAzureOperationTimeout())
            logger.info("Added service certificate.")

    def _assertOsImageExists(self, os_image_name):
        """
        Asserts that the named OS image exists.
        """
        logger.info("Checking for availability of OS image (name=%s).",
                    os_image_name)
        if self.sms.get_os_image(os_image_name) is None:
            raise Exception(
                "Unable to find OS Image '{0}'.".format(os_image_name))

    def _ensureVirtualMachinesExist(self):
        """
        Creates the VMs for the web site.

        The first VM creates the deployment; subsequent VMs are added as
        roles to that deployment. SSH endpoints are assigned consecutive
        public ports starting at the configured base SSH port; HTTP is
        load-balanced across all instances on port 80.
        """
        service_name = self.config.getServiceName()
        cert_thumbprint = self.config.getServiceCertificateThumbprint()
        vm_username = self.config.getVirtualMachineLogonUsername()
        vm_password = self.config.getVirtualMachineLogonPassword()
        vm_role_size = self.config.getServiceInstanceRoleSize()
        vm_numbers = self.config.getServiceInstanceCount()
        if vm_numbers < 1:
            raise Exception(
                "Detected an invalid number of instances: {0}.".format(
                    vm_numbers))

        self._assertOsImageExists(self.config.getServiceOSImageName())

        role_instances = self._getRoleInstances(service_name)
        for vm_number in range(1, vm_numbers + 1):
            vm_hostname = '{0}-{1}'.format(service_name, vm_number)
            if vm_hostname in role_instances:
                logger.warning(
                    "Role instance %s already exists: skipping creation.",
                    vm_hostname)
                continue

            logger.info("Role instance %s provisioning begins.", vm_hostname)
            vm_diskname = '{0}.vhd'.format(vm_hostname)
            vm_disk_media_link = 'http://{0}.blob.core.windows.net/vhds/{1}'.format(
                self.config.getServiceStorageAccountName(), vm_diskname)
            ssh_port = str(self.config.getServiceInstanceSshPort() + vm_number)

            os_hd = OSVirtualHardDisk(self.config.getServiceOSImageName(),
                                      vm_disk_media_link,
                                      disk_name=vm_diskname,
                                      disk_label=vm_diskname)

            linux_config = LinuxConfigurationSet(vm_hostname, vm_username,
                                                 vm_password, True)
            linux_config.ssh.public_keys.public_keys.append(
                PublicKey(
                    cert_thumbprint,
                    u'/home/{0}/.ssh/authorized_keys'.format(vm_username)))
            linux_config.ssh.key_pairs.key_pairs.append(
                KeyPair(cert_thumbprint,
                        u'/home/{0}/.ssh/id_rsa'.format(vm_username)))

            network_config = ConfigurationSet()
            network_config.configuration_set_type = 'NetworkConfiguration'
            ssh_endpoint = ConfigurationSetInputEndpoint(name='SSH',
                                                         protocol='TCP',
                                                         port=ssh_port,
                                                         local_port=u'22')
            network_config.input_endpoints.input_endpoints.append(ssh_endpoint)
            http_endpoint = ConfigurationSetInputEndpoint(
                name='HTTP',
                protocol='TCP',
                port=u'80',
                local_port=u'80',
                load_balanced_endpoint_set_name=service_name)
            http_endpoint.load_balancer_probe.port = '80'
            http_endpoint.load_balancer_probe.protocol = 'TCP'
            network_config.input_endpoints.input_endpoints.append(
                http_endpoint)

            if vm_number == 1:
                # First instance: create the deployment itself.
                result = self.sms.create_virtual_machine_deployment(
                    service_name=service_name,
                    deployment_name=service_name,
                    deployment_slot='Production',
                    label=vm_hostname,
                    role_name=vm_hostname,
                    system_config=linux_config,
                    os_virtual_hard_disk=os_hd,
                    network_config=network_config,
                    availability_set_name=service_name,
                    data_virtual_hard_disks=None,
                    role_size=vm_role_size)
            else:
                # Subsequent instances: add a role to the deployment.
                result = self.sms.add_role(service_name=service_name,
                                           deployment_name=service_name,
                                           role_name=vm_hostname,
                                           system_config=linux_config,
                                           os_virtual_hard_disk=os_hd,
                                           network_config=network_config,
                                           availability_set_name=service_name,
                                           role_size=vm_role_size)
            self._wait_for_operation_success(
                result.request_id,
                timeout=self.config.getAzureOperationTimeout())
            self._wait_for_role_instance_status(
                vm_hostname, service_name, 'ReadyRole',
                self.config.getAzureOperationTimeout())

            logger.info("Role instance %s has been created.", vm_hostname)

    def _deleteVirtualMachines(self, service_name):
        """
        Deletes the VMs in the given cloud service.
        """
        if not self._resource_exists(lambda: self.sms.get_deployment_by_name(
                service_name, service_name)):
            logger.warning("Deployment %s not found: no VMs to delete.",
                           service_name)
        else:
            logger.info("Attempting to delete deployment %s.", service_name)
            # Get set of role instances before we remove them
            role_instances = self._getRoleInstances(service_name)

            def update_request(request):
                """
                A filter to intercept the HTTP request sent by the
                ServiceManagementService so we can take advantage of a newer
                feature ('comp=media') in the delete deployment API (see
                http://msdn.microsoft.com/en-us/library/windowsazure/ee460812.aspx)
                """
                hdrs = []
                for name, value in request.headers:
                    if 'x-ms-version' == name:
                        value = '2013-08-01'
                    hdrs.append((name, value))
                request.headers = hdrs
                request.path = request.path + '?comp=media'
                #pylint: disable=W0212
                response = self.sms._filter(request)
                return response

            svc = ServiceManagementService(self.sms.subscription_id,
                                           self.sms.cert_file)
            #pylint: disable=W0212
            svc._filter = update_request
            result = svc.delete_deployment(service_name, service_name)
            logger.info(
                "Deployment %s deletion in progress: waiting for delete_deployment operation.",
                service_name)
            self._wait_for_operation_success(result.request_id)
            logger.info(
                "Deployment %s deletion in progress: waiting for VM disks to be removed.",
                service_name)
            # Now wait for the disks to disappear
            for role_instance_name in role_instances.keys():
                disk_name = "{0}.vhd".format(role_instance_name)
                self._wait_for_disk_deletion(disk_name)
            logger.info("Deployment %s deleted.", service_name)

    def _ensureBuildMachineExists(self):
        """
        Creates the VM for the build server.
        """
        service_name = self.config.getBuildServiceName()
        service_storage_name = self.config.getStorageAccountName()
        cert_thumbprint = self.config.getServiceCertificateThumbprint()
        vm_username = self.config.getVirtualMachineLogonUsername()
        vm_password = self.config.getVirtualMachineLogonPassword()
        vm_hostname = service_name

        role_instances = self._getRoleInstances(service_name)
        if vm_hostname in role_instances:
            logger.warning(
                "Role instance %s already exists: skipping creation.",
                vm_hostname)
        else:
            logger.info("Role instance %s provisioning begins.", vm_hostname)
            self._assertOsImageExists(self.config.getBuildOSImageName())

            vm_diskname = '{0}.vhd'.format(vm_hostname)
            vm_disk_media_link = 'http://{0}.blob.core.windows.net/vhds/{1}'.format(
                service_storage_name, vm_diskname)

            os_hd = OSVirtualHardDisk(self.config.getBuildOSImageName(),
                                      vm_disk_media_link,
                                      disk_name=vm_diskname,
                                      disk_label=vm_diskname)

            linux_config = LinuxConfigurationSet(vm_hostname, vm_username,
                                                 vm_password, True)
            linux_config.ssh.public_keys.public_keys.append(
                PublicKey(
                    cert_thumbprint,
                    u'/home/{0}/.ssh/authorized_keys'.format(vm_username)))
            linux_config.ssh.key_pairs.key_pairs.append(
                KeyPair(cert_thumbprint,
                        u'/home/{0}/.ssh/id_rsa'.format(vm_username)))

            network_config = ConfigurationSet()
            network_config.configuration_set_type = 'NetworkConfiguration'
            ssh_endpoint = ConfigurationSetInputEndpoint(name='SSH',
                                                         protocol='TCP',
                                                         port=u'22',
                                                         local_port=u'22')
            network_config.input_endpoints.input_endpoints.append(ssh_endpoint)

            result = self.sms.create_virtual_machine_deployment(
                service_name=service_name,
                deployment_name=service_name,
                deployment_slot='Production',
                label=vm_hostname,
                role_name=vm_hostname,
                system_config=linux_config,
                os_virtual_hard_disk=os_hd,
                network_config=network_config,
                availability_set_name=None,
                data_virtual_hard_disks=None,
                role_size=self.config.getBuildInstanceRoleSize())
            self._wait_for_operation_success(
                result.request_id,
                timeout=self.config.getAzureOperationTimeout())
            self._wait_for_role_instance_status(
                vm_hostname, service_name, 'ReadyRole',
                self.config.getAzureOperationTimeout())

            logger.info("Role instance %s has been created.", vm_hostname)

    def _deleteStorageAccount(self, name):
        """
        Deletes the storage account for the web site.
        """
        logger.info("Attempting to delete storage account %s.", name)
        if not self._resource_exists(
                lambda: self.sms.get_storage_account_properties(name)):
            logger.warning("Storage account %s not found: nothing to delete.",
                           name)
        else:
            self.sms.delete_storage_account(name)
            logger.info("Storage account %s deleted.", name)

    def _deleteService(self, name):
        """
        Deletes the specified cloud service.
        """
        logger.info("Attempting to delete cloud service %s.", name)
        if not self._resource_exists(
                lambda: self.sms.get_hosted_service_properties(name)):
            logger.warning("Cloud service %s not found: nothing to delete.",
                           name)
        else:
            self.sms.delete_hosted_service(name)
            logger.info("Cloud service %s deleted.", name)

    def _deleteAffinityGroup(self):
        """
        Deletes the affinity group for the web site.
        """
        name = self.config.getAffinityGroupName()
        logger.info("Attempting to delete affinity group %s.", name)
        if not self._resource_exists(
                lambda: self.sms.get_affinity_group_properties(name)):
            logger.warning("Affinity group %s not found: nothing to delete.",
                           name)
        else:
            self.sms.delete_affinity_group(name)
            logger.info("Affinity group %s deleted.", name)

    def _ensureServiceBusNamespaceExists(self):
        """
        Creates the Azure Service Bus Namespace if it does not exist.
        """
        name = self.config.getServiceBusNamespace()
        logger.info(
            "Checking for existence of service bus namespace (name=%s).",
            name)
        if self._resource_exists(lambda: self.sbms.get_namespace(name)):
            logger.warning("A namespace named %s already exists.", name)
        else:
            self.sbms.create_namespace(name, self.config.getServiceLocation())
            self._wait_for_namespace_active(name)
            logger.info("Created namespace %s.", name)

    def _ensureServiceBusQueuesExist(self):
        """
        Creates Azure service bus queues required by the service.
        """
        logger.info("Checking for existence of Service Bus Queues.")
        namespace = self.sbms.get_namespace(
            self.config.getServiceBusNamespace())
        sbs = ServiceBusService(namespace.name,
                                namespace.default_key,
                                issuer='owner')
        queue_names = [
            'jobresponsequeue', 'windowscomputequeue', 'linuxcomputequeue'
        ]
        for name in queue_names:
            logger.info("Checking for existence of Queue %s.", name)
            # fail_on_exist=False makes this call idempotent.
            sbs.create_queue(name, fail_on_exist=False)
            logger.info("Queue %s is ready.", name)

    def _deleteServiceBusNamespace(self):
        """
        Deletes the Azure Service Bus Namespace.
        """
        name = self.config.getServiceBusNamespace()
        logger.info("Attempting to delete service bus namespace %s.", name)
        if not self._resource_exists(lambda: self.sbms.get_namespace(name)):
            logger.warning("Namespace %s not found: nothing to delete.", name)
        else:
            self.sbms.delete_namespace(name)
            logger.info("Namespace %s deleted.", name)

    def Deploy(self, assets):
        """
        Creates a deployment.

        assets: The set of assets to create. The full set is:
            {'build', 'web'}.
        """
        if len(assets) == 0:
            raise ValueError("Set of assets to deploy is not specified.")
        logger.info("Starting deployment operation.")
        self._ensureAffinityGroupExists()
        self._ensureStorageAccountExists(self.config.getStorageAccountName())
        # Build instance
        if 'build' in assets:
            self._ensureServiceExists(self.config.getBuildServiceName(),
                                      self.config.getAffinityGroupName())
            self._ensureServiceCertificateExists(
                self.config.getBuildServiceName())
            self._ensureBuildMachineExists()
        # Web instances
        if 'web' in assets:
            self._ensureStorageAccountExists(
                self.config.getServiceStorageAccountName())
            self._ensureStorageContainersExist()
            self.ensureStorageHasCorsConfiguration()
            self._ensureServiceBusNamespaceExists()
            self._ensureServiceBusQueuesExist()
            self._ensureServiceExists(self.config.getServiceName(),
                                      self.config.getAffinityGroupName())
            self._ensureServiceCertificateExists(self.config.getServiceName())
            self._ensureVirtualMachinesExist()
        logger.info("Deployment operation is complete.")

    def Teardown(self, assets):
        """
        Deletes a deployment.

        assets: The set of assets to delete. The full set is:
            {'web', 'build'}.
        """
        if len(assets) == 0:
            raise ValueError("Set of assets to teardown is not specified.")
        logger.info("Starting teardown operation.")
        if 'web' in assets:
            self._deleteVirtualMachines(self.config.getServiceName())
            self._deleteService(self.config.getServiceName())
            self._deleteStorageAccount(
                self.config.getServiceStorageAccountName())
        if 'build' in assets:
            self._deleteVirtualMachines(self.config.getBuildServiceName())
            self._deleteService(self.config.getBuildServiceName())
            self._deleteStorageAccount(self.config.getStorageAccountName())
        # Shared resources are only removed on a full teardown.
        if ('web' in assets) and ('build' in assets):
            self._deleteServiceBusNamespace()
            self._deleteAffinityGroup()
        logger.info("Teardown operation is complete.")

    def getSettingsFileContent(self):
        """
        Generates the content of the local Django settings file.

        Returns the settings module source as a single newline-joined string.
        """
        allowed_hosts = [
            '{0}.cloudapp.net'.format(self.config.getServiceName())
        ]
        allowed_hosts.extend(self.config.getWebHostnames())
        allowed_hosts.extend(['www.codalab.org', 'codalab.org'])
        ssl_allowed_hosts = self.config.getSslRewriteHosts()
        if len(ssl_allowed_hosts) == 0:
            ssl_allowed_hosts = allowed_hosts

        storage_key = self._getStorageAccountKey(
            self.config.getServiceStorageAccountName())
        namespace = self.sbms.get_namespace(
            self.config.getServiceBusNamespace())

        if len(self.config.getSslCertificateInstalledPath()) > 0:
            bundle_auth_scheme = "https"
        else:
            bundle_auth_scheme = "http"
        # NOTE(review): ssl_allowed_hosts is never empty at this point
        # (it falls back to allowed_hosts above), so the first branch is
        # unreachable; kept for behavioral parity.
        if len(ssl_allowed_hosts) == 0:
            bundle_auth_host = '{0}.cloudapp.net'.format(
                self.config.getServiceName())
        else:
            bundle_auth_host = ssl_allowed_hosts[0]
        bundle_auth_url = "{0}://{1}".format(bundle_auth_scheme,
                                             bundle_auth_host)

        # NOTE(review): several entries below contain '******' literals with
        # no format placeholders (the .format() calls are no-ops) -- these
        # look like redacted secrets in the checked-in source; confirm the
        # intended placeholders before relying on the generated settings.
        lines = [
            "from base import Base",
            "from default import *",
            "from configurations import Settings",
            "",
            "import sys",
            "from os.path import dirname, abspath, join",
            "from pkgutil import extend_path",
            "import codalab",
            "",
            "class {0}(Base):".format(self.config.getDjangoConfiguration()),
            "",
            " DEBUG=False",
            "",
            " ALLOWED_HOSTS = {0}".format(allowed_hosts),
            "",
            " SSL_PORT = '443'",
            " SSL_CERTIFICATE = '{0}'".format(
                self.config.getSslCertificateInstalledPath()),
            " SSL_CERTIFICATE_KEY = '{0}'".format(
                self.config.getSslCertificateKeyInstalledPath()),
            " SSL_ALLOWED_HOSTS = {0}".format(ssl_allowed_hosts),
            "",
            " DEFAULT_FILE_STORAGE = 'codalab.azure_storage.AzureStorage'",
            " AZURE_ACCOUNT_NAME = '{0}'".format(
                self.config.getServiceStorageAccountName()),
            " AZURE_ACCOUNT_KEY = '{0}'".format(storage_key),
            " AZURE_CONTAINER = '{0}'".format(
                self.config.getServicePublicStorageContainer()),
            " BUNDLE_AZURE_ACCOUNT_NAME = AZURE_ACCOUNT_NAME",
            " BUNDLE_AZURE_ACCOUNT_KEY = AZURE_ACCOUNT_KEY",
            " BUNDLE_AZURE_CONTAINER = '{0}'".format(
                self.config.getServiceBundleStorageContainer()),
            "",
            " SBS_NAMESPACE = '{0}'".format(
                self.config.getServiceBusNamespace()),
            " SBS_ISSUER = 'owner'",
            " SBS_ACCOUNT_KEY = '{0}'".format(namespace.default_key),
            " SBS_RESPONSE_QUEUE = 'jobresponsequeue'",
            " SBS_COMPUTE_QUEUE = 'windowscomputequeue'",
            "",
            " EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'",
            " EMAIL_HOST = '{0}'".format(self.config.getEmailHost()),
            " EMAIL_HOST_USER = '******'".format(self.config.getEmailUser()),
            " EMAIL_HOST_PASSWORD = '******'".format(
                self.config.getEmailPassword()),
            " EMAIL_PORT = 587",
            " EMAIL_USE_TLS = True",
            " DEFAULT_FROM_EMAIL = '*****@*****.**'",
            " SERVER_EMAIL = '*****@*****.**'",
            "",
            " # Django secret",
            " SECRET_KEY = '{0}'".format(self.config.getDjangoSecretKey()),
            "",
            " ADMINS = (('CodaLab', '*****@*****.**'),)",
            " MANAGERS = ADMINS",
            "",
            " DATABASES = {",
            " 'default': {",
            " 'ENGINE': '{0}',".format(
                self.config.getDatabaseEngine()),
            " 'NAME': '{0}',".format(self.config.getDatabaseName()),
            " 'USER': '******',".format(self.config.getDatabaseUser()),
            " 'PASSWORD': '******',".format(
                self.config.getDatabasePassword()),
            " 'HOST': '{0}',".format(self.config.getDatabaseHost()),
            " 'PORT': '{0}', ".format(
                self.config.getDatabasePort()),
            " 'OPTIONS' : {",
            " 'init_command': 'SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED',",
            " 'read_timeout': 5",
            " }",
            " }",
            " }",
            "",
            " BUNDLE_DB_NAME = '{0}'".format(
                self.config.getBundleServiceDatabaseName()),
            " BUNDLE_DB_USER = '******'".format(
                self.config.getBundleServiceDatabaseUser()),
            " BUNDLE_DB_PASSWORD = '******'".format(
                self.config.getBundleServiceDatabasePassword()),
            " BUNDLE_APP_ID = '{0}'".format(
                self.config.getBundleServiceAppId()),
            " BUNDLE_APP_KEY = '{0}'".format(
                self.config.getBundleServiceAppKey()),
            " BUNDLE_AUTH_URL = '{0}'".format(bundle_auth_url),
            "",
            " BUNDLE_SERVICE_URL = '{0}'".format(
                self.config.getBundleServiceUrl()),
            " BUNDLE_SERVICE_CODE_PATH = '/home/{0}/deploy/bundles'".format(
                self.config.getVirtualMachineLogonUsername()),
            " sys.path.append(BUNDLE_SERVICE_CODE_PATH)",
            " codalab.__path__ = extend_path(codalab.__path__, codalab.__name__)",
            "",
        ]

        preview = self.config.getShowPreviewFeatures()
        if preview >= 1:
            if preview == 1:
                lines.append(" PREVIEW_WORKSHEETS = True")
            if preview > 1:
                lines.append(" SHOW_BETA_FEATURES = True")
            lines.append("")

        return '\n'.join(lines)
# NOTE(review): this fragment depends on names defined earlier in the file
# (result, sms, config_params, storage_acc_name); shown here with the mixed
# Python-2 print statements normalized to print() calls, which work on both
# Python 2 and 3 for single-argument prints.
for hosted_service in result:
    print('Service name: ' + hosted_service.service_name)
    print('Management URL: ' + hosted_service.url)
    print('Location: ' + hosted_service.hosted_service_properties.location)
    print('')

print("The following storage accounts are now up:")
result = sms.list_storage_accounts()
for account in result:
    print('Account Service name: ' + account.service_name)
    print('Storage account url: ' + account.url)
    print('Location: ' + account.storage_service_properties.location)
    print('Storage Account Keys:')
    storageServiceObj = sms.get_storage_account_keys(account.service_name)
    print(storageServiceObj.storage_service_keys.primary)
    print(storageServiceObj.storage_service_keys.secondary)
    print('')
    if account.service_name == storage_acc_name:
        # NOTE(review): keys were already fetched above for this account;
        # the second call is redundant but kept for behavioral parity.
        storageServiceObj = sms.get_storage_account_keys(account.service_name)
        storage_acc_key = storageServiceObj.storage_service_keys.primary

# cert_path = "/home/rohan/temp2/myCert.pem"
cert_path = config_params["vm_cert_path"]
with open(cert_path, "rb") as bfile:
    # decode to make sure this is a str and not a bstr
    cert_data = base64.b64encode(bfile.read()).decode()
# NOTE(review): cert_format is 'pfx' while the example path above is a .pem;
# confirm the configured certificate really is PKCS#12.
cert_format = 'pfx'
class StorageManagementServiceTest(AzureTestCase):
    """Integration tests for storage-account operations of the legacy Azure
    Service Management API."""

    def setUp(self):
        proxy_host = credentials.getProxyHost()
        proxy_port = credentials.getProxyPort()

        self.sms = ServiceManagementService(credentials.getSubscriptionId(),
                                            credentials.getManagementCertFile())
        if proxy_host:
            self.sms.set_proxy(proxy_host, proxy_port)

        self.storage_account_name = getUniqueNameBasedOnCurrentTime('utstorage')

    def tearDown(self):
        # Best-effort cleanup: the account may not exist if the test failed
        # before creating it.  BUGFIX: narrowed the bare 'except:' so that
        # KeyboardInterrupt/SystemExit are not swallowed.
        try:
            self.sms.delete_storage_account(self.storage_account_name)
        except Exception:
            pass

    #--Helpers-----------------------------------------------------------------
    def _wait_for_async(self, request_id):
        """Polls the async operation until it completes (max ~10 minutes)."""
        count = 0
        result = self.sms.get_operation_status(request_id)
        while result.status == 'InProgress':
            count = count + 1
            if count > 120:
                # BUGFIX: use fail() instead of assertTrue(False, ...).
                self.fail('Timed out waiting for async operation to complete.')
            time.sleep(5)
            result = self.sms.get_operation_status(request_id)
        self.assertEqual(result.status, 'Succeeded')

    def _create_storage_account(self, name):
        result = self.sms.create_storage_account(name,
                                                 name + 'description',
                                                 name + 'label',
                                                 None,
                                                 'West US',
                                                 False,
                                                 {'ext1': 'val1', 'ext2': 42})
        self._wait_for_async(result.request_id)

    def _storage_account_exists(self, name):
        # BUGFIX: narrowed the bare 'except:'; a missing account surfaces as
        # a service exception, which is what we treat as "does not exist".
        try:
            props = self.sms.get_storage_account_properties(name)
            return props is not None
        except Exception:
            return False

    #--Test cases for storage accounts -----------------------------------
    def test_list_storage_accounts(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.list_storage_accounts()

        # Assert
        self.assertIsNotNone(result)
        self.assertTrue(len(result) > 0)

        storage = None
        for temp in result:
            if temp.service_name == self.storage_account_name:
                storage = temp
                break

        self.assertIsNotNone(storage)
        self.assertIsNotNone(storage.service_name)
        self.assertIsNone(storage.storage_service_keys)
        self.assertIsNotNone(storage.storage_service_properties)
        self.assertIsNotNone(storage.storage_service_properties.affinity_group)
        self.assertIsNotNone(storage.storage_service_properties.description)
        self.assertIsNotNone(storage.storage_service_properties.geo_primary_region)
        self.assertIsNotNone(storage.storage_service_properties.geo_replication_enabled)
        self.assertIsNotNone(storage.storage_service_properties.geo_secondary_region)
        self.assertIsNotNone(storage.storage_service_properties.label)
        self.assertIsNotNone(storage.storage_service_properties.last_geo_failover_time)
        self.assertIsNotNone(storage.storage_service_properties.location)
        self.assertIsNotNone(storage.storage_service_properties.status)
        self.assertIsNotNone(storage.storage_service_properties.status_of_primary)
        self.assertIsNotNone(storage.storage_service_properties.status_of_secondary)
        self.assertIsNotNone(storage.storage_service_properties.endpoints)
        self.assertTrue(len(storage.storage_service_properties.endpoints) > 0)
        self.assertIsNotNone(storage.extended_properties)
        self.assertTrue(len(storage.extended_properties) > 0)

    def test_get_storage_account_properties(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.get_storage_account_properties(self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertEqual(result.service_name, self.storage_account_name)
        self.assertIsNotNone(result.url)
        self.assertIsNone(result.storage_service_keys)
        self.assertIsNotNone(result.storage_service_properties)
        self.assertIsNotNone(result.storage_service_properties.affinity_group)
        self.assertIsNotNone(result.storage_service_properties.description)
        self.assertIsNotNone(result.storage_service_properties.geo_primary_region)
        self.assertIsNotNone(result.storage_service_properties.geo_replication_enabled)
        self.assertIsNotNone(result.storage_service_properties.geo_secondary_region)
        self.assertIsNotNone(result.storage_service_properties.label)
        self.assertIsNotNone(result.storage_service_properties.last_geo_failover_time)
        self.assertIsNotNone(result.storage_service_properties.location)
        self.assertIsNotNone(result.storage_service_properties.status)
        self.assertIsNotNone(result.storage_service_properties.status_of_primary)
        self.assertIsNotNone(result.storage_service_properties.status_of_secondary)
        self.assertIsNotNone(result.storage_service_properties.endpoints)
        self.assertTrue(len(result.storage_service_properties.endpoints) > 0)
        self.assertIsNotNone(result.extended_properties)
        self.assertTrue(len(result.extended_properties) > 0)
        self.assertIsNotNone(result.capabilities)
        self.assertTrue(len(result.capabilities) > 0)

    def test_get_storage_account_keys(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.get_storage_account_keys(self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertIsNotNone(result.url)
        self.assertIsNotNone(result.service_name)
        self.assertIsNotNone(result.storage_service_keys.primary)
        self.assertIsNotNone(result.storage_service_keys.secondary)
        self.assertIsNone(result.storage_service_properties)

    def test_regenerate_storage_account_keys(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)
        previous = self.sms.get_storage_account_keys(self.storage_account_name)

        # Act
        result = self.sms.regenerate_storage_account_keys(
            self.storage_account_name, 'Secondary')

        # Assert
        self.assertIsNotNone(result)
        self.assertIsNotNone(result.url)
        self.assertIsNotNone(result.service_name)
        self.assertIsNotNone(result.storage_service_keys.primary)
        self.assertIsNotNone(result.storage_service_keys.secondary)
        self.assertIsNone(result.storage_service_properties)
        # Only the secondary key was regenerated; primary must be unchanged.
        self.assertEqual(result.storage_service_keys.primary,
                         previous.storage_service_keys.primary)
        self.assertNotEqual(result.storage_service_keys.secondary,
                            previous.storage_service_keys.secondary)

    def test_create_storage_account(self):
        # Arrange
        description = self.storage_account_name + 'description'
        label = self.storage_account_name + 'label'

        # Act
        result = self.sms.create_storage_account(self.storage_account_name,
                                                 description,
                                                 label,
                                                 None,
                                                 'West US',
                                                 True,
                                                 {'ext1': 'val1', 'ext2': 42})
        self._wait_for_async(result.request_id)

        # Assert
        self.assertTrue(self._storage_account_exists(self.storage_account_name))

    def test_update_storage_account(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)
        description = self.storage_account_name + 'descriptionupdate'
        label = self.storage_account_name + 'labelupdate'

        # Act
        result = self.sms.update_storage_account(
            self.storage_account_name, description, label, False,
            {'ext1': 'val1update', 'ext2': 53, 'ext3': 'brandnew'})

        # Assert
        self.assertIsNone(result)
        props = self.sms.get_storage_account_properties(self.storage_account_name)
        self.assertEqual(props.storage_service_properties.description,
                         description)
        self.assertEqual(props.storage_service_properties.label, label)
        self.assertEqual(props.extended_properties['ext1'], 'val1update')
        # Extended property values round-trip as strings.
        self.assertEqual(props.extended_properties['ext2'], '53')
        self.assertEqual(props.extended_properties['ext3'], 'brandnew')

    def test_delete_storage_account(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.delete_storage_account(self.storage_account_name)

        # Assert
        self.assertIsNone(result)
        self.assertFalse(self._storage_account_exists(self.storage_account_name))

    def test_check_storage_account_name_availability_not_available(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.check_storage_account_name_availability(
            self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertFalse(result.result)

    def test_check_storage_account_name_availability_available(self):
        # Arrange

        # Act
        result = self.sms.check_storage_account_name_availability(
            self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertTrue(result.result)

    def test_unicode_create_storage_account_unicode_name(self):
        # Arrange
        # NOTE(review): 'unicode' is Python-2-only; this module targets the
        # legacy Python 2 Azure SDK.
        self.storage_account_name = unicode(self.storage_account_name) + u'啊齄丂狛狜'
        description = 'description'
        label = 'label'

        # Act
        with self.assertRaises(WindowsAzureError):
            # not supported - queue name must be alphanumeric, lowercase
            result = self.sms.create_storage_account(self.storage_account_name,
                                                     description,
                                                     label,
                                                     None,
                                                     'West US',
                                                     True,
                                                     {'ext1': 'val1', 'ext2': 42})
            self._wait_for_async(result.request_id)

        # Assert

    def test_unicode_create_storage_account_unicode_description_label(self):
        # Arrange
        description = u'啊齄丂狛狜'
        label = u'丂狛狜'

        # Act
        result = self.sms.create_storage_account(self.storage_account_name,
                                                 description,
                                                 label,
                                                 None,
                                                 'West US',
                                                 True,
                                                 {'ext1': 'val1', 'ext2': 42})
        self._wait_for_async(result.request_id)

        # Assert
        result = self.sms.get_storage_account_properties(self.storage_account_name)
        self.assertEqual(result.storage_service_properties.description,
                         description)
        self.assertEqual(result.storage_service_properties.label, label)

    def test_unicode_create_storage_account_unicode_property_value(self):
        # Arrange
        description = 'description'
        label = 'label'

        # Act
        result = self.sms.create_storage_account(self.storage_account_name,
                                                 description,
                                                 label,
                                                 None,
                                                 'West US',
                                                 True,
                                                 {'ext1': u'丂狛狜', 'ext2': 42})
        self._wait_for_async(result.request_id)

        # Assert
        result = self.sms.get_storage_account_properties(self.storage_account_name)
        self.assertEqual(result.storage_service_properties.description,
                         description)
        self.assertEqual(result.storage_service_properties.label, label)
        self.assertEqual(result.extended_properties['ext1'], u'丂狛狜')
def get_sentence(s):
    """
    Return the argument portion of a shell command string.

    Skips the command word and its following spaces, stops at the first
    unescaped space, strips trailing '/' characters, and collapses '//'
    into '/'. Hoisted out of main() so it can be tested in isolation.
    """
    st = s.find(' ')
    while st < len(s) and s[st] == ' ':
        st += 1
    ed = len(s)
    for i in range(st, len(s)):
        # A space preceded by a backslash is escaped and does not end
        # the argument.
        if s[i] == ' ' and s[i - 1] != '\\':
            ed = i
            break
    while ed > 0 and s[ed - 1] == '/':
        ed -= 1
    return s[st:ed].replace("//", "/")


def main():
    """
    Interactive shell ("simple bash") to browse and download the contents
    of a VHD stored in an Azure storage account.

    Usage: python inspector.py <url of the vhd>

    Supported commands: ls, cd <dir>, download <file>, quit.
    """
    config = __import__('config')
    subscription_id = get_certificate_from_publish_settings(
        publish_settings_path=config.publish_settings_path,
        path_to_write_certificate=config.path_to_write_certificate,
    )
    cert_file = config.path_to_write_certificate
    sms = ServiceManagementService(subscription_id, cert_file)

    if len(sys.argv) < 2:
        print("format should be python inspector.py <url of the vhd>")
        # BUG FIX: the original evaluated the bare name `exit` (a no-op
        # expression) instead of calling it, so execution continued and
        # crashed on sys.argv[1]. Exit explicitly with a failure code.
        sys.exit(1)
    url = sys.argv[1]

    # URL is of the form https://<account>.blob.core.windows.net/...;
    # slice off the 8-char scheme up to the first '.' for the account name.
    storage_name = url[8:url.find('.')]
    storage_account_key = sms.get_storage_account_keys(
        storage_name).storage_service_keys.primary.encode('ascii', 'ignore')
    nowpath = "/"

    global last_query_files_num
    while True:
        cmd = raw_input(nowpath + " $ ")
        if cmd.split(' ')[0] == "quit":
            break
        elif cmd.split(' ')[0] == "ls":
            old_main(url=url, account_key=storage_account_key,
                     path=nowpath, ls=True)
        elif cmd.startswith("cd "):
            sentence = get_sentence(cmd)
            if sentence != "":
                if sentence == "..":
                    # Pop the last path component, keeping the trailing '/'.
                    if nowpath != "/":
                        nowpath = nowpath[:nowpath[:-1].rfind('/') + 1]
                elif sentence[0] == '/':
                    # Absolute path: probe it, then switch if it exists.
                    old_main(url=url, account_key=storage_account_key,
                             path=sentence, ls=True)
                    if last_query_files_num == 0:
                        print("no such directory")
                    else:
                        nowpath = sentence + "/"
                elif sentence != "":
                    # Relative path: probe it under the current directory.
                    old_main(url=url, account_key=storage_account_key,
                             path=(nowpath + sentence), ls=True)
                    if last_query_files_num == 0:
                        print("no such directory")
                    else:
                        nowpath += sentence + "/"
        elif cmd.startswith("download "):
            sentence = get_sentence(cmd)
            tmp = sentence.rfind('/')
            if sentence != "":
                old_main(url=url, account_key=storage_account_key,
                         path=(nowpath + sentence[:tmp]),
                         filename=sentence[(tmp + 1):])
        else:
            print("invalid command")
class Deployment(object):
    """
    Helper class to handle deployment of the web site.
    """

    def __init__(self, config):
        # Service-management and service-bus-management clients share the
        # same subscription and management certificate.
        self.config = config
        self.sms = ServiceManagementService(config.getAzureSubscriptionId(),
                                            config.getAzureCertificatePath())
        self.sbms = ServiceBusManagementService(
            config.getAzureSubscriptionId(), config.getAzureCertificatePath())

    @staticmethod
    def _resource_exists(get_resource):
        """
        Helper to check for the existence of a resource in Azure.

        get_resource: Parameter-less function to invoke in order to get the
            resource. The resource is assumed to exist when the call to
            get_resource() returns a value that is not None. If the call to
            get_resource() returns None or throws a
            WindowsAzureMissingResourceError exception, then it is assumed
            that the resource does not exist.

        Returns: A boolean value which is True if the resource exists.
        """
        resource = None
        try:
            resource = get_resource()
        except WindowsAzureMissingResourceError:
            pass
        return resource is not None

    def _wait_for_operation_success(self, request_id, timeout=600, wait=5):
        """
        Waits for an asynchronous Azure operation to finish.

        request_id: The ID of the request to track.
        timeout: Maximum duration (in seconds) allowed for the operation to
            complete.
        wait: Wait time (in seconds) between consecutive calls to fetch the
            latest operation status.

        Raises Exception on timeout or when the operation does not succeed.
        """
        result = self.sms.get_operation_status(request_id)
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        while result.status == 'InProgress':
            if now >= max_time:
                raise Exception(
                    "Operation did not finish within the expected timeout")
            logger.info(
                'Waiting for operation to finish (last_status=%s wait_so_far=%s)',
                result.status, round(now - start_time, 1))
            # Never sleep past the deadline, and never a negative amount.
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            result = self.sms.get_operation_status(request_id)
            now = time.time()
        if result.status != 'Succeeded':
            raise Exception("Operation terminated but it did not succeed.")

    def _wait_for_role_instance_status(self, role_instance_name, service_name,
                                       expected_status, timeout=600, wait=5):
        """
        Waits for a role instance within the web site's cloud service to
        reach the status specified.

        role_instance_name: Name of the role instance.
        service_name: Name of service in which to find the role instance.
        expected_status: Expected instance status.
        timeout: Maximum duration (in seconds) allowed for the operation to
            complete.
        wait: Wait time (in seconds) between consecutive calls to fetch the
            latest role status.
        """
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        while True:
            status = None
            # Deployment name is the same as the service name by convention.
            deployment = self.sms.get_deployment_by_name(service_name,
                                                         service_name)
            for role_instance in deployment.role_instance_list:
                if role_instance.instance_name == role_instance_name:
                    status = role_instance.instance_status
            if status == expected_status:
                break
            if now >= max_time:
                raise Exception(
                    "Operation did not finish within the expected timeout")
            logger.info(
                'Waiting for deployment status: expecting %s but got %s (wait_so_far=%s)',
                expected_status, status, round(now - start_time, 1))
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            now = time.time()

    def _wait_for_disk_deletion(self, disk_name, timeout=600, wait=5):
        """
        Waits for a VM disk to disappear when it is being deleted.

        disk_name: Name of the VHD.
        timeout: Maximum duration (in seconds) allowed for the operation to
            complete.
        wait: Wait time (in seconds) between consecutive calls to check for
            the existence of the disk.
        """
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        logger.info("Checking that disk %s has been deleted.", disk_name)
        while self._resource_exists(lambda: self.sms.get_disk(disk_name)):
            if now >= max_time:
                # BUG FIX: the original message used a '%s' placeholder with
                # str.format(), so the disk name was never interpolated.
                raise Exception(
                    "Disk {0} was not deleted within the expected timeout."
                    .format(disk_name))
            logger.info("Waiting for disk %s to disappear (wait_so_far=%s).",
                        disk_name, round(now - start_time, 1))
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            now = time.time()
        logger.info("Disk %s has been deleted.", disk_name)

    def _wait_for_namespace_active(self, name, timeout=600, wait=5):
        """
        Waits for a service bus namespace to become Active.

        name: Namespace name.
        timeout: Maximum duration (in seconds) allowed for the operation to
            complete.
        wait: Wait time (in seconds) between consecutive status checks.
        """
        start_time = time.time()
        max_time = start_time + timeout
        now = start_time
        while True:
            status = None
            props = self.sbms.get_namespace(name)
            status = props.status
            if status == 'Active':
                break
            if now >= max_time:
                raise Exception(
                    "Operation did not finish within the expected timeout")
            logger.info(
                'Waiting for namespace status: expecting Active but got %s (wait_so_far=%s)',
                status, round(now - start_time, 1))
            time_to_wait = max(0.0, min(max_time - now, wait))
            time.sleep(time_to_wait)
            now = time.time()

    def _getRoleInstances(self, service_name):
        """
        Returns the role instances in the given cloud service deployment.
        The results are provided as a dictionary where keys are role
        instance names and values are RoleInstance objects.
        """
        role_instances = {}
        if self._resource_exists(
                lambda: self.sms.get_deployment_by_name(service_name,
                                                        service_name)):
            deployment = self.sms.get_deployment_by_name(service_name,
                                                         service_name)
            for role_instance in deployment.role_instance_list:
                role_instances[role_instance.instance_name] = role_instance
        return role_instances

    def _ensureAffinityGroupExists(self):
        """
        Creates the affinity group if it does not exist.
        """
        name = self.config.getAffinityGroupName()
        location = self.config.getServiceLocation()
        logger.info(
            "Checking for existence of affinity group (name=%s; location=%s).",
            name, location)
        if self._resource_exists(
                lambda: self.sms.get_affinity_group_properties(name)):
            logger.warn("An affinity group named %s already exists.", name)
        else:
            self.sms.create_affinity_group(name, name, location)
            logger.info("Created affinity group %s.", name)

    def _ensureStorageAccountExists(self, name):
        """
        Creates the storage account if it does not exist.
        """
        logger.info("Checking for existence of storage account (name=%s).",
                    name)
        if self._resource_exists(
                lambda: self.sms.get_storage_account_properties(name)):
            logger.warn("A storage account named %s already exists.", name)
        else:
            result = self.sms.create_storage_account(
                name, "", name,
                affinity_group=self.config.getAffinityGroupName())
            self._wait_for_operation_success(
                result.request_id,
                timeout=self.config.getAzureOperationTimeout())
            logger.info("Created storage account %s.", name)

    def _getStorageAccountKey(self, account_name):
        """
        Gets the storage account key (primary key) for the given storage
        account.
        """
        storage_props = self.sms.get_storage_account_keys(account_name)
        return storage_props.storage_service_keys.primary

    def _ensureStorageContainersExist(self):
        """
        Creates Blob storage containers required by the service.
        """
        logger.info("Checking for existence of Blob containers.")
        account_name = self.config.getServiceStorageAccountName()
        account_key = self._getStorageAccountKey(account_name)
        blob_service = BlobService(account_name, account_key)
        name_and_access_list = [
            (self.config.getServicePublicStorageContainer(), 'blob'),
            (self.config.getServiceBundleStorageContainer(), None),
        ]
        for name, access in name_and_access_list:
            logger.info("Checking for existence of Blob container %s.", name)
            # fail_on_exist=False makes this idempotent.
            blob_service.create_container(name,
                                          x_ms_blob_public_access=access,
                                          fail_on_exist=False)
            access_info = 'private' if access is None \
                else 'public {0}'.format(access)
            logger.info("Blob container %s is ready (access: %s).",
                        name, access_info)

    def ensureStorageHasCorsConfiguration(self):
        """
        Ensures Blob storage container for bundles is configured to allow
        cross-origin resource sharing.
        """
        logger.info("Setting CORS rules.")
        account_name = self.config.getServiceStorageAccountName()
        account_key = self._getStorageAccountKey(account_name)

        cors_rule = CorsRule()
        cors_rule.allowed_origins = \
            self.config.getServiceStorageCorsAllowedOrigins()
        cors_rule.allowed_methods = 'PUT'
        cors_rule.exposed_headers = '*'
        cors_rule.allowed_headers = '*'
        cors_rule.max_age_in_seconds = 1800
        cors_rules = Cors()
        cors_rules.cors_rule.append(cors_rule)
        set_storage_service_cors_properties(account_name, account_key,
                                            cors_rules)

    def _ensureServiceExists(self, service_name, affinity_group_name):
        """
        Creates the specified cloud service host if it does not exist.

        service_name: Name of the cloud service.
        affinity_group_name: Name of the affinity group (which should exist).
        """
        logger.info("Checking for existence of cloud service (name=%s).",
                    service_name)
        if self._resource_exists(
                lambda: self.sms.get_hosted_service_properties(service_name)):
            logger.warn("A cloud service named %s already exists.",
                        service_name)
        else:
            self.sms.create_hosted_service(
                service_name, service_name,
                affinity_group=affinity_group_name)
            logger.info("Created cloud service %s.", service_name)

    def _ensureServiceCertificateExists(self, service_name):
        """
        Adds certificate to the specified cloud service.

        service_name: Name of the target cloud service (which should exist).
        """
        cert_format = self.config.getServiceCertificateFormat()
        cert_algorithm = self.config.getServiceCertificateAlgorithm()
        cert_thumbprint = self.config.getServiceCertificateThumbprint()
        cert_path = self.config.getServiceCertificateFilename()
        cert_password = self.config.getServiceCertificatePassword()
        logger.info(
            "Checking for existence of cloud service certificate for service %s.",
            service_name)
        get_cert = lambda: self.sms.get_service_certificate(
            service_name, cert_algorithm, cert_thumbprint)
        if self._resource_exists(get_cert):
            logger.info("Found expected cloud service certificate.")
        else:
            with open(cert_path, 'rb') as f:
                cert_data = base64.b64encode(f.read())
            if len(cert_data) <= 0:
                raise Exception("Detected invalid certificate data.")
            result = self.sms.add_service_certificate(
                service_name, cert_data, cert_format, cert_password)
            self._wait_for_operation_success(
                result.request_id,
                timeout=self.config.getAzureOperationTimeout())
            logger.info("Added service certificate.")

    def _assertOsImageExists(self, os_image_name):
        """
        Asserts that the named OS image exists.
        """
        logger.info("Checking for availability of OS image (name=%s).",
                    os_image_name)
        if self.sms.get_os_image(os_image_name) is None:
            raise Exception(
                "Unable to find OS Image '{0}'.".format(os_image_name))

    def _ensureVirtualMachinesExist(self):
        """
        Creates the VMs for the web site.
        """
        service_name = self.config.getServiceName()
        cert_thumbprint = self.config.getServiceCertificateThumbprint()
        vm_username = self.config.getVirtualMachineLogonUsername()
        vm_password = self.config.getVirtualMachineLogonPassword()
        vm_role_size = self.config.getServiceInstanceRoleSize()
        vm_numbers = self.config.getServiceInstanceCount()
        if vm_numbers < 1:
            raise Exception(
                "Detected an invalid number of instances: {0}.".format(
                    vm_numbers))

        self._assertOsImageExists(self.config.getServiceOSImageName())

        role_instances = self._getRoleInstances(service_name)
        for vm_number in range(1, vm_numbers + 1):
            vm_hostname = '{0}-{1}'.format(service_name, vm_number)
            if vm_hostname in role_instances:
                logger.warn(
                    "Role instance %s already exists: skipping creation.",
                    vm_hostname)
                continue

            logger.info("Role instance %s provisioning begins.", vm_hostname)
            vm_diskname = '{0}.vhd'.format(vm_hostname)
            vm_disk_media_link = \
                'http://{0}.blob.core.windows.net/vhds/{1}'.format(
                    self.config.getServiceStorageAccountName(), vm_diskname)
            # Each instance gets its own forwarded SSH port.
            ssh_port = str(self.config.getServiceInstanceSshPort() + vm_number)

            os_hd = OSVirtualHardDisk(self.config.getServiceOSImageName(),
                                      vm_disk_media_link,
                                      disk_name=vm_diskname,
                                      disk_label=vm_diskname)
            linux_config = LinuxConfigurationSet(vm_hostname, vm_username,
                                                 vm_password, True)
            linux_config.ssh.public_keys.public_keys.append(
                PublicKey(cert_thumbprint,
                          u'/home/{0}/.ssh/authorized_keys'.format(
                              vm_username)))
            linux_config.ssh.key_pairs.key_pairs.append(
                KeyPair(cert_thumbprint,
                        u'/home/{0}/.ssh/id_rsa'.format(vm_username)))
            network_config = ConfigurationSet()
            network_config.configuration_set_type = 'NetworkConfiguration'
            ssh_endpoint = ConfigurationSetInputEndpoint(name='SSH',
                                                         protocol='TCP',
                                                         port=ssh_port,
                                                         local_port=u'22')
            network_config.input_endpoints.input_endpoints.append(
                ssh_endpoint)
            # Port 80 is load-balanced across all web instances.
            http_endpoint = ConfigurationSetInputEndpoint(
                name='HTTP', protocol='TCP', port=u'80', local_port=u'80',
                load_balanced_endpoint_set_name=service_name)
            http_endpoint.load_balancer_probe.port = '80'
            http_endpoint.load_balancer_probe.protocol = 'TCP'
            network_config.input_endpoints.input_endpoints.append(
                http_endpoint)

            if vm_number == 1:
                # The first VM creates the deployment itself.
                result = self.sms.create_virtual_machine_deployment(
                    service_name=service_name,
                    deployment_name=service_name,
                    deployment_slot='Production',
                    label=vm_hostname,
                    role_name=vm_hostname,
                    system_config=linux_config,
                    os_virtual_hard_disk=os_hd,
                    network_config=network_config,
                    availability_set_name=service_name,
                    data_virtual_hard_disks=None,
                    role_size=vm_role_size)
            else:
                # Subsequent VMs are added as roles to that deployment.
                result = self.sms.add_role(
                    service_name=service_name,
                    deployment_name=service_name,
                    role_name=vm_hostname,
                    system_config=linux_config,
                    os_virtual_hard_disk=os_hd,
                    network_config=network_config,
                    availability_set_name=service_name,
                    role_size=vm_role_size)
            # Both branches wait identically (consolidated from the
            # original's duplicated wait calls).
            self._wait_for_operation_success(
                result.request_id,
                timeout=self.config.getAzureOperationTimeout())
            self._wait_for_role_instance_status(
                vm_hostname, service_name, 'ReadyRole',
                self.config.getAzureOperationTimeout())

            logger.info("Role instance %s has been created.", vm_hostname)

    def _deleteVirtualMachines(self, service_name):
        """
        Deletes the VMs in the given cloud service.
        """
        if not self._resource_exists(
                lambda: self.sms.get_deployment_by_name(service_name,
                                                        service_name)):
            logger.warn("Deployment %s not found: no VMs to delete.",
                        service_name)
        else:
            logger.info("Attempting to delete deployment %s.", service_name)
            # Get set of role instances before we remove them
            role_instances = self._getRoleInstances(service_name)

            def update_request(request):
                """
                A filter to intercept the HTTP request sent by the
                ServiceManagementService so we can take advantage of a newer
                feature ('comp=media') in the delete deployment API (see
                http://msdn.microsoft.com/en-us/library/windowsazure/ee460812.aspx)
                """
                hdrs = []
                for name, value in request.headers:
                    if 'x-ms-version' == name:
                        value = '2013-08-01'
                    hdrs.append((name, value))
                request.headers = hdrs
                request.path = request.path + '?comp=media'
                #pylint: disable=W0212
                response = self.sms._filter(request)
                return response

            svc = ServiceManagementService(self.sms.subscription_id,
                                           self.sms.cert_file)
            #pylint: disable=W0212
            svc._filter = update_request
            result = svc.delete_deployment(service_name, service_name)
            logger.info(
                "Deployment %s deletion in progress: waiting for delete_deployment operation.",
                service_name)
            self._wait_for_operation_success(result.request_id)
            logger.info(
                "Deployment %s deletion in progress: waiting for VM disks to be removed.",
                service_name)
            # Now wait for the disks to disappear
            for role_instance_name in role_instances.keys():
                disk_name = "{0}.vhd".format(role_instance_name)
                self._wait_for_disk_deletion(disk_name)
            logger.info("Deployment %s deleted.", service_name)

    def _ensureBuildMachineExists(self):
        """
        Creates the VM for the build server.
        """
        service_name = self.config.getBuildServiceName()
        service_storage_name = self.config.getStorageAccountName()
        cert_thumbprint = self.config.getServiceCertificateThumbprint()
        vm_username = self.config.getVirtualMachineLogonUsername()
        vm_password = self.config.getVirtualMachineLogonPassword()
        vm_hostname = service_name

        role_instances = self._getRoleInstances(service_name)
        if vm_hostname in role_instances:
            logger.warn("Role instance %s already exists: skipping creation.",
                        vm_hostname)
        else:
            logger.info("Role instance %s provisioning begins.", vm_hostname)
            self._assertOsImageExists(self.config.getBuildOSImageName())

            vm_diskname = '{0}.vhd'.format(vm_hostname)
            vm_disk_media_link = \
                'http://{0}.blob.core.windows.net/vhds/{1}'.format(
                    service_storage_name, vm_diskname)
            os_hd = OSVirtualHardDisk(self.config.getBuildOSImageName(),
                                      vm_disk_media_link,
                                      disk_name=vm_diskname,
                                      disk_label=vm_diskname)
            linux_config = LinuxConfigurationSet(vm_hostname, vm_username,
                                                 vm_password, True)
            linux_config.ssh.public_keys.public_keys.append(
                PublicKey(cert_thumbprint,
                          u'/home/{0}/.ssh/authorized_keys'.format(
                              vm_username)))
            linux_config.ssh.key_pairs.key_pairs.append(
                KeyPair(cert_thumbprint,
                        u'/home/{0}/.ssh/id_rsa'.format(vm_username)))
            network_config = ConfigurationSet()
            network_config.configuration_set_type = 'NetworkConfiguration'
            ssh_endpoint = ConfigurationSetInputEndpoint(name='SSH',
                                                         protocol='TCP',
                                                         port=u'22',
                                                         local_port=u'22')
            network_config.input_endpoints.input_endpoints.append(
                ssh_endpoint)

            result = self.sms.create_virtual_machine_deployment(
                service_name=service_name,
                deployment_name=service_name,
                deployment_slot='Production',
                label=vm_hostname,
                role_name=vm_hostname,
                system_config=linux_config,
                os_virtual_hard_disk=os_hd,
                network_config=network_config,
                availability_set_name=None,
                data_virtual_hard_disks=None,
                role_size=self.config.getBuildInstanceRoleSize())
            self._wait_for_operation_success(
                result.request_id,
                timeout=self.config.getAzureOperationTimeout())
            self._wait_for_role_instance_status(
                vm_hostname, service_name, 'ReadyRole',
                self.config.getAzureOperationTimeout())
            logger.info("Role instance %s has been created.", vm_hostname)

    def _deleteStorageAccount(self, name):
        """
        Deletes the storage account for the web site.
        """
        logger.info("Attempting to delete storage account %s.", name)
        if not self._resource_exists(
                lambda: self.sms.get_storage_account_properties(name)):
            logger.warn("Storage account %s not found: nothing to delete.",
                        name)
        else:
            self.sms.delete_storage_account(name)
            logger.info("Storage account %s deleted.", name)

    def _deleteService(self, name):
        """
        Deletes the specified cloud service.
        """
        logger.info("Attempting to delete cloud service %s.", name)
        if not self._resource_exists(
                lambda: self.sms.get_hosted_service_properties(name)):
            logger.warn("Cloud service %s not found: nothing to delete.",
                        name)
        else:
            self.sms.delete_hosted_service(name)
            logger.info("Cloud service %s deleted.", name)

    def _deleteAffinityGroup(self):
        """
        Deletes the affinity group for the web site.
        """
        name = self.config.getAffinityGroupName()
        logger.info("Attempting to delete affinity group %s.", name)
        if not self._resource_exists(
                lambda: self.sms.get_affinity_group_properties(name)):
            logger.warn("Affinity group %s not found: nothing to delete.",
                        name)
        else:
            self.sms.delete_affinity_group(name)
            logger.info("Affinity group %s deleted.", name)

    def _ensureServiceBusNamespaceExists(self):
        """
        Creates the Azure Service Bus Namespace if it does not exist.
        """
        name = self.config.getServiceBusNamespace()
        logger.info(
            "Checking for existence of service bus namespace (name=%s).",
            name)
        if self._resource_exists(lambda: self.sbms.get_namespace(name)):
            logger.warn("A namespace named %s already exists.", name)
        else:
            self.sbms.create_namespace(name, self.config.getServiceLocation())
            self._wait_for_namespace_active(name)
            logger.info("Created namespace %s.", name)

    def _ensureServiceBusQueuesExist(self):
        """
        Creates Azure service bus queues required by the service.
        """
        logger.info("Checking for existence of Service Bus Queues.")
        namespace = self.sbms.get_namespace(
            self.config.getServiceBusNamespace())
        sbs = ServiceBusService(namespace.name, namespace.default_key,
                                issuer='owner')
        queue_names = ['jobresponsequeue', 'windowscomputequeue',
                       'linuxcomputequeue']
        for name in queue_names:
            logger.info("Checking for existence of Queue %s.", name)
            # fail_on_exist=False makes this idempotent.
            sbs.create_queue(name, fail_on_exist=False)
            logger.info("Queue %s is ready.", name)

    def _deleteServiceBusNamespace(self):
        """
        Deletes the Azure Service Bus Namespace.
        """
        name = self.config.getServiceBusNamespace()
        logger.info("Attempting to delete service bus namespace %s.", name)
        if not self._resource_exists(lambda: self.sbms.get_namespace(name)):
            logger.warn("Namespace %s not found: nothing to delete.", name)
        else:
            self.sbms.delete_namespace(name)
            logger.info("Namespace %s deleted.", name)

    def Deploy(self, assets):
        """
        Creates a deployment.

        assets: The set of assets to create. The full set is:
            {'build', 'web'}.
        """
        if len(assets) == 0:
            raise ValueError("Set of assets to deploy is not specified.")
        logger.info("Starting deployment operation.")
        self._ensureAffinityGroupExists()
        self._ensureStorageAccountExists(self.config.getStorageAccountName())
        ## Build instance
        if 'build' in assets:
            self._ensureServiceExists(self.config.getBuildServiceName(),
                                      self.config.getAffinityGroupName())
            self._ensureServiceCertificateExists(
                self.config.getBuildServiceName())
            self._ensureBuildMachineExists()
        # Web instances
        if 'web' in assets:
            self._ensureStorageAccountExists(
                self.config.getServiceStorageAccountName())
            self._ensureStorageContainersExist()
            self.ensureStorageHasCorsConfiguration()
            self._ensureServiceBusNamespaceExists()
            self._ensureServiceBusQueuesExist()
            self._ensureServiceExists(self.config.getServiceName(),
                                      self.config.getAffinityGroupName())
            self._ensureServiceCertificateExists(self.config.getServiceName())
            self._ensureVirtualMachinesExist()
        #queues
        logger.info("Deployment operation is complete.")

    def Teardown(self, assets):
        """
        Deletes a deployment.

        assets: The set of assets to delete. The full set is:
            {'web', 'build'}.
        """
        if len(assets) == 0:
            raise ValueError("Set of assets to teardown is not specified.")
        logger.info("Starting teardown operation.")
        if 'web' in assets:
            self._deleteVirtualMachines(self.config.getServiceName())
            self._deleteService(self.config.getServiceName())
            self._deleteStorageAccount(
                self.config.getServiceStorageAccountName())
        if 'build' in assets:
            self._deleteVirtualMachines(self.config.getBuildServiceName())
            self._deleteService(self.config.getBuildServiceName())
            self._deleteStorageAccount(self.config.getStorageAccountName())
        # Shared resources are removed only on a full teardown.
        if ('web' in assets) and ('build' in assets):
            self._deleteServiceBusNamespace()
            self._deleteAffinityGroup()
        logger.info("Teardown operation is complete.")

    def getSettingsFileContent(self):
        """
        Generates the content of the local Django settings file.
        """
        allowed_hosts = ['{0}.cloudapp.net'.format(
            self.config.getServiceName())]
        allowed_hosts.extend(self.config.getWebHostnames())
        allowed_hosts.extend(['www.codalab.org', 'codalab.org'])
        ssl_allowed_hosts = self.config.getSslRewriteHosts()
        if len(ssl_allowed_hosts) == 0:
            ssl_allowed_hosts = allowed_hosts

        storage_key = self._getStorageAccountKey(
            self.config.getServiceStorageAccountName())
        namespace = self.sbms.get_namespace(
            self.config.getServiceBusNamespace())

        if len(self.config.getSslCertificateInstalledPath()) > 0:
            bundle_auth_scheme = "https"
        else:
            bundle_auth_scheme = "http"
        # NOTE(review): ssl_allowed_hosts was defaulted to allowed_hosts
        # above, so this branch is effectively always False; kept for
        # behavior parity with the original.
        if len(ssl_allowed_hosts) == 0:
            bundle_auth_host = '{0}.cloudapp.net'.format(
                self.config.getServiceName())
        else:
            bundle_auth_host = ssl_allowed_hosts[0]
        bundle_auth_url = "{0}://{1}".format(bundle_auth_scheme,
                                             bundle_auth_host)

        lines = [
            "from base import Base",
            "from default import *",
            "from configurations import Settings",
            "",
            "import sys",
            "from os.path import dirname, abspath, join",
            "from pkgutil import extend_path",
            "import codalab",
            "",
            "class {0}(Base):".format(self.config.getDjangoConfiguration()),
            "",
            "    DEBUG=False",
            "",
            "    ALLOWED_HOSTS = {0}".format(allowed_hosts),
            "",
            "    SSL_PORT = '443'",
            "    SSL_CERTIFICATE = '{0}'".format(
                self.config.getSslCertificateInstalledPath()),
            "    SSL_CERTIFICATE_KEY = '{0}'".format(
                self.config.getSslCertificateKeyInstalledPath()),
            "    SSL_ALLOWED_HOSTS = {0}".format(ssl_allowed_hosts),
            "",
            "    DEFAULT_FILE_STORAGE = 'codalab.azure_storage.AzureStorage'",
            "    AZURE_ACCOUNT_NAME = '{0}'".format(
                self.config.getServiceStorageAccountName()),
            "    AZURE_ACCOUNT_KEY = '{0}'".format(storage_key),
            "    AZURE_CONTAINER = '{0}'".format(
                self.config.getServicePublicStorageContainer()),
            "    BUNDLE_AZURE_ACCOUNT_NAME = AZURE_ACCOUNT_NAME",
            "    BUNDLE_AZURE_ACCOUNT_KEY = AZURE_ACCOUNT_KEY",
            "    BUNDLE_AZURE_CONTAINER = '{0}'".format(
                self.config.getServiceBundleStorageContainer()),
            "",
            "    SBS_NAMESPACE = '{0}'".format(
                self.config.getServiceBusNamespace()),
            "    SBS_ISSUER = 'owner'",
            "    SBS_ACCOUNT_KEY = '{0}'".format(namespace.default_key),
            "    SBS_RESPONSE_QUEUE = 'jobresponsequeue'",
            "    SBS_COMPUTE_QUEUE = 'windowscomputequeue'",
            "",
            "    EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'",
            "    EMAIL_HOST = '{0}'".format(self.config.getEmailHost()),
            # NOTE(review): the '******' literals below appear to be values
            # scrubbed from the original source — each .format() call has no
            # placeholder to fill, so the argument is discarded. Confirm the
            # intended templates (likely '{0}') before relying on this
            # generated settings file.
            "    EMAIL_HOST_USER = '******'".format(
                self.config.getEmailUser()),
            "    EMAIL_HOST_PASSWORD = '******'".format(
                self.config.getEmailPassword()),
            "    EMAIL_PORT = 587",
            "    EMAIL_USE_TLS = True",
            "    DEFAULT_FROM_EMAIL = 'CodaLab <*****@*****.**>'",
            "    SERVER_EMAIL = '*****@*****.**'",
            "",
            "    # Django secret",
            "    SECRET_KEY = '{0}'".format(
                self.config.getDjangoSecretKey()),
            "",
            "    ADMINS = (('CodaLab', '*****@*****.**'),)",
            "    MANAGERS = ADMINS",
            "",
            "    DATABASES = {",
            "        'default': {",
            "            'ENGINE': '{0}',".format(
                self.config.getDatabaseEngine()),
            "            'NAME': '{0}',".format(
                self.config.getDatabaseName()),
            "            'USER': '******',".format(
                self.config.getDatabaseUser()),
            "            'PASSWORD': '******',".format(
                self.config.getDatabasePassword()),
            "            'HOST': '{0}',".format(
                self.config.getDatabaseHost()),
            "            'PORT': '{0}', ".format(
                self.config.getDatabasePort()),
            "            'OPTIONS' : {",
            "                'init_command': 'SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED',",
            "                'read_timeout': 5",
            "            }",
            "        }",
            "    }",
            "",
            "    BUNDLE_DB_NAME = '{0}'".format(
                self.config.getBundleServiceDatabaseName()),
            "    BUNDLE_DB_USER = '******'".format(
                self.config.getBundleServiceDatabaseUser()),
            "    BUNDLE_DB_PASSWORD = '******'".format(
                self.config.getBundleServiceDatabasePassword()),
            "    BUNDLE_APP_ID = '{0}'".format(
                self.config.getBundleServiceAppId()),
            "    BUNDLE_APP_KEY = '{0}'".format(
                self.config.getBundleServiceAppKey()),
            "    BUNDLE_AUTH_URL = '{0}'".format(bundle_auth_url),
            "",
            "    BUNDLE_SERVICE_URL = '{0}'".format(
                self.config.getBundleServiceUrl()),
            "    BUNDLE_SERVICE_CODE_PATH = '/home/{0}/deploy/bundles'".format(
                self.config.getVirtualMachineLogonUsername()),
            "    sys.path.append(BUNDLE_SERVICE_CODE_PATH)",
            "    codalab.__path__ = extend_path(codalab.__path__, codalab.__name__)",
            "",
        ]
        preview = self.config.getShowPreviewFeatures()
        if preview >= 1:
            if preview == 1:
                lines.append("    PREVIEW_WORKSHEETS = True")
            if preview > 1:
                lines.append("    SHOW_BETA_FEATURES = True")
            lines.append("")
        return '\n'.join(lines)
# NOTE(review): the three prints below reference `hosted_service`, which is
# presumably the loop variable of an enclosing `for` over hosted services
# that starts before this fragment — confirm against the full script.
print('Management URL: ' + hosted_service.url)
print('Location: ' + hosted_service.hosted_service_properties.location)
print('')

print("The following storage accounts are now up:")
result = sms.list_storage_accounts()
for account in result:
    print('Account Service name: ' + account.service_name)
    print('Storage account url: ' + account.url)
    print('Location: ' + account.storage_service_properties.location)
    print('Storage Account Keys:')
    storageServiceObj = sms.get_storage_account_keys(account.service_name)
    print(storageServiceObj.storage_service_keys.primary)
    print(storageServiceObj.storage_service_keys.secondary)
    print('')
    # Remember the primary key of the account we are provisioning against.
    if account.service_name == storage_acc_name:
        storageServiceObj = sms.get_storage_account_keys(account.service_name)
        storage_acc_key = storageServiceObj.storage_service_keys.primary

# cert_path = "/home/rohan/temp2/myCert.pem"
cert_path = config_params["vm_cert_path"]
with open(cert_path, "rb") as bfile:
    # decode to make sure this is a str and not a bstr
    cert_data = base64.b64encode(bfile.read()).decode()
class StorageManagementServiceTest(AzureTestCase):
    """Integration tests for the storage-account operations of
    ServiceManagementService: list/get/create/update/delete, key
    management, name-availability checks, and unicode handling.

    Each test provisions (and tears down) a uniquely named account, so
    these tests hit the live management endpoint and are slow.
    """

    def setUp(self):
        # Route traffic through a proxy when the credentials file
        # specifies one.
        proxy_host = credentials.getProxyHost()
        proxy_port = credentials.getProxyPort()

        self.sms = ServiceManagementService(
            credentials.getSubscriptionId(),
            credentials.getManagementCertFile())
        if proxy_host:
            self.sms.set_proxy(proxy_host, proxy_port)

        # A name unique per run keeps parallel or aborted runs from
        # colliding on the globally-unique storage-account namespace.
        self.storage_account_name = getUniqueNameBasedOnCurrentTime(
            'utstorage')

    def tearDown(self):
        # Best-effort cleanup: the account may never have been created,
        # or may already be deleted, so errors here are not failures.
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # are never swallowed.
        try:
            self.sms.delete_storage_account(self.storage_account_name)
        except Exception:
            pass

    #--Helpers-----------------------------------------------------------------
    def _wait_for_async(self, request_id):
        """Poll an asynchronous management operation until it leaves
        'InProgress', failing the test after 120 polls (~10 minutes at
        5 seconds apiece), then assert that it succeeded."""
        count = 0
        result = self.sms.get_operation_status(request_id)
        while result.status == 'InProgress':
            count = count + 1
            if count > 120:
                # self.fail(msg) is the idiomatic replacement for
                # self.assertTrue(False, msg); same AssertionError.
                self.fail('Timed out waiting for async operation to complete.')
            time.sleep(5)
            result = self.sms.get_operation_status(request_id)
        self.assertEqual(result.status, 'Succeeded')

    def _create_storage_account(self, name):
        """Create a storage account named *name* in 'West US' with
        geo-replication disabled and two extended properties, and block
        until provisioning completes."""
        result = self.sms.create_storage_account(
            name,
            name + 'description',
            name + 'label',
            None,
            'West US',
            False,
            {'ext1': 'val1',
             'ext2': 42})
        self._wait_for_async(result.request_id)

    def _storage_account_exists(self, name):
        """Return True when the service reports properties for *name*.

        Any service error (notably missing-resource) is treated as
        "does not exist". Was a bare `except:`; narrowed to Exception.
        """
        try:
            props = self.sms.get_storage_account_properties(name)
            return props is not None
        except Exception:
            return False

    #--Test cases for storage accounts -----------------------------------
    def test_list_storage_accounts(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.list_storage_accounts()

        # Assert
        self.assertIsNotNone(result)
        self.assertTrue(len(result) > 0)

        storage = None
        for temp in result:
            if temp.service_name == self.storage_account_name:
                storage = temp
                break

        self.assertIsNotNone(storage)
        self.assertIsNotNone(storage.service_name)
        # Listing never returns keys; they require a separate call.
        self.assertIsNone(storage.storage_service_keys)
        self.assertIsNotNone(storage.storage_service_properties)
        self.assertIsNotNone(storage.storage_service_properties.affinity_group)
        self.assertIsNotNone(storage.storage_service_properties.description)
        self.assertIsNotNone(
            storage.storage_service_properties.geo_primary_region)
        self.assertIsNotNone(
            storage.storage_service_properties.geo_replication_enabled)
        self.assertIsNotNone(
            storage.storage_service_properties.geo_secondary_region)
        self.assertIsNotNone(storage.storage_service_properties.label)
        self.assertIsNotNone(
            storage.storage_service_properties.last_geo_failover_time)
        self.assertIsNotNone(storage.storage_service_properties.location)
        self.assertIsNotNone(storage.storage_service_properties.status)
        self.assertIsNotNone(
            storage.storage_service_properties.status_of_primary)
        self.assertIsNotNone(
            storage.storage_service_properties.status_of_secondary)
        self.assertIsNotNone(storage.storage_service_properties.endpoints)
        self.assertTrue(len(storage.storage_service_properties.endpoints) > 0)
        self.assertIsNotNone(storage.extended_properties)
        self.assertTrue(len(storage.extended_properties) > 0)

    def test_get_storage_account_properties(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.get_storage_account_properties(
            self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertEqual(result.service_name, self.storage_account_name)
        self.assertIsNotNone(result.url)
        # Properties call never returns keys either.
        self.assertIsNone(result.storage_service_keys)
        self.assertIsNotNone(result.storage_service_properties)
        self.assertIsNotNone(result.storage_service_properties.affinity_group)
        self.assertIsNotNone(result.storage_service_properties.description)
        self.assertIsNotNone(
            result.storage_service_properties.geo_primary_region)
        self.assertIsNotNone(
            result.storage_service_properties.geo_replication_enabled)
        self.assertIsNotNone(
            result.storage_service_properties.geo_secondary_region)
        self.assertIsNotNone(result.storage_service_properties.label)
        self.assertIsNotNone(
            result.storage_service_properties.last_geo_failover_time)
        self.assertIsNotNone(result.storage_service_properties.location)
        self.assertIsNotNone(result.storage_service_properties.status)
        self.assertIsNotNone(
            result.storage_service_properties.status_of_primary)
        self.assertIsNotNone(
            result.storage_service_properties.status_of_secondary)
        self.assertIsNotNone(result.storage_service_properties.endpoints)
        self.assertTrue(len(result.storage_service_properties.endpoints) > 0)
        self.assertIsNotNone(result.extended_properties)
        self.assertTrue(len(result.extended_properties) > 0)
        self.assertIsNotNone(result.capabilities)
        self.assertTrue(len(result.capabilities) > 0)

    def test_get_storage_account_keys(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.get_storage_account_keys(self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertIsNotNone(result.url)
        self.assertIsNotNone(result.service_name)
        self.assertIsNotNone(result.storage_service_keys.primary)
        self.assertIsNotNone(result.storage_service_keys.secondary)
        # Keys call returns keys only, no properties.
        self.assertIsNone(result.storage_service_properties)

    def test_regenerate_storage_account_keys(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)
        previous = self.sms.get_storage_account_keys(self.storage_account_name)

        # Act
        result = self.sms.regenerate_storage_account_keys(
            self.storage_account_name, 'Secondary')

        # Assert
        self.assertIsNotNone(result)
        self.assertIsNotNone(result.url)
        self.assertIsNotNone(result.service_name)
        self.assertIsNotNone(result.storage_service_keys.primary)
        self.assertIsNotNone(result.storage_service_keys.secondary)
        self.assertIsNone(result.storage_service_properties)
        # Only the secondary key was regenerated; primary is untouched.
        self.assertEqual(result.storage_service_keys.primary,
                         previous.storage_service_keys.primary)
        self.assertNotEqual(result.storage_service_keys.secondary,
                            previous.storage_service_keys.secondary)

    def test_create_storage_account(self):
        # Arrange
        description = self.storage_account_name + 'description'
        label = self.storage_account_name + 'label'

        # Act
        result = self.sms.create_storage_account(
            self.storage_account_name,
            description,
            label,
            None,
            'West US',
            True,
            {'ext1': 'val1',
             'ext2': 42})
        self._wait_for_async(result.request_id)

        # Assert
        self.assertTrue(self._storage_account_exists(
            self.storage_account_name))

    def test_update_storage_account(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)
        description = self.storage_account_name + 'descriptionupdate'
        label = self.storage_account_name + 'labelupdate'

        # Act
        result = self.sms.update_storage_account(
            self.storage_account_name,
            description,
            label,
            False,
            {'ext1': 'val1update',
             'ext2': 53,
             'ext3': 'brandnew'})

        # Assert
        self.assertIsNone(result)
        props = self.sms.get_storage_account_properties(
            self.storage_account_name)
        self.assertEqual(props.storage_service_properties.description,
                         description)
        self.assertEqual(props.storage_service_properties.label, label)
        self.assertEqual(props.extended_properties['ext1'], 'val1update')
        # Extended-property values round-trip through the service as
        # strings, so the int 53 comes back as '53'.
        self.assertEqual(props.extended_properties['ext2'], '53')
        self.assertEqual(props.extended_properties['ext3'], 'brandnew')

    def test_delete_storage_account(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.delete_storage_account(self.storage_account_name)

        # Assert
        self.assertIsNone(result)
        self.assertFalse(
            self._storage_account_exists(self.storage_account_name))

    def test_check_storage_account_name_availability_not_available(self):
        # Arrange
        self._create_storage_account(self.storage_account_name)

        # Act
        result = self.sms.check_storage_account_name_availability(
            self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertFalse(result.result)

    def test_check_storage_account_name_availability_available(self):
        # Arrange

        # Act
        result = self.sms.check_storage_account_name_availability(
            self.storage_account_name)

        # Assert
        self.assertIsNotNone(result)
        self.assertTrue(result.result)

    def test_unicode_create_storage_account_unicode_name(self):
        # Arrange
        # `unicode` is the Python 2 builtin; this module targets Py2.
        self.storage_account_name = unicode(
            self.storage_account_name) + u'啊齄丂狛狜'
        description = 'description'
        label = 'label'

        # Act
        with self.assertRaises(WindowsAzureError):
            # not supported - queue name must be alphanumeric, lowercase
            result = self.sms.create_storage_account(
                self.storage_account_name,
                description,
                label,
                None,
                'West US',
                True,
                {'ext1': 'val1',
                 'ext2': 42})
            self._wait_for_async(result.request_id)

        # Assert

    def test_unicode_create_storage_account_unicode_description_label(self):
        # Arrange
        description = u'啊齄丂狛狜'
        label = u'丂狛狜'

        # Act
        result = self.sms.create_storage_account(
            self.storage_account_name,
            description,
            label,
            None,
            'West US',
            True,
            {'ext1': 'val1',
             'ext2': 42})
        self._wait_for_async(result.request_id)

        # Assert
        result = self.sms.get_storage_account_properties(
            self.storage_account_name)
        self.assertEqual(result.storage_service_properties.description,
                         description)
        self.assertEqual(result.storage_service_properties.label, label)

    def test_unicode_create_storage_account_unicode_property_value(self):
        # Arrange
        description = 'description'
        label = 'label'

        # Act
        result = self.sms.create_storage_account(
            self.storage_account_name,
            description,
            label,
            None,
            'West US',
            True,
            {'ext1': u'丂狛狜',
             'ext2': 42})
        self._wait_for_async(result.request_id)

        # Assert
        result = self.sms.get_storage_account_properties(
            self.storage_account_name)
        self.assertEqual(result.storage_service_properties.description,
                         description)
        self.assertEqual(result.storage_service_properties.label, label)
        self.assertEqual(result.extended_properties['ext1'], u'丂狛狜')
def provision(instance_id):
    """
    Provision an instance of this service for the given org and space

    PUT /v2/service_instances/<instance_id>:
        <instance_id> is provided by the Cloud Controller
          and will be used for future requests to bind, unbind and deprovision
        BODY: {
          "service_id":        "<service-guid>",
          "plan_id":           "<plan-guid>",
          "organization_guid": "<org-guid>",
          "space_guid":        "<space-guid>"
        }

    return: JSON document with details about the
        services offered through this broker
    """
    if 'application/json' not in request.content_type:
        abort(415, 'Unsupported Content-Type: expecting application/json, '
                   'actual {0}'.format(request.content_type))

    global subscription_id
    global cert
    global cert_file
    global account_name
    global account_key

    # Lazily create the shared storage account the first time a
    # provision request arrives with valid credentials.
    if subscription_id and cert and (not account_name):
        sms = ServiceManagementService(subscription_id, cert_file)
        # Storage-account names must be globally unique; derive one
        # from the first segment of the instance GUID.
        name = '{0}{1}'.format(STORAGE_ACCOUNT_NAME_PREFIX,
                               instance_id.split('-')[0])
        desc = name
        label = name
        location = 'West US'
        result = None
        try:
            result = sms.create_storage_account(name, desc, label,
                                                location=location)
        except WindowsAzureConflictError:
            # Name already taken (e.g. an earlier provision attempt).
            # result stays None, so the wait/key-fetch below is skipped
            # and no container is created on this request.
            pass
        if result:
            req_id = result.request_id
            # Poll until the asynchronous create operation completes.
            operation = sms.get_operation_status(req_id)
            while operation.status == 'InProgress':
                time.sleep(5)
                operation = sms.get_operation_status(req_id)
                app.logger.info('Request ID: {0}, Operation Status: {1}'.format(req_id, operation.status))
            if operation.status == 'Succeeded':
                app.logger.info('Request ID: {0}, Operation Status: {1}'.format(req_id, operation.status))
                account_name = name
                account_key = sms.get_storage_account_keys(
                    account_name).storage_service_keys.primary
                # NOTE(review): this logs the storage access key (a
                # secret) at INFO level — consider removing.
                app.logger.info('Account Name: {0}, Account key: {1}'.format(account_name, account_key))

    if account_name:
        # Create a per-instance blob container tagged with the broker
        # request fields so the instance can be found again later.
        blob_service = BlobService(account_name, account_key)
        container_name = '{0}-{1}'.format(CONTAINER_NAME_PREFIX, instance_id)
        app.logger.info('Container Name: {0}'.format(container_name))
        request_body = request.get_json()
        # 'parameters' is free-form client input; strip it so only the
        # broker-defined top-level fields become container metadata.
        # (`in` replaces dict.has_key(), which Python 3 removed.)
        if 'parameters' in request_body:
            request_body.pop('parameters')
        container_tags = request_body
        container_tags['instance_id'] = instance_id
        blob_service.create_container(
            container_name=container_name,
            x_ms_meta_name_values=container_tags)
    return jsonify({})