def attachDiskToInstance(self, diskSize, vmname, disklabel, storage_account, storage_key, container_name, blob_name):
    """Register a data disk from an existing page blob and print all disks.

    NOTE(review): most parameters (diskSize, vmname, disklabel,
    container_name, blob_name) are currently ignored — the blob URL,
    subscription id and certificate path below are hardcoded. TODO:
    parameterize them before production use.
    """
    # Create Page Blob:
    blob_service = BlobService(account_name=storage_account, account_key=storage_key)
    #blob_service.put_blob(container_name, blob_name, b'', 'PageBlob', x_ms_blob_content_length='1073741824')
    #url = "http://"+storage_account+".blob.core.windows.net/"+container_name+"/"+blob_name
    #medialink = "http://"+storage_account+".blob.core.windows.net/"+container_name+"/nfsnew.vhd"
    url = "http://elastichpc.blob.core.windows.net/ahmed-sate931/ahmed.vhd"
    lun = 5
    label = 'diskdata' + str(lun)
    sms = self.CheckSubscription(
        '8be5609b-07c9-4114-8865-921ad82cb64a',
        '/media/ahmed/92b488cc-077b-480a-8a6c-62e6fd95339b/elastichpc/ehpc_azure/keys/mycert.pem'
    )
    # BUG FIX: removed `os = "Linux"` (it shadowed the `os` module) and the
    # unused local `diskname`; the OS is passed to add_disk as a literal.
    result = sms.add_disk(False, label, url, 'disk1ahmed', 'Linux')
    self.wait_for_async(sms, result.request_id)
    result = sms.list_disks()
    # Iterate the disk objects directly instead of poking at __dict__ by index.
    for i, disk in enumerate(result.disks):
        print("\n(" + str(i) + ")\n")
        print(disk.name)
        print(disk.media_link)
        print(disk.os)
        print("------------------------------------------")
def upload_file(self, file_path):
    """Ensure the publicly-readable 'script' container exists for uploads.

    Creates the container with blob-level public access; a failure (typically
    "container already exists") is logged and deliberately ignored so the
    call stays best-effort.
    """
    blob_service = BlobService(self.storage_account, self.account_key)
    try:
        blob_service.create_container("script", x_ms_blob_public_access='blob')
    except Exception as e:
        # FIX: modern `except ... as e` syntax (legacy comma form is
        # Python-2-only); the redundant trailing `pass` was removed.
        self.logger.warn(e.message)
def download_block_blob(self, account, key, container, blobname, path):
    """Download a blob into directory `path` (created if missing).

    Returns True if the file was written, False if writing failed.
    Exceptions from the Azure fetch itself still propagate, as before.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    blob_service = BlobService(account_name=account, account_key=key)
    blob = blob_service.get_blob(container, blobname)
    try:
        # FIX: open in binary mode — blob payloads are raw bytes and text
        # mode would corrupt them on platforms that translate newlines.
        # Also use os.path.join instead of manual "/" concatenation.
        with open(os.path.join(path, blobname), 'wb') as f:
            f.write(blob)
            return True
    except (IOError, OSError):
        # FIX: narrowed the bare `except:` to filesystem errors only, so
        # programming errors are no longer silently swallowed.
        return False
def list_storage_container(self, storage_account, storage_key):
    """Return the names of up to 10 containers in the storage account.

    Exits the process on a WindowsAzureConflictError, matching the
    error-handling style of the sibling container helpers.
    """
    try:
        self.logger.info("Listing storage containers")
        blob_service = BlobService(account_name=storage_account, account_key=storage_key)
        result = blob_service.list_containers(None, None, 10, None)
        # Comprehension instead of an index loop over range(len(...)).
        return [container.name for container in result.containers]
    except WindowsAzureConflictError:
        # BUG FIX: the previous message referenced `container_name`, which is
        # not defined in this function (guaranteed NameError), and talked
        # about deleting a container — a copy-paste from delete_containers.
        self.logger.info("Error: Can not list storage containers for account: " + storage_account)
        sys.exit(1)
def delete_containers(self, container_name, storage_account, storage_key):
    """Delete a blob container; returns the SDK result (True on success).

    Exits the process on a WindowsAzureConflictError.
    """
    try:
        self.logger.info("Container Name: " + container_name)
        self.logger.info("Storage account: " + storage_account)
        # SECURITY FIX: never write the storage account key to the log —
        # the previous code leaked the full secret. Log a redacted marker.
        self.logger.info("Storage key: ****redacted****")
        blob_service = BlobService(account_name=storage_account, account_key=storage_key)
        self.logger.info("creating Blob Service connection")
        result = blob_service.delete_container(container_name, False)
        return result
    except WindowsAzureConflictError:
        self.logger.info("Error: Can not delete storage container: " + container_name)
        sys.exit(1)
def handle_noargs(self, **options):
    """Configure (or disable) CORS on the Azure blob service for this project.

    Validates the required settings/arguments, builds a CORS rule from the
    command's origins/methods/maxage options, and pushes a partial
    StorageServiceProperties update (metrics/logging left as None so they
    stay untouched server-side).
    """
    self.set_options(**options)
    # Validation is skipped entirely when --disable is set: clearing CORS
    # needs no origins/methods.
    if not self.disable:
        if not ls.AZURE_ACCOUNT_NAME:
            raise CommandError('AZURE_ACCOUNT_NAME setting is missing')
        if not ls.AZURE_ACCOUNT_KEY:
            raise CommandError('AZURE_ACCOUNT_KEY setting is missing')
        if not self.origins:
            raise CommandError('Specify at least one origin')
        if not self.methods:
            raise CommandError('Specify at least one method')

    # Minimal WindowsAzureData subclasses so the SDK's XML serializer
    # (_convert_class_to_xml) emits the Cors/CorsRule elements.
    class CorsRule(WindowsAzureData):
        def __init__(self, origins, methods, maxage):
            # The service expects comma-separated lists, not Python lists.
            self.allowed_origins = ','.join(origins)
            self.allowed_methods = ','.join(methods)
            self.allowed_headers = ''
            self.exposed_headers = ''
            self.max_age_in_seconds = maxage

    class Cors(WindowsAzureData):
        def __init__(self, rules):
            self.cors = rules

    blob_service = BlobService(ls.AZURE_ACCOUNT_NAME, ls.AZURE_ACCOUNT_KEY,
                               ls.AZURE_DEFAULT_PROTOCOL)
    cors_rule = CorsRule(self.origins, self.methods, self.maxage)
    service_properties = blob_service.get_blob_service_properties()
    self.stdout.write('--FOUND PROPERTIES--')
    self.stdout.write(_convert_class_to_xml(service_properties))
    cors_properties = StorageServiceProperties()
    if not self.disable:
        cors_properties.cors = Cors([cors_rule])
    else:
        # Empty rule list clears any existing CORS configuration.
        cors_properties.cors = Cors([])
    # None means "leave these sections unchanged" in the partial update.
    cors_properties.metrics = None
    cors_properties.logging = None
    self.stdout.write('')
    self.stdout.write('--NEW PROPERTIES--')
    self.stdout.write(_convert_class_to_xml(cors_properties))
    # As of the latest version, one can only send
    # a part of the properties and the rest will stay intact
    # http://msdn.microsoft.com/en-us/library/azure/hh452235.aspx
    self.set_properties(blob_service, cors_properties)
def create_containers(self, sms, storage_account, storage_key, container_name, permission):
    """Create a blob container and hand back its name.

    Logs the inputs, creates the container with the given public-access
    permission, and returns `container_name`. A WindowsAzureConflictError
    terminates the process.
    """
    try:
        self.logger.info("Storage account: " + storage_account)
        self.logger.info("Container Name: " + container_name)
        self.logger.info("permission: " + str(permission))
        connection = BlobService(account_name=storage_account,
                                 account_key=storage_key)
        self.logger.info("creating Blob Service connection")
        # fail_on_exist=False: an existing container is not treated as fatal.
        connection.create_container(container_name, None, permission, False)
        self.logger.info("creating container: %s", container_name)
    except WindowsAzureConflictError:
        self.logger.info(
            "Error: can not create storage container with name %s ",
            container_name)
        sys.exit(1)
    return container_name
def __get_available_storage_account_and_container(self, hackathon_id):
    """ Get available storage account and container

    :param hackathon_id: the id of hackathon
    :type hackathon_id: integer

    :return: if there is available storage account and container, then return (True, storage account name, container
    name). Otherwise, return (False, None, None)
    :rtype: 3-element tuple: (bool, str|unicode, str|unicode)
    """
    container_name = self.util.safe_get_config('dockerhostserver.azure.container',
                                               'dockerhostprivatecontainer')
    sms = self.__get_sms_object(hackathon_id)
    if sms is None:
        self.log.error('Something wrong with Azure account of Hackathon:%d' % hackathon_id)
        return False, None, None
    storage_accounts = sms.list_storage_accounts()
    # check storage account one by one, return True once find a qualified one
    for storage in storage_accounts.storage_services:
        try:
            storage_response = sms.get_storage_account_keys(storage.service_name)
        except Exception as e:
            # Key retrieval failed for this account — skip it and try the next.
            self.log.error('Encounter an error when checking storage_account:%s ' % storage.service_name)
            self.log.error(e)
            continue
        blob_service = BlobService(
            account_name=storage.service_name,
            account_key=storage_response.storage_service_keys.primary,
            # NOTE(review): host_base defaults to the Azure China endpoint —
            # confirm this is intended for all deployments.
            host_base=self.util.safe_get_config('dockerhostserver.storage.host_base',
                                                '.blob.core.chinacloudapi.cn'))
        try:
            # If metadata fetch succeeds the container already exists — done.
            blob_service.get_container_metadata(container_name)
            return True, storage.service_name, container_name
        except Exception as e:
            # Only "container not found" is recoverable (we create it below);
            # any other error means this account is unusable — skip it.
            if e.message != AzureApiExceptionMessage.CONTAINER_NOT_FOUND:
                self.log.error('Encounter an error when checking container:%s ' % container_name)
                self.log.error(e)
                continue
        try:
            blob_service.create_container(container_name=container_name,
                                          x_ms_blob_public_access='container')
            return True, storage.service_name, container_name
        except Exception as e:
            # Creation failed — log and fall through to the next account.
            self.log.error('Encounter an error when creating container:%s ' % container_name)
            self.log.error(e)
    # No storage account could provide (or create) the container.
    return False, None, None
def upload_block_blob(self, container_name, blob_name, file_path, storage_account, storage_key):
    """Upload a local file as a block blob in `self.chunk_size` chunks.

    Creates the container if it does not already exist, streams the file in
    chunks via put_block, prints a progress counter to stdout, and commits
    the block list at the end.
    """
    result = self.list_storage_container(storage_account, storage_key)
    blob_service = BlobService(account_name=storage_account, account_key=storage_key)
    if container_name in result:
        self.logger.info("container is already exist")
    else:
        blob_service.create_container(container_name, None, None, False)
    blob_service.put_blob(container_name, blob_name, '', 'BlockBlob')
    data_sent = 0
    block_ids = []
    index = 0
    with open(file_path, 'rb') as f:
        while True:
            data = f.read(self.chunk_size)
            if not data:
                print("\n")
                break
            # BUG FIX: Azure requires every block id of a blob to have the
            # same (pre-encoding) length. base64.b64encode(str(index)) gave
            # variable-length ids ("0" vs "10"), breaking put_block_list for
            # files with more than 10 blocks. Zero-pad to a fixed width.
            block_id = base64.b64encode('%08d' % index)
            blob_service.put_block(container_name, blob_name, data, block_id)
            block_ids.append(block_id)
            index += 1
            # BUG FIX: count actual bytes sent — the final chunk is usually
            # shorter than chunk_size, so adding chunk_size over-reported.
            data_sent += len(data)
            sys.stdout.write("\rUploaded data = %d MB" % (data_sent / (1024 * 1024)))
            sys.stdout.flush()
    blob_service.put_block_list(container_name, blob_name, block_ids)
def createInstances(self, sms, name, region, imageID, instanceSize, pkfile, count, thumbprint, cert_data, num_instances, certPasswd, storagename, master_size):
    """Provision `count` Linux VMs on Azure for an elastic-HPC cluster.

    Creates an affinity group, storage account and container; makes sure the
    OS image `imageID` exists (copying it from VM Depot and registering it if
    not); configures endpoints, SSH keys and certificates; then deploys one
    VM per iteration. Returns a list of the created cloud-service FQDNs with
    the container name appended at the end. Exits the process on a
    WindowsAzureConflictError.

    NOTE(review): `instanceSize`, `num_instances` and `mainPath` appear
    unused; `password` is None because key-based SSH auth is configured.
    """
    hostname = name
    mainPath = os.path.dirname(os.path.abspath(__file__))
    username = "******"
    password = None
    # Affinity group + storage account + key, all derived from `storagename`.
    affGrp = self.createAffinityGrp(sms, storagename, storagename, storagename, region)
    storage_name = self.StorageCreate(sms, storagename, storagename, storagename, region, affGrp)
    account_key = self.getStorageAccountKey(sms, storage_name)
    permission = None
    container_name = self.get_vm_name()
    account_name = storage_name
    container_name = self.create_containers(sms, storage_name, account_key, container_name.lower(), None)
    time.sleep(5)
    #print "Container Name:"+disks
    medialink = "http://" + storage_name + ".blob.core.windows.net/" + container_name + "/"
    # --------------------------------------------------------
    blobService = BlobService(account_name=storage_name, account_key=account_key)
    blobName = container_name + "blob.vhd"
    try:
        # If the image is already registered this succeeds and we skip the copy.
        image = sms.get_os_image(imageID)
    except:
        # Image missing: copy the VHD blob from VM Depot, then register it
        # under `imageID` so the deployments below can reference it.
        if (self.copy_image_vmDepot(blobService, container_name, blobName, imageID, sms)):
            print "INFO -- The Disk Blob has been copied"
            media_link = "http://" + storage_name + ".blob.core.windows.net/" + container_name + "/" + blobName
            if (self.make_os_image(sms, media_link, imageID)):
                print "INFO -- The image '" + imageID + "' is ready now!!"
            else:
                print "Error: Can not complete creating The image"
                exit(0)
    #-----------------------------------------------------------
    medialink = "http://" + storage_name + ".blob.core.windows.net/" + container_name + "/"
    media_link = ""
    # Configuring EndPoint "Firwall Configuration":
    endpoint_config = ConfigurationSet()
    endpoint_config.configuration_set_type = 'NetworkConfiguration'
    endpoint1 = ConfigurationSetInputEndpoint(
        name='XML', protocol='tcp', port='5000', local_port='5000',
        load_balanced_endpoint_set_name=None,
        enable_direct_server_return=False)
    endpoint_config.input_endpoints.input_endpoints.append(endpoint1)
    self.logger.info(
        "Configuring EndPoints 'Firwall Configuration' SHH, PBS Torque and OpenMPI ports"
    )
    # Linux VM Configuration: key-based SSH (disable_ssh_password_auth=True).
    linux_config = LinuxConfigurationSet(hostname, username, password, True)
    publickey = PublicKey(thumbprint, pkfile)
    linux_config.ssh.public_keys.public_keys.append(publickey)
    self.logger.info("Linux VM Configuration")
    # Configuring Image ID:
    #----------------------
    os_hd = OSVirtualHardDisk(imageID, media_link)
    self.logger.info(
        "Configuring The Virtual Hard Disk using Image ID: %s", imageID)
    # Start Deployment of VM on Azure:
    self.logger.info("Start Deployment of Elastic hpc on Azure")
    # Configuring Certificates:
    cert_format = 'pfx'
    cert_password = certPasswd
    VMname = hostname
    vmname = hostname
    instances = []
    try:
        for num in range(count):
            name = vmname + str(num)
            vname = vmname
            Service_name = vname
            Service_url = self.newCloudService(sms, Service_name, Service_name, Service_name, region, affGrp)
            # Per-VM SSH endpoint: external port 220<num> maps to guest 22.
            # NOTE(review): string concatenation of '220' + str(num) only
            # yields unique/valid ports for num < 10 — confirm intended range.
            endpoint3 = ConfigurationSetInputEndpoint(
                name='SSH' + str(num), protocol='tcp',
                port='220' + str(num), local_port='22',
                load_balanced_endpoint_set_name=None,
                enable_direct_server_return=False)
            endpoint_config.input_endpoints.input_endpoints.append(
                endpoint3)
            #endpoint4 = ConfigurationSetInputEndpoint(name="FTP", protocol='tcp', port='21', local_port='21', load_balanced_endpoint_set_name=None, enable_direct_server_return=False)
            #endpoint_config.input_endpoints.input_endpoints.append(endpoint4)
            #endpoint5 = ConfigurationSetInputEndpoint(name="FTP1", protocol='tcp', port='20', local_port='20', load_balanced_endpoint_set_name=None, enable_direct_server_return=False)
            #endpoint_config.input_endpoints.input_endpoints.append(endpoint5)
            #endpoint6 = ConfigurationSetInputEndpoint(name="FTPudp", protocol='udp', port='21', local_port='21', load_balanced_endpoint_set_name=None, enable_direct_server_return=False)
            #endpoint_config.input_endpoints.input_endpoints.append(endpoint6)
            #for i in range(6):
            #    endpointpasv = ConfigurationSetInputEndpoint(name="FTPpasv"+str(i), protocol='tcp', port='4000'+str(i), local_port='4000'+str(i), load_balanced_endpoint_set_name=None, enable_direct_server_return=False)
            #    endpoint_config.input_endpoints.input_endpoints.append(endpointpasv)
            pbs_endpoints = self.get_pbs_endpoints(0)
            for endpoint in pbs_endpoints:
                endpoint_config.input_endpoints.input_endpoints.append(
                    endpoint)
            # NOTE(review): name[:-1] strips the last character of the VM
            # name to build the VHD name — looks like it is meant to drop the
            # numeric suffix, which only works for single-digit num; confirm.
            media_link = medialink + name[:-1] + ".vhd"
            self.logger.info("Configuring Media Link %s", media_link)
            # Configuring Image ID:
            #----------------------
            os_hd = OSVirtualHardDisk(imageID, media_link)
            self.logger.info(
                "Configuring The Virtual Hard Disk using Image ID: %s",
                imageID)
            self.logger.info("Deploying Node number: %d", num)
            result_cert = sms.add_service_certificate(
                service_name=Service_name, data=cert_data,
                certificate_format=cert_format, password=cert_password)
            self.logger.info("Start Deploying VM with Name: " + vname)
            try:
                self.logger.info(vars(result_cert))
            except:
                # Some SDK results are not vars()-able; log the raw object.
                self.logger.info(result_cert)
            time.sleep(5)
            result = sms.create_virtual_machine_deployment(
                service_name=Service_name, deployment_name=vname,
                deployment_slot='production', label=Service_name,
                role_name=vname, system_config=linux_config,
                os_virtual_hard_disk=os_hd, network_config=endpoint_config,
                role_size=master_size)
            #role_size="Large")
            self.logger.info("Start Deployment")
            # Block until the deployment is fully up before the next node.
            self.wait_for_async(sms, result.request_id)
            self.wait_for_deployment_status(sms, Service_name, vname, 'Running')
            self.wait_for_role_instance_status(sms, Service_name, vname, vname, 'ReadyRole')
            instances.append(Service_name + ".cloudapp.net")
        # Container name is appended once, after all VMs are created.
        instances.append(container_name)
    except WindowsAzureConflictError:
        self.logger.info("Error: Can not Create VM")
        sys.exit(1)
    return instances
import logging
from azure.storage.blobservice import BlobService
from azure.storage.storageclient import _StorageClient
from azure.servicemanagement import LinuxConfigurationSet
from azure.servicemanagement import OSVirtualHardDisk
from azure import WindowsAzureConflictError
from azure import WindowsAzureError
from azure import *
from azure.servicemanagement import *
from azure.servicemanagement.servicemanagementservice import ServiceManagementService
import base64
import urllib2

# SECURITY NOTE(review): a real-looking storage account key is committed
# below. It should be revoked/rotated and loaded from configuration or an
# environment variable rather than kept in source control.
blob_service = BlobService(
    account_name='sjsssiohdsiu',
    account_key=
    'tBlxaMOoA+bE6zMsT5i1epphb25V/sD62MpAO7UA0fRK0GbpYLiVwhpe+WIwCtg80XxC+1uaVDtOvopZRHQ3Nw=='
)
#myblob = open(r'vmdepoteastus.blob.core.windows.net/linux-community-store/community-23970-525c8c75-8901-4870-a937-7277414a6eaa-1.vhd', 'r').read()
#blob_service.put_blob('mycontainerazure','ahmedblob.vhd',myblob,x_ms_blob_type='BlockBlob')
#cert_data_path = "/media/ahmed/1578-4B0E/WindowsAzure/AzureCert/mycert.pfx"
#with open(cert_data_path, "rb") as bfile:
#    cert_data = base64.b64encode(bfile.read())

# Root logger at DEBUG with a stream handler on the real stdout.
# NOTE(review): `sys` is used below but not imported in this visible chunk —
# confirm it is imported elsewhere in the file.
log = logging.getLogger()
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.__stdout__)  # Add this
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
def create_blob_service(self):
    """Build and return a BlobService bound to this object's credentials."""
    service = BlobService(self.account_name, self.account_key)
    return service
def connect(self, creds):
    """Return an azure BlobService instance.
    """
    # Always talk to the service over HTTPS.
    service = BlobService(account_name=creds.account_name,
                          account_key=creds.account_key,
                          protocol='https')
    return service
def service(self):
    """Lazily construct the BlobService client and cache it on the instance."""
    cached = self._service
    if cached is None:
        cached = BlobService(account_name=self.account_name,
                             account_key=self.account_key,
                             protocol=self.protocol)
        self._service = cached
    return cached
def service(self):
    """Return the cached BlobService, creating it on first access."""
    cached = self._service
    if cached is None:
        cached = BlobService(self.AZURE_ACCOUNT_NAME,
                             self.AZURE_ACCOUNT_KEY,
                             self.AZURE_PROTOCOL,
                             self.AZURE_DOMAIN)
        self._service = cached
    return cached