def home():
    """Renders the home page."""
    form = MessageForm(request.form)
    if request.method == 'POST' and form.validate():
        # Save to db
        table_service = TableService(account_name=config.ACC_NAME,
                                     account_key=config.ACC_KEY)
        message = {'PartitionKey': 'message',
                   'RowKey': form.name.data + datetime.datetime.now().isoformat(),
                   'name': form.name.data,
                   'email': form.email.data,
                   'date': str(datetime.datetime.now())}
        if form.message_body.data:
            message["message"] = form.message_body.data
        table_service.insert_entity('landingmessages', message)
        return render_template(
            'index.html',
            hiden_block='#wright-us',
            form=form,
        )
    return render_template(
        'index.html',
        hiden_block='#got-ur-message',
        form=form,
    )
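# A minimal sketch of reading the saved messages back; the table name and the
# fixed 'message' PartitionKey come from the handler above, while the
# standalone-script shape is assumed:
from azure.storage import TableService
import config

table_service = TableService(account_name=config.ACC_NAME, account_key=config.ACC_KEY)
for m in table_service.query_entities('landingmessages', "PartitionKey eq 'message'"):
    print(m.name, m.email, m.date)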
class conobj:
    def __init__(self, pikaIP, rabbitid, rabbitpasswd, TableAccountName, TableKey):
        self.ipad = socket.gethostname()
        self.pikaIP = pikaIP
        self.creds = pika.PlainCredentials(rabbitid, rabbitpasswd)
        self.connec = pika.BlockingConnection(
            pika.ConnectionParameters(pikaIP, 5672, '/', self.creds))
        self.chan = self.connec.channel()
        self.chan.queue_declare(queue='hello', auto_delete=False, exclusive=False)
        self.table_service = TableService(account_name=TableAccountName,
                                          account_key=TableKey)
        self.table_service.create_table('sciml')
        self.chan.basic_publish(exchange='', routing_key='hello',
                                body='start up newest server at address ' + self.ipad)
        self.chan.cancel()
        self.connec.close()
        self.chan = None

    def recon(self):
        # This is the slow method, but reliable.
        try:
            self.connec = pika.BlockingConnection(
                pika.ConnectionParameters(self.pikaIP, 5672, '/', self.creds))
            self.chan = self.connec.channel()
            self.chan.queue_declare(queue='hello', auto_delete=False, exclusive=False)
        except:
            self.fullrecon()

    def fullrecon(self):
        if self.chan is not None:
            self.chan.cancel()
            #self.connec.close()
        time.sleep(2.0)
        print "reconnecting to rabbitmq"
        self.connec = pika.BlockingConnection(
            pika.ConnectionParameters(self.pikaIP, 5672, '/', self.creds))
        self.chan = self.connec.channel()
        self.chan.queue_declare(queue='hello', auto_delete=False, exclusive=False)
        self.chan.basic_publish(exchange='', routing_key='hello',
                                body='reconnecting to rabbitmq')

    def decon(self):
        if self.chan is not None:
            #self.chan.basic_publish(exchange='', routing_key='hello',
            #                        body='disconnect from address ' + self.ipad)
            self.chan.cancel()
            self.connec.close()
            self.chan = None
def main(argv=None):
    # We created four AzureML service endpoints to maximize parallelism.
    # The program is invoked with one parameter: a number between 0 and 3
    # that selects the endpoint to use.
    # You need to create at least one endpoint, your Service Bus namespace
    # to listen for events, and a table service to store the results.
    endpoints = []
    url = 'the original service endpoint url'
    api_key = 'the original service key'
    endpoints.extend([[url, api_key]])
    url2 = 'a second endpoint url'
    api_key2 = 'the key for the second'
    endpoints.extend([[url2, api_key2]])
    url3 = 'the third endpoint url'
    api_key3 = 'the key for the third'
    endpoints.extend([[url3, api_key3]])
    url4 = 'the fourth endpoint url'
    api_key4 = 'the key for the fourth'
    endpoints.extend([[url4, api_key4]])
    if argv is None:
        argv = sys.argv
    try:
        try:
            opts, args = getopt.getopt(argv[1:], "h", ["help"])
        except getopt.error, msg:
            raise Usage(msg)
        bus_service = ServiceBusService(
            service_namespace='your stream',
            shared_access_key_name='listenpolicy',
            shared_access_key_value='your access key')
        # Next set up the table service.
        table_service = TableService(account_name='your table account',
                                     account_key='account key')
        table_service.create_table('scimlevents')
        hostname = socket.gethostname()
        if len(args) < 1:
            print "need the endpoint number"
            return 2
        endpointid = int(args[0])
        url = endpoints[endpointid][0]
        api_key = endpoints[endpointid][1]
        print url
        print api_key
        print "starting with endpoint " + str(endpointid)
        while True:
            print "running"
            processevents(table_service, hostname, bus_service, url, api_key)
        print "all done."
    except Usage, err:
        # Assumed handler for the Usage exception raised above; closes the outer try.
        print >> sys.stderr, err.msg
        return 2
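# A minimal Python 2 sketch of the scoring call that processevents() might
# make against one of the AzureML endpoints above; the 'Inputs'/'input1'
# payload shape is an assumed placeholder that depends on your web service:
import json
import urllib2

def score_event(url, api_key, record):
    # AzureML request-response endpoints take a JSON body and a Bearer key.
    headers = {'Content-Type': 'application/json',
               'Authorization': 'Bearer ' + api_key}
    body = json.dumps({'Inputs': {'input1': record}})
    req = urllib2.Request(url, body, headers)
    return json.loads(urllib2.urlopen(req).read())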
def setUp(self):
    self.tc = TableService(account_name=credentials.getStorageServicesName(),
                           account_key=credentials.getStorageServicesKey())
    proxy_host = credentials.getProxyHost()
    proxy_port = credentials.getProxyPort()
    if proxy_host:
        self.tc.set_proxy(proxy_host, proxy_port)
    __uid = getUniqueTestRunID()
    table_base_name = u'testtable%s' % (__uid)
    self.table_name = getUniqueNameBasedOnCurrentTime(table_base_name)
    self.additional_table_names = []
def __init__(self, table):
    self.DataCollector = []
    self.i = 0
    self.Table = table
    self.table_service = TableService(account_name='portalvhdspbrd34f2fnbl',
                                      account_key='y48JkXg+VcHQRCgsylJf4xV4Fd0AuJNkQKSwGhAR+BppHnFhkI+UHPOS/oYaTo0rqFCGQkEBW+thNFZNB9W8yg==')
    self.task = self.table_service.query_entities(self.Table, None, None, 100)
    if table == "Customer":
        self.i = self.i + 100
        print(self.i)
        for line in self.task:
            self.DataCollector.append({'ID': line.RowKey,
                                       'Name': line.Name,
                                       'Country': line.Country,
                                       'City': line.City})
        self.iteratorCustomer(self.task)
    if table == "Order":
        self.i = self.i + 100
        print(self.i)
        for line in self.task:
            self.DataCollector.append({'ID': line.RowKey,
                                       'CustomerID': line.CustomerID,
                                       'ProductID': line.ProductID,
                                       'Datetime': line.Datetime,
                                       'TotalPrice': line.TotalPrice})
        self.iteratorOrder(self.task)
def main(argv=None):
    if argv is None:
        argv = sys.argv
    try:
        try:
            opts, args = getopt.getopt(argv[1:], "h", ["help"])
        except getopt.error, msg:
            raise Usage(msg)
        # Next set up the table service.
        table_service = TableService(account_name='azure table',
                                     account_key='long account key for this storage service')
        table_service.create_table('name of table')
        hostname = socket.gethostname()
        quename = gettopic()
        processevents(quename, table_service, hostname)
        print "all done."
    except Usage, err:
        # Assumed handler for the Usage exception raised above; closes the outer try.
        print >> sys.stderr, err.msg
        return 2
def do_step(context):
    settings = context.meta['settings']

    # Prepare the containers
    storage_account_name = settings["STORAGE-ACCOUNT-NAME"]
    storage_access_key = settings["STORAGE-ACCESS-KEY"]
    blob_service = BlobService(storage_account_name, storage_access_key)
    blob_service.create_container('bosh')
    blob_service.create_container(container_name='stemcell',
                                  x_ms_blob_public_access='blob')

    # Prepare the table for storing metadata of the storage account and stemcells
    table_service = TableService(storage_account_name, storage_access_key)
    table_service.create_table('stemcells')

    context.meta['settings'] = settings
    return context
def __init__(self, settings):
    """Initializes the repository with the specified settings dict.

    Required settings are:
    - STORAGE_NAME
    - STORAGE_KEY
    - STORAGE_TABLE_POLL
    - STORAGE_TABLE_CHOICE
    """
    self.name = 'Azure Table Storage'
    self.storage_name = settings['STORAGE_NAME']
    self.storage_key = settings['STORAGE_KEY']
    self.poll_table = settings['STORAGE_TABLE_POLL']
    self.choice_table = settings['STORAGE_TABLE_CHOICE']
    self.svc = TableService(self.storage_name, self.storage_key)
    self.svc.create_table(self.poll_table)
    self.svc.create_table(self.choice_table)
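# A minimal construction sketch for the repository above; the settings values
# are placeholders and PollRepository is a hypothetical name for the class
# that defines this __init__:
settings = {
    'STORAGE_NAME': 'myaccount',
    'STORAGE_KEY': 'mykey',
    'STORAGE_TABLE_POLL': 'polls',
    'STORAGE_TABLE_CHOICE': 'choices',
}
repo = PollRepository(settings)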
def getStorageMetrics(account, key, hostBase, table, startKey, endKey):
    try:
        waagent.Log("Retrieve storage metrics data.")
        tableService = TableService(account_name=account,
                                    account_key=key,
                                    host_base=hostBase)
        ofilter = ("PartitionKey ge '{0}' and PartitionKey lt '{1}'"
                   "").format(startKey, endKey)
        oselect = ("TotalRequests,TotalIngress,TotalEgress,AverageE2ELatency,"
                   "AverageServerLatency,RowKey")
        metrics = tableService.query_entities(table, ofilter, oselect)
        waagent.Log("{0} records returned.".format(len(metrics)))
        return metrics
    except Exception, e:
        waagent.Error(("Failed to retrieve storage metrics data: {0} {1}"
                       "").format(printable(e), traceback.format_exc()))
        AddExtensionEvent(message=FAILED_TO_RETRIEVE_STORAGE_DATA)
        return None
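# The startKey/endKey arguments above are PartitionKey bounds for the metrics
# table. A sketch of how a caller might derive them from a look-back window,
# assuming the storage-analytics convention of a UTC timestamp PartitionKey
# formatted as '%Y%m%dT%H%M' (that format is an assumption, not taken from
# the snippet above):
import datetime

def makeMetricKeys(minutesBack):
    # Returns (startKey, endKey) spanning the last `minutesBack` minutes.
    now = datetime.datetime.utcnow()
    start = now - datetime.timedelta(minutes=minutesBack)
    fmt = "%Y%m%dT%H%M"
    return start.strftime(fmt), now.strftime(fmt)

startKey, endKey = makeMetricKeys(60)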
def friend_email_get(mine):
    # Will return a list of email ids of friends.
    l = []
    try:
        table_service = TableService(account_name='****', account_key='******')
        # The table is already created:
        # table_service.create_table('friendstable')
        tuple1 = table_service.get_entity('friendlisttable', 'friend', mine)
        count = tuple1.count
        for i in range(1, count + 1):
            field = "friend" + str(i)
            k_dict = tuple1.__dict__
            print("\n\n", k_dict, "\n\n")
            data = k_dict[field]
            print("\n\n", data)
            l.append(data)
            print("\n\n", l, "\n\n")
    except Exception as e:
        print(e)
    return l
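# The getter above implies one entity per user holding a `count` property
# plus `friend1`..`friendN` columns. A sketch of the matching writer under
# that assumption (friend_email_put is a hypothetical helper; credentials
# are elided as in the original):
def friend_email_put(mine, emails):
    table_service = TableService(account_name='****', account_key='******')
    entity = {'PartitionKey': 'friend', 'RowKey': mine, 'count': len(emails)}
    for i, email in enumerate(emails, start=1):
        entity['friend' + str(i)] = email
    table_service.insert_entity('friendlisttable', entity)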
def __init__(self, table):
    if os.environ.get("raspberry") is None:
        table += 'Test'
        print table
    self._partition = table
    with open('azure.txt') as f:
        lines = f.readlines()
    acc = lines[0].strip()
    key = lines[1].strip()
    self._table_service = TableService(account_name=acc, account_key=key)
def getAzureDiagnosticMemoryData(accountName, accountKey, hostBase,
                                 startKey, endKey, hostname):
    try:
        waagent.Log("Retrieve diagnostic data: Memory")
        table = "LinuxPerfMemVer1v0"
        tableService = TableService(account_name=accountName,
                                    account_key=accountKey,
                                    host_base=hostBase)
        ofilter = ("PartitionKey ge '{0}' and PartitionKey lt '{1}' "
                   "and Host eq '{2}'").format(startKey, endKey, hostname)
        oselect = "PercentAvailableMemory,Host"
        data = tableService.query_entities(table, ofilter, oselect, 1)
        if data is None or len(data) == 0:
            return None
        memoryPercent = 100 - float(data[0].PercentAvailableMemory)
        return memoryPercent
    except Exception, e:
        waagent.Error(("Failed to retrieve diagnostic data(Memory): {0} {1}"
                       "").format(printable(e), traceback.format_exc()))
        AddExtensionEvent(message=FAILED_TO_RETRIEVE_MDS_DATA)
        return None
class CustomerTable:
    def __init__(self):
        self.table_service = TableService(account_name='portalvhdspbrd34f2fnbl',
                                          account_key='y48JkXg+VcHQRCgsylJf4xV4Fd0AuJNkQKSwGhAR+BppHnFhkI+UHPOS/oYaTo0rqFCGQkEBW+thNFZNB9W8yg==')

    def insert(self, ID, Name, Country, City):
        task = {'PartitionKey': 'Customer', 'RowKey': ID,
                'Name': Name, 'Country': Country, 'City': City}
        try:
            self.table_service.insert_entity('Customer', task)
        except:
            print "azure.WindowsAzureConflictError: Customer Conflict"

    def listAll(self):
        tasks = self.table_service.query_entities('Customer',
                                                  "PartitionKey eq 'Customer'")
        i = 0
        for task in tasks:
            i = i + 1
            print("Name: %s, Country: %s, City: %s"
                  % (task.Name, task.Country, task.City))
        print("Total Customer: %s" % i)
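# A short usage sketch for CustomerTable; the values are illustrative only:
customers = CustomerTable()
customers.insert('1', 'Alice', 'Norway', 'Oslo')
customers.listAll()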
def setUp(self):
    self.tc = TableService(credentials.getStorageServicesName(),
                           credentials.getStorageServicesKey())
    self.tc.set_proxy(credentials.getProxyHost(),
                      credentials.getProxyPort(),
                      credentials.getProxyUser(),
                      credentials.getProxyPassword())
    __uid = getUniqueTestRunID()
    table_base_name = u'testtable%s' % (__uid)
    self.table_name = getUniqueNameBasedOnCurrentTime(table_base_name)
    self.additional_table_names = []
class AzureDataServices:
    _partition = 'presence'

    def __init__(self, table):
        self._partition = table
        with open('azure.txt') as f:
            lines = f.readlines()
        acc = lines[0].strip()
        key = lines[1].strip()
        self._table_service = TableService(account_name=acc, account_key=key)

    def create_table(self):
        """ Creates azure storage table """
        self._table_service.create_table(self._partition)

    def insert_data(self, task):
        """ Insert the object to azure """
        t = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
        task.PartitionKey = self._partition
        task.RowKey = t
        self._table_service.insert_entity(self._partition, task)

    def insert_presence(self, p):
        """ Uploads value to azure table storage """
        t = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
        task = Entity()
        task.PartitionKey = self._partition
        task.RowKey = t
        task.users_arrived = ','.join(map(str, p.users_arrived))
        task.users_left = ','.join(map(str, p.users_left))
        self._table_service.insert_entity(self._partition, task)

    def get_presence(self):
        tasks = self._table_service.query_entities(
            self._partition, "PartitionKey eq 'presence'")
        return tasks
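# A brief usage sketch; 'presence' matches the class default and the
# get_presence() filter, and the printed attributes are the ones that
# insert_presence() writes:
svc = AzureDataServices('presence')
svc.create_table()
for row in svc.get_presence():
    print(row.users_arrived, row.users_left)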
class AzureJobStore(AbstractJobStore):
    """
    A job store that uses Azure's blob store for file storage and Table
    Service to store job info with strong consistency.
    """

    def __init__(self, accountName, namePrefix, config=None,
                 jobChunkSize=maxAzureTablePropertySize):
        self.jobChunkSize = jobChunkSize
        self.keyPath = None
        self.account_key = _fetchAzureAccountKey(accountName)
        # Table names have strict requirements in Azure
        self.namePrefix = self._sanitizeTableName(namePrefix)
        logger.debug("Creating job store with name prefix '%s'" % self.namePrefix)
        # These are the main API entrypoints.
        self.tableService = TableService(account_key=self.account_key,
                                         account_name=accountName)
        self.blobService = BlobService(account_key=self.account_key,
                                       account_name=accountName)
        # Register our job-store in the global table for this storage account
        self.registryTable = self._getOrCreateTable('toilRegistry')
        exists = self.registryTable.get_entity(row_key=self.namePrefix)
        self._checkJobStoreCreation(config is not None, exists,
                                    accountName + ":" + self.namePrefix)
        self.registryTable.insert_or_replace_entity(row_key=self.namePrefix,
                                                    entity={'exists': True})
        # Serialized jobs table
        self.jobItems = self._getOrCreateTable(self.qualify('jobs'))
        # Job<->file mapping table
        self.jobFileIDs = self._getOrCreateTable(self.qualify('jobFileIDs'))
        # Container for all shared and unshared files
        self.files = self._getOrCreateBlobContainer(self.qualify('files'))
        # Stats and logging strings
        self.statsFiles = self._getOrCreateBlobContainer(self.qualify('statsfiles'))
        # File IDs that contain stats and logging strings
        self.statsFileIDs = self._getOrCreateTable(self.qualify('statsFileIDs'))
        super(AzureJobStore, self).__init__(config=config)
        if self.config.cseKey is not None:
            self.keyPath = self.config.cseKey

    # Table names must be alphanumeric
    nameSeparator = 'xx'

    # Length of a jobID - used to test if a stats file has been read already or not
    jobIDLength = len(str(uuid.uuid4()))

    def qualify(self, name):
        return self.namePrefix + self.nameSeparator + name

    def jobs(self):
        # How many jobs have we done?
        total_processed = 0
        for jobEntity in self.jobItems.query_entities_auto():
            # Process the items in the page
            yield AzureJob.fromEntity(jobEntity)
            total_processed += 1
            if total_processed % 1000 == 0:
                # Produce some feedback for the user, because this can take
                # a long time on, for example, Azure
                logger.info("Processed %d total jobs" % total_processed)
        logger.info("Processed %d total jobs" % total_processed)

    def create(self, command, memory, cores, disk, predecessorNumber=0):
        jobStoreID = self._newJobID()
        job = AzureJob(jobStoreID=jobStoreID, command=command,
                       memory=memory, cores=cores, disk=disk,
                       remainingRetryCount=self._defaultTryCount(),
                       logJobStoreFileID=None,
                       predecessorNumber=predecessorNumber)
        entity = job.toItem(chunkSize=self.jobChunkSize)
        entity['RowKey'] = jobStoreID
        self.jobItems.insert_entity(entity=entity)
        return job

    def exists(self, jobStoreID):
        if self.jobItems.get_entity(row_key=jobStoreID) is None:
            return False
        return True

    def load(self, jobStoreID):
        jobEntity = self.jobItems.get_entity(row_key=jobStoreID)
        if jobEntity is None:
            raise NoSuchJobException(jobStoreID)
        return AzureJob.fromEntity(jobEntity)

    def update(self, job):
        self.jobItems.update_entity(row_key=job.jobStoreID,
                                    entity=job.toItem(chunkSize=self.jobChunkSize))

    def delete(self, jobStoreID):
        try:
            self.jobItems.delete_entity(row_key=jobStoreID)
        except WindowsAzureMissingResourceError:
            # Job deletion is idempotent, and this job has been deleted already
            return
        filterString = "PartitionKey eq '%s'" % jobStoreID
        for fileEntity in self.jobFileIDs.query_entities(filter=filterString):
            jobStoreFileID = fileEntity.RowKey
            self.deleteFile(jobStoreFileID)

    def deleteJobStore(self):
        self.registryTable.delete_entity(row_key=self.namePrefix)
        self.jobItems.delete_table()
        self.jobFileIDs.delete_table()
        self.files.delete_container()
        self.statsFiles.delete_container()
        self.statsFileIDs.delete_table()

    def getEnv(self):
        return dict(AZURE_ACCOUNT_KEY=self.account_key)

    def writeFile(self, localFilePath, jobStoreID=None):
        jobStoreFileID = self._newFileID()
        self.updateFile(jobStoreFileID, localFilePath)
        self._associateFileWithJob(jobStoreFileID, jobStoreID)
        return jobStoreFileID

    def updateFile(self, jobStoreFileID, localFilePath):
        with open(localFilePath) as read_fd:
            with self._uploadStream(jobStoreFileID, self.files) as write_fd:
                while True:
                    buf = read_fd.read(self._maxAzureBlockBytes)
                    write_fd.write(buf)
                    if len(buf) == 0:
                        break

    def readFile(self, jobStoreFileID, localFilePath):
        try:
            with self._downloadStream(jobStoreFileID, self.files) as read_fd:
                with open(localFilePath, 'w') as write_fd:
                    while True:
                        buf = read_fd.read(self._maxAzureBlockBytes)
                        write_fd.write(buf)
                        if not buf:
                            break
        except WindowsAzureMissingResourceError:
            raise NoSuchFileException(jobStoreFileID)

    def deleteFile(self, jobStoreFileID):
        try:
            self.files.delete_blob(blob_name=jobStoreFileID)
            self._dissociateFileFromJob(jobStoreFileID)
        except WindowsAzureMissingResourceError:
            pass

    def fileExists(self, jobStoreFileID):
        # As Azure doesn't have a blob_exists method (at least in the
        # python API) we just try to download the metadata, and hope
        # the metadata is small so the call will be fast.
        try:
            self.files.get_blob_metadata(blob_name=jobStoreFileID)
            return True
        except WindowsAzureMissingResourceError:
            return False

    @contextmanager
    def writeFileStream(self, jobStoreID=None):
        # TODO: this (and all stream methods) should probably use the
        # Append Blob type, but that is not currently supported by the
        # Azure Python API.
        jobStoreFileID = self._newFileID()
        with self._uploadStream(jobStoreFileID, self.files) as fd:
            yield fd, jobStoreFileID
        self._associateFileWithJob(jobStoreFileID, jobStoreID)

    @contextmanager
    def updateFileStream(self, jobStoreFileID):
        with self._uploadStream(jobStoreFileID, self.files,
                                checkForModification=True) as fd:
            yield fd

    def getEmptyFileStoreID(self, jobStoreID=None):
        jobStoreFileID = self._newFileID()
        self.files.put_blob(blob_name=jobStoreFileID, blob='',
                            x_ms_blob_type='BlockBlob')
        self._associateFileWithJob(jobStoreFileID, jobStoreID)
        return jobStoreFileID

    @contextmanager
    def readFileStream(self, jobStoreFileID):
        if not self.fileExists(jobStoreFileID):
            raise NoSuchFileException(jobStoreFileID)
        with self._downloadStream(jobStoreFileID, self.files) as fd:
            yield fd

    @contextmanager
    def writeSharedFileStream(self, sharedFileName, isProtected=None):
        sharedFileID = self._newFileID(sharedFileName)
        with self._uploadStream(sharedFileID, self.files,
                                encrypted=isProtected) as fd:
            yield fd

    @contextmanager
    def readSharedFileStream(self, sharedFileName):
        sharedFileID = self._newFileID(sharedFileName)
        if not self.fileExists(sharedFileID):
            raise NoSuchFileException(sharedFileID)
        with self._downloadStream(sharedFileID, self.files) as fd:
            yield fd

    def writeStatsAndLogging(self, statsAndLoggingString):
        # TODO: would be a great use case for the append blobs, once implemented in the Azure SDK
        jobStoreFileID = self._newFileID()
        encrypted = self.keyPath is not None
        if encrypted:
            statsAndLoggingString = encryption.encrypt(statsAndLoggingString,
                                                       self.keyPath)
        self.statsFiles.put_block_blob_from_text(
            blob_name=jobStoreFileID,
            text=statsAndLoggingString,
            x_ms_meta_name_values=dict(encrypted=str(encrypted)))
        self.statsFileIDs.insert_entity(entity={'RowKey': jobStoreFileID})

    def readStatsAndLogging(self, callback, readAll=False):
        suffix = '_old'
        numStatsFiles = 0
        for entity in self.statsFileIDs.query_entities():
            jobStoreFileID = entity.RowKey
            hasBeenRead = len(jobStoreFileID) > self.jobIDLength
            if not hasBeenRead:
                with self._downloadStream(jobStoreFileID, self.statsFiles) as fd:
                    callback(fd)
                # Mark this entity as read by appending the suffix
                self.statsFileIDs.insert_entity(
                    entity={'RowKey': jobStoreFileID + suffix})
                self.statsFileIDs.delete_entity(row_key=jobStoreFileID)
                numStatsFiles += 1
            elif readAll:
                # Strip the suffix to get the original ID
                jobStoreFileID = jobStoreFileID[:-len(suffix)]
                with self._downloadStream(jobStoreFileID, self.statsFiles) as fd:
                    callback(fd)
                numStatsFiles += 1
        return numStatsFiles

    _azureTimeFormat = "%Y-%m-%dT%H:%M:%SZ"

    def getPublicUrl(self, jobStoreFileID):
        try:
            self.files.get_blob_properties(blob_name=jobStoreFileID)
        except WindowsAzureMissingResourceError:
            raise NoSuchFileException(jobStoreFileID)
        # Compensate for a little bit of clock skew
        startTimeStr = (datetime.utcnow() - timedelta(minutes=5)).strftime(
            self._azureTimeFormat)
        endTime = datetime.utcnow() + self.publicUrlExpiration
        endTimeStr = endTime.strftime(self._azureTimeFormat)
        sap = SharedAccessPolicy(AccessPolicy(startTimeStr, endTimeStr,
                                              BlobSharedAccessPermissions.READ))
        sas_token = self.files.generate_shared_access_signature(
            blob_name=jobStoreFileID, shared_access_policy=sap)
        return self.files.make_blob_url(blob_name=jobStoreFileID) + '?' + sas_token

    def getSharedPublicUrl(self, sharedFileName):
        jobStoreFileID = self._newFileID(sharedFileName)
        return self.getPublicUrl(jobStoreFileID)

    def _newJobID(self):
        # raw UUIDs don't work for Azure property names because the '-' character is disallowed.
        return str(uuid.uuid4()).replace('-', '_')

    # A dummy job ID under which all shared files are stored.
    sharedFileJobID = uuid.UUID('891f7db6-e4d9-4221-a58e-ab6cc4395f94')

    def _newFileID(self, sharedFileName=None):
        if sharedFileName is None:
            ret = str(uuid.uuid4())
        else:
            ret = str(uuid.uuid5(self.sharedFileJobID, str(sharedFileName)))
        return ret.replace('-', '_')

    def _associateFileWithJob(self, jobStoreFileID, jobStoreID=None):
        if jobStoreID is not None:
            self.jobFileIDs.insert_entity(entity={'PartitionKey': jobStoreID,
                                                  'RowKey': jobStoreFileID})

    def _dissociateFileFromJob(self, jobStoreFileID):
        entities = self.jobFileIDs.query_entities(filter="RowKey eq '%s'" % jobStoreFileID)
        if entities:
            assert len(entities) == 1
            jobStoreID = entities[0].PartitionKey
            self.jobFileIDs.delete_entity(partition_key=jobStoreID,
                                          row_key=jobStoreFileID)

    def _getOrCreateTable(self, tableName):
        # This will not fail if the table already exists.
        for attempt in retry_on_error():
            with attempt:
                self.tableService.create_table(tableName)
        return AzureTable(self.tableService, tableName)

    def _getOrCreateBlobContainer(self, containerName):
        for attempt in retry_on_error():
            with attempt:
                self.blobService.create_container(containerName)
        return AzureBlobContainer(self.blobService, containerName)

    def _sanitizeTableName(self, tableName):
        """
        Azure table names must start with a letter and be alphanumeric.

        This will never cause a collision if uuids are used, but
        otherwise may not be safe.
        """
        return 'a' + filter(lambda x: x.isalnum(), tableName)

    # Maximum bytes that can be in any block of an Azure block blob
    # https://github.com/Azure/azure-storage-python/blob/4c7666e05a9556c10154508335738ee44d7cb104/azure/storage/blob/blobservice.py#L106
    _maxAzureBlockBytes = 4 * 1024 * 1024

    @contextmanager
    def _uploadStream(self, jobStoreFileID, container,
                      checkForModification=False, encrypted=None):
        """
        :param encrypted: True to enforce encryption (will raise exception
        unless key is set), False to prevent encryption or None to encrypt
        if key is set.
        """
        if checkForModification:
            try:
                expectedVersion = container.get_blob_properties(
                    blob_name=jobStoreFileID)['etag']
            except WindowsAzureMissingResourceError:
                expectedVersion = None
        if encrypted is None:
            encrypted = self.keyPath is not None
        elif encrypted:
            if self.keyPath is None:
                raise RuntimeError('Encryption requested but no key was provided')
        maxBlockSize = self._maxAzureBlockBytes
        if encrypted:
            # There is a small overhead for encrypted data.
            maxBlockSize -= encryption.overhead
        readable_fh, writable_fh = os.pipe()
        with os.fdopen(readable_fh, 'r') as readable:
            with os.fdopen(writable_fh, 'w') as writable:

                def reader():
                    blockIDs = []
                    try:
                        while True:
                            buf = readable.read(maxBlockSize)
                            if len(buf) == 0:
                                # We're safe to break here even if we never read anything, since
                                # putting an empty block list creates an empty blob.
                                break
                            if encrypted:
                                buf = encryption.encrypt(buf, self.keyPath)
                            blockID = self._newFileID()
                            container.put_block(blob_name=jobStoreFileID,
                                                block=buf, blockid=blockID)
                            blockIDs.append(blockID)
                    except:
                        # This is guaranteed to delete any uncommitted blocks.
                        container.delete_blob(blob_name=jobStoreFileID)
                        raise
                    if checkForModification and expectedVersion is not None:
                        # Acquire a (60-second) write lock,
                        leaseID = container.lease_blob(
                            blob_name=jobStoreFileID,
                            x_ms_lease_action='acquire')['x-ms-lease-id']
                        # check for modification,
                        blobProperties = container.get_blob_properties(
                            blob_name=jobStoreFileID)
                        if blobProperties['etag'] != expectedVersion:
                            container.lease_blob(blob_name=jobStoreFileID,
                                                 x_ms_lease_action='release',
                                                 x_ms_lease_id=leaseID)
                            raise ConcurrentFileModificationException(jobStoreFileID)
                        # commit the file,
                        container.put_block_list(
                            blob_name=jobStoreFileID,
                            block_list=blockIDs,
                            x_ms_lease_id=leaseID,
                            x_ms_meta_name_values=dict(encrypted=str(encrypted)))
                        # then release the lock.
                        container.lease_blob(blob_name=jobStoreFileID,
                                             x_ms_lease_action='release',
                                             x_ms_lease_id=leaseID)
                    else:
                        # No need to check for modification, just blindly write over whatever
                        # was there.
                        container.put_block_list(
                            blob_name=jobStoreFileID,
                            block_list=blockIDs,
                            x_ms_meta_name_values=dict(encrypted=str(encrypted)))

                thread = ExceptionalThread(target=reader)
                thread.start()
                yield writable
            # The writable is now closed. This will send EOF to the readable and cause that
            # thread to finish.
            thread.join()

    @contextmanager
    def _downloadStream(self, jobStoreFileID, container):
        # The reason this is not in the writer is so we catch non-existent blobs early
        blobProps = container.get_blob_properties(blob_name=jobStoreFileID)
        encrypted = strict_bool(blobProps['x-ms-meta-encrypted'])
        if encrypted and self.keyPath is None:
            raise AssertionError('Content is encrypted but no key was provided.')
        readable_fh, writable_fh = os.pipe()
        with os.fdopen(readable_fh, 'r') as readable:
            with os.fdopen(writable_fh, 'w') as writable:

                def writer():
                    try:
                        chunkStartPos = 0
                        fileSize = int(blobProps['Content-Length'])
                        while chunkStartPos < fileSize:
                            chunkEndPos = chunkStartPos + self._maxAzureBlockBytes - 1
                            buf = container.get_blob(
                                blob_name=jobStoreFileID,
                                x_ms_range="bytes=%d-%d" % (chunkStartPos, chunkEndPos))
                            if encrypted:
                                buf = encryption.decrypt(buf, self.keyPath)
                            writable.write(buf)
                            chunkStartPos = chunkEndPos + 1
                    finally:
                        # Ensure readers aren't left blocking if this thread crashes.
                        # This close() will send EOF to the reading end and ultimately cause the
                        # yield to return. It also makes the implicit .close() done by the enclosing
                        # "with" context redundant but that should be ok since .close() on file
                        # objects are idempotent.
                        writable.close()

                thread = ExceptionalThread(target=writer)
                thread.start()
                yield readable
                thread.join()
class Azure():
    def __init__(self):
        self.tbl_name = 'alxserver'
        self.tbl_net = "net"
        self.tbl_info = "info"
        self.tbl_node = "node"
        self.q_name = 'alxserver'
        self.msg_delete_ids = {self.q_name: {}, }
        self.msg_no_process = {self.q_name: {}, }
        self.msg_template = {"from": "", "from-ip": "", "to": "", "cmd": "", }
        self.msg_key_na = _('Key not available')
        self.msg_ttl = 60 * 60
        self.msg_delete_ids_ttl = 60 * 60 * 3
        try:
            key = alxlib.key.Key()
            if os.path.isfile(key.get_path()):
                sys.path.insert(0, key.get_dir())
                import alxkey
                self.key = alxkey.alxkey_azure
                self.tbl = TableService(account_name=self.key['AZURE_STORAGE_ACCOUNT_NAME'],
                                        account_key=self.key['AZURE_ACCESS_KEY'])
            else:
                raise (self.msg_key_na)
        except:
            raise (self.msg_key_na)

    # Connection
    def connect_sqs(self):
        try:
            self.q = QueueService(account_name=self.key['AZURE_STORAGE_ACCOUNT_NAME'],
                                  account_key=self.key['AZURE_ACCESS_KEY'])
            self.q.create_queue(self.q_name)
            return self.q
        except:
            logging.critical(_("Connection Failure: possibly bad key"))
            return None

    # msg
    def msg_get_all(self):
        try:
            self.connect_sqs()
            msgs = self.q.peek_messages(self.q_name, numofmessages=16)
            queue_metadata = self.q.get_queue_metadata(self.q_name)
            count = queue_metadata['x-ms-approximate-messages-count']
            logging.info("Checking queue {0}, {1} message".format(self.q_name, count))
            if count == 0:
                return None
            else:
                return msgs
        except Exception as e:
            logging.critical(_("MSG check error"))
            return None

    def msg_send(self, dict):
        try:
            self.connect_sqs()
            body = self.msg_encode(dict)
            self.q.put_message(self.q_name, body, messagettl=self.msg_ttl)
            #print(encode.decode())
        except Exception as e:
            logging.critical(_("Message creation failure: msg_send()"))

    def msg_encode(self, dict):
        try:
            j = json.dumps(dict, ensure_ascii=False)
            body = base64.b64encode(j.encode()).decode()
            return body
        except Exception as e:
            logging.critical(_("Message creation failure: msg_encode()"))

    def msg_decode(self, body):
        try:
            dict = eval(base64.b64decode(body.encode()).decode())
            return dict
        except:
            logging.critical(_("Message decode failure: msg_decode()"))
            return None

    def msg_delete(self):
        try:
            if len(self.msg_delete_ids[self.q_name]) > 0:
                self.connect_sqs()
                msgs = self.q.get_messages(self.q_name, numofmessages=16)
                for msg in msgs:
                    if self.msg_delete_ids[self.q_name].get(msg.message_id, None) is not None:
                        self.q.delete_message(self.q_name, msg.message_id, msg.pop_receipt)
                        del self.msg_delete_ids[self.q_name][msg.message_id]
                        logging.info("Deleting msg {0}".format(msg.message_id))
                for key, value in self.msg_delete_ids[self.q_name].items():
                    seconds = (datetime.datetime.fromtimestamp(self.get_timestamp_now()) -
                               datetime.datetime.fromtimestamp(float(value))).seconds
                    if seconds > self.msg_delete_ids_ttl:
                        del self.msg_delete_ids[self.q_name][key]
        except:
            logging.critical(_("Message delete failure: msg_delete()"))

    def process_my_msg(self, func, msgs):
        try:
            if msgs is not None:
                for msg in msgs:
                    body = msg.message_text
                    if body is not None:
                        dict = self.msg_decode(body)
                        if dict["to"] == "*" or dict["to"] == format(socket.gethostname()):
                            if self.msg_no_process[self.q_name].get(msg.message_id, None) is None:
                                self.msg_no_process[self.q_name][msg.message_id] = dict['creation-time']
                                func(msg, dict)
                            else:
                                logging.debug("Ignore msg ...{0}".format(msg.message_id))
        except BaseException as e:
            logging.critical(_("MSG process error: process_my_msg() {0}").format(e))

    # Server
    def server_run(self):
        while True:
            try:
                self.update_net()
                """
                msgs = self.msg_get_all()
                self.process_my_msg(lambda x, y: self.server_msg_process(x, y), msgs)
                time.sleep(int(self.key["AZURE_POLL"]))
                self.msg_delete()
                for key, value in self.msg_no_process[self.q_name].items():
                    seconds = (datetime.datetime.fromtimestamp(self.get_timestamp_now()) -
                               datetime.datetime.fromtimestamp(float(value))).seconds
                    if seconds > self.msg_delete_ids_ttl:
                        del self.msg_no_process[self.q_name][key]
                logging.debug("msg_no_process->{0}".format(self.msg_no_process))
                """
            except Exception as e:
                logging.critical("server_run->while {0}".format(e))
                #print(e)
                raise ()

    def server_msg_process(self, msg, dict):
        try:
            if dict["cmd"] == "ping":
                logging.info("Processing ... {0}".format(msg.message_id))
                self.pong_send(dict["from"], msg.message_id)
                if dict["to"] == format(socket.gethostname()):
                    #self.q.delete_message(self.q_name, msg.message_id, msg.pop_receipt)
                    self.msg_delete_ids[self.q_name][msg.message_id] = dict['creation-time']
        except BaseException as e:
            logging.critical(_("MSG process error: server_cmd() {0}").format(e))

    # Client
    def client_print(self):
        try:
            msgs = self.msg_get_all()
            self.process_my_msg(lambda x, y: self.client_msg_process(x, y), msgs)
            self.msg_delete()
            #time.sleep(5)
        except:
            raise ()

    def client_msg_process(self, msg, dict):
        try:
            if dict["cmd"] == "pong":
                self.msg_delete_ids[self.q_name][msg.message_id] = dict['creation-time']
                #self.q.delete_message(self.q_name, msg.message_id, msg.pop_receipt)
                import datetime
                print("reply\t\t{0}\t\t{1}\t\t{2}".format(
                    dict["from"], dict["from-ip"],
                    self.get_time(float(dict['creation-time']))))
                logging.debug("client_msg_process creation-time {0}".format(dict['creation-time']))
                #print(self.get_time(msg.attributes['ApproximateFirstReceiveTimestamp']))
                #print(datetime.datetime.fromtimestamp(time.time(int(msg.attributes["ApproximateFirstReceiveTimestamp"]))).strftime('%Y-%m-%d %H:%M:%S'))
        except BaseException as e:
            logging.critical(_("MSG process error: client_msg_process() {0}").format(e))

    # cmd
    def ping(self, count, timeout):
        self.connect_sqs()
        print(_("Sending ping ..."))
        self.ping_send(count)
        print(_("Waiting for reply ..."))
        time.sleep(timeout)
        self.client_print()
        print(_("Timeout"))

    def ping_send(self, count):
        try:
            import copy, alxlib.node.info
            dict = copy.deepcopy(self.msg_template)
            dict["from"] = format(socket.gethostname())
            dict["from-ip"] = alxlib.node.info.Info().get_ip()
            dict["to"] = "*"
            dict["cmd"] = "ping"
            dict["creation-time"] = str(self.get_timestamp_now())
            for i in range(0, count):
                self.msg_send(dict)
        except Exception as e:
            logging.critical(_("Message creation failure: ping_send()"))

    def pong_send(self, to, replyToId):
        try:
            import copy, alxlib.node.info
            dict = copy.deepcopy(self.msg_template)
            dict["from"] = format(socket.gethostname())
            dict["from-ip"] = alxlib.node.info.Info().get_ip()
            dict["to"] = to
            dict["reply-to-id"] = replyToId
            dict["cmd"] = "pong"
            dict['creation-time'] = str(self.get_timestamp_now())
            self.msg_send(dict)
        except:
            logging.critical(_("Message creation failure"))

    # helper
    def get_time(self, timestamp):
        try:
            return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(timestamp))
        except:
            return ""

    def get_timestamp_now(self):
        return time.time()

    # table
    def tbl_update(self, name, p, r, d):
        try:
            d["updatetime"] = str(self.get_timestamp_now())
            self.tbl.create_table(name)
            self.tbl.insert_or_merge_entity(name, p, r, d)
        except Exception as e:
            logging.critical("Error update_tbl {0}".format(e))

    def tbl_row_query(self, name, q, n=1000, next_partition_key_=None, next_row_key_=None):
        try:
            self.tbl.create_table(name)
            rows = self.tbl.query_entities(name, filter=q, top=n,
                                           next_partition_key=next_partition_key_,
                                           next_row_key=next_row_key_)
            return rows
        except Exception as e:
            return None

    def entity2dict(self, e):
        try:
            keys = dir(e)
            d = {}
            for key in keys:
                d[key] = getattr(e, key, "")
            return d
        except:
            return None

    def update_net(self):
        try:
            info = Info().get_all()
            self.tbl_update(self.tbl_name, self.tbl_net, info["hostname"], info)
        except:
            logging.warning("Error update_net")

    def wrap_text(self, text, max_width):
        if text is not None:
            from textwrap import wrap
            return '\n'.join(wrap(text, max_width))
        else:
            return ""

    def print_list(self):
        try:
            rows = self.tbl_row_query(self.tbl_name, "PartitionKey eq 'net'")
            #print(dir(row))
            from colorclass import Color, Windows
            from terminaltables import AsciiTable
            Windows.enable(auto_colors=True, reset_atexit=True)
            table_data = [[Color('{autocyan}Hostname'),
                           Color('{autocyan}Last Reply'),
                           Color('{autocyan}IP'),
                           Color('{autocyan}OS'),
                           Color('{autocyan}OS Release'),
                           Color('{autocyan}OS Version'),
                           Color('{autocyan}System'),
                           Color('{autocyan}Processor'),
                           ]]
            max_wrap = 10
            for row in rows:
                #d = self.entity2dict(row)
                d = row.__dict__
                time = alxlib.time_help.format_from_timestamp(d['Timestamp'])
                li = [d['hostname'], time, d['ip'], d['os'], d['osrelease'],
                      self.wrap_text(d['osversion'], max_wrap), d["system"],
                      self.wrap_text(d["processor"], max_wrap), ]
                table_data.append(li)
            table = AsciiTable(table_data)
            table.table_data = table_data
            print(table.table)
        except Exception as e:
            logging.warning("Error print_list")
            print(e)
# Update the test cat's owner, to whom we will send messages.
from azure.storage import TableService
import config

table_service = TableService(account_name=config.ACC_NAME,
                             account_key=config.ACC_KEY)
#newMaster = {'masterID': '188622142'}
#table_service.update_entity('bandcredentials', 'band', 'test', newMaster)
#table_service.insert_entity('bandcredentials', newMaster)
task = table_service.get_entity('bandcredentials', 'band', 'test')
print task.masterID
from azure.storage import TableService, Entity

# Replace account_name with your account and account_key with your primary key.
table_service = TableService(account_name='myaccount', account_key='myKey')
table_service.create_table('tasktable')

# Update an entity in the table in two ways.
task1 = {'description': 'schedule doctor appointment', 'priority': 300}
table_service.update_entity('tasktable', 'toDoTasks', '1', task1)

# The legacy azure.storage SDK also takes the keys as arguments here.
task2 = {'PartitionKey': 'toDoTasks', 'RowKey': '2',
         'description': 'Pay bills', 'priority': 200}
table_service.insert_or_replace_entity('tasktable', 'toDoTasks', '2', task2)
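# Reading an entity back to confirm the writes above (same legacy SDK):
task = table_service.get_entity('tasktable', 'toDoTasks', '2')
print(task.description)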
#coding:utf8
from azure.storage import TableService, Entity

table_service = TableService(account_name='portalvhdspbrd34f2fnbl',
                             account_key='y48JkXg+VcHQRCgsylJf4xV4Fd0AuJNkQKSwGhAR+BppHnFhkI+UHPOS/oYaTo0rqFCGQkEBW+thNFZNB9W8yg==')
task = table_service.get_entity('tasktable', 'tasksSeattle', '1')
print(task.description)
print(task.priority)
class OrderTable:
    def __init__(self):
        self.table_service = TableService(account_name='portalvhdspbrd34f2fnbl',
                                          account_key='y48JkXg+VcHQRCgsylJf4xV4Fd0AuJNkQKSwGhAR+BppHnFhkI+UHPOS/oYaTo0rqFCGQkEBW+thNFZNB9W8yg==')

    def insert(self, ID, CustomerID, ProductID, Datetime, TotalPrice):
        task = {'PartitionKey': 'Order', 'RowKey': ID,
                'CustomerID': CustomerID, 'ProductID': ProductID,
                'Datetime': Datetime, 'TotalPrice': TotalPrice}
        try:
            self.table_service.insert_entity('Order', task)
        except:
            print "azure.WindowsAzureConflictError: Order Conflict"

    def listAll(self):
        # Pages of 1000 entities, chained by continuation tokens;
        # a generalized loop is sketched after this class.
        task1 = self.table_service.query_entities('Order', None, None, 1000)
        task2 = self.table_service.query_entities('Order', None, None, 1000,
                                                  task1.x_ms_continuation['NextPartitionKey'],
                                                  task1.x_ms_continuation['NextRowKey'])
        task3 = self.table_service.query_entities('Order', None, None, 1000,
                                                  task2.x_ms_continuation['NextPartitionKey'],
                                                  task2.x_ms_continuation['NextRowKey'])
        task4 = self.table_service.query_entities('Order', None, None, 1000,
                                                  task3.x_ms_continuation['NextPartitionKey'],
                                                  task3.x_ms_continuation['NextRowKey'])
        '''
        task5 = self.table_service.query_entities('Order', None, None, 1000,
                                                  task4.x_ms_continuation['NextPartitionKey'],
                                                  task4.x_ms_continuation['NextRowKey'])
        task6 = self.table_service.query_entities('Order', None, None, 1000,
                                                  task5.x_ms_continuation['NextPartitionKey'],
                                                  task5.x_ms_continuation['NextRowKey'])
        task7 = self.table_service.query_entities('Order', None, None, 1000,
                                                  task6.x_ms_continuation['NextPartitionKey'],
                                                  task6.x_ms_continuation['NextRowKey'])
        task8 = self.table_service.query_entities('Order', None, None, 1000,
                                                  task7.x_ms_continuation['NextPartitionKey'],
                                                  task7.x_ms_continuation['NextRowKey'])
        task9 = self.table_service.query_entities('Order', None, None, 1000,
                                                  task8.x_ms_continuation['NextPartitionKey'],
                                                  task8.x_ms_continuation['NextRowKey'])
        task10 = self.table_service.query_entities('Order', None, None, 1000,
                                                   task9.x_ms_continuation['NextPartitionKey'],
                                                   task9.x_ms_continuation['NextRowKey'])
        '''
        i = 0
        for task in task1:
            i = i + 1
            '''
            print("ID: %s, CustomerID: %s, ProductID: %s, Datetime: %s, TotalPrice: %s"
                  % (task.RowKey, task.CustomerID, task.ProductID,
                     task.Datetime, task.TotalPrice))
            '''
            print(task.TotalPrice)
        for task in task2:
            i = i + 1
            print(task.TotalPrice)
        print("Total Order: %s" % i)

    def TableInfo(self):
        info = self.table_service.query_tables()
        for i in info:
            print(i.name)
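# The unrolled task1..task4 pagination in listAll() can be generalized; a
# sketch of a generator that follows the x_ms_continuation tokens returned
# by the same legacy query_entities signature used above:
def query_all(table_service, table_name, page_size=1000):
    next_pk, next_rk = None, None
    while True:
        page = table_service.query_entities(table_name, None, None,
                                            page_size, next_pk, next_rk)
        for entity in page:
            yield entity
        cont = getattr(page, 'x_ms_continuation', None) or {}
        next_pk = cont.get('NextPartitionKey')
        next_rk = cont.get('NextRowKey')
        if not next_pk:
            break

# e.g. counting every order without hand-chaining pages:
# total = sum(1 for _ in query_all(OrderTable().table_service, 'Order'))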
    tmpfile.write(json.dumps(settings, indent=4, sort_keys=True))

username = settings["username"]
home_dir = os.path.join("/home", username)
install_log = os.path.join(home_dir, "install.log")

# Prepare the containers
storage_account_name = settings["STORAGE-ACCOUNT-NAME"]
storage_access_key = settings["STORAGE-ACCESS-KEY"]
blob_service = BlobService(storage_account_name, storage_access_key)
blob_service.create_container('bosh')
blob_service.create_container(container_name='stemcell',
                              x_ms_blob_public_access='blob')

# Prepare the table for storing metadata of the storage account and stemcells
table_service = TableService(storage_account_name, storage_access_key)
table_service.create_table('stemcells')

# Generate the private key and certificate
call("sh create_cert.sh", shell=True)
call("cp bosh.key ./bosh/bosh", shell=True)
with open('bosh_cert.pem', 'r') as tmpfile:
    ssh_cert = tmpfile.read()
ssh_cert = "|\n" + ssh_cert
ssh_cert = "\n ".join([line for line in ssh_cert.split('\n')])

# Render the yml template for bosh-init
bosh_template = 'bosh.yml'
if os.path.exists(bosh_template):
    with open(bosh_template, 'r') as tmpfile:
        contents = tmpfile.read()
class TableServiceTest(AzureTestCase):

    def setUp(self):
        self.ts = TableService(credentials.getStorageServicesName(),
                               credentials.getStorageServicesKey())
        set_service_options(self.ts)
        self.table_name = getUniqueName('uttable')
        self.additional_table_names = []

    def tearDown(self):
        self.cleanup()
        return super(TableServiceTest, self).tearDown()

    def cleanup(self):
        try:
            self.ts.delete_table(self.table_name)
        except:
            pass
        for name in self.additional_table_names:
            try:
                self.ts.delete_table(name)
            except:
                pass

    #--Helpers-----------------------------------------------------------------
    def _create_table(self, table_name):
        '''
        Creates a table with the specified name.
        '''
        self.ts.create_table(table_name, True)

    def _create_table_with_default_entities(self, table_name, entity_count):
        '''
        Creates a table with the specified name and adds entities with the
        default set of values. PartitionKey is set to 'MyPartition' and RowKey
        is set to a unique counter value starting at 1 (as a string).
        '''
        entities = []
        self._create_table(table_name)
        for i in range(1, entity_count + 1):
            entities.append(self.ts.insert_entity(
                table_name,
                self._create_default_entity_dict('MyPartition', str(i))))
        return entities

    def _create_default_entity_class(self, partition, row):
        '''
        Creates a class-based entity with fixed values, using all
        of the supported data types.
        '''
        entity = Entity()
        entity.PartitionKey = partition
        entity.RowKey = row
        entity.age = 39
        entity.sex = 'male'
        entity.married = True
        entity.deceased = False
        entity.optional = None
        entity.ratio = 3.1
        entity.large = 9333111000
        entity.Birthday = datetime(1973, 10, 4)
        entity.birthday = datetime(1970, 10, 4)
        entity.binary = None
        entity.other = EntityProperty('Edm.Int64', 20)
        entity.clsid = EntityProperty(
            'Edm.Guid', 'c9da6455-213d-42c9-9a79-3e9149a57833')
        return entity

    def _create_default_entity_dict(self, partition, row):
        '''
        Creates a dictionary-based entity with fixed values, using all
        of the supported data types.
        '''
        return {'PartitionKey': partition,
                'RowKey': row,
                'age': 39,
                'sex': 'male',
                'married': True,
                'deceased': False,
                'optional': None,
                'ratio': 3.1,
                'large': 9333111000,
                'Birthday': datetime(1973, 10, 4),
                'birthday': datetime(1970, 10, 4),
                'other': EntityProperty('Edm.Int64', 20),
                'clsid': EntityProperty(
                    'Edm.Guid', 'c9da6455-213d-42c9-9a79-3e9149a57833')}

    def _create_updated_entity_dict(self, partition, row):
        '''
        Creates a dictionary-based entity with fixed values, with a
        different set of values than the default entity. It
        adds fields, changes field values, changes field types,
        and removes fields when compared to the default entity.
        '''
        return {'PartitionKey': partition,
                'RowKey': row,
                'age': 'abc',
                'sex': 'female',
                'sign': 'aquarius',
                'birthday': datetime(1991, 10, 4)}

    def _assert_default_entity(self, entity):
        '''
        Asserts that the entity passed in matches the default entity.
        '''
        self.assertEqual(entity.age, 39)
        self.assertEqual(entity.sex, 'male')
        self.assertEqual(entity.married, True)
        self.assertEqual(entity.deceased, False)
        self.assertFalse(hasattr(entity, "aquarius"))
        self.assertEqual(entity.ratio, 3.1)
        self.assertEqual(entity.large, 9333111000)
        self.assertEqual(entity.Birthday, datetime(1973, 10, 4, tzinfo=tzutc()))
        self.assertEqual(entity.birthday, datetime(1970, 10, 4, tzinfo=tzutc()))
        self.assertEqual(entity.other, 20)
        self.assertIsInstance(entity.clsid, EntityProperty)
        self.assertEqual(entity.clsid.type, 'Edm.Guid')
        self.assertEqual(entity.clsid.value,
                         'c9da6455-213d-42c9-9a79-3e9149a57833')
        self.assertTrue(hasattr(entity, "Timestamp"))

    def _assert_updated_entity(self, entity):
        '''
        Asserts that the entity passed in matches the updated entity.
        '''
        self.assertEqual(entity.age, 'abc')
        self.assertEqual(entity.sex, 'female')
        self.assertFalse(hasattr(entity, "married"))
        self.assertFalse(hasattr(entity, "deceased"))
        self.assertEqual(entity.sign, 'aquarius')
        self.assertFalse(hasattr(entity, "optional"))
        self.assertFalse(hasattr(entity, "ratio"))
        self.assertFalse(hasattr(entity, "large"))
        self.assertFalse(hasattr(entity, "Birthday"))
        self.assertEqual(entity.birthday, datetime(1991, 10, 4, tzinfo=tzutc()))
        self.assertFalse(hasattr(entity, "other"))
        self.assertFalse(hasattr(entity, "clsid"))
        self.assertTrue(hasattr(entity, "Timestamp"))

    def _assert_merged_entity(self, entity):
        '''
        Asserts that the entity passed in matches the default entity
        merged with the updated entity.
        '''
        self.assertEqual(entity.age, 'abc')
        self.assertEqual(entity.sex, 'female')
        self.assertEqual(entity.sign, 'aquarius')
        self.assertEqual(entity.married, True)
        self.assertEqual(entity.deceased, False)
        self.assertEqual(entity.ratio, 3.1)
        self.assertEqual(entity.large, 9333111000)
        self.assertEqual(entity.Birthday, datetime(1973, 10, 4, tzinfo=tzutc()))
        self.assertEqual(entity.birthday, datetime(1991, 10, 4, tzinfo=tzutc()))
        self.assertEqual(entity.other, 20)
        self.assertIsInstance(entity.clsid, EntityProperty)
        self.assertEqual(entity.clsid.type, 'Edm.Guid')
        self.assertEqual(entity.clsid.value,
                         'c9da6455-213d-42c9-9a79-3e9149a57833')
        self.assertTrue(hasattr(entity, "Timestamp"))

    #--Test cases for table service -------------------------------------------
    def test_get_set_table_service_properties(self):
        table_properties = self.ts.get_table_service_properties()
        self.ts.set_table_service_properties(table_properties)

        tests = [('logging.delete', True),
                 ('logging.delete', False),
                 ('logging.read', True),
                 ('logging.read', False),
                 ('logging.write', True),
                 ('logging.write', False),
                 ]
        for path, value in tests:
            # print path
            cur = table_properties
            for component in path.split('.')[:-1]:
                cur = getattr(cur, component)
            last_attr = path.split('.')[-1]
            setattr(cur, last_attr, value)
            self.ts.set_table_service_properties(table_properties)

            retry_count = 0
            while retry_count < MAX_RETRY:
                table_properties = self.ts.get_table_service_properties()
                cur = table_properties
                for component in path.split('.'):
                    cur = getattr(cur, component)
                if value == cur:
                    break
                time.sleep(1)
                retry_count += 1

            self.assertEqual(value, cur)

    def test_table_service_retention_single_set(self):
        table_properties = self.ts.get_table_service_properties()
        table_properties.logging.retention_policy.enabled = False
        table_properties.logging.retention_policy.days = 5

        # TODO: Better error, ValueError?
        self.assertRaises(WindowsAzureError,
                          self.ts.set_table_service_properties,
                          table_properties)

        table_properties = self.ts.get_table_service_properties()
        table_properties.logging.retention_policy.days = None
        table_properties.logging.retention_policy.enabled = True

        # TODO: Better error, ValueError?
        self.assertRaises(WindowsAzureError,
                          self.ts.set_table_service_properties,
                          table_properties)

    def test_table_service_set_both(self):
        table_properties = self.ts.get_table_service_properties()
        table_properties.logging.retention_policy.enabled = True
        table_properties.logging.retention_policy.days = 5
        self.ts.set_table_service_properties(table_properties)
        table_properties = self.ts.get_table_service_properties()
        self.assertEqual(True, table_properties.logging.retention_policy.enabled)
        self.assertEqual(5, table_properties.logging.retention_policy.days)

    #--Test cases for tables --------------------------------------------------
    def test_create_table(self):
        # Arrange

        # Act
        created = self.ts.create_table(self.table_name)

        # Assert
        self.assertTrue(created)

    def test_create_table_fail_on_exist(self):
        # Arrange

        # Act
        created = self.ts.create_table(self.table_name, True)

        # Assert
        self.assertTrue(created)

    def test_create_table_with_already_existing_table(self):
        # Arrange

        # Act
        created1 = self.ts.create_table(self.table_name)
        created2 = self.ts.create_table(self.table_name)

        # Assert
        self.assertTrue(created1)
        self.assertFalse(created2)

    def test_create_table_with_already_existing_table_fail_on_exist(self):
        # Arrange

        # Act
        created = self.ts.create_table(self.table_name)
        with self.assertRaises(WindowsAzureError):
            self.ts.create_table(self.table_name, True)

        # Assert
        self.assertTrue(created)

    def test_query_tables(self):
        # Arrange
        self._create_table(self.table_name)

        # Act
        tables = self.ts.query_tables()
        for table in tables:
            pass

        # Assert
        tableNames = [x.name for x in tables]
        self.assertGreaterEqual(len(tableNames), 1)
        self.assertGreaterEqual(len(tables), 1)
        self.assertIn(self.table_name, tableNames)

    def test_query_tables_with_table_name(self):
        # Arrange
        self._create_table(self.table_name)

        # Act
        tables = self.ts.query_tables(self.table_name)
        for table in tables:
            pass

        # Assert
        self.assertEqual(len(tables), 1)
        self.assertEqual(tables[0].name, self.table_name)

    def test_query_tables_with_table_name_no_tables(self):
        # Arrange

        # Act
        with self.assertRaises(WindowsAzureError):
            self.ts.query_tables(self.table_name)

        # Assert

    def test_query_tables_with_top(self):
        # Arrange
        self.additional_table_names = [
            self.table_name + suffix for suffix in 'abcd']
        for name in self.additional_table_names:
            self.ts.create_table(name)

        # Act
        tables = self.ts.query_tables(None, 3)
        for table in tables:
            pass

        # Assert
        self.assertEqual(len(tables), 3)

    def test_query_tables_with_top_and_next_table_name(self):
        # Arrange
        self.additional_table_names = [
            self.table_name + suffix for suffix in 'abcd']
        for name in self.additional_table_names:
            self.ts.create_table(name)

        # Act
        tables_set1 = self.ts.query_tables(None, 3)
        tables_set2 = self.ts.query_tables(
            None, 3, tables_set1.x_ms_continuation['NextTableName'])

        # Assert
        self.assertEqual(len(tables_set1), 3)
        self.assertGreaterEqual(len(tables_set2), 1)
        self.assertLessEqual(len(tables_set2), 3)

    def test_delete_table_with_existing_table(self):
        # Arrange
        self._create_table(self.table_name)

        # Act
        deleted = self.ts.delete_table(self.table_name)

        # Assert
        self.assertTrue(deleted)
        tables = self.ts.query_tables()
        self.assertNamedItemNotInContainer(tables, self.table_name)

    def test_delete_table_with_existing_table_fail_not_exist(self):
        # Arrange
        self._create_table(self.table_name)

        # Act
        deleted = self.ts.delete_table(self.table_name, True)

        # Assert
        self.assertTrue(deleted)
        tables = self.ts.query_tables()
        self.assertNamedItemNotInContainer(tables, self.table_name)

    def test_delete_table_with_non_existing_table(self):
        # Arrange

        # Act
        deleted = self.ts.delete_table(self.table_name)

        # Assert
        self.assertFalse(deleted)

    def test_delete_table_with_non_existing_table_fail_not_exist(self):
        # Arrange

        # Act
        with self.assertRaises(WindowsAzureError):
            self.ts.delete_table(self.table_name, True)

        # Assert

    #--Test cases for entities ------------------------------------------
    def test_insert_entity_dictionary(self):
        # Arrange
        self._create_table(self.table_name)

        # Act
        dict = self._create_default_entity_dict('MyPartition', '1')
        resp = self.ts.insert_entity(self.table_name, dict)

        # Assert
        self.assertIsNotNone(resp)

    def test_insert_entity_class_instance(self):
        # Arrange
        self._create_table(self.table_name)

        # Act
        entity = self._create_default_entity_class('MyPartition', '1')
        resp = self.ts.insert_entity(self.table_name, entity)

        # Assert
        self.assertIsNotNone(resp)

    def test_insert_entity_conflict(self):
        # Arrange
        self._create_table_with_default_entities(self.table_name, 1)

        # Act
        with self.assertRaises(WindowsAzureError):
            self.ts.insert_entity(
                self.table_name,
                self._create_default_entity_dict('MyPartition', '1'))

        # Assert

    def test_get_entity(self):
        # Arrange
        self._create_table_with_default_entities(self.table_name, 1)

        # Act
        resp = self.ts.get_entity(self.table_name, 'MyPartition', '1')

        # Assert
        self.assertEqual(resp.PartitionKey, 'MyPartition')
        self.assertEqual(resp.RowKey, '1')
        self._assert_default_entity(resp)

    def test_get_entity_not_existing(self):
        # Arrange
        self._create_table(self.table_name)

        # Act
        with self.assertRaises(WindowsAzureError):
            self.ts.get_entity(self.table_name, 'MyPartition', '1')

        # Assert

    def test_get_entity_with_select(self):
        # Arrange
        self._create_table_with_default_entities(self.table_name, 1)

        # Act
        resp = self.ts.get_entity(self.table_name, 'MyPartition', '1', 'age,sex')

        # Assert
        self.assertEqual(resp.age, 39)
        self.assertEqual(resp.sex, 'male')
        self.assertFalse(hasattr(resp, "birthday"))
        self.assertFalse(hasattr(resp, "married"))
        self.assertFalse(hasattr(resp, "deceased"))

    def test_query_entities(self):
        # Arrange
        self._create_table_with_default_entities(self.table_name, 2)

        # Act
        resp = self.ts.query_entities(self.table_name)

        # Assert
        self.assertEqual(len(resp), 2)
        for entity in resp:
            self.assertEqual(entity.PartitionKey, 'MyPartition')
            self._assert_default_entity(entity)
        self.assertEqual(resp[0].RowKey, '1')
        self.assertEqual(resp[1].RowKey, '2')

    def test_query_entities_large(self):
        # Arrange
        self._create_table(self.table_name)
        total_entities_count = 1000
        entities_per_batch = 50

        for j in range(total_entities_count // entities_per_batch):
            self.ts.begin_batch()
            for i in range(entities_per_batch):
                entity = Entity()
                entity.PartitionKey = 'large'
                entity.RowKey = 'batch{0}-item{1}'.format(j, i)
                entity.test = EntityProperty('Edm.Boolean', 'true')
                entity.test2 = 'hello world;' * 100
                entity.test3 = 3
                entity.test4 = EntityProperty('Edm.Int64', '1234567890')
                entity.test5 = datetime.utcnow()
                self.ts.insert_entity(self.table_name, entity)
            self.ts.commit_batch()

        # Act
        start_time = datetime.now()
        resp = self.ts.query_entities(self.table_name)
        elapsed_time = datetime.now() - start_time

        # Assert
        print('query_entities took {0} secs.'.format(elapsed_time.total_seconds()))
        # Azure allocates 5 seconds to execute a query; if it runs slowly,
        # it will return fewer results and make the test fail.
        self.assertEqual(len(resp), total_entities_count)

    def test_query_entities_with_filter(self):
        # Arrange
        self._create_table_with_default_entities(self.table_name, 2)
        self.ts.insert_entity(
            self.table_name,
            self._create_default_entity_dict('MyOtherPartition', '3'))

        # Act
        resp = self.ts.query_entities(self.table_name,
                                      "PartitionKey eq 'MyPartition'")

        # Assert
        self.assertEqual(len(resp), 2)
        for entity in resp:
            self.assertEqual(entity.PartitionKey, 'MyPartition')
            self._assert_default_entity(entity)

    def test_query_entities_with_select(self):
        # Arrange
        self._create_table_with_default_entities(self.table_name, 2)

        # Act
        resp = self.ts.query_entities(self.table_name, None, 'age,sex')

        # Assert
        self.assertEqual(len(resp), 2)
        self.assertEqual(resp[0].age, 39)
        self.assertEqual(resp[0].sex, 'male')
        self.assertFalse(hasattr(resp[0], "birthday"))
        self.assertFalse(hasattr(resp[0], "married"))
        self.assertFalse(hasattr(resp[0], "deceased"))

    def test_query_entities_with_top(self):
        # Arrange
        self._create_table_with_default_entities(self.table_name, 3)

        # Act
        resp = self.ts.query_entities(self.table_name, None, None, 2)

        # Assert
        self.assertEqual(len(resp), 2)

    def test_query_entities_with_top_and_next(self):
        # Arrange
        self._create_table_with_default_entities(self.table_name, 5)

        # Act
        resp1 = self.ts.query_entities(self.table_name, None, None, 2)
        resp2 = self.ts.query_entities(
            self.table_name, None, None, 2,
            resp1.x_ms_continuation['NextPartitionKey'],
            resp1.x_ms_continuation['NextRowKey'])
        resp3 = self.ts.query_entities(
            self.table_name, None, None, 2,
            resp2.x_ms_continuation['NextPartitionKey'],
            resp2.x_ms_continuation['NextRowKey'])

        # Assert
        self.assertEqual(len(resp1), 2)
        self.assertEqual(len(resp2), 2)
        self.assertEqual(len(resp3), 1)
        self.assertEqual(resp1[0].RowKey, '1')
        self.assertEqual(resp1[1].RowKey, '2')
        self.assertEqual(resp2[0].RowKey, '3')
        self.assertEqual(resp2[1].RowKey, '4')
        self.assertEqual(resp3[0].RowKey, '5')

    def test_update_entity(self):
        # Arrange
        self._create_table_with_default_entities(self.table_name, 1)

        # Act
        sent_entity = self._create_updated_entity_dict('MyPartition', '1')
        resp = self.ts.update_entity(self.table_name, 'MyPartition', '1',
                                     sent_entity)

        # Assert
        self.assertIsNotNone(resp)
        received_entity = self.ts.get_entity(self.table_name, 'MyPartition', '1')
        self._assert_updated_entity(received_entity)

    def test_update_entity_with_if_matches(self):
        # Arrange
        entities = self._create_table_with_default_entities(self.table_name, 1)

        # Act
        sent_entity = self._create_updated_entity_dict('MyPartition', '1')
        resp = self.ts.update_entity(
            self.table_name, 'MyPartition', '1', sent_entity,
            if_match=entities[0].etag)

        # Assert
        self.assertIsNotNone(resp)
        received_entity = self.ts.get_entity(self.table_name, 'MyPartition', '1')
        self._assert_updated_entity(received_entity)

    def test_update_entity_with_if_doesnt_match(self):
        # Arrange
        entities = self._create_table_with_default_entities(self.table_name, 1)

        # Act
        sent_entity = self._create_updated_entity_dict('MyPartition', '1')
        with self.assertRaises(WindowsAzureError):
            self.ts.update_entity(
                self.table_name, 'MyPartition', '1', sent_entity,
                if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"')

        # Assert

    def test_insert_or_merge_entity_with_existing_entity(self):
        # Arrange
        self._create_table_with_default_entities(self.table_name, 1)

        # Act
        sent_entity = self._create_updated_entity_dict('MyPartition', '1')
        resp =
self.ts.insert_or_merge_entity( self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_merged_entity(received_entity) def test_insert_or_merge_entity_with_non_existing_entity(self): # Arrange self._create_table(self.table_name) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.ts.insert_or_merge_entity( self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_updated_entity(received_entity) def test_insert_or_replace_entity_with_existing_entity(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.ts.insert_or_replace_entity( self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_updated_entity(received_entity) def test_insert_or_replace_entity_with_non_existing_entity(self): # Arrange self._create_table(self.table_name) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.ts.insert_or_replace_entity( self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_updated_entity(received_entity) def test_merge_entity(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.ts.merge_entity( self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_merged_entity(received_entity) def test_merge_entity_not_existing(self): # Arrange self._create_table(self.table_name) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') with self.assertRaises(WindowsAzureError): self.ts.merge_entity( self.table_name, 'MyPartition', '1', sent_entity) # Assert def test_merge_entity_with_if_matches(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.ts.merge_entity( self.table_name, 'MyPartition', '1', sent_entity, if_match=entities[0].etag) # Assert self.assertIsNotNone(resp) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_merged_entity(received_entity) def test_merge_entity_with_if_doesnt_match(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') with self.assertRaises(WindowsAzureError): self.ts.merge_entity( self.table_name, 'MyPartition', '1', sent_entity, if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"') # Assert def test_delete_entity(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act resp = self.ts.delete_entity(self.table_name, 'MyPartition', '1') # Assert self.assertIsNone(resp) with self.assertRaises(WindowsAzureError): self.ts.get_entity(self.table_name, 'MyPartition', '1') def test_delete_entity_not_existing(self): # Arrange self._create_table(self.table_name) # Act with self.assertRaises(WindowsAzureError): 
self.ts.delete_entity(self.table_name, 'MyPartition', '1') # Assert def test_delete_entity_with_if_matches(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act resp = self.ts.delete_entity( self.table_name, 'MyPartition', '1', if_match=entities[0].etag) # Assert self.assertIsNone(resp) with self.assertRaises(WindowsAzureError): self.ts.get_entity(self.table_name, 'MyPartition', '1') def test_delete_entity_with_if_doesnt_match(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act with self.assertRaises(WindowsAzureError): self.ts.delete_entity( self.table_name, 'MyPartition', '1', if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"') # Assert #--Test cases for batch --------------------------------------------- def test_with_filter_single(self): called = [] def my_filter(request, next): called.append(True) return next(request) tc = self.ts.with_filter(my_filter) tc.create_table(self.table_name) self.assertTrue(called) del called[:] tc.delete_table(self.table_name) self.assertTrue(called) del called[:] def test_with_filter_chained(self): called = [] def filter_a(request, next): called.append('a') return next(request) def filter_b(request, next): called.append('b') return next(request) tc = self.ts.with_filter(filter_a).with_filter(filter_b) tc.create_table(self.table_name) self.assertEqual(called, ['b', 'a']) tc.delete_table(self.table_name) def test_batch_insert(self): # Arrange self._create_table(self.table_name) # Act entity = Entity() entity.PartitionKey = '001' entity.RowKey = 'batch_insert' entity.test = EntityProperty('Edm.Boolean', 'true') entity.test2 = 'value' entity.test3 = 3 entity.test4 = EntityProperty('Edm.Int64', '1234567890') entity.test5 = datetime.utcnow() self.ts.begin_batch() self.ts.insert_entity(self.table_name, entity) self.ts.commit_batch() # Assert result = self.ts.get_entity(self.table_name, '001', 'batch_insert') self.assertIsNotNone(result) def test_batch_update(self): # Arrange self._create_table(self.table_name) # Act entity = Entity() entity.PartitionKey = '001' entity.RowKey = 'batch_update' entity.test = EntityProperty('Edm.Boolean', 'true') entity.test2 = 'value' entity.test3 = 3 entity.test4 = EntityProperty('Edm.Int64', '1234567890') entity.test5 = datetime.utcnow() self.ts.insert_entity(self.table_name, entity) entity = self.ts.get_entity(self.table_name, '001', 'batch_update') self.assertEqual(3, entity.test3) entity.test2 = 'value1' self.ts.begin_batch() self.ts.update_entity(self.table_name, '001', 'batch_update', entity) self.ts.commit_batch() entity = self.ts.get_entity(self.table_name, '001', 'batch_update') # Assert self.assertEqual('value1', entity.test2) def test_batch_merge(self): # Arrange self._create_table(self.table_name) # Act entity = Entity() entity.PartitionKey = '001' entity.RowKey = 'batch_merge' entity.test = EntityProperty('Edm.Boolean', 'true') entity.test2 = 'value' entity.test3 = 3 entity.test4 = EntityProperty('Edm.Int64', '1234567890') entity.test5 = datetime.utcnow() self.ts.insert_entity(self.table_name, entity) entity = self.ts.get_entity(self.table_name, '001', 'batch_merge') self.assertEqual(3, entity.test3) entity = Entity() entity.PartitionKey = '001' entity.RowKey = 'batch_merge' entity.test2 = 'value1' self.ts.begin_batch() self.ts.merge_entity(self.table_name, '001', 'batch_merge', entity) self.ts.commit_batch() entity = self.ts.get_entity(self.table_name, '001', 'batch_merge') # Assert self.assertEqual('value1', entity.test2) 
self.assertEqual(1234567890, entity.test4) def test_batch_update_if_match(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') self.ts.begin_batch() resp = self.ts.update_entity( self.table_name, 'MyPartition', '1', sent_entity, if_match=entities[0].etag) self.ts.commit_batch() # Assert self.assertIsNone(resp) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_updated_entity(received_entity) def test_batch_update_if_doesnt_match(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 2) # Act sent_entity1 = self._create_updated_entity_dict('MyPartition', '1') sent_entity2 = self._create_updated_entity_dict('MyPartition', '2') self.ts.begin_batch() self.ts.update_entity( self.table_name, 'MyPartition', '1', sent_entity1, if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"') self.ts.update_entity( self.table_name, 'MyPartition', '2', sent_entity2) try: self.ts.commit_batch() except WindowsAzureBatchOperationError as error: self.assertEqual(error.code, 'UpdateConditionNotSatisfied') self.assertTrue(str(error).startswith('0:The update condition specified in the request was not satisfied.')) else: self.fail('WindowsAzureBatchOperationError was expected') # Assert received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_default_entity(received_entity) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '2') self._assert_default_entity(received_entity) def test_batch_insert_replace(self): # Arrange self._create_table(self.table_name) # Act entity = Entity() entity.PartitionKey = '001' entity.RowKey = 'batch_insert_replace' entity.test = EntityProperty('Edm.Boolean', 'true') entity.test2 = 'value' entity.test3 = 3 entity.test4 = EntityProperty('Edm.Int64', '1234567890') entity.test5 = datetime.utcnow() self.ts.begin_batch() self.ts.insert_or_replace_entity( self.table_name, entity.PartitionKey, entity.RowKey, entity) self.ts.commit_batch() entity = self.ts.get_entity( self.table_name, '001', 'batch_insert_replace') # Assert self.assertIsNotNone(entity) self.assertEqual('value', entity.test2) self.assertEqual(1234567890, entity.test4) def test_batch_insert_merge(self): # Arrange self._create_table(self.table_name) # Act entity = Entity() entity.PartitionKey = '001' entity.RowKey = 'batch_insert_merge' entity.test = EntityProperty('Edm.Boolean', 'true') entity.test2 = 'value' entity.test3 = 3 entity.test4 = EntityProperty('Edm.Int64', '1234567890') entity.test5 = datetime.utcnow() self.ts.begin_batch() self.ts.insert_or_merge_entity( self.table_name, entity.PartitionKey, entity.RowKey, entity) self.ts.commit_batch() entity = self.ts.get_entity( self.table_name, '001', 'batch_insert_merge') # Assert self.assertIsNotNone(entity) self.assertEqual('value', entity.test2) self.assertEqual(1234567890, entity.test4) def test_batch_delete(self): # Arrange self._create_table(self.table_name) # Act entity = Entity() entity.PartitionKey = '001' entity.RowKey = 'batch_delete' entity.test = EntityProperty('Edm.Boolean', 'true') entity.test2 = 'value' entity.test3 = 3 entity.test4 = EntityProperty('Edm.Int64', '1234567890') entity.test5 = datetime.utcnow() self.ts.insert_entity(self.table_name, entity) entity = self.ts.get_entity(self.table_name, '001', 'batch_delete') #self.assertEqual(3, entity.test3) self.ts.begin_batch() self.ts.delete_entity(self.table_name, '001', 'batch_delete') 
self.ts.commit_batch() def test_batch_inserts(self): # Arrange self._create_table(self.table_name) # Act entity = Entity() entity.PartitionKey = 'batch_inserts' entity.test = EntityProperty('Edm.Boolean', 'true') entity.test2 = 'value' entity.test3 = 3 entity.test4 = EntityProperty('Edm.Int64', '1234567890') self.ts.begin_batch() for i in range(100): entity.RowKey = str(i) self.ts.insert_entity(self.table_name, entity) self.ts.commit_batch() entities = self.ts.query_entities( self.table_name, "PartitionKey eq 'batch_inserts'", '') # Assert self.assertIsNotNone(entities) self.assertEqual(100, len(entities)) def test_batch_all_operations_together(self): # Arrange self._create_table(self.table_name) # Act entity = Entity() entity.PartitionKey = '003' entity.RowKey = 'batch_all_operations_together-1' entity.test = EntityProperty('Edm.Boolean', 'true') entity.test2 = 'value' entity.test3 = 3 entity.test4 = EntityProperty('Edm.Int64', '1234567890') entity.test5 = datetime.utcnow() self.ts.insert_entity(self.table_name, entity) entity.RowKey = 'batch_all_operations_together-2' self.ts.insert_entity(self.table_name, entity) entity.RowKey = 'batch_all_operations_together-3' self.ts.insert_entity(self.table_name, entity) entity.RowKey = 'batch_all_operations_together-4' self.ts.insert_entity(self.table_name, entity) self.ts.begin_batch() entity.RowKey = 'batch_all_operations_together' self.ts.insert_entity(self.table_name, entity) entity.RowKey = 'batch_all_operations_together-1' self.ts.delete_entity( self.table_name, entity.PartitionKey, entity.RowKey) entity.RowKey = 'batch_all_operations_together-2' entity.test3 = 10 self.ts.update_entity( self.table_name, entity.PartitionKey, entity.RowKey, entity) entity.RowKey = 'batch_all_operations_together-3' entity.test3 = 100 self.ts.merge_entity( self.table_name, entity.PartitionKey, entity.RowKey, entity) entity.RowKey = 'batch_all_operations_together-4' entity.test3 = 10 self.ts.insert_or_replace_entity( self.table_name, entity.PartitionKey, entity.RowKey, entity) entity.RowKey = 'batch_all_operations_together-5' self.ts.insert_or_merge_entity( self.table_name, entity.PartitionKey, entity.RowKey, entity) self.ts.commit_batch() # Assert entities = self.ts.query_entities( self.table_name, "PartitionKey eq '003'", '') self.assertEqual(5, len(entities)) def test_batch_same_row_operations_fail(self): # Arrange self._create_table(self.table_name) entity = self._create_default_entity_dict('001', 'batch_negative_1') self.ts.insert_entity(self.table_name, entity) # Act with self.assertRaises(WindowsAzureError): self.ts.begin_batch() entity = self._create_updated_entity_dict( '001', 'batch_negative_1') self.ts.update_entity( self.table_name, entity['PartitionKey'], entity['RowKey'], entity) entity = self._create_default_entity_dict( '001', 'batch_negative_1') self.ts.merge_entity( self.table_name, entity['PartitionKey'], entity['RowKey'], entity) self.ts.cancel_batch() # Assert def test_batch_different_partition_operations_fail(self): # Arrange self._create_table(self.table_name) entity = self._create_default_entity_dict('001', 'batch_negative_1') self.ts.insert_entity(self.table_name, entity) # Act with self.assertRaises(WindowsAzureError): self.ts.begin_batch() entity = self._create_updated_entity_dict( '001', 'batch_negative_1') self.ts.update_entity( self.table_name, entity['PartitionKey'], entity['RowKey'], entity) entity = self._create_default_entity_dict( '002', 'batch_negative_1') self.ts.insert_entity(self.table_name, entity) self.ts.cancel_batch() # 
Assert def test_batch_different_table_operations_fail(self): # Arrange other_table_name = self.table_name + 'other' self.additional_table_names = [other_table_name] self._create_table(self.table_name) self._create_table(other_table_name) # Act with self.assertRaises(WindowsAzureError): self.ts.begin_batch() entity = self._create_default_entity_dict( '001', 'batch_negative_1') self.ts.insert_entity(self.table_name, entity) entity = self._create_default_entity_dict( '001', 'batch_negative_2') self.ts.insert_entity(other_table_name, entity) self.ts.cancel_batch() def test_unicode_property_value(self): ''' regression test for github issue #57''' # Act self._create_table(self.table_name) self.ts.insert_entity( self.table_name, {'PartitionKey': 'test', 'RowKey': 'test1', 'Description': u'ꀕ'}) self.ts.insert_entity( self.table_name, {'PartitionKey': 'test', 'RowKey': 'test2', 'Description': 'ꀕ'}) resp = self.ts.query_entities( self.table_name, "PartitionKey eq 'test'") # Assert self.assertEqual(len(resp), 2) self.assertEqual(resp[0].Description, u'ꀕ') self.assertEqual(resp[1].Description, u'ꀕ') def test_unicode_property_name(self): # Act self._create_table(self.table_name) self.ts.insert_entity( self.table_name, {'PartitionKey': 'test', 'RowKey': 'test1', u'啊齄丂狛狜': u'ꀕ'}) self.ts.insert_entity( self.table_name, {'PartitionKey': 'test', 'RowKey': 'test2', u'啊齄丂狛狜': 'hello'}) resp = self.ts.query_entities( self.table_name, "PartitionKey eq 'test'") # Assert self.assertEqual(len(resp), 2) self.assertEqual(resp[0].__dict__[u'啊齄丂狛狜'], u'ꀕ') self.assertEqual(resp[1].__dict__[u'啊齄丂狛狜'], u'hello') def test_unicode_create_table_unicode_name(self): # Arrange self.table_name = self.table_name + u'啊齄丂狛狜' # Act with self.assertRaises(WindowsAzureError): # not supported - table name must be alphanumeric, lowercase self.ts.create_table(self.table_name) # Assert def test_empty_and_spaces_property_value(self): # Act self._create_table(self.table_name) self.ts.insert_entity( self.table_name, { 'PartitionKey': 'test', 'RowKey': 'test1', 'EmptyByte': '', 'EmptyUnicode': u'', 'SpacesOnlyByte': ' ', 'SpacesOnlyUnicode': u' ', 'SpacesBeforeByte': ' Text', 'SpacesBeforeUnicode': u' Text', 'SpacesAfterByte': 'Text ', 'SpacesAfterUnicode': u'Text ', 'SpacesBeforeAndAfterByte': ' Text ', 'SpacesBeforeAndAfterUnicode': u' Text ', }) resp = self.ts.get_entity(self.table_name, 'test', 'test1') # Assert self.assertIsNotNone(resp) self.assertEqual(resp.EmptyByte, '') self.assertEqual(resp.EmptyUnicode, u'') self.assertEqual(resp.SpacesOnlyByte, ' ') self.assertEqual(resp.SpacesOnlyUnicode, u' ') self.assertEqual(resp.SpacesBeforeByte, ' Text') self.assertEqual(resp.SpacesBeforeUnicode, u' Text') self.assertEqual(resp.SpacesAfterByte, 'Text ') self.assertEqual(resp.SpacesAfterUnicode, u'Text ') self.assertEqual(resp.SpacesBeforeAndAfterByte, ' Text ') self.assertEqual(resp.SpacesBeforeAndAfterUnicode, u' Text ') def test_none_property_value(self): # Act self._create_table(self.table_name) self.ts.insert_entity( self.table_name, { 'PartitionKey': 'test', 'RowKey': 'test1', 'NoneValue': None, }) resp = self.ts.get_entity(self.table_name, 'test', 'test1') # Assert self.assertIsNotNone(resp) self.assertFalse(hasattr(resp, 'NoneValue')) def test_binary_property_value(self): # Act binary_data = b'\x01\x02\x03\x04\x05\x06\x07\x08\t\n' self._create_table(self.table_name) self.ts.insert_entity( self.table_name, { 'PartitionKey': 'test', 'RowKey': 'test1', 'binary': EntityProperty('Edm.Binary', binary_data) }) resp = 
self.ts.get_entity(self.table_name, 'test', 'test1') # Assert self.assertIsNotNone(resp) self.assertEqual(resp.binary.type, 'Edm.Binary') self.assertEqual(resp.binary.value, binary_data) def test_timezone(self): # Act local_tz = tzoffset('BRST', -10800) local_date = datetime(2003, 9, 27, 9, 52, 43, tzinfo=local_tz) self._create_table(self.table_name) self.ts.insert_entity( self.table_name, { 'PartitionKey': 'test', 'RowKey': 'test1', 'date': local_date, }) resp = self.ts.get_entity(self.table_name, 'test', 'test1') # Assert self.assertIsNotNone(resp) self.assertEqual(resp.date, local_date.astimezone(tzutc())) self.assertEqual(resp.date.astimezone(local_tz), local_date)
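An aside on the continuation tokens the query tests above exercise: the same x_ms_continuation mechanism generalizes into a small paging loop. A minimal sketch against the same legacy azure.storage API; the helper name page_all_entities and the page size are ours, not from the source.

    def page_all_entities(ts, table_name, page_size=100):
        # Follow x_ms_continuation tokens until the server stops
        # returning them, yielding entities one page at a time.
        next_pk, next_rk = None, None
        while True:
            page = ts.query_entities(table_name, None, None, page_size,
                                     next_pk, next_rk)
            for entity in page:
                yield entity
            continuation = getattr(page, 'x_ms_continuation', None)
            if not continuation or not continuation.get('NextPartitionKey'):
                break
            next_pk = continuation['NextPartitionKey']
            next_rk = continuation['NextRowKey']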
with open(os.path.join('bosh', 'settings'), "w") as tmpfile: tmpfile.write(json.dumps(settings, indent=4, sort_keys=True)) username = settings["username"] home_dir = os.path.join("/home", username) install_log = os.path.join(home_dir, "install.log") # Prepare the containers storage_account_name = settings["STORAGE-ACCOUNT-NAME"] storage_access_key = settings["STORAGE-ACCESS-KEY"] blob_service = BlobService(storage_account_name, storage_access_key) blob_service.create_container('bosh') blob_service.create_container(container_name='stemcell', x_ms_blob_public_access='blob') # Prepare the table for storing metadata of the storage account and stemcells table_service = TableService(storage_account_name, storage_access_key) table_service.create_table('stemcells') # Generate the private key and certificate call("sh create_cert.sh", shell=True) call("cp bosh.key ./bosh/bosh", shell=True) with open('bosh_cert.pem', 'r') as tmpfile: ssh_cert = tmpfile.read() ssh_cert = "|\n" + ssh_cert ssh_cert = "\n ".join(ssh_cert.split('\n')) # Render the yml template for bosh-init bosh_template = 'bosh.yml' if os.path.exists(bosh_template): with open(bosh_template, 'r') as tmpfile: contents = tmpfile.read()
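The excerpt stops after reading the template; the remaining step is plainly a placeholder substitution before handing the file to bosh-init. A sketch under that assumption (the REPLACE_WITH_* placeholder names are hypothetical, not taken from the source):

    # Hypothetical rendering step; placeholder names are assumptions.
    rendered = contents.replace(
        'REPLACE_WITH_STORAGE_ACCOUNT_NAME', storage_account_name)
    rendered = rendered.replace('REPLACE_WITH_SSH_CERTIFICATE', ssh_cert)
    with open(bosh_template, 'w') as tmpfile:
        tmpfile.write(rendered)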
def delete_tables(): print "deleting tables: " + ", ".join(TABLES) for account, key in zip(ACCOUNT, KEY): ts = TableService(account, key) for table in TABLES: ts.delete_table(table)
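For symmetry, the matching create-side helper under the same assumed ACCOUNT, KEY, and TABLES module globals (a sketch, not part of the original script):

    def create_tables():
        print "creating tables: " + ", ".join(TABLES)
        # One TableService per storage account, same pairing as above.
        for account, key in zip(ACCOUNT, KEY):
            ts = TableService(account, key)
            for table in TABLES:
                ts.create_table(table)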
from tools.drawing import * from tools.blobcache import BlobCache from tools.fieldtable import FieldManager from app.settings import * # Storage for computed results # Each time-consuming computation stores its results here, # -- keyed by the request that generated the result. # AAA changed to ice2ocean blob storage: cache = BlobCache(connection_properties["STORAGE_ACCOUNT_NAME"], connection_properties["STORAGE_ACCOUNT_KEY"], connection_properties["STORAGE_CONTAINER"]) # Data parameters (if we want to get at them directly) table_service = TableService(connection_properties["STORAGE_ACCOUNT_NAME"], connection_properties["STORAGE_ACCOUNT_KEY"]) # Data parameters wrapped in a more specific (and preferred) interface fieldManager = FieldManager(table_service, connection_properties["FIELD_TABLE"]) # Projection for bing maps bingProjection = pyproj.Proj("+init=EPSG:3857") # time details... # AAA: modified to handle month/day data def extract_time(request, default_value=None): ''' How to ask for a time? Direct: give me julian hour h from year y
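A quick illustration of the bingProjection defined above: pyproj Proj objects are callable, mapping longitude/latitude in degrees to EPSG:3857 metres and back. The coordinates are arbitrary sample values:

    # Forward: geographic lon/lat (degrees) -> web-mercator x/y (metres)
    x, y = bingProjection(-122.33, 47.61)
    # Inverse: web-mercator x/y back to lon/lat degrees
    lon, lat = bingProjection(x, y, inverse=True)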
with open("ASA.key", "r") as myfile: accountName = myfile.read().replace('\n', '') # accountKey = environ["AZURE_STORAGE_ACCESS_KEY"] with open("ASK.key", "r") as myfile: accountKey = myfile.read().replace('\n', '') # Create blob service blob_service = BlobService(account_name=accountName, account_key=accountKey) blob_service.create_container(blob_small) blob_service.create_container(blob_big) blob_service.create_container(blob_analysis) # Open queue with given credentials queue_service = QueueService(account_name=accountName, account_key=accountKey) # Open table service table_service = TableService(account_name=accountName, account_key=accountKey) # Repeat while True: # get images from *imagesQueue* - it is invoked by CRON messages = queue_service.get_messages(imagesQueue) if len(messages) == 0: sleep(15) for message in messages: # get image: image ID imgBlobName = b64decode(message.message_text) print(imgBlobName) tableRowKey = imgBlobName # Check if analysis completed: if not, continue to next message
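One step this excerpt never reaches: with the legacy QueueService, fetched messages reappear after their visibility timeout unless they are explicitly deleted using the message id and pop receipt. A sketch of that tail end of the loop, reusing the message variable above (not the original code):

    # After the analysis for imgBlobName is confirmed complete,
    # remove the message so it is not redelivered later.
    queue_service.delete_message(imagesQueue, message.message_id,
                                 message.pop_receipt)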
from azure.storage import TableService, Entity table_service = TableService(account_name='upmsample', account_key='5YC/6x9KL56rtaAUAZMgGsREDvvPHYJIMqH3z1c9IgQLy0qMP+Awr+7j51Tfzniczj//6jn7lvYQutD/mHm6dw==') table_service.create_table('ledtracktable') task = table_service.get_entity('ledtracktable', 'ledswitch', '1') print(task.state)
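Writing the switch back is symmetrical: insert_or_replace_entity upserts the same row. A sketch; the 'on' value for state is a guess at this sample's convention. In production the account key would come from configuration rather than source, as the neighboring snippets do with their ASA.key/ASK.key files.

    # Upsert the switch row read above with a new state value.
    table_service.insert_or_replace_entity(
        'ledtracktable', 'ledswitch', '1',
        {'PartitionKey': 'ledswitch', 'RowKey': '1', 'state': 'on'})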
with open("ASA.key", "r") as myfile: accountName = myfile.read().replace('\n', '') # accountKey = environ["AZURE_STORAGE_ACCESS_KEY"] with open("ASK.key", "r") as myfile: accountKey = myfile.read().replace('\n', '') # Create blob service blob_service = BlobService(account_name=accountName, account_key=accountKey) blob_service.create_container(blob_container) blob_service.create_container(blob_analysis) # Open queue with given credentials queue_service = QueueService(account_name=accountName, account_key=accountKey) # Open table service table_service = TableService(account_name=accountName, account_key=accountKey) # Analysis results results = None # Regions for analysis region = 4 # Repeat while True: # get images from *imagesQueue* - it is invoked by CRON messages = queue_service.get_messages(imagesQueue) if len(messages) == 0: sleep(15) for message in messages: # get image: image ID
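Both workers poll with the default arguments. Assuming the azure 0.x signature get_messages(queue_name, numofmessages, visibilitytimeout), the loop head can instead fetch a batch and hold the messages invisible while they are processed (a sketch):

    # Fetch up to 16 messages and hide them for 5 minutes while we work;
    # an unprocessed message becomes visible again after the timeout.
    messages = queue_service.get_messages(imagesQueue, numofmessages=16,
                                          visibilitytimeout=300)
    if len(messages) == 0:
        sleep(15)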
class TableServiceTest(AzureTestCase): def setUp(self): self.tc = TableService( account_name=credentials.getStorageServicesName(), account_key=credentials.getStorageServicesKey()) proxy_host = credentials.getProxyHost() proxy_port = credentials.getProxyPort() if proxy_host: self.tc.set_proxy(proxy_host, proxy_port) __uid = getUniqueTestRunID() table_base_name = u'testtable%s' % (__uid) self.table_name = getUniqueNameBasedOnCurrentTime(table_base_name) self.additional_table_names = [] def tearDown(self): self.cleanup() return super(TableServiceTest, self).tearDown() def cleanup(self): try: self.tc.delete_table(self.table_name) except: pass for name in self.additional_table_names: try: self.tc.delete_table(name) except: pass #--Helpers----------------------------------------------------------------- def _create_table(self, table_name): ''' Creates a table with the specified name. ''' self.tc.create_table(table_name, True) def _create_table_with_default_entities(self, table_name, entity_count): ''' Creates a table with the specified name and adds entities with the default set of values. PartitionKey is set to 'MyPartition' and RowKey is set to a unique counter value starting at 1 (as a string). ''' entities = [] self._create_table(table_name) for i in range(1, entity_count + 1): entities.append( self.tc.insert_entity( table_name, self._create_default_entity_dict('MyPartition', str(i)))) return entities def _create_default_entity_class(self, partition, row): ''' Creates a class-based entity with fixed values, using all of the supported data types. ''' # TODO: Edm.Binary and null entity = Entity() entity.PartitionKey = partition entity.RowKey = row entity.age = 39 entity.sex = 'male' entity.married = True entity.deceased = False entity.optional = None entity.ratio = 3.1 entity.large = 9333111000 entity.Birthday = datetime(1973, 10, 04) entity.birthday = datetime(1970, 10, 04) entity.binary = None entity.other = EntityProperty('Edm.Int64', 20) entity.clsid = EntityProperty('Edm.Guid', 'c9da6455-213d-42c9-9a79-3e9149a57833') return entity def _create_default_entity_dict(self, partition, row): ''' Creates a dictionary-based entity with fixed values, using all of the supported data types. ''' # TODO: Edm.Binary and null return { 'PartitionKey': partition, 'RowKey': row, 'age': 39, 'sex': 'male', 'married': True, 'deceased': False, 'optional': None, 'ratio': 3.1, 'large': 9333111000, 'Birthday': datetime(1973, 10, 04), 'birthday': datetime(1970, 10, 04), 'binary': EntityProperty('Edm.Binary', None), 'other': EntityProperty('Edm.Int64', 20), 'clsid': EntityProperty('Edm.Guid', 'c9da6455-213d-42c9-9a79-3e9149a57833') } def _create_updated_entity_dict(self, partition, row): ''' Creates a dictionary-based entity with fixed values, with a different set of values than the default entity. It adds fields, changes field values, changes field types, and removes fields when compared to the default entity. ''' return { 'PartitionKey': partition, 'RowKey': row, 'age': 'abc', 'sex': 'female', 'sign': 'aquarius', 'birthday': datetime(1991, 10, 04) } def _assert_default_entity(self, entity): ''' Asserts that the entity passed in matches the default entity. 
''' self.assertEquals(entity.age, 39) self.assertEquals(entity.sex, 'male') self.assertEquals(entity.married, True) self.assertEquals(entity.deceased, False) self.assertFalse(hasattr(entity, "aquarius")) self.assertEquals(entity.ratio, 3.1) self.assertEquals(entity.large, 9333111000) self.assertEquals(entity.Birthday, datetime(1973, 10, 04)) self.assertEquals(entity.birthday, datetime(1970, 10, 04)) self.assertEquals(entity.other, 20) self.assertIsInstance(entity.clsid, EntityProperty) self.assertEquals(entity.clsid.type, 'Edm.Guid') self.assertEquals(entity.clsid.value, 'c9da6455-213d-42c9-9a79-3e9149a57833') def _assert_updated_entity(self, entity): ''' Asserts that the entity passed in matches the updated entity. ''' self.assertEquals(entity.age, 'abc') self.assertEquals(entity.sex, 'female') self.assertFalse(hasattr(entity, "married")) self.assertFalse(hasattr(entity, "deceased")) self.assertEquals(entity.sign, 'aquarius') self.assertFalse(hasattr(entity, "optional")) self.assertFalse(hasattr(entity, "ratio")) self.assertFalse(hasattr(entity, "large")) self.assertFalse(hasattr(entity, "Birthday")) self.assertEquals(entity.birthday, datetime(1991, 10, 04)) self.assertFalse(hasattr(entity, "other")) self.assertFalse(hasattr(entity, "clsid")) def _assert_merged_entity(self, entity): ''' Asserts that the entity passed in matches the default entity merged with the updated entity. ''' self.assertEquals(entity.age, 'abc') self.assertEquals(entity.sex, 'female') self.assertEquals(entity.sign, 'aquarius') self.assertEquals(entity.married, True) self.assertEquals(entity.deceased, False) self.assertEquals(entity.ratio, 3.1) self.assertEquals(entity.large, 9333111000) self.assertEquals(entity.Birthday, datetime(1973, 10, 04)) self.assertEquals(entity.birthday, datetime(1991, 10, 04)) self.assertEquals(entity.other, 20) self.assertIsInstance(entity.clsid, EntityProperty) self.assertEquals(entity.clsid.type, 'Edm.Guid') self.assertEquals(entity.clsid.value, 'c9da6455-213d-42c9-9a79-3e9149a57833') #--Test cases for table service ------------------------------------------- def test_get_set_table_service_properties(self): table_properties = self.tc.get_table_service_properties() self.tc.set_table_service_properties(table_properties) tests = [ ('logging.delete', True), ('logging.delete', False), ('logging.read', True), ('logging.read', False), ('logging.write', True), ('logging.write', False), ] for path, value in tests: #print path cur = table_properties for component in path.split('.')[:-1]: cur = getattr(cur, component) last_attr = path.split('.')[-1] setattr(cur, last_attr, value) self.tc.set_table_service_properties(table_properties) retry_count = 0 while retry_count < MAX_RETRY: table_properties = self.tc.get_table_service_properties() cur = table_properties for component in path.split('.'): cur = getattr(cur, component) if value == cur: break time.sleep(1) retry_count += 1 self.assertEquals(value, cur) def test_table_service_retention_single_set(self): table_properties = self.tc.get_table_service_properties() table_properties.logging.retention_policy.enabled = False table_properties.logging.retention_policy.days = 5 # TODO: Better error, ValueError? self.assertRaises(WindowsAzureError, self.tc.set_table_service_properties, table_properties) table_properties = self.tc.get_table_service_properties() table_properties.logging.retention_policy.days = None table_properties.logging.retention_policy.enabled = True # TODO: Better error, ValueError?
self.assertRaises(WindowsAzureError, self.tc.set_table_service_properties, table_properties) def test_table_service_set_both(self): table_properties = self.tc.get_table_service_properties() table_properties.logging.retention_policy.enabled = True table_properties.logging.retention_policy.days = 5 self.tc.set_table_service_properties(table_properties) table_properties = self.tc.get_table_service_properties() self.assertEquals(True, table_properties.logging.retention_policy.enabled) self.assertEquals(5, table_properties.logging.retention_policy.days) #--Test cases for tables -------------------------------------------------- def test_create_table(self): # Arrange # Act created = self.tc.create_table(self.table_name) # Assert self.assertTrue(created) def test_create_table_fail_on_exist(self): # Arrange # Act created = self.tc.create_table(self.table_name, True) # Assert self.assertTrue(created) def test_create_table_with_already_existing_table(self): # Arrange # Act created1 = self.tc.create_table(self.table_name) created2 = self.tc.create_table(self.table_name) # Assert self.assertTrue(created1) self.assertFalse(created2) def test_create_table_with_already_existing_table_fail_on_exist(self): # Arrange # Act created = self.tc.create_table(self.table_name) with self.assertRaises(WindowsAzureError): self.tc.create_table(self.table_name, True) # Assert self.assertTrue(created) def test_query_tables(self): # Arrange self._create_table(self.table_name) # Act tables = self.tc.query_tables() for table in tables: pass # Assert tableNames = [x.name for x in tables] self.assertGreaterEqual(len(tableNames), 1) self.assertGreaterEqual(len(tables), 1) self.assertIn(self.table_name, tableNames) def test_query_tables_with_table_name(self): # Arrange self._create_table(self.table_name) # Act tables = self.tc.query_tables(self.table_name) for table in tables: pass # Assert self.assertEqual(len(tables), 1) self.assertEqual(tables[0].name, self.table_name) def test_query_tables_with_table_name_no_tables(self): # Arrange # Act with self.assertRaises(WindowsAzureError): self.tc.query_tables(self.table_name) # Assert def test_query_tables_with_top(self): # Arrange self.additional_table_names = [ self.table_name + suffix for suffix in 'abcd' ] for name in self.additional_table_names: self.tc.create_table(name) # Act tables = self.tc.query_tables(None, 3) for table in tables: pass # Assert self.assertEqual(len(tables), 3) def test_query_tables_with_top_and_next_table_name(self): # Arrange self.additional_table_names = [ self.table_name + suffix for suffix in 'abcd' ] for name in self.additional_table_names: self.tc.create_table(name) # Act tables_set1 = self.tc.query_tables(None, 3) tables_set2 = self.tc.query_tables( None, 3, tables_set1.x_ms_continuation['NextTableName']) # Assert self.assertEqual(len(tables_set1), 3) self.assertGreaterEqual(len(tables_set2), 1) self.assertLessEqual(len(tables_set2), 3) def test_delete_table_with_existing_table(self): # Arrange self._create_table(self.table_name) # Act deleted = self.tc.delete_table(self.table_name) # Assert self.assertTrue(deleted) tables = self.tc.query_tables() self.assertNamedItemNotInContainer(tables, self.table_name) def test_delete_table_with_existing_table_fail_not_exist(self): # Arrange self._create_table(self.table_name) # Act deleted = self.tc.delete_table(self.table_name, True) # Assert self.assertTrue(deleted) tables = self.tc.query_tables() self.assertNamedItemNotInContainer(tables, self.table_name) def 
test_delete_table_with_non_existing_table(self): # Arrange # Act deleted = self.tc.delete_table(self.table_name) # Assert self.assertFalse(deleted) def test_delete_table_with_non_existing_table_fail_not_exist(self): # Arrange # Act with self.assertRaises(WindowsAzureError): self.tc.delete_table(self.table_name, True) # Assert #--Test cases for entities ------------------------------------------ def test_insert_entity_dictionary(self): # Arrange self._create_table(self.table_name) # Act dict = self._create_default_entity_dict('MyPartition', '1') resp = self.tc.insert_entity(self.table_name, dict) # Assert self.assertIsNotNone(resp) def test_insert_entity_class_instance(self): # Arrange self._create_table(self.table_name) # Act entity = self._create_default_entity_class('MyPartition', '1') resp = self.tc.insert_entity(self.table_name, entity) # Assert self.assertIsNotNone(resp) def test_insert_entity_conflict(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act with self.assertRaises(WindowsAzureError): self.tc.insert_entity( self.table_name, self._create_default_entity_dict('MyPartition', '1')) # Assert def test_get_entity(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act resp = self.tc.get_entity(self.table_name, 'MyPartition', '1') # Assert self.assertEquals(resp.PartitionKey, 'MyPartition') self.assertEquals(resp.RowKey, '1') self._assert_default_entity(resp) def test_get_entity_not_existing(self): # Arrange self._create_table(self.table_name) # Act with self.assertRaises(WindowsAzureError): self.tc.get_entity(self.table_name, 'MyPartition', '1') # Assert def test_get_entity_with_select(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act resp = self.tc.get_entity(self.table_name, 'MyPartition', '1', 'age,sex') # Assert self.assertEquals(resp.age, 39) self.assertEquals(resp.sex, 'male') self.assertFalse(hasattr(resp, "birthday")) self.assertFalse(hasattr(resp, "married")) self.assertFalse(hasattr(resp, "deceased")) def test_query_entities(self): # Arrange self._create_table_with_default_entities(self.table_name, 2) # Act resp = self.tc.query_entities(self.table_name) # Assert self.assertEquals(len(resp), 2) for entity in resp: self.assertEquals(entity.PartitionKey, 'MyPartition') self._assert_default_entity(entity) self.assertEquals(resp[0].RowKey, '1') self.assertEquals(resp[1].RowKey, '2') def test_query_entities_with_filter(self): # Arrange self._create_table_with_default_entities(self.table_name, 2) self.tc.insert_entity( self.table_name, self._create_default_entity_dict('MyOtherPartition', '3')) # Act resp = self.tc.query_entities(self.table_name, "PartitionKey eq 'MyPartition'") # Assert self.assertEquals(len(resp), 2) for entity in resp: self.assertEquals(entity.PartitionKey, 'MyPartition') self._assert_default_entity(entity) def test_query_entities_with_select(self): # Arrange self._create_table_with_default_entities(self.table_name, 2) # Act resp = self.tc.query_entities(self.table_name, None, 'age,sex') # Assert self.assertEquals(len(resp), 2) self.assertEquals(resp[0].age, 39) self.assertEquals(resp[0].sex, 'male') self.assertFalse(hasattr(resp[0], "birthday")) self.assertFalse(hasattr(resp[0], "married")) self.assertFalse(hasattr(resp[0], "deceased")) def test_query_entities_with_top(self): # Arrange self._create_table_with_default_entities(self.table_name, 3) # Act resp = self.tc.query_entities(self.table_name, None, None, 2) # Assert self.assertEquals(len(resp), 2) def 
test_query_entities_with_top_and_next(self): # Arrange self._create_table_with_default_entities(self.table_name, 5) # Act resp1 = self.tc.query_entities(self.table_name, None, None, 2) resp2 = self.tc.query_entities( self.table_name, None, None, 2, resp1.x_ms_continuation['NextPartitionKey'], resp1.x_ms_continuation['NextRowKey']) resp3 = self.tc.query_entities( self.table_name, None, None, 2, resp2.x_ms_continuation['NextPartitionKey'], resp2.x_ms_continuation['NextRowKey']) # Assert self.assertEquals(len(resp1), 2) self.assertEquals(len(resp2), 2) self.assertEquals(len(resp3), 1) self.assertEquals(resp1[0].RowKey, '1') self.assertEquals(resp1[1].RowKey, '2') self.assertEquals(resp2[0].RowKey, '3') self.assertEquals(resp2[1].RowKey, '4') self.assertEquals(resp3[0].RowKey, '5') def test_update_entity(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.tc.update_entity(self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.tc.get_entity(self.table_name, 'MyPartition', '1') self._assert_updated_entity(received_entity) def test_update_entity_with_if_matches(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.tc.update_entity(self.table_name, 'MyPartition', '1', sent_entity, if_match=entities[0].etag) # Assert self.assertIsNotNone(resp) received_entity = self.tc.get_entity(self.table_name, 'MyPartition', '1') self._assert_updated_entity(received_entity) def test_update_entity_with_if_doesnt_match(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') with self.assertRaises(WindowsAzureError): self.tc.update_entity( self.table_name, 'MyPartition', '1', sent_entity, if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"') # Assert def test_insert_or_merge_entity_with_existing_entity(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.tc.insert_or_merge_entity(self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.tc.get_entity(self.table_name, 'MyPartition', '1') self._assert_merged_entity(received_entity) def test_insert_or_merge_entity_with_non_existing_entity(self): # Arrange self._create_table(self.table_name) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.tc.insert_or_merge_entity(self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.tc.get_entity(self.table_name, 'MyPartition', '1') self._assert_updated_entity(received_entity) def test_insert_or_replace_entity_with_existing_entity(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.tc.insert_or_replace_entity(self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.tc.get_entity(self.table_name, 'MyPartition', '1') self._assert_updated_entity(received_entity) def test_insert_or_replace_entity_with_non_existing_entity(self): # Arrange self._create_table(self.table_name) # Act sent_entity = self._create_updated_entity_dict('MyPartition', 
'1') resp = self.tc.insert_or_replace_entity(self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.tc.get_entity(self.table_name, 'MyPartition', '1') self._assert_updated_entity(received_entity) def test_merge_entity(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.tc.merge_entity(self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.tc.get_entity(self.table_name, 'MyPartition', '1') self._assert_merged_entity(received_entity) def test_merge_entity_not_existing(self): # Arrange self._create_table(self.table_name) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') with self.assertRaises(WindowsAzureError): self.tc.merge_entity(self.table_name, 'MyPartition', '1', sent_entity) # Assert def test_merge_entity_with_if_matches(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.tc.merge_entity(self.table_name, 'MyPartition', '1', sent_entity, if_match=entities[0].etag) # Assert self.assertIsNotNone(resp) received_entity = self.tc.get_entity(self.table_name, 'MyPartition', '1') self._assert_merged_entity(received_entity) def test_merge_entity_with_if_doesnt_match(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') with self.assertRaises(WindowsAzureError): self.tc.merge_entity( self.table_name, 'MyPartition', '1', sent_entity, if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"') # Assert def test_delete_entity(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act resp = self.tc.delete_entity(self.table_name, 'MyPartition', '1') # Assert self.assertIsNone(resp) with self.assertRaises(WindowsAzureError): self.tc.get_entity(self.table_name, 'MyPartition', '1') def test_delete_entity_not_existing(self): # Arrange self._create_table(self.table_name) # Act with self.assertRaises(WindowsAzureError): self.tc.delete_entity(self.table_name, 'MyPartition', '1') # Assert def test_delete_entity_with_if_matches(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act resp = self.tc.delete_entity(self.table_name, 'MyPartition', '1', if_match=entities[0].etag) # Assert self.assertIsNone(resp) with self.assertRaises(WindowsAzureError): self.tc.get_entity(self.table_name, 'MyPartition', '1') def test_delete_entity_with_if_doesnt_match(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act with self.assertRaises(WindowsAzureError): self.tc.delete_entity( self.table_name, 'MyPartition', '1', if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"') # Assert #--Test cases for batch --------------------------------------------- def test_with_filter_single(self): called = [] def my_filter(request, next): called.append(True) return next(request) tc = self.tc.with_filter(my_filter) tc.create_table(self.table_name) self.assertTrue(called) del called[:] tc.delete_table(self.table_name) self.assertTrue(called) del called[:] def test_with_filter_chained(self): called = [] def filter_a(request, next): called.append('a') return next(request) def filter_b(request, next): called.append('b') return next(request) tc = 
        tc = self.tc.with_filter(filter_a).with_filter(filter_b)
        tc.create_table(self.table_name)
        self.assertEqual(called, ['b', 'a'])
        tc.delete_table(self.table_name)

    def test_batch_insert(self):
        # Arrange
        self._create_table(self.table_name)

        # Act
        entity = Entity()
        entity.PartitionKey = '001'
        entity.RowKey = 'batch_insert'
        entity.test = EntityProperty('Edm.Boolean', 'true')
        entity.test2 = 'value'
        entity.test3 = 3
        entity.test4 = EntityProperty('Edm.Int64', '1234567890')
        entity.test5 = datetime.utcnow()

        self.tc.begin_batch()
        self.tc.insert_entity(self.table_name, entity)
        self.tc.commit_batch()

        # Assert
        result = self.tc.get_entity(self.table_name, '001', 'batch_insert')
        self.assertIsNotNone(result)

    def test_batch_update(self):
        # Arrange
        self._create_table(self.table_name)

        # Act
        entity = Entity()
        entity.PartitionKey = '001'
        entity.RowKey = 'batch_update'
        entity.test = EntityProperty('Edm.Boolean', 'true')
        entity.test2 = 'value'
        entity.test3 = 3
        entity.test4 = EntityProperty('Edm.Int64', '1234567890')
        entity.test5 = datetime.utcnow()
        self.tc.insert_entity(self.table_name, entity)

        entity = self.tc.get_entity(self.table_name, '001', 'batch_update')
        self.assertEqual(3, entity.test3)
        entity.test2 = 'value1'

        self.tc.begin_batch()
        self.tc.update_entity(self.table_name, '001', 'batch_update', entity)
        self.tc.commit_batch()
        entity = self.tc.get_entity(self.table_name, '001', 'batch_update')

        # Assert
        self.assertEqual('value1', entity.test2)

    def test_batch_merge(self):
        # Arrange
        self._create_table(self.table_name)

        # Act
        entity = Entity()
        entity.PartitionKey = '001'
        entity.RowKey = 'batch_merge'
        entity.test = EntityProperty('Edm.Boolean', 'true')
        entity.test2 = 'value'
        entity.test3 = 3
        entity.test4 = EntityProperty('Edm.Int64', '1234567890')
        entity.test5 = datetime.utcnow()
        self.tc.insert_entity(self.table_name, entity)

        entity = self.tc.get_entity(self.table_name, '001', 'batch_merge')
        self.assertEqual(3, entity.test3)
        entity = Entity()
        entity.PartitionKey = '001'
        entity.RowKey = 'batch_merge'
        entity.test2 = 'value1'

        self.tc.begin_batch()
        self.tc.merge_entity(self.table_name, '001', 'batch_merge', entity)
        self.tc.commit_batch()
        entity = self.tc.get_entity(self.table_name, '001', 'batch_merge')

        # Assert
        self.assertEqual('value1', entity.test2)
        self.assertEqual(1234567890, entity.test4)

    def test_batch_insert_replace(self):
        # Arrange
        self._create_table(self.table_name)

        # Act
        entity = Entity()
        entity.PartitionKey = '001'
        entity.RowKey = 'batch_insert_replace'
        entity.test = EntityProperty('Edm.Boolean', 'true')
        entity.test2 = 'value'
        entity.test3 = 3
        entity.test4 = EntityProperty('Edm.Int64', '1234567890')
        entity.test5 = datetime.utcnow()

        self.tc.begin_batch()
        self.tc.insert_or_replace_entity(
            self.table_name, entity.PartitionKey, entity.RowKey, entity)
        self.tc.commit_batch()
        entity = self.tc.get_entity(
            self.table_name, '001', 'batch_insert_replace')

        # Assert
        self.assertIsNotNone(entity)
        self.assertEqual('value', entity.test2)
        self.assertEqual(1234567890, entity.test4)

    def test_batch_insert_merge(self):
        # Arrange
        self._create_table(self.table_name)

        # Act
        entity = Entity()
        entity.PartitionKey = '001'
        entity.RowKey = 'batch_insert_merge'
        entity.test = EntityProperty('Edm.Boolean', 'true')
        entity.test2 = 'value'
        entity.test3 = 3
        entity.test4 = EntityProperty('Edm.Int64', '1234567890')
        entity.test5 = datetime.utcnow()

        self.tc.begin_batch()
        self.tc.insert_or_merge_entity(
            self.table_name, entity.PartitionKey, entity.RowKey, entity)
        self.tc.commit_batch()
        entity = self.tc.get_entity(
            self.table_name, '001', 'batch_insert_merge')

        # Assert
        self.assertIsNotNone(entity)
        self.assertEqual('value', entity.test2)
        self.assertEqual(1234567890, entity.test4)

    def test_batch_delete(self):
        # Arrange
        self._create_table(self.table_name)

        # Act
        entity = Entity()
        entity.PartitionKey = '001'
        entity.RowKey = 'batch_delete'
        entity.test = EntityProperty('Edm.Boolean', 'true')
        entity.test2 = 'value'
        entity.test3 = 3
        entity.test4 = EntityProperty('Edm.Int64', '1234567890')
        entity.test5 = datetime.utcnow()
        self.tc.insert_entity(self.table_name, entity)

        entity = self.tc.get_entity(self.table_name, '001', 'batch_delete')
        #self.assertEqual(3, entity.test3)
        self.tc.begin_batch()
        self.tc.delete_entity(self.table_name, '001', 'batch_delete')
        self.tc.commit_batch()

    def test_batch_inserts(self):
        # Arrange
        self._create_table(self.table_name)

        # Act
        entity = Entity()
        entity.PartitionKey = 'batch_inserts'
        entity.test = EntityProperty('Edm.Boolean', 'true')
        entity.test2 = 'value'
        entity.test3 = 3
        entity.test4 = EntityProperty('Edm.Int64', '1234567890')

        self.tc.begin_batch()
        for i in range(100):
            entity.RowKey = str(i)
            self.tc.insert_entity(self.table_name, entity)
        self.tc.commit_batch()

        entities = self.tc.query_entities(
            self.table_name, "PartitionKey eq 'batch_inserts'", '')

        # Assert
        self.assertIsNotNone(entities)
        self.assertEqual(100, len(entities))

    def test_batch_all_operations_together(self):
        # Arrange
        self._create_table(self.table_name)

        # Act
        entity = Entity()
        entity.PartitionKey = '003'
        entity.RowKey = 'batch_all_operations_together-1'
        entity.test = EntityProperty('Edm.Boolean', 'true')
        entity.test2 = 'value'
        entity.test3 = 3
        entity.test4 = EntityProperty('Edm.Int64', '1234567890')
        entity.test5 = datetime.utcnow()
        self.tc.insert_entity(self.table_name, entity)
        entity.RowKey = 'batch_all_operations_together-2'
        self.tc.insert_entity(self.table_name, entity)
        entity.RowKey = 'batch_all_operations_together-3'
        self.tc.insert_entity(self.table_name, entity)
        entity.RowKey = 'batch_all_operations_together-4'
        self.tc.insert_entity(self.table_name, entity)

        self.tc.begin_batch()
        entity.RowKey = 'batch_all_operations_together'
        self.tc.insert_entity(self.table_name, entity)
        entity.RowKey = 'batch_all_operations_together-1'
        self.tc.delete_entity(
            self.table_name, entity.PartitionKey, entity.RowKey)
        entity.RowKey = 'batch_all_operations_together-2'
        entity.test3 = 10
        self.tc.update_entity(
            self.table_name, entity.PartitionKey, entity.RowKey, entity)
        entity.RowKey = 'batch_all_operations_together-3'
        entity.test3 = 100
        self.tc.merge_entity(
            self.table_name, entity.PartitionKey, entity.RowKey, entity)
        entity.RowKey = 'batch_all_operations_together-4'
        entity.test3 = 10
        self.tc.insert_or_replace_entity(
            self.table_name, entity.PartitionKey, entity.RowKey, entity)
        entity.RowKey = 'batch_all_operations_together-5'
        self.tc.insert_or_merge_entity(
            self.table_name, entity.PartitionKey, entity.RowKey, entity)
        self.tc.commit_batch()

        # Assert
        entities = self.tc.query_entities(
            self.table_name, "PartitionKey eq '003'", '')
        self.assertEqual(5, len(entities))

    def test_batch_same_row_operations_fail(self):
        # Arrange
        self._create_table(self.table_name)
        entity = self._create_default_entity_dict('001', 'batch_negative_1')
        self.tc.insert_entity(self.table_name, entity)

        # Act
        with self.assertRaises(WindowsAzureError):
            self.tc.begin_batch()

            entity = self._create_updated_entity_dict(
                '001', 'batch_negative_1')
            self.tc.update_entity(
                self.table_name,
                entity['PartitionKey'],
                entity['RowKey'], entity)

            entity = self._create_default_entity_dict(
                '001', 'batch_negative_1')
            self.tc.merge_entity(
                self.table_name,
                entity['PartitionKey'],
                entity['RowKey'], entity)
        self.tc.cancel_batch()

        # Assert

    def test_batch_different_partition_operations_fail(self):
        # Arrange
        self._create_table(self.table_name)
        entity = self._create_default_entity_dict('001', 'batch_negative_1')
        self.tc.insert_entity(self.table_name, entity)

        # Act
        with self.assertRaises(WindowsAzureError):
            self.tc.begin_batch()

            entity = self._create_updated_entity_dict(
                '001', 'batch_negative_1')
            self.tc.update_entity(
                self.table_name,
                entity['PartitionKey'],
                entity['RowKey'], entity)

            entity = self._create_default_entity_dict(
                '002', 'batch_negative_1')
            self.tc.insert_entity(self.table_name, entity)
        self.tc.cancel_batch()

        # Assert

    def test_batch_different_table_operations_fail(self):
        # Arrange
        other_table_name = self.table_name + 'other'
        self.additional_table_names = [other_table_name]
        self._create_table(self.table_name)
        self._create_table(other_table_name)

        # Act
        with self.assertRaises(WindowsAzureError):
            self.tc.begin_batch()

            entity = self._create_default_entity_dict(
                '001', 'batch_negative_1')
            self.tc.insert_entity(self.table_name, entity)

            entity = self._create_default_entity_dict(
                '001', 'batch_negative_2')
            self.tc.insert_entity(other_table_name, entity)
        self.tc.cancel_batch()

    def test_unicode_property_value(self):
        ''' regression test for github issue #57'''
        # Act
        self._create_table(self.table_name)
        self.tc.insert_entity(
            self.table_name,
            {'PartitionKey': 'test', 'RowKey': 'test1', 'Description': u'ꀕ'})
        self.tc.insert_entity(
            self.table_name,
            {'PartitionKey': 'test', 'RowKey': 'test2', 'Description': 'ꀕ'})
        resp = self.tc.query_entities(
            self.table_name, "PartitionKey eq 'test'")

        # Assert
        self.assertEqual(len(resp), 2)
        self.assertEqual(resp[0].Description, u'ꀕ')
        self.assertEqual(resp[1].Description, u'ꀕ')

    def test_unicode_property_name(self):
        # Act
        self._create_table(self.table_name)
        self.tc.insert_entity(
            self.table_name,
            {'PartitionKey': 'test', 'RowKey': 'test1', u'啊齄丂狛狜': u'ꀕ'})
        self.tc.insert_entity(
            self.table_name,
            {'PartitionKey': 'test', 'RowKey': 'test2', u'啊齄丂狛狜': 'hello'})
        resp = self.tc.query_entities(
            self.table_name, "PartitionKey eq 'test'")

        # Assert
        self.assertEqual(len(resp), 2)
        self.assertEqual(resp[0].__dict__[u'啊齄丂狛狜'], u'ꀕ')
        self.assertEqual(resp[1].__dict__[u'啊齄丂狛狜'], u'hello')

    def test_unicode_create_table_unicode_name(self):
        # Arrange
        self.table_name = unicode(self.table_name) + u'啊齄丂狛狜'

        # Act
        with self.assertRaises(WindowsAzureError):
            # not supported - table name must be alphanumeric, lowercase
            self.tc.create_table(self.table_name)
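# The tests above exercise the legacy azure.storage batch API end to end.
# A minimal standalone sketch of the same pattern, assuming placeholder
# account credentials and a placeholder table name (not values from this
# project):
from azure.storage import TableService

ts = TableService(account_name='youraccount', account_key='yourkey')
ts.create_table('batchdemo')

ts.begin_batch()
for i in range(3):
    # every entity in a single batch must share one PartitionKey, which is
    # what test_batch_different_partition_operations_fail verifies above
    ts.insert_entity('batchdemo', {
        'PartitionKey': 'demo',
        'RowKey': str(i),
        'value': i,
    })
ts.commit_batch()  # sends all three inserts as one entity-group transaction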
class Repository(object):
    """Azure Table Storage repository."""

    def __init__(self, settings):
        """Initializes the repository with the specified settings dict.
        Required settings are:
        - STORAGE_NAME
        - STORAGE_KEY
        - STORAGE_TABLE_POLL
        - STORAGE_TABLE_CHOICE
        """
        self.name = 'Azure Table Storage'
        self.storage_name = settings['STORAGE_NAME']
        self.storage_key = settings['STORAGE_KEY']
        self.poll_table = settings['STORAGE_TABLE_POLL']
        self.choice_table = settings['STORAGE_TABLE_CHOICE']
        self.svc = TableService(self.storage_name, self.storage_key)
        self.svc.create_table(self.poll_table)
        self.svc.create_table(self.choice_table)

    def get_polls(self):
        """Returns all the polls from the repository."""
        poll_entities = self.svc.query_entities(self.poll_table)
        polls = [_poll_from_entity(entity) for entity in poll_entities]
        return polls

    def get_poll(self, poll_key):
        """Returns a poll from the repository."""
        try:
            partition, row = _key_to_partition_and_row(poll_key)
            poll_entity = self.svc.get_entity(self.poll_table, partition, row)
            choice_entities = self.svc.query_entities(
                self.choice_table,
                "PollPartitionKey eq '{0}' and PollRowKey eq '{1}'"
                .format(partition, row)
            )
            poll = _poll_from_entity(poll_entity)
            poll.choices = [_choice_from_entity(choice_entity)
                            for choice_entity in choice_entities]
            return poll
        except WindowsAzureMissingResourceError:
            raise PollNotFound()

    def increment_vote(self, poll_key, choice_key):
        """Increments the choice vote count for the specified poll."""
        try:
            partition, row = _key_to_partition_and_row(choice_key)
            entity = self.svc.get_entity(self.choice_table, partition, row)
            entity.Votes += 1
            self.svc.update_entity(self.choice_table, partition, row, entity)
        except WindowsAzureMissingResourceError:
            raise PollNotFound()

    def add_sample_polls(self):
        """Adds a set of polls from data stored in a samples.json file."""
        poll_partition = '2014'
        poll_row = 0
        choice_partition = '2014'
        choice_row = 0
        for sample_poll in _load_samples_json():
            poll_entity = {
                'PartitionKey': poll_partition,
                'RowKey': str(poll_row),
                'Text': sample_poll['text'],
            }
            self.svc.insert_entity(self.poll_table, poll_entity)
            for sample_choice in sample_poll['choices']:
                choice_entity = {
                    'PartitionKey': choice_partition,
                    'RowKey': str(choice_row),
                    'Text': sample_choice,
                    'Votes': 0,
                    'PollPartitionKey': poll_partition,
                    'PollRowKey': str(poll_row),
                }
                self.svc.insert_entity(self.choice_table, choice_entity)
                choice_row += 1
            poll_row += 1
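# A hedged usage sketch for the Repository class above. The settings values,
# the poll-key format ('2014_0'), and the attribute names on the poll/choice
# models are assumptions, since those helpers are defined elsewhere:
settings = {
    'STORAGE_NAME': 'youraccount',
    'STORAGE_KEY': 'yourkey',
    'STORAGE_TABLE_POLL': 'polls',
    'STORAGE_TABLE_CHOICE': 'choices',
}
repo = Repository(settings)
repo.add_sample_polls()
try:
    poll = repo.get_poll('2014_0')     # key assumed to encode partition and row
    repo.increment_vote('2014_0', poll.choices[0].key)  # choice key attribute assumed
except PollNotFound:
    print "no such poll"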
# Load the storage account name and key from local key files
with open("ASA.key", "r") as myfile:
    accountName = myfile.read().replace('\n', '')

# accountKey = environ["AZURE_STORAGE_ACCESS_KEY"]
with open("ASK.key", "r") as myfile:
    accountKey = myfile.read().replace('\n', '')

# Create blob service
blob_service = BlobService(account_name=accountName, account_key=accountKey)
blob_service.create_container(blob_container)
blob_service.create_container(blob_analysis)

# Open queue with given credentials
queue_service = QueueService(account_name=accountName, account_key=accountKey)

# Open table service
table_service = TableService(account_name=accountName, account_key=accountKey)

# Analysis results
results = None

# Regions for analysis
region = 4

# Repeat
while True:
    # get images from *imagesQueue* - it is invoked by CRON
    messages = queue_service.get_messages(imagesQueue)
    if len(messages) == 0:
        sleep(15)
    for message in messages:
        # get image: image ID
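# The loop above only consumes from imagesQueue; whatever enqueues work for
# it would use the producer half of the same legacy QueueService API. A
# sketch, assuming the queue name variable from this file and a placeholder
# message body:
queue_service.create_queue(imagesQueue)
queue_service.put_message(imagesQueue, 'image-id-0001')

# Once a message has been fully processed, the consumer should also remove
# it so it is not redelivered, e.g.:
# queue_service.delete_message(imagesQueue, message.message_id, message.pop_receipt)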
from azure.storage import TableService, Entity

table_service = TableService(
    account_name='upmsample',
    account_key='5YC/6x9KL56rtaAUAZMgGsREDvvPHYJIMqH3z1c9IgQLy0qMP+Awr+7j51Tfzniczj//6jn7lvYQutD/mHm6dw==')
table_service.create_table('table1')

task = {'PartitionKey': 'ledswitch', 'RowKey': '1', 'state': 1}
table_service.insert_entity('table1', task)
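# Reading the switch entity back and flipping its state, using the same
# get_entity/update_entity signatures seen elsewhere in this document:
task = table_service.get_entity('table1', 'ledswitch', '1')
task.state = 0
table_service.update_entity('table1', 'ledswitch', '1', task)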
import json, random, services, hashlib, user_services, app_services, notifying
from azure.storage import TableService, Entity

account_name = 'rcms'
account_key = '9L1kZqrgAovvt1KI3xOfRj6RxLPt+hWpAI2mfsJ3zpf6DjMCN/TqYcaCb956jYG8qELgWpv0T0Cn5OC4vCPOng=='
table = 'applications'
priority = '200'
unique_sequence = services.uniqueid()  # next(unique_sequence)

ts = TableService(account_name=account_name, account_key=account_key)
ts.create_table(table)
# ts.delete_table(table)


def saveApplication(application):
    app = Entity()
    app.PartitionKey = application['partition']  # uniquely identifies a partition of entities
    app.RowKey = str(next(unique_sequence))
    # app.RowKey = "kjlhajkdlhfjhasdnfasdkjflnasdf"
    app.package_type = application['apptype']
    app.name = application['name']
    app.reigon = application['reigon']
    app.port = str(services.get_port())
    app.priority = priority
    app.requests = str(0)
    app.cost = str(0)
    user = user_services.get_user_by_email(application['partition'])  # get the current user
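# saveApplication is truncated above. A hedged sketch of how an entity like
# `app` would presumably be persisted and read back with this file's
# TableService `ts`; the filter value is a placeholder, and the final write
# is commented out since the real function's ending is not shown:
# ts.insert_entity(table, app)
apps = ts.query_entities(table, "PartitionKey eq 'someuser@example.com'")
for a in apps:
    print a.name, a.port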
from azure.storage import TableService
from azure.storage import QueueService
import time

account_name = "tr15sharemyphoto"
storage_key = "4yhu8YT3Y6A3do0s+anHFAX6ZUA11V2NJJNhjmJc0iAgSLW8Xwk3QvVQn2Um+hgMmO+vGf0UFd2zOo8K63PD4w=="

queue_service = QueueService(account_name, storage_key)
table_service = TableService(account_name, storage_key)

tags = ('pillow', 'soft', 'white')


def tag_text(text):
    # collect the known tags that appear in the text
    t = []
    words = text.split(" ")
    for w in words:
        if w in tags:
            t.append(w)
    ret = set(t)
    print "keywords: "
    print ret
    return ret


def tag_table_entry(rowKey, entry):
    tags = tag_text(entry.title + " " + entry.message)
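# tag_table_entry is cut off above. A hedged sketch of one way the keywords
# could be persisted: one row per (tag, photo) pair in a hypothetical
# 'phototags' table; the table and property names are assumptions, not part
# of the original.
def store_tags(rowKey, found_tags):
    table_service.create_table('phototags')
    for t in found_tags:
        table_service.insert_entity('phototags', {
            'PartitionKey': t,   # partition by tag so all photos for a tag co-locate
            'RowKey': rowKey,
        })

# e.g. store_tags('photo-0001', tag_text('a soft white pillow'))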