class AzureCosmosDb(TableStorage):
    """TableStorage implementation backed by an Azure Cosmos DB / Storage table."""

    def __init__(self, config: AzureCosmosDbConfig):
        self._tableService = TableService(account_name=config.account_name,
                                          account_key=config.account_key)
        self._tableName = config.table_name

    def check_entry_exists(self, entry):
        """Return True when an entity with entry's PartitionKey/RowKey exists."""
        try:
            self.query(entry['PartitionKey'], entry['RowKey'])
            return True
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; a lookup miss still reports False.
            return False

    def write(self, entry):
        """Insert the prepared entry, or update it when it already exists."""
        prepared = entry_storage.EntryOperations.prepare_entry_for_insert(entry)
        if not self.check_entry_exists(prepared):
            self._tableService.insert_entity(self._tableName, prepared)
        else:
            self._tableService.update_entity(self._tableName, prepared)

    def query(self, partitionkey, rowkey):
        """Fetch a single entity by partition and row key."""
        return self._tableService.get_entity(self._tableName, partitionkey, rowkey)

    def delete(self, partitionkey, rowkey):
        """Delete the entity identified by partition and row key."""
        self._tableService.delete_entity(self._tableName, partitionkey, rowkey)
def throttle_ip_requests(ip_entry):
    """Return True when ip_entry's source IP exceeded its daily request quota.

    Tracks per-IP request counts in the billing table. The count resets once
    more than a day has passed since the stored timestamp.
    """
    max_from_single_ip = 5
    time_format = "%d/%m/%Y %H:%M:%S"
    table_service = TableService(account_name=os.environ['STORAGE_ACCOUNT'],
                                 account_key=os.environ['STORAGE_KEY'])
    # Create the billing table if it doesn't exist yet (no-op otherwise).
    table_service.create_table(table_name=os.environ['BILLING_TABLE'])

    ip_row = None
    try:
        ip_row = table_service.get_entity(os.environ['BILLING_TABLE'],
                                          ip_entry['PartitionKey'],
                                          ip_entry['RowKey'])
    except Exception:
        # Narrowed from a bare `except:`; a missing row simply means this is
        # the first request seen from that IP.
        pass

    if not ip_row:
        # First request from this IP: start the counter at 1.
        ip_entry['count'] = 1
        table_service.insert_entity(table_name=os.environ['BILLING_TABLE'],
                                    entity=ip_entry)
        ip_row = ip_entry
    else:
        lastdatetime = datetime.strptime(ip_row['time'], time_format)
        currdatetime = datetime.strptime(ip_entry['time'], time_format)
        tdelta = currdatetime - lastdatetime
        if tdelta.days < 1 and ip_row['count'] > max_from_single_ip:
            return True  # throttle this entry
        elif tdelta.days > 0:
            # Over 1 day has passed: reset the count to 1 and the reference time.
            ip_row['count'] = 1
            ip_row['time'] = currdatetime.strftime(time_format)
            table_service.update_entity(os.environ['BILLING_TABLE'], ip_row)
        else:
            # Same day and still under the limit: bump the count.
            # NOTE(review): 'time' is deliberately left at the first request of
            # the window, so the day is measured from that first request.
            ip_row['count'] = ip_row['count'] + 1
            table_service.update_entity(os.environ['BILLING_TABLE'], ip_row)
    # However we got here, do not throttle.
    return False
def update_pipeline(account_name, account_key, table_name, partition_name,
                    filter_name, filter_value, name1, value1, name2=None,
                    value2=None, name3=None, value3=None, name4=None,
                    value4=None):
    """Update pipeline entities matching `filter_name eq filter_value`.

    If none of the tracked fields changed, the matching entity is updated in
    place; if any did change, a copy is inserted under a fresh RowKey and the
    scan stops. When no entity matches at all, a new pipeline row is added.
    """
    table_service = TableService(account_name=account_name,
                                 account_key=account_key)
    query = filter_name + " eq '" + filter_value + "'"
    entities = table_service.query_entities(table_name, filter=query)

    def apply_field(entity, field, new_value):
        # Lower-case the incoming value; report whether it differed.
        if field in entity and entity[field] != new_value.lower():
            entity[field] = new_value.lower()
            return True
        return False

    matched = 0
    for entity in entities:
        matched += 1
        # The first name/value pair is mandatory and applied unconditionally;
        # the optional pairs are skipped when not supplied.
        changed = apply_field(entity, name1, value1)
        for field, new_value in ((name2, value2), (name3, value3),
                                 (name4, value4)):
            if field is not None and new_value is not None:
                changed = apply_field(entity, field, new_value) or changed
        if not changed:
            table_service.update_entity(table_name, entity)
            print("Updating existing entry")
        else:
            entity["RowKey"] = generate_row_key()
            table_service.insert_entity(table_name, entity)
            print("Adding new entry since one already existed")
            print(entity)
            break
    if matched == 0:
        add_pipeline(account_name, account_key, table_name, partition_name,
                     filter_name, filter_value, name1, value1, name2, value2,
                     name3, value3)
    print("Done")
def main(req: func.HttpRequest, ipListRowIn, ipListRowOut) -> func.HttpResponse:
    """Azure Function HTTP trigger: sync an Analysis Services firewall with the
    current possible outbound IPs of a web/function app.

    ipListRowIn / ipListRowOut are Azure Table storage input/output bindings
    carrying the previously applied IP list as a JSON-serialized row.
    """
    logging.info('Python HTTP trigger function processed a request.')
    # read request body
    reqMessage = req.get_json()
    logging.debug(reqMessage)
    # if available, load previous ip addresses from storage
    previousIpAddresses = None
    dataRow = json.loads(ipListRowIn)
    if len(dataRow) > 0:
        previousIpAddresses = json.loads(dataRow[0]['IpAddresses'])
    # read env vars
    tenantId = os.getenv("TENANT_ID")
    clientId = os.getenv("CLIENT_ID")
    clientSecret = os.getenv("CLIENT_SECRET")
    tableName = os.getenv("TABLE_NAME")
    entityRowKey = os.getenv("ENTITY_ROW_KEY")
    storageTableConnString = os.getenv("STORAGE_TABLE_CONN_STRING")
    # obtain access token address from Azure AD
    adservice = AzureAdService.AzureAdService(logging)
    accessToken = adservice.getAccessToken(tenantId, clientId, clientSecret)
    logging.debug('Azure AD access token: {0}'.format(accessToken))
    # obtain current possible IP addresses from web/func app
    appservice = WebAppService.WebAppService(logging)
    possibleOutboundIps = appservice.getPossibleIpAddresses(
        accessToken,
        reqMessage['webAppSubscriptionId'],
        reqMessage['webAppResourceGroup'],
        reqMessage['webAppName'])
    # convert to json (the service returns a comma-separated string)
    possibleOutboundIps = possibleOutboundIps.split(',')
    logging.debug('ip addresses: {0}'.format(possibleOutboundIps))
    # push previous vs. current IP delta to the Analysis Services firewall
    analysisservice = AzureAnalysisService.AzureAnalysisService(logging)
    analysisservice.updateFirewallSettings(
        accessToken, previousIpAddresses, possibleOutboundIps,
        AnalysisServerModel.AnalysisServerModel(
            reqMessage['analysisServerSubscriptionId'],
            reqMessage['analysisServerResourceGroup'],
            reqMessage['analysisServerName']))
    # upsert table row
    if previousIpAddresses is None:
        data = {
            # NOTE(review): partition key deliberately reuses the row key value
            "PartitionKey": entityRowKey,
            "RowKey": entityRowKey,
            "IpAddresses": possibleOutboundIps
        }
        # insert using table storage output binding
        ipListRowOut.set(json.dumps(data))
    else:
        # update existing row using table storage client library
        table_service = TableService(connection_string=storageTableConnString)
        table_row = table_service.get_entity(tableName, entityRowKey, entityRowKey)
        table_row.IpAddresses = json.dumps(possibleOutboundIps)
        table_service.update_entity(tableName, table_row)
    return func.HttpResponse('done', status_code=200)
def tableStorage(table_name, partition_key, row_key, hins_processed, timesaved,
                 time_by_system, time_by_user, requests):
    """Insert a usage-stats entity, or add the new figures onto the stored
    totals when a row with the same keys already exists."""
    try:
        service = TableService(
            account_name=config.AZURE['STORAGE_ACCOUNT_NAME'],
            account_key=config.AZURE['STORAGE_ACCOUNT_KEY'])
        fresh_row = {
            'PartitionKey': partition_key,
            'RowKey': row_key,
            'HinsProcessed': hins_processed,
            'TimeSaved': timesaved,
            'TimeBySystem': time_by_system,
            'TimeByUser': time_by_user,
            'Requests': requests
        }
        # Make sure the destination table exists before writing.
        if not service.exists(table_name, timeout=None):
            service.create_table(table_name, fail_on_exist=False)
        try:
            # Optimistic insert: raises when the entity already exists.
            service.insert_entity(table_name, fresh_row)
            print("Entity Doesn't Exist")
            print("Creating Entity\n")
        except Exception:
            # Row already present: merge the new numbers into stored totals.
            print("Entity Exists")
            print("Updating entity\n")
            stored = service.get_entity(table_name, partition_key, row_key)
            merged_row = {
                'PartitionKey': partition_key,
                'RowKey': row_key,
                'HinsProcessed': stored.HinsProcessed + hins_processed,
                'TimeSaved': stored.TimeSaved + timesaved,
                'TimeBySystem': stored.TimeBySystem + time_by_system,
                'TimeByUser': stored.TimeByUser + time_by_user,
                'Requests': stored.Requests + requests
            }
            service.update_entity(table_name, merged_row, if_match='*',
                                  timeout=None)
    except Exception as outer_err:
        print(outer_err)
def main(req: func.HttpRequest) -> func.HttpResponse:
    """Azure Function: match an uploaded face against stored faces and either
    update the matching table entry or insert a brand-new one.

    Returns the RowKey of the matched/created entity.
    """
    logging.info('Python HTTP trigger function processed a request.')
    CF.BaseUrl.set("https://emotiontrack.cognitiveservices.azure.com/face/v1.0")
    # SECURITY: hard-coded Cognitive Services key — move to app settings / Key Vault.
    CF.Key.set("4a1e0d41a8494d71ac0b9028464d8e62")
    rowkey = req.params.get('rowkey')
    if not rowkey:
        logging.error("Missing parameter(s)")
        return func.HttpResponse("Missing one or more parameter.",
                                 status_code=400)
    face = req.get_json()
    face_rect = face['faceRectangle']
    table = TableService(connection_string=conn_string)
    if not table:
        logging.error("Failed to connect to the storage")
        return func.HttpResponse(
            "Failed to connect to the storage. Please try again later.",
            status_code=500)
    test_img = getFaceImage(table, rowkey, face_rect)
    test_imgIO = io.BytesIO()
    # BUG FIX: PIL expects the registered format name 'JPEG'; 'JPG' raises.
    test_img.save(test_imgIO, format='JPEG')
    entities = table.query_entities(table_name, filter=None)
    isMatch = False
    for entity in entities:
        img = getFaceImage(table, entity.RowKey, entity.rect)
        imgIO = io.BytesIO()
        img.save(imgIO, format='JPEG')
        try:
            res = CF.face.verify(test_imgIO, imgIO)
            if res['isIdentical']:
                # update entry to point at the newly uploaded face
                entity.RowKey = rowkey
                entity.rect = face_rect
                table.update_entity(table_name, entity)
                isMatch = True
                break
        except Exception:
            # BUG FIX: the original `try` had no handler (SyntaxError as
            # written). A failed verify call should not abort the whole scan.
            logging.exception("Face verification failed; trying next entity")
    if not isMatch:
        # no match found: store the face as a brand-new entry
        entity = Entity()
        entity.PartitionKey = "1"
        entity.RowKey = str(uuid.uuid4())
        entity.rect = face_rect
        table.insert_entity(table_name, entity)
    return func.HttpResponse(entity.RowKey, status_code=200)
def main(req: func.HttpRequest) -> func.HttpResponse:
    """Mark the teams supplied as a JSON list in the request body as
    player-controlled ('p1') in the Teams table."""
    logging.info('Python HTTP trigger function processed a request.')
    teams = req.params.get('teams')
    if teams:
        # A query-string value is a plain string, never the list we require.
        return func.HttpResponse("teams argument invalid", status_code=400)
    try:
        req_body = req.get_json()
    except ValueError:
        logging.info('Value error retrieving request body')
    else:
        teams = req_body
        logging.info('Request body information retrieved')
    if not isinstance(teams, list):
        logging.info('Invalid list passed to function in body.')
        return func.HttpResponse("Please provide teams in a list",
                                 status_code=422)
    logging.info('Valid list passed to function in body')
    # get the table details
    accountkey = os.environ["CupstoreKeyId"]
    logging.info('Table storage account key retrieved from key vault')
    accountname = 'thecupstore'
    # connect to the table and update the player controlled teams
    table_service = TableService(account_name=accountname,
                                 account_key=accountkey)
    # Build: Name eq 'a' or Name eq 'b' ... for every requested team.
    query_string = "Name eq '"
    for position, team in enumerate(teams, start=1):
        if position == len(teams):
            query_string += team + "'"
        else:
            query_string += team + "' or Name eq '"
    logging.info('query string: ' + query_string)
    returned_teams = table_service.query_entities('Teams',
                                                  filter=query_string)
    for team in returned_teams:
        logging.info('editing team: ' + team.Name)
        team.Controlled = "p1"
        table_service.update_entity('Teams', team)
    return func.HttpResponse("", status_code=200)
def main(req: func.HttpRequest) -> func.HttpResponse:
    """Upsert a JSON entity into the table named by the 'name' request header.

    The storage connection string is fetched from Key Vault, preferring
    managed identity and falling back to service-principal credentials.
    """
    logging.info('Python HTTP trigger function processed a request.')
    KeyVault_DNS = os.environ["KeyVault_DNS"]
    SecretName = os.environ["SecretName"]
    table_name = req.headers.get('name')
    value = req.get_json()
    if not table_name:
        ret = {'result': "Please pass a name!!"}
        return func.HttpResponse(json.dumps(ret), status_code=400)
    try:
        # Try with managed identity, otherwise go with Service Principal
        creds = ManagedIdentityCredential()
        client = SecretClient(vault_url=KeyVault_DNS, credential=creds)
        retrieved_secret = client.get_secret(SecretName)
    except Exception:
        # Narrowed from a bare `except:` — fall back to SP credentials.
        creds = ClientSecretCredential(
            client_id=os.environ["SP_ID"],
            client_secret=os.environ["SP_SECRET"],
            tenant_id=os.environ["TENANT_ID"])
        client = SecretClient(vault_url=KeyVault_DNS, credential=creds)
        retrieved_secret = client.get_secret(SecretName)
    table_service = TableService(connection_string=retrieved_secret.value)
    if not table_service.exists(table_name):
        ret = {'result': "Please create the table!"}
        return func.HttpResponse(json.dumps(ret), status_code=400)
    # PartitionKey / RowKey are mandatory for table entities.
    value.setdefault('PartitionKey', 'reference')
    value.setdefault('RowKey', '001')
    try:
        table_service.update_entity(table_name=table_name, entity=value)
    except Exception:
        # Narrowed from a bare `except:`; entity missing → insert (upsert).
        table_service.insert_entity(table_name=table_name, entity=value)
    ret = {'result': "Success"}
    return func.HttpResponse(json.dumps(ret), status_code=200)
def update_result(results_entity):
    """
    Updates a result in the ``rosiepi`` storage table.

    :param: results_entity: A ``azure.cosmodb.table.models.Entity`` object
                            containing the results to add to the storage
                            table. ``Entity`` object can be retrieved from
                            ``lib/result.py::Result.results_to_table_entity()``.

    :return: The entity's Etag if successful. None if failed.
    """
    response = None
    # Guard clause: reject anything that is not a table Entity up front.
    if not isinstance(results_entity, Entity):
        logging.info(
            'Result not updated in rosiepi table. Supplied result was an incorrect '
            'type. Should be azure.cosmodb.table.models.Entity. Supplied type: '
            f'{type(results_entity)}'
        )
        return response
    table = TableService(connection_string=os.environ['APP_STORAGE_CONN_STR'])
    try:
        response = table.update_entity('rosiepi', results_entity)
    except Exception as err:
        logging.info(f'Failed to update result in rosiepi table. Error: {err}')
    return response
class AzureTableData:
    """
    This class handles the functionality of getting data from Azure Table
    Storage and cleaning it up.
    """

    def __init__(self, args):
        # args.connection_str: storage-account connection string.
        connect_str = args.connection_str
        self.table_service = TableService(connection_string=connect_str)

    def clean_up_table(self, args):
        """
        Fetches all the images from Azure Table Storage. Marks the images as
        deleted if the image is not present in Azure Marketplace.
        """
        # BUG FIX: use a context manager so the file handle is always closed
        # (the original `open()` was never closed).
        with open(args.all_image_list, 'r') as allimages:
            images_in_marketplace = allimages.readlines()
        imagequeryresult = self.table_service.query_entities(
            args.table_name,
            filter="IsDeleted eq 0",
            accept='application/json;odata=minimalmetadata')
        print("Creating list of images")
        list_of_images_to_clean_up = []
        for image in imagequeryresult:
            # Disk version is the trailing '-'-separated token of the key.
            disk_version = image.PartitionKey.split('-')[-1]
            matches = [image_name for image_name in images_in_marketplace
                       if disk_version in image_name]
            # BUG FIX: replaced `l == None or len(l) is 0` — a comprehension
            # never yields None, and `is 0` is an unreliable identity check;
            # an empty list is simply falsy.
            if not matches:
                list_of_images_to_clean_up.append(image)
        print("Updating", len(list_of_images_to_clean_up))
        self.mark_deleted(list_of_images_to_clean_up, args.table_name)

    def mark_deleted(self, images, table_name):
        """
        Updates Azure Table Storage record by marking it as deleted
        """
        # Progress counter printed per updated record, starting at 1.
        for i, image in enumerate(images, start=1):
            image.IsDeleted = 1
            self.table_service.update_entity(table_name, image)
            print(i)
def main(req: func.HttpRequest) -> func.HttpResponse:
    """Upsert a JSON entity into the table named by the 'name' header or,
    failing that, the 'name' query parameter."""
    logging.info('Starting insert row.')
    table_name = req.headers.get('name')
    if not table_name:
        # If name wasn't added as header, search for it in the parameters.
        table_name = req.params.get('name')
    value = req.get_json()
    if not table_name:
        ret = {'result': "Please pass a name!!"}
        return func.HttpResponse(json.dumps(ret), status_code=400)
    retrieved_secret = getConnectionString()
    table_service = TableService(connection_string=retrieved_secret.value)
    if not table_service.exists(table_name):
        ret = {'result': "Please create the table!"}
        return func.HttpResponse(json.dumps(ret), status_code=400)
    # PartitionKey / RowKey are mandatory for table entities.
    value.setdefault('PartitionKey', 'reference')
    value.setdefault('RowKey', '001')
    try:
        table_service.update_entity(table_name=table_name, entity=value)
    except Exception:
        # Narrowed from a bare `except:`; entity missing → insert (upsert).
        table_service.insert_entity(table_name=table_name, entity=value)
    ret = {'result': "Success"}
    return func.HttpResponse(json.dumps(ret), status_code=200)
class AzureTable():
    """Thin convenience wrapper around azure TableService CRUD operations."""

    def __init__(self, account_name, account_key):
        self.table_service = TableService(account_name=account_name,
                                          account_key=account_key)

    def create_table(self, table_name):
        """Create the table; delegates to the underlying service."""
        return self.table_service.create_table(table_name)

    def exists_table(self, table_name):
        """Return whether the table exists."""
        return self.table_service.exists(table_name)

    def insert_or_replace_entity(self, table_name, partition_key, row_key,
                                 **kwargs):
        """Upsert an entity: merge kwargs into the stored entity when it
        exists, otherwise into a fresh {PartitionKey, RowKey} skeleton."""
        try:
            entity = self.table_service.get_entity(table_name, partition_key,
                                                   row_key)
        except Exception:
            # Insert a new entity
            entity = {'PartitionKey': partition_key, 'RowKey': row_key}
        for key, value in kwargs.items():
            entity[key] = value
        return self.table_service.insert_or_replace_entity(table_name, entity)

    def insert_or_replace_entity2(self, table_name, entity):
        """Upsert a pre-built entity dict."""
        return self.table_service.insert_or_replace_entity(table_name, entity)

    def insert_entity(self, table_name, entity):
        return self.table_service.insert_entity(table_name, entity)

    def update_entity(self, table_name, entity):
        return self.table_service.update_entity(table_name, entity)

    def get_entity(self, table_name, partition_key, row_key):
        return self.table_service.get_entity(table_name, partition_key, row_key)

    def delete_entity(self, table_name, partition_key, row_key):
        self.table_service.delete_entity(table_name, partition_key, row_key)

    def delete_table(self, table_name):
        return self.table_service.delete_table(table_name)

    def get_entities(self, table_name, partition_key):
        """List every entity stored in the given partition."""
        partition_filter = "PartitionKey eq '{0}'".format(partition_key)
        return self.table_service.query_entities(table_name, partition_filter)
class TableStorage:
    """
    A class for manipulating TableStorage in Storage Account

    The class provides simple methods below.

    * create
    * read
    * update
    * delete(not yet)

    The class is intended to be used as `delegation`, not `extend`.

    Args:
        account_name: name of the Storage Account
        account_key: key for the Storage Account
        database_name: name of the StorageTable database
    """

    def __init__(self, account_name: str, account_key: str,
                 database_name: str):
        """Connect the underlying TableService and remember the table name."""
        self.table_service = TableService(account_name=account_name,
                                          account_key=account_key)
        self.database_name = database_name

    def get(self, partition_key_value: str, filter_key_values: dict):
        """
        use PartitionKey as table_name

        Args:
            partition_key_value: PartitionKey value to match
            filter_key_values: additional column equality constraints

        Returns:
            list of matching entities
        """
        conditions = [f"PartitionKey eq '{partition_key_value}'"]
        # When extra conditions are supplied, AND them onto the filter.
        for key, value in filter_key_values.items():
            conditions.append(f"{key} eq '{value}'")
        rows = self.table_service.query_entities(
            table_name=self.database_name,
            filter=" and ".join(conditions))
        return list(rows)

    def put(self, partition_key_value: str, data: dict):
        """
        put data.

        Args:
            partition_key_value: PartitionKey for the new row
            data: remaining columns to store

        Returns:
            the entity dict that was inserted
        """
        record = {'PartitionKey': partition_key_value, **data}
        self.table_service.insert_entity(table_name=self.database_name,
                                         entity=record)
        return record

    def update(self, partition_key_value: str, row_key: str, data: dict):
        """
        update data.

        Args:
            partition_key_value: PartitionKey of the row to update
            row_key: RowKey of the row to update
            data: columns to overwrite

        Returns:
            the entity dict that was written
        """
        record = {'PartitionKey': partition_key_value, 'RowKey': row_key,
                  **data}
        self.table_service.update_entity(table_name=self.database_name,
                                         entity=record)
        return record
# NOTE(review): this chunk begins mid-statement — the opening of the first
# task dict literal ({'PartitionKey': ..., 'RowKey': ..., ) lies above this
# view, as does the creation of `table_service`.
        'description': 'Take out the trash', 'priority': 200}
# Insert the plain-dict task entity.
table_service.insert_entity('tasktable', task)

print("Create task through an Entity object...")
task = Entity()
task.PartitionKey = 'tasksSeattle'
task.RowKey = '002'
task.description = 'Wash the car'
task.priority = 100
table_service.insert_entity('tasktable', task)

# update
print("Update task 001...")
task = {'PartitionKey': 'tasksSeattle', 'RowKey': '001',
        'description': 'Take out the garbage', 'priority': 250}
table_service.update_entity('tasktable', task)

# Replace the entity created earlier
print("Replace task using insert_or_replace... - Take out the garbage again")
task = {'PartitionKey': 'tasksSeattle', 'RowKey': '001',
        'description': 'Take out the garbage again', 'priority': 250}
table_service.insert_or_replace_entity('tasktable', task)

# Insert a new entity
print("insert or replay rowkey 003 - buy detergent")
task = {'PartitionKey': 'tasksSeattle', 'RowKey': '003',
        'description': 'Buy detergent', 'priority': 300}
table_service.insert_or_replace_entity('tasktable', task)

# batch processing - Add multiple entries
print("batch processing task 004/005")
# NOTE(review): this chunk begins mid-call — the parser construction and the
# earlier arguments (including, presumably, --accountName and --dryRun used
# below) are outside this view; confirm they are defined there.
                    action='store')
parser.add_argument('--accountKey', dest='accountKey', required=True,
                    action='store')
parser.add_argument('--type', dest='type', required=True, action='store',
                    choices=['option1', 'option2', 'option3'])
parser.add_argument('--timestamp', dest='timestamp', required=True,
                    action='store')
args = parser.parse_args()

# Connect with the account credentials supplied on the command line.
table_service = TableService(args.accountName, args.accountKey)
# Select entities of the given type older than the supplied timestamp.
filterString = "RowKey eq '" + args.type + "' and Timestamp lt datetime'" + args.timestamp + "'"
tasks = table_service.query_entities('<table>', filter=filterString)
if len(tasks.items) == 0:
    print('No entities found.')
    quit()
for task in tasks:
    print(task.PartitionKey)
    task.field1 = False
    # Only write back when this is not a dry run.
    if args.dryRun == False:
        table_service.update_entity('<table>', task)
# NOTE(review): this chunk begins mid-structure — `procs` (mapping of build
# target -> running process), `service`, `commit`, `pybricks` and the table
# constants are defined outside this view.
}

# Get target sizes
sizes = {}
for target, proc in procs.items():
    # Wait for the target to complete building or fail
    proc.wait()
    # Get build size on success
    bin_path = os.path.join(PYBRICKS_PATH, "bricks", target, "build",
                            "firmware.bin")
    try:
        sizes[target] = os.path.getsize(bin_path)
    except FileNotFoundError:
        # Build failed: no firmware.bin was produced, so skip this target.
        pass

# Record the firmware sizes for this commit.
service.insert_or_replace_entity(FIRMWARE_SIZE_TABLE, {
    "PartitionKey": "size",
    "RowKey": commit.hexsha,
    **sizes
})

# Remember the last processed commit hash for the build pipeline.
service.update_entity(
    CI_STATUS_TABLE,
    {
        "PartitionKey": "build",
        "RowKey": "lastHash",
        "hash": pybricks.commit(PYBRICKS_BRANCH).hexsha,
    },
)
class AzureTable(object):
    """Azure Table Storage adapter that maps configurable payload fields onto
    the table's PartitionKey / RowKey columns."""

    def __init__(self, account_name: str, account_key: str, table_name: str,
                 partition_key_field: str, clustering_key_field: str):
        self.table = TableService(account_name=account_name,
                                  account_key=account_key)
        # BUG FIX: was `self.table_name = self.table_name`, a self-assignment
        # that raised AttributeError and discarded the constructor argument.
        self.table_name = table_name
        self.partition_key_field = partition_key_field
        self.clustering_key_field = clustering_key_field

    @property
    def partition_key_name(self) -> str:
        return 'PartitionKey'

    @property
    def clustering_key_name(self) -> str:
        return 'RowKey'

    def get_payload(self, payload: dict):
        """Return a copy of `payload` augmented with PartitionKey / RowKey
        columns taken from the configured key fields.

        Raises:
            PartitionKeyNotFoundError: payload lacks the partition key field.
            ClusteringKeyNotFoundError: payload lacks the clustering key field.
        """
        item = deepcopy(payload)
        partition_key = payload.get(self.partition_key_field)
        clustering_key = payload.get(self.clustering_key_field)
        if partition_key is None:
            # BUG FIX: the '{}' placeholder was never filled in.
            raise PartitionKeyNotFoundError(
                'payload={} does not have a partition key'.format(payload))
        if clustering_key is None:
            raise ClusteringKeyNotFoundError(
                'payload={} does not have a clustering key'.format(payload))
        item.update({
            self.partition_key_name: partition_key,
            self.clustering_key_name: clustering_key
        })
        return item

    def create(self):
        return self.table.create_table(self.table_name)

    def insert(self, item: dict):
        return self.table.insert_entity(self.table_name, self.get_payload(item))

    def update(self, item: dict):
        return self.table.update_entity(self.table_name, self.get_payload(item))

    def upsert(self, item: dict):
        return self.table.insert_or_replace_entity(self.table_name,
                                                   self.get_payload(item))

    def delete(self, partition_key: str, clustering_key: str):
        return self.table.delete_entity(self.table_name,
                                        partition_key=partition_key,
                                        row_key=clustering_key)

    def read(self, partition_key: str, clustering_key: str):
        return self.table.get_entity(self.table_name,
                                     partition_key=partition_key,
                                     row_key=clustering_key)

    def insert_batch(self, items: list):
        # Batched insert: all items must share one partition key per Azure rules.
        batch = TableBatch()
        for item in items:
            batch.insert_entity(self.get_payload(item))
        return self.table.commit_batch(self.table_name, batch)

    def get(self, partition_key: str, clustering_key: str):
        return self.table.get_entity(self.table_name, partition_key,
                                     clustering_key)

    def get_by_partition(self, partition_key: str) -> list:
        return self.table.query_entities(
            self.table_name,
            filter="{} eq '{}'".format(self.partition_key_name, partition_key))
def configure_front_end():
    """Interactively review and edit tap configuration rows.

    Walks every entity in the configuration table, prints the fields that
    `table_data` marks as displayable, and optionally prompts for updates.
    """
    try:
        table_service = TableService(account_name=STORAGE_ACCT_NAME,
                                     account_key=STORAGE_ACCT_KEY)
    except Exception:
        # Narrowed from a bare `except:`.
        print(
            "Error: Could not connect to table service. Check STORAGE_ACCT_NAME and STORAGE_ACCT_KEY"
        )
        return
    if (not table_service.exists(TABLE_NAME_CONFIGURATION)):
        print("Error: Could not find configuration table: %s" %
              (TABLE_NAME_CONFIGURATION))
        return
    taps = table_service.query_entities(TABLE_NAME_CONFIGURATION)
    for tap in taps:
        print("")
        print("%s scale-%s" % (tap['PartitionKey'], tap['RowKey']))
        print("---------------------------------")
        # Show only fields the schema marks as displayable.
        for key in tap:
            if key in table_data and table_data[key][
                    'type'] != 'drop' and table_data[key]['type'] != 'hide':
                if key in tap:
                    print(" %s: %s" % (table_data[key]['description'], tap[key]))
                else:
                    print(" %s:" % (table_data[key]['description']))
        answer = input("Update tap info [y/N]? ")
        if not answer:
            answer = 'N'
        # BUG FIX: `answer.lower == "yes"` compared the bound method itself to
        # a string (always False), so typing "yes" never triggered an update.
        if answer.lower() in ("y", "yes"):
            newdata = Entity()
            for key in table_data:
                prompt = True
                if table_data[key]['type'] != 'drop':
                    update = None
                    if table_data[key]['type'] == 'hide':
                        # Hidden fields are carried over without prompting.
                        update = tap[key]
                        prompt = False
                    else:
                        if key in tap:
                            existing = tap[key]
                        else:
                            existing = ''
                    if prompt:
                        update = input(
                            " %s: [%s]: " %
                            (table_data[key]['description'], existing))
                        if (not update):
                            update = existing
                    # Coerce the answer to the schema-declared type.
                    if table_data[key]['type'] == 'hide':
                        newdata[key] = update
                    elif table_data[key]['type'] == 'int':
                        newdata[key] = int(update)
                    elif table_data[key]['type'] == 'double':
                        newdata[key] = float(update)
                    elif table_data[key]['type'] == 'string':
                        newdata[key] = str(update)
                    else:
                        # BUG FIX: corrected "uknown" typo in the error message.
                        print("Error: unknown data type: %s" %
                              table_data[key]['type'])
            print("Updated record: ")
            print(newdata)
            table_service.update_entity(TABLE_NAME_CONFIGURATION, newdata)
class AzureStorageHandler():
    """Persists reddit submissions, comments and keyword recommendations to
    Azure Table Storage; credentials come from Key Vault via `kv`."""

    def __init__(self, kv):
        # kv: Key Vault accessor used to fetch storage account credentials.
        try:
            self.table_service = TableService(
                account_name=kv.get_keyvault_secret("storageAccount-name"),
                account_key=kv.get_keyvault_secret("storageAccount-key"))
            # Quick start code goes here
        except Exception as ex:
            print('Exception:')
            print(ex)

    def insert_submission_entry(self, entry):
        # One row per submission, partitioned by subreddit.
        submission = Entity()
        submission.PartitionKey = entry.subreddit
        submission.RowKey = entry.id
        submission.author = entry.author
        submission.created_utc = entry.created_utc
        submission.flair = entry.flair
        submission.title = entry.title
        # Flatten list of keywords into comma separated string
        submission.title_keywords = ','.join(map(str, entry.title_keywords))
        submission.title_sentiment = entry.title_sentiment
        try:
            submission.body_keywords = ','.join(map(str, entry.body_keywords))
            submission.body_sentiment = entry.body_sentiment
        except AttributeError:
            # Entry has no body fields (e.g. link posts): store empty strings.
            submission.body_keywords = ""
            submission.body_sentiment = ""
        self.table_service.insert_entity('submissions', submission)

    def insert_comment_entry(self, entries):
        # One row per comment, partitioned by the parent submission (link) id.
        for entry in entries:
            comment = Entity()
            comment.PartitionKey = entry.link_id
            comment.RowKey = entry.id
            comment.author = entry.author
            comment.body = entry.body
            comment.created_utc = entry.created_utc
            comment.parent_id = entry.parent_id
            comment.score = entry.score
            comment.subreddit = entry.subreddit
            comment.subreddit_id = entry.subreddit_id
            comment.total_awards_received = entry.total_awards_received
            comment.sentiment = entry.sentiment
            # Flatten list of keywords into comma separated string
            comment.keywords = ','.join(map(str, entry.keywords))
            self.table_service.insert_entity('comments', comment)

    def insert_recommendation_entry(self, entries):
        # Keyed by "<subreddit>_<query_word>" / keyword; an insert conflict
        # falls back to updating the existing row.
        for entry in entries:
            recommendation = Entity()
            recommendation.PartitionKey = "{0}_{1}".format(
                entry.subreddit, entry.query_word)
            recommendation.RowKey = entry.keyword
            recommendation.subreddit = entry.subreddit
            recommendation.query_word = entry.query_word
            recommendation.post_id = ','.join(map(str, entry.post_id))
            recommendation.comment_id = ','.join(map(str, entry.comment_id))
            recommendation.sentiment = entry.sentiment
            recommendation.count = entry.count
            try:
                self.table_service.insert_entity('recommendations',
                                                 recommendation)
            except AzureConflictHttpError as error:
                # print(error)
                subreddit_query_word = recommendation.PartitionKey.split('_')
                print(
                    "The recommendation entry with subreddit = '{0}', search term = '{1}', and keyword = '{2}' already exists in the database. Updating it..."
                    .format(subreddit_query_word[0], subreddit_query_word[1],
                            recommendation.RowKey))
                self.table_service.update_entity('recommendations',
                                                 recommendation)

    def insert_sub_date_entry(self, entry):
        # Tracks the most recent submission per subreddit (upsert semantics).
        sub_date = Entity()
        sub_date.PartitionKey = entry.subreddit
        sub_date.RowKey = entry.title
        sub_date.created_utc = entry.created_utc
        sub_date.post_id = entry.post_id
        try:
            self.table_service.insert_or_replace_entity(
                'mostrecentsubdate', sub_date)
        except TypeError as error:
            print(error)
            print(
                f"The mostrecentsubdate object is formatted incorrectly and was not updated. One of the parameters is not an int, str, bool or datetime, or defined custom EntityProperty. Continuing..."
            )

    def get_entry(self, table, partition_key, row_key):
        # Single-entity lookup by keys.
        return self.table_service.get_entity(table, partition_key, row_key)

    def filter_entries(self, table, filter_string):
        # Query with a raw OData filter string.
        return self.table_service.query_entities(table, filter_string)

    def update_entry(self, table, entity):
        return self.table_service.update_entity(table, entity)

    def delete_entry(self, table, partition_key, row_key):
        return self.table_service.delete_entity(table, partition_key, row_key)
class AzureTableDatabase(object):
    """Wrapper around the call-tracking Azure table: one row per case, with a
    Status column driving the call -> transcribe -> extract pipeline."""

    def __init__(self):
        self.connection = TableService(account_name=storage_account,
                                       account_key=table_connection_string)
        self.table_name = table_name

    def _update_entity(self, record):
        # Stamp modification time on every write so stale rows can be found.
        record.LastModified = datetime.now()
        self.connection.update_entity(self.table_name, record)

    def create_table(self):
        if not self.connection.exists(self.table_name):
            self.connection.create_table(self.table_name)

    def raw_table(self, limit=100):
        """
        Retrieve a list of rows in the table.
        """
        calls = self.connection.query_entities(self.table_name,
                                               num_results=limit)
        return calls

    def list_calls(self, limit=100, select='PartitionKey'):
        """
        Retrieve a set of records that need a phone call
        """
        calls = self.connection.query_entities(self.table_name,
                                               num_results=limit,
                                               select=select)
        return [c.PartitionKey for c in calls]

    def reset_stale_calls(self, time_limit):
        """
        Retrieve calls that are not done and whose last modified time was older than the limit.
        """
        records = self.connection.query_entities(
            self.table_name,
            filter="LastModified lt datetime'{0}' and Status ne '{1}'".format(
                time_limit.date(), Statuses.extracting_done))
        if not records.items:
            raise NoRecordsToProcessError()
        num_records = len(records.items)
        for record in records:
            if 'LastErrorStep' in record:
                # Roll the status back to the step that last errored.
                record.Status = record.LastErrorStep
                del record.LastErrorStep
            record.Status = Statuses.reset_map.get(record.Status, record.Status)
            self._update_entity(record)
        return num_records

    def retrieve_next_record_for_call(self):
        """
        Retrieve a set of records that need a phone call
        """
        records = self.connection.query_entities(
            self.table_name, num_results=1,
            filter="Status eq '{0}'".format(Statuses.new))
        if len(records.items) == 0:
            raise NoRecordsToProcessError()
        record = records.items[0]
        record.Status = Statuses.calling
        self._update_entity(record)
        return record.PartitionKey

    def set_error(self, partition_key, step):
        """
        Reset a row from error state
        """
        record = self.connection.get_entity(self.table_name, partition_key,
                                            partition_key)
        record.Status = Statuses.error
        record['LastErrorStep'] = step
        self._update_entity(record)

    def retrieve_next_record_for_transcribing(self):
        records = self.connection.query_entities(
            self.table_name,
            num_results=1,
            filter="Status eq '{0}'".format(Statuses.recording_ready),
        )
        if not records.items:
            raise NoRecordsToProcessError()
        record = records.items[0]
        record.Status = Statuses.transcribing
        self._update_entity(record)
        return record.CallUploadUrl, record.PartitionKey

    def update_transcript(self, partition_key, transcript, status):
        record = self.connection.get_entity(
            self.table_name,
            partition_key,
            partition_key,
        )
        if status == TranscriptionStatus.success:
            record.CallTranscript = transcript
            record.Status = Statuses.transcribing_done
            record.TranscribeTimestamp = datetime.now()
            self._update_entity(record)
        elif status == TranscriptionStatus.request_error:
            self.set_error(partition_key, Statuses.transcribing)
        else:
            record.Status = Statuses.transcribing_failed
            self._update_entity(record)

    def change_status(self, original_status, new_status):
        records = self.connection.query_entities(
            self.table_name,
            filter="Status eq '{0}'".format(original_status),
        )
        if not records.items:
            return
        for record in records.items:
            record.Status = new_status
            self.connection.update_entity(self.table_name, record)

    def query(self, column, value, limit=1):
        records = self.connection.query_entities(
            self.table_name, num_results=limit,
            filter="{0} eq '{1}'".format(column, value))
        return records

    def retrieve_next_record_for_extraction(self):
        # BUG FIX: this method was defined twice; the later copy silently
        # shadowed the first and wrote via connection.update_entity, skipping
        # the LastModified stamp that reset_stale_calls depends on. Kept one
        # definition using _update_entity, consistent with the other
        # status transitions.
        records = self.connection.query_entities(
            self.table_name, num_results=1,
            filter="Status eq '{0}'".format(Statuses.transcribing_done))
        if not records.items:
            raise NoRecordsToProcessError()
        record = records.items[0]
        record.Status = Statuses.extracting
        self._update_entity(record)
        return record.CallTranscript, record.PartitionKey

    def update_location_date(self, case_number, city, location_confidence,
                             state, zipcode, date):
        record = self.connection.get_entity(self.table_name, case_number,
                                            case_number)
        record.City = city
        record.LocationConfidence = location_confidence
        record.State = state
        record.Zipcode = zipcode
        record.CourtHearingDate = date
        record.Status = Statuses.extracting_done
        self.connection.update_entity(self.table_name, record)

    def upload_new_requests(self, request_ids):
        """
        Upload new request ids to the database
        """
        for request_id in request_ids:
            record = {'PartitionKey': request_id, 'RowKey': request_id,
                      'Status': Statuses.new, 'LastModified': datetime.now()}
            try:
                self.connection.insert_entity(self.table_name, record)
            except AzureConflictHttpError:
                pass  # already exists. silently ignore.

    def update_call_id(self, alien_registration_id, call_id):
        record = self.connection.get_entity(self.table_name,
                                            alien_registration_id,
                                            alien_registration_id)
        record.CallID = call_id
        record.Status = Statuses.calling
        record.CallTimestamp = datetime.now()
        self._update_entity(record)

    def update_azure_path(self, alien_registration_id, azure_path):
        record = self.connection.get_entity(self.table_name,
                                            alien_registration_id,
                                            alien_registration_id)
        record.Status = Statuses.recording_ready
        record.CallUploadUrl = azure_path
        self._update_entity(record)

    def delete_ain(self, ain):
        return self.connection.delete_entity(self.table_name, ain, ain)

    def get_ain(self, ain):
        return self.connection.get_entity(self.table_name, ain, ain)
class AzureOperationsStorage(BasicOperationStorage):
    """
    Implementation of :class:`.interface.IOperationStorage` with Azure Table
    Storage using the default implementation
    :class:`.interface.BasicOperationStorage`

    On creating a connection to the storage is initialized and all needed
    tables are created. If a purge is necessary, tables are not deleted but
    simple the content removed. Table creation can take a while with Azure
    Table Storage.

    As Azure Table Storage only supports two indices, the operations are
    inserted multiple times in different tables to enable multi-index queries.
    """

    def get_retry_exceptions(self):
        # NOTE(review): `(NewConnectionError)` is not a tuple — the parens are
        # redundant. Works if callers accept a single class, but `(NewConnectionError,)`
        # was probably intended; confirm against retry_auto_reconnect.
        return (NewConnectionError)

    @retry_auto_reconnect
    def __init__(self, azure_config, purge=False):
        """Connect to Azure Table Storage and ensure all tables exist.

        :param azure_config: dict with `account`, `key` and optional table
            name overrides
        :param purge: when True, existing table contents are deleted row by row
        """
        super(AzureOperationsStorage, self).__init__()
        if not azure_config:
            raise Exception("No azure table storage configuration provided!")
        self._azure_config = azure_config

        # ensure defaults
        self._azure_config["operation_table"] = self._azure_config.get(
            "operation_table", "operations")
        self._azure_config["address_table"] = self._azure_config.get(
            "address_table", "address")
        self._azure_config["status_table"] = self._azure_config.get(
            "status_table", "status")
        self._azure_config["balances_table"] = self._azure_config.get(
            "balances_table", "balances")

        if not self._azure_config["account"]:
            raise Exception(
                "Please include the azure account name in the config")
        if not self._azure_config["key"]:
            raise Exception(
                "Please include the azure account key in the config")

        self._service = TableService(
            account_name=self._azure_config["account"],
            account_key=self._azure_config["key"])

        # if tables doesnt exist, create it
        self._create_operations_storage(purge)
        self._create_status_storage(purge)
        self._create_address_storage(purge)
        self._create_balances_storage(purge)

    def _debug_print(self, operation):
        # Debug helper only; local import keeps pprint out of the hot path.
        from pprint import pprint
        pprint(operation)

    def _create_address_storage(self, purge):
        """Create (and optionally purge) the three address lookup tables."""
        _varients = ["balance", "historyfrom", "historyto"]
        for variant in _varients:
            tablename = self._azure_config["address_table"] + variant
            if purge:
                try:
                    for item in self._service.query_entities(tablename):
                        self._service.delete_entity(
                            tablename, item["PartitionKey"], item["RowKey"])
                except AzureHttpError:
                    pass
                except AzureMissingResourceHttpError:
                    pass
            # Table creation is eventually consistent; poll until visible.
            while not self._service.exists(tablename):
                self._service.create_table(tablename)
                time.sleep(0.1)

    def _create_status_storage(self, purge):
        """Create (and optionally purge) the status table."""
        if purge:
            try:
                tablename = self._azure_config["status_table"]
                for item in self._service.query_entities(tablename):
                    self._service.delete_entity(
                        tablename, item["PartitionKey"], item["RowKey"])
            except AzureMissingResourceHttpError:
                pass
        while not self._service.exists(self._azure_config["status_table"]):
            self._service.create_table(self._azure_config["status_table"])
            time.sleep(0.1)

    def _create_balances_storage(self, purge):
        """Create (and optionally purge) the balances table."""
        if purge:
            try:
                tablename = self._azure_config["balances_table"]
                for item in self._service.query_entities(tablename):
                    self._service.delete_entity(
                        tablename, item["PartitionKey"], item["RowKey"])
            except AzureMissingResourceHttpError:
                pass
        while not self._service.exists(self._azure_config["balances_table"]):
            self._service.create_table(self._azure_config["balances_table"])
            time.sleep(0.1)

    def _create_operations_storage(self, purge):
        """Create the per-variant operation tables and the key-mapping lambdas.

        Each operation is stored once per variant table with different
        Partition/Row keys so it can be queried by incident id or by status.
        """
        self._operation_varients = [
            "incident", "statuscompleted", "statusfailed", "statusinprogress"
        ]  # "customer"
        self._operation_tables = {}
        for variant in self._operation_varients:
            self._operation_tables[
                variant] = self._azure_config["operation_table"] + variant

        # Maps an operation dict to the Partition/Row keys for each variant.
        self._operation_prep = {
            "statusinprogress": lambda op: {
                "PartitionKey": self._short_digit_hash(op["chain_identifier"]),
                "RowKey": op["chain_identifier"]
            },
            "statuscompleted": lambda op: {
                "PartitionKey": self._short_digit_hash(op["chain_identifier"]),
                "RowKey": op["chain_identifier"]
            },
            "statusfailed": lambda op: {
                "PartitionKey": self._short_digit_hash(op["chain_identifier"]),
                "RowKey": op["chain_identifier"]
            },
            "customer": lambda op: {
                "PartitionKey": op["customer_id"],
                "RowKey": op["chain_identifier"]
            },
            "incident": lambda op: {
                "PartitionKey": self._short_digit_hash(op["incident_id"]),
                "RowKey": op["incident_id"]
            }
        }
        for variant in self._operation_varients:
            if purge:
                try:
                    tablename = self._operation_tables[variant]
                    for item in self._service.query_entities(tablename):
                        self._service.delete_entity(
                            tablename, item["PartitionKey"], item["RowKey"])
                except AzureMissingResourceHttpError:
                    pass
            while not self._service.exists(self._operation_tables[variant]):
                self._service.create_table(self._operation_tables[variant])
                time.sleep(0.1)

    def _get_with_ck(self, variant, operation):
        """Return a copy of `operation` with the variant's composite keys set."""
        with_ck = operation.copy()
        with_ck.update(self._operation_prep[variant](with_ck))
        return with_ck

    def _short_digit_hash(self, value):
        """Hash `value` to a short string used as PartitionKey.

        Algorithm (crc32 default, or sha256) and digit count come from Config.
        """
        hash_type = Config.get("operation_storage", "key_hash", "type",
                               default="crc32")
        if hash_type == "crc32":
            short_hash = hex(zlib.crc32(value.encode(encoding='UTF-8')))
            short_hash = short_hash[2:len(short_hash)]  # strip leading "0x"
        elif hash_type == "sha256":
            checker = hashlib.sha256()
            checker.update(value.encode(encoding='UTF-8'))
            short_hash = checker.hexdigest()
        # NOTE(review): any other configured hash_type leaves short_hash unbound
        # (NameError). Also the `3` here is positional, unlike the `default=`
        # keyword used above — verify Config.get treats it as the default.
        return short_hash[0:Config.get("operation_storage", "key_hash",
                                       "digits", 3)]

    @retry_auto_reconnect
    def track_address(self, address, usage="balance"):
        """Register an address in the address table for the given usage.

        Raises AddressAlreadyTrackedException on duplicate insert.
        """
        address = ensure_address_format(address)
        try:
            short_hash = self._short_digit_hash(address)
            logging.getLogger(__name__).debug("track_address with " +
                                              str(address) + ", hash " +
                                              str(short_hash))
            self._service.insert_entity(
                self._azure_config["address_table"] + usage, {
                    "PartitionKey": short_hash,
                    "RowKey": address,
                    "address": address,
                    "usage": usage
                })
        except AzureConflictHttpError:
            raise AddressAlreadyTrackedException

    @retry_auto_reconnect
    def untrack_address(self, address, usage="balance"):
        """Remove an address (and its cached balance, best effort).

        Raises AddressNotTrackedException if the address row is absent.
        """
        address = ensure_address_format(address)
        try:
            short_hash = self._short_digit_hash(address)
            logging.getLogger(__name__).debug("untrack_address with " +
                                              str(address) + ", hash " +
                                              str(short_hash))
            self._service.delete_entity(
                self._azure_config["address_table"] + usage, short_hash,
                address)
            try:
                self._delete_balance(address)
            except AzureMissingResourceHttpError:
                pass  # no cached balance to remove — fine
        except AzureMissingResourceHttpError:
            raise AddressNotTrackedException()

    @retry_auto_reconnect
    def _get_address(self, address, usage="balance"):
        """Fetch the address row; raises AddressNotTrackedException if absent."""
        try:
            short_hash = self._short_digit_hash(address)
            logging.getLogger(__name__).debug("_get_address with " +
                                              str(address) + ", hash " +
                                              str(short_hash))
            return self._service.get_entity(
                self._azure_config["address_table"] + usage, short_hash,
                address)
        except AzureMissingResourceHttpError:
            raise AddressNotTrackedException()

    def _update(self, operation, status=None):
        """Propagate an operation update to every variant table.

        When `status` is given, the row moves between status tables
        (delete from old, insert into new); otherwise rows are updated in place.
        Raises OperationNotFoundException if the operation is missing.
        """
        try:
            mapping = {
                "in_progress": "statusinprogress",
                "completed": "statuscompleted",
                "failed": "statusfailed"
            }

            # incident table
            operation = self._get_with_ck("incident", operation.copy())
            new_operation = operation  # alias, not a copy — mutations shared
            if status:
                # preserve the original creation timestamp across the update
                tmp = self.get_operation(operation["incident_id"])
                new_operation["timestamp"] = tmp["timestamp"]
                new_operation["status"] = status
            new_operation = self._get_with_ck("incident", new_operation)
            logging.getLogger(__name__).debug(
                "_update: Table " + self._operation_tables["incident"] +
                " PartitionKey " + new_operation["PartitionKey"] + " " +
                new_operation["RowKey"])
            self._service.update_entity(self._operation_tables["incident"],
                                        new_operation)

            # statuscompleted table
            operation = self._get_with_ck("statuscompleted", operation.copy())
            new_operation = operation
            if status:
                tmp = self.get_operation(operation["incident_id"])
                new_operation["timestamp"] = tmp["timestamp"]
                new_operation["status"] = status
            new_operation = self._get_with_ck("statuscompleted", new_operation)
            self._service.update_entity(
                self._operation_tables["statuscompleted"], new_operation)
            logging.getLogger(__name__).debug(
                "_update: Table " + self._operation_tables["statuscompleted"] +
                " PartitionKey " + new_operation["PartitionKey"] + " " +
                new_operation["RowKey"])

            if status:
                # needs delete and insert
                try:
                    self._service.delete_entity(
                        self._operation_tables[mapping[operation["status"]]],
                        operation["PartitionKey"], operation["RowKey"])
                except AzureMissingResourceHttpError:
                    pass
                try:
                    self._service.insert_entity(
                        self._operation_tables[mapping[
                            new_operation["status"]]], new_operation)
                except AzureConflictHttpError:
                    # already exists, try update
                    self._service.update_entity(
                        self._operation_tables[mapping[
                            new_operation["status"]]], new_operation)
            else:
                self._service.update_entity(
                    self._operation_tables[mapping[new_operation["status"]]],
                    new_operation)
        except AzureMissingResourceHttpError:
            raise OperationNotFoundException()

    def _insert(self, operation):
        """Insert an operation into every variant table.

        Raises DuplicateOperationException if any variant row already exists.
        """
        try:
            for variant in self._operation_varients:
                to_insert = operation.copy()
                to_insert.update(self._operation_prep[variant](to_insert))
                # Azure rejects empty keys late; fail fast here instead.
                if not to_insert["PartitionKey"]:
                    raise AzureMissingResourceHttpError()
                if not to_insert["RowKey"]:
                    raise AzureMissingResourceHttpError()
                logging.getLogger(__name__).debug(
                    "_insert: Table " + self._operation_tables[variant] +
                    " PartitionKey " + to_insert["PartitionKey"] + " " +
                    to_insert["RowKey"])
                self._service.insert_entity(self._operation_tables[variant],
                                            to_insert)
        except AzureConflictHttpError:
            raise DuplicateOperationException()

    def _delete(self, operation):
        """Delete an operation from every variant table.

        Raises OperationNotFoundException if any variant row is missing.
        """
        try:
            for variant in self._operation_varients:
                to_delete = operation.copy()
                to_delete.update(self._operation_prep[variant](to_delete))
                self._service.delete_entity(self._operation_tables[variant],
                                            to_delete["PartitionKey"],
                                            to_delete["RowKey"])
        except AzureMissingResourceHttpError:
            raise OperationNotFoundException()

    @retry_auto_reconnect
    def flag_operation_completed(self, operation):
        """Mark an operation completed and refresh the cached balance."""
        # do basics
        operation = super(AzureOperationsStorage,
                          self).flag_operation_completed(operation)

        self._update(operation, status="completed")

        self._ensure_balances(operation)

    @retry_auto_reconnect
    def flag_operation_failed(self, operation, message=None):
        """Mark an operation failed, attaching an optional message."""
        # do basics
        operation = super(AzureOperationsStorage,
                          self).flag_operation_failed(operation)
        operation["message"] = message
        self._update(operation, status="failed")

    @retry_auto_reconnect
    def insert_operation(self, operation):
        """Insert a new operation; still verifies balances on duplicates.

        The duplicate error (if any) is re-raised only after the balance
        check ran, so balances stay consistent even for replayed operations.
        """
        # do basics
        operation = super(AzureOperationsStorage,
                          self).insert_operation(operation)

        error = None
        try:
            self._insert(operation)
        except DuplicateOperationException as e:
            error = e

        try:
            # always check if balances are ok
            if operation["status"] == "completed":
                self._ensure_balances(operation)
        except BalanceConcurrentException as e:
            if error is None:
                error = e

        if error is not None:
            raise error

    @retry_auto_reconnect
    def _delete_balance(self, address, if_match='*'):
        """Delete the cached balance row for `address` (etag-guarded)."""
        self._service.delete_entity(self._azure_config["balances_table"],
                                    self._short_digit_hash(address),
                                    address,
                                    if_match=if_match)

    @retry_auto_reconnect
    def _ensure_balances(self, operation):
        """Apply `operation` to the cached balance of the affected address.

        Enforces (block, tx, op) ordering — out-of-order replays raise
        BalanceConcurrentException. A resulting all-zero balance deletes the
        row; otherwise the row is inserted or updated with etag protection.
        """
        affected_address = get_tracking_address(operation)
        logging.getLogger(__name__).debug("_ensure_balances: with " +
                                          operation["chain_identifier"] +
                                          " for address " +
                                          str(affected_address))
        try:
            self._get_address(affected_address)
        except AddressNotTrackedException:
            # delte if exists and return
            try:
                self._delete_balance(affected_address)
            except AzureMissingResourceHttpError:
                pass
            return

        try:
            balance_dict = self._service.get_entity(
                self._azure_config["balances_table"],
                self._short_digit_hash(affected_address), affected_address)
            insert = False
        except AzureMissingResourceHttpError as e:
            balance_dict = {"address": affected_address}
            balance_dict["PartitionKey"] = self._short_digit_hash(
                balance_dict["address"])
            balance_dict["RowKey"] = balance_dict["address"]
            insert = True

        # Reject anything at or before the last applied (block, tx, op) triple.
        if operation["block_num"] < balance_dict.get("blocknum", 0):
            raise BalanceConcurrentException()
        elif operation["block_num"] == balance_dict.get("blocknum", 0) and\
                operation["txnum"] < balance_dict.get("txnum", 0):
            raise BalanceConcurrentException()
        elif operation["block_num"] == balance_dict.get("blocknum", 0) and\
                operation["txnum"] == balance_dict.get("txnum", 0) and\
                operation["opnum"] <= balance_dict.get("opnum", 0):
            raise BalanceConcurrentException()

        balance_dict["blocknum"] = max(balance_dict.get("blocknum", 0),
                                       operation["block_num"])
        balance_dict["txnum"] = max(balance_dict.get("txnum", 0),
                                    operation["tx_in_block"])
        balance_dict["opnum"] = max(balance_dict.get("opnum", 0),
                                    operation["op_in_tx"])

        total = 0

        addrs = split_unique_address(affected_address)
        # Column name is "balance" + asset suffix, e.g. "1.3.0" -> "balance0".
        asset_id = "balance" + operation["amount_asset_id"].split("1.3.")[1]
        if addrs["account_id"] == operation["from"]:
            # internal transfer and withdraw

            # negative
            balance = balance_dict.get(asset_id, 0)
            balance_dict[asset_id] = balance - operation["amount_value"]

            # fee as well
            asset_id = operation["fee_asset_id"]
            balance = balance_dict.get(asset_id, 0)
            balance_dict[asset_id] = balance - operation["fee_value"]
        elif addrs["account_id"] == operation["to"]:
            # deposit

            # positive
            balance = balance_dict.get(asset_id, 0)
            balance_dict[asset_id] = balance + operation["amount_value"]

            # fees were paid by someone else
        else:
            raise InvalidOperationException()

        for key, value in balance_dict.items():
            if key.startswith("balance"):
                total = total + value

        if total == 0:
            if not insert:
                try:
                    self._delete_balance(affected_address,
                                         if_match=balance_dict.etag)
                except AzureMissingResourceHttpError:
                    pass
            return

        # may be updated or inserted, total > 0
        if (insert):
            try:
                self._service.insert_entity(
                    self._azure_config["balances_table"], balance_dict)
            except AzureMissingResourceHttpError:
                raise OperationStorageException(
                    "Critical error in database consistency")
        else:
            try:
                self._service.update_entity(
                    self._azure_config["balances_table"],
                    balance_dict,
                    if_match=balance_dict.etag)
            except AzureConflictHttpError:
                raise OperationStorageException(
                    "Critical error in database consistency")

    @retry_auto_reconnect
    def insert_or_update_operation(self, operation):
        """Insert an operation, or promote an existing in_progress one.

        For withdrawals the incident id is not stored on-chain, so a matching
        in_progress row supplies the original incident_id before completion.
        """
        # do basics
        operation = super(AzureOperationsStorage,
                          self).insert_operation(operation)

        # check if this is from in_progress to complete (for withdrawals we need to find incident id as its
        # not stored onchain)
        try:
            logging.getLogger(__name__).debug(
                "insert_or_update_operation: check if in_progress with " +
                str(operation["chain_identifier"]) + " exists")
            existing_operation = self.get_operation_by_chain_identifier(
                "in_progress", operation["chain_identifier"])
            logging.getLogger(__name__).debug(
                "insert_or_update_operation: found existing in_progress operation"
            )
            if not existing_operation["incident_id"] == operation["incident_id"] and\
                    operation["incident_id"] == operation["chain_identifier"]:
                logging.getLogger(__name__).debug(
                    "insert_or_update_operation: using preset incident_id " +
                    str(existing_operation["incident_id"]))
                operation["incident_id"] = existing_operation["incident_id"]
        except OperationNotFoundException:
            existing_operation = None

        if existing_operation is None:
            try:
                logging.getLogger(__name__).debug(
                    "insert_or_update_operation: attempting insert")
                error = None
                try:
                    self._insert(operation)
                except DuplicateOperationException as e:
                    error = e
                try:
                    # always check if balances are ok
                    if operation["status"] == "completed":
                        self._ensure_balances(operation)
                except BalanceConcurrentException as e:
                    if error is None:
                        error = e
                if error is not None:
                    raise error
            except DuplicateOperationException as ex:
                logging.getLogger(__name__).debug(
                    "insert_or_update_operation: fallback to update")
                # could be an update to completed ...
                if operation.get("block_num"):
                    try:
                        operation.pop("status")
                        self.flag_operation_completed(operation)
                    except OperationNotFoundException:
                        raise ex
                else:
                    raise ex
        else:
            logging.getLogger(__name__).debug(
                "insert_or_update_operation: attempting update")
            if operation.get("block_num"):
                try:
                    operation.pop("status")
                    self.flag_operation_completed(operation)
                except OperationNotFoundException:
                    # NOTE(review): `ex` is not defined on this branch — this
                    # re-raise would be a NameError. Probably meant to re-raise
                    # the OperationNotFoundException itself; confirm and fix.
                    raise ex

    @retry_auto_reconnect
    def delete_operation(self, operation_or_incident_id):
        """Delete an operation given either its dict or its incident id."""
        # do basics
        operation = super(AzureOperationsStorage,
                          self).delete_operation(operation_or_incident_id)

        if type(operation_or_incident_id) == str:
            operation = self.get_operation(operation_or_incident_id)
        else:
            operation = operation_or_incident_id
        self._delete(operation)

    @retry_auto_reconnect
    def get_operation_by_chain_identifier(self, status, chain_identifier):
        """Fetch an operation from the table matching `status` by chain id.

        Raises OperationNotFoundException when absent.
        """
        mapping = {
            "in_progress": "statusinprogress",
            "completed": "statuscompleted",
            "failed": "statusfailed"
        }
        try:
            operation = self._service.get_entity(
                self._operation_tables[mapping[status]],
                self._short_digit_hash(chain_identifier), chain_identifier)
            # strip Azure bookkeeping columns before returning
            operation.pop("PartitionKey")
            operation.pop("RowKey")
            operation.pop("Timestamp")
            operation.pop("etag")
        except AzureMissingResourceHttpError:
            raise OperationNotFoundException()
        return operation

    @retry_auto_reconnect
    def get_operation(self, incident_id):
        """Fetch an operation by incident id from the incident table.

        Raises OperationNotFoundException when absent.
        """
        try:
            short_hash = self._short_digit_hash(incident_id)
            logging.getLogger(__name__).debug("get_operation with " +
                                              str(incident_id) + ", hash " +
                                              str(short_hash))
            operation = self._service.get_entity(
                self._operation_tables["incident"], short_hash, incident_id)
            operation.pop("PartitionKey")
            operation.pop("RowKey")
            operation.pop("Timestamp")
            operation.pop("etag")
        except AzureMissingResourceHttpError:
            raise OperationNotFoundException()
        return operation

    @retry_auto_reconnect
    def get_balances(self, take, continuation=None, addresses=None,
                     recalculate=False):
        """Return cached balances as {address: {asset_id: value, ...}}.

        Supports paging via a JSON-encoded continuation marker; the result
        carries a "continuation" key for the next page (or None).
        """
        if recalculate:
            raise Exception(
                "Currently not supported due to memo change on withdraw")
            # NOTE(review): unreachable — dead code kept behind the raise above.
            return self._get_balances_recalculate(take, continuation,
                                                  addresses)
        else:
            if continuation is not None:
                try:
                    continuation_marker = json.loads(continuation)
                except TypeError:
                    raise InputInvalidException()
                except JSONDecodeError:
                    raise InputInvalidException()
                balances = self._service.query_entities(
                    self._azure_config["balances_table"],
                    num_results=take,
                    marker=continuation_marker)
            else:
                balances = self._service.query_entities(
                    self._azure_config["balances_table"], num_results=take)
            return_balances = {}
            for address_balance in balances:
                return_balances[address_balance["address"]] = {
                    "block_num": address_balance["blocknum"]
                }
                for key, value in address_balance.items():
                    if key.startswith("balance"):
                        # column "balanceN" -> asset id "1.3.N"
                        asset_id = "1.3." + key.split("balance")[1]
                        return_balances[
                            address_balance["address"]][asset_id] = value
            return_balances["continuation"] = None
            if balances.next_marker:
                return_balances["continuation"] = json.dumps(
                    balances.next_marker)
            return return_balances

    @retry_auto_reconnect
    def _get_balances_recalculate(self, take, continuation=None,
                                  addresses=None):
        """Recompute balances from completed operations (not from the cache)."""
        address_balances = collections.defaultdict(
            lambda: collections.defaultdict())

        if not addresses:
            if continuation is not None:
                try:
                    continuation_marker = json.loads(continuation)
                except TypeError:
                    raise InputInvalidException()
                except JSONDecodeError:
                    raise InputInvalidException()
                addresses = self._service.query_entities(
                    self._azure_config["address_table"] + "balance",
                    num_results=take,
                    marker=continuation_marker)
            else:
                addresses = self._service.query_entities(
                    self._azure_config["address_table"] + "balance",
                    num_results=take)
            if addresses.next_marker:
                address_balances["continuation"] = json.dumps(
                    addresses.next_marker)
            addresses = [x["address"] for x in addresses]

        if type(addresses) == str:
            addresses = [addresses]

        for address in addresses:
            addrs = split_unique_address(address)
            max_block_number = 0
            for operation in self.get_operations_completed(
                    filter_by={"customer_id": addrs["customer_id"]}):
                this_block_num = operation["block_num"]

                asset_id = operation["amount_asset_id"]

                if addrs["account_id"] == operation["from"]:
                    # negative
                    balance = address_balances[address].get(asset_id, 0)
                    address_balances[address][asset_id] =\
                        balance - operation["amount_value"]

                    # fee as well
                    asset_id = operation["fee_asset_id"]
                    balance = address_balances[address].get(asset_id, 0)
                    address_balances[address][asset_id] =\
                        balance - operation["fee_value"]
                elif addrs["account_id"] == operation["to"]:
                    # positive
                    balance = address_balances[address].get(asset_id, 0)
                    address_balances[address][asset_id] =\
                        balance + operation["amount_value"]
                else:
                    raise InvalidOperationException()
                max_block_number = max(max_block_number, this_block_num)
            if max_block_number > 0:
                address_balances[address]["block_num"] = max_block_number

        # do not return default dicts
        for key, value in address_balances.items():
            if type(value) == collections.defaultdict:
                address_balances[key] = dict(value)
        return dict(address_balances)

    def _parse_filter(self, filter_by):
        """Normalize a user filter dict to a single column filter.

        Mutates `filter_by` (pop) and returns at most one criterion;
        unknown keys raise.
        """
        if filter_by:
            if filter_by.get("customer_id"):
                return {"customer_id": filter_by.pop("customer_id")}
            if filter_by.get("address"):
                addrs = split_unique_address(filter_by.pop("address"))
                return {"customer_id": addrs["customer_id"]}
            if filter_by.get("from"):
                addrs = split_unique_address(filter_by.pop("from"))
                return {"from": addrs["account_id"]}
            if filter_by.get("to"):
                addrs = split_unique_address(filter_by.pop("to"))
                return {"to": addrs["account_id"]}
            if filter_by:
                raise Exception("Filter not supported")
        return {}

    def _filter_dict_to_string(self, filter_dict, partition_key=None):
        """Render a filter dict as an Azure Table query filter string."""
        filter_str = None
        for key, value in filter_dict.items():
            if partition_key == key:
                key = "PartitionKey"
            # NOTE(review): the delimiter handling below looks broken — the
            # " and " value is immediately overwritten and filter_str is
            # re-assigned (not appended), so only the last criterion survives.
            # Harmless today since _parse_filter yields at most one entry,
            # but confirm before passing multi-key dicts.
            if filter_str is not None:
                delimiter = " and "
            delimiter = ""
            filter_str = delimiter + key + " eq '" + value + "'"
        return filter_str

    @retry_auto_reconnect
    def get_operations_in_progress(self, filter_by=None):
        """List operations in the in_progress table, optionally filtered."""
        mapping = {
            "in_progress": "statusinprogress",
            "completed": "statuscompleted",
            "failed": "statusfailed"
        }
        filter_dict = {}
        filter_dict.update(self._parse_filter(filter_by))
        filter_str = self._filter_dict_to_string(filter_dict, "status")
        return list(
            self._service.query_entities(
                self._operation_tables[mapping["in_progress"]], filter_str))

    @retry_auto_reconnect
    def get_operations_completed(self, filter_by=None):
        """List operations in the completed table, optionally filtered."""
        mapping = {
            "in_progress": "statusinprogress",
            "completed": "statuscompleted",
            "failed": "statusfailed"
        }
        filter_dict = {}
        filter_dict.update(self._parse_filter(filter_by))
        filter_str = self._filter_dict_to_string(filter_dict, "status")
        return list(
            self._service.query_entities(
                self._operation_tables[mapping["completed"]], filter_str))

    @retry_auto_reconnect
    def get_operations_failed(self, filter_by=None):
        """List operations in the failed table, optionally filtered."""
        mapping = {
            "in_progress": "statusinprogress",
            "completed": "statuscompleted",
            "failed": "statusfailed"
        }
        filter_dict = {}
        filter_dict.update(self._parse_filter(filter_by))
        filter_str = self._filter_dict_to_string(filter_dict, "status")
        return list(
            self._service.query_entities(
                self._operation_tables[mapping["failed"]], filter_str))

    @retry_auto_reconnect
    def get_last_head_block_num(self):
        """Return the last processed head block number, or 0 if unset."""
        try:
            document = self._service.get_entity(
                self._azure_config["status_table"], "head_block_num", "last")
            return document["last_head_block_num"]
        except AzureMissingResourceHttpError:
            return 0

    @retry_auto_reconnect
    def set_last_head_block_num(self, head_block_num):
        """Persist a new head block number; must be strictly increasing."""
        current_last = self.get_last_head_block_num()
        if current_last >= head_block_num:
            raise Exception("Marching backwards not supported! Last: " +
                            str(current_last) + " New: " +
                            str(head_block_num))
        self._service.insert_or_replace_entity(
            self._azure_config["status_table"], {
                "PartitionKey": "head_block_num",
                "RowKey": "last",
                "last_head_block_num": head_block_num
            })
class AzureCosmosDb(TableStorage):
    """Azure CosmosDB provider for Table Storage."""

    def __init__(self, config: AzureCosmosDbConfig):
        """Initialize the table client.

        :param config: AzureCosmosDbConfig with account_name, account_key
            and table_name
        """
        self._table_service = TableService(account_name=config.account_name,
                                           account_key=config.account_key)
        # Renamed from `_tableName` for consistent snake_case with
        # `_table_service`; attribute is private, so no callers break.
        self._table_name = config.table_name

    def check_entry_exists(self, entry):
        """Check if entry exists in table.

        :param entry: Dictionary with PartitionKey and RowKey fields
        :return: True if entry exists
        """
        try:
            self.query(entry['PartitionKey'], entry['RowKey'])
            return True
        except Exception:
            # Any lookup failure (notably a missing-entity error from the
            # service) is treated as "does not exist".
            return False

    def write(self, resource):
        """Write resource to table (insert new or update existing).

        :param resource: Expecting Resource object (see Common.Contracts.Resource)
        :return: None
        """
        entry = resource.to_dict()
        prepared = entry_storage.EntryOperations.prepare_entry_for_insert(
            entry)
        if not self.check_entry_exists(prepared):
            self._table_service.insert_entity(self._table_name, prepared)
        else:
            self._table_service.update_entity(self._table_name, prepared)

    def query(self, partition_key, row_key):
        """Get entry with specified partition and row keys.

        :param partition_key: Partition key for entry
        :param row_key: Row key for entry
        :return: The entity; raises the service's missing-resource error if
            the entity does not exist (it is NOT returned as None)
        """
        task = self._table_service.get_entity(self._table_name, partition_key,
                                              row_key)
        return task

    def query_list(self):
        """Get entities from table.

        :return: List of entities from table
        """
        return self._table_service.query_entities(self._table_name)

    def delete(self, partition_key, row_key):
        """Delete entry with specified partition and row keys.

        :param partition_key: Partition key for entry
        :param row_key: Row key for entry
        :return: None
        """
        self._table_service.delete_entity(self._table_name, partition_key,
                                          row_key)

    @staticmethod
    def create():
        """Initialize AzureCosmosDb service.

        :return: AzureCosmosDb service object
        """
        config = AzureConfig()
        cosmos_storage = AzureCosmosDb(config.cosmos_storage_config)
        return cosmos_storage
def update_entity_state(table_service: "TableService", accountId: str,
                        tripName: str, entityList: list, table_name: str):
    """Mark every entity in *entityList* as archived and persist each one.

    Each entity dict gets its 'status' field set to 'archived' (in place)
    and is then written back via the table service.

    :param table_service: TableService client used to persist the updates
        (annotation is a forward reference so the module imports without the
        azure SDK in scope)
    :param accountId: account identifier (currently unused here; kept for
        interface compatibility with callers)
    :param tripName: trip name (currently unused here; kept for interface
        compatibility with callers)
    :param entityList: list of entity dicts to archive; annotation fixed from
        the invalid `[]` literal to `list`
    :param table_name: name of the table the entities belong to
    :return: None
    """
    for entity in entityList:
        entity['status'] = 'archived'
        table_service.update_entity(table_name, entity)