def migrate_notification_keys(table_service: TableService) -> None:
    table_name = "Notification"
    notifications = table_service.query_entities(
        table_name, select="PartitionKey,RowKey,config")
    count = 0
    for entry in notifications:
        try:
            # Rows whose PartitionKey already parses as a UUID have been
            # migrated; skip them.
            UUID(entry.PartitionKey)
            continue
        except ValueError:
            pass
        # Swap PartitionKey and RowKey, then delete the old row.
        table_service.insert_or_replace_entity(
            table_name,
            {
                "PartitionKey": entry.RowKey,
                "RowKey": entry.PartitionKey,
                "config": entry.config,
            },
        )
        table_service.delete_entity(table_name, entry.PartitionKey,
                                    entry.RowKey)
        count += 1
    print("migrated %s rows" % count)
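# A minimal invocation sketch for the migration above, assuming the legacy
# azure-cosmosdb-table SDK and the imports the function relies on; the
# account name and key are placeholders.
from uuid import UUID  # used by migrate_notification_keys

from azure.cosmosdb.table.tableservice import TableService

if __name__ == "__main__":
    service = TableService(account_name="myaccount", account_key="<key>")
    migrate_notification_keys(service)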
class StorageManager:
    def __init__(self, table_name=None):
        self.azure_storage_name = cnf.get('credentials', 'azure_storage_name')
        self.azure_storage_key = cnf.get('credentials', 'azure_storage_key')
        self.table_service = TableService(account_name=self.azure_storage_name,
                                          account_key=self.azure_storage_key)
        self.table_name = table_name if table_name is not None else cnf.get(
            'resources', 'table_name')

    def create_table(self):
        self.table_service.create_table(self.table_name)

    def upload_data(self, entities):
        # Count records to upload
        num_entities = len(entities)
        # Upload record by record and print progress
        time_start = time.time()
        for i, entity in enumerate(entities):
            self.table_service.insert_or_replace_entity(self.table_name,
                                                        entity)
            print_uploading_state(i + 1, num_entities, self.table_name)
        print_successful_upload_state(num_entities, self.table_name,
                                      time.time() - time_start)

    def query_entities(self, query_filter=None, query_selector=None):
        return self.table_service.query_entities(self.table_name,
                                                 filter=query_filter,
                                                 select=query_selector)
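# A minimal usage sketch for StorageManager, assuming cnf points at a config
# with the credentials/resources sections the class reads; table and entity
# values are placeholders.
manager = StorageManager(table_name='people')
manager.create_table()
manager.upload_data([
    {'PartitionKey': 'p1', 'RowKey': '001', 'name': 'Ada'},
    {'PartitionKey': 'p1', 'RowKey': '002', 'name': 'Grace'},
])
print(list(manager.query_entities("PartitionKey eq 'p1'")))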
def main(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')

    # Connect to Azure Table Storage
    table_service = TableService(
        connection_string=os.environ['AzureWebJobsStorage'])
    if not table_service.exists('intents'):
        table_service.create_table('intents')

    req_body = req.get_json()
    if req_body:
        # Create row to be saved on Azure Table Storage
        print(req_body.get('ConversationId'))
        data = req_body
        data["PartitionKey"] = req_body.get('ConversationId')
        data["RowKey"] = req_body.get('MessageId')

        # Save row on Azure Table Storage
        table_service.insert_or_replace_entity('intents', data)
        return func.HttpResponse(
            f"Row {req_body.get('MessageId')} for "
            f"{req_body.get('ConversationId')} added"
        )
    else:
        return func.HttpResponse("Please pass valid request body",
                                 status_code=400)
def main(req: func.HttpRequest) -> func.HttpResponse:
    sender = req.form['From']
    message = req.form['Body']

    # Try to parse the message into location, minimum temperature and
    # maximum temperature
    try:
        message = message.strip()
        maxIndex = message.rindex(' ')
        minIndex = message.rindex(' ', 0, maxIndex)
        maxInput = message[maxIndex + 1:]
        minInput = message[minIndex + 1:maxIndex]
        location = message[:minIndex]
        maxTemp = float(maxInput.lower().strip('c'))
        minTemp = float(minInput.lower().strip('c'))
    except ValueError:
        return func.HttpResponse(
            'INVALID FORMAT. USE: Location MinTemp MaxTemp. '
            'EXAMPLE: Platz der Republik 1, Berlin, Germany 3C 25C'
        )

    MAPS_KEY = os.environ.get('AZURE_MAPS_SUBSCRIPTION_KEY')
    requesturl = (
        f'https://atlas.microsoft.com/search/address/json'
        f'?subscription-key={MAPS_KEY}&api-version=1.0&limit=1'
        f'&query={urllib.parse.quote(location, safe="")}'
    )
    res = urllib.request.urlopen(requesturl)
    res_body = res.read()
    res_json = json.loads(res_body.decode("utf-8"))

    # This is the standardized way the location's address is returned
    location_normalized = res_json['results'][0]['address']['freeformAddress']
    lat = res_json['results'][0]['position']['lat']
    lon = res_json['results'][0]['position']['lon']
    coordinates = f'{lat},{lon}'

    record = dict()
    record['max'] = maxTemp
    record['min'] = minTemp
    record['coordinates'] = coordinates
    record['location'] = location_normalized
    record['number'] = sender
    record['PartitionKey'] = f'{sender}'
    record['RowKey'] = f'{sender}:{coordinates}'

    try:
        STORAGE_CONNECTION_STRING = os.environ.get('AzureWebJobsStorage')
        service = TableService(connection_string=STORAGE_CONNECTION_STRING)
        service.insert_or_replace_entity('alerts', record)
    except Exception as err:
        logging.error(f'{err}')
        return func.HttpResponse(
            'An error occurred saving your alert. Please try again later.')

    return func.HttpResponse(
        f'Alert for {location_normalized} at {coordinates} '
        f'with Max:{maxTemp}C and Min:{minTemp}C saved.'
    )
def updatetable(currency, day, rbf, linear, poly, accuracy):
    print("Uploading: " + str(currency) + str(day) + str(rbf) +
          str(linear) + str(poly) + str(accuracy))
    table_service = TableService(
        connection_string=tablestorageconnectionstring)
    entity = {
        'PartitionKey': currency,
        'RowKey': day,
        'RBF': EntityProperty(EdmType.DOUBLE, rbf),
        'Linear': EntityProperty(EdmType.DOUBLE, linear),
        'Polynomial': EntityProperty(EdmType.DOUBLE, poly),
        'Accuracy': EntityProperty(EdmType.DOUBLE, accuracy)
    }
    table_service.insert_or_replace_entity('PredictionConfiguration', entity)
def put_object(obj,
               table_name='default',
               account_name='cloudmaticafunc9b4c',
               account_key='YOUR_KEY',
               partition_key='default',
               row_key=None):
    table_service = TableService(account_name=account_name,
                                 account_key=account_key)
    if not table_service.exists(table_name):
        table_service.create_table(table_name)
    if not row_key:
        row_key = str(uuid.uuid4())
    obj['PartitionKey'] = partition_key
    obj['RowKey'] = row_key
    table_service.insert_or_replace_entity(table_name, obj)
    return obj
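# A minimal usage sketch for put_object; the table name and account key are
# placeholders, and the call hits live storage, so this is illustration only.
saved = put_object({'name': 'widget', 'price': 9.99},
                   table_name='inventory',
                   account_key='<key>')
print(saved['RowKey'])  # the generated UUID row key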
def update_table(currency, day, rbf, linear, poly, actual):
    print("Updating table: ",
          "\nCurrency: ", currency,
          "\nDay: ", day,
          "\nRBF: ", rbf,
          "\nLinear: ", linear,
          "\nPolynomial: ", poly,
          "\nActual: ", actual)
    table_service = TableService(
        connection_string=tablestorageconnectionstring)
    entity = {
        'PartitionKey': currency,
        'RowKey': day,
        'RBF': EntityProperty(EdmType.DOUBLE, rbf),
        'Linear': EntityProperty(EdmType.DOUBLE, linear),
        'Polynomial': EntityProperty(EdmType.DOUBLE, poly),
        'Actual': EntityProperty(EdmType.DOUBLE, actual)
    }
    table_service.insert_or_replace_entity('PredictionData', entity)
class AzureTable():
    def __init__(self, account_name, account_key):
        self.table_service = TableService(account_name=account_name,
                                          account_key=account_key)

    def create_table(self, table_name):
        return self.table_service.create_table(table_name)

    def exists_table(self, table_name):
        return self.table_service.exists(table_name)

    def insert_or_replace_entity(self, table_name, partition_key, row_key,
                                 **kwargs):
        try:
            entity = self.table_service.get_entity(table_name, partition_key,
                                                   row_key)
        except Exception:
            # Insert a new entity
            entity = {'PartitionKey': partition_key, 'RowKey': row_key}
        for (k, v) in kwargs.items():
            entity[k] = v
        return self.table_service.insert_or_replace_entity(table_name, entity)

    def insert_or_replace_entity2(self, table_name, entity):
        return self.table_service.insert_or_replace_entity(table_name, entity)

    def insert_entity(self, table_name, entity):
        return self.table_service.insert_entity(table_name, entity)

    def update_entity(self, table_name, entity):
        return self.table_service.update_entity(table_name, entity)

    def get_entity(self, table_name, partition_key, row_key):
        return self.table_service.get_entity(table_name, partition_key,
                                             row_key)

    def delete_entity(self, table_name, partition_key, row_key):
        self.table_service.delete_entity(table_name, partition_key, row_key)

    def delete_table(self, table_name):
        return self.table_service.delete_table(table_name)

    def get_entities(self, table_name, partition_key):
        query_filter = "PartitionKey eq '{0}'".format(partition_key)
        return self.table_service.query_entities(table_name, query_filter)
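# A minimal usage sketch for the AzureTable wrapper above; the account name,
# key, and table values are placeholders.
table = AzureTable(account_name='myaccount', account_key='<key>')
if not table.exists_table('tasks'):
    table.create_table('tasks')
table.insert_or_replace_entity('tasks', 'home', '001',
                               description='Take out the garbage',
                               priority=100)
for entity in table.get_entities('tasks', 'home'):
    print(entity.RowKey, entity.description)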
def main(msg) -> None:
    az_config = config.DefaultConfig()
    blob_name = msg  # .get_body().decode('utf-8')
    logging.info('Python queue trigger function processed a queue item: %s',
                 blob_name)

    start_time = datetime.utcnow()
    try:
        records = pf.process_blob(blob_name)
        status = 'Success'
        error = ''
    except Exception as e:
        traceback_str = ''.join(traceback.format_tb(e.__traceback__))
        error = str(e) + ": " + traceback_str
        status = 'Failed'
        records = 0

    # Write record in diagnostic table
    end_time = datetime.utcnow()
    duration = str(end_time - start_time)
    row_key = str(uuid.uuid4())
    data = {
        'PartitionKey': blob_name,
        'RowKey': row_key,
        'Records': records,
        'Status': status,
        'Error': error,
        'StartTime': str(start_time),
        'EndTime': str(end_time),
        'Duration': duration,
        'Table': az_config.OUTPUT_TABLE
    }
    logging.info(
        f'Starting write to storage table {az_config.DIAGNOSTIC_TABLE}.')
    table_service = TableService(
        connection_string=az_config.STORAGE_CONNECTION)
    table_service.insert_or_replace_entity(az_config.DIAGNOSTIC_TABLE, data,
                                           timeout=None)
def create_orphan_record(url, unique_id, partition_key, short_id,
                         instance_name, account_key):
    start = time.time()
    utcnow = datetime.datetime.utcnow().isoformat()
    print("Creating orphan record")

    # Each ingested (and successfully processed) file has a unique record
    # containing information and a list of previews.
    orphan_record = {
        'PartitionKey': partition_key,  # using tree structure for partition key a good idea? #possiblybadidea #possiblygoodidea
        'RowKey': short_id,  # lookup key for this asset
        'uid': unique_id,  # globally unique id
        'url': url,  # original asset url
        'it': utcnow  # ingestion_time
    }

    table_service = TableService(account_name=instance_name,
                                 account_key=account_key)
    table_service.insert_or_replace_entity('orphans', orphan_record)
    print("orphan_record inserted in {} sec".format(time.time() - start))
class State:
    def __init__(self):
        with open('./vardhamanbot/bot/config/app.json') as app_config_file:
            app_config = json.load(app_config_file)
        self.tableservice = TableService(app_config["ats_name"],
                                         app_config["ats_key"])
        self.tableservice.retry = ExponentialRetry(initial_backoff=30,
                                                   increment_base=2,
                                                   max_attempts=20).retry

    def getStudent(self, userId):
        try:
            # Try to get the user data based on their year
            return self.tableservice.get_entity('vbotusers', "VMEG", userId)
        except azure.common.AzureMissingResourceHttpError:
            return {}

    def insertOrUpdateStudent(self, studentdata):
        # Partition the data based on year
        studentdata["PartitionKey"] = "VMEG"
        self.tableservice.insert_or_replace_entity("vbotusers", studentdata)
def main(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')

    # Reject non-JSON bodies up front; otherwise req_body would be unbound
    # below.
    try:
        req_body = req.get_json()
    except ValueError:
        return func.HttpResponse("Request body must be valid JSON",
                                 status_code=400)

    emotion = req_body.get('emotion')
    if not emotion:
        return func.HttpResponse("Client did not submit an emotion",
                                 status_code=400)
    activity = req_body.get('activity')
    if not activity:
        return func.HttpResponse("Client did not submit an activity",
                                 status_code=400)
    notes = req_body.get('notes')
    time_generated = datetime.datetime.now().timestamp()
    id = str(uuid.uuid4())

    # Connect to Cosmos
    the_connection_string = os.environ['COSMOS_CXN_STRING']
    table_service = TableService(endpoint_suffix="table.cosmos.azure.com",
                                 connection_string=the_connection_string)

    # Insert/Replace
    new_body = req_body
    new_body["RowKey"] = id
    new_body["PartitionKey"] = '1'
    new_body["TimeGenerated"] = time_generated
    insert = table_service.insert_or_replace_entity('emotions', new_body)
    del new_body["PartitionKey"]
    del new_body["RowKey"]
    new_body["id"] = id

    if not insert:
        return func.HttpResponse("Database Error", status_code=500)
    # emotion and activity are guaranteed non-empty by the checks above
    return func.HttpResponse(
        f"""Hello. This HTTP triggered function executed successfully.
You submitted an emotion: {emotion}
an activity: {activity}
and some notes: {notes}
at time: {time_generated}
""")
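# A minimal local-test sketch for the function above, assuming the
# azure-functions package; COSMOS_CXN_STRING must point at a real Table API
# account for the storage call to succeed.
import json
import azure.functions as func

req = func.HttpRequest(
    method='POST',
    url='/api/emotions',
    body=json.dumps({'emotion': 'happy', 'activity': 'hiking',
                     'notes': 'sunny day'}).encode('utf-8'))
resp = main(req)
print(resp.status_code)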
def sentiment_analysis(client):
    documents = [comments]
    date = datetime.now()
    id = shortuuid.uuid()
    filename = str(id) + ".json"

    response = client.analyze_sentiment(documents=documents)[0]
    print("Document Sentiment: {}".format(response.sentiment))
    sentiment = response.sentiment

    the_connection_string = "your connection string"
    table_service = TableService(endpoint_suffix="table.cosmos.azure.com",
                                 connection_string=the_connection_string)
    data = {
        'email': email,
        'spam_note': spam_note,
        'validEmail': validEmail,
        'mxRecord': mxRecord,
        'comment': comments,
        'datetime': str(date),
        'sentiment': sentiment,
        'PartitionKey': 'Zuradigital',
        'RowKey': id
    }
    table_service.insert_or_replace_entity('zuradigital', data)
    body = json.dumps(data, sort_keys=True, indent=5)
    print(body)
def table():
    account_name = config.STORAGE_ACCOUNT_NAME
    account_key = config.STORAGE_ACCOUNT_KEY
    table_service = TableService(account_name=account_name,
                                 account_key=account_key)
    table_name = config.TABLE_NAME
    # table_service.create_table(table_name)

    imageId = str(uuid.uuid4())
    task = Entity()
    task.PartitionKey = 'dlws'
    task.RowKey = imageId
    task.description = 'test'
    table_service.insert_or_replace_entity(table_name, task)

    task = table_service.get_entity(table_name, 'dlws', imageId)
    print(task.description)

    tasks = table_service.query_entities('tasktable')
    for task in tasks:
        print(task.description)
        print(task.RowKey)
class MetaStorageClient:
    """ Client to store metadata to table storage """

    def __init__(self, table_name, account_name, account_key):
        self.table = table_name
        self.client = TableService(account_name=account_name,
                                   account_key=account_key)

    def upload(self, path: str, partition_key: str, row_key: str, meta: dict):
        """ Upload metadata for image """
        meta['PartitionKey'] = partition_key
        meta['RowKey'] = row_key
        meta['path'] = path
        self.client.insert_or_replace_entity(self.table, meta)

    def search(self, row_key):
        """ Search TableStorage for metadata """
        query = f"RowKey eq '{row_key}'"
        items = self.client.query_entities(self.table, filter=query)
        result = [item for item in items]
        return result
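# A minimal usage sketch for MetaStorageClient; the table name, account name,
# key, and metadata values are placeholders.
client = MetaStorageClient('imagemeta', 'myaccount', '<key>')
client.upload(path='images/cat.jpg',
              partition_key='cats',
              row_key='cat-001',
              meta={'width': 800, 'height': 600})
print(client.search('cat-001'))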
# If we have the UID, continue
if status == MIFAREReader.MI_OK:
    # Print UID
    print("Card read UID: " + str(uid[0]) + "," + str(uid[1]) + "," +
          str(uid[2]) + "," + str(uid[3]))

    # This is the default key for authentication
    key = [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]

    # Select the scanned tag
    MIFAREReader.MFRC522_SelectTag(uid)

    # Authenticate
    status = MIFAREReader.MFRC522_Auth(MIFAREReader.PICC_AUTHENT1A, 8, key,
                                       uid)

    # Check if authenticated
    if status == MIFAREReader.MI_OK:
        # Add the card to the RFID table (RowKey must be a string)
        card = Entity()
        card.PartitionKey = "2019"
        card.RowKey = str(time.time())
        card.UID = (str(uid[0]) + "," + str(uid[1]) + "," + str(uid[2]) +
                    "," + str(uid[3]))
        card.Authenticate = status
        table_service.insert_or_replace_entity("RFID", card)
        MIFAREReader.MFRC522_Read(8)
        MIFAREReader.MFRC522_StopCrypto1()
    else:
        print("Authentication error")
class AzureOperationsStorage(BasicOperationStorage):
    """
    Implementation of :class:`.interface.IOperationStorage` with Azure
    Table Storage using the default implementation
    :class:`.interface.BasicOperationStorage`

    On creation, a connection to the storage is initialized and all needed
    tables are created. If a purge is necessary, tables are not deleted;
    only their content is removed. Table creation can take a while with
    Azure Table Storage.

    As Azure Table Storage only supports two indices, the operations are
    inserted multiple times in different tables to enable multi-index
    queries.
    """

    def get_retry_exceptions(self):
        # Return a tuple so it can be used directly in an except clause
        return (NewConnectionError,)

    @retry_auto_reconnect
    def __init__(self, azure_config, purge=False):
        super(AzureOperationsStorage, self).__init__()

        if not azure_config:
            raise Exception("No azure table storage configuration provided!")
        self._azure_config = azure_config

        # ensure defaults
        self._azure_config["operation_table"] = self._azure_config.get(
            "operation_table", "operations")
        self._azure_config["address_table"] = self._azure_config.get(
            "address_table", "address")
        self._azure_config["status_table"] = self._azure_config.get(
            "status_table", "status")
        self._azure_config["balances_table"] = self._azure_config.get(
            "balances_table", "balances")

        if not self._azure_config["account"]:
            raise Exception(
                "Please include the azure account name in the config")
        if not self._azure_config["key"]:
            raise Exception(
                "Please include the azure account key in the config")

        self._service = TableService(
            account_name=self._azure_config["account"],
            account_key=self._azure_config["key"])

        # if a table doesn't exist, create it
        self._create_operations_storage(purge)
        self._create_status_storage(purge)
        self._create_address_storage(purge)
        self._create_balances_storage(purge)

    def _debug_print(self, operation):
        from pprint import pprint
        pprint(operation)

    def _create_address_storage(self, purge):
        _variants = ["balance", "historyfrom", "historyto"]
        for variant in _variants:
            tablename = self._azure_config["address_table"] + variant
            if purge:
                try:
                    for item in self._service.query_entities(tablename):
                        self._service.delete_entity(tablename,
                                                    item["PartitionKey"],
                                                    item["RowKey"])
                except AzureHttpError:
                    pass
                except AzureMissingResourceHttpError:
                    pass
            while not self._service.exists(tablename):
                self._service.create_table(tablename)
                time.sleep(0.1)

    def _create_status_storage(self, purge):
        if purge:
            try:
                tablename = self._azure_config["status_table"]
                for item in self._service.query_entities(tablename):
                    self._service.delete_entity(tablename,
                                                item["PartitionKey"],
                                                item["RowKey"])
            except AzureMissingResourceHttpError:
                pass
        while not self._service.exists(self._azure_config["status_table"]):
            self._service.create_table(self._azure_config["status_table"])
            time.sleep(0.1)

    def _create_balances_storage(self, purge):
        if purge:
            try:
                tablename = self._azure_config["balances_table"]
                for item in self._service.query_entities(tablename):
                    self._service.delete_entity(tablename,
                                                item["PartitionKey"],
                                                item["RowKey"])
            except AzureMissingResourceHttpError:
                pass
        while not self._service.exists(self._azure_config["balances_table"]):
            self._service.create_table(self._azure_config["balances_table"])
            time.sleep(0.1)

    def _create_operations_storage(self, purge):
        self._operation_variants = [
            "incident", "statuscompleted", "statusfailed", "statusinprogress"
        ]  # "customer"
        self._operation_tables = {}
        for variant in self._operation_variants:
            self._operation_tables[variant] = \
                self._azure_config["operation_table"] + variant

        self._operation_prep = {
            "statusinprogress": lambda op: {
                "PartitionKey": self._short_digit_hash(
                    op["chain_identifier"]),
                "RowKey": op["chain_identifier"]
            },
            "statuscompleted": lambda op: {
                "PartitionKey": self._short_digit_hash(
                    op["chain_identifier"]),
                "RowKey": op["chain_identifier"]
            },
            "statusfailed": lambda op: {
                "PartitionKey": self._short_digit_hash(
                    op["chain_identifier"]),
                "RowKey": op["chain_identifier"]
            },
            "customer": lambda op: {
                "PartitionKey": op["customer_id"],
                "RowKey": op["chain_identifier"]
            },
            "incident": lambda op: {
                "PartitionKey": self._short_digit_hash(op["incident_id"]),
                "RowKey": op["incident_id"]
            }
        }
        for variant in self._operation_variants:
            if purge:
                try:
                    tablename = self._operation_tables[variant]
                    for item in self._service.query_entities(tablename):
                        self._service.delete_entity(tablename,
                                                    item["PartitionKey"],
                                                    item["RowKey"])
                except AzureMissingResourceHttpError:
                    pass
            while not self._service.exists(self._operation_tables[variant]):
                self._service.create_table(self._operation_tables[variant])
                time.sleep(0.1)

    def _get_with_ck(self, variant, operation):
        with_ck = operation.copy()
        with_ck.update(self._operation_prep[variant](with_ck))
        return with_ck

    def _short_digit_hash(self, value):
        hash_type = Config.get("operation_storage", "key_hash", "type",
                               default="crc32")
        if hash_type == "crc32":
            short_hash = hex(zlib.crc32(value.encode(encoding='UTF-8')))
            short_hash = short_hash[2:len(short_hash)]
        elif hash_type == "sha256":
            checker = hashlib.sha256()
            checker.update(value.encode(encoding='UTF-8'))
            short_hash = checker.hexdigest()
        return short_hash[0:Config.get("operation_storage", "key_hash",
                                       "digits", 3)]

    @retry_auto_reconnect
    def track_address(self, address, usage="balance"):
        address = ensure_address_format(address)
        try:
            short_hash = self._short_digit_hash(address)
            logging.getLogger(__name__).debug("track_address with " +
                                              str(address) + ", hash " +
                                              str(short_hash))
            self._service.insert_entity(
                self._azure_config["address_table"] + usage, {
                    "PartitionKey": short_hash,
                    "RowKey": address,
                    "address": address,
                    "usage": usage
                })
        except AzureConflictHttpError:
            raise AddressAlreadyTrackedException

    @retry_auto_reconnect
    def untrack_address(self, address, usage="balance"):
        address = ensure_address_format(address)
        try:
            short_hash = self._short_digit_hash(address)
            logging.getLogger(__name__).debug("untrack_address with " +
                                              str(address) + ", hash " +
                                              str(short_hash))
            self._service.delete_entity(
                self._azure_config["address_table"] + usage, short_hash,
                address)
            try:
                self._delete_balance(address)
            except AzureMissingResourceHttpError:
                pass
        except AzureMissingResourceHttpError:
            raise AddressNotTrackedException()

    @retry_auto_reconnect
    def _get_address(self, address, usage="balance"):
        try:
            short_hash = self._short_digit_hash(address)
            logging.getLogger(__name__).debug("_get_address with " +
                                              str(address) + ", hash " +
                                              str(short_hash))
            return self._service.get_entity(
                self._azure_config["address_table"] + usage, short_hash,
                address)
        except AzureMissingResourceHttpError:
            raise AddressNotTrackedException()

    def _update(self, operation, status=None):
        try:
            mapping = {
                "in_progress": "statusinprogress",
                "completed": "statuscompleted",
                "failed": "statusfailed"
            }

            operation = self._get_with_ck("incident", operation.copy())
            new_operation = operation
            if status:
                tmp = self.get_operation(operation["incident_id"])
                new_operation["timestamp"] = tmp["timestamp"]
                new_operation["status"] = status
                new_operation = self._get_with_ck("incident", new_operation)
            logging.getLogger(__name__).debug(
                "_update: Table " + self._operation_tables["incident"] +
                " PartitionKey " + new_operation["PartitionKey"] + " " +
                new_operation["RowKey"])
            self._service.update_entity(self._operation_tables["incident"],
                                        new_operation)

            operation = self._get_with_ck("statuscompleted", operation.copy())
            new_operation = operation
            if status:
                tmp = self.get_operation(operation["incident_id"])
                new_operation["timestamp"] = tmp["timestamp"]
                new_operation["status"] = status
                new_operation = self._get_with_ck("statuscompleted",
                                                  new_operation)
            self._service.update_entity(
                self._operation_tables["statuscompleted"], new_operation)
            logging.getLogger(__name__).debug(
                "_update: Table " +
                self._operation_tables["statuscompleted"] + " PartitionKey " +
                new_operation["PartitionKey"] + " " +
                new_operation["RowKey"])

            if status:
                # needs delete and insert
                try:
                    self._service.delete_entity(
                        self._operation_tables[mapping[operation["status"]]],
                        operation["PartitionKey"], operation["RowKey"])
                except AzureMissingResourceHttpError:
                    pass
                try:
                    self._service.insert_entity(
                        self._operation_tables[mapping[
                            new_operation["status"]]], new_operation)
                except AzureConflictHttpError:
                    # already exists, try update
                    self._service.update_entity(
                        self._operation_tables[mapping[
                            new_operation["status"]]], new_operation)
            else:
                self._service.update_entity(
                    self._operation_tables[mapping[new_operation["status"]]],
                    new_operation)
        except AzureMissingResourceHttpError:
            raise OperationNotFoundException()

    def _insert(self, operation):
        try:
            for variant in self._operation_variants:
                to_insert = operation.copy()
                to_insert.update(self._operation_prep[variant](to_insert))
                if not to_insert["PartitionKey"]:
                    raise AzureMissingResourceHttpError()
                if not to_insert["RowKey"]:
                    raise AzureMissingResourceHttpError()
                logging.getLogger(__name__).debug(
                    "_insert: Table " + self._operation_tables[variant] +
                    " PartitionKey " + to_insert["PartitionKey"] + " " +
                    to_insert["RowKey"])
                self._service.insert_entity(self._operation_tables[variant],
                                            to_insert)
        except AzureConflictHttpError:
            raise DuplicateOperationException()

    def _delete(self, operation):
        try:
            for variant in self._operation_variants:
                to_delete = operation.copy()
                to_delete.update(self._operation_prep[variant](to_delete))
                self._service.delete_entity(self._operation_tables[variant],
                                            to_delete["PartitionKey"],
                                            to_delete["RowKey"])
        except AzureMissingResourceHttpError:
            raise OperationNotFoundException()

    @retry_auto_reconnect
    def flag_operation_completed(self, operation):
        # do basics
        operation = super(AzureOperationsStorage,
                          self).flag_operation_completed(operation)
        self._update(operation, status="completed")
        self._ensure_balances(operation)

    @retry_auto_reconnect
    def flag_operation_failed(self, operation, message=None):
        # do basics
        operation = super(AzureOperationsStorage,
                          self).flag_operation_failed(operation)
        operation["message"] = message
        self._update(operation, status="failed")

    @retry_auto_reconnect
    def insert_operation(self, operation):
        # do basics
        operation = super(AzureOperationsStorage,
                          self).insert_operation(operation)

        error = None
        try:
            self._insert(operation)
        except DuplicateOperationException as e:
            error = e

        try:
            # always check if balances are ok
            if operation["status"] == "completed":
                self._ensure_balances(operation)
        except BalanceConcurrentException as e:
            if error is None:
                error = e

        if error is not None:
            raise error

    @retry_auto_reconnect
    def _delete_balance(self, address, if_match='*'):
        self._service.delete_entity(self._azure_config["balances_table"],
                                    self._short_digit_hash(address), address,
                                    if_match=if_match)

    @retry_auto_reconnect
    def _ensure_balances(self, operation):
        affected_address = get_tracking_address(operation)
        logging.getLogger(__name__).debug("_ensure_balances: with " +
                                          operation["chain_identifier"] +
                                          " for address " +
                                          str(affected_address))
        try:
            self._get_address(affected_address)
        except AddressNotTrackedException:
            # delete if it exists and return
            try:
                self._delete_balance(affected_address)
            except AzureMissingResourceHttpError:
                pass
            return

        try:
            balance_dict = self._service.get_entity(
                self._azure_config["balances_table"],
                self._short_digit_hash(affected_address), affected_address)
            insert = False
        except AzureMissingResourceHttpError:
            balance_dict = {"address": affected_address}
            balance_dict["PartitionKey"] = self._short_digit_hash(
                balance_dict["address"])
            balance_dict["RowKey"] = balance_dict["address"]
            insert = True

        if operation["block_num"] < balance_dict.get("blocknum", 0):
            raise BalanceConcurrentException()
        elif operation["block_num"] == balance_dict.get("blocknum", 0) and\
                operation["txnum"] < balance_dict.get("txnum", 0):
            raise BalanceConcurrentException()
        elif operation["block_num"] == balance_dict.get("blocknum", 0) and\
                operation["txnum"] == balance_dict.get("txnum", 0) and\
                operation["opnum"] <= balance_dict.get("opnum", 0):
            raise BalanceConcurrentException()

        balance_dict["blocknum"] = max(balance_dict.get("blocknum", 0),
                                       operation["block_num"])
        balance_dict["txnum"] = max(balance_dict.get("txnum", 0),
                                    operation["tx_in_block"])
        balance_dict["opnum"] = max(balance_dict.get("opnum", 0),
                                    operation["op_in_tx"])
        total = 0

        addrs = split_unique_address(affected_address)
        asset_id = "balance" + operation["amount_asset_id"].split("1.3.")[1]
        if addrs["account_id"] == operation["from"]:
            # internal transfer and withdraw
            # negative
            balance = balance_dict.get(asset_id, 0)
            balance_dict[asset_id] = balance - operation["amount_value"]
            # fee as well
            asset_id = operation["fee_asset_id"]
            balance = balance_dict.get(asset_id, 0)
            balance_dict[asset_id] = balance - operation["fee_value"]
        elif addrs["account_id"] == operation["to"]:
            # deposit
            # positive
            balance = balance_dict.get(asset_id, 0)
            balance_dict[asset_id] = balance + operation["amount_value"]
            # fees were paid by someone else
        else:
            raise InvalidOperationException()

        for key, value in balance_dict.items():
            if key.startswith("balance"):
                total = total + value

        if total == 0:
            if not insert:
                try:
                    self._delete_balance(affected_address,
                                         if_match=balance_dict.etag)
                except AzureMissingResourceHttpError:
                    pass
            return

        # may be updated or inserted, total > 0
        if insert:
            try:
                self._service.insert_entity(
                    self._azure_config["balances_table"], balance_dict)
            except AzureMissingResourceHttpError:
                raise OperationStorageException(
                    "Critical error in database consistency")
        else:
            try:
                self._service.update_entity(
                    self._azure_config["balances_table"], balance_dict,
                    if_match=balance_dict.etag)
            except AzureConflictHttpError:
                raise OperationStorageException(
                    "Critical error in database consistency")

    @retry_auto_reconnect
    def insert_or_update_operation(self, operation):
        # do basics
        operation = super(AzureOperationsStorage,
                          self).insert_operation(operation)

        # check if this is from in_progress to complete (for withdrawals we
        # need to find the incident id as it is not stored onchain)
        try:
            logging.getLogger(__name__).debug(
                "insert_or_update_operation: check if in_progress with " +
                str(operation["chain_identifier"]) + " exists")
            existing_operation = self.get_operation_by_chain_identifier(
                "in_progress", operation["chain_identifier"])
            logging.getLogger(__name__).debug(
                "insert_or_update_operation: found existing in_progress "
                "operation")
            if not existing_operation["incident_id"] ==\
                    operation["incident_id"] and\
                    operation["incident_id"] == operation["chain_identifier"]:
                logging.getLogger(__name__).debug(
                    "insert_or_update_operation: using preset incident_id " +
                    str(existing_operation["incident_id"]))
                operation["incident_id"] = existing_operation["incident_id"]
        except OperationNotFoundException:
            existing_operation = None

        if existing_operation is None:
            try:
                logging.getLogger(__name__).debug(
                    "insert_or_update_operation: attempting insert")

                error = None
                try:
                    self._insert(operation)
                except DuplicateOperationException as e:
                    error = e

                try:
                    # always check if balances are ok
                    if operation["status"] == "completed":
                        self._ensure_balances(operation)
                except BalanceConcurrentException as e:
                    if error is None:
                        error = e

                if error is not None:
                    raise error
            except DuplicateOperationException as ex:
                logging.getLogger(__name__).debug(
                    "insert_or_update_operation: fallback to update")
                # could be an update to completed ...
                if operation.get("block_num"):
                    try:
                        operation.pop("status")
                        self.flag_operation_completed(operation)
                    except OperationNotFoundException:
                        raise ex
                else:
                    raise ex
        else:
            logging.getLogger(__name__).debug(
                "insert_or_update_operation: attempting update")
            if operation.get("block_num"):
                try:
                    operation.pop("status")
                    self.flag_operation_completed(operation)
                except OperationNotFoundException:
                    # no matching operation to update; re-raise
                    raise

    @retry_auto_reconnect
    def delete_operation(self, operation_or_incident_id):
        # do basics
        operation = super(AzureOperationsStorage,
                          self).delete_operation(operation_or_incident_id)
        if type(operation_or_incident_id) == str:
            operation = self.get_operation(operation_or_incident_id)
        else:
            operation = operation_or_incident_id
        self._delete(operation)

    @retry_auto_reconnect
    def get_operation_by_chain_identifier(self, status, chain_identifier):
        mapping = {
            "in_progress": "statusinprogress",
            "completed": "statuscompleted",
            "failed": "statusfailed"
        }
        try:
            operation = self._service.get_entity(
                self._operation_tables[mapping[status]],
                self._short_digit_hash(chain_identifier), chain_identifier)
            operation.pop("PartitionKey")
            operation.pop("RowKey")
            operation.pop("Timestamp")
            operation.pop("etag")
        except AzureMissingResourceHttpError:
            raise OperationNotFoundException()
        return operation

    @retry_auto_reconnect
    def get_operation(self, incident_id):
        try:
            short_hash = self._short_digit_hash(incident_id)
            logging.getLogger(__name__).debug("get_operation with " +
                                              str(incident_id) + ", hash " +
                                              str(short_hash))
            operation = self._service.get_entity(
                self._operation_tables["incident"], short_hash, incident_id)
            operation.pop("PartitionKey")
            operation.pop("RowKey")
            operation.pop("Timestamp")
            operation.pop("etag")
        except AzureMissingResourceHttpError:
            raise OperationNotFoundException()
        return operation

    @retry_auto_reconnect
    def get_balances(self, take, continuation=None, addresses=None,
                     recalculate=False):
        if recalculate:
            raise Exception(
                "Currently not supported due to memo change on withdraw")
            # unreachable while recalculation is disabled
            return self._get_balances_recalculate(take, continuation,
                                                  addresses)
        else:
            if continuation is not None:
                try:
                    continuation_marker = json.loads(continuation)
                except TypeError:
                    raise InputInvalidException()
                except JSONDecodeError:
                    raise InputInvalidException()
                balances = self._service.query_entities(
                    self._azure_config["balances_table"],
                    num_results=take,
                    marker=continuation_marker)
            else:
                balances = self._service.query_entities(
                    self._azure_config["balances_table"], num_results=take)
            return_balances = {}
            for address_balance in balances:
                return_balances[address_balance["address"]] = {
                    "block_num": address_balance["blocknum"]
                }
                for key, value in address_balance.items():
                    if key.startswith("balance"):
                        asset_id = "1.3." + key.split("balance")[1]
                        return_balances[
                            address_balance["address"]][asset_id] = value
            return_balances["continuation"] = None
            if balances.next_marker:
                return_balances["continuation"] = json.dumps(
                    balances.next_marker)
            return return_balances

    @retry_auto_reconnect
    def _get_balances_recalculate(self, take, continuation=None,
                                  addresses=None):
        address_balances = collections.defaultdict(
            lambda: collections.defaultdict())

        if not addresses:
            if continuation is not None:
                try:
                    continuation_marker = json.loads(continuation)
                except TypeError:
                    raise InputInvalidException()
                except JSONDecodeError:
                    raise InputInvalidException()
                addresses = self._service.query_entities(
                    self._azure_config["address_table"] + "balance",
                    num_results=take,
                    marker=continuation_marker)
            else:
                addresses = self._service.query_entities(
                    self._azure_config["address_table"] + "balance",
                    num_results=take)
            if addresses.next_marker:
                address_balances["continuation"] = json.dumps(
                    addresses.next_marker)
            addresses = [x["address"] for x in addresses]

        if type(addresses) == str:
            addresses = [addresses]

        for address in addresses:
            addrs = split_unique_address(address)
            max_block_number = 0
            for operation in self.get_operations_completed(
                    filter_by={"customer_id": addrs["customer_id"]}):
                this_block_num = operation["block_num"]
                asset_id = operation["amount_asset_id"]
                if addrs["account_id"] == operation["from"]:
                    # negative
                    balance = address_balances[address].get(asset_id, 0)
                    address_balances[address][asset_id] =\
                        balance - operation["amount_value"]
                    # fee as well
                    asset_id = operation["fee_asset_id"]
                    balance = address_balances[address].get(asset_id, 0)
                    address_balances[address][asset_id] =\
                        balance - operation["fee_value"]
                elif addrs["account_id"] == operation["to"]:
                    # positive
                    balance = address_balances[address].get(asset_id, 0)
                    address_balances[address][asset_id] =\
                        balance + operation["amount_value"]
                else:
                    raise InvalidOperationException()
                max_block_number = max(max_block_number, this_block_num)
            if max_block_number > 0:
                address_balances[address]["block_num"] = max_block_number

        # do not return default dicts
        for key, value in address_balances.items():
            if type(value) == collections.defaultdict:
                address_balances[key] = dict(value)
        return dict(address_balances)

    def _parse_filter(self, filter_by):
        if filter_by:
            if filter_by.get("customer_id"):
                return {"customer_id": filter_by.pop("customer_id")}
            if filter_by.get("address"):
                addrs = split_unique_address(filter_by.pop("address"))
                return {"customer_id": addrs["customer_id"]}
            if filter_by.get("from"):
                addrs = split_unique_address(filter_by.pop("from"))
                return {"from": addrs["account_id"]}
            if filter_by.get("to"):
                addrs = split_unique_address(filter_by.pop("to"))
                return {"to": addrs["account_id"]}
            if filter_by:
                raise Exception("Filter not supported")
        return {}

    def _filter_dict_to_string(self, filter_dict, partition_key=None):
        filter_str = None
        for key, value in filter_dict.items():
            if partition_key == key:
                key = "PartitionKey"
            # join multiple conditions with "and"
            if filter_str is not None:
                filter_str = (filter_str + " and " + key + " eq '" + value +
                              "'")
            else:
                filter_str = key + " eq '" + value + "'"
        return filter_str

    @retry_auto_reconnect
    def get_operations_in_progress(self, filter_by=None):
        mapping = {
            "in_progress": "statusinprogress",
            "completed": "statuscompleted",
            "failed": "statusfailed"
        }
        filter_dict = {}
        filter_dict.update(self._parse_filter(filter_by))
        filter_str = self._filter_dict_to_string(filter_dict, "status")
        return list(
            self._service.query_entities(
                self._operation_tables[mapping["in_progress"]], filter_str))

    @retry_auto_reconnect
    def get_operations_completed(self, filter_by=None):
        mapping = {
            "in_progress": "statusinprogress",
            "completed": "statuscompleted",
            "failed": "statusfailed"
        }
        filter_dict = {}
        filter_dict.update(self._parse_filter(filter_by))
        filter_str = self._filter_dict_to_string(filter_dict, "status")
        return list(
            self._service.query_entities(
                self._operation_tables[mapping["completed"]], filter_str))

    @retry_auto_reconnect
    def get_operations_failed(self, filter_by=None):
        mapping = {
            "in_progress": "statusinprogress",
            "completed": "statuscompleted",
            "failed": "statusfailed"
        }
        filter_dict = {}
        filter_dict.update(self._parse_filter(filter_by))
        filter_str = self._filter_dict_to_string(filter_dict, "status")
        return list(
            self._service.query_entities(
                self._operation_tables[mapping["failed"]], filter_str))

    @retry_auto_reconnect
    def get_last_head_block_num(self):
        try:
            document = self._service.get_entity(
                self._azure_config["status_table"], "head_block_num", "last")
            return document["last_head_block_num"]
        except AzureMissingResourceHttpError:
            return 0

    @retry_auto_reconnect
    def set_last_head_block_num(self, head_block_num):
        current_last = self.get_last_head_block_num()
        if current_last >= head_block_num:
            raise Exception("Marching backwards not supported! Last: " +
                            str(current_last) + " New: " +
                            str(head_block_num))
        self._service.insert_or_replace_entity(
            self._azure_config["status_table"], {
                "PartitionKey": "head_block_num",
                "RowKey": "last",
                "last_head_block_num": head_block_num
            })
def create_file_record(url, unique_id, partition_key, short_id, name,
                       extension, relative_path, exifString, xmpString,
                       url_list, md5, sha256, instance_name, account_key):
    start = time.time()
    utcnow = datetime.datetime.utcnow().isoformat()

    # Each ingested (and successfully processed) file has a unique record
    # containing information and a list of previews.
    file_record = {
        'PartitionKey': partition_key,  # using tree structure for partition key a good idea? #possiblybadidea #possiblygoodidea
        'RowKey': short_id,  # using unique file name for key a good idea? #badidea #mustbeuniqueinpartition
        'uid': unique_id,  # globally unique id
        'url': url,  # master blob url
        'name': name,  # filename
        'ext': extension,  # file extension
        'path': relative_path,  # path / folder file lives in
        'it': utcnow,  # ingestion_time
        'pvs': json.dumps(url_list),  # json list of preview urls
        'md5': md5,  # md5 checksum of total file binary data at ingestion time
        'sha256': sha256,  # sha256 checksum of total file binary data at ingestion time
        'exif': exifString,  # exif dumped as json by imagemagick
        'xmp': xmpString,  # if exif identified APP1 data, xmp dump in xml by imagemagick
        'created_time': utcnow,  # file creation time, using now, TODO: pick up file metadata if provided in upload
        'modified_time': utcnow  # file mod time, using now, TODO: pick up file metadata if provided in upload
    }
    table_service = TableService(account_name=instance_name,
                                 account_key=account_key)
    table_service.insert_or_replace_entity('files', file_record)
    print("file_record inserted in {} sec".format(time.time() - start))

    # Change record to folder facing
    # TODO: Strip large metadata blocks and keep info needed for UIs
    file_record["PartitionKey"] = relative_path.replace("/", "%2F")
    file_record['item_type'] = 'file'
    table_service.insert_or_replace_entity('folders', file_record)

    # Ensure we have folder records for the entire path
    # TODO: Optimization; check if the final folder exists, and if so skip
    # this step (we know all higher level paths have been created too)
    folder_struct = relative_path.split("/")
    # partition keys cannot have / in them, this is the best I can come up with atm
    # path starts with slash, so the first slot is empty; replace it with /
    folder_struct[0] = "%2F"
    # weird exception case: root refers to itself as parent, but easy to check for later
    last_folder = folder_struct[0]
    for folder in folder_struct:
        if len(folder) == 0:
            # ignore empty paths, tolerate e.g. folder1//folder2/folder3/
            continue
        folder_record = {
            'PartitionKey': last_folder,
            'RowKey': folder,
            'created_time': utcnow,
            'modified_time': utcnow,
            'nf_flag': True,
            'nf_time': utcnow,
            'item_type': 'folder'
        }
        if len(folder) > 3:  # special handling of root
            last_folder = last_folder + "%2F" + folder
        # if the folder already exists, the insert will fail; remove the
        # creation properties and try a merge operation (that should work
        # unless the service is down)
        try:
            table_service.insert_entity('folders', folder_record)
        except Exception:
            folder_record.pop('created_time')
            table_service.insert_or_merge_entity('folders', folder_record)
class Azure_Storage():
    def __init__(self, create_new=False):
        account_name = config.STORAGE_ACCOUNT_NAME
        account_key = config.STORAGE_ACCOUNT_KEY
        self.task_queue_name = config.TASK_QUEUE_NAME
        self.table_name = config.TABLE_NAME
        self.container_name = config.BLOB_CONTAINER_NAME
        self.ImagePartitionKey = config.IMAGE_PARTITION_KEY
        self.table_service = TableService(account_name=account_name,
                                          account_key=account_key)
        self.block_blob_service = BlockBlobService(account_name=account_name,
                                                   account_key=account_key)
        self.queue_service = QueueService(account_name=account_name,
                                          account_key=account_key)
        if create_new:
            # these are instance attributes, not bare names
            self.queue_service.create_queue(self.task_queue_name)
            self.block_blob_service.create_container(self.container_name)
            self.table_service.create_table(self.table_name)

    def put_image(self, image_uuid, image_bytes):
        ret = self.block_blob_service.create_blob_from_bytes(
            self.container_name, image_uuid, image_bytes)
        return ret

    def get_image(self, image_uuid):
        ret = self.block_blob_service.get_blob_to_bytes(
            self.container_name, image_uuid).content
        return ret

    def put_classification_result(self, image_uuid, results):
        task = Entity()
        task.PartitionKey = self.ImagePartitionKey
        task.RowKey = image_uuid
        task.results = str(results)
        ret = self.table_service.insert_or_replace_entity(
            self.table_name, task)
        return ret

    def get_classification_result(self, image_uuid):
        try:
            task = self.table_service.get_entity(self.table_name,
                                                 self.ImagePartitionKey,
                                                 image_uuid)
            return task.results
        except Exception:
            return None

    def put_task(self, taskmsg):
        ret = self.queue_service.put_message(self.task_queue_name, taskmsg)
        return ret

    # payload is in message.content
    def get_task(self, num_messages=16):
        messages = self.queue_service.get_messages(self.task_queue_name,
                                                   num_messages=num_messages,
                                                   visibility_timeout=1 * 60)
        return messages

    def delete_task(self, message):
        ret = self.queue_service.delete_message(self.task_queue_name,
                                                message.id,
                                                message.pop_receipt)
        return ret
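# A minimal usage sketch for Azure_Storage; config must supply real account
# values, and the image bytes below are a stand-in payload.
import uuid

storage = Azure_Storage()
image_id = str(uuid.uuid4())
storage.put_image(image_id, b'<image bytes>')
storage.put_classification_result(image_id, [('cat', 0.98)])
print(storage.get_classification_result(image_id))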
}

# Get target sizes
sizes = {}
for target, proc in procs.items():
    # Wait for the target to complete building or fail
    proc.wait()

    # Get build size on success
    bin_path = os.path.join(PYBRICKS_PATH, "bricks", target, "build",
                            "firmware.bin")
    try:
        sizes[target] = os.path.getsize(bin_path)
    except FileNotFoundError:
        pass

service.insert_or_replace_entity(FIRMWARE_SIZE_TABLE, {
    "PartitionKey": "size",
    "RowKey": commit.hexsha,
    **sizes
})

service.update_entity(
    CI_STATUS_TABLE,
    {
        "PartitionKey": "build",
        "RowKey": "lastHash",
        "hash": pybricks.commit(PYBRICKS_BRANCH).hexsha,
    },
)
            'device_id=e4ba4977314e200234880bac6b3585af; s=f61bvmhzj7; _ga=GA1.2.1562254249.1522590556; _gid=GA1.2.447715148.1528814975; xq_a_token=019174f18bf425d22c8e965e48243d9fcfbd2cc0; xq_a_token.sig=_pB0kKy3fV9fvtvkOzxduQTrp7E; xq_r_token=2d465aa5d312fbe8d88b4e7de81e1e915de7989a; xq_r_token.sig=lOCElS5ycgbih9P-Ny3cohQ-FSA; Hm_lvt_1db88642e346389874251b5a1eded6e3=1528814975,1528897343; Hm_lpvt_1db88642e346389874251b5a1eded6e3=1528897343; u=111528897342585; _gat_gtag_UA_16079156_4=1'
        }
        req = urllib.request.Request(url, headers=headers)
        response = urllib.request.urlopen(req)
        jdata = json.loads(response.read())
        if self.total_symbols_to_crawl == 0:
            self.total_symbols_to_crawl = int(jdata['count']['count'])
        for eachStock in jdata['stocks']:
            self.stocks.append(
                Stock(eachStock['symbol'], eachStock['code'],
                      eachStock['name']))
            self.total_symbols_to_crawled += 1
        print('current parsed to page {}, {} symbols queried'.format(
            self.current_page, self.total_symbols_to_crawled))


if __name__ == '__main__':
    crawler = SymbolCrawler()
    while crawler.has_next():
        crawler.crawl_next()

    table_service = TableService(account_name='heaventextb06a',
                                 account_key='**fill your own key**')
    for each_stock in crawler.stocks:
        print(each_stock)
        body = EntityProperty(EdmType.STRING, str(each_stock))
        table_service.insert_or_replace_entity('heavenstock', each_stock)
class AzureTable(object):
    def __init__(self, account_name: str, account_key: str, table_name: str,
                 partition_key_field: str, clustering_key_field: str):
        self.table = TableService(account_name=account_name,
                                  account_key=account_key)
        self.table_name = table_name
        self.partition_key_field = partition_key_field
        self.clustering_key_field = clustering_key_field

    @property
    def partition_key_name(self) -> str:
        return 'PartitionKey'

    @property
    def clustering_key_name(self) -> str:
        return 'RowKey'

    def get_payload(self, payload: dict):
        item = deepcopy(payload)
        partition_key = payload.get(self.partition_key_field)
        clustering_key = payload.get(self.clustering_key_field)
        if partition_key is None:
            raise PartitionKeyNotFoundError(
                'payload={} does not have a partition key'.format(payload))
        if clustering_key is None:
            raise ClusteringKeyNotFoundError(
                'payload={} does not have a clustering key'.format(payload))
        item.update({
            self.partition_key_name: partition_key,
            self.clustering_key_name: clustering_key
        })
        return item

    def create(self):
        return self.table.create_table(self.table_name)

    def insert(self, item: dict):
        return self.table.insert_entity(self.table_name,
                                        self.get_payload(item))

    def update(self, item: dict):
        return self.table.update_entity(self.table_name,
                                        self.get_payload(item))

    def upsert(self, item: dict):
        return self.table.insert_or_replace_entity(self.table_name,
                                                   self.get_payload(item))

    def delete(self, partition_key: str, clustering_key: str):
        return self.table.delete_entity(self.table_name,
                                        partition_key=partition_key,
                                        row_key=clustering_key)

    def read(self, partition_key: str, clustering_key: str):
        return self.table.get_entity(self.table_name,
                                     partition_key=partition_key,
                                     row_key=clustering_key)

    def insert_batch(self, items: list):
        batch = TableBatch()
        for item in items:
            batch.insert_entity(self.get_payload(item))
        return self.table.commit_batch(self.table_name, batch)

    def get(self, partition_key: str, clustering_key: str):
        return self.table.get_entity(self.table_name, partition_key,
                                     clustering_key)

    def get_by_partition(self, partition_key: str) -> list:
        return self.table.query_entities(self.table_name,
                                         filter="{} eq '{}'".format(
                                             self.partition_key_name,
                                             partition_key))
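# A minimal usage sketch for this AzureTable variant; the account name, key,
# and field names are placeholders. Note that entities in a single batch
# must share the same partition key.
table = AzureTable(account_name='myaccount', account_key='<key>',
                   table_name='events', partition_key_field='device_id',
                   clustering_key_field='event_id')
table.create()
table.upsert({'device_id': 'dev1', 'event_id': 'e1', 'temp': 21.5})
table.insert_batch([
    {'device_id': 'dev1', 'event_id': 'e2', 'temp': 22.0},
    {'device_id': 'dev1', 'event_id': 'e3', 'temp': 22.4},
])
print(list(table.get_by_partition('dev1')))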
class Storage_az_table(): ################################################################## # BEGIN - Common methods to implement storage interface def __init__(self): connection_string = os.environ['SHKOLA_AZ_TABLE_CONN_STR'] self.table_service = TableService(connection_string=connection_string) self.default_partition_key = "USER" self.users_table_name = 'users' self.responses_table_name = 'responses' self.sessions_table_name = 'sessions' self.feedbacks_table_name = 'feedbacks' tables = [ self.users_table_name, self.responses_table_name, self.sessions_table_name, self.feedbacks_table_name ] existing_tables = list( map(lambda x: x.name, self.table_service.list_tables())) for table in tables: if table not in existing_tables: self.table_service.create_table(table) def get_user(self, user_id): partition_key = self.default_partition_key try: entity = self.table_service.get_entity(self.users_table_name, partition_key, user_id) except AzureMissingResourceHttpError: return None entity["user_id"] = user_id return entity def update_user(self, user_id, name=None, remote_ip=None, user_agent=None, picture=None, user_language=None, selected_language=None, last_accessed=None): properties = dict() # Nothing better at the moment: properties['PartitionKey'] = self.default_partition_key properties['RowKey'] = user_id properties['user_id'] = user_id properties["name"] = name properties["remote_ip"] = remote_ip properties["user_agent"] = user_agent properties["picture"] = picture properties["user_language"] = user_language if selected_language: properties["selected_language"] = selected_language properties["last_accessed"] = last_accessed logging.debug("azure table update_user %s: %s", str(name), str(properties)) try: self.table_service.insert_or_merge_entity(self.users_table_name, properties) except Exception: logging.exception('Error adding to table ' + self.users_table_name + ' record: {}'.format(properties)) def update_selected_language(self, user_id, selected_language): properties = dict() # Nothing better at the moment: properties['PartitionKey'] = self.default_partition_key properties['RowKey'] = user_id properties['user_id'] = user_id properties["selected_language"] = selected_language logging.debug("azure table update_user_langauge %s: %s", str(user_id), str(properties)) try: self.table_service.insert_or_merge_entity(self.users_table_name, properties) except Exception: logging.exception('Error adding to table ' + self.users_table_name + ' record: {}'.format(properties)) def insert_user_id(self, user_id): self.update_user(user_id) return user_id # Get all user responses from the response table def get_all_user_results(self, u_id, from_date=None): req = "(PartitionKey eq '{}')".format(u_id) if from_date: if len(req) > 0: req = req + " and " req = req + "(Timestamp ge datetime'{}')".format(from_date) entries = self.table_service.query_entities(self.responses_table_name, req) result = [] for row in entries: result.append(row) return result # Update user info with the latest <stats>, and with # <stats_time> being the time of the latest stats def update_user_stats(self, user_id, stats, stats_time): properties = dict() # Nothing better at the moment: properties['PartitionKey'] = self.default_partition_key properties['RowKey'] = user_id properties['user_id'] = user_id properties['stats'] = encode_dict(stats) properties['stats_time'] = stats_time try: self.table_service.merge_entity(self.users_table_name, properties) except Exception: logging.exception('Error updating results for user ' + user_id + ' record: 
{}'.format(properties))

    @timer_section("storage.record_response")
    def record_response(self, response):
        fb_time = int(time.time() * 1000)
        response['PartitionKey'] = response['user_id']
        response['RowKey'] = response['question_id'] + "|" + \
            str(response['attempt']) + "|" + \
            str(fb_time) + "|" + \
            str(response['duration'])
        # Remove special characters not allowed in Azure PartitionKey and RowKey
        response['PartitionKey'] = re.sub(r"[ /?#]", "_", response['PartitionKey'])
        response['RowKey'] = re.sub(r"[ /?#]", "_", response['RowKey'])
        # logging.debug("*** record response: {}".format(response))
        try:
            self.table_service.insert_entity(self.responses_table_name, response)
        except Exception as err:
            logging.exception('Error adding response: {}\n\n{}'.format(
                response, err))

    @timer_section("storage.record_feedback")
    def record_feedback(self, response):
        fb_time = int(time.time() * 1000)
        response['PartitionKey'] = response['question_id']
        response['RowKey'] = response['type'] + "|" + response['list_id'] + \
            "|" + str(fb_time)
        # Remove special characters not allowed in Azure PartitionKey and RowKey
        response['PartitionKey'] = re.sub(r"[ /?#]", "_", response['PartitionKey'])
        response['RowKey'] = re.sub(r"[ /?#]", "_", response['RowKey'])
        logging.debug("*** record feedback: {}".format(response))
        try:
            self.table_service.insert_entity(self.feedbacks_table_name, response)
        except Exception as err:
            logging.exception('Error adding feedback: ' + str(err))

    @timer_section("storage.update_session")
    def update_session(self, session_id, data=None):
        # Avoid a mutable default argument; default to an empty dict per call.
        data = data if data is not None else {}
        assert session_id is not None
        assert data['state_id'] is not None
        properties = {
            'PartitionKey': session_id,
            'RowKey': "",
            'data': encode_dict(data['data']),
            'user_id': data['user_id'],
            'state_id': data['state_id'],
            'valid': data['valid']
        }
        logging.debug(
            "storage: updating session: {}, valid={}, state_id={}".format(
                session_id, data['valid'], data['state_id']))
        try:
            self.table_service.insert_or_replace_entity(
                self.sessions_table_name, properties)
        except Exception:
            logging.exception('Error adding to table ' +
                              self.sessions_table_name +
                              ' record: {}'.format(properties))

    @timer_section("get_session")
    def get_session(self, session_id):
        try:
            entity = self.table_service.get_entity(self.sessions_table_name,
                                                   session_id, "")
        except AzureMissingResourceHttpError:
            return None
        # The Azurite simulator returns an empty entity instead of an
        # exception, so check here
        if "user_id" not in entity.keys():
            return None
        logging.debug(
            "storage: loaded session: {}, valid={}, state_id={}".format(
                session_id, entity.get('valid'), entity.get('state_id')))
        # Compatibility: old records don't have state_id, valid
        if "state_id" not in entity:
            entity['state_id'] = None
        if "valid" not in entity:
            entity["valid"] = True
        return {
            # Convert "None" to None, see above
            # "user_id": entity["user_id"] if (not entity["user_id"] == "None") else None,
            "user_id": entity["user_id"],
            "data": decode_dict(entity["data"]),
            "state_id": entity["state_id"],
            "valid": entity["valid"]
        }

    # Get all of a user's sessions from the sessions table
    def get_all_user_sessions(self, u_id, from_date=None):
        req = "(user_id eq '{}')".format(u_id)
        if from_date:
            if len(req) > 0:
                req = req + " and "
            req = req + "(Timestamp ge datetime'{}')".format(
                from_date.isoformat())
        entries = self.table_service.query_entities(self.sessions_table_name,
                                                    req)
        result = []
        for row in entries:
            row["data"] = decode_dict(row["data"])
            result.append(row)
        return result

    # Get direct user feedback since a given date.
    # Ignore JS and Google errors, only return specific user feedback.
    def get_all_user_feedback(self, from_date=None):
        req = "(type ne 'JS_ERROR') and (type ne 'GOOGLE_ERROR')"
        if from_date:
            if len(req) > 0:
                req = req + " and "
            req = req + "(Timestamp ge datetime'{}')".format(
                from_date.isoformat())
        entries = self.table_service.query_entities(self.feedbacks_table_name,
                                                    req)
        result = []
        for row in entries:
            result.append(row)
        return result

    # END - Common methods to implement storage interface
    ##################################################################

    # Doesn't really work
    # def wipe_tables(self):
    #     try:
    #         self.table_service.delete_entity(self.users_table_name, "", "")
    #     except Exception as err:
    #         print('Error wiping table, ' + self.users_table_name + ': ' + str(err))
    #     try:
    #         self.table_service.delete_entity(self.responses_table_name, "", "")
    #     except Exception as err:
    #         print('Error wiping table, ' + self.responses_table_name + ':' + str(err))

    # def delete_all_tables(self):
    #     try:
    #         self.table_service.delete_table(self.users_table_name)
    #     except Exception as err:
    #         logging.exception('Error deleting table, ' + self.users_table_name + ': ' + str(err))
    #     try:
    #         self.table_service.delete_table(self.responses_table_name)
    #     except Exception as err:
    #         logging.exception('Error deleting table, ' + self.responses_table_name + ':' + str(err))

    def get_all_responses(self, user_id=None):
        if user_id is None:
            req = ""
        else:
            # Use OData comparison syntax; "PartitionKey = x" is not a valid filter.
            req = "PartitionKey eq '{}'".format(user_id)
        entries = self.table_service.query_entities(self.responses_table_name,
                                                    req)
        return entries

    def get_all_users(self):
        entries = self.table_service.query_entities(self.users_table_name, "")
        return entries

    def print_all_responses(self, user_id=None):
        entries = self.get_all_responses(user_id)
        if user_id is None:
            print(" USER_ID ", end='')
        else:
            print("USER_ID = {}\n".format(user_id))
        print(" QUESTION_ID LIST_ID RESPONSE_TYPE TIME DURATION "
              "CORRECT INCORRECT QUESTIONS")
        for response in entries:
            if user_id is None:
                print("{:^30} ".format(response['user_id']), end='')    # UID
            print("{:^20} ".format(response['question_id']), end='')    # QID
            print("{:^16} ".format(response['list_id']), end='')        # LIST ID
            print("{:^12} ".format(response['response_type']), end='')  # RESPONSE TYPE
            print("{:^26} ".format(
                time.strftime("%d-%m-%y %H:%M:%S",
                              time.localtime(int(response['time'])))),
                  end='')                                                # TIME
            print("{:^16} ".format(response['duration']), end='')       # DURATION
            print("{:^10} ".format(response['correct']), end='')        # CORRECT
            print("{:^10} ".format(response['incorrect']), end='')      # INCORRECT
            print("{:^38} ".format(response['questions']))              # QUESTIONS
        print("\n")

    def print_all_users(self):
        entries = self.get_all_users()
        # Header aligned to the row format below
        print("{:^30} {:^20} {:^20} {:^20} {:^40} {:^40}".format(
            "USER ID", "NAME", "LAST ACCESSED", "REMOTE IP", "USER AGENT",
            "USER LANGUAGE"))
        for row in entries:
            print("{:^30} {:^20} {:^20} {:^20} {:^40} {:^40}".format(
                row['user_id'], row['name'],
                time.strftime("%d-%m-%y %H:%M:%S",
                              time.localtime(row['last_accessed'])),
                row['remote_ip'] if 'remote_ip' in row.keys() else "",
                row['user_agent'] if 'user_agent' in row.keys() else "",
                row['user_language'] if 'user_language' in row.keys() else ""))
        print("\n")

    def get_question_stats(self, q_id=None, from_date=None):
        req = ""
        if q_id:
            # Remove / from q_id
            # mq_id = "".join("fractions/q00022".split("/"))
            mq_id = "".join(q_id.split("/"))
            req = req + "((RowKey ge '{}|') and (RowKey lt '{}{}'))".format(
                mq_id, mq_id, chr(255))
        if from_date:
            if len(req) > 0:
                req = req + " and "
            req = req + "(Timestamp ge datetime'{}')".format(from_date)
        entries = self.table_service.query_entities(self.responses_table_name,
                                                    req)
        result = []
        for row in entries:
            if "user_id" not in row.keys() or "UNKNOWN" in row["user_id"]:
                continue
            result.append(row)
        return result
task.RowKey = '002'
task.description = 'Wash the car'
task.priority = 100
table_service.insert_entity('tasktable', task)

# Update
print("Update task 001...")
task = {'PartitionKey': 'tasksSeattle', 'RowKey': '001',
        'description': 'Take out the garbage', 'priority': 250}
table_service.update_entity('tasktable', task)

# Replace the entity created earlier
print("Replace task using insert_or_replace... - Take out the garbage again")
task = {'PartitionKey': 'tasksSeattle', 'RowKey': '001',
        'description': 'Take out the garbage again', 'priority': 250}
table_service.insert_or_replace_entity('tasktable', task)

# Insert a new entity
print("insert or replace rowkey 003 - buy detergent")
task = {'PartitionKey': 'tasksSeattle', 'RowKey': '003',
        'description': 'Buy detergent', 'priority': 300}
table_service.insert_or_replace_entity('tasktable', task)

# Batch processing - add multiple entries in one round trip
print("batch processing task 004/005")
batch = TableBatch()
task004 = {'PartitionKey': 'tasksSeattle', 'RowKey': '004',
           'description': 'Go grocery shopping', 'priority': 400}
task005 = {'PartitionKey': 'tasksSeattle', 'RowKey': '005',
           'description': 'Clean the bathroom', 'priority': 100}
batch.insert_entity(task004)
batch.insert_entity(task005)
# A batch only takes effect once it is committed to the service.
table_service.commit_batch('tasktable', batch)
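# A short follow-on sketch: read the Seattle tasks back with an OData filter.
# It reuses the `table_service` instance from the snippet above; the filter
# syntax is the standard one for the legacy TableService API.
tasks = table_service.query_entities(
    'tasktable', filter="PartitionKey eq 'tasksSeattle'")
for t in tasks:
    print(t.RowKey, t.description, t.priority)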
if status == MIFAREReader.MI_OK:
    # Beep
    # try:
    #     GPIO.output(BeepPin, GPIO.HIGH)
    #     time.sleep(0.5)
    #     GPIO.output(BeepPin, GPIO.LOW)
    # except KeyboardInterrupt:
    #     print("Exception: KeyboardInterrupt")
    #     GPIO.cleanup()

    # Add the card to the RFID table
    card = Entity()
    card.PartitionKey = time.strftime("%Y-%m-%d", time.localtime())
    card.RowKey = time.strftime("%H:%M:%S", time.localtime())
    card.UID = CardUID
    card.Authenticate = status
    table_service.insert_or_replace_entity("RFID", card)

    # Update the Projects table
    projectlog = Entity()
    projectlog.PartitionKey = "CheckID"
    projectlog.RowKey = "001"
    projectlog.Value = CardUID
    table_service.insert_or_replace_entity("Projects", projectlog)

    MIFAREReader.MFRC522_Read(8)
    MIFAREReader.MFRC522_StopCrypto1()
    # time.sleep(0.5)
else:
    print("Authentication error")
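# A hedged sketch of reading back today's card swipes. The "RFID" table and
# the date-based PartitionKey match the snippet above; `table_service` and
# `time` are the same objects used there.
today = time.strftime("%Y-%m-%d", time.localtime())
swipes = table_service.query_entities(
    "RFID", filter="PartitionKey eq '{}'".format(today))
for swipe in swipes:
    print(swipe.RowKey, swipe.UID)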
import random
import uuid


class SendTableData:
    def __init__(self, connection_string=None, storage_account_name=None,
                 account_key=None, identity=None, table_name=None):
        assert (storage_account_name and account_key) or connection_string
        self.storage_account_name = storage_account_name
        self.account_key = account_key
        self.table_name = table_name or "defaulttablepython"
        self.create_table_instance = CreateTableInstance(
            identity=identity,
            connection_string=connection_string,
            storage_account_name=storage_account_name,
            account_key=account_key) if identity else None
        self.table_service = TableService(
            account_name=self.storage_account_name,
            account_key=self.account_key) \
            if self.storage_account_name and self.account_key else None
        if connection_string:
            self.connection_string = connection_string
            self.table_service = TableService(
                connection_string=self.connection_string)
        self.ROW_KEY_GEN = False
        self.PARTITION_KEY_GEN = False

    def create_table(self, table_name=None, **options):
        """Create a table.

        :param table_name:
        :param options:
        :return:
        """
        self.table_name = table_name if table_name else self.table_name
        assert self.create_table_instance, \
            "Initialize this object by passing an identity object to the constructor"
        self.create_table_instance.create_table(table_name=self.table_name,
                                                **options)

    def create_storage_account(self, **options):
        """Create a storage account.

        :return:
        """
        self.create_table_instance.create_storage_account(**options)

    @beartype
    def commit_data(self, data: dict):
        """Send data to the table: insert the entity, replacing it if it
        already exists.

        :param data:
        :return:
        """
        if self.ROW_KEY_GEN and self.PARTITION_KEY_GEN:
            data['RowKey'] = str(uuid.uuid4())
            data['PartitionKey'] = str(random.randint(0, 10))
        self.table_service.insert_or_replace_entity(self.table_name, data)

    @beartype
    def commit_batch_data(self, data: list):
        """Send data in a batch.

        :param data:
        :return:
        """
        if self.ROW_KEY_GEN and self.PARTITION_KEY_GEN:
            # All entities in one batch must share a PartitionKey.
            partition_key = str(random.randint(0, 10))
            for data_ in data:
                data_['RowKey'] = str(uuid.uuid4())
                data_['PartitionKey'] = partition_key
        with self.table_service.batch(self.table_name) as batch:
            for data_ in data:
                batch.insert_entity(data_)
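# A minimal usage sketch for SendTableData, assuming a storage connection
# string is at hand (the placeholder below must be replaced). The
# key-generation flags are enabled so PartitionKey/RowKey are filled in
# automatically before each insert.
sender = SendTableData(connection_string="<your-connection-string>",
                       table_name="defaulttablepython")
sender.ROW_KEY_GEN = True
sender.PARTITION_KEY_GEN = True
sender.commit_data({'temperature': 21.5, 'sensor': 'kitchen'})
sender.commit_batch_data([{'temperature': 19.0, 'sensor': 'garage'},
                          {'temperature': 22.3, 'sensor': 'office'}])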
def main(mytimer: func.TimerRequest) -> None:
    utc_timestamp = datetime.datetime.utcnow().replace(
        tzinfo=datetime.timezone.utc).isoformat()
    if mytimer.past_due:
        logging.info('The timer is past due...')
    logging.info('Fetching data from IdentityNow at %s', utc_timestamp)
    url = f'https://{tenant_id}.api.identitynow.com/oauth/token'
    new_checkpoint_time = (datetime.datetime.utcnow() -
                           datetime.timedelta(minutes=60)).isoformat() + "Z"
    checkpoint_table_name = 'checkpointTable'
    table_service = TableService(account_name=storage_account_name,
                                 account_key=access_key)
    task = {
        'PartitionKey': 'checkpointTime',
        'RowKey': '001',
        'createdTime': new_checkpoint_time
    }
    table_exists = table_service.exists(checkpoint_table_name)
    # Check if the table already exists; if yes, get the existing checkpoint
    # time from the table entry. If not, create the table and insert the row
    # containing the new checkpoint time.
    if not table_exists:
        table_service.create_table(checkpoint_table_name)
        table_service.insert_entity(checkpoint_table_name, task)
        checkpoint_time = new_checkpoint_time
    else:
        returned_entity = table_service.get_entity(checkpoint_table_name,
                                                   'checkpointTime', '001')
        checkpoint_time = returned_entity.createdTime
        if use_current(new_checkpoint_time, checkpoint_time):
            checkpoint_time = new_checkpoint_time
    tokenparams = {
        'grant_type': grant_type,
        'client_id': client_id,
        'client_secret': client_secret
    }
    oauth_response = requests.request("POST", url=url, params=tokenparams)
    if oauth_response is not None:
        try:
            oauth_response.raise_for_status()
            access_token = oauth_response.json()["access_token"]
            headers = {
                'Content-Type': 'application/json',
                'Authorization': "Bearer " + access_token
            }
        except (HTTPError, KeyError, ValueError):
            logging.error("No access token received..." +
                          str(oauth_response.status_code))
            return
    partial_set = False
    audit_events = []
    # Search API results are slightly delayed, so cap the query window at some
    # time before now (60 minutes here, although in reality the delay is much
    # shorter).
    search_delay_time = (datetime.datetime.utcnow() -
                         datetime.timedelta(minutes=60)).isoformat() + "Z"
    # Number of events to return per call to the search API.
    limit = int(os.environ["LIMIT"])
    while True:
        if partial_set:
            break
        # Standard query params, but include limit for result set size.
        queryparams = {"count": "true", "offset": "0", "limit": limit}
        query_checkpoint_time = checkpoint_time.replace('-', '\\-').replace(
            '.', '\\.').replace(':', '\\:')
        query_search_delay_time = search_delay_time.replace(
            '-', '\\-').replace('.', '\\.').replace(':', '\\:')
        logging.info(
            f'checkpoint_time {query_checkpoint_time} search_delay_time {query_search_delay_time}'
        )
        # Search criteria - retrieve all audit events since the checkpoint
        # time, sorted by created date
        searchpayload = {
            "queryType": "SAILPOINT",
            "query": {
                "query":
                f"created:>{query_checkpoint_time} AND created:<{query_search_delay_time}"
            },
            "queryResultFilter": {},
            "sort": ["created"],
            "searchAfter": []
        }
        audit_url = f'https://{tenant_id}.api.identitynow.com/v3/search/events'
        # Initiate request
        audit_events_response = requests.request("POST",
                                                 url=audit_url,
                                                 params=queryparams,
                                                 json=searchpayload,
                                                 headers=headers)
        # API Gateway saturated / rate limit encountered. Delay and try again.
        # The delay is either dictated by the IdentityNow server response or
        # defaults to 5000 seconds.
        if audit_events_response.status_code == 429:
            retryDelay = 5000
            retryAfter = audit_events_response.headers.get('Retry-After')
            if retryAfter is not None:
                retryDelay = int(retryAfter)
            logging.warning(
                f'429 - Rate Limit Exceeded, retrying in: {retryDelay}')
            time.sleep(retryDelay)
        elif audit_events_response.ok:
            # Check the response headers to get the total number of search
            # results: if this value is 0 there is nothing to parse; if it is
            # less than the limit value we are caught up to the most recent
            # event and can exit the query loop.
            x_total_count = int(audit_events_response.headers['X-Total-Count'])
            if x_total_count > 0:
                try:
                    if x_total_count < int(limit):
                        # Less than limit returned, caught up, so exit.
                        partial_set = True
                    results = audit_events_response.json()
                    # Add this set of results to the audit events array
                    audit_events.extend(results)
                    current_last_event = audit_events[-1]
                    checkpoint_time = current_last_event['created']
                except KeyError:
                    logging.info("Response does not contain items...")
                    break
            else:
                # Set partial_set to True to exit the loop (no results)
                partial_set = True
        else:
            logging.info(
                f'Failure from server... {audit_events_response.status_code}')
            # Forced exit
            return
    # Iterate the audit events array and create events for each one.
    if len(audit_events) > 0:
        for audit_event in audit_events:
            data_json = json.dumps(audit_event)
            table_name = "SailPointIDN_Events"
            try:
                post_data(customer_id, shared_key, data_json, table_name,
                          logAnalyticsUri)
            except Exception as error:
                logging.error("Unable to send data to Azure Log...")
                logging.error(error)
        # Get the created date of the last audit event in this run and save it
        # as the checkpoint time in the table.
        last_event = audit_events[-1]
        new_checkpoint_time = last_event['created']
        # Create an entry with the new checkpoint time.
        task = {
            'PartitionKey': 'checkpointTime',
            'RowKey': '001',
            'createdTime': new_checkpoint_time
        }
        # Write the new checkpoint time back to the table.
        table_service.insert_or_replace_entity(checkpoint_table_name, task)
        logging.info("Table successfully updated...")
    else:
        logging.info("No Events were returned...")
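# The function above calls use_current() without defining it here. Below is a
# plausible, clearly hypothetical sketch of such a helper, not the original
# implementation: since both values are identically formatted ISO-8601 UTC
# strings, lexicographic comparison matches chronological order, and the
# assumed intent is to fall back to the fresh default window whenever the
# stored checkpoint lags behind it.
def use_current(new_checkpoint_time, checkpoint_time):
    # Assumption: prefer the newly computed time when the stored checkpoint
    # is older than it.
    return checkpoint_time < new_checkpoint_time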
class AzureStorageHandler:
    def __init__(self, kv):
        try:
            self.table_service = TableService(
                account_name=kv.get_keyvault_secret("storageAccount-name"),
                account_key=kv.get_keyvault_secret("storageAccount-key"))
        except Exception as ex:
            print('Exception:')
            print(ex)

    def insert_submission_entry(self, entry):
        submission = Entity()
        submission.PartitionKey = entry.subreddit
        submission.RowKey = entry.id
        submission.author = entry.author
        submission.created_utc = entry.created_utc
        submission.flair = entry.flair
        submission.title = entry.title
        # Flatten list of keywords into a comma-separated string
        submission.title_keywords = ','.join(map(str, entry.title_keywords))
        submission.title_sentiment = entry.title_sentiment
        try:
            submission.body_keywords = ','.join(map(str, entry.body_keywords))
            submission.body_sentiment = entry.body_sentiment
        except AttributeError:
            submission.body_keywords = ""
            submission.body_sentiment = ""
        self.table_service.insert_entity('submissions', submission)

    def insert_comment_entry(self, entries):
        for entry in entries:
            comment = Entity()
            comment.PartitionKey = entry.link_id
            comment.RowKey = entry.id
            comment.author = entry.author
            comment.body = entry.body
            comment.created_utc = entry.created_utc
            comment.parent_id = entry.parent_id
            comment.score = entry.score
            comment.subreddit = entry.subreddit
            comment.subreddit_id = entry.subreddit_id
            comment.total_awards_received = entry.total_awards_received
            comment.sentiment = entry.sentiment
            # Flatten list of keywords into a comma-separated string
            comment.keywords = ','.join(map(str, entry.keywords))
            self.table_service.insert_entity('comments', comment)

    def insert_recommendation_entry(self, entries):
        for entry in entries:
            recommendation = Entity()
            recommendation.PartitionKey = "{0}_{1}".format(
                entry.subreddit, entry.query_word)
            recommendation.RowKey = entry.keyword
            recommendation.subreddit = entry.subreddit
            recommendation.query_word = entry.query_word
            recommendation.post_id = ','.join(map(str, entry.post_id))
            recommendation.comment_id = ','.join(map(str, entry.comment_id))
            recommendation.sentiment = entry.sentiment
            recommendation.count = entry.count
            try:
                self.table_service.insert_entity('recommendations',
                                                 recommendation)
            except AzureConflictHttpError as error:
                # print(error)
                subreddit_query_word = recommendation.PartitionKey.split('_')
                print("The recommendation entry with subreddit = '{0}', "
                      "search term = '{1}', and keyword = '{2}' already "
                      "exists in the database. Updating it...".format(
                          subreddit_query_word[0], subreddit_query_word[1],
                          recommendation.RowKey))
                self.table_service.update_entity('recommendations',
                                                 recommendation)

    def insert_sub_date_entry(self, entry):
        sub_date = Entity()
        sub_date.PartitionKey = entry.subreddit
        sub_date.RowKey = entry.title
        sub_date.created_utc = entry.created_utc
        sub_date.post_id = entry.post_id
        try:
            self.table_service.insert_or_replace_entity(
                'mostrecentsubdate', sub_date)
        except TypeError as error:
            print(error)
            print("The mostrecentsubdate object is formatted incorrectly and "
                  "was not updated. One of the parameters is not an int, str, "
                  "bool or datetime, or a defined custom EntityProperty. "
                  "Continuing...")

    def get_entry(self, table, partition_key, row_key):
        return self.table_service.get_entity(table, partition_key, row_key)

    def filter_entries(self, table, filter_string):
        return self.table_service.query_entities(table, filter_string)

    def update_entry(self, table, entity):
        return self.table_service.update_entity(table, entity)

    def delete_entry(self, table, partition_key, row_key):
        return self.table_service.delete_entity(table, partition_key, row_key)
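# A brief usage sketch, assuming `kv` is a Key Vault wrapper exposing
# get_keyvault_secret() as in the constructor above, that the tables already
# exist, and that the example keys below are placeholders.
handler = AzureStorageHandler(kv)
sub = handler.get_entry('submissions', 'learnpython', 'abc123')
recent = handler.filter_entries('comments', "subreddit eq 'learnpython'")
for comment in recent:
    print(comment.author, comment.sentiment)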
class AzureTableData:
    """
    This class handles the functionality of getting data from Azure Table
    Storage and generating the HTML report
    """

    def __init__(self, args):
        connect_str = args.connection_str
        self.table_service = TableService(connection_string=connect_str)

    def get_report_line(self, index, image, context):
        """
        This function generates a single row of the resultant validation report
        """
        result_line = "\t<tr class='" + context + "'>\n"
        if hasattr(image, 'ErrorMessages'):
            err_msg = str(image.ErrorMessages).replace("\n", "</br>")
        else:
            err_msg = ""
        result_line = result_line + "\t\t<td>" + str(index) + "</td>\n"
        result_line = result_line + "\t\t<td>" + str(image.PartitionKey) + "</td>\n"
        result_line = result_line + "\t\t<td>" + str(image.ValidationResult) + "</td>\n"
        result_line = result_line + "\t\t<td>" + err_msg + "</td>\n"
        result_line = result_line + "\t</tr>\n"
        return result_line

    def generate_validation_report(self, args):
        """
        This function generates the HTML report of validations
        """
        # Materialize the query results into a list: they are iterated three
        # times below, once per result category.
        imagequeryresult = list(self.table_service.query_entities(
            args.table_name,
            filter="IsDeleted eq '0'",
            accept='application/json;odata=minimalmetadata'))
        index = 1
        result_line = """<!DOCTYPE html>
<html lang="en">
<head>
  <title>Marketplace Image Validation Report</title>
  <meta charset="utf-8">
  <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css">
  <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
  <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/js/bootstrap.min.js"></script>
</head>
<body>
<table class="table">
  <tr>
    <td> # </td>
    <td> VM Name </td>
    <td> Validation Result </td>
    <td> Error Messages </td>
  </tr>\n"""
        with open('./report/index.html', 'w') as report:
            context = "danger"
            for image in imagequeryresult:
                if image.ValidationResult == "Failed":
                    result_line = result_line + self.get_report_line(
                        index, image, context)
                    index += 1
            context = "success"
            for image in imagequeryresult:
                if image.ValidationResult == "Success":
                    result_line = result_line + self.get_report_line(
                        index, image, context)
                    index += 1
            context = "warning"
            for image in imagequeryresult:
                if image.ValidationResult == "NA":
                    result_line = result_line + self.get_report_line(
                        index, image, context)
                    index += 1
            result_line = result_line + "</table></body></html>"
            report.write("%s\n" % result_line)

    def select_images_to_validate(self, args):
        """
        This function iterates over all the entries in the Azure Table Storage
        and selects at most 'args.max_vm_to_validate' images which should get
        validated during this run.
        """
        max_vms_to_validate_at_a_time = int(args.max_vm_to_validate)
        validation_period = int(args.validation_period)
        with open(args.all_image_list, 'r') as allimages:
            lines = allimages.readlines()
        imagequeryresult = self.table_service.query_entities(
            args.table_name,
            filter="IsDeleted eq '0'",
            accept='application/json;odata=minimalmetadata')
        entries = []
        list_of_images_to_validate = []
        current_date_time = datetime.datetime.now(datetime.timezone.utc)
        for image in imagequeryresult:
            entries.append(image)
        for line in lines:
            publisher = line.split(':')[0]
            offer = line.split(':')[1]
            sku = line.split(':')[2]
            disk_version = line.split(':')[3].replace('\n', '')
            image_name = offer.replace("_", "-") + "-" + \
                sku.replace("_", "-") + "-" + disk_version
            image_entry_exists = False
            for image in entries:
                # If the image entry exists and it was last validated more
                # than 'validation_period' days ago, add it to the list to be
                # validated
                if image.PartitionKey == image_name:
                    image_entry_exists = True
                    if image.ValidationResult == 'NA' or \
                            (current_date_time - image.Timestamp).days > validation_period:
                        list_of_images_to_validate.append(line)
                    break
            if not image_entry_exists:
                list_of_images_to_validate.append(line)
                # Insert the entry as well
                args.image_name = image_name
                args.validation_result = 'NA'
                self.insert_data(args)
        i = 0
        with open(args.filtered_image_list, 'w') as filteredimagefile:
            filteredimagefile.write("")
            for image in list_of_images_to_validate:
                if i == max_vms_to_validate_at_a_time:
                    break
                filteredimagefile.write("%s" % image)
                i += 1

    def insert_data(self, args):
        """
        Inserts/updates the records in the Azure Table Storage
        """
        table_name = args.table_name
        image_name = args.image_name
        validation_time = args.validation_time
        validation_result = args.validation_result
        validation_epoch = args.validation_epoch
        print("error message path", args.err_msg_file)
        if args.err_msg_file is not None and path.exists(args.err_msg_file):
            with open(args.err_msg_file, "r") as err_msg_file:
                err_msgs = err_msg_file.read()
        else:
            err_msgs = ""
        validationResult = {
            'PartitionKey': image_name,
            'RowKey': "1",
            'ValidationResult': validation_result,
            "ErrorMessages": err_msgs,
            "IsDeleted": "0"
        }
        print(validationResult)
        self.table_service.insert_or_replace_entity(table_name,
                                                    validationResult)
class Db(object):
    ts = None

    def __init__(self):
        """Init connection with Cosmos DB"""
        self.ts = TableService(account_name=ACCOUNT_NAME,
                               account_key=ACCOUNT_KEY)

    def migrate(self):
        """Create tables if they do not exist"""
        if not self.ts.exists(USER_TABLE_NAME):
            self.ts.create_table(USER_TABLE_NAME)
        if not self.ts.exists(MESSAGE_TABLE_NAME):
            self.ts.create_table(MESSAGE_TABLE_NAME)

    def get_all_users(self):
        """Select email from user"""
        return [
            i['PartitionKey']
            for i in self.ts.query_entities(USER_TABLE_NAME)
        ]

    def create_user(self, data=None):
        bjson = ep(
            EdmType.BINARY,
            dumps({
                'email': data['email'],
                'password': sha224(bytes(data['password'],
                                         encoding='utf-8')).hexdigest(),
                'full_name': data['full_name']
            }))
        # `row_key` is a module-level constant defined elsewhere in the source.
        user = {
            'PartitionKey': data['email'],
            'RowKey': row_key,
            'info': bjson
        }
        if self.ts.insert_or_replace_entity(USER_TABLE_NAME, user):
            return {'success': True}

    def delete_user(self, email=None):
        # delete_entity() returns None, so report success once it has
        # completed without raising.
        self.ts.delete_entity(USER_TABLE_NAME, email, row_key)
        return {'success': True}

    def create_message(self, email=None, message=None):
        """Create a message in protobuf"""
        proto_message = message_pb2.Message()
        proto_message.title = message['title']
        proto_message.content = message['content']
        proto_message.magic_number = message['magic_number']
        # SerializeToString() yields the wire-format bytes; str() would only
        # give the human-readable text representation.
        details = ep(EdmType.BINARY, proto_message.SerializeToString())
        bmessage = {
            'PartitionKey': email,
            'RowKey': row_key,
            'details': details,
        }
        if self.ts.insert_or_replace_entity(MESSAGE_TABLE_NAME, bmessage):
            return {'success': True}

    def get_user(self, email=''):
        return self.ts.get_entity(USER_TABLE_NAME, email, row_key)

    def get_message(self, email=''):
        return self.ts.get_entity(MESSAGE_TABLE_NAME, email, row_key)

    def get_messages(self):
        messages = self.ts.query_entities(MESSAGE_TABLE_NAME)
        return list(messages)
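# A minimal usage sketch for the Db wrapper above, assuming ACCOUNT_NAME,
# ACCOUNT_KEY, the table-name constants, `row_key`, and the generated
# message_pb2 module are configured at module level as the class expects; the
# credentials and payload values are placeholders.
db = Db()
db.migrate()
db.create_user({'email': 'alice@example.com',
                'password': 'hunter2',
                'full_name': 'Alice Example'})
db.create_message(email='alice@example.com',
                  message={'title': 'hello',
                           'content': 'first message',
                           'magic_number': 42})
print(db.get_all_users())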