Example #1
    def request_session(self):
        # A custom request session may be used to set special network options
        session = requests.Session()
        client = TableService(account_name='<account_name>', account_key='<account_key>',
                                  request_session=session)

        # Set later
        client = TableService(account_name='<account_name>', account_key='<account_key>')
        client.request_session = session
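
Since any pre-configured requests.Session is accepted, connection pooling can be tuned before the session is handed to the client. A minimal sketch using the standard requests HTTPAdapter (the pool sizes here are arbitrary illustrations, not recommendations):

    import requests
    from requests.adapters import HTTPAdapter

    # Reuse a larger connection pool for high-throughput table access
    session = requests.Session()
    session.mount('https://', HTTPAdapter(pool_connections=4, pool_maxsize=16))

    client = TableService(account_name='<account_name>', account_key='<account_key>',
                          request_session=session)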
Example #2
    def protocol(self):
        # https is the default protocol and is strongly recommended for security 
        # However, http may be used if desired
        client = TableService(account_name='<account_name>', account_key='<account_key>',
                                  protocol='http')

        # Set later
        client = TableService(account_name='<account_name>', account_key='<account_key>')
        client.protocol = 'http'
Example #3
    def proxy(self):
        # Unauthenticated
        client = TableService(account_name='<account_name>', account_key='<account_key>')
        client.set_proxy('127.0.0.1', '8888')

        # Authenticated
        client = TableService(account_name='<account_name>', account_key='<account_key>')
        proxy_user = '******'
        proxy_password = '******'
        client.set_proxy('127.0.0.1', '8888', user=proxy_user, password=proxy_password)
Example #4
    def test_logging(self):
        # key to identify this session
        test_id = str(uuid4())

        # setup logger
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)

        # setup handler and add to logger
        handler = AzureTableStorageHandler(
            account_name=account_name,
            account_key=account_key,
            table_name=table_name
        )
        logger.addHandler(handler)

        # write logs
        logger.debug(f"DEBUG: {test_id}")
        logger.info(f"INFO: {test_id}")
        logger.warning(f"WARNING: {test_id}")
        logger.error(f"ERROR: {test_id}")
        logger.critical(f"CRITICAL: {test_id}")

        # get log messages
        ts = TableService(account_name=account_name, account_key=account_key)
        for ent in ts.query_entities(table_name=table_name, filter="PartitionKey eq '__main__'"):
            self.assertEqual(ent["LevelName"] + ": " + test_id, ent["Message"])
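
AzureTableStorageHandler above is a project-specific helper, not part of the SDK. A minimal sketch of such a logging handler, assuming the legacy TableService insert API (the PartitionKey/RowKey scheme is an illustrative guess chosen to match the assertions above):

    import logging
    from uuid import uuid4

    class AzureTableStorageHandler(logging.Handler):
        """Sketch: persist each log record as one table entity."""

        def __init__(self, account_name, account_key, table_name):
            super().__init__()
            self.client = TableService(account_name=account_name, account_key=account_key)
            self.table_name = table_name
            self.client.create_table(table_name, fail_on_exist=False)

        def emit(self, record):
            self.client.insert_entity(self.table_name, {
                'PartitionKey': record.name,   # '__main__' when run as a script
                'RowKey': str(uuid4()),
                'LevelName': record.levelname,
                'Message': record.getMessage(),
            })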
Example #5
    def sas_with_signed_identifiers(self):
        table_name = self._create_table()
        entity = {
            'PartitionKey': 'test',
            'RowKey': 'test1',
            'text': 'hello world',
        }
        self.service.insert_entity(table_name, entity)

        # Set access policy on table
        access_policy = AccessPolicy(permission=TablePermissions.QUERY,
                                     expiry=datetime.utcnow() +
                                     timedelta(hours=1))
        identifiers = {'id': access_policy}
        self.service.set_table_acl(table_name, identifiers)

        # Wait 30 seconds for acl to propagate
        time.sleep(30)

        # Indicates to use the access policy set on the table
        token = self.service.generate_table_shared_access_signature(table_name,
                                                                    id='id')

        # Create a service and use the SAS
        sas_service = TableService(
            account_name=self.account.account_name,
            sas_token=token,
        )

        entities = list(sas_service.query_entities(table_name))
        for entity in entities:
            print(entity.text)  # hello world

        self.service.delete_table(table_name)
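
A SAS that references a stored access policy, like the one above, can be revoked after issuance. A one-line hedged sketch (applied before the table is deleted):

    # Clearing the stored access policies invalidates any SAS that references them
    self.service.set_table_acl(table_name, {})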
Example #6
    def test_sas_add(self):
        # SAS URL is calculated from storage key, so this test runs live only
        if TestMode.need_recording_file(self.test_mode):
            return

        # Arrange
        token = self.ts.generate_table_shared_access_signature(
            self.table_name,
            TablePermissions.ADD,
            datetime.utcnow() + timedelta(hours=1),
            datetime.utcnow() - timedelta(minutes=1),
        )

        # Act
        service = TableService(
            account_name=self.settings.STORAGE_ACCOUNT_NAME,
            sas_token=token,
        )
        self._set_test_proxy(service, self.settings)

        entity = self._create_random_entity_dict()
        service.insert_entity(self.table_name, entity)

        # Assert
        resp = self.ts.get_entity(self.table_name, entity['PartitionKey'],
                                  entity['RowKey'])
        self._assert_default_entity(resp)
Example #7
    def test_sas_add_outside_range(self):
        # SAS URL is calculated from storage key, so this test runs live only
        if TestMode.need_recording_file(self.test_mode):
            return

        # Arrange

        token = self.ts.generate_table_shared_access_signature(
            self.table_name,
            TablePermissions.ADD,
            datetime.utcnow() + timedelta(hours=1),
            start_pk='test',
            start_rk='test1',
            end_pk='test',
            end_rk='test1',
        )

        # Act
        service = TableService(
            account_name=self.settings.STORAGE_ACCOUNT_NAME,
            sas_token=token,
        )
        self._set_test_proxy(service, self.settings)
        with self.assertRaises(AzureHttpError):
            entity = self._create_random_entity_dict()
            service.insert_entity(self.table_name, entity)
Example #8
    def emulator(self):
        # With account
        account = CloudStorageAccount(is_emulated=True)
        client = account.create_table_service()

        # Directly
        client = TableService(is_emulated=True)
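
The emulator can also be reached through the .NET-style development connection string, assuming the connection-string parser supports the UseDevelopmentStorage shorthand (it does in the legacy azure-storage-common package, if memory serves):

    # Assumed equivalent: development-storage shorthand connection string
    client = TableService(connection_string='UseDevelopmentStorage=true')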
Example #9
async def main(stationData: func.InputStream):
    """ Azure function body """
    logging.info('Python blob trigger function processed blob (%s) - %s bytes',
                 stationData.name, stationData.length)

    table_service = TableService(
        connection_string=os.environ['TableBindingConnection'])

    table_name = 'WeatherStations'
    table_service.create_table(table_name, fail_on_exist=False)

    batch_manager = BatchManager(table_service, table_name)

    bytes_data = stationData.read()

    station_text = StringIO(str(bytes_data, 'ascii'), newline="\n")

    station_list = parse_station_list(station_text)

    logging.info('Processing %i records', len(station_list))

    for record in station_list:
        entity = create_entity(record)
        batch_manager.add_entity(entity)

    batch_manager.process()

    logging.info('Updated %s - %i records', table_name, len(station_list))
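
BatchManager, parse_station_list, and create_entity are helpers defined elsewhere in this project. A plausible sketch of BatchManager, assuming the azure-cosmosdb-table TableBatch API and respecting the service limits (at most 100 operations per batch, all sharing one PartitionKey):

    from azure.cosmosdb.table.tablebatch import TableBatch

    class BatchManager:
        """Sketch: buffer entities per partition and commit in batches of 100."""

        MAX_BATCH_SIZE = 100  # service limit per batch transaction

        def __init__(self, table_service, table_name):
            self.table_service = table_service
            self.table_name = table_name
            self.pending = {}  # PartitionKey -> [entity, ...]

        def add_entity(self, entity):
            self.pending.setdefault(entity['PartitionKey'], []).append(entity)

        def process(self):
            for entities in self.pending.values():
                for i in range(0, len(entities), self.MAX_BATCH_SIZE):
                    batch = TableBatch()
                    for entity in entities[i:i + self.MAX_BATCH_SIZE]:
                        batch.insert_or_replace_entity(entity)
                    self.table_service.commit_batch(self.table_name, batch)
            self.pending.clear()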
Example #10
    def test_sas_upper_case_table_name(self):
        # SAS URL is calculated from storage key, so this test runs live only
        if TestMode.need_recording_file(self.test_mode):
            return

        # Arrange
        entity = self._insert_random_entity()

        # Table names are case insensitive, so simply upper case our existing table name to test
        token = self.ts.generate_table_shared_access_signature(
            self.table_name.upper(),
            TablePermissions.QUERY,
            datetime.utcnow() + timedelta(hours=1),
            datetime.utcnow() - timedelta(minutes=1),
        )

        # Act
        service = TableService(
            account_name=self.settings.STORAGE_ACCOUNT_NAME,
            sas_token=token,
        )
        self._set_test_proxy(service, self.settings)
        entities = list(
            service.query_entities(self.table_name,
                                   filter="PartitionKey eq '{}'".format(
                                       entity['PartitionKey'])))

        # Assert
        self.assertEqual(len(entities), 1)
        self._assert_default_entity(entities[0])
Example #11
    def test_sas_delete(self):
        # SAS URL is calculated from storage key, so this test runs live only
        if TestMode.need_recording_file(self.test_mode):
            return

        # Arrange
        entity = self._insert_random_entity()
        token = self.ts.generate_table_shared_access_signature(
            self.table_name,
            TablePermissions.DELETE,
            datetime.utcnow() + timedelta(hours=1),
        )

        # Act
        service = TableService(
            account_name=self.settings.STORAGE_ACCOUNT_NAME,
            sas_token=token,
        )
        self._set_test_proxy(service, self.settings)
        service.delete_entity(self.table_name, entity.PartitionKey,
                              entity.RowKey)

        # Assert
        with self.assertRaises(AzureMissingResourceHttpError):
            self.ts.get_entity(self.table_name, entity.PartitionKey,
                               entity.RowKey)
Example #12
    def table_sas(self):
        table_name = self._create_table()
        entity = {
            'PartitionKey': 'test',
            'RowKey': 'test1',
            'text': 'hello world',
        }
        self.service.insert_entity(table_name, entity)

        # Access only to the entities in the given table
        # Query permissions to access entities
        # Expires in an hour
        token = self.service.generate_table_shared_access_signature(
            table_name,
            TablePermissions.QUERY,
            datetime.utcnow() + timedelta(hours=1),
        )

        # Create a service and use the SAS
        sas_service = TableService(
            account_name=self.account.account_name,
            sas_token=token,
        )

        entities = sas_service.query_entities(table_name)
        for entity in entities:
            print(entity.text)  # hello world

        self.service.delete_table(table_name)
Example #13
    def key_auth(self):
        # With account
        account = CloudStorageAccount(account_name='<account_name>', account_key='<account_key>')
        client = account.create_table_service()

        # Directly
        client = TableService(account_name='<account_name>', account_key='<account_key>')
Example #14
    def sas_auth(self):
        # With account
        account = CloudStorageAccount(account_name='<account_name>', sas_token='<sas_token>')
        client = account.create_table_service()

        # Directly
        client = TableService(account_name='<account_name>', sas_token='<sas_token>')
Example #15
    def account_sas(self):
        table_name = self._create_table()
        entity = {
            'PartitionKey': 'test',
            'RowKey': 'test1',
            'text': 'hello world',
        }
        self.service.insert_entity(table_name, entity)

        # Access to all entities in all the tables
        # Expires in an hour
        token = self.service.generate_account_shared_access_signature(
            ResourceTypes.OBJECT,
            AccountPermissions.READ,
            datetime.utcnow() + timedelta(hours=1),
        )

        # Create a service and use the SAS
        sas_service = TableService(
            account_name=self.account.account_name,
            sas_token=token,
        )

        entities = list(sas_service.query_entities(table_name))
        for entity in entities:
            print(entity.text)  # hello world

        self.service.delete_table(table_name)
Example #16
    def test_sas_signed_identifier(self):
        # SAS URL is calculated from storage key, so this test runs live only
        if TestMode.need_recording_file(self.test_mode):
            return

        # Arrange
        entity = self._insert_random_entity()

        access_policy = AccessPolicy()
        access_policy.start = '2011-10-11'
        access_policy.expiry = '2018-10-12'
        access_policy.permission = TablePermissions.QUERY
        identifiers = {'testid': access_policy}

        self.ts.set_table_acl(self.table_name, identifiers)

        token = self.ts.generate_table_shared_access_signature(
            self.table_name,
            id='testid',
        )

        # Act
        service = TableService(
            account_name=self.settings.STORAGE_ACCOUNT_NAME,
            sas_token=token,
        )
        self._set_test_proxy(service, self.settings)
        entities = list(
            self.ts.query_entities(self.table_name,
                                   filter="PartitionKey eq '{}'".format(
                                       entity.PartitionKey)))

        # Assert
        self.assertEqual(len(entities), 1)
        self._assert_default_entity(entities[0])
Example #17
    def test_sas_update(self):
        # SAS URL is calculated from storage key, so this test runs live only
        if TestMode.need_recording_file(self.test_mode):
            return

        # Arrange
        entity = self._insert_random_entity()
        token = self.ts.generate_table_shared_access_signature(
            self.table_name,
            TablePermissions.UPDATE,
            datetime.utcnow() + timedelta(hours=1),
        )

        # Act
        service = TableService(
            account_name=self.settings.STORAGE_ACCOUNT_NAME,
            sas_token=token,
        )
        self._set_test_proxy(service, self.settings)
        updated_entity = self._create_updated_entity_dict(
            entity.PartitionKey, entity.RowKey)
        resp = service.update_entity(self.table_name, updated_entity)

        # Assert
        received_entity = self.ts.get_entity(self.table_name,
                                             entity.PartitionKey,
                                             entity.RowKey)
        self._assert_updated_entity(received_entity)
Example #18
 def __init__(self, container=None):
     self.AZURE_STORAGE_ACCOUNT = 'logodetectionstorage'
     self.AZURE_STORAGE_KEY = '<account_key>'  # storage account key redacted
     self.table_service = TableService(account_name=self.AZURE_STORAGE_ACCOUNT, account_key=self.AZURE_STORAGE_KEY)
     self.blob_service = BlockBlobService(account_name=self.AZURE_STORAGE_ACCOUNT, account_key=self.AZURE_STORAGE_KEY)
     self.container = container or "input"
     self.table_list = []  # everything in the table for this logo
     self.logo = ""
Example #19
def get_table_service():
    """Return the TableService instance for this request initializing it if it doesn't exist."""
    table_service = getattr(g, 'table_service', None)
    if table_service is None:
        table_service = g.table_service = TableService(
            account_name=app.config['AZURE_STORAGE_ACCOUNT'],
            account_key=app.config['AZURE_STORAGE_KEY'])
    return table_service
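
A hypothetical Flask view using this helper (the route, table name, and filter are illustrative, not from the original project):

    from flask import jsonify

    @app.route('/entries')
    def list_entries():
        ts = get_table_service()
        entities = ts.query_entities('entries', filter="PartitionKey eq 'demo'")
        return jsonify([dict(e) for e in entities])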
Example #20
    def __init__(self, table_name='HemoniDataTable'):
        connection_string = "**"

        self.table_client = TableService(connection_string=connection_string)
        self.table_name = table_name
        if not self.table_client.exists(table_name):
            self.table_client.create_table(table_name=table_name)
Example #21
    def _get_table_client_from_storage_account(storage_account, session):
        primary_key = StorageUtilities.get_storage_primary_key(storage_account['resourceGroup'],
                                                               storage_account['name'],
                                                               session)

        return TableService(
            account_name=storage_account['name'],
            account_key=primary_key
        )
Example #22
    def __init__(self, cred, subs_id, hub, storage, pan_handle, logger=None):
        self.credentials = cred
        self.subscription_id = subs_id
        self.logger = logger
        self.hub_name = hub
        self.storage_name = storage
        self.panorama_handler = pan_handle
        self.vmss_table_name = re.sub(self.ALPHANUM, '', hub + 'vmsstable')

        try:
            self.resource_client = ResourceManagementClient(cred, subs_id)
            self.compute_client = ComputeManagementClient(cred, subs_id)
            self.network_client = NetworkManagementClient(cred, subs_id)
            self.store_client = StorageManagementClient(cred, subs_id)
            store_keys = self.store_client.storage_accounts.list_keys(hub, storage).keys[0].value
            self.table_service = TableService(account_name=storage,
                                              account_key=store_keys)
        except Exception as e:
            self.logger.error("Getting Azure Infra handlers failed %s" % str(e))
            raise e

        # Start -> List out all RGs and identify new spokes to mark them with tags.
        # Look for Resource Groups (RGs) that have no tags at all or lack a
        # tag named "PANORAMA_PROGRAMMED".
        potential_new_spokes = [x.name for x in self.resource_client.resource_groups.list()\
                         if not x.tags or not x.tags.get(self.RG_RULE_PROGRAMMED_TAG, None)]

        # If the RG has a VMSS with a tag named "PanoramaManaged" whose value is
        # the Hub Resource Group name, then this is a new spoke launched and
        # managed by the Hub that has not yet been programmed with the NAT/Azure
        # Instrumentation key.
        for rg in potential_new_spokes:
            fw_vm_list = [x for x in self.resource_client.resources.list_by_resource_group(rg)
                          if x.type == self.VMSS_TYPE and self.filter_vmss(rg, x.name)]
            if fw_vm_list:
                rg_params = {'location': self.resource_client.resource_groups.get(rg).location}
                rg_params.update(tags={
                                         self.RG_RULE_PROGRAMMED_TAG : 'No',
                                         self.HUB_MANAGED_TAG        : self.hub_name
                                      })
                self.resource_client.resource_groups.create_or_update(rg, rg_params)
                self.logger.info("RG %s marked as a spoke managed by this hub %s" % (rg, self.hub_name))
        # End -> List out all RGs and identify new spokes to mark them with tags.

        # Populate the list of spokes managed by this Azure hub.
        rg_list = self.resource_client.resource_groups.list()
        self.managed_spokes = []
        self.new_spokes = []
        for rg in rg_list:
            if rg.tags and rg.tags.get(self.HUB_MANAGED_TAG, None) == self.hub_name:
                self.managed_spokes.append(rg.name)
                if rg.tags.get(self.RG_RULE_PROGRAMMED_TAG, 'Yes') == 'No':
                    self.new_spokes.append(rg.name)
        self.logger.debug('%s identified as spokes managed by %s' % (self.managed_spokes, self.hub_name))
        if self.new_spokes:
            self.logger.info('%s identified as new spokes to be programmed by %s' % (self.new_spokes, self.hub_name))
Example #23
def get_client(table: Optional[str] = None,
               account_id: Optional[str] = None) -> TableService:
    if account_id is None:
        account_id = os.environ["ONEFUZZ_FUNC_STORAGE"]

    logging.debug("getting table account: (account_id: %s)", account_id)
    name, key = get_storage_account_name_key(account_id)
    client = TableService(account_name=name, account_key=key)

    if table and not client.exists(table):
        logging.info("creating missing table %s", table)
        client.create_table(table, fail_on_exist=False)
    return client
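
A hypothetical call site (the table name and entity are illustrative):

    # Ensure the table exists, then upsert a single entity
    client = get_client(table="tasks")
    client.insert_or_replace_entity("tasks", {
        "PartitionKey": "demo",
        "RowKey": "1",
        "state": "queued",
    })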
Example #24
 def __init__(self, shards=1):
     self.shards = shards
     self.table_name = "ShoppingCartTable"
     try:
         self.db = TableService(
             endpoint_suffix="table.cosmos.azure.com",
             connection_string=os.getenv("AZURE_COSMOS_CONNECTION_STRING"),
         )
     except ValueError:
         raise Exception(
             "Please initialize $AZURE_COSMOS_CONNECTION_STRING")
     try:
         self.db.create_table(self.table_name, fail_on_exist=True)
     except AzureConflictHttpError:
         # Accept error only if already exists
         pass
Example #25
    def retries(self):
        # By default, retries are performed with an exponential backoff.
        # Custom retry logic may be used by simply supplying a retry function,
        # but several pre-written policies with tunable settings are available.
        client = TableService(account_name='<account_name>', account_key='<account_key>')

        # Use an exponential retry, but modify the backoff settings
        # Here, we increase the initial back off, increase the number of retry attempts
        # and decrease the base of the exponential backoff.
        client.retry = ExponentialRetry(initial_backoff=30, increment_power=2, max_attempts=5).retry

        # Use a default linear retry policy instead
        client.retry = LinearRetry().retry

        # Turn off retries
        client.retry = no_retry
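
Since client.retry is just a callable, a fully custom policy can also be supplied. A minimal sketch, assuming the legacy azure-storage-common contract in which the function receives a RetryContext and returns either the number of seconds to wait or None to stop retrying:

    def custom_retry(context):
        # Give up after three attempts; otherwise retry only on server errors
        # or connection failures, with a flat five-second backoff.
        # (context.count and context.response are assumed RetryContext fields.)
        if context.count >= 3:
            return None
        if context.response is None or context.response.status >= 500:
            context.count += 1
            return 5
        return None

    client.retry = custom_retry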
Example #26
 def __init__(self, locator, jobChunkSize=maxAzureTablePropertySize):
     super(AzureJobStore, self).__init__()
     accountName, namePrefix = locator.split(':', 1)
     if '--' in namePrefix:
         raise ValueError(
             "Invalid name prefix '%s'. Name prefixes may not contain %s." %
             (namePrefix, self.nameSeparator))
     if not self.containerNameRe.match(namePrefix):
         raise ValueError(
             "Invalid name prefix '%s'. Name prefixes must contain only digits, "
             "hyphens or lower-case letters and must not start or end in a "
             "hyphen." % namePrefix)
     # Reserve 13 for separator and suffix
     if len(namePrefix) > self.maxContainerNameLen - self.maxNameLen - len(
             self.nameSeparator):
         raise ValueError((
             "Invalid name prefix '%s'. Name prefixes may not be longer than 50 "
             "characters." % namePrefix))
     self.locator = locator
     self.jobChunkSize = jobChunkSize
     self.accountKey = _fetchAzureAccountKey(accountName)
     self.accountName = accountName
     # Table names have strict requirements in Azure
     self.namePrefix = self._sanitizeTableName(namePrefix)
     # These are the main API entry points.
     self.tableService = TableService(account_key=self.accountKey,
                                      account_name=accountName)
     self.blobService = BlockBlobService(account_key=self.accountKey,
                                         account_name=accountName)
     # Serialized jobs table
     self.jobItems = None
     # Job<->file mapping table
     self.jobFileIDs = None
     # Container for all shared and unshared files
     self.files = None
     # Stats and logging strings
     self.statsFiles = None
     # File IDs that contain stats and logging strings
     self.statsFileIDs = None
Example #27
def clean_storage_account(connection_string):
    pool = ThreadPool(16)
    no_retry = azure.storage.common.retry.no_retry

    try:
        blob_service = BlobServiceClient.from_connection_string(
            connection_string)
        blob_service.retry = no_retry
        pool.map(
            lambda container: delete_container(blob_service, container.name),
            blob_service.list_containers(timeout=3))
    except azure.core.exceptions.ServiceRequestError:
        print("No blob service")

    try:
        file_service = ShareServiceClient.from_connection_string(
            connection_string)
        file_service.retry = no_retry
        pool.map(lambda share: delete_file_share(file_service, share.name),
                 file_service.list_shares(timeout=3))
    except azure.core.exceptions.ServiceRequestError:
        print("No file service")

    try:
        queue_service = QueueServiceClient.from_connection_string(
            connection_string)
        queue_service.retry = no_retry
        pool.map(lambda queue: delete_queue(queue_service, queue.name),
                 queue_service.list_queues(timeout=3))
    except azure.core.exceptions.ServiceRequestError:
        print("No queue service")

    try:
        table_service = TableService(connection_string=connection_string)
        table_service.retry = no_retry
        pool.map(lambda table: delete_table(table_service, table.name),
                 table_service.list_tables(timeout=3))
    except azure.common.AzureException:
        print("No table service")
Example #28
    def read_from_secondary(self):
        # If you are using RA-GRS accounts, you may want to enable reading from the 
        # secondary endpoint. Note that your application will have to handle this 
        # data potentially being out of date as the secondary may be behind the 
        # primary. 
        client = TableService(account_name='<account_name>', account_key='<account_key>')

        # The location mode is set to primary by default meaning that all requests 
        # are sent to the primary endpoint. If you'd like to instead read from the 
        # secondary endpoint by default, set location mode to secondary. Note that 
        # writes will continue to go to primary as they are not allowed on secondary.
        client.location_mode = LocationMode.SECONDARY

        # You may also decide you want to retry to secondary. This is useful if 
        # you'd like to automatically handle the primary being temporarily down. 
        # Again, your application will have to handle data being potentially out 
        # of date. Retry to secondary logic may be built into a custom retry policy, 
        # but our retry policies have a flag to enable it. Here we use the same 
        # exponential retry as by default, but allow it to retry to secondary if 
        # the initial request to primary fails.
        client.location_mode = LocationMode.PRIMARY  # Reset the location_mode to start with primary
        client.retry = ExponentialRetry(retry_to_secondary=True).retry
Example #29
    def callbacks(self):
        # Callbacks may be used to read or modify the request and response.
        # The request_callback is called once the request is fully built, just
        # before the authentication and date headers are added.
        # The response_callback is called when the HTTP response is received,
        # before any parsing is done.

        # Custom client request id
        client = TableService(account_name='<account_name>', account_key='<account_key>')

        def request_callback(request):
            request.headers['x-ms-client-request-id'] = '<my custom id>'

        client.request_callback = request_callback

        # View data from the response
        def response_callback(response):
            status = response.status
            headers = response.headers

        # Attach the callback; it runs for every response before parsing
        client.response_callback = response_callback
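
Because the callback sees the raw response before parsing, a test can force a probing call to succeed by rewriting the status. A hedged sketch of that variant:

    # Sketch: make a failed exists()-style probe report success (test use only)
    def force_success_callback(response):
        if response.status == 404:
            response.status = 200

    client.response_callback = force_success_callback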
Example #30
    def __init__(self,
                 cred,
                 subs_id,
                 my_storage_rg,
                 vmss_rg_name,
                 vmss_name,
                 storage,
                 pan_handle,
                 logger=None):
        self.credentials = cred
        self.subscription_id = subs_id
        self.logger = logger
        self.hub_name = vmss_rg_name
        self.storage_name = storage
        self.panorama_handler = pan_handle
        self.vmss_table_name = re.sub(self.ALPHANUM, '',
                                      vmss_name + 'vmsstable')
        self.vmss_rg_name = vmss_rg_name

        try:
            self.resource_client = ResourceManagementClient(cred, subs_id)
            self.compute_client = ComputeManagementClient(cred, subs_id)
            self.network_client = NetworkManagementClient(cred, subs_id)
            self.store_client = StorageManagementClient(cred, subs_id)
            store_keys = self.store_client.storage_accounts.list_keys(
                my_storage_rg, storage).keys[0].value
            self.table_service = TableService(account_name=storage,
                                              account_key=store_keys)
        except Exception as e:
            self.logger.error("Getting Azure Infra handlers failed %s" %
                              str(e))
            raise e

        rg_list = self.resource_client.resource_groups.list()
        self.managed_spokes = []
        self.managed_spokes.append(vmss_rg_name)
        self.new_spokes = []