Example 1
def get_storage_account_details(creds, resource_group_name, account_name):
    storage_client = StorageManagementClient(creds)
    account_result = storage_client.storage_accounts.get_properties(resource_group_name, account_name)
    keys_result = storage_client.storage_accounts.list_keys(resource_group_name, account_name)
    account_key = keys_result.storage_account_keys.key1

    account = CloudStorageAccount(account_name, account_key)
    blob_service = account.create_blob_service()
    file_service = account.create_file_service()
    queue_service = account.create_queue_service()
    table_service = account.create_table_service()

    model = StorageAccountDetails()
    model.account_props = account_result.storage_account
    model.account_keys = keys_result.storage_account_keys
    model.blob_containers = blob_service.iterate_containers()
    model.queues = queue_service.iterate_queues()
    # TODO: find out why listing shares doesn't work
    # model.shares = file_service.iterate_shares()
    model.shares = []
    model.tables = table_service.iterate_tables()
    model.blob_service_properties = blob_service.get_blob_service_properties()
    model.queue_service_properties = queue_service.get_queue_service_properties()
    model.table_service_properties = table_service.get_table_service_properties()

    return model
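For orientation, a hedged sketch of how this helper might be called; creds stands for whatever credentials object the legacy StorageManagementClient above accepts, and the group and account names are placeholders:

    # Hypothetical caller; the resource group and account names are illustrative.
    details = get_storage_account_details(creds, 'my-resource-group', 'mystorageaccount')
    for container in details.blob_containers:
        print(container.name)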
Example 2
    def test_create_blob_service_empty_credentials(self):
        # Arrange

        # Act
        bad_account = CloudStorageAccount('', '')
        with self.assertRaises(WindowsAzureError):
            service = bad_account.create_blob_service()
Example 3
def dbAzureApp(figurename, isinserted):
    account_name = 'shanistorage'
    account_key = 'j1COI4eq+p/Yl/e8dVCAiaHX/ly1StLuFAlgalNhVI+rjU8YL6wkWlulld4XIZ/5kjnrqkFyGhQsVo68y9NWpg=='
    account = CloudStorageAccount(account_name, account_key)
    table_service = None
    the_figures=[]
    try:
        table_service = account.create_table_service()
        table_name = 'azureFirstStor'
        # insert into a list in order
        by_ord = 0
        for entity in table_service.query_entities(table_name):
            the_figures.insert(by_ord, entity['NameFigure'])
            by_ord += 1
        # insert into the DB
        if isinserted is True:
            str_coun = str(by_ord)
            part_key = 'N' + str_coun
            figure_new = {'PartitionKey': part_key, 'RowKey': str_coun, 'NameFigure': figurename}
            # Insert the entity into the table
            table_service.insert_entity(table_name, figure_new)
            the_figures.insert(by_ord, figurename)
        # delete an entity if desired
        # table_service.delete_entity(table_name, part_key, str_coun)
    except Exception as e:
        print('Error occurred in the sample. Please make sure the account name and key are correct.', e)
    return the_figures
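The loop above scans the whole table. With the same legacy TableService API, the query can be narrowed to a single partition via an OData filter (the same filter syntax appears in Example 4); the partition value 'N0' is illustrative:

    # Fetch a single partition instead of scanning 'azureFirstStor'.
    for entity in table_service.query_entities('azureFirstStor', filter="PartitionKey eq 'N0'"):
        print(entity['NameFigure'])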
Example 4
def retrieve_symbols():
    # Retrieves the list of option symbols. This originally queried the MS SQL
    # Server database but has been modified to read the Azure Table Storage
    # 'Symbols' table; the old MSSQL code is kept below for reference in case
    # we need to call the older database later.
    #
    # Old code to insert/update the table in the MSSQL database (replaced):
    ###with pypyodbc.connect('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx') as connection:

    ###    # retrieve the cursor
    ###    cursor = connection.cursor()
    ###    cursor.execute("Select Symbol from Symbols")
    ###    rows = cursor.fetchall()

    ###    #connection.close() # connection is automatically closed when using the
    ###    #"with" block
    ###    return rows

    account_name = config.STORAGE_ACCOUNT_NAME
    account_key = config.STORAGE_ACCOUNT_KEY
    account = CloudStorageAccount(account_name, account_key)

    table_service = None
    try:
        table_service = account.create_table_service()
        symbols_list = table_service.query_entities(
            ut.TABLE_NAME_SYMBOLS, filter="PartitionKey eq 'Symbol'")
        return symbols_list

    except Exception as e:
        print(
            'Error occurred in the sample. If you are using the emulator, please make sure the emulator is running.',
            e)
Example 5
    def saveCsvLogToBlob(self, fileName):

        to_location_path = fileName

        account_name = s().Data["blob"]["account_name"]
        account_key = s().Data["blob"]["account_key"]
        container_name = s().Data["blob"]["container"]

        cloud_account = CloudStorageAccount(account_name=account_name,
                                            account_key=account_key)

        append_blob_service = cloud_account.create_append_blob_service()
        append_blob_service.create_container(container_name)
        append_blob_service.set_container_acl(
            container_name, public_access=PublicAccess.Container)

        if append_blob_service.exists(container_name, self.fileName):
            append_blob_service.append_blob_from_path(
                container_name,
                self.fileName,
                self.fileName,
                progress_callback=self.progress_callback_w)
        else:
            cloud_account.create_block_blob_service().create_blob_from_path(
                container_name,
                self.fileName,
                to_location_path,
                progress_callback=self.progress_callback_w)
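The progress_callback_w referenced above is not shown on this page. In the 0.3x-era SDK the callback receives the bytes transferred so far and the total, so a minimal placeholder (an assumption, not the original implementation) would be:

    def progress_callback_w(self, current, total):
        # current and total are byte counts supplied by the storage SDK;
        # total can be None for some operations.
        if total:
            print('transferred {:.0%}'.format(float(current) / total))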
Example 6
    def __init__(self, test_method):
        super(TestBatchNCJLive, self).__init__(__file__, test_method)
        self.account_name = 'test1'
        if not self.playback:
            self.account_key = os.environ['AZURE_BATCH_ACCESS_KEY']
        else:
            self.account_key = 'ZmFrZV9hY29jdW50X2tleQ=='
        self.account_endpoint = 'https://test1.westus.batch.azure.com/'
        storage_account = 'testaccountforbatch'
        if not self.playback:
            storage_key = os.environ['AZURE_STORAGE_ACCESS_KEY']
        else:
            storage_key = '1234'
        self.blob_client = CloudStorageAccount(storage_account, storage_key)\
            .create_block_blob_service()
        credentials = batchauth.SharedKeyCredentials(self.account_name,
                                                     self.account_key)
        self.batch_client = batch.BatchServiceClient(
            credentials, base_url=self.account_endpoint)

        self.output_blob_container = 'aaatestcontainer'
        sas_token = self.blob_client.generate_container_shared_access_signature(
            self.output_blob_container,
            permission=BlobPermissions(read=True, write=True),
            start=datetime.datetime.utcnow(),
            expiry=datetime.datetime.utcnow() + datetime.timedelta(days=1))
        self.output_container_sas = 'https://{}.blob.core.windows.net/{}?{}'.format(
            storage_account, self.output_blob_container, sas_token)
        print('Full container sas: {}'.format(self.output_container_sas))
Example 7
    def test_create_service_no_key(self):
        # Arrange

        # Act
        bad_account = CloudStorageAccount('', '')
        with self.assertRaises(ValueError):
            service = bad_account.create_block_blob_service()
Example 8
    def test_create_blob_service_empty_credentials(self):
        # Arrange

        # Act
        bad_account = CloudStorageAccount('', '')
        with self.assertRaises(ValueError):
            service = bad_account.create_blob_service()
Example 9
    def emulator(self):
        # With account
        account = CloudStorageAccount(is_emulated=True)
        client = account.create_block_blob_service()

        # Directly
        client = BlockBlobService(is_emulated=True)
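Both forms resolve to the storage emulator's well-known development account; a quick check, consistent with the assertion in Example 18 below:

    account = CloudStorageAccount(is_emulated=True)
    client = account.create_block_blob_service()
    assert client.account_name == 'devstoreaccount1'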
Example 10
    def sas_auth(self):
        # With account
        account = CloudStorageAccount(account_name="<account_name>", sas_token="<sas_token>")
        client = account.create_block_blob_service()

        # Directly
        client = BlockBlobService(account_name="<account_name>", sas_token="<sas_token>")
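A token like <sas_token> can be minted from a key-authenticated account. Below is a sketch using the account-level generator that Example 14 also calls; the import path matches the 0.3x-era azure-storage layout and should be treated as an assumption:

    import datetime
    from azure.storage.models import AccountPermissions, ResourceTypes, Services

    keyed = CloudStorageAccount(account_name="<account_name>", account_key="<account_key>")
    sas_token = keyed.generate_shared_access_signature(
        Services.BLOB, ResourceTypes.OBJECT, AccountPermissions.READ,
        datetime.datetime.utcnow() + datetime.timedelta(hours=1))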
Example 11
    def key_auth(self):
        # With account
        account = CloudStorageAccount(account_name="<account_name>", account_key="<account_key>")
        client = account.create_block_blob_service()

        # Directly
        client = BlockBlobService(account_name="<account_name>", account_key="<account_key>")
Example 12
class CloudStorageAccountTest(AzureTestCase):

    def setUp(self):
        self.account = CloudStorageAccount(
            account_name=credentials.getStorageServicesName(),
            account_key=credentials.getStorageServicesKey())

    #--Test cases --------------------------------------------------------
    def test_create_blob_service(self):
        # Arrange

        # Act
        service = self.account.create_blob_service()

        # Assert
        self.assertIsNotNone(service)
        self.assertIsInstance(service, BlobService)
        self.assertEqual(service.account_name,
                         credentials.getStorageServicesName())
        self.assertEqual(service.account_key,
                         credentials.getStorageServicesKey())

    def test_create_blob_service_empty_credentials(self):
        # Arrange

        # Act
        bad_account = CloudStorageAccount('', '')
        with self.assertRaises(WindowsAzureError):
            service = bad_account.create_blob_service()

        # Assert

    def test_create_table_service(self):
        # Arrange

        # Act
        service = self.account.create_table_service()

        # Assert
        self.assertIsNotNone(service)
        self.assertIsInstance(service, TableService)
        self.assertEqual(service.account_name,
                         credentials.getStorageServicesName())
        self.assertEqual(service.account_key,
                         credentials.getStorageServicesKey())

    def test_create_queue_service(self):
        # Arrange

        # Act
        service = self.account.create_queue_service()

        # Assert
        self.assertIsNotNone(service)
        self.assertIsInstance(service, QueueService)
        self.assertEqual(service.account_name,
                         credentials.getStorageServicesName())
        self.assertEqual(service.account_key,
                         credentials.getStorageServicesKey())
Example 14
def main():
    argument_spec = dict(source_uri=dict(required=True),
                         source_key=dict(required=True),
                         destination_account=dict(required=True),
                         destination_key=dict(required=True),
                         destination_container=dict(required=True),
                         destination_blob=dict(required=True),
                         wait=dict(default=False, type='bool'),
                         timeout=dict(default=1000))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_DEPS:
        module.fail_json(
            msg="requests and azure are required for this module ".format(
                HAS_DEPS_EXC))

    source_account, source_container, source_blob = split_uri(
        module.params.get('source_uri'))
    source = CloudStorageAccount(account_name=source_account,
                                 account_key=module.params.get('source_key'))
    source_service = source.create_block_blob_service()
    destination_service = BlockBlobService(
        account_name=module.params.get('destination_account'),
        account_key=module.params.get('destination_key'))

    source_token = source.generate_shared_access_signature(
        Services.BLOB, ResourceTypes.OBJECT, AccountPermissions.READ,
        datetime.datetime.now() + timedelta(hours=1))
    source_sas_url = source_service.make_blob_url(source_container,
                                                  source_blob, 'https',
                                                  source_token)

    destination_service.create_container(
        module.params.get('destination_container'), fail_on_exist=False)
    status = destination_service.copy_blob(
        module.params.get('destination_container'),
        module.params.get('destination_blob'), source_sas_url)

    if not module.params.get('wait'):
        data = dict(changed=True, status='started')
        module.exit_json(**data)
    else:
        copy = destination_service.get_blob_properties(
            module.params.get('destination_container'),
            module.params.get('destination_blob')).properties.copy
        count = 0
        while copy.status != 'success':
            count = count + 30
            if count > module.params.get('timeout'):
                module.fail_json(
                    msg='Timed out waiting for async copy to complete.')
            time.sleep(30)
            copy = destination_service.get_blob_properties(
                module.params.get('destination_container'),
                module.params.get('destination_blob')).properties.copy
        data = dict(changed=True, status='completed')
        module.exit_json(**data)
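main() depends on a split_uri helper that this page does not include. A plausible Python 3 reconstruction of what it must return, hypothetical rather than the module's actual code:

    from urllib.parse import urlparse

    def split_uri(uri):
        # 'https://myaccount.blob.core.windows.net/container/path/to/blob'
        # -> ('myaccount', 'container', 'path/to/blob')
        parsed = urlparse(uri)
        account = parsed.netloc.split('.')[0]
        container, _, blob = parsed.path.lstrip('/').partition('/')
        return account, container, blob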
Example 15
    def test_create_account_sas_and_key(self):
        # Arrange
        
        # Act
        account = CloudStorageAccount(self.account_name, self.account_key, self.sas_token)
        service = account.create_block_blob_service()

        # Assert
        self.validate_service(service, BlockBlobService)
Example 16
def get_azure_account():
    # returns the account needed to access the Azure storage tables
    account = None
    if config.IS_EMULATED:
        account = CloudStorageAccount(is_emulated=True)
    else:
        account_name = config.STORAGE_ACCOUNT_NAME
        account_key = config.STORAGE_ACCOUNT_KEY
        account = CloudStorageAccount(account_name, account_key)
    return account
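The config module read here is not shown; a minimal config.py matching the names this helper expects might look like this (all values are placeholders):

    # config.py (hypothetical)
    IS_EMULATED = False
    STORAGE_ACCOUNT_NAME = '<account_name>'
    STORAGE_ACCOUNT_KEY = '<account_key>'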
Example 17
    def public(self):
        # This applies to the blob services only
        # Public access must be enabled on the container or requests will fail

        # With account
        account = CloudStorageAccount(account_name="<account_name>")
        client = account.create_block_blob_service()

        # Directly
        client = BlockBlobService(account_name="<account_name>")
Example 18
    def test_create_account_emulated(self):
        # Arrange
      
        # Act
        account = CloudStorageAccount(is_emulated=True)
        service = account.create_block_blob_service()

        # Assert
        self.assertIsNotNone(service)
        self.assertEqual(service.account_name, 'devstoreaccount1')
        self.assertIsNotNone(service.account_key)
Example 19
    def test_create_account_sas(self):
        # Arrange
      
        # Act
        sas_account = CloudStorageAccount(self.account_name, sas_token=self.sas_token)
        service = sas_account.create_block_blob_service()

        # Assert
        self.assertIsNotNone(service)
        self.assertEqual(service.account_name, self.account_name)
        self.assertIsNone(service.account_key)
        self.assertEqual(service.sas_token, self.sas_token)
Example 20
class StorageAccountTest(ExtendedTestCase):

    def setUp(self):
        self.account_name = 'storagename'
        self.account_key = 'NzhL3hKZbJBuJ2484dPTR+xF30kYaWSSCbs2BzLgVVI1woqeST/1IgqaLm6QAOTxtGvxctSNbIR/1hW8yH+bJg=='
        self.account = CloudStorageAccount(self.account_name, self.account_key)

    #--Test cases --------------------------------------------------------
    def test_create_blob_service(self):
        # Arrange

        # Act
        service = self.account.create_blob_service()

        # Assert
        self.assertIsNotNone(service)
        self.assertIsInstance(service, BlobService)
        self.assertEqual(service.account_name, self.account_name)
        self.assertEqual(service.account_key, self.account_key)

    def test_create_blob_service_empty_credentials(self):
        # Arrange

        # Act
        bad_account = CloudStorageAccount('', '')
        with self.assertRaises(ValueError):
            service = bad_account.create_blob_service()

        # Assert

    def test_create_table_service(self):
        # Arrange

        # Act
        service = self.account.create_table_service()

        # Assert
        self.assertIsNotNone(service)
        self.assertIsInstance(service, TableService)
        self.assertEqual(service.account_name, self.account_name)
        self.assertEqual(service.account_key, self.account_key)

    def test_create_queue_service(self):
        # Arrange

        # Act
        service = self.account.create_queue_service()

        # Assert
        self.assertIsNotNone(service)
        self.assertIsInstance(service, QueueService)
        self.assertEqual(service.account_name, self.account_name)
        self.assertEqual(service.account_key, self.account_key)
Example 21
class StorageAccountTest(StorageTestCase):

    def setUp(self):
        self.account_name = 'storagename'
        self.account_key = 'NzhL3hKZbJBuJ2484dPTR+xF30kYaWSSCbs2BzLgVVI1woqeST/1IgqaLm6QAOTxtGvxctSNbIR/1hW8yH+bJg=='
        self.account = CloudStorageAccount(self.account_name, self.account_key)

    #--Test cases --------------------------------------------------------
    def test_create_blob_service(self):
        # Arrange

        # Act
        service = self.account.create_blob_service()

        # Assert
        self.assertIsNotNone(service)
        self.assertIsInstance(service, BlobService)
        self.assertEqual(service.account_name, self.account_name)
        self.assertEqual(service.account_key, self.account_key)

    def test_create_blob_service_empty_credentials(self):
        # Arrange

        # Act
        bad_account = CloudStorageAccount('', '')
        with self.assertRaises(ValueError):
            service = bad_account.create_blob_service()

        # Assert

    def test_create_table_service(self):
        # Arrange

        # Act
        service = self.account.create_table_service()

        # Assert
        self.assertIsNotNone(service)
        self.assertIsInstance(service, TableService)
        self.assertEqual(service.account_name, self.account_name)
        self.assertEqual(service.account_key, self.account_key)

    def test_create_queue_service(self):
        # Arrange

        # Act
        service = self.account.create_queue_service()

        # Assert
        self.assertIsNotNone(service)
        self.assertIsInstance(service, QueueService)
        self.assertEqual(service.account_name, self.account_name)
        self.assertEqual(service.account_key, self.account_key)
Example 22
def submit_video_encoding_job(request, video_guid=None):

    blob_name = format_blob_name(user=request.user, video_guid=video_guid)

    csa = CloudStorageAccount(
        account_name=settings.AZURE_CONFIG["account_name"], account_key=settings.AZURE_CONFIG["account_key"]
    )

    qs = csa.create_queue_service()

    qs.put_message(queue_name=settings.AZURE_CONFIG["video_encode_queue"], message_text=base64.b64encode(blob_name))

    return None
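One portability note: base64.b64encode returns bytes on Python 3, while the legacy put_message expects text, so a Python 3 caller would decode first (a sketch against the same settings keys):

    encoded = base64.b64encode(blob_name.encode('utf-8')).decode('ascii')
    qs.put_message(queue_name=settings.AZURE_CONFIG["video_encode_queue"], message_text=encoded)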
Example 23
    def __init__(self, make_public=False, transaction=None):
        account_name = settings.AZURE['account_name']
        account_key = settings.AZURE['account_key']
        sas = settings.AZURE['sas']
        self.transaction = transaction
        self.transaction_body = json.loads(transaction.body)['data']
        logger.info('copying uuid: ' + self.transaction_body['uuid'])
        self.file_uuid = self.transaction_body['uuid']
        self.filename = self.transaction_body['original_filename']
        self.make_public = make_public
        self.account = CloudStorageAccount(account_name=account_name,
                                           account_key=account_key,
                                           sas_token=sas)
        self.service = self.account.create_block_blob_service()
Example 24
def submit_video_encoding_job(request, video_guid=None):

    blob_name = format_blob_name(user=request.user, video_guid=video_guid)

    csa = CloudStorageAccount(
        account_name=settings.AZURE_CONFIG['account_name'],
        account_key=settings.AZURE_CONFIG['account_key'])

    qs = csa.create_queue_service()

    qs.put_message(
        queue_name=settings.AZURE_CONFIG['video_encode_queue'],
        message_text=base64.b64encode(blob_name))

    return None
Example 25
    def __get_client(self, id):
        credential = self.middleware.call_sync('datastore.query', 'system.cloudcredentials', [('id', '=', id)], {'get': True})

        return CloudStorageAccount(
            credential['attributes'].get('account_name'),
            credential['attributes'].get('account_key'),
        )
Example 26
	def __init__(self):
		try:
			import config as config
		except ImportError:
			raise ValueError('Please specify configuration settings in config.py.')

		if config.IS_EMULATED:
			self.account = CloudStorageAccount(is_emulated=True)
		else:
			# Note that account key and sas should not both be included
			account_name = config.STORAGE_ACCOUNT_NAME
			account_key = config.STORAGE_ACCOUNT_KEY
			sas = config.SAS
			self.account = CloudStorageAccount(account_name=account_name,
											   account_key=account_key,
											   sas_token=sas)
		# Create the service whether the account is emulated or real.
		self.service = self.account.create_block_blob_service()
Example 27
    def register_model(self, model_name, service_def):
        storage_account_key = self.__create_storage_account_and_get_key()
        cloud_storage_account = CloudStorageAccount(self.__storage_account_name, storage_account_key)

        print("Registering model " + model_name)
        url = self.__upload_model(model_name, service_def, cloud_storage_account)
        register_model_result = self.__register_model_with_mms(model_name, url)
        print("Successfully registered model " + model_name)
        return register_model_result['id']
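The private __upload_model helper is not shown. Given the CloudStorageAccount it receives, a plausible sketch (hypothetical names and container, not the original code) would store the service definition as a block blob and return its URL:

    def __upload_model(self, model_name, service_def, cloud_storage_account):
        # Hypothetical reconstruction: upload the service definition file and
        # hand back a URL that the registration call can reference.
        service = cloud_storage_account.create_block_blob_service()
        service.create_container('models')
        service.create_blob_from_path('models', model_name, service_def)
        return service.make_blob_url('models', model_name)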
Example 28
    def setUp(self):
        super(SampleTest, self).setUp()

        try:
            import samples.config as config
        except ImportError:
            raise ValueError(
                'Please specify configuration settings in config.py.')

        if config.IS_EMULATED:
            self.account = CloudStorageAccount(is_emulated=True)
        else:
            # Note that account key and sas should not both be included
            account_name = config.STORAGE_ACCOUNT_NAME
            account_key = config.STORAGE_ACCOUNT_KEY
            sas = config.SAS
            self.account = CloudStorageAccount(account_name=account_name,
                                               account_key=account_key,
                                               sas_token=sas)
Example 29
class Passthrough(Operations):
	def __init__(self, root):
		self.root = root
		print root
		try:
			import config as config
		except ImportError:
			raise ValueError('Please specify configuration settings in config.py.')

		if config.IS_EMULATED:
			self.account = CloudStorageAccount(is_emulated=True)
		else:
			# Note that account key and sas should not both be included
			account_name = config.STORAGE_ACCOUNT_NAME
			account_key = config.STORAGE_ACCOUNT_KEY
			sas = config.SAS
			self.account = CloudStorageAccount(account_name=account_name,
											   account_key=account_key,
											   sas_token=sas)
		# Create the service whether the account is emulated or real.
		self.service = self.account.create_block_blob_service()


	def _full_path(self, partial):
		if partial.startswith("/"):
			partial = partial[1:]
		path = os.path.join(self.root, partial)
		return path

	def _get_container_reference(self, prefix='container'):
		return '{}{}'.format(prefix, str(uuid.uuid4()).replace('-', ''))

	def access(self, path, mode):
		if debug:
			print "access" 

		full_path = self._full_path(path)
		#if not os.access(full_path, mode):
		#	pass#raise FuseOSError(errno.EACCES)
		return 0

	def chmod(self, path, mode):
		pass

	def chown(self, path, uid, gid):
		pass

	def getattr(self, path, fh=None):
		if debug:
			print "getattr  " + path 
		isFolder = False
		if len(path.split('/')) == 2:
			isFolder = True

		"""link_data = {
			"st_ctime" : 1456615173,
			"st_mtime" : 1456615173,
			"st_nlink" : 2,
			"st_mode" : 16893,
			"st_size" : 2,
			"st_gid" : 1000,
			"st_uid" : 1000,
			"st_atime" : time(),
		}"""


		folder_data = {
			"st_ctime" : 1456615173,
			"st_mtime" : 1456615173,
			"st_nlink" : 2,
#			"st_mode" : 16893,
			"st_mode" : 16895,
			"st_size" : 2,
			"st_gid" : 1000,
			"st_uid" : 1000,
			"st_atime" : time(),
		}

		
		full_path = self._full_path(path)
		try:
			st = os.lstat(full_path)
			print st
			rdata = dict((key, getattr(st, key)) for key in ('st_atime', 'st_ctime', 'st_gid', 'st_mode', 'st_mtime', 'st_nlink', 'st_size', 'st_uid'))
		except: 
			pass
		#if os.path.isfile == True:
		#	return 
		if isFolder:
			for container in list(self.service.list_containers()):
				if container.name == path[1:]:
					return folder_data
		else:
			"""import config as config
			account_name = config.STORAGE_ACCOUNT_NAME
			account_key = config.STORAGE_ACCOUNT_KEY"""
			containername = path.split('/')[1]
			filename = path.split('/')[2]
			"""block_blob_service = BlockBlobService(account_name, account_key)
			if os.path.isfile(full_path) == False:
				fileSize = 1
			else:
				try:
					pass
					fileSize = os.path.getsize(full_path)
				except:
					fileSize = 1"""
			self.service = self.account.create_block_blob_service()
			file_data = {
				"st_ctime" : 1456615173,
				"st_mtime" : 1456615173,
				"st_nlink" : 1,
#				"st_mode" : 33188,
				"st_mode" : 33279,
				"st_size" : self.service.get_blob_properties(containername, filename).properties.content_length,
				"st_gid" : 1000,
				"st_uid" : 1000,
				"st_atime" : time(),
			}
			return file_data

		st = os.lstat(full_path)
		print st
		rdata = dict((key, getattr(st, key)) for key in ('st_atime', 'st_ctime', 'st_gid', 'st_mode', 'st_mtime', 'st_nlink', 'st_size', 'st_uid'))
		return rdata

	def readdir(self, path, fh):
		if debug:
			print "readdir  " + path  
		
		full_path = self._full_path(path)

		dirents = ['.', '..']
		#if os.path.isdir(full_path):
		#	dirents.extend(os.listdir(full_path))
		for r in dirents:
			yield r
		containers = list(self.service.list_containers())
		#print('All containers in your account:')
		if path == "/":
			for container in containers:
				yield container.name
		else: 
			folder = path[1:]
			blobs = list(self.service.list_blobs(folder))
			for blob in blobs:
				yield blob.name
			

	def readlink(self, path):
		if debug:
			print "readlink" 

		pathname = os.readlink(self._full_path(path))
		if pathname.startswith("/"):
			# Path name is absolute, sanitize it.
			return os.path.relpath(pathname, self.root)
		else:
			return pathname

	def mknod(self, path, mode, dev):
		return os.mknod(self._full_path(path), mode, dev)

	def rmdir(self, path):
		if debug:
			print "rmdir  " + path[1:]
		deleted = self.service.delete_container(path[1:])
		return 0

	def mkdir(self, path, mode):
		"""
		Only valid in the top level of the mounted directory.
		Creates a container to serve as the folder 

		A container name must be a valid DNS name, conforming to the following 
		naming rules:
		1) Container names must start with a letter or number, and can contain
		   only letters, numbers, and the dash (-) character.
		2) Every dash (-) character must be immediately preceded and followed 
		   by a letter or number; consecutive dashes are not permitted in
		   container names.
		3) All letters in a container name must be lowercase.
		4) Container names must be from 3 through 63 characters long.

		30 second lease timeout on deleted directory.
		"""
		if debug:
			print "mkdir  " + path[1:]

		# TODO: validate input 
		self.service.create_container(path[1:])
		return 0

	def statfs(self, path):
		full_path = self._full_path(path)
		stv = os.statvfs(full_path)
		return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree',
			'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag',
			'f_frsize', 'f_namemax'))

	def unlink(self, path):
		return os.unlink(self._full_path(path))

	def symlink(self, name, target):
		return os.symlink(name, self._full_path(target))

	def rename(self, old, new):
		"""
		1) create new container
		2) stream contents of old container to new container
		3) delete old container
		"""
		# step 1 
		self.mkdir(new, 0777)

		# step 2
		# TODO: steam contents to new container
		"""import config as config
		account_name = config.STORAGE_ACCOUNT_NAME
		account_key = config.STORAGE_ACCOUNT_KEY
		block_blob_service = BlockBlobService(account_name, account_key)
		block_blob_service.get_blob_to_path(containername, filename, tempfilename)	
		block_blob_service.create_blob_from_path(new, filename, filename)"""	
				
	
		#step 3
		self.rmdir(old)

	def link(self, target, name):
		return os.link(self._full_path(target), self._full_path(name))

	def utimens(self, path, times=None):
		return os.utime(self._full_path(path), times)

	# File methods
	# ============

	def open(self, path, flags):
		"""if debug:
			print "open:    " + path
			print flags
		full_path = self._full_path(path)
		import config as config
		account_name = config.STORAGE_ACCOUNT_NAME
		account_key = config.STORAGE_ACCOUNT_KEY
		containername = path.split('/')[1]
		filename = path.split('/')[2]
		block_blob_service = BlockBlobService(account_name, account_key)
		try:
			print "get block blob" 
			if os.path.isdir(path.split('/')[1]) == False:
				os.mkdir(full_path.split('/')[0]+'/'+containername)
			if os.path.isfile(full_path) == False:
				block_blob_service.get_blob_to_path(containername, filename, full_path)
			else:
				print "get block blob" 
				os.remove(full_path)
				block_blob_service.get_blob_to_path(containername, filename, full_path)
		except:
			pass
		print "full path:   " + full_path 
		print os.path.isfile(full_path)"""
		return 0#os.open(full_path, flags)

	def create(self, path, mode, fi=None):
		if debug:
			print "create:   " + path
		full_path = self._full_path(path)
		return os.open(full_path, os.O_WRONLY | os.O_CREAT, mode)

	def read(self, path, length, offset, fh):
		if debug:
			print "read:	   " + path
			print "offset:  " 
			print offset
			print "length: "
			print length 
			print fh
		full_path = self._full_path(path)
		print full_path
		#os.lseek(fh, offset, os.SEEK_SET)
		#if os.path.isfile(full_path) == False:
		import config as config
		account_name = config.STORAGE_ACCOUNT_NAME
		account_key = config.STORAGE_ACCOUNT_KEY
		containername = path.split('/')[1]
		filename = path.split('/')[2]
		service = baseblobservice.BaseBlobService(account_name, account_key)
		blob = service.get_blob_to_bytes(containername, filename, None, offset, offset+length-1)
		#blob = blob[offset:(offset+length)]
		bytes = blob.content 
		return bytes
		"""try:
			if os.path.isdir(path.split('/')[1]) == False:
				os.mkdir(full_path.split('/')[0]+'/'+containername)
			if os.path.isfile(full_path) == False:
				print "read block blob" 
				block_blob_service.get_blob_to_path(containername, filename, full_path)
			else:
				os.remove(full_path)
				block_blob_service.get_blob_to_path(containername, filename, full_path)
		except:
			pass
			
		fhn = os.open(full_path, 32768)
		os.lseek(fhn, offset, os.SEEK_SET)
	
		#print "os.read(fh, length)"
		#print os.read(fh, length)
		return os.read(fhn, length)"""

	def write(self, path, buf, offset, fh):
		if debug:
			print "write:   " + path
		os.lseek(fh, offset, os.SEEK_SET)
		return os.write(fh, buf)

	def truncate(self, path, length, fh=None):
		print "truncate:   " + path
		full_path = self._full_path(path)
		with open(full_path, 'r+') as f:
			f.truncate(length)

	def flush(self, path, fh):
		print "flush:   " + path
		return os.fsync(fh)

	def release(self, path, fh):
		print "release:   " + path
		return os.close(fh)

	def fsync(self, path, fdatasync, fh):
		print "fsync:   " + path
		return self.flush(path, fh)
Example 30
    def setUp(self):
        super(StorageAccountTest, self).setUp()
        self.account_name = self.settings.STORAGE_ACCOUNT_NAME
        self.account_key = self.settings.STORAGE_ACCOUNT_KEY
        self.sas_token = '?sv=2015-04-05&st=2015-04-29T22%3A18%3A26Z&se=2015-04-30T02%3A23%3A26Z&sr=b&sp=rw&sip=168.1.5.60-168.1.5.70&spr=https&sig=Z%2FRHIX5Xcg0Mq2rqI3OlWTjEg2tYkboXr1P9ZUXDtkk%3D'
        self.account = CloudStorageAccount(self.account_name, self.account_key)
Example 31
from azure.servicebus import ServiceBusService
from azure.storage import CloudStorageAccount
import MapReduce

#
# The CloudStorageAccount provides factory methods for the queue, table, and
# blob services.
#
# See http://go.microsoft.com/fwlink/?linkid=246933 for Storage documentation.
#
STORAGE_ACCOUNT_NAME = '__paste_your_storage_account_name_here__'
STORAGE_ACCOUNT_KEY = '__paste_your_storage_key_here__'

if os.environ.get('EMULATED', '').lower() == 'true':
    # Running in the emulator, so use the development storage account
    storage_account = CloudStorageAccount(None, None)
else:
    storage_account = CloudStorageAccount(STORAGE_ACCOUNT_NAME, STORAGE_ACCOUNT_KEY)

blob_service = storage_account.create_blob_service()
table_service = storage_account.create_table_service()
queue_service = storage_account.create_queue_service()

#
# Service Bus is a messaging solution for applications. It sits between
# components of your applications and enables them to exchange messages in a
# loosely coupled way for improved scale and resiliency.
#
# See http://go.microsoft.com/fwlink/?linkid=246934 for Service Bus documentation.
#
SERVICE_BUS_NAMESPACE = '__paste_your_service_bus_namespace_here__'
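The CloudStorageAccount(None, None) emulator convention above belongs to an older SDK; in the 0.3x-era library used by the other examples on this page, the equivalent selection would be:

    if os.environ.get('EMULATED', '').lower() == 'true':
        storage_account = CloudStorageAccount(is_emulated=True)
    else:
        storage_account = CloudStorageAccount(STORAGE_ACCOUNT_NAME, STORAGE_ACCOUNT_KEY)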
Example 32
    def _configure_connection(self):
        log.debug("Configuring Connection")
        self.account = CloudStorageAccount(self.account_name, self.account_key)
        self.service = self.account.create_block_blob_service()
Example 33
class AzureBlobObjectStore(ObjectStore):
    """
    Object store that stores objects as blobs in an Azure Blob Container. A local
    cache exists that is used as an intermediate location for files between
    Galaxy and Azure.
    """
    def __init__(self, config, config_xml):
        if BlockBlobService is None:
            raise Exception(NO_BLOBSERVICE_ERROR_MESSAGE)
        super(AzureBlobObjectStore, self).__init__(config)

        self.staging_path = self.config.file_path
        self.transfer_progress = 0
        self._parse_config_xml(config_xml)
        self._configure_connection()
        self.container_lease = self._get_container_lease()

        # Clean cache only if value is set in galaxy.ini
        if self.cache_size != -1:
            # Convert GBs to bytes for comparison
            self.cache_size = self.cache_size * 1073741824
            # Helper for interruptable sleep
            self.sleeper = Sleeper()
            self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
            self.cache_monitor_thread.start()
            log.info("Cache cleaner manager started")

    ###################
    # Private Methods #
    ###################

    # config_xml is an ElementTree object.
    def _parse_config_xml(self, config_xml):
        try:
            auth_xml = config_xml.find('auth')
            self.account_name = auth_xml.get('account_name')
            self.account_key = auth_xml.get('account_key')
            container_xml = config_xml.find('container')
            self.container_name = container_xml.get('name')
            self.max_chunk_size = int(container_xml.get('max_chunk_size', 250))  # currently unused
            cache_xml = config_xml.find('cache')
            self.cache_size = float(cache_xml.get('size', -1))
            self.staging_path = cache_xml.get('path', self.config.object_store_cache_path)

            for d_xml in config_xml.findall('extra_dir'):
                self.extra_dirs[d_xml.get('type')] = d_xml.get('path')

            log.debug("Object cache dir:    %s", self.staging_path)
            log.debug("       job work dir: %s", self.extra_dirs['job_work'])

        except Exception:
            # Toss it back up after logging, we can't continue loading at this point.
            log.exception("Malformed ObjectStore Configuration XML -- unable to continue")
            raise

    def _configure_connection(self):
        log.debug("Configuring Connection")
        self.account = CloudStorageAccount(self.account_name, self.account_key)
        self.service = self.account.create_block_blob_service()

    def _get_container_lease(self):
        """ Sometimes a handle to a container is not established right away so try
        it a few times. Raise error is connection is not established. """
        for i in range(5):
            try:
                self.service.break_container_lease(self.container_name)
                container_lease = self.service.acquire_container_lease(self.container_name)
                log.debug("Using azure blob store with container '%s'", self.container_name)
                return container_lease
            except AzureHttpError:
                try:
                    log.debug("container not found, creating azure blob store container with name '%s'", self.container_name)
                    self.service.create_container(self.container_name)
                    container_lease = self.service.acquire_container_lease(self.container_name)
                    return container_lease
                except AzureHttpError:
                    log.exception("Could not get container '%s', attempt %s/5", self.container_name, i + 1)
                    time.sleep(2)
        # All the attempts have been exhausted and connection was not established,
        # raise error
        raise AzureHttpError

    def _construct_path(self, obj, base_dir=None, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False, **kwargs):
        # extra_dir should never be constructed from provided data but just
        # make sure there are no shenanigans afoot
        if extra_dir and extra_dir != os.path.normpath(extra_dir):
            log.warning('extra_dir is not normalized: %s', extra_dir)
            raise ObjectInvalid("The requested object is invalid")
        # ensure that any parent directory references in alt_name would not
        # result in a path not contained in the directory path constructed here
        if alt_name:
            if not safe_relpath(alt_name):
                log.warning('alt_name would locate path outside dir: %s', alt_name)
                raise ObjectInvalid("The requested object is invalid")
            # alt_name can contain parent directory references, but S3 will not
            # follow them, so if they are valid we normalize them out
            alt_name = os.path.normpath(alt_name)

        rel_path = os.path.join(*directory_hash_id(obj.id))

        if extra_dir is not None:
            if extra_dir_at_root:
                rel_path = os.path.join(extra_dir, rel_path)
            else:
                rel_path = os.path.join(rel_path, extra_dir)

        # for JOB_WORK directory
        if obj_dir:
            rel_path = os.path.join(rel_path, str(obj.id))
        if base_dir:
            base = self.extra_dirs.get(base_dir)
            return os.path.join(base, rel_path)

        # S3 folders are marked by having trailing '/' so add it now
        # rel_path = '%s/' % rel_path # assume for now we don't need this in Azure blob storage.

        if not dir_only:
            rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)

        return rel_path

    def _fix_permissions(self, rel_path):
        """ Set permissions on rel_path"""
        for basedir, _, files in os.walk(rel_path):
            umask_fix_perms(basedir, self.config.umask, 0o777, self.config.gid)
            for filename in files:
                path = os.path.join(basedir, filename)
                # Ignore symlinks
                if os.path.islink(path):
                    continue
                umask_fix_perms(path, self.config.umask, 0o666, self.config.gid)

    def _get_cache_path(self, rel_path):
        return os.path.abspath(os.path.join(self.staging_path, rel_path))

    def _get_transfer_progress(self):
        return self.transfer_progress

    def _get_size_in_azure(self, rel_path):
        try:
            properties = self.service.get_blob_properties(self.container_name, rel_path)
            # Currently this returns a blob and not a BlobProperties object
            # Similar issue for the ruby https://github.com/Azure/azure-storage-ruby/issues/13
            # The typecheck is an attempt at future-proofing this when/if the bug is fixed.
            if type(properties) is Blob:
                properties = properties.properties
            if properties:
                size_in_bytes = properties.content_length
                return size_in_bytes
        except AzureHttpError:
            log.exception("Could not get size of blob '%s' from Azure", rel_path)
            return -1

    def _in_azure(self, rel_path):
        try:
            exists = self.service.exists(self.container_name, rel_path)
        except AzureHttpError:
            log.exception("Trouble checking existence of Azure blob '%s'", rel_path)
            return False
        return exists

    def _in_cache(self, rel_path):
        """ Check if the given dataset is in the local cache. """
        cache_path = self._get_cache_path(rel_path)
        return os.path.exists(cache_path)

    def _pull_into_cache(self, rel_path):
        # Ensure the cache directory structure exists (e.g., dataset_#_files/)
        rel_path_dir = os.path.dirname(rel_path)
        if not os.path.exists(self._get_cache_path(rel_path_dir)):
            os.makedirs(self._get_cache_path(rel_path_dir))
        # Now pull in the file
        file_ok = self._download(rel_path)
        self._fix_permissions(self._get_cache_path(rel_path_dir))
        return file_ok

    def _transfer_cb(self, complete, total):
        self.transfer_progress = float(complete) / float(total) * 100  # in percent

    def _download(self, rel_path):
        local_destination = self._get_cache_path(rel_path)
        try:
            log.debug("Pulling '%s' into cache to %s", rel_path, local_destination)
            if self.cache_size > 0 and self._get_size_in_azure(rel_path) > self.cache_size:
                log.critical("File %s is larger (%s) than the cache size (%s). Cannot download.",
                             rel_path, self._get_size_in_azure(rel_path), self.cache_size)
                return False
            else:
                self.transfer_progress = 0  # Reset transfer progress counter
                self.service.get_blob_to_path(self.container_name, rel_path, local_destination, progress_callback=self._transfer_cb)
                return True
        except AzureHttpError:
            log.exception("Problem downloading '%s' from Azure", rel_path)
        return False

    def _push_to_os(self, rel_path, source_file=None, from_string=None):
        """
        Push the file pointed to by ``rel_path`` to the object store naming the blob
        ``rel_path``. If ``source_file`` is provided, push that file instead while
        still using ``rel_path`` as the blob name.
        If ``from_string`` is provided, set contents of the file to the value of
        the string.
        """
        try:
            source_file = source_file or self._get_cache_path(rel_path)

            if not os.path.exists(source_file):
                log.error("Tried updating blob '%s' from source file '%s', but source file does not exist.", rel_path, source_file)
                return False

            if os.path.getsize(source_file) == 0:
                log.debug("Wanted to push file '%s' to azure blob '%s' but its size is 0; skipping.", source_file, rel_path)
                return True

            if from_string:
                self.service.create_blob_from_text(self.container_name, rel_path, from_string, progress_callback=self._transfer_cb)
                log.debug("Pushed data from string '%s' to blob '%s'", from_string, rel_path)
            else:
                start_time = datetime.now()
                log.debug("Pushing cache file '%s' of size %s bytes to '%s'", source_file, os.path.getsize(source_file), rel_path)
                self.transfer_progress = 0  # Reset transfer progress counter
                self.service.create_blob_from_path(self.container_name, rel_path, source_file, progress_callback=self._transfer_cb)
                end_time = datetime.now()
                log.debug("Pushed cache file '%s' to blob '%s' (%s bytes transfered in %s sec)",
                          source_file, rel_path, os.path.getsize(source_file), end_time - start_time)
            return True

        except AzureHttpError:
            log.exception("Trouble pushing to Azure Blob '%s' from file '%s'", rel_path, source_file)
        return False

    ##################
    # Public Methods #
    ##################

    def exists(self, obj, **kwargs):
        in_cache = in_azure = False
        rel_path = self._construct_path(obj, **kwargs)

        in_cache = self._in_cache(rel_path)
        in_azure = self._in_azure(rel_path)
        # log.debug("~~~~~~ File '%s' exists in cache: %s; in azure: %s" % (rel_path, in_cache, in_azure))
        # dir_only does not get synced so shortcut the decision
        dir_only = kwargs.get('dir_only', False)
        base_dir = kwargs.get('base_dir', None)
        if dir_only:
            if in_cache or in_azure:
                return True
            # for JOB_WORK directory
            elif base_dir:
                if not os.path.exists(rel_path):
                    os.makedirs(rel_path)
                return True
            else:
                return False

        # TODO: Sync should probably not be done here. Add this to an async upload stack?
        if in_cache and not in_azure:
            self._push_to_os(rel_path, source_file=self._get_cache_path(rel_path))
            return True
        elif in_azure:
            return True
        else:
            return False

    def file_ready(self, obj, **kwargs):
        """
        A helper method that checks if a file corresponding to a dataset is
        ready and available to be used. Return ``True`` if so, ``False`` otherwise.
        """
        rel_path = self._construct_path(obj, **kwargs)
        # Make sure the size in cache is available in its entirety
        if self._in_cache(rel_path):
            local_size = os.path.getsize(self._get_cache_path(rel_path))
            remote_size = self._get_size_in_azure(rel_path)
            if local_size == remote_size:
                return True
            else:
                log.debug("Waiting for dataset %s to transfer from OS: %s/%s", rel_path, local_size, remote_size)

        return False

    def create(self, obj, **kwargs):

        if not self.exists(obj, **kwargs):

            # Pull out locally used fields
            extra_dir = kwargs.get('extra_dir', None)
            extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
            dir_only = kwargs.get('dir_only', False)
            alt_name = kwargs.get('alt_name', None)

            # Construct hashed path
            rel_path = os.path.join(*directory_hash_id(obj.id))

            # Optionally append extra_dir
            if extra_dir is not None:
                if extra_dir_at_root:
                    rel_path = os.path.join(extra_dir, rel_path)
                else:
                    rel_path = os.path.join(rel_path, extra_dir)

            # Create given directory in cache
            cache_dir = os.path.join(self.staging_path, rel_path)
            if not os.path.exists(cache_dir):
                os.makedirs(cache_dir)

            # Although not really necessary to create S3 folders (because S3 has
            # flat namespace), do so for consistency with the regular file system
            # S3 folders are marked by having trailing '/' so add it now
            # s3_dir = '%s/' % rel_path
            # self._push_to_os(s3_dir, from_string='')
            # If instructed, create the dataset in cache & in S3
            if not dir_only:
                rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
                open(os.path.join(self.staging_path, rel_path), 'w').close()
                self._push_to_os(rel_path, from_string='')

    def empty(self, obj, **kwargs):
        if self.exists(obj, **kwargs):
            return bool(self.size(obj, **kwargs) > 0)
        else:
            raise ObjectNotFound( 'objectstore.empty, object does not exist: %s, kwargs: %s' % ( str( obj ), str( kwargs ) ) )

    def size(self, obj, **kwargs):
        rel_path = self._construct_path(obj, **kwargs)
        if self._in_cache(rel_path):
            try:
                return os.path.getsize(self._get_cache_path(rel_path))
            except OSError as ex:
                log.info("Could not get size of file '%s' in local cache, will try Azure. Error: %s", rel_path, ex)
        elif self.exists(obj, **kwargs):
            return self._get_size_in_azure(rel_path)
        log.warning("Did not find dataset '%s', returning 0 for size", rel_path)
        return 0

    def delete(self, obj, entire_dir=False, **kwargs):
        rel_path = self._construct_path(obj, **kwargs)
        extra_dir = kwargs.get('extra_dir', None)
        base_dir = kwargs.get('base_dir', None)
        dir_only = kwargs.get('dir_only', False)
        obj_dir = kwargs.get('obj_dir', False)
        try:
            if base_dir and dir_only and obj_dir:
                # Remove temporary data in JOB_WORK directory
                shutil.rmtree(os.path.abspath(rel_path))
                return True

            # For the case of extra_files, because we don't have a reference to
            # individual files/blobs we need to remove the entire directory structure
            # with all the files in it. This is easy for the local file system,
            # but requires iterating through each individual blob in Azure and deleting it.
            if entire_dir and extra_dir:
                shutil.rmtree(self._get_cache_path(rel_path))
                blobs = self.service.list_blobs(self.container_name, prefix=rel_path)
                for blob in blobs:
                    log.debug("Deleting from Azure: %s", blob)
                    self.service.delete_blob(self.container_name, blob.name)
                return True
            else:
                # Delete from cache first
                os.unlink(self._get_cache_path(rel_path))
                # Delete from S3 as well
                if self._in_azure(rel_path):
                    log.debug("Deleting from Azure: %s", rel_path)
                    self.service.delete_blob(self.container_name, rel_path)
                    return True
        except AzureHttpError:
            log.exception("Could not delete blob '%s' from Azure", rel_path)
        except OSError:
            log.exception('%s delete error', self.get_filename(obj, **kwargs))
        return False

    def get_data(self, obj, start=0, count=-1, **kwargs):
        rel_path = self._construct_path(obj, **kwargs)
        # Check cache first and get file if not there
        if not self._in_cache(rel_path):
            self._pull_into_cache(rel_path)
        # Read the file content from cache
        data_file = open(self._get_cache_path(rel_path), 'r')
        data_file.seek(start)
        content = data_file.read(count)
        data_file.close()
        return content

    def get_filename(self, obj, **kwargs):
        rel_path = self._construct_path(obj, **kwargs)
        base_dir = kwargs.get('base_dir', None)
        dir_only = kwargs.get('dir_only', False)
        obj_dir = kwargs.get('obj_dir', False)

        # for JOB_WORK directory
        if base_dir and dir_only and obj_dir:
            return os.path.abspath(rel_path)

        cache_path = self._get_cache_path(rel_path)
        # S3 does not recognize directories as files so cannot check if those exist.
        # So, if checking dir only, ensure given dir exists in cache and return
        # the expected cache path.
        # dir_only = kwargs.get('dir_only', False)
        # if dir_only:
        #     if not os.path.exists(cache_path):
        #         os.makedirs(cache_path)
        #     return cache_path
        # Check if the file exists in the cache first
        if self._in_cache(rel_path):
            return cache_path
        # Check if the file exists in persistent storage and, if it does, pull it into cache
        elif self.exists(obj, **kwargs):
            if dir_only:  # Directories do not get pulled into cache
                return cache_path
            else:
                if self._pull_into_cache(rel_path):
                    return cache_path
        # For the case of retrieving a directory only, return the expected path
        # even if it does not exist.
        # if dir_only:
        #     return cache_path
        raise ObjectNotFound( 'objectstore.get_filename, no cache_path: %s, kwargs: %s' % ( str( obj ), str( kwargs ) ) )

        return cache_path  # Until the upload tool does not explicitly create the dataset, return expected path

    def update_from_file(self, obj, file_name=None, create=False, **kwargs):
        if create is True:
            self.create(obj, **kwargs)
        elif self.exists(obj, **kwargs):
            rel_path = self._construct_path(obj, **kwargs)
            # Chose whether to use the dataset file itself or an alternate file
            if file_name:
                source_file = os.path.abspath(file_name)
                # Copy into cache
                cache_file = self._get_cache_path(rel_path)
                try:
                    if source_file != cache_file:
                        # FIXME? Should this be a `move`?
                        shutil.copy2(source_file, cache_file)
                    self._fix_permissions(cache_file)
                except OSError:
                    log.exception("Trouble copying source file '%s' to cache '%s'", source_file, cache_file)
            else:
                source_file = self._get_cache_path(rel_path)

            self._push_to_os(rel_path, source_file)

        else:
            raise ObjectNotFound( 'objectstore.update_from_file, object does not exist: %s, kwargs: %s' % ( str( obj ), str( kwargs ) ) )

    def get_object_url(self, obj, **kwargs):
        if self.exists(obj, **kwargs):
            rel_path = self._construct_path(obj, **kwargs)
            try:
                url = self.service.make_blob_url(container_name=self.container_name, blob_name=rel_path)
                return url
            except AzureHttpError:
                log.exception("Trouble generating URL for dataset '%s'", rel_path)
        return None

    def get_store_usage_percent(self):
        return 0.0

    ##################
    # Secret Methods #
    ##################

    def __cache_monitor(self):
        time.sleep(2)  # Wait for things to load before starting the monitor
        while self.running:
            total_size = 0
            # Is this going to be too expensive of an operation to be done frequently?
            file_list = []
            for dirpath, _, filenames in os.walk(self.staging_path):
                for filename in filenames:
                    filepath = os.path.join(dirpath, filename)
                    file_size = os.path.getsize(filepath)
                    total_size += file_size
                    # Get the time given file was last accessed
                    last_access_time = time.localtime(os.stat(filepath).st_atime)
                    # Compose a tuple of the access time and the file path
                    file_tuple = last_access_time, filepath, file_size
                    file_list.append(file_tuple)
            # Sort the file list (based on access time)
            file_list.sort()
            # Initiate cleaning once within 10% of the defined cache size?
            cache_limit = self.cache_size * 0.9
            if total_size > cache_limit:
                log.info("Initiating cache cleaning: current cache size: %s; clean until smaller than: %s",
                         convert_bytes(total_size), convert_bytes(cache_limit))
                # How much to delete? Deleting only down to the 90% limit means
                # cleaning is likely to run frequently and risks hitting the hard
                # limit again soon - maybe delete an additional margin?
                # For now, delete enough to leave at least 10% of the total cache free
                delete_this_much = total_size - cache_limit
                # Keep deleting datasets from file_list until deleted_amount does not
                # exceed delete_this_much; start deleting from the front of the file list,
                # which assumes the oldest files come first on the list.
                deleted_amount = 0
                for entry in file_list:
                    if deleted_amount < delete_this_much:
                        deleted_amount += entry[2]
                        os.remove(entry[1])
                        # Debugging code for printing deleted files' stats
                        # folder, file_name = os.path.split(entry[1])
                        # file_date = time.strftime("%m/%d/%y %H:%M:%S", entry[0])
                        # log.debug("%-25s %s, size %s (deleted %s/%s)"
                        #     % (file_name, file_date, convert_bytes(entry[2]),
                        #        convert_bytes(deleted_amount), convert_bytes(delete_this_much)))
                    else:
                        log.debug("Cache cleaning done. Total space freed: %s", convert_bytes(deleted_amount))
                        break

            self.sleeper.sleep(30)  # Test cache size every 30 seconds?
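
The monitor above is essentially an LRU eviction pass over the cache directory: walk it, sort files by access time, and delete the oldest until usage drops back under the limit. The same policy as a self-contained sketch (the function name and parameters are illustrative, not part of the object store API):

import os

def evict_lru(cache_dir, cache_limit_bytes):
    """Delete least-recently-accessed files until cache_dir fits under the limit."""
    entries = []
    total_size = 0
    for dirpath, _, filenames in os.walk(cache_dir):
        for filename in filenames:
            filepath = os.path.join(dirpath, filename)
            size = os.path.getsize(filepath)
            total_size += size
            entries.append((os.stat(filepath).st_atime, filepath, size))
    entries.sort()  # oldest access time first
    for _, filepath, size in entries:
        if total_size <= cache_limit_bytes:
            break
        os.remove(filepath)
        total_size -= size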
Example no. 34
0
# ContainerSamples, BlockBlobSamples, AppendBlobSamples and PageBlobSamples
# are provided by the accompanying sample modules (not shown here).
from azure.storage import CloudStorageAccount

class SampleTest():
	def __init__(self):
		try:
			import config as config
		except ImportError:
			raise ValueError('Please specify configuration settings in config.py.')

		if config.IS_EMULATED:
			self.account = CloudStorageAccount(is_emulated=True)
		else:
			# Note that account key and sas should not both be included
			account_name = config.STORAGE_ACCOUNT_NAME
			account_key = config.STORAGE_ACCOUNT_KEY
			sas = config.SAS
			self.account = CloudStorageAccount(account_name=account_name,
											   account_key=account_key,
											   sas_token=sas)
		# Create the service for both branches; previously only the
		# non-emulated branch set self.service, so emulated runs failed.
		self.service = self.account.create_block_blob_service()

	def test_container_samples(self):
		container = ContainerSamples(self.account)
		container.run_all_samples()

	def test_block_blob_samples(self):
		blob = BlockBlobSamples(self.account)
		blob.run_all_samples()

	def test_append_blob_samples(self):
		blob = AppendBlobSamples(self.account)
		blob.run_all_samples()

	def test_page_blob_samples(self):
		blob = PageBlobSamples(self.account)
		blob.run_all_samples()

	def list_containers(self):
		self.service = self.account.create_block_blob_service()
		containers = list(self.service.list_containers())
		print('All containers in your account:')
		for container in containers:
			print(container.name)

	def list_all_blobs_in_all_containers(self):
		containers = list(self.service.list_containers())
		print('Full list:')
		for container in containers:
			print(container.name+':')		
			blobs = list(self.service.list_blobs(container.name))
			for blob in blobs:
				print(blob.name)
			print('')

	def test_get_put_blob(self):
		import config as config
		from azure.storage.blob import BlockBlobService
		account_name = config.STORAGE_ACCOUNT_NAME
		account_key = config.STORAGE_ACCOUNT_KEY
		block_blob_service = BlockBlobService(account_name, account_key)
		# Upload a local file, then download it back under a new name
		block_blob_service.create_blob_from_path(
			'cont2',
			'sunset.png',
			'sunset.png')
		block_blob_service.get_blob_to_path('cont2', 'sunset.png', 'out-sunset.png')
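
A minimal driver for SampleTest might look like the following sketch; it assumes the config.py required by __init__ is present:

if __name__ == '__main__':
    sample = SampleTest()
    sample.list_containers()
    sample.list_all_blobs_in_all_containers()
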
class StorageAccountTest(StorageTestCase):

    def setUp(self):
        super(StorageAccountTest, self).setUp()
        self.account_name = self.settings.STORAGE_ACCOUNT_NAME
        self.account_key = self.settings.STORAGE_ACCOUNT_KEY
        self.sas_token = '?sv=2015-04-05&st=2015-04-29T22%3A18%3A26Z&se=2015-04-30T02%3A23%3A26Z&sr=b&sp=rw&sip=168.1.5.60-168.1.5.70&spr=https&sig=Z%2FRHIX5Xcg0Mq2rqI3OlWTjEg2tYkboXr1P9ZUXDtkk%3D'
        self.account = CloudStorageAccount(self.account_name, self.account_key)

    #--Helpers-----------------------------------------------------------------
    def validate_service(self, service, service_type):
        self.assertIsNotNone(service)
        self.assertIsInstance(service, service_type)
        self.assertEqual(service.account_name, self.account_name)
        self.assertEqual(service.account_key, self.account_key)

    #--Test cases --------------------------------------------------------
    def test_create_block_blob_service(self):
        # Arrange

        # Act
        service = self.account.create_block_blob_service()

        # Assert
        self.validate_service(service, BlockBlobService)

    def test_create_page_blob_service(self):
        # Arrange

        # Act
        service = self.account.create_page_blob_service()

        # Assert
        self.validate_service(service, PageBlobService)

    def test_create_append_blob_service(self):
        # Arrange

        # Act
        service = self.account.create_append_blob_service()

        # Assert
        self.validate_service(service, AppendBlobService)

    def test_create_table_service(self):
        # Arrange

        # Act
        service = self.account.create_table_service()

        # Assert
        self.validate_service(service, TableService)

    def test_create_queue_service(self):
        # Arrange

        # Act
        service = self.account.create_queue_service()

        # Assert
        self.validate_service(service, QueueService)

    def test_create_file_service(self):
        # Arrange

        # Act
        service = self.account.create_file_service()

        # Assert
        self.validate_service(service, FileService)

    def test_create_service_no_key(self):
        # Arrange

        # Act
        bad_account = CloudStorageAccount('', '')
        with self.assertRaises(ValueError):
            service = bad_account.create_block_blob_service()

        # Assert

    def test_create_account_sas(self):
        # Arrange
      
        # Act
        sas_account = CloudStorageAccount(self.account_name, sas_token=self.sas_token)
        service = sas_account.create_block_blob_service()

        # Assert
        self.assertIsNotNone(service)
        self.assertEqual(service.account_name, self.account_name)
        self.assertIsNone(service.account_key)
        self.assertEqual(service.sas_token, self.sas_token)

    def test_create_account_sas_and_key(self):
        # Arrange
        
        # Act
        account = CloudStorageAccount(self.account_name, self.account_key, self.sas_token)
        service = account.create_block_blob_service()

        # Assert
        self.validate_service(service, BlockBlobService)

    def test_create_account_emulated(self):
        # Arrange
      
        # Act
        account = CloudStorageAccount(is_emulated=True)
        service = account.create_block_blob_service()

        # Assert
        self.assertIsNotNone(service)
        self.assertEqual(service.account_name, 'devstoreaccount1')
        self.assertIsNotNone(service.account_key)

    @record
    def test_generate_account_sas(self):
        # SAS URL is calculated from storage key, so this test runs live only
        if TestMode.need_recording_file(self.test_mode):
            return

        # Arrange
        token = self.account.generate_shared_access_signature(
            Services.BLOB,
            ResourceTypes.OBJECT,
            AccountPermissions.READ,
            datetime.utcnow() + timedelta(hours=1),
        )

        service = self.account.create_block_blob_service()
        data = b'shared access signature with read permission on blob'
        container_name = 'container1'
        blob_name = 'blob1.txt'

        try:
            service.create_container(container_name)
            service.create_blob_from_bytes(container_name, blob_name, data)

            # Act
            url = service.make_blob_url(
                container_name,
                blob_name,
                sas_token=token,
            )
            response = requests.get(url)

            # Assert
            self.assertTrue(response.ok)
            self.assertEqual(data, response.content)
        finally:
            service.delete_container(container_name)
    def setUp(self):
        self.account = CloudStorageAccount(account_name=credentials.getStorageServicesName(),
                                           account_key=credentials.getStorageServicesKey())
Example no. 37
0
def Init(name, key):
    """Create the module-level CloudStorageAccount and table service from an account name and key."""
    global account, table_service
    account = CloudStorageAccount(name, key)
    print("storage account created")
    table_service = account.create_table_service()
    print("table service created")

from azure.storage import CloudStorageAccount

import config

account_name = config.STORAGE_ACCOUNT_NAME
account_key  = config.STORAGE_ACCOUNT_KEY

account = CloudStorageAccount(account_name = account_name, 
                              account_key = account_key)

service = account.create_block_blob_service()

#   The last time a backup was dropped into the folder, it was named 'splunketccfg.tar'.
#   This is (almost) always the one to restore.

container_name      = 'backups'
restore_file_name   = 'splunketccfg.tar'
OUTPUT_FILE         = 'splunketccfg.tar'

exists              = service.exists(container_name, restore_file_name)
if exists:
    service.get_blob_to_path(container_name, restore_file_name, OUTPUT_FILE)
else:
    print('Backup file does not exist')
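
The upload half of this backup workflow is symmetric. A sketch of how the archive might be pushed into the same container (the local path is illustrative):

def upload_backup(local_path='splunketccfg.tar'):
    # create_container returns False instead of raising if the container already exists
    service.create_container(container_name)
    service.create_blob_from_path(container_name, restore_file_name, local_path)
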
def _get_service():
    account_name        = config.STORAGE_ACCOUNT_NAME
    account_key         = config.STORAGE_ACCOUNT_KEY
    account             = CloudStorageAccount(account_name = account_name, account_key = account_key)
    service             = account.create_block_blob_service()
    return service
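
With the helper in place, one-off operations stay short; for example (container name illustrative):

service = _get_service()
for blob in service.list_blobs('backups'):
    print(blob.name)
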
import os

from datetime import date
from datetime import timedelta

from azure.storage      import CloudStorageAccount, AccessPolicy, AccountPermissions
from azure.servicebus   import ServiceBusService, Message
from dumpling_util      import Logging

import nearby_config
_config = nearby_config.named('analysis_worker_config.json')

s_storage_account   = CloudStorageAccount(_config.STORAGE_ACCOUNT_NAME, _config.STORAGE_ACCOUNT_KEY)
s_blob_service      = s_storage_account.create_block_blob_service()
s_tableService      = s_storage_account.create_table_service()

_bus_service = ServiceBusService(
    service_namespace= _config.SERVICE_BUS_NAMESPACE,
    shared_access_key_name= _config.SHARED_ACCESS_KEY_NAME,
    shared_access_key_value= _config.SERVICE_BUS_KEY)

class DumplingStateContext:
    def Update(self):
        Logging.Verbose('Setting dumpling state to "%s"' % self.data['State'])
        s_tableService.insert_or_replace_entity(_config.STORAGE_ACCOUNT_STATE_TABLE_NAME, self.data)

    def SetState(self, newState):
        self.data['State'] = str(newState)
        self.Update()
 
    def SaveResult(self, result, testContext):
Example no. 41
0

def append_css(r, subreddit, css, position, height):
    new_css = '.flair-' + str(position) + '{background-position: 0 -' + str(
        height * position) + 'px}'
    r.set_stylesheet(subreddit, css + new_css)


def log(message):
    table_service.insert_entity('logs',
                                {'PartitionKey': 'flair', 'RowKey': str(datetime.datetime.now()),
                                 'text': message})
    print('[*] ' + message)


storage_account = CloudStorageAccount(storage_account_name, storage_account_key)

table_service = storage_account.create_table_service()
blob_service = storage_account.create_block_blob_service()

blob_service.create_container('images', public_access='container')
table_service.create_table('flair')
table_service.create_table('logs')

r = praw.Reddit(user_agent)
r.login(username, password)
r.config.decode_html_entities = True

while True:
    for message in r.get_unread(limit=None):
        log('received message from ' + message.author.name)
    def setUp(self):
        self.account_name = 'storagename'
        self.account_key = 'NzhL3hKZbJBuJ2484dPTR+xF30kYaWSSCbs2BzLgVVI1woqeST/1IgqaLm6QAOTxtGvxctSNbIR/1hW8yH+bJg=='
        self.account = CloudStorageAccount(self.account_name, self.account_key)
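
A quick way to smoke-test credentials like the ones wired up in these fixtures is a single cheap round trip; a sketch, assuming account_name and account_key hold valid values:

account = CloudStorageAccount(account_name, account_key)
service = account.create_block_blob_service()
for container in service.list_containers():
    print(container.name)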