def test_with_credentials(self):
    """With static AWS keys there is no per-user client support, so every lookup shares one cached client."""
    try:
        finish = conf.AWS_ACCOUNTS.set_for_testing({
            'default': {
                'access_key_id': 'access_key_id',
                'secret_access_key': 'secret_access_key'
            }
        })
        with patch('aws.client.conf_idbroker.get_conf') as get_conf, \
             patch('aws.client.Client.get_s3_connection'):
            get_conf.return_value = {}  # No IDBroker configured

            anonymous_client = get_client(name='default', fs='s3a')
            per_user_client = get_client(name='default', fs='s3a', user='******')

            provider = get_credential_provider('default', 'hue')
            assert_equal(provider.get_credentials().get('AccessKeyId'),
                         conf.AWS_ACCOUNTS['default'].ACCESS_KEY_ID.get())
            # Should be the same as no support for user based client with credentials & no Expiration
            assert_equal(anonymous_client, per_user_client)
    finally:
        finish()
        clear_cache()
        conf.clear_cache()
def test_with_idbroker(self):
    """S3 clients obtained through IDBroker: expired credentials force a fresh
    client per lookup, unexpired ones are cached per user.

    NOTE(review): user arguments appear redacted ('******') in this dump, so the
    "different user" assertion at the end presumably used distinct usernames
    originally — confirm against upstream.
    """
    try:
        finish = conf.AWS_ACCOUNTS.set_for_testing({})  # Set empty to test when no configs are set
        with patch('aws.client.conf_idbroker.get_conf') as get_conf:
            with patch('aws.client.Client.get_s3_connection'):
                with patch('aws.client.IDBroker.get_cab') as get_cab:
                    # Presence of the CAB address is what routes credentials through IDBroker.
                    get_conf.return_value = {'fs.s3a.ext.cab.address': 'address'}
                    # Expiration 0 means the credentials are always considered stale.
                    get_cab.return_value = {'Credentials': {'AccessKeyId': 'AccessKeyId', 'Expiration': 0}}

                    provider = get_credential_provider('default', 'hue')
                    assert_equal(provider.get_credentials().get('AccessKeyId'), 'AccessKeyId')

                    client1 = get_client(name='default', fs='s3a', user='******')
                    client2 = get_client(name='default', fs='s3a', user='******')
                    assert_not_equal(client1, client2)  # Test that with Expiration 0 clients not equal

                    # Re-issue credentials valid for ~10 seconds so caching kicks in.
                    get_cab.return_value = {
                        'Credentials': {'AccessKeyId': 'AccessKeyId', 'Expiration': int(current_ms_from_utc()) + 10 * 1000}
                    }
                    client3 = get_client(name='default', fs='s3a', user='******')
                    client4 = get_client(name='default', fs='s3a', user='******')
                    client5 = get_client(name='default', fs='s3a', user='******')
                    assert_equal(client3, client4)  # Test that with 10 sec expiration, clients equal
                    assert_not_equal(client4, client5)  # Test different user have different clients
    finally:
        finish()
        clear_cache()
        conf.clear_cache()
def _get_abfs(self, request):
    """Fetch the ABFS client from fsmanager; abort the upload if none exists."""
    client = fsmanager.get_client(fs='abfs')
    if not client:
        raise ABFSFileUploadError(_("No ABFS filesystem found"))
    return client
def _get_abfs(self, request):
    """Fetch a per-user ABFS client; abort the upload if none exists."""
    client = get_client(fs='abfs', user=request.user.username)
    if not client:
        raise ABFSFileUploadError(_("No ABFS filesystem found"))
    return client
def _get_s3fs(self, request):
    """Fetch a per-user S3 client; abort the upload if none exists."""
    # Pre 6.0 request.fs did not exist, now it does. The logic for assigning
    # request.fs is not correct for FileUploadHandler, so resolve it directly.
    client = get_client(user=request.user.username)
    if not client:
        raise S3FileUploadError(_("No S3 filesystem found."))
    return client
def test_with_core_site(self):
    """ABFS clients backed by core-site OAuth credentials (no Expiration) are
    shared between the anonymous and per-user lookups."""
    try:
        # Two independent reset callables: Azure account and ABFS cluster config.
        finish = (conf.AZURE_ACCOUNTS.set_for_testing({'default': {}}),
                  conf.ABFS_CLUSTERS.set_for_testing({
                      'default': {
                          'fs_defaultfs': 'fs_defaultfs',
                          'webhdfs_url': 'webhdfs_url'
                      }
                  }))
        with patch('azure.client.conf_idbroker.get_conf') as get_conf:
            with patch('azure.client.ABFS.get_client'):
                with patch('azure.client.ActiveDirectory.get_token') as get_token:
                    with patch('azure.conf.core_site.get_conf') as core_site_get_conf:
                        get_token.return_value = {'access_token': 'access_token', 'token_type': '', 'expires_on': None}
                        get_conf.return_value = {}  # No IDBroker configured
                        # OAuth client credentials are sourced from core-site, not Hue config.
                        core_site_get_conf.return_value = {
                            'fs.azure.account.oauth2.client.id': 'client_id',
                            'fs.azure.account.oauth2.client.secret': 'client_secret',
                            'fs.azure.account.oauth2.client.endpoint': 'refresh_url'
                        }

                        client1 = get_client(name='default', fs='abfs')
                        client2 = get_client(name='default', fs='abfs', user='******')

                        provider = get_credential_provider('default', 'hue')
                        assert_equal(provider.get_credentials().get('access_token'), 'access_token')
                        assert_equal(client1, client2)  # Should be the same as no support for user based client with credentials & no Expiration
    finally:
        for f in finish:
            f()
        clear_cache()
def __init__(self, request):
    """Initialize upload state; for S3 destinations, resolve and validate the target bucket."""
    super(S3FileUploadHandler, self).__init__(request)
    self._request = request
    self.chunk_size = DEFAULT_WRITE_SIZE
    # GET param avoids infinite looping
    self.destination = request.GET.get('dest', None)
    self.target_path = None
    self.file = None
    self._mp = None
    self._part_num = 1

    if not self._is_s3_upload():
        return

    self._fs = get_client(fs='s3a', user=request.user.username)
    self.bucket_name, self.key_name = parse_uri(self.destination)[:2]
    # Verify that the path exists
    self._fs._stats(self.destination)
    self._bucket = self._fs._get_bucket(self.bucket_name)
def setUpClass(cls):
    """Resolve the test bucket and open an S3 connection, or mark the class as skipped."""
    cls.bucket_name = get_test_bucket()
    cls._should_skip = False

    if not cls.bucket_name:
        # Nothing to test against; remember why so individual tests can skip.
        cls._should_skip = True
        cls._skip_msg = "TEST_S3_BUCKET environment variable isn't set"
        return

    cls.path_prefix = 'test-hue/%s' % generate_id(size=16)

    if aws_conf.IS_SELF_SIGNING_ENABLED.get():
        cls.s3_connection = get_client(name='default', fs='s3a', user='******')._s3_connection
    else:
        cls.s3_connection = aws.get_client('default').get_s3_connection()  # Probably broken nowadays

    cls.bucket = cls.s3_connection.get_bucket(cls.bucket_name, validate=True)
def test_with_raz_enabled(self):
    """get_client() must still hand back a client when RAZ drives the S3 connection."""
    with patch('aws.client.RazS3Connection') as raz_s3_connection:
        restorers = [
            RAZ.IS_ENABLED.set_for_testing(True),
            conf.AWS_ACCOUNTS.set_for_testing({
                'default': {
                    'region': 'us-west-2',
                    'host': 's3-us-west-2.amazonaws.com',
                    'allow_environment_credentials': 'false'
                }
            }),
        ]
        try:
            assert_true(get_client(name='default', fs='s3a', user='******'))
        finally:
            for restore in restorers:
                restore()
            clear_cache()
            conf.clear_cache()
def test_with_idbroker(self):
    """ADLS clients obtained through IDBroker: expired tokens force a fresh
    client per lookup, unexpired ones are cached per user.

    NOTE(review): user arguments appear redacted ('******') in this dump, so the
    "different user" assertion at the end presumably used distinct usernames
    originally — confirm against upstream.
    """
    try:
        # Two independent reset callables: Azure account and ADLS cluster config.
        finish = (conf.AZURE_ACCOUNTS.set_for_testing({}),
                  conf.ADLS_CLUSTERS.set_for_testing({
                      'default': {
                          'fs_defaultfs': 'fs_defaultfs',
                          'webhdfs_url': 'webhdfs_url'
                      }
                  }))
        with patch('azure.client.conf_idbroker.get_conf') as get_conf:
            with patch('azure.client.WebHdfs.get_client'):
                with patch('azure.client.IDBroker.get_cab') as get_cab:
                    # Presence of the CAB address is what routes credentials through IDBroker.
                    get_conf.return_value = {'fs.azure.ext.cab.address': 'address'}
                    # expires_on 0 means the token is always considered stale.
                    get_cab.return_value = {'access_token': 'access_token', 'token_type': 'token_type', 'expires_on': 0}

                    provider = get_credential_provider('default', 'hue')
                    assert_equal(provider.get_credentials().get('access_token'), 'access_token')

                    client1 = get_client(name='default', fs='adl', user='******')
                    client2 = get_client(name='default', fs='adl', user='******')
                    assert_not_equal(client1, client2)  # Test that with Expiration 0 clients not equal

                    # Re-issue a token valid for ~10 seconds so caching kicks in.
                    get_cab.return_value = {
                        'Credentials': {'access_token': 'access_token', 'token_type': 'token_type', 'expires_on': int(current_ms_from_utc()) + 10 * 1000}
                    }
                    client3 = get_client(name='default', fs='adl', user='******')
                    client4 = get_client(name='default', fs='adl', user='******')
                    client5 = get_client(name='default', fs='adl', user='******')
                    assert_equal(client3, client4)  # Test that with 10 sec expiration, clients equal
                    assert_not_equal(client4, client5)  # Test different user have different clients
    finally:
        for f in finish:
            f()
        clear_cache()
def test_with_credentials(self):
    """Smoke test: a GS client must list bucket stats without raising."""
    gs_client = get_client(fs='gs')
    bucket_stats = gs_client.listdir_stats('gs://')
    LOG.info(len(bucket_stats))