def test__get_fs():
  """ProxyFS._get_fs dispatches on the URI scheme and falls back to the default fs."""
  make_logged_in_client(username='******', groupname='default', recreate=True, is_superuser=False)
  test_user = User.objects.get(username='******')
  add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')
  add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')

  s3_fs = MockFs("s3_access")
  adls_fs = MockFs("adls_access")
  hdfs_fs = MockFs()
  proxy = ProxyFS({'s3a': s3_fs, 'hdfs': hdfs_fs, 'adl': adls_fs}, 'hdfs')
  proxy.setuser(test_user)

  resolve = proxy._get_fs
  # (path, filesystem that should serve it) — scheme matching is case-insensitive,
  # and a scheme-less path goes to the default (hdfs) filesystem.
  expected_routing = [
    ('s3a://bucket', s3_fs),
    ('S3A://bucket/key', s3_fs),
    ('adl:/path', adls_fs),
    ('adl://net/path', adls_fs),
    ('hdfs:/path', hdfs_fs),
    ('hdfs://net/path', hdfs_fs),
    ('/tmp', hdfs_fs),
  ]
  for path, fs in expected_routing:
    eq_(resolve(path), fs)

  # An unknown scheme is rejected outright.
  assert_raises(IOError, resolve, 'ftp://host')
def setUp(self):
  """Create two regular users, monkey-patch the HS2 API with a mock, and swap in a mocked HDFS."""
  self.client = make_logged_in_client(username="******", groupname="default", recreate=True, is_superuser=False)
  self.client_not_me = make_logged_in_client(username="******", groupname="default", recreate=True, is_superuser=False)

  self.user = User.objects.get(username="******")
  self.user_not_me = User.objects.get(username="******")

  # Beware: Monkey patch HS2API Mock API
  if not hasattr(notebook.connectors.hiveserver2, 'original_HS2Api'):  # Could not monkey patch base.get_api
    notebook.connectors.hiveserver2.original_HS2Api = notebook.connectors.hiveserver2.HS2Api
  notebook.connectors.hiveserver2.HS2Api = MockedApi

  # Remember the real filesystem so tearDown can restore it, then install the mock.
  originalCluster.get_hdfs()
  self.original_fs = originalCluster.FS_CACHE["default"]
  originalCluster.FS_CACHE["default"] = MockFs()

  # Both users get notebook + beeswax access.
  for username in ("test", "not_perm_user"):
    for app in ("notebook", "beeswax"):
      grant_access(username, "default", app)
  add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')
def test_upload(self):
  """Upload a file larger than one write chunk through the filebrowser endpoint
  and verify the destination file's content matches the local source.

  Fixes: replaces the deprecated py2-only ``file()`` builtin with ``open()``
  (used via context managers so the handles are closed), computes the
  destination directory once instead of duplicating the expression, and drops
  the unused ``stats`` binding.
  """
  with tempfile.NamedTemporaryFile() as local_file:
    # Make sure we can upload larger than the UPLOAD chunk size
    file_size = DEFAULT_WRITE_SIZE * 2
    local_file.write('0' * file_size)
    local_file.flush()

    dest_dir = self.test_fs + '/test_upload'
    self.client.mkdir(dest_dir)

    local_path = local_file.name
    dest_path = '%s/%s' % (dest_dir, os.path.basename(local_path))

    add_permission(self.user.username, 'has_abfs', permname='abfs_access', appname='filebrowser')

    # Just upload the current python file
    try:
      with open(local_path) as upload_src:  # open() instead of the py2-only file() builtin
        resp = self.c.post(
            '/filebrowser/upload/file?dest=%s' % dest_dir,
            dict(dest=dest_dir, hdfs_file=upload_src))
      response = json.loads(resp.content)
    finally:
      # Always revoke the temporary permission, even if the upload blew up.
      remove_from_group(self.user.username, 'has_abfs')

    assert_equal(0, response['status'], response)
    self.client.stats(dest_path)  # raises if the destination file was not created

    actual = self.client.read(dest_path)
    with open(local_path) as expected_src:
      expected = expected_src.read()
    assert_equal(
        actual, expected,
        'files do not match: %s != %s' % (len(actual), len(expected)))
def test_fs_permissions_regular_user(self):
  """For a non-superuser, s3a/adl access follows the has_s3/has_adls group permissions."""
  user_client = make_logged_in_client(username='******', groupname='default', recreate=True, is_superuser=False)
  user = User.objects.get(username='******')

  s3_fs = MockFs("s3_access")
  adls_fs = MockFs("adls_access")
  hdfs_fs = MockFs()
  proxy = ProxyFS({'s3a': s3_fs, 'hdfs': hdfs_fs, 'adl': adls_fs}, 'hdfs')
  proxy.setuser(user)
  resolve = proxy._get_fs

  remove_from_group(user.username, 'has_s3')
  remove_from_group(user.username, 'has_adls')

  # No perms by default: cloud schemes are denied, hdfs always allowed.
  for denied in ('s3a://bucket', 'S3A://bucket/key', 'adl://net/key', 'adl:/key'):
    assert_raises(Exception, resolve, denied)
  resolve('hdfs://path')
  resolve('/tmp')

  try:
    # Add perm — now every scheme resolves.
    add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')
    add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')

    for allowed in ('s3a://bucket', 'S3A://bucket/key', 'adl://net/key', 'adl:/key', 'hdfs://path', '/tmp'):
      resolve(allowed)
  finally:
    remove_from_group(user.username, 'has_s3')
    remove_from_group(user.username, 'has_adls')
def test_upload_file(self):
  """Upload a file larger than one write chunk to the test filesystem and
  verify the stored content matches the local source.

  Fixes: replaces the deprecated py2-only ``file()`` builtin with ``open()``
  inside context managers so the local handles are closed, and drops the
  unused ``stats`` binding (the stats call itself is kept as an existence
  check).
  """
  with tempfile.NamedTemporaryFile() as local_file:
    # Make sure we can upload larger than the UPLOAD chunk size
    file_size = DEFAULT_WRITE_SIZE * 2
    local_file.write('0' * file_size)
    local_file.flush()

    dest_dir = self.get_test_path('test_upload')
    local_path = local_file.name
    dest_path = '%s/%s' % (dest_dir, os.path.basename(local_path))

    add_permission(self.user.username, 'has_s3', permname='s3_access', appname='filebrowser')
    try:
      # Just upload the current python file
      with open(local_path) as upload_src:  # open() instead of the py2-only file() builtin
        resp = self.c.post(
            '/filebrowser/upload/file?dest=%s' % dest_dir,
            dict(dest=dest_dir, hdfs_file=upload_src))
      response = json.loads(resp.content)
    finally:
      # Always revoke the temporary permission, even if the upload blew up.
      remove_from_group(self.user.username, 'has_s3')

    assert_equal(0, response['status'], response)
    self.fs.stats(dest_path)  # raises if the upload did not land

    remote_file = self.fs.open(dest_path)
    actual = remote_file.read(file_size)
    with open(local_path) as expected_src:
      expected = expected_src.read()
    assert_equal(
        actual, expected,
        'files do not match: %s != %s' % (len(actual), len(expected)))
def test_multi_fs_selection():
  """Two-path ProxyFS operations (copy, copyfile, rename) route to the source path's filesystem."""
  try:
    from mock import MagicMock
  except ImportError:
    raise SkipTest("Skips until HUE-2947 is resolved")

  make_logged_in_client(username='******', groupname='default', recreate=True, is_superuser=False)
  test_user = User.objects.get(username='******')
  add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')

  s3_mock = MagicMock()
  hdfs_mock = MagicMock()
  proxy = ProxyFS({'s3a': s3_mock, 'hdfs': hdfs_mock}, 'hdfs')
  proxy.setuser(test_user)

  proxy.copy('s3a://bucket1/key', 's3a://bucket2/key')
  s3_mock.copy.assert_called_once_with('s3a://bucket1/key', 's3a://bucket2/key')
  assert_false(hdfs_mock.copy.called)

  proxy.copyfile('s3a://bucket/key', 'key2')
  s3_mock.copyfile.assert_called_once_with('s3a://bucket/key', 'key2')
  assert_false(hdfs_mock.copyfile.called)

  proxy.rename('/tmp/file', 'shmile')
  hdfs_mock.rename.assert_called_once_with('/tmp/file', 'shmile')
  assert_false(s3_mock.rename.called)

  # Will be addressed in HUE-2934
  assert_raises(NotImplementedError, proxy.copy_remote_dir, 's3a://bucket/key', '/tmp/dir')
def test_fs_selection():
  """Single-path ProxyFS operations are forwarded to the filesystem matching the path's scheme."""
  try:
    from mock import MagicMock
  except ImportError:
    raise SkipTest("Skips until HUE-2947 is resolved")

  make_logged_in_client(username='******', groupname='default', recreate=True, is_superuser=False)
  test_user = User.objects.get(username='******')
  add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')

  s3_mock = MagicMock()
  hdfs_mock = MagicMock()
  proxy = ProxyFS({'s3a': s3_mock, 'hdfs': hdfs_mock}, 'hdfs')
  proxy.setuser(test_user)

  proxy.isdir('s3a://bucket/key')
  s3_mock.isdir.assert_called_once_with('s3a://bucket/key')
  assert_false(hdfs_mock.isdir.called)

  proxy.isfile('hdfs://localhost:42/user/alice/file')
  hdfs_mock.isfile.assert_called_once_with('hdfs://localhost:42/user/alice/file')
  assert_false(s3_mock.isfile.called)

  # A scheme-less path goes to the default (hdfs) filesystem.
  proxy.open('/user/alice/file')
  hdfs_mock.open.assert_called_once_with('/user/alice/file')
  assert_false(s3_mock.open.called)

  # Unknown or malformed schemes are rejected.
  assert_raises(IOError, proxy.stats, 'ftp://host')
  assert_raises(IOError, proxy.stats, 's3//bucket/key')
def test_fs_permissions_regular_user(self):
  """For a non-superuser, s3a access follows the has_s3 group permission; hdfs is always allowed."""
  user_client = make_logged_in_client(username='******', groupname='default', recreate=True, is_superuser=False)
  user = User.objects.get(username='******')

  proxy = ProxyFS({'s3a': MockFs(), 'hdfs': MockFs()}, 'hdfs')
  proxy.setuser(user)
  resolve = proxy._get_fs

  remove_from_group(user.username, 'has_s3')

  # No perms by default: s3a is denied, hdfs still resolves.
  for denied in ('s3a://bucket', 'S3A://bucket/key'):
    assert_raises(Exception, resolve, denied)
  resolve('hdfs://path')
  resolve('/tmp')

  try:
    # Add perm — now s3a resolves too.
    add_permission(user.username, 'has_s3', permname='s3_access', appname='filebrowser')
    for allowed in ('s3a://bucket', 'S3A://bucket/key', 'hdfs://path', '/tmp'):
      resolve(allowed)
  finally:
    remove_from_group(user.username, 'has_s3')
def test__get_fs_pair():
  """_get_fs_pair resolves both paths of a two-path operation to their filesystems."""
  make_logged_in_client(username='******', groupname='default', recreate=True, is_superuser=False)
  test_user = User.objects.get(username='******')
  add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')

  s3_fs = MockFs()
  hdfs_fs = MockFs()
  proxy = ProxyFS({'s3a': s3_fs, 'hdfs': hdfs_fs}, 'hdfs')
  proxy.setuser(test_user)
  pair_of = proxy._get_fs_pair

  # Both sides resolve independently; a relative second path follows the first's scheme.
  eq_(pair_of('s3a://bucket1/key', 's3a://bucket2/key'), (s3_fs, s3_fs))
  eq_(pair_of('s3a://bucket/key', 'key2'), (s3_fs, s3_fs))
  eq_(pair_of('/tmp/file', 'shmile'), (hdfs_fs, hdfs_fs))

  # Unknown or malformed schemes are rejected on either side.
  assert_raises(IOError, pair_of, 'ftp://host', 'key2')
  assert_raises(IOError, pair_of, 's3//bucket/key', 'hdfs://normal/path')
def test_view_perms():
  """Superusers can reach all useradmin pages; a regular user only his own profile."""
  # Super user
  admin_client = make_logged_in_client()
  assert_equal(200, admin_client.get("/useradmin/").status_code)
  assert_equal(200, admin_client.get("/useradmin/users/edit/test").status_code)

  # Normal user
  plain_client = make_logged_in_client('user', is_superuser=False)
  add_permission('user', 'test-view-group', 'access_view:useradmin:edit_user', 'useradmin')

  assert_equal(401, plain_client.get("/useradmin/").status_code)
  assert_equal(401, plain_client.get("/useradmin/users/edit/test").status_code)

  response = plain_client.get("/useradmin/users/edit/user")  # Can access his profile page
  assert_equal(200, response.status_code, response.content)
def setUp(self):
  """Bind a dbms client for the test user and grant metastore write access."""
  test_user = User.objects.get(username='******')
  self.db = dbms.get(test_user, get_query_server_config())
  add_permission("test", "test", "write", "metastore")