def test_skip_wizard():
  """The 'hueLandingPage' cookie controls which template a superuser sees at
  '/'; non-superusers always land on home.mako regardless of the cookie.

  Bug fix: the original passed a list comprehension directly to
  assert_true(); a non-empty list is always truthy, so the assertions could
  never fail even when the expected template was missing.  Wrap the
  comprehension in any() so the check actually inspects template names.
  """
  c = make_logged_in_client()  # is_superuser

  # No cookie: superuser gets the admin wizard.
  response = c.get('/', follow=True)
  assert_true(any('admin_wizard.mako' in _template.filename for _template in response.templates),
              [_template.filename for _template in response.templates])

  # Cookie set to 'home': superuser is sent to the home page.
  c.cookies['hueLandingPage'] = 'home'
  response = c.get('/', follow=True)
  assert_true(any('home.mako' in _template.filename for _template in response.templates),
              [_template.filename for _template in response.templates])

  # Empty cookie behaves like no cookie.
  c.cookies['hueLandingPage'] = ''
  response = c.get('/', follow=True)
  assert_true(any('admin_wizard.mako' in _template.filename for _template in response.templates),
              [_template.filename for _template in response.templates])

  # Non-superuser: always the home page, whatever the cookie says.
  c = make_logged_in_client(username="******", password="******", is_superuser=False)

  response = c.get('/', follow=True)
  assert_true(any('home.mako' in _template.filename for _template in response.templates),
              [_template.filename for _template in response.templates])

  c.cookies['hueLandingPage'] = 'home'
  response = c.get('/', follow=True)
  assert_true(any('home.mako' in _template.filename for _template in response.templates),
              [_template.filename for _template in response.templates])

  c.cookies['hueLandingPage'] = ''
  response = c.get('/', follow=True)
  assert_true(any('home.mako' in _template.filename for _template in response.templates),
              [_template.filename for _template in response.templates])
def test_fs_selection():
  """Verify ProxyFS routes each single-path call to the backend matching the
  URI scheme, and rejects unknown or malformed schemes."""
  try:
    from mock import MagicMock
  except ImportError:
    raise SkipTest("Skips until HUE-2947 is resolved")

  make_logged_in_client(username='******', groupname='default', recreate=True, is_superuser=False)
  user = User.objects.get(username='******')
  add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')

  mock_s3, mock_hdfs = MagicMock(), MagicMock()
  fs = ProxyFS({'s3a': mock_s3, 'hdfs': mock_hdfs}, 'hdfs')
  fs.setuser(user)

  # An s3a:// URI must be served by the S3 backend only.
  fs.isdir('s3a://bucket/key')
  mock_s3.isdir.assert_called_once_with('s3a://bucket/key')
  assert_false(mock_hdfs.isdir.called)

  # An hdfs:// URI must be served by the HDFS backend only.
  fs.isfile('hdfs://localhost:42/user/alice/file')
  mock_hdfs.isfile.assert_called_once_with('hdfs://localhost:42/user/alice/file')
  assert_false(mock_s3.isfile.called)

  # A scheme-less path falls back to the default scheme ('hdfs').
  fs.open('/user/alice/file')
  mock_hdfs.open.assert_called_once_with('/user/alice/file')
  assert_false(mock_s3.open.called)

  # Unknown and malformed schemes raise IOError.
  assert_raises(IOError, fs.stats, 'ftp://host')
  assert_raises(IOError, fs.stats, 's3//bucket/key')
def setUp(self):
  # Purpose: prepare a notebook document owned by one user, plus a second
  # user in the same group who has no permissions on it.
  # NOTE(review): the username literals below are redacted ('******') in this
  # copy of the source; the grant_access() calls suggest they were
  # 'test' and 'not_perm_user' -- confirm against version control.
  self.client = make_logged_in_client(username="******", groupname="default", recreate=True, is_superuser=False)
  self.client_not_me = make_logged_in_client(username="******", groupname="default", recreate=True, is_superuser=False)

  self.user = User.objects.get(username="******")
  self.user_not_me = User.objects.get(username="******")

  # Both users need access to the notebook app.
  grant_access("test", "default", "notebook")
  grant_access("not_perm_user", "default", "notebook")

  # Minimal saved-notebook payload used by the tests.
  self.notebook_json = """ { "selectedSnippet": "hive", "showHistory": false, "description": "Test Hive Query", "name": "Test Hive Query", "sessions": [ { "type": "hive", "properties": [], "id": null } ], "type": "query-hive", "id": 50010, "snippets": [], "uuid": "5982a274-de78-083c-2efc-74f53dce744c" } """

  self.notebook = json.loads(self.notebook_json)
  # Document2 is the notebook object itself; Document is the legacy index
  # entry linked to it.
  self.doc2 = Document2.objects.create(id=50010, name=self.notebook['name'], type=self.notebook['type'], owner=self.user)
  self.doc1 = Document.objects.link(self.doc2, owner=self.user, name=self.doc2.name, description=self.doc2.description, extra=self.doc2.type)
def setUp(self):
  """Monkey-patch the RM / MapReduce / history-server API factories with
  mocks, and create two job-browser users.

  Bug fix: the original save-once guards were inconsistent -- the first
  tested hasattr(resource_manager_api, 'old_get_resource_manager_api') but
  stored the original under 'old_get_resource_manager', and the second
  tested an attribute on resource_manager_api while storing on
  mapreduce_api.  As written, neither guard could ever see the attribute it
  set, so a second setUp() would overwrite the saved originals with the
  mock lambdas.  Each guard now tests exactly the attribute it sets (the
  stored names are unchanged so any restore code keeps working).
  """
  # Beware: Monkey patching
  if not hasattr(resource_manager_api, 'old_get_resource_manager'):
    resource_manager_api.old_get_resource_manager = resource_manager_api.get_resource_manager
  if not hasattr(mapreduce_api, 'old_get_mapreduce_api'):
    mapreduce_api.old_get_mapreduce_api = mapreduce_api.get_mapreduce_api
  if not hasattr(history_server_api, 'old_get_history_server_api'):
    history_server_api.old_get_history_server_api = history_server_api.get_history_server_api

  self.c = make_logged_in_client(is_superuser=False)
  grant_access("test", "test", "jobbrowser")
  self.user = User.objects.get(username='******')

  self.c2 = make_logged_in_client(is_superuser=False, username="******")
  grant_access("test2", "test2", "jobbrowser")
  self.user2 = User.objects.get(username='******')

  # Replace the factories with mock implementations for the tests.
  resource_manager_api.get_resource_manager = lambda user: MockResourceManagerApi(user)
  mapreduce_api.get_mapreduce_api = lambda: MockMapreduceApi()
  history_server_api.get_history_server_api = lambda: HistoryServerApi()

  # Config overrides to undo in tearDown.
  self.finish = [
      YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True),
      SHARE_JOBS.set_for_testing(False)
  ]
  assert_true(cluster.is_yarn())
def test_multi_fs_selection():
  """Verify ProxyFS dispatch for two-argument operations (copy/rename)."""
  try:
    from mock import MagicMock
  except ImportError:
    raise SkipTest("Skips until HUE-2947 is resolved")

  make_logged_in_client(username='******', groupname='default', recreate=True, is_superuser=False)
  user = User.objects.get(username='******')
  add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')

  mock_s3, mock_hdfs = MagicMock(), MagicMock()
  fs = ProxyFS({'s3a': mock_s3, 'hdfs': mock_hdfs}, 'hdfs')
  fs.setuser(user)

  # Both arguments on S3 -> routed to the S3 backend.
  fs.copy('s3a://bucket1/key', 's3a://bucket2/key')
  mock_s3.copy.assert_called_once_with('s3a://bucket1/key', 's3a://bucket2/key')
  assert_false(mock_hdfs.copy.called)

  # Source scheme decides the backend when the destination is relative.
  fs.copyfile('s3a://bucket/key', 'key2')
  mock_s3.copyfile.assert_called_once_with('s3a://bucket/key', 'key2')
  assert_false(mock_hdfs.copyfile.called)

  # Scheme-less paths fall back to the default scheme ('hdfs').
  fs.rename('/tmp/file', 'shmile')
  mock_hdfs.rename.assert_called_once_with('/tmp/file', 'shmile')
  assert_false(mock_s3.rename.called)

  # Will be addressed in HUE-2934
  assert_raises(NotImplementedError, fs.copy_remote_dir, 's3a://bucket/key', '/tmp/dir')
def test_list_for_autocomplete():
  """Autocomplete must only expose users sharing a group with the caller,
  and only the caller's own group."""
  c1 = make_logged_in_client('test_list_for_autocomplete', is_superuser=False, groupname='test_list_for_autocomplete')
  c2_same_group = make_logged_in_client('test_list_for_autocomplete2', is_superuser=False, groupname='test_list_for_autocomplete')
  c3_other_group = make_logged_in_client('test_list_for_autocomplete3', is_superuser=False, groupname='test_list_for_autocomplete_other_group')

  def fetch(client):
    # The view is AJAX-only, hence the X-Requested-With header.
    resp = client.get(reverse('useradmin.views.list_for_autocomplete'), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    payload = json.loads(resp.content)
    return ([entry['username'] for entry in payload['users']],
            [entry['name'] for entry in payload['groups']])

  # c1 shares a group with c2: sees c2 and its own group.
  users, groups = fetch(c1)
  assert_equal(['test_list_for_autocomplete2'], users)
  assert_equal(['test_list_for_autocomplete'], groups)

  # Symmetrically, c2 sees c1.
  users, groups = fetch(c2_same_group)
  assert_equal(['test_list_for_autocomplete'], users)
  assert_equal(['test_list_for_autocomplete'], groups)

  # c3 shares a group with nobody: no users, only its own group.
  users, groups = fetch(c3_other_group)
  assert_equal([], users)
  assert_equal(['test_list_for_autocomplete_other_group'], groups)
def get_shared_beeswax_server(db_name='default'):
  """Lazily start, once per process, a HiveServer2 shared across tests.

  Populates the module-level _SHARED_HIVE_SERVER and
  _SHARED_HIVE_SERVER_CLOSER globals.  NOTE(review): nothing is returned
  here and db_name is unused in this body -- presumably callers read the
  globals; confirm against the rest of the module.
  """
  global _SHARED_HIVE_SERVER
  global _SHARED_HIVE_SERVER_CLOSER

  if _SHARED_HIVE_SERVER is None:
    cluster = pseudo_hdfs4.shared_cluster()

    if is_live_cluster():
      # Live cluster: nothing was started here, so the closer is a no-op.
      def s():
        pass
    else:
      # Mini cluster: boot a local HS2; s is the shutdown callback.
      s = _start_mini_hs2(cluster)

    start = time.time()
    started = False
    sleep = 1

    make_logged_in_client()
    user = User.objects.get(username='******')
    query_server = get_query_server_config()
    db = dbms.get(user, query_server)

    # Poll for up to 30 seconds until a session can be opened.
    while not started and time.time() - start <= 30:
      try:
        db.open_session(user)
        started = True
        break
      except Exception, e:
        LOG.info('HiveServer2 server could not be found after: %s' % e)
        time.sleep(sleep)

    if not started:
      raise Exception("Server took too long to come up.")

    _SHARED_HIVE_SERVER, _SHARED_HIVE_SERVER_CLOSER = cluster, s
def test_chown():
  """Exercise /filebrowser/chown as the HDFS superuser and as a plain user,
  including the '__other__' free-text owner field."""
  cluster = mini_cluster.shared_cluster(conf=True)
  try:
    # Only the Hadoop superuser really has carte blanche here
    client = make_logged_in_client(cluster.superuser)
    cluster.fs.setuser(cluster.superuser)

    PATH = u"/test-chown-en-Español"
    cluster.fs.mkdir(PATH)

    client.post("/filebrowser/chown", dict(path=PATH, user="******", group="y"))
    assert_equal("x", cluster.fs.stats(PATH)["user"])
    assert_equal("y", cluster.fs.stats(PATH)["group"])

    # The free-text 'user_other' field takes precedence.
    client.post("/filebrowser/chown", dict(path=PATH, user="******", user_other="z", group="y"))
    assert_equal("z", cluster.fs.stats(PATH)["user"])

    # Make sure that the regular user chown form doesn't have useless fields,
    # and that the superuser's form has all the fields it could dream of.
    PATH = '/filebrowser/chown-regular-user'
    cluster.fs.mkdir(PATH)
    cluster.fs.chown(PATH, 'chown_test', 'chown_test')

    response = client.get('/filebrowser/chown', dict(path=PATH, user='******', group='chown_test'))
    assert_true('<option value="__other__"' in response.content)

    client = make_logged_in_client('chown_test')
    response = client.get('/filebrowser/chown', dict(path=PATH, user='******', group='chown_test'))
    assert_false('<option value="__other__"' in response.content)
  finally:
    cluster.shutdown()
def test_group_admin():
  """End-to-end test of group CRUD in Useradmin, plus superuser-only checks."""
  reset_all_users()
  reset_all_groups()

  c = make_logged_in_client(username="******", is_superuser=True)
  response = c.get('/useradmin/groups')
  # No groups just yet
  assert_true(len(response.context["groups"]) == 0)
  assert_true("Hue Groups" in response.content)

  # Create a group
  response = c.get('/useradmin/groups/new')
  assert_equal('/useradmin/groups/new', response.context['action'])
  c.post('/useradmin/groups/new', dict(name="testgroup"))

  # We should have an empty group in the DB now
  assert_true(len(Group.objects.all()) == 1)
  assert_true(Group.objects.filter(name="testgroup").exists())
  assert_true(len(Group.objects.get(name="testgroup").user_set.all()) == 0)

  # And now, just for kicks, let's try adding a user
  response = c.post('/useradmin/groups/edit/testgroup',
                    dict(name="testgroup",
                         members=[User.objects.get(username="******").pk],
                         save="Save"),
                    follow=True)
  assert_true(len(Group.objects.get(name="testgroup").user_set.all()) == 1)
  assert_true(Group.objects.get(name="testgroup").user_set.filter(username="******").exists())

  # Test some permissions
  c2 = make_logged_in_client(username="******", is_superuser=False)

  # Need to give access to the user for the rest of the test
  group = Group.objects.create(name="access-group")
  perm = HuePermission.objects.get(app='useradmin', action='access')
  GroupPermission.objects.create(group=group, hue_permission=perm)
  test_user = User.objects.get(username="******")
  test_user.groups.add(Group.objects.get(name="access-group"))
  test_user.save()

  # Make sure non-superusers can't do bad things
  response = c2.get('/useradmin/groups/new')
  assert_true("You must be a superuser" in response.content)
  response = c2.get('/useradmin/groups/edit/testgroup')
  assert_true("You must be a superuser" in response.content)

  response = c2.post('/useradmin/groups/new', dict(name="nonsuperuser"))
  assert_true("You must be a superuser" in response.content)
  response = c2.post('/useradmin/groups/edit/testgroup',
                     dict(name="nonsuperuser",
                          members=[User.objects.get(username="******").pk],
                          save="Save"),
                     follow=True)
  assert_true("You must be a superuser" in response.content)

  # Should be one group left, because we created the other group
  response = c.post('/useradmin/groups/delete', {'group_names': ['testgroup']})
  assert_true(len(Group.objects.all()) == 1)

  # Group names containing spaces are accepted.
  group_count = len(Group.objects.all())
  response = c.post('/useradmin/groups/new', dict(name="with space"))
  assert_equal(len(Group.objects.all()), group_count + 1)
def test_dump_config():
  """Smoke-test desktop.views.dump_config: config overrides show up, private
  mode shows more, passwords stay hidden, non-superusers are rejected, and
  the active HUE_CONF_DIR is displayed."""
  c = make_logged_in_client()

  CANARY = "abracadabra"

  # Depending on the order of the conf.initialize() in settings, the set_for_testing() are not seen in the global settings variable
  clear = HIVE_SERVER_HOST.set_for_testing(CANARY)

  response1 = c.get(reverse('desktop.views.dump_config'))
  assert_true(CANARY in response1.content, response1.content)

  response2 = c.get(reverse('desktop.views.dump_config'), dict(private="true"))
  assert_true(CANARY in response2.content)

  # There are more private variables...
  assert_true(len(response1.content) < len(response2.content))

  clear()

  # A regex-looking value must render verbatim.
  CANARY = "(localhost|127\.0\.0\.1):(50030|50070|50060|50075)"
  clear = proxy.conf.WHITELIST.set_for_testing(CANARY)

  response1 = c.get(reverse('desktop.views.dump_config'))
  assert_true(CANARY in response1.content)
  clear()

  # Malformed port per HUE-674
  CANARY = "asdfoijaoidfjaosdjffjfjaoojosjfiojdosjoidjfoa"
  clear = HIVE_SERVER_HOST.set_for_testing(CANARY)

  response1 = c.get(reverse('desktop.views.dump_config'))
  assert_true(CANARY in response1.content, response1.content)
  clear()

  # Non-ASCII values must render without crashing.
  CANARY = '/tmp/spacé.dat'
  finish = proxy.conf.WHITELIST.set_for_testing(CANARY)
  try:
    response = c.get(reverse('desktop.views.dump_config'))
    assert_true(CANARY in response.content, response.content)
  finally:
    finish()

  # Not showing some passwords
  response = c.get(reverse('desktop.views.dump_config'))
  assert_false('bind_password' in response.content)

  # Login as someone else
  client_not_me = make_logged_in_client(username='******', is_superuser=False, groupname='test')
  grant_access("not_me", "test", "desktop")

  response = client_not_me.get(reverse('desktop.views.dump_config'))
  assert_true("You must be a superuser" in response.content, response.content)

  # The page reports which conf dir is in effect.
  os.environ["HUE_CONF_DIR"] = "/tmp/test_hue_conf_dir"
  resp = c.get(reverse('desktop.views.dump_config'))
  del os.environ["HUE_CONF_DIR"]
  assert_true('/tmp/test_hue_conf_dir' in resp.content, resp)
def test_group_permissions():
  """Grant and revoke an app permission through a group, verifying access
  before, during and after."""
  reset_all_users()
  reset_all_groups()

  # Get ourselves set up with a user and a group
  c = make_logged_in_client(username="******", is_superuser=True)
  Group.objects.create(name="test-group")
  test_user = User.objects.get(username="******")
  test_user.groups.add(Group.objects.get(name="test-group"))
  test_user.save()

  # Make sure that a superuser can always access applications
  response = c.get("/useradmin/users")
  assert_true("Hue Users" in response.content)

  # Attach the useradmin 'access' permission to the group.
  assert_true(len(GroupPermission.objects.all()) == 0)
  c.post(
      "/useradmin/groups/edit/test-group",
      dict(
          name="test-group",
          members=[User.objects.get(username="******").pk],
          permissions=[HuePermission.objects.get(app="useradmin", action="access").pk],
          save="Save",
      ),
      follow=True,
  )
  assert_true(len(GroupPermission.objects.all()) == 1)

  # Now test that we have limited access
  c1 = make_logged_in_client(username="******", is_superuser=False)
  response = c1.get("/useradmin/users")
  assert_true("You do not have permission to access the Useradmin application." in response.content)

  # Add the non-admin to a group that should grant permissions to the app
  test_user = User.objects.get(username="******")
  test_user.groups.add(Group.objects.get(name="test-group"))
  test_user.save()

  # Check that we have access now
  response = c1.get("/useradmin/users")
  assert_true(get_profile(test_user).has_hue_permission("access", "useradmin"))
  assert_true("Hue Users" in response.content)

  # Make sure we can't modify permissions
  response = c1.get("/useradmin/permissions/edit/useradmin/access")
  assert_true("must be a superuser to change permissions" in response.content)

  # And revoke access from the group
  c.post(
      "/useradmin/permissions/edit/useradmin/access",
      dict(app="useradmin", priv="access", groups=[], save="Save"),
      follow=True,
  )
  assert_true(len(GroupPermission.objects.all()) == 0)
  assert_false(get_profile(test_user).has_hue_permission("access", "useradmin"))

  # We should no longer have access to the app
  response = c1.get("/useradmin/users")
  assert_true("You do not have permission to access the Useradmin application." in response.content)
def test_upload_file():
  """Test file upload"""
  # Uploads this very source file into HDFS, then checks duplicate-name and
  # permission failure paths.
  cluster = pseudo_hdfs4.shared_cluster()

  try:
    USER_NAME = 'test'
    HDFS_DEST_DIR = "/tmp/fb-upload-test"
    LOCAL_FILE = __file__
    HDFS_FILE = HDFS_DEST_DIR + '/' + os.path.basename(__file__)

    cluster.fs.setuser(USER_NAME)
    client = make_logged_in_client(USER_NAME)

    # Destination owned by, and only accessible to, USER_NAME (mode 0700).
    cluster.fs.do_as_superuser(cluster.fs.mkdir, HDFS_DEST_DIR)
    cluster.fs.do_as_superuser(cluster.fs.chown, HDFS_DEST_DIR, USER_NAME, USER_NAME)
    cluster.fs.do_as_superuser(cluster.fs.chmod, HDFS_DEST_DIR, 0700)

    stats = cluster.fs.stats(HDFS_DEST_DIR)
    assert_equal(stats['user'], USER_NAME)
    assert_equal(stats['group'], USER_NAME)

    # Just upload the current python file
    resp = client.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,  # GET param avoids infinite looping
                       dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
    response = json.loads(resp.content)

    assert_equal(0, response['status'], response)
    stats = cluster.fs.stats(HDFS_FILE)
    assert_equal(stats['user'], USER_NAME)
    assert_equal(stats['group'], USER_NAME)

    # Uploaded content must match the local file byte-for-byte.
    f = cluster.fs.open(HDFS_FILE)
    actual = f.read()
    expected = file(LOCAL_FILE).read()
    assert_equal(actual, expected)

    # Upload again and so fails because file already exits
    resp = client.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
                       dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
    response = json.loads(resp.content)
    assert_equal(-1, response['status'], response)
    assert_true('already exists' in response['data'], response)

    # Upload in / and fails because of missing permissions
    not_me = make_logged_in_client("not_me", is_superuser=False)
    grant_access("not_me", "not_me", "filebrowser")
    try:
      resp = not_me.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
                         dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
      response = json.loads(resp.content)
      assert_equal(-1, response['status'], response)
      assert_true('Permission denied' in response['data'], response)
    except AttributeError:
      # Seems like a Django bug.
      # StopFutureHandlers() does not seem to work in test mode as it continues to MemoryFileUploadHandler after perm issue and so fails.
      pass
  finally:
    cleanup_file(cluster, HDFS_DEST_DIR)
def setUp(self):
  """Create two independent non-superusers, each with desktop access."""
  # Primary test user.
  self.client = make_logged_in_client(username="******", recreate=True, is_superuser=False)
  self.user = User.objects.get(username="******")
  grant_access(self.user.username, self.user.username, "desktop")

  # A second, unrelated user for permission-boundary tests.
  self.client_not_me = make_logged_in_client(username="******", recreate=True, is_superuser=False)
  self.user_not_me = User.objects.get(username="******")
  grant_access(self.user_not_me.username, self.user_not_me.username, "desktop")
def setUp(self):
  """Log in two users and grant each access to the desktop app."""
  # First user.
  self.client = make_logged_in_client(username="******")
  self.user = User.objects.get(username="******")
  grant_access(self.user.username, self.user.username, "desktop")

  # Second user, used to test cross-user behavior.
  self.client_not_me = make_logged_in_client(username="******")
  self.user_not_me = User.objects.get(username="******")
  grant_access(self.user_not_me.username, self.user_not_me.username, "desktop")
def setUp(self):
  """One superuser, one regular user, and a group containing the regular user."""
  # Admin side.
  self.client = make_logged_in_client(username="******", groupname="hue_test_admin", recreate=True, is_superuser=True)
  self.user = User.objects.get(username="******")

  # Non-admin side, placed into a dedicated test group.
  self.non_superuser_client = make_logged_in_client(username="******", groupname="hue_test_user", recreate=True, is_superuser=False)
  self.non_superuser = User.objects.get(username="******")

  self.test_group = Group.objects.create(name="hue_test_group")
  self.non_superuser.groups.add(self.test_group)
  self.non_superuser.save()
def setUp(self):
  """An admin client and a regular-user client sharing the default group."""
  # Admin user with desktop access.
  self.client = make_logged_in_client(username="******", groupname="default", recreate=False, is_superuser=True)
  self.admin = User.objects.get(username="******")
  grant_access(self.admin.username, self.admin.username, "desktop")

  # Regular user with desktop access.
  self.client_user = make_logged_in_client(username="******", groupname="default", recreate=False, is_superuser=False)
  self.user = User.objects.get(username="******")
  grant_access(self.user.username, self.user.username, "desktop")

  self.group = get_default_user_group()
def setUp(self):
  """Two non-superusers with desktop access; wipe leftover Pig documents."""
  # Owner of the scripts under test.
  self.client = make_logged_in_client(username="******", groupname="default", recreate=True, is_superuser=False)
  self.user = User.objects.get(username="******")
  grant_access(self.user.username, self.user.username, "desktop")

  # A second user who should not see the owner's documents.
  self.client_not_me = make_logged_in_client(username="******", groupname="default", recreate=True, is_superuser=False)
  self.user_not_me = User.objects.get(username="******")
  grant_access(self.user_not_me.username, self.user_not_me.username, "desktop")

  # Start from a clean slate for this user's scripts and documents.
  PigScript.objects.filter(owner=self.user).delete()
  Document.objects.filter(owner=self.user).delete()
def test_dump_config():
  """Older variant of the dump_config smoke test using hard-coded URLs and
  desktop HTTP_HOST/HTTP_PORT overrides."""
  c = make_logged_in_client()

  CANARY = "abracadabra"
  clear = desktop.conf.HTTP_HOST.set_for_testing(CANARY)

  response1 = c.get('/dump_config')
  assert_true(CANARY in response1.content)

  response2 = c.get('/dump_config', dict(private="true"))
  assert_true(CANARY in response2.content)

  # There are more private variables...
  assert_true(len(response1.content) < len(response2.content))

  clear()

  # A regex-looking value must render verbatim.
  CANARY = "(localhost|127\.0\.0\.1):(50030|50070|50060|50075)"
  clear = proxy.conf.WHITELIST.set_for_testing(CANARY)

  response1 = c.get('/dump_config')
  assert_true(CANARY in response1.content)

  clear()

  # Malformed port per HUE-674
  CANARY = "asdfoijaoidfjaosdjffjfjaoojosjfiojdosjoidjfoa"
  clear = desktop.conf.HTTP_PORT.set_for_testing(CANARY)

  response1 = c.get('/dump_config')
  assert_true(CANARY in response1.content, response1.content)

  clear()

  # CANARY = '/tmp/space.dat'
  # finish = proxy.conf.WHITELIST.set_for_testing(CANARY)
  # try:
  #   response = c.get('/dump_config')
  #   assert_true(CANARY in response.content, response.content)
  # finally:
  #   finish()

  # Login as someone else
  client_not_me = make_logged_in_client(username='******', is_superuser=False, groupname='test')
  grant_access("not_me", "test", "desktop")

  response = client_not_me.get('/dump_config')
  assert_equal("You must be a superuser.", response.content)

  # The page reports which conf dir is in effect.
  os.environ["HUE_CONF_DIR"] = "/tmp/test_hue_conf_dir"
  resp = c.get('/dump_config')
  del os.environ["HUE_CONF_DIR"]
  assert_true('/tmp/test_hue_conf_dir' in resp.content, resp)
def test_upload_file():
  """Test file upload"""
  # Older variant: uploads this source file, then checks duplicate and
  # permission failure paths with a second, unprivileged user.
  cluster = pseudo_hdfs4.shared_cluster()

  try:
    USER_NAME = 'test'
    USER_NAME_NOT_ME = 'not_me'
    HDFS_DEST_DIR = "/tmp/fb-upload-test"
    LOCAL_FILE = __file__
    HDFS_FILE = HDFS_DEST_DIR + '/' + os.path.basename(__file__)

    cluster.fs.setuser(USER_NAME)
    client = make_logged_in_client(USER_NAME)

    client_not_me = make_logged_in_client(username=USER_NAME_NOT_ME, is_superuser=False, groupname='test')
    grant_access(USER_NAME_NOT_ME, "test", "filebrowser")

    # Destination readable/writable only by USER_NAME (mode 0700).
    cluster.fs.do_as_superuser(cluster.fs.mkdir, HDFS_DEST_DIR)
    cluster.fs.do_as_superuser(cluster.fs.chown, HDFS_DEST_DIR, USER_NAME, USER_NAME)
    cluster.fs.do_as_superuser(cluster.fs.chmod, HDFS_DEST_DIR, 0700)

    # Just upload the current python file
    resp = client.post('/filebrowser/upload/file',
                       dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
    response = json.loads(resp.content)

    assert_equal(0, response['status'], response)
    stats = cluster.fs.stats(HDFS_FILE)
    assert_equal(stats['user'], USER_NAME)
    assert_equal(stats['group'], USER_NAME)

    # Uploaded content must match the local file byte-for-byte.
    f = cluster.fs.open(HDFS_FILE)
    actual = f.read()
    expected = file(LOCAL_FILE).read()
    assert_equal(actual, expected)

    # Upload again and so fails because file already exits
    resp = client.post('/filebrowser/upload/file',
                       dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
    response = json.loads(resp.content)
    assert_equal(-1, response['status'], response)

    # Upload in tmp and fails because of missing permissions
    resp = client_not_me.post('/filebrowser/upload/file',
                              dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
    response = json.loads(resp.content)
    assert_equal(-1, response['status'], response)
  finally:
    try:
      cluster.fs.remove(HDFS_DEST_DIR)
    except Exception, ex:
      # Best-effort cleanup; the directory may already be gone.
      pass
def setUp(self):
  """Swap the Sentry API factory for a mock and prepare two security users."""
  # Save the real factory once so it can be restored later.
  if not hasattr(api, 'OriginalSentryApi'):
    api.OriginalSentryApi = api.get_api
  api.get_api = mocked_get_api

  self.client = make_logged_in_client(username='******', groupname='test', is_superuser=False)
  self.client_admin = make_logged_in_client(username='******', groupname='hue', is_superuser=False)

  grant_access("sentry_test", "test", "security")
  add_to_group("sentry_test")

  grant_access("sentry_hue", "hue", "security")
  add_to_group("sentry_hue")

  # These tests are currently disabled.
  raise SkipTest
def test_list_for_autocomplete(self):
  """With this configuration the autocomplete endpoint exposes every other
  user and every group, not just those sharing a group with the caller."""
  # Now the autocomplete has access to all the users and groups
  c1 = make_logged_in_client(
      "test_list_for_autocomplete", is_superuser=False, groupname="test_list_for_autocomplete"
  )
  c2_same_group = make_logged_in_client(
      "test_list_for_autocomplete2", is_superuser=False, groupname="test_list_for_autocomplete"
  )
  c3_other_group = make_logged_in_client(
      "test_list_for_autocomplete3", is_superuser=False, groupname="test_list_for_autocomplete_other_group"
  )

  # c1 is in the same group as c2
  response = c1.get(reverse("useradmin.views.list_for_autocomplete"), HTTP_X_REQUESTED_WITH="XMLHttpRequest")
  content = json.loads(response.content)

  users = [smart_unicode(user["username"]) for user in content["users"]]
  groups = [smart_unicode(user["name"]) for user in content["groups"]]

  # All other users and both groups are visible.
  assert_equal([u"test_list_for_autocomplete2", u"test_list_for_autocomplete3"], users)
  assert_true(u"test_list_for_autocomplete" in groups, groups)
  assert_true(u"test_list_for_autocomplete_other_group" in groups, groups)

  # c2 is in the same group as c1
  response = c2_same_group.get(
      reverse("useradmin.views.list_for_autocomplete"), HTTP_X_REQUESTED_WITH="XMLHttpRequest"
  )
  content = json.loads(response.content)

  users = [smart_unicode(user["username"]) for user in content["users"]]
  groups = [smart_unicode(user["name"]) for user in content["groups"]]

  assert_equal([u"test_list_for_autocomplete", u"test_list_for_autocomplete3"], users)
  assert_true(u"test_list_for_autocomplete" in groups, groups)
  assert_true(u"test_list_for_autocomplete_other_group" in groups, groups)

  # c3 is alone except for groups
  response = c3_other_group.get(
      reverse("useradmin.views.list_for_autocomplete"), HTTP_X_REQUESTED_WITH="XMLHttpRequest"
  )
  content = json.loads(response.content)

  users = [smart_unicode(user["username"]) for user in content["users"]]
  groups = [smart_unicode(user["name"]) for user in content["groups"]]

  assert_equal([u"test_list_for_autocomplete", u"test_list_for_autocomplete2"], users)
  assert_true(u"test_list_for_autocomplete" in groups, groups)
  assert_true(u"test_list_for_autocomplete_other_group" in groups, groups)
def test_ensure_safe_redirect_middleware():
  """EnsureSafeRedirectURLMiddleware must only allow redirects whose target
  matches a pattern in REDIRECT_WHITELIST."""
  resets = []
  settings.MIDDLEWARE_CLASSES.append('desktop.middleware.EnsureSafeRedirectURLMiddleware')
  try:
    # Super user
    client = make_logged_in_client()

    # A plain GET is unaffected by the middleware.
    response = client.get("/useradmin/")
    assert_equal(200, response.status_code)

    # A whitelist that matches nothing blocks the redirect with a 403.
    resets.append(desktop.conf.REDIRECT_WHITELIST.set_for_testing('^\d+$'))
    response = client.get("")
    assert_equal(403, response.status_code)

    # A catch-all whitelist lets the redirect through.
    resets.append(desktop.conf.REDIRECT_WHITELIST.set_for_testing('.*'))
    response = client.get("")
    assert_equal(302, response.status_code)

    # Mixed list: one matching pattern suffices (logical OR).
    resets.append(desktop.conf.REDIRECT_WHITELIST.set_for_testing('\d+,.*'))
    response = client.get("")
    assert_equal(302, response.status_code)
  finally:
    settings.MIDDLEWARE_CLASSES.pop()
    for restore in resets:
      restore()
def setup_class(cls):
  """Bring up Oozie, create a test user and workspace, install the examples,
  and run one sleep workflow so tests have a finished Hadoop job to inspect."""
  OozieServerProvider.setup_class()

  # NOTE(review): cls.username literal is redacted ('******') in this copy
  # of the source -- recover the real value from version control.
  cls.username = '******'
  cls.home_dir = '/user/%s' % cls.username
  cls.cluster.fs.do_as_user(cls.username, cls.cluster.fs.create_home_dir, cls.home_dir)

  cls.client = make_logged_in_client(username=cls.username, is_superuser=False, groupname='test')
  cls.user = User.objects.get(username=cls.username)
  grant_access(cls.username, 'test', 'jobsub')
  grant_access(cls.username, 'test', 'jobbrowser')
  grant_access(cls.username, 'test', 'oozie')
  add_to_group(cls.username)

  # Run filesystem operations as the test user from here on.
  cls.prev_user = cls.cluster.fs.user
  cls.cluster.fs.setuser(cls.username)

  cls.install_examples()
  cls.design = cls.create_design()

  # Run the sleep example, since it doesn't require user home directory
  design_id = cls.design.id
  response = cls.client.post(reverse('oozie:submit_workflow', args=[design_id]),
                             data={u'form-MAX_NUM_FORMS': [u''],
                                   u'form-INITIAL_FORMS': [u'1'],
                                   u'form-0-name': [u'REDUCER_SLEEP_TIME'],
                                   u'form-0-value': [u'1'],
                                   u'form-TOTAL_FORMS': [u'1']},
                             follow=True)
  oozie_jobid = response.context['oozie_workflow'].id
  OozieServerProvider.wait_until_completion(oozie_jobid)

  # Resolve the Hadoop job id (and its short form) spawned by the workflow.
  cls.hadoop_job_id = get_hadoop_job_id(cls.oozie, oozie_jobid, 1)
  cls.hadoop_job_id_short = views.get_shorter_id(cls.hadoop_job_id)
def test_404_handling():
  """A missing URL must render the custom 404 page and echo the path."""
  missing_url = '/the-view-that-is-not-there'
  client = make_logged_in_client()

  response = client.get(missing_url)
  assert_true('404.mako' in response.template)
  assert_true('Not Found' in response.content)
  # The requested path is echoed back in the error page.
  assert_true(missing_url in response.content)
def test_prefs():
  """CRUD round-trip of user preferences through the /prefs/ endpoints."""
  client = make_logged_in_client()

  # Get everything
  response = client.get('/prefs/')
  assert_equal('{}', response.content)

  # Set and get
  response = client.get('/prefs/foo', dict(set="bar"))
  assert_equal('true', response.content)
  response = client.get('/prefs/foo')
  assert_equal('"bar"', response.content)

  # Reset (use post this time)
  client.post('/prefs/foo', dict(set="baz"))
  response = client.get('/prefs/foo')
  assert_equal('"baz"', response.content)

  # Check multiple values
  client.post('/prefs/elephant', dict(set="room"))
  response = client.get('/prefs/')
  assert_true("baz" in response.content)
  assert_true("room" in response.content)

  # Delete everything
  client.get('/prefs/elephant', dict(delete=""))
  client.get('/prefs/foo', dict(delete=""))
  response = client.get('/prefs/')
  assert_equal('{}', response.content)

  # Check non-existent value
  response = client.get('/prefs/doesNotExist')
  assert_equal('null', response.content)
# NOTE(review): this method appears to have been mangled by an automated
# credential-redaction pass: several string literals around "user=" were
# collapsed into '******', fusing multiple statements together so the code
# below is unlikely to parse as-is.  It is preserved verbatim; recover the
# original from version control before editing or reformatting it.
def test_job_permissions(self): # Login as ourself finish = SHARE_JOBS.set_for_testing(True) try: response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/?format=json&user='******'/jobbrowser/jobs/?format=json&user='******'not_me', is_superuser=False, groupname='test') grant_access("not_me", "test", "jobbrowser") finish = SHARE_JOBS.set_for_testing(True) try: response = client_not_me.get('/jobbrowser/jobs/?format=json&user='******'/jobbrowser/jobs/?format=json&user=') assert_false(TestJobBrowserWithHadoop.hadoop_job_id_short in response.content) finally: finish()
def test_ensure_home_directory():
  """Home directories are created only when 'ensure_home_directory' is set,
  both at user creation time and on a later edit."""
  reset_all_users()
  reset_all_groups()

  # Cluster and client for home directory creation
  cluster = pseudo_hdfs4.shared_cluster()
  c = make_logged_in_client(cluster.superuser, is_superuser=True, groupname="test1")
  cluster.fs.setuser(cluster.superuser)

  def check_owned_home(path, name):
    # The directory must exist, be owned by the user/group, with mode 0755.
    assert_true(cluster.fs.exists(path))
    dir_stat = cluster.fs.stats(path)
    assert_equal(name, dir_stat.user)
    assert_equal(name, dir_stat.group)
    assert_equal("40755", "%o" % dir_stat.mode)

  # Create a user with a home directory
  assert_false(cluster.fs.exists("/user/test1"))
  response = c.post(
      "/useradmin/users/new",
      dict(username="******", password1="test", password2="test", ensure_home_directory=True)
  )
  check_owned_home("/user/test1", "test1")

  # Create a user, then add their home directory
  assert_false(cluster.fs.exists("/user/test2"))
  response = c.post("/useradmin/users/new", dict(username="******", password1="test", password2="test"))
  assert_false(cluster.fs.exists("/user/test2"))
  response = c.post(
      "/useradmin/users/edit/%s" % "test2",
      dict(username="******", password1="test", password2="test", ensure_home_directory=True),
  )
  check_owned_home("/user/test2", "test2")
def test_log_event():
  """/log_frontend_event logs untrusted client messages at the requested
  level (default INFO) and truncates over-long messages."""
  client = make_logged_in_client()

  frontend_logger = logging.getLogger("desktop.views.log_frontend_event")
  handler = RecordingHandler()
  frontend_logger.addHandler(handler)

  def last():
    return handler.records[-1]

  client.get("/log_frontend_event?level=info&message=foo")
  assert_equal("INFO", last().levelname)
  assert_equal("Untrusted log event from user test: foo", last().message)
  assert_equal("desktop.views.log_frontend_event", last().name)

  client.get("/log_frontend_event?level=error&message=foo2")
  assert_equal("ERROR", last().levelname)
  assert_equal("Untrusted log event from user test: foo2", last().message)

  # Level defaults to INFO when omitted.
  client.get("/log_frontend_event?message=foo3")
  assert_equal("INFO", last().levelname)
  assert_equal("Untrusted log event from user test: foo3", last().message)

  # An 8 KiB payload is truncated to 1024 characters (128 repeats of 8 chars).
  client.post("/log_frontend_event", {"message": "01234567" * 1024})
  assert_equal("INFO", last().levelname)
  assert_equal("Untrusted log event from user test: " + "01234567" * 128,
               last().message)

  frontend_logger.removeHandler(handler)
def test_ensure_home_directory_add_ldap_user():
    """Importing a single LDAP user creates an HDFS home dir only when
    ensure_home_directory is requested; invalid usernames are rejected."""
    URL = reverse(add_ldap_user)

    reset_all_users()
    reset_all_groups()

    # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
    ldap_access.CACHED_LDAP_CONN = LdapTestConnection()

    cluster = pseudo_hdfs4.shared_cluster()
    c = make_logged_in_client(cluster.superuser, is_superuser=True)
    cluster.fs.setuser(cluster.superuser)

    assert_true(c.get(URL))
    # Import without home directory creation.
    response = c.post(URL, dict(username="******", password1="test", password2="test"))
    assert_true("/useradmin/users" in response["Location"])
    assert_false(cluster.fs.exists("/user/moe"))

    # Try same thing with home directory creation.
    response = c.post(URL, dict(username="******", password1="test", password2="test", ensure_home_directory=True))
    assert_true("/useradmin/users" in response["Location"])
    assert_true(cluster.fs.exists("/user/curly"))

    # Invalid username: form error, and no directory is created.
    response = c.post(URL, dict(username="******", password1="test", password2="test"))
    assert_true("Could not" in response.context["form"].errors["username"][0])
    assert_false(cluster.fs.exists("/user/bad_name"))

    # See if moe, who did not ask for his home directory, has a home directory.
    assert_false(cluster.fs.exists("/user/moe"))

    # Clean up
    cluster.fs.rmtree("/user/curly")
def test_ensure_home_directory_sync_ldap_users_groups():
    """Syncing LDAP users/groups with ensure_home_directory should create
    home dirs for previously-imported users that lack one."""
    URL = reverse(sync_ldap_users_groups)

    reset_all_users()
    reset_all_groups()

    # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
    ldap_access.CACHED_LDAP_CONN = LdapTestConnection()

    cluster = pseudo_hdfs4.shared_cluster()
    c = make_logged_in_client(cluster.superuser, is_superuser=True)
    cluster.fs.setuser(cluster.superuser)

    reset = []
    # Set to nonsensical value just to force new config usage.
    # Should continue to use cached connection.
    reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
    try:
        # Import the user first without a home dir, then sync with creation enabled.
        c.post(reverse(add_ldap_users), dict(server='nonsense', username_pattern='curly', password1='test', password2='test'))
        assert_false(cluster.fs.exists('/user/curly'))
        assert_true(c.post(URL, dict(server='nonsense', ensure_home_directory=True)))
        assert_true(cluster.fs.exists('/user/curly'))
    finally:
        for finish in reset:
            finish()
        if cluster.fs.exists('/user/curly'):
            cluster.fs.rmtree('/user/curly')
def test_end_to_end(self):
    """End-to-end indexing: guess the format of an uploaded CSV, build a
    morphline config, create the Solr collection, and run the indexer.

    NOTE(review): the `or True` below makes this test always skip; the
    inline comment says morphline libs are required to run it.
    """
    if not is_live_cluster() or True:  # Skipping as requires morplines libs to be setup
        raise SkipTest()

    cluster = shared_cluster()
    fs = cluster.fs
    make_logged_in_client(username="******", groupname="default", recreate=True, is_superuser=False)
    user = User.objects.get(username="******")
    collection_name = "test_collection"
    indexer = MorphlineIndexer("test", fs=fs, jt=cluster.jt, solr_client=self.solr_client)
    input_loc = "/tmp/test.csv"

    # upload the test file to hdfs
    fs.create(input_loc, data=TestIndexer.simpleCSVString, overwrite=True)

    # open a filestream for the file on hdfs
    stream = fs.open(input_loc)

    # guess the format of the file
    file_type_format = indexer.guess_format({'file': {"stream": stream, "name": "test.csv"}})
    field_types = indexer.guess_field_types({"file": {"stream": stream, "name": "test.csv"}, "format": file_type_format})

    format_ = field_types.copy()
    format_['format'] = file_type_format

    # find a field name available to use for the record's uuid
    unique_field = indexer.get_unique_field(format_)
    is_unique_generated = indexer.is_unique_generated(format_)

    # generate morphline
    morphline = indexer.generate_morphline_config(collection_name, format_, unique_field)

    schema_fields = indexer.get_kept_field_list(format_['columns'])
    if is_unique_generated:
        # Add the generated uuid field to the schema since it is not a CSV column.
        schema_fields += [{"name": unique_field, "type": "string"}]

    # create the collection from the specified fields
    collection_manager = CollectionManagerController("test")
    if collection_manager.collection_exists(collection_name):
        collection_manager.delete_collection(collection_name, None)
    collection_manager.create_collection(collection_name, schema_fields, unique_key_field=unique_field)

    # index the file
    indexer.run_morphline(MockedRequest(user=user, fs=cluster.fs, jt=cluster.jt), collection_name, morphline, input_loc)
def test_login(self):
    """Logging in as the same user twice in a row should succeed both times."""
    for _ in range(2):
        client = make_logged_in_client(username=self.user1.username)
def test_thread_dump():
    """The debug thread-dump page should contain this test's own stack frame."""
    response = make_logged_in_client().get("/desktop/debug/threads")
    assert_true("test_thread_dump" in response.content)
def setUp(self):
    """Log in a test client and whitelist local and example.com redirect targets."""
    self.client = make_logged_in_client()
    self.user = {'username': "******", 'password': "******"}
    whitelist = '^\/.*$,^http:\/\/example.com\/.*$'
    desktop.conf.REDIRECT_WHITELIST.set_for_testing(whitelist)
def test_ensure_home_directory_add_ldap_users(self): URL = reverse(add_ldap_users) # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection ldap_access.CACHED_LDAP_CONN = LdapTestConnection() cluster = pseudo_hdfs4.shared_cluster() c = make_logged_in_client(cluster.superuser, is_superuser=True) cluster.fs.setuser(cluster.superuser) reset = [] # Set to nonsensical value just to force new config usage. # Should continue to use cached connection. reset.append( desktop.conf.LDAP.LDAP_SERVERS.set_for_testing( get_nonsense_config())) try: assert_true(c.get(URL)) response = c.post( URL, dict(server='nonsense', username_pattern='moe', password1='test', password2='test')) assert_true('/useradmin/users' in response['Location']) assert_false(cluster.fs.exists('/user/moe')) # Try same thing with home directory creation. response = c.post( URL, dict(server='nonsense', username_pattern='curly', password1='test', password2='test', ensure_home_directory=True)) assert_true('/useradmin/users' in response['Location']) assert_true(cluster.fs.exists('/user/curly')) response = c.post( URL, dict(server='nonsense', username_pattern='bad_name', password1='test', password2='test')) assert_true('Could not' in response.context['form'].errors['username_pattern'][0]) assert_false(cluster.fs.exists('/user/bad_name')) # See if moe, who did not ask for his home directory, has a home directory. 
assert_false(cluster.fs.exists('/user/moe')) # Try wild card now response = c.post( URL, dict(server='nonsense', username_pattern='*rr*', password1='test', password2='test', ensure_home_directory=True)) assert_true('/useradmin/users' in response['Location']) assert_true(cluster.fs.exists('/user/curly')) assert_true(cluster.fs.exists(u'/user/lårry')) assert_false(cluster.fs.exists('/user/otherguy')) finally: # Clean up for finish in reset: finish() if cluster.fs.exists('/user/curly'): cluster.fs.rmtree('/user/curly') if cluster.fs.exists(u'/user/lårry'): cluster.fs.rmtree(u'/user/lårry') if cluster.fs.exists('/user/otherguy'): cluster.fs.rmtree('/user/otherguy')
def test_copy_files():
    """Submission._copy_files should copy workflow jars into the deployment
    lib/ dirs without moving the sources, and re-copy files on a second call
    (new fileIds) — except jars already located in the target lib dir."""
    cluster = pseudo_hdfs4.shared_cluster()

    try:
        c = make_logged_in_client()
        user = User.objects.get(username='******')

        prefix = '/tmp/test_copy_files'
        if cluster.fs.exists(prefix):
            cluster.fs.rmtree(prefix)

        # Jars in various locations
        deployment_dir = '%s/workspace' % prefix
        external_deployment_dir = '%s/deployment' % prefix
        jar_1 = '%s/udf1.jar' % prefix
        jar_2 = '%s/lib/udf2.jar' % prefix
        jar_3 = '%s/udf3.jar' % deployment_dir
        jar_4 = '%s/lib/udf4.jar' % deployment_dir  # Never move

        cluster.fs.mkdir(prefix)
        cluster.fs.create(jar_1)
        cluster.fs.create(jar_2)
        cluster.fs.create(jar_3)
        cluster.fs.create(jar_4)

        # Minimal stand-ins for workflow nodes/job carrying jar paths.
        class MockNode():
            def __init__(self, jar_path):
                self.jar_path = jar_path

        class MockJob():
            def __init__(self):
                self.node_list = [
                    MockNode(jar_1),
                    MockNode(jar_2),
                    MockNode(jar_3),
                    MockNode(jar_4),
                ]

            def get_application_filename(self):
                return 'workflow.xml'

        submission = Submission(user, job=MockJob(), fs=cluster.fs, jt=cluster.jt)
        submission._copy_files(deployment_dir, "<xml>My XML</xml>")
        submission._copy_files(external_deployment_dir, "<xml>My XML</xml>")

        # All sources still there
        assert_true(cluster.fs.exists(jar_1))
        assert_true(cluster.fs.exists(jar_2))
        assert_true(cluster.fs.exists(jar_3))
        assert_true(cluster.fs.exists(jar_4))

        # Destinations are the lib/ subdirs of each deployment dir.
        deployment_dir = deployment_dir + '/lib'
        external_deployment_dir = external_deployment_dir + '/lib'

        list_dir_workspace = cluster.fs.listdir(deployment_dir)
        list_dir_deployement = cluster.fs.listdir(external_deployment_dir)

        # All destinations there
        assert_true(cluster.fs.exists(deployment_dir + '/udf1.jar'), list_dir_workspace)
        assert_true(cluster.fs.exists(deployment_dir + '/udf2.jar'), list_dir_workspace)
        assert_true(cluster.fs.exists(deployment_dir + '/udf3.jar'), list_dir_workspace)
        assert_true(cluster.fs.exists(deployment_dir + '/udf4.jar'), list_dir_workspace)
        assert_true(cluster.fs.exists(external_deployment_dir + '/udf1.jar'), list_dir_deployement)
        assert_true(cluster.fs.exists(external_deployment_dir + '/udf2.jar'), list_dir_deployement)
        assert_true(cluster.fs.exists(external_deployment_dir + '/udf3.jar'), list_dir_deployement)
        assert_true(cluster.fs.exists(external_deployment_dir + '/udf4.jar'), list_dir_deployement)

        stats_udf1 = cluster.fs.stats(deployment_dir + '/udf1.jar')
        stats_udf2 = cluster.fs.stats(deployment_dir + '/udf2.jar')
        stats_udf3 = cluster.fs.stats(deployment_dir + '/udf3.jar')
        stats_udf4 = cluster.fs.stats(deployment_dir + '/udf4.jar')

        submission._copy_files('%s/workspace' % prefix, "<xml>My XML</xml>")

        # Re-copy replaces udf1-3 (new fileIds); udf4 already lived in the
        # target lib dir, so it is left untouched.
        assert_not_equal(stats_udf1['fileId'], cluster.fs.stats(deployment_dir + '/udf1.jar')['fileId'])
        assert_not_equal(stats_udf2['fileId'], cluster.fs.stats(deployment_dir + '/udf2.jar')['fileId'])
        assert_not_equal(stats_udf3['fileId'], cluster.fs.stats(deployment_dir + '/udf3.jar')['fileId'])
        assert_equal(stats_udf4['fileId'], cluster.fs.stats(deployment_dir + '/udf4.jar')['fileId'])
    finally:
        try:
            cluster.fs.rmtree(prefix)
        except:
            LOG.exception('failed to remove %s' % prefix)
def setUp(self):
    # Fresh authenticated test client for each test case.
    self.client = make_logged_in_client()
def setUp(self):
    """Recreate a non-superuser admin account and log a client in as it."""
    username = "******"
    self.client = make_logged_in_client(username=username, recreate=True, is_superuser=False, is_admin=True)
    self.user = User.objects.get(username=username)
def test_index_page():
    """Minimal test that index page renders."""
    client = make_logged_in_client()
    client.get("/beeswax")
def test_user_admin(self):
    """Exercise the user admin pages end-to-end: listing, editing, password
    changes, last-superuser protections, group membership, permission checks
    as a regular user, deactivation, and (bulk) deletion."""
    FUNNY_NAME = '~`!@#$%^&*()_-+={}[]|\;"<>?/,.'
    FUNNY_NAME_QUOTED = urllib.quote(FUNNY_NAME)

    resets = [
        useradmin.conf.DEFAULT_USER_GROUP.set_for_testing('test_default'),
        useradmin.conf.PASSWORD_POLICY.IS_ENABLED.set_for_testing(False),
    ]
    try:
        reset_password_policy()

        c = make_logged_in_client('test', is_superuser=True)
        user = User.objects.get(username='******')

        # Test basic output.
        response = c.get('/useradmin/')
        assert_true(len(response.context["users"]) > 0)
        assert_true("Hue Users" in response.content)

        # Test editing a superuser
        # Just check that this comes back
        response = c.get('/useradmin/users/edit/test')
        # Edit it, to add a first and last name
        response = c.post('/useradmin/users/edit/test',
                          dict(username="******", first_name=u"Inglés", last_name=u"Español", is_superuser="******", is_active="True"),
                          follow=True)
        assert_true("User information updated" in response.content,
                    "Notification should be displayed in: %s" % response.content)
        # Edit it, can't change username
        response = c.post('/useradmin/users/edit/test',
                          dict(username="******", first_name=u"Inglés", last_name=u"Español", is_superuser="******", is_active="True"),
                          follow=True)
        assert_true("You cannot change a username" in response.content)
        # Now make sure that those were materialized
        response = c.get('/useradmin/users/edit/test')
        assert_equal(smart_unicode("Inglés"), response.context["form"].instance.first_name)
        assert_true("Español" in response.content)
        # Shouldn't be able to demote to non-superuser
        response = c.post('/useradmin/users/edit/test',
                          dict(username="******", first_name=u"Inglés", last_name=u"Español", is_superuser=False, is_active=True))
        assert_true("You cannot remove" in response.content,
                    "Shouldn't be able to remove the last superuser")
        # Shouldn't be able to delete oneself
        response = c.post('/useradmin/users/delete', {u'user_ids': [user.id]})
        assert_true("You cannot remove yourself" in response.content,
                    "Shouldn't be able to delete the last superuser")

        # Let's try changing the password
        response = c.post('/useradmin/users/edit/test',
                          dict(username="******", first_name="Tom", last_name="Tester", is_superuser=True, password1="foo", password2="foobar"))
        assert_equal(["Passwords do not match."],
                     response.context["form"]["password2"].errors,
                     "Should have complained about mismatched password")
        # Old password not confirmed
        response = c.post('/useradmin/users/edit/test',
                          dict(username="******", first_name="Tom", last_name="Tester", password1="foo", password2="foo", is_active=True, is_superuser=True))
        assert_equal(["The old password does not match the current password."],
                     response.context["form"]["password_old"].errors,
                     "Should have complained about old password")
        # Good now
        response = c.post('/useradmin/users/edit/test',
                          dict(username="******", first_name="Tom", last_name="Tester", password1="foo", password2="foo", password_old="test", is_active=True, is_superuser=True))
        assert_true(User.objects.get(username="******").is_superuser)
        assert_true(User.objects.get(username="******").check_password("foo"))
        # Change it back!
        response = c.post('/useradmin/users/edit/test',
                          dict(username="******", first_name="Tom", last_name="Tester", password1="test", password2="test", password_old="foo", is_active="True", is_superuser="******"))
        assert_true(User.objects.get(username="******").check_password("test"))
        assert_true(make_logged_in_client(username="******", password="******"),
                    "Check that we can still login.")

        # Check new user form for default group
        group = get_default_user_group()
        response = c.get('/useradmin/users/new')
        assert_true(response)
        assert_true(('<option value="%s" selected="selected">%s</option>' % (group.id, group.name)) in str(response))

        # Create a new regular user (duplicate name)
        response = c.post('/useradmin/users/new', dict(username="******", password1="test", password2="test"))
        assert_equal({'username': ["User with this Username already exists."]},
                     response.context["form"].errors)

        # Create a new regular user (for real)
        response = c.post('/useradmin/users/new',
                          dict(username=FUNNY_NAME, password1="test", password2="test", is_active="True"))
        response = c.get('/useradmin/')
        assert_true(FUNNY_NAME_QUOTED in response.content)
        assert_true(len(response.context["users"]) > 1)
        assert_true("Hue Users" in response.content)
        # Validate profile is created.
        assert_true(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())

        # Need to give access to the user for the rest of the test
        group = Group.objects.create(name="test-group")
        perm = HuePermission.objects.get(app='useradmin', action='access')
        GroupPermission.objects.create(group=group, hue_permission=perm)

        # Verify that we can modify user groups through the user admin pages
        response = c.post('/useradmin/users/new',
                          dict(username="******", password1="test", password2="test", groups=[group.pk]))
        User.objects.get(username='******')
        assert_true(User.objects.get(username='******').groups.filter(name='test-group').exists())
        response = c.post('/useradmin/users/edit/group_member', dict(username="******", groups=[]))
        assert_false(User.objects.get(username='******').groups.filter(name='test-group').exists())

        # Check permissions by logging in as the new user
        c_reg = make_logged_in_client(username=FUNNY_NAME, password="******")
        test_user = User.objects.get(username=FUNNY_NAME)
        test_user.groups.add(Group.objects.get(name="test-group"))
        test_user.save()

        # Regular user should be able to modify oneself
        response = c_reg.post('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED, ),
                              dict(username=FUNNY_NAME, first_name="Hello", is_active=True,
                                   groups=[group.id for group in test_user.groups.all()]),
                              follow=True)
        assert_equal(response.status_code, 200)
        response = c_reg.get('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED, ), follow=True)
        assert_equal(response.status_code, 200)
        assert_equal("Hello", response.context["form"].instance.first_name)

        funny_user = User.objects.get(username=FUNNY_NAME)
        # Can't edit other people.
        response = c_reg.post("/useradmin/users/delete", {u'user_ids': [funny_user.id]})
        assert_true("You must be a superuser" in response.content,
                    "Regular user can't edit other people")

        # Revert to regular "test" user, that has superuser powers.
        c_su = make_logged_in_client()
        # Inactivate FUNNY_NAME
        c_su.post('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED, ),
                  dict(username=FUNNY_NAME, first_name="Hello", is_active=False))
        # Now make sure FUNNY_NAME can't log back in
        response = c_reg.get('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED, ))
        assert_true(response.status_code == 302 and "login" in response["location"],
                    "Inactivated user gets redirected to login page")

        # Delete that regular user
        funny_profile = get_profile(test_user)
        response = c_su.post('/useradmin/users/delete', {u'user_ids': [funny_user.id]})
        assert_equal(302, response.status_code)
        assert_false(User.objects.filter(username=FUNNY_NAME).exists())
        assert_false(UserProfile.objects.filter(id=funny_profile.id).exists())

        # Bulk delete users
        u1 = User.objects.create(username='******', password="******")
        u2 = User.objects.create(username='******', password="******")
        assert_equal(User.objects.filter(username__in=['u1', 'u2']).count(), 2)
        response = c_su.post('/useradmin/users/delete', {u'user_ids': [u1.id, u2.id]})
        assert_equal(User.objects.filter(username__in=['u1', 'u2']).count(), 0)

        # Make sure that user deletion works if the user has never performed a request.
        funny_user = User.objects.create(username=FUNNY_NAME, password='******')
        assert_true(User.objects.filter(username=FUNNY_NAME).exists())
        assert_false(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())
        response = c_su.post('/useradmin/users/delete', {u'user_ids': [funny_user.id]})
        assert_equal(302, response.status_code)
        assert_false(User.objects.filter(username=FUNNY_NAME).exists())
        assert_false(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())

        # You shouldn't be able to create a user without a password
        response = c_su.post('/useradmin/users/new', dict(username="******"))
        assert_true("You must specify a password when creating a new user." in response.content)
    finally:
        for reset in resets:
            reset()
def test_user_admin_password_policy(self):
    """With the password policy enabled, first-ever login, password changes,
    and new-user creation must all enforce the configured rule, and the
    configured hint/error message must be shown."""
    # Set up password policy
    password_hint = password_error_msg = (
        "The password must be at least 8 characters long, "
        "and must contain both uppercase and lowercase letters, "
        "at least one number, and at least one special character.")
    password_rule = "^(?=.*?[A-Z])(?=(.*[a-z]){1,})(?=(.*[\d]){1,})(?=(.*[\W_]){1,}).{8,}$"

    resets = [
        useradmin.conf.PASSWORD_POLICY.IS_ENABLED.set_for_testing(True),
        useradmin.conf.PASSWORD_POLICY.PWD_RULE.set_for_testing(password_rule),
        useradmin.conf.PASSWORD_POLICY.PWD_HINT.set_for_testing(password_hint),
        useradmin.conf.PASSWORD_POLICY.PWD_ERROR_MESSAGE.set_for_testing(password_error_msg),
    ]
    try:
        reset_password_policy()

        # Test first-ever login with password policy enabled
        c = Client()
        response = c.get('/accounts/login/')
        assert_equal(200, response.status_code)
        assert_true(response.context['first_login_ever'])

        # First attempt violates the policy and is rejected.
        response = c.post('/accounts/login/', dict(username="******", password="******"))
        assert_true(response.context['first_login_ever'])
        assert_equal([password_error_msg], response.context["form"]["password"].errors)

        # A compliant password succeeds and creates the first superuser.
        response = c.post('/accounts/login/', dict(username="******", password="******"), follow=True)
        assert_equal(200, response.status_code)
        assert_true(User.objects.get(username="******").is_superuser)
        assert_true(User.objects.get(username="******").check_password("foobarTest1["))
        c.get('/accounts/logout')

        # Test changing a user's password
        c = make_logged_in_client('superuser', is_superuser=True)

        # Test password hint is displayed
        response = c.get('/useradmin/users/edit/superuser')
        assert_true(password_hint in response.content)

        # Password is less than 8 characters
        response = c.post('/useradmin/users/edit/superuser',
                          dict(username="******", is_superuser=True, password1="foo", password2="foo"))
        assert_equal([password_error_msg], response.context["form"]["password1"].errors)

        # Password is more than 8 characters long but does not have a special character
        response = c.post('/useradmin/users/edit/superuser',
                          dict(username="******", is_superuser=True, password1="foobarTest1", password2="foobarTest1"))
        assert_equal([password_error_msg], response.context["form"]["password1"].errors)

        # Password1 and Password2 are valid but they do not match
        response = c.post('/useradmin/users/edit/superuser',
                          dict(username="******", is_superuser=True, password1="foobarTest1??", password2="foobarTest1?",
                               password_old="foobarTest1[", is_active=True))
        assert_equal(["Passwords do not match."], response.context["form"]["password2"].errors)

        # Password is valid now
        c.post('/useradmin/users/edit/superuser',
               dict(username="******", is_superuser=True, password1="foobarTest1[", password2="foobarTest1[",
                    password_old="test", is_active=True))
        assert_true(User.objects.get(username="******").is_superuser)
        assert_true(User.objects.get(username="******").check_password("foobarTest1["))

        # Test creating a new user
        response = c.get('/useradmin/users/new')
        assert_true(password_hint in response.content)

        # Password is more than 8 characters long but does not have a special character
        response = c.post('/useradmin/users/new',
                          dict(username="******", is_superuser=False, password1="foo", password2="foo"))
        assert_equal({'password1': [password_error_msg], 'password2': [password_error_msg]},
                     response.context["form"].errors)

        # Password is more than 8 characters long but does not have a special character
        response = c.post('/useradmin/users/new',
                          dict(username="******", is_superuser=False, password1="foobarTest1", password2="foobarTest1"))
        assert_equal({'password1': [password_error_msg], 'password2': [password_error_msg]},
                     response.context["form"].errors)

        # Password1 and Password2 are valid but they do not match
        response = c.post('/useradmin/users/new',
                          dict(username="******", is_superuser=False, password1="foobarTest1[", password2="foobarTest1?"))
        assert_equal({'password2': ["Passwords do not match."]}, response.context["form"].errors)

        # Password is valid now
        c.post('/useradmin/users/new',
               dict(username="******", is_superuser=False, password1="foobarTest1[", password2="foobarTest1[", is_active=True))
        assert_false(User.objects.get(username="******").is_superuser)
        assert_true(User.objects.get(username="******").check_password("foobarTest1["))
    finally:
        for reset in resets:
            reset()
def test_group_admin(self):
    """Exercise group admin pages: create/edit/delete groups, add members,
    and verify non-superusers are blocked from all group management."""
    c = make_logged_in_client(username="******", is_superuser=True)
    response = c.get('/useradmin/groups')
    # No groups just yet
    assert_true(len(response.context["groups"]) == 0)
    assert_true("Hue Groups" in response.content)

    # Create a group
    response = c.get('/useradmin/groups/new')
    assert_equal('/useradmin/groups/new', response.context['action'])
    c.post('/useradmin/groups/new', dict(name="testgroup"))

    # We should have an empty group in the DB now
    assert_true(len(Group.objects.all()) == 1)
    assert_true(Group.objects.filter(name="testgroup").exists())
    assert_true(len(Group.objects.get(name="testgroup").user_set.all()) == 0)

    # And now, just for kicks, let's try adding a user
    response = c.post('/useradmin/groups/edit/testgroup',
                      dict(name="testgroup",
                           members=[User.objects.get(username="******").pk],
                           save="Save"),
                      follow=True)
    assert_true(len(Group.objects.get(name="testgroup").user_set.all()) == 1)
    assert_true(Group.objects.get(name="testgroup").user_set.filter(username="******").exists())

    # Test some permissions
    c2 = make_logged_in_client(username="******", is_superuser=False)

    # Need to give access to the user for the rest of the test
    group = Group.objects.create(name="access-group")
    perm = HuePermission.objects.get(app='useradmin', action='access')
    GroupPermission.objects.create(group=group, hue_permission=perm)
    test_user = User.objects.get(username="******")
    test_user.groups.add(Group.objects.get(name="access-group"))
    test_user.save()

    # Make sure non-superusers can't do bad things
    response = c2.get('/useradmin/groups/new')
    assert_true("You must be a superuser" in response.content)
    response = c2.get('/useradmin/groups/edit/testgroup')
    assert_true("You must be a superuser" in response.content)

    response = c2.post('/useradmin/groups/new', dict(name="nonsuperuser"))
    assert_true("You must be a superuser" in response.content)
    response = c2.post('/useradmin/groups/edit/testgroup',
                       dict(name="nonsuperuser",
                            members=[User.objects.get(username="******").pk],
                            save="Save"),
                       follow=True)
    assert_true("You must be a superuser" in response.content)

    # Should be one group left, because we created the other group
    response = c.post('/useradmin/groups/delete', {'group_names': ['testgroup']})
    assert_true(len(Group.objects.all()) == 1)

    # Group names may contain spaces.
    group_count = len(Group.objects.all())
    response = c.post('/useradmin/groups/new', dict(name="with space"))
    assert_equal(len(Group.objects.all()), group_count + 1)
def test_new_jobs(self):
    """
    Submit jobs. Let them succeed or fail and view them.
    """
    # Install examples
    import jobsub.management.commands.jobsub_setup as jobsub_setup
    if not jobsub_setup.Command().has_been_setup():
        jobsub_setup.Command().handle()

    # Run the sleep example, since it doesn't require user home directory
    design_id = JobDesign.objects.get(name__contains="Example: Sleep").id
    response = self.client.post("/jobsub/submit/%d" % (design_id, ),
                                dict(map_sleep_time_millis=1, num_mappers=1, num_reducers=1, reduce_sleep_time_millis=1))
    watch_id = parse_out_id(response)
    response = watch_till_complete(self.client, watch_id)
    job_id = Submission.objects.get(id=watch_id).submission_handle.id
    hadoop_job_id = get_hadoop_job_id(self.jobsubd, job_id)

    # All jobs page
    response = self.client.get('/jobbrowser/jobs/')
    assert_true(hadoop_job_id.lstrip('job_') in response.content)

    # Single job page
    response = self.client.get('/jobbrowser/jobs/%s' % hadoop_job_id)

    # Check some counters for single job.
    counters = response.context['job'].counters
    counters_file_bytes_written = counters['FileSystemCounters']['counters']['FILE_BYTES_WRITTEN']
    assert_true(counters_file_bytes_written['map'] > 0)
    assert_true(counters_file_bytes_written['reduce'] > 0)
    assert_equal(counters_file_bytes_written['displayName'], 'FILE_BYTES_WRITTEN')
    assert_equal(counters_file_bytes_written['displayName'], 'FILE_BYTES_WRITTEN')

    # We can't just check the complete contents of the python map because the
    # SLOTS_MILLIS_* entries have a variable number of milliseconds from
    # run-to-run.
    assert_equal(response.context['job'].counters['org.apache.hadoop.mapred.JobInProgress$Counter']['counters']['TOTAL_LAUNCHED_MAPS']['total'], 1)
    assert_equal(response.context['job'].counters['org.apache.hadoop.mapred.JobInProgress$Counter']['counters']['TOTAL_LAUNCHED_REDUCES']['total'], 1)
    assert_equal(response.context['job'].counters['org.apache.hadoop.mapred.JobInProgress$Counter']['counters']['FALLOW_SLOTS_MILLIS_MAPS']['total'], 0)
    assert_equal(response.context['job'].counters['org.apache.hadoop.mapred.JobInProgress$Counter']['counters']['FALLOW_SLOTS_MILLIS_REDUCES']['total'], 0)
    assert_true(response.context['job'].counters['org.apache.hadoop.mapred.JobInProgress$Counter']['counters']['SLOTS_MILLIS_MAPS']['total'] > 0)
    assert_true(response.context['job'].counters['org.apache.hadoop.mapred.JobInProgress$Counter']['counters']['SLOTS_MILLIS_REDUCES']['total'] > 0)

    # Check conf keys made it
    assert_equal(response.context['job'].conf_keys['mapredReducerClass'], 'org.apache.hadoop.examples.SleepJob')

    # There should be 4 tasks for this job: cleanup, setup, map, reduce
    response = self.client.get('/jobbrowser/jobs/%s/tasks' % (hadoop_job_id, ))
    assert_true('Showing 1 to 4 of 4 tasks' in response.content)
    # Select by tasktype
    response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktype=reduce' % (hadoop_job_id, ))
    assert_true('Showing 1 to 1 of 1 tasks' in response.content)
    # Select by taskstate
    response = self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (hadoop_job_id, ))
    assert_true('Showing 1 to 4 of 4 tasks' in response.content)
    # Select by text
    response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktext=clean' % (hadoop_job_id, ))
    assert_true('Showing 1 to 1 of 1 tasks' in response.content)

    # Run another sleep job but kill it
    response = self.client.post("/jobsub/submit/%d" % (design_id, ),
                                dict(map_sleep_time_millis=1, num_mappers=2000, num_reducers=2000, reduce_sleep_time_millis=1))
    job_id = parse_out_id(response)
    time.sleep(15)  # 15 seconds should be enough to start the job
    hadoop_job_id = get_hadoop_job_id(self.jobsubd, job_id)

    # A non-superuser must not be able to kill someone else's job.
    client2 = make_logged_in_client('test_non_superuser', is_superuser=False)
    response = client2.post('/jobbrowser/jobs/%s/kill' % (hadoop_job_id, ))
    assert_equal("Permission denied. User test_non_superuser cannot delete user test's job.",
                 response.context["error"])
    self.client.post('/jobbrowser/jobs/%s/kill' % (hadoop_job_id, ))

    # It should say killed
    response = self.client.get('/jobbrowser/jobs/%s' % (hadoop_job_id, ))
    html = response.content.lower()
    assert_true(hadoop_job_id in html)
    assert_true('killed' in html)
    # Exercise select by taskstate
    self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=failed' % (hadoop_job_id, ))
    self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=pending' % (hadoop_job_id, ))
    self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (hadoop_job_id, ))
    self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=running' % (hadoop_job_id, ))
    self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=killed' % (hadoop_job_id, ))

    # Test single task page
    late_task_id = hadoop_job_id.replace('job', 'task') + '_r_001999'
    response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, late_task_id))
    assert_false('succeed' in response.content)
    assert_true('killed' in response.content)

    # The first task should've succeeded
    early_task_id = hadoop_job_id.replace('job', 'task') + '_m_000000'
    response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, early_task_id))
    assert_true('succeed' in response.content)
    assert_false('failed' in response.content)

    # Test single attempt page
    attempt_id = early_task_id.replace('task', 'attempt') + '_0'
    response = self.client.get('/jobbrowser/jobs/%s/tasks/%s/attempts/%s' % (hadoop_job_id, early_task_id, attempt_id))
    assert_true('syslog' in response.content)

    # Test dock jobs
    response = self.client.get('/jobbrowser/dock_jobs/')
    assert_true('completed' in response.content)
    # TODO(atm): I'm pretty sure the following test only passes because of
    # failed jobs which are run in test_failed_jobs
    assert_true('failed' in response.content)
def test_handle_on_link_shared(self):
    """Exercise handle_on_link_shared() for Slack link unfurling.

    Covers: a query-history editor link, a gist link, invalid link formats,
    links whose backing document no longer exists, and a failing chat_unfurl
    call. All Slack/doc-store collaborators are patched out, so no network or
    DB document access happens.
    """
    # Patch every external collaborator of the botserver view: the Slack
    # unfurl API, the payload builder, document lookup, gist lookup, and the
    # result-file sender.
    with patch('desktop.lib.botserver.views.slack_client.chat_unfurl') as chat_unfurl:
        with patch('desktop.lib.botserver.views._make_unfurl_payload') as mock_unfurl_payload:
            with patch('desktop.lib.botserver.views.Document2.objects.get') as document2_objects_get:
                with patch('desktop.lib.botserver.views._get_gist_document') as _get_gist_document:
                    with patch('desktop.lib.botserver.views.send_result_file') as send_result_file:
                        client = make_logged_in_client(username="******", groupname="default", recreate=True, is_superuser=False)
                        user = User.objects.get(username="******")
                        channel_id = "channel"
                        message_ts = "12.1"

                        # qhistory link: a saved-query document with one snippet.
                        links = [{"url": "https://demo.gethue.com/hue/editor?editor=123456"}]
                        doc_data = {
                            "dialect": "mysql",
                            "snippets": [{
                                "database": "hue",
                                "statement_raw": "SELECT 5000",
                            }]
                        }
                        document2_objects_get.return_value = Mock(data=json.dumps(doc_data), owner=user)
                        # file_status True -> the view is expected to also send the result file.
                        mock_unfurl_payload.return_value = {
                            'payload': {},
                            'file_status': True,
                        }

                        handle_on_link_shared(channel_id, message_ts, links)
                        assert_true(chat_unfurl.called)
                        assert_true(send_result_file.called)

                        # gist link: gist docs carry the dialect in 'extra'.
                        doc_data = {"statement_raw": "SELECT 98765"}
                        _get_gist_document.return_value = Mock(data=json.dumps(doc_data), owner=user, extra='mysql')
                        links = [{"url": "http://demo.gethue.com/hue/gist?uuid=random"}]
                        # file_status False -> unfurl only, no result file.
                        mock_unfurl_payload.return_value = {
                            'payload': {},
                            'file_status': False,
                        }

                        handle_on_link_shared(channel_id, message_ts, links)
                        assert_true(chat_unfurl.called)

                        # Cannot unfurl link with invalid links
                        inv_qhistory_url = "https://demo.gethue.com/hue/editor/?type=4"
                        inv_gist_url = "http://demo.gethue.com/hue/gist?uuids/=xyz"
                        assert_raises(PopupException, handle_on_link_shared, "channel", "12.1", [{"url": inv_qhistory_url}])
                        assert_raises(PopupException, handle_on_link_shared, "channel", "12.1", [{"url": inv_gist_url}])

                        # Document does not exist: both lookup helpers raise.
                        document2_objects_get.side_effect = PopupException('Query document does not exist')
                        _get_gist_document.side_effect = PopupException('Gist does not exist')

                        qhistory_url = "https://demo.gethue.com/hue/editor?editor=109644"
                        gist_url = "https://demo.gethue.com/hue/gist?uuid=6d1c407b-d999-4dfd-ad23-d3a46c19a427"
                        assert_raises(PopupException, handle_on_link_shared, "channel", "12.1", [{"url": qhistory_url}])
                        assert_raises(PopupException, handle_on_link_shared, "channel", "12.1", [{"url": gist_url}])

                        # chat_unfurl exception surfaces as a PopupException.
                        chat_unfurl.side_effect = PopupException('Cannot unfurl link')
                        assert_raises(PopupException, handle_on_link_shared, "channel", "12.1", links)
def setUp(self): self.client = make_logged_in_client() # Mock DB calls as we don't need the real ones self.prev_dbms = dbms.get dbms.get = lambda a, b: MockDbms()
def setUp(self): self.client = make_logged_in_client(username="******", groupname="default", recreate=True, is_superuser=False) self.user = User.objects.get(username="******")
def setUp(self): self.client = make_logged_in_client(username="******", groupname="default", recreate=True, is_superuser=False) self.user = User.objects.get(username="******") grant_access(self.user.username, self.user.username, "desktop")
def setUp(self): self.c = make_logged_in_client(is_superuser=False) grant_access("test", "test", "spark") self.user = User.objects.get(username='******')
def test_add_ldap_users(self):
    """Import users from LDAP via the add_ldap_users view.

    Exercises: plain import, a bad username, wildcard patterns, the
    IGNORE_USERNAME_CASE and FORCE_USERNAME_LOWERCASE options, and DN-based
    imports with and without whitespace in the username. Uses a
    LdapTestConnection, so no real LDAP server is contacted.
    """
    if is_live_cluster():
        raise SkipTest('HUE-2897: Skipping because the DB may not be case sensitive')

    done = []

    # Set to nonsensical value just to force new config usage.
    # Should continue to use cached connection.
    done.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))

    try:
        URL = reverse(add_ldap_users)

        # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
        ldap_access.CACHED_LDAP_CONN = LdapTestConnection()

        c = make_logged_in_client('test', is_superuser=True)

        assert_true(c.get(URL))

        # A successful import redirects to the user list.
        response = c.post(URL, dict(server='nonsense', username_pattern='moe', password1='test', password2='test'))
        assert_true('Location' in response, response)
        assert_true('/useradmin/users' in response['Location'], response)

        # A user missing from LDAP surfaces as a form error.
        response = c.post(URL, dict(server='nonsense', username_pattern='bad_name', password1='test', password2='test'))
        assert_true('Could not' in response.context['form'].errors['username_pattern'][0], response)

        # Test wild card
        response = c.post(URL, dict(server='nonsense', username_pattern='*rr*', password1='test', password2='test'))
        assert_true('/useradmin/users' in response['Location'], response)

        # Test ignore case
        done.append(desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True))
        User.objects.filter(username='******').delete()
        assert_false(User.objects.filter(username='******').exists())
        assert_false(User.objects.filter(username='******').exists())
        response = c.post(URL, dict(server='nonsense', username_pattern='Moe', password1='test', password2='test'))
        assert_true('Location' in response, response)
        assert_true('/useradmin/users' in response['Location'], response)
        assert_false(User.objects.filter(username='******').exists())
        assert_true(User.objects.filter(username='******').exists())

        # Test lower case
        done.append(desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.set_for_testing(True))
        User.objects.filter(username__iexact='Rock').delete()
        assert_false(User.objects.filter(username='******').exists())
        assert_false(User.objects.filter(username='******').exists())
        response = c.post(URL, dict(server='nonsense', username_pattern='rock', password1='test', password2='test'))
        assert_true('Location' in response, response)
        assert_true('/useradmin/users' in response['Location'], response)
        assert_false(User.objects.filter(username='******').exists())
        assert_true(User.objects.filter(username='******').exists())

        # Test regular with spaces (should fail)
        response = c.post(URL, dict(server='nonsense', username_pattern='user with space', password1='test', password2='test'))
        assert_true("Username must not contain whitespaces and ':'" in response.context['form'].errors['username_pattern'][0], response)

        # Test dn with spaces in username and dn (should fail)
        response = c.post(URL, dict(server='nonsense', username_pattern='uid=user with space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True))
        assert_true("Could not get LDAP details for users in pattern" in response.content, response.content)
        # The rejection is also logged; check the log view for it.
        response = c.get(reverse(desktop.views.log_view))
        assert_true("{username}: Username must not contain whitespaces".format(username='******') in response.content, response.content)

        # Test dn with spaces in dn, but not username (should succeed)
        response = c.post(URL, dict(server='nonsense', username_pattern='uid=user without space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True))
        assert_true(User.objects.filter(username='******').exists())
    finally:
        # Undo every set_for_testing() override, in order.
        for finish in done:
            finish()
def test_copy_files():
    """Verify Submission._copy_files() deploys workflow XML, properties and jars.

    Jars are created in several locations (absolute, in the workspace, and
    workspace-relative) and the test checks which ones get copied into the
    deployment dirs versus referenced via oozie.libpath, and which get
    overwritten on a re-deploy (fileId changes).

    Fixes vs. previous revision:
      - bare ``except:`` in the cleanup replaced with ``except Exception:`` so
        KeyboardInterrupt/SystemExit are not swallowed;
      - the final _create_file assertion no longer references
        ``list_dir_workspace``, which is only bound in the ``else`` branch and
        raised NameError when USE_LIBPATH_FOR_JARS was enabled.
    """
    cluster = pseudo_hdfs4.shared_cluster()

    try:
        c = make_logged_in_client()
        user = User.objects.get(username='******')
        ensure_home_directory(cluster.fs, user)

        prefix = '/tmp/test_copy_files'

        if cluster.fs.exists(prefix):
            cluster.fs.rmtree(prefix)

        # Jars in various locations
        deployment_dir = '%s/workspace' % prefix
        external_deployment_dir = '%s/deployment' % prefix
        jar_1 = '%s/udf1.jar' % prefix
        jar_2 = '%s/lib/udf2.jar' % prefix
        jar_3 = '%s/udf3.jar' % deployment_dir
        jar_4 = '%s/lib/udf4.jar' % deployment_dir  # Doesn't move
        jar_5 = 'udf5.jar'
        jar_6 = 'lib/udf6.jar'  # Doesn't move

        cluster.fs.mkdir(prefix)
        cluster.fs.create(jar_1)
        cluster.fs.create(jar_2)
        cluster.fs.create(jar_3)
        cluster.fs.create(jar_4)
        cluster.fs.create(deployment_dir + '/' + jar_5)
        cluster.fs.create(deployment_dir + '/' + jar_6)

        class MockJob(object):
            XML_FILE_NAME = 'workflow.xml'

            def __init__(self):
                self.deployment_dir = deployment_dir
                self.nodes = [
                    Node({'id': '1', 'type': 'mapreduce', 'properties': {'jar_path': jar_1}}),
                    Node({'id': '2', 'type': 'mapreduce', 'properties': {'jar_path': jar_2}}),
                    Node({'id': '3', 'type': 'java', 'properties': {'jar_path': jar_3}}),
                    Node({'id': '4', 'type': 'java', 'properties': {'jar_path': jar_4}}),
                    # Workspace relative paths
                    Node({'id': '5', 'type': 'java', 'properties': {'jar_path': jar_5}}),
                    Node({'id': '6', 'type': 'java', 'properties': {'jar_path': jar_6}})
                ]

        submission = Submission(user, job=MockJob(), fs=cluster.fs, jt=cluster.jt)
        submission._copy_files(deployment_dir, "<xml>My XML</xml>", {'prop1': 'val1'})
        submission._copy_files(external_deployment_dir, "<xml>My XML</xml>", {'prop1': 'val1'})

        assert_true(cluster.fs.exists(deployment_dir + '/workflow.xml'), deployment_dir)
        assert_true(cluster.fs.exists(deployment_dir + '/job.properties'), deployment_dir)

        # All sources still there
        assert_true(cluster.fs.exists(jar_1))
        assert_true(cluster.fs.exists(jar_2))
        assert_true(cluster.fs.exists(jar_3))
        assert_true(cluster.fs.exists(jar_4))
        assert_true(cluster.fs.exists(deployment_dir + '/' + jar_5))
        assert_true(cluster.fs.exists(deployment_dir + '/' + jar_6))

        # Lib
        deployment_dir = deployment_dir + '/lib'
        external_deployment_dir = external_deployment_dir + '/lib'

        if USE_LIBPATH_FOR_JARS.get():
            # Jars are referenced in-place via oozie.libpath, not copied.
            assert_true(jar_1 in submission.properties['oozie.libpath'])
            assert_true(jar_2 in submission.properties['oozie.libpath'])
            assert_true(jar_3 in submission.properties['oozie.libpath'])
            assert_true(jar_4 in submission.properties['oozie.libpath'])
            print(deployment_dir + '/' + jar_5)
            assert_true((deployment_dir + '/' + jar_5) in submission.properties['oozie.libpath'],
                        submission.properties['oozie.libpath'])
            assert_true((deployment_dir + '/' + jar_6) in submission.properties['oozie.libpath'],
                        submission.properties['oozie.libpath'])
        else:
            list_dir_workspace = cluster.fs.listdir(deployment_dir)
            list_dir_deployement = cluster.fs.listdir(external_deployment_dir)

            # All destinations there
            assert_true(cluster.fs.exists(deployment_dir + '/udf1.jar'), list_dir_workspace)
            assert_true(cluster.fs.exists(deployment_dir + '/udf2.jar'), list_dir_workspace)
            assert_true(cluster.fs.exists(deployment_dir + '/udf3.jar'), list_dir_workspace)
            assert_true(cluster.fs.exists(deployment_dir + '/udf4.jar'), list_dir_workspace)
            assert_true(cluster.fs.exists(deployment_dir + '/udf5.jar'), list_dir_workspace)
            assert_true(cluster.fs.exists(deployment_dir + '/udf6.jar'), list_dir_workspace)

            assert_true(cluster.fs.exists(external_deployment_dir + '/udf1.jar'), list_dir_deployement)
            assert_true(cluster.fs.exists(external_deployment_dir + '/udf2.jar'), list_dir_deployement)
            assert_true(cluster.fs.exists(external_deployment_dir + '/udf3.jar'), list_dir_deployement)
            assert_true(cluster.fs.exists(external_deployment_dir + '/udf4.jar'), list_dir_deployement)
            assert_true(cluster.fs.exists(external_deployment_dir + '/udf5.jar'), list_dir_deployement)
            assert_true(cluster.fs.exists(external_deployment_dir + '/udf6.jar'), list_dir_deployement)

            stats_udf1 = cluster.fs.stats(deployment_dir + '/udf1.jar')
            stats_udf2 = cluster.fs.stats(deployment_dir + '/udf2.jar')
            stats_udf3 = cluster.fs.stats(deployment_dir + '/udf3.jar')
            stats_udf4 = cluster.fs.stats(deployment_dir + '/udf4.jar')
            stats_udf5 = cluster.fs.stats(deployment_dir + '/udf5.jar')
            stats_udf6 = cluster.fs.stats(deployment_dir + '/udf6.jar')

            submission._copy_files('%s/workspace' % prefix, "<xml>My XML</xml>", {'prop1': 'val1'})

            # Re-deploying overwrites jars outside the workspace (new fileId)
            # but leaves the already-in-place ones (udf4, udf6) alone.
            assert_not_equal(stats_udf1['fileId'], cluster.fs.stats(deployment_dir + '/udf1.jar')['fileId'])
            assert_not_equal(stats_udf2['fileId'], cluster.fs.stats(deployment_dir + '/udf2.jar')['fileId'])
            assert_not_equal(stats_udf3['fileId'], cluster.fs.stats(deployment_dir + '/udf3.jar')['fileId'])
            assert_equal(stats_udf4['fileId'], cluster.fs.stats(deployment_dir + '/udf4.jar')['fileId'])
            assert_not_equal(stats_udf5['fileId'], cluster.fs.stats(deployment_dir + '/udf5.jar')['fileId'])
            assert_equal(stats_udf6['fileId'], cluster.fs.stats(deployment_dir + '/udf6.jar')['fileId'])

        # Test _create_file()
        submission._create_file(deployment_dir, 'test.txt', data='Test data')
        assert_true(cluster.fs.exists(deployment_dir + '/test.txt'), deployment_dir)

    finally:
        try:
            cluster.fs.rmtree(prefix)
        except Exception:
            # Best-effort cleanup only: log, never mask the test result.
            LOG.exception('failed to remove %s' % prefix)
def test_useradmin_ldap_user_group_membership_sync(self):
    """LDAP group membership should be re-synced by the middleware on each request.

    Fix: the final assertion previously checked for 3 groups, contradicting
    its own comment ("Should have 2 groups now...") and never verifying that
    the LDAP group removed from the test connection was actually dropped by
    the sync. It now asserts 2.
    """
    settings.MIDDLEWARE_CLASSES.append('useradmin.middleware.LdapSynchronizationMiddleware')

    # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
    ldap_access.CACHED_LDAP_CONN = LdapTestConnection()

    # Make sure LDAP groups exist or they won't sync
    import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers',
                       import_members=False, import_members_recursive=False,
                       sync_users=False, import_by_dn=False)
    import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators',
                       import_members=False, import_members_recursive=False,
                       sync_users=False, import_by_dn=False)

    reset = []

    # Set to nonsensical value just to force new config usage.
    # Should continue to use cached connection.
    reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))

    try:
        # Import curly who is part of TestUsers and Test Administrators
        import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'curly',
                          sync_groups=False, import_by_dn=False)

        # Set a password so that we can login
        user = User.objects.get(username='******')
        user.set_password('test')
        user.save()

        # Should have 0 groups
        assert_equal(0, user.groups.all().count())

        # Make an authenticated request as curly so that we can see call middleware.
        c = make_logged_in_client('curly', 'test', is_superuser=False)
        grant_access("curly", "test", "useradmin")
        response = c.get('/useradmin/users')

        # Refresh user groups
        user = User.objects.get(username='******')

        # Should have 3 groups now. 2 from LDAP and 1 from 'grant_access' call.
        assert_equal(3, user.groups.all().count(), user.groups.all())

        # Now remove a group and try again.
        old_group = ldap_access.CACHED_LDAP_CONN._instance.users['curly']['groups'].pop()

        # Make an authenticated request as curly so that we can see call middleware.
        response = c.get('/useradmin/users')

        # Refresh user groups
        user = User.objects.get(username='******')

        # Should have 2 groups now. 1 from LDAP and 1 from 'grant_access' call.
        assert_equal(2, user.groups.all().count(), user.groups.all())
    finally:
        settings.MIDDLEWARE_CLASSES.remove('useradmin.middleware.LdapSynchronizationMiddleware')

        for finish in reset:
            finish()
def get_shared_beeswax_server(): global _SHARED_HIVE_SERVER global _SHARED_HIVE_SERVER_CLOSER if _SHARED_HIVE_SERVER is None: cluster = pseudo_hdfs4.shared_cluster() HIVE_CONF = cluster._tmpdir + "/conf" finish = (beeswax.conf.HIVE_SERVER_HOST.set_for_testing("localhost"), beeswax.conf.HIVE_SERVER_PORT.set_for_testing( HIVE_SERVER_TEST_PORT), beeswax.conf.HIVE_SERVER_BIN.set_for_testing( get_run_root('ext/hive/hive') + '/bin/hiveserver2'), beeswax.conf.HIVE_CONF_DIR.set_for_testing(HIVE_CONF)) default_xml = """<?xml version="1.0"?> <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> <configuration> <property> <name>javax.jdo.option.ConnectionURL</name> <value>jdbc:derby:;databaseName=%(root)s/metastore_db;create=true</value> <description>JDBC connect string for a JDBC metastore</description> </property> </configuration> """ % { 'root': cluster._tmpdir } file(HIVE_CONF + '/hive-site.xml', 'w').write(default_xml) global _SHARED_HIVE_SERVER_PROCESS if _SHARED_HIVE_SERVER_PROCESS is None: p = _start_server(cluster) LOG.info("started") _SHARED_HIVE_SERVER_PROCESS = p def kill(): LOG.info("Killing server (pid %d)." 
% p.pid) os.kill(p.pid, 9) p.wait() atexit.register(kill) start = time.time() started = False sleep = 0.001 make_logged_in_client() user = User.objects.get(username='******') query_server = get_query_server_config() db = dbms.get(user, query_server) while not started and time.time() - start < 20.0: try: db.open_session(user) started = True break except Exception, e: LOG.info('HiveServer2 server status not started yet: %s' % e) time.sleep(sleep) sleep *= 2 if not started: raise Exception("Server took too long to come up.") # Make sure /tmp is 0777 cluster.fs.setuser(cluster.superuser) if not cluster.fs.isdir('/tmp'): cluster.fs.mkdir('/tmp', 0777) else: cluster.fs.chmod('/tmp', 0777) cluster.fs.chmod(cluster._tmpdir, 0777) cluster.fs.chmod(cluster._tmpdir + '/hadoop_tmp_dir/mapred', 0777) def s(): for f in finish: f() cluster.stop() _SHARED_HIVE_SERVER, _SHARED_HIVE_SERVER_CLOSER = cluster, s
def test_group_permissions():
    """Granting and revoking an app permission via a group controls access.

    A superuser always reaches useradmin; a plain user only does while their
    group holds the 'useradmin/access' HuePermission.
    """
    reset_all_users()
    reset_all_groups()

    # Get ourselves set up with a user and a group
    c = make_logged_in_client(username="******", is_superuser=True)
    Group.objects.create(name="test-group")
    test_user = User.objects.get(username="******")
    test_user.groups.add(Group.objects.get(name="test-group"))
    test_user.save()

    # Make sure that a superuser can always access applications
    response = c.get('/useradmin/users')
    assert_true('Hue Users' in response.content)

    assert_true(len(GroupPermission.objects.all()) == 0)
    # Grant the useradmin 'access' permission to test-group.
    c.post('/useradmin/groups/edit/test-group',
           dict(name="test-group",
                members=[User.objects.get(username="******").pk],
                permissions=[HuePermission.objects.get(app='useradmin', action='access').pk],
                save="Save"),
           follow=True)
    assert_true(len(GroupPermission.objects.all()) == 1)

    # Now test that we have limited access
    c1 = make_logged_in_client(username="******", is_superuser=False)
    response = c1.get('/useradmin/users')
    assert_true('You do not have permission to access the Useradmin application.' in response.content)

    # Add the non-admin to a group that should grant permissions to the app
    test_user = User.objects.get(username="******")
    test_user.groups.add(Group.objects.get(name='test-group'))
    test_user.save()

    # Check that we have access now
    response = c1.get('/useradmin/users')
    assert_true(get_profile(test_user).has_hue_permission('access', 'useradmin'))
    assert_true('Hue Users' in response.content)

    # Make sure we can't modify permissions
    response = c1.get('/useradmin/permissions/edit/useradmin/access')
    assert_true('must be a superuser to change permissions' in response.content)

    # And revoke access from the group
    c.post('/useradmin/permissions/edit/useradmin/access',
           dict(app='useradmin', priv='access', groups=[], save="Save"),
           follow=True)
    assert_true(len(GroupPermission.objects.all()) == 0)
    assert_false(get_profile(test_user).has_hue_permission('access', 'useradmin'))

    # We should no longer have access to the app
    response = c1.get('/useradmin/users')
    assert_true('You do not have permission to access the Useradmin application.' in response.content)
def setup_class(cls): cls.client = make_logged_in_client(username=cls.test_username, password="******") cls.auth_backends = settings.AUTHENTICATION_BACKENDS settings.AUTHENTICATION_BACKENDS = ( 'desktop.auth.backend.ImpersonationBackend', )
def test_add_ldap_users():
    """Import users via add_ldap_users (legacy, single-server variant).

    Same scenarios as the class-based test but without the 'server' form
    field: plain import, bad username, wildcard, IGNORE_USERNAME_CASE,
    FORCE_USERNAME_LOWERCASE, and DN imports with/without whitespace. Uses
    LdapTestConnection, so no real LDAP server is contacted.
    """
    done = []
    try:
        URL = reverse(add_ldap_users)

        reset_all_users()
        reset_all_groups()

        # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
        ldap_access.CACHED_LDAP_CONN = LdapTestConnection()

        c = make_logged_in_client('test', is_superuser=True)

        assert_true(c.get(URL))

        # A successful import redirects to the user list.
        response = c.post(URL, dict(username_pattern='moe', password1='test', password2='test'))
        assert_true('Location' in response, response)
        assert_true('/useradmin/users' in response['Location'], response)

        # A user missing from LDAP surfaces as a form error.
        response = c.post(URL, dict(username_pattern='bad_name', password1='test', password2='test'))
        assert_true('Could not' in response.context['form'].errors['username_pattern'][0], response)

        # Test wild card
        response = c.post(URL, dict(username_pattern='*rr*', password1='test', password2='test'))
        assert_true('/useradmin/users' in response['Location'], response)

        # Test ignore case
        done.append(desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True))
        User.objects.filter(username='******').delete()
        assert_false(User.objects.filter(username='******').exists())
        assert_false(User.objects.filter(username='******').exists())
        response = c.post(URL, dict(username_pattern='Moe', password1='test', password2='test'))
        assert_true('Location' in response, response)
        assert_true('/useradmin/users' in response['Location'], response)
        assert_false(User.objects.filter(username='******').exists())
        assert_true(User.objects.filter(username='******').exists())

        # Test lower case
        done.append(desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.set_for_testing(True))
        User.objects.filter(username__iexact='Rock').delete()
        assert_false(User.objects.filter(username='******').exists())
        assert_false(User.objects.filter(username='******').exists())
        response = c.post(URL, dict(username_pattern='rock', password1='test', password2='test'))
        assert_true('Location' in response, response)
        assert_true('/useradmin/users' in response['Location'], response)
        assert_false(User.objects.filter(username='******').exists())
        assert_true(User.objects.filter(username='******').exists())

        # Test regular with spaces (should fail)
        response = c.post(URL, dict(username_pattern='user with space', password1='test', password2='test'))
        assert_true("Username must not contain whitespaces and ':'" in response.context['form'].errors['username_pattern'][0], response)

        # Test dn with spaces in username and dn (should fail)
        response = c.post(URL, dict(username_pattern='uid=user with space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True))
        assert_true("There was a problem with some of the LDAP information" in response.content, response)
        assert_true("Username must not contain whitespaces" in response.content, response)

        # Test dn with spaces in dn, but not username (should succeed)
        response = c.post(URL, dict(username_pattern='uid=user without space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True))
        assert_true(User.objects.filter(username='******').exists())
    finally:
        # Undo every set_for_testing() override, in order.
        for finish in done:
            finish()
def test_check_config_ajax(): c = make_logged_in_client() response = c.get(reverse(check_config)) assert_true("misconfiguration" in response.content, response.content)
def test_kill_job(self):
    """
    Test job in kill state.

    Submits the sleep workflow, checks a non-superuser cannot kill another
    user's job, kills it as the owner, then verifies task/attempt pages
    report the killed state. Uses polling with timeouts since job state
    changes are asynchronous.
    """
    # Run the sleep example, since it doesn't require user home directory
    design_id = self.design.id
    response = self.client.post(reverse('oozie:submit_workflow', args=[self.design.id]),
                                data={
                                    u'form-MAX_NUM_FORMS': [u''],
                                    u'form-INITIAL_FORMS': [u'1'],
                                    u'form-0-name': [u'REDUCER_SLEEP_TIME'],
                                    u'form-0-value': [u'1'],
                                    u'form-TOTAL_FORMS': [u'1']
                                },
                                follow=True)
    oozie_jobid = response.context['oozie_workflow'].id

    # Wait for a job to be created and fetch job ID
    hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)

    # A different, non-super user must be denied the kill.
    client2 = make_logged_in_client('test_non_superuser', is_superuser=False, groupname='test')
    grant_access('test_non_superuser', 'test', 'jobbrowser')
    response = client2.post('/jobbrowser/jobs/%s/kill' % (hadoop_job_id,))
    assert_equal("Permission denied. User test_non_superuser cannot delete user %s's job." % self.username,
                 response.context["error"])

    # Make sure that the first map task succeeds before moving on
    # This will keep us from hitting timing-related failures
    first_mapper = 'm_000000'
    start = time.time()
    timeout_sec = 60
    while first_mapper not in \
            self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (hadoop_job_id,)).content:
        time.sleep(1)
        # If this assert fails, something has probably really failed
        assert_true(time.time() - start < timeout_sec,
                    "Timed out waiting for first mapper to complete")

    # Kill task
    self.client.post('/jobbrowser/jobs/%s/kill' % (hadoop_job_id,))

    # It should say killed at some point
    response = self.client.get('/jobbrowser/jobs/%s?format=json' % (hadoop_job_id,))
    html = response.content.lower()
    i = 0
    # Poll up to ~50s (10 x 5s) for the killed state to appear.
    while 'killed' not in html and i < 10:
        time.sleep(5)
        response = self.client.get('/jobbrowser/jobs/%s?format=json' % (hadoop_job_id,))
        html = response.content.lower()
        i += 1

    assert_true(views.get_shorter_id(hadoop_job_id) in html)
    assert_true('killed' in html, html)

    # Exercise select by taskstate
    self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=failed' % (hadoop_job_id,))
    self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (hadoop_job_id,))
    self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=running' % (hadoop_job_id,))
    self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=killed' % (hadoop_job_id,))

    # Test single task page
    late_task_id = hadoop_job_id.replace('job', 'task') + '_r_000000'
    response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, late_task_id))
    assert_false('succeed' in response.content)
    assert_true('killed' in response.content)

    # The first task should've succeeded
    # We use a different method of checking success for this one
    early_task_id = hadoop_job_id.replace('job', 'task') + '_m_000000'
    response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, early_task_id))
    assert_true('succeed' in response.content)
    assert_false('failed' in response.content)

    # Test single attempt page
    early_task_id = hadoop_job_id.replace('job', 'task') + '_m_000000'
    attempt_id = early_task_id.replace('task', 'attempt') + '_0'
    response = self.client.get('/jobbrowser/jobs/%s/tasks/%s/attempts/%s/logs' %
                               (hadoop_job_id, early_task_id, attempt_id))
    assert_true('syslog' in response.content)

    # Test dock jobs
    response = self.client.get('/jobbrowser/dock_jobs/')
    assert_false('completed' in response.content)
    assert_false('failed' in response.content)
def test_get_document(self): c1 = make_logged_in_client(username='******', groupname='test_get_group', recreate=True, is_superuser=False) r1 = c1.get('/desktop/api/doc/get?id=1') assert_true(-1, json.loads(r1.content)['status'])
def test_last_activity(): c = make_logged_in_client(username="******", is_superuser=True) profile = UserProfile.objects.get(user__username='******') assert_not_equal(profile.last_activity, 0)