Example #1
File: tests.py Project: rhmiller47/hue
    def test_has_write_access_backend(self):
        client = make_logged_in_client(
            username="******", groupname="write_access_backend", is_superuser=False
        )
        grant_access("write_access_backend", "write_access_backend", "metastore")
        grant_access("write_access_backend", "write_access_backend", "beeswax")
        user = User.objects.get(username="******")

        resp = _make_query(
            client, "CREATE TABLE test_perm_1 (a int);", database=self.db_name
        )  # Only fails if Sentry is in use and denies SELECT to the user
        resp = wait_for_query_to_finish(client, resp, max=30.0)

        def check(client, http_codes):
            resp = client.get("/metastore/tables/drop/%s" % self.db_name)
            assert_true(resp.status_code in http_codes, resp.content)

            resp = client.post("/metastore/tables/drop/%s" % self.db_name, {u"table_selection": [u"test_perm_1"]})
            assert_true(resp.status_code in http_codes, resp.content)

        check(client, [301])  # Denied

        # Add access
        group, created = Group.objects.get_or_create(name="write_access_backend")
        perm, created = HuePermission.objects.get_or_create(app="metastore", action="write")
        GroupPermission.objects.get_or_create(group=group, hue_permission=perm)

        check(client, [200, 302])  # Ok
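
Note: most examples here follow the same three-step fixture: create a non-superuser test client, grant the group access to the app under test, then fetch the backing Django user. A minimal sketch of that pattern, assuming the helpers live in their usual Hue locations and using a hypothetical helper name (make_perm_client):

from django.contrib.auth.models import User

from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access


def make_perm_client(username, group, app):
    # Log in a plain (non-superuser) test client backed by a Django user.
    client = make_logged_in_client(username=username, groupname=group, is_superuser=False)
    # grant_access(username, groupname, appname) lets the group use the app.
    grant_access(username, group, app)
    return client, User.objects.get(username=username)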
Example #2
    def test_has_write_access_backend(self):
        client = make_logged_in_client(
            username="******", groupname="write_access_backend", is_superuser=False
        )
        grant_access("write_access_backend", "write_access_backend", "metastore")
        grant_access("write_access_backend", "write_access_backend", "beeswax")
        user = User.objects.get(username="******")

        def check(client, http_code):
            resp = _make_query(client, "CREATE TABLE test_perm_1 (a int);")
            resp = wait_for_query_to_finish(client, resp, max=30.0)

            resp = client.get("/metastore/tables/drop/default", follow=True)
            # assert_true('want to delete' in resp.content, resp.content)
            assert_equal(resp.status_code, http_code, resp.content)

            resp = client.post("/metastore/tables/drop/default", {u"table_selection": [u"test_perm_1"]}, follow=True)
            assert_equal(resp.status_code, http_code, resp.content)

        check(client, 200)

        # Remove access
        group, created = Group.objects.get_or_create(name="write_access_backend")
        perm, created = HuePermission.objects.get_or_create(app="metastore", action="read_only_access")
        GroupPermission.objects.get_or_create(group=group, hue_permission=perm)

        check(client, 500)
Example #3
File: tests.py Project: neiodavince/hue
  def test_has_write_access_backend(self):
    if is_live_cluster():
      raise SkipTest('HUE-2900: Needs debugging on live cluster')

    client = make_logged_in_client(username='******', groupname='write_access_backend', is_superuser=False)
    grant_access("write_access_backend", "write_access_backend", "metastore")
    grant_access("write_access_backend", "write_access_backend", "beeswax")
    user = User.objects.get(username='******')

    resp = _make_query(client, 'CREATE TABLE test_perm_1 (a int);', database=self.db_name) # Only fails if Sentry is in use and denies SELECT to the user
    resp = wait_for_query_to_finish(client, resp, max=30.0)

    def check(client, http_codes):
      resp = client.get('/metastore/tables/drop/%s' % self.db_name)
      assert_true(resp.status_code in http_codes, resp.content)

      resp = client.post('/metastore/tables/drop/%s' % self.db_name, {u'table_selection': [u'test_perm_1']})
      assert_true(resp.status_code in http_codes, resp.content)

    check(client, [301]) # Denied

    # Add access
    group, created = Group.objects.get_or_create(name='write_access_backend')
    perm, created = HuePermission.objects.get_or_create(app='metastore', action='write')
    GroupPermission.objects.get_or_create(group=group, hue_permission=perm)

    check(client, [200, 302]) # Ok
Example #4
File: tests.py Project: shobull/hue
    def setup_class(cls):

        if not is_live_cluster():
            raise SkipTest()

        cls.client = make_logged_in_client(username="******", is_superuser=False)
        cls.user = User.objects.get(username="******")
        add_to_group("test")
        grant_access("test", "test", "libzookeeper")

        # Create a ZKNode namespace
        cls.namespace = "TestWithZooKeeper"

        # Create temporary test directory and file with contents
        cls.local_directory = tempfile.mkdtemp()
        # Create subdirectory
        cls.subdir_name = "subdir"
        subdir_path = "%s/%s" % (cls.local_directory, cls.subdir_name)
        os.mkdir(subdir_path, 0755)
        # Create file
        cls.filename = "test.txt"
        file_path = "%s/%s" % (subdir_path, cls.filename)
        cls.file_contents = "This is a test"
        file = open(file_path, "w+")
        file.write(cls.file_contents)
        file.close()
Example #5
File: tests.py Project: bugcy013/hue
  def test_has_write_access_frontend(self):
    # HS2 bug: Proxy user substitution is not supported for insecure hadoop
    raise SkipTest

    client = make_logged_in_client(username='******', groupname='write_access_frontend', is_superuser=False)
    grant_access("write_access_frontend", "write_access_frontend", "metastore")
    user = User.objects.get(username='******')

    def check(client, assertz):
      response = client.get("/metastore/databases")
      assertz("Drop</button>" in response.content, response.content)
      assertz("Create a new database" in response.content, response.content)

      response = client.get("/metastore/tables/")
      assertz("Drop</button>" in response.content, response.content)
      assertz("Create a new table" in response.content, response.content)

    check(client, assert_true)

    # Remove access
    group, created = Group.objects.get_or_create(name='write_access_frontend')
    perm, created = HuePermission.objects.get_or_create(app='metastore', action='read_only_access')
    GroupPermission.objects.get_or_create(group=group, hue_permission=perm)

    check(client, assert_false)
Example #6
File: tests.py Project: shobull/hue
    def setup_class(cls):
        SqoopServerProvider.setup_class()

        cls.client = make_logged_in_client(username="******", is_superuser=False)
        cls.user = User.objects.get(username="******")
        add_to_group("test")
        grant_access("test", "test", "sqoop")
Example #7
File: tests.py Project: 18600597055/hue
def test_impersonation():
  from hbased import Hbase as thrift_hbase

  c = make_logged_in_client(username='******', is_superuser=False)
  grant_access('test_hbase', 'test_hbase', 'hbase')
  user = User.objects.get(username='******')

  proto = MockProtocol()
  client = thrift_hbase.Client(proto)

  impersonation_enabled = is_impersonation_enabled()

  get_conf()[_CNF_HBASE_IMPERSONATION_ENABLED] = 'FALSE'
  try:
    client.getTableNames(doas=user.username)
  except AttributeError:
    pass # We don't mock everything
  finally:
    get_conf()[_CNF_HBASE_IMPERSONATION_ENABLED] = impersonation_enabled

  assert_equal({}, proto.get_headers())


  get_conf()[_CNF_HBASE_IMPERSONATION_ENABLED] = 'TRUE'

  try:
    client.getTableNames(doas=user.username)
  except AttributeError:
    pass # We don't mock everything
  finally:
    get_conf()[_CNF_HBASE_IMPERSONATION_ENABLED] = impersonation_enabled

  assert_equal({'doAs': u'test_hbase'}, proto.get_headers())
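
Note: the two assertions above pin down the impersonation contract: with impersonation off no headers are sent, and with it on the effective username travels as a 'doAs' header. The sketch below is not Hue's actual MockProtocol, just a standalone illustration of the recording idea; all names are hypothetical:

class RecordingProtocol(object):
    def __init__(self):
        self._headers = {}

    def set_header(self, name, value):
        self._headers[name] = value

    def get_headers(self):
        return self._headers


def call_as(proto, username, impersonation_enabled):
    # With impersonation on, the effective user rides along as a 'doAs' header.
    if impersonation_enabled:
        proto.set_header('doAs', username)
    # ... the real Thrift RPC would be issued here ...


proto = RecordingProtocol()
call_as(proto, 'test_hbase', False)
assert proto.get_headers() == {}
call_as(proto, 'test_hbase', True)
assert proto.get_headers() == {'doAs': 'test_hbase'}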
Example #8
File: tests.py Project: bugcy013/hue
  def test_has_write_access_backend(self):
    # HS2 bug: Proxy user substitution is not supported for insecure hadoop
    raise SkipTest

    client = make_logged_in_client(username='******', groupname='write_access_backend', is_superuser=False)
    grant_access("write_access_backend", "write_access_backend", "metastore")
    grant_access("write_access_backend", "write_access_backend", "beeswax")
    user = User.objects.get(username='******')

    def check(client, http_code):
      resp = _make_query(client, 'CREATE TABLE test_perm_1 (a int);')
      resp = wait_for_query_to_finish(client, resp, max=30.0)

      resp = client.get('/metastore/tables/drop/default', follow=True)
      #assert_true('want to delete' in resp.content, resp.content)
      assert_equal(resp.status_code, http_code, resp.content)

      resp = client.post('/metastore/tables/drop/default', {u'table_selection': [u'test_perm_1']}, follow=True)
      assert_equal(resp.status_code, http_code, resp.content)

    check(client, 200)

    # Remove access
    group, created = Group.objects.get_or_create(name='write_access_backend')
    perm, created = HuePermission.objects.get_or_create(app='metastore', action='read_only_access')
    GroupPermission.objects.get_or_create(group=group, hue_permission=perm)

    check(client, 500)
Example #9
File: tests.py Project: OSUser/hue
  def setUp(self):
    self.client = make_logged_in_client(username="******", groupname="default", recreate=True, is_superuser=False)
    self.client_not_me = make_logged_in_client(username="******", groupname="default", recreate=True, is_superuser=False)

    self.user = User.objects.get(username="******")
    self.user_not_me = User.objects.get(username="******")

    grant_access("test", "default", "notebook")
    grant_access("not_perm_user", "default", "notebook")

    self.notebook_json = """
      {
        "selectedSnippet": "hive",
        "showHistory": false,
        "description": "Test Hive Query",
        "name": "Test Hive Query",
        "sessions": [
            {
                "type": "hive",
                "properties": [],
                "id": null
            }
        ],
        "type": "query-hive",
        "id": 50010,
        "snippets": [],
        "uuid": "5982a274-de78-083c-2efc-74f53dce744c"
    }
    """

    self.notebook = json.loads(self.notebook_json)
    self.doc2 = Document2.objects.create(id=50010, name=self.notebook['name'], type=self.notebook['type'], owner=self.user)
    self.doc1 = Document.objects.link(self.doc2, owner=self.user, name=self.doc2.name,
                                      description=self.doc2.description, extra=self.doc2.type)
Example #10
File: tests.py Project: guoqinga/hue
  def test_job_permissions(self):
    # Login as ourself
    finish = SHARE_JOBS.set_for_testing(True)
    try:
      response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/?format=json&user=')
      assert_true(TestJobBrowserWithHadoop.hadoop_job_id_short in response.content)
    finally:
      finish()

    # Login as someone else
    client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test')
    grant_access("not_me", "test", "jobbrowser")

    finish = SHARE_JOBS.set_for_testing(True)
    try:
      response = client_not_me.get('/jobbrowser/jobs/?format=json&user=')
      assert_true(TestJobBrowserWithHadoop.hadoop_job_id_short in response.content)
    finally:
      finish()

    finish = SHARE_JOBS.set_for_testing(False)
    try:
      response = client_not_me.get('/jobbrowser/jobs/?format=json&user=')
      assert_false(TestJobBrowserWithHadoop.hadoop_job_id_short in response.content)
    finally:
      finish()
Example #11
File: tests.py Project: Web5design/hue
  def setUp(self):
    self.c = make_logged_in_client(username='******', is_superuser=False)
    grant_access('test_search', 'test_search', 'search')
    self.user = User.objects.get(username='******')

    self.prev_resource = resource.Resource
    resource.Resource = MockResource
Example #12
File: tests.py Project: guoqinga/hue
  def setUp(self):
    # Beware: Monkey patching
    if not hasattr(resource_manager_api, 'old_get_resource_manager'):
      resource_manager_api.old_get_resource_manager = resource_manager_api.get_resource_manager
    if not hasattr(mapreduce_api, 'old_get_mapreduce_api'):
      mapreduce_api.old_get_mapreduce_api = mapreduce_api.get_mapreduce_api
    if not hasattr(history_server_api, 'old_get_history_server_api'):
      history_server_api.old_get_history_server_api = history_server_api.get_history_server_api

    self.c = make_logged_in_client(is_superuser=False)
    grant_access("test", "test", "jobbrowser")
    self.user = User.objects.get(username='******')

    self.c2 = make_logged_in_client(is_superuser=False, username="******")
    grant_access("test2", "test2", "jobbrowser")
    self.user2 = User.objects.get(username='******')

    resource_manager_api.get_resource_manager = lambda user: MockResourceManagerApi(user)
    mapreduce_api.get_mapreduce_api = lambda: MockMapreduceApi()
    history_server_api.get_history_server_api = lambda: HistoryServerApi()

    self.finish = [
        YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True),
        SHARE_JOBS.set_for_testing(False)
    ]
    assert_true(cluster.is_yarn())
Example #13
File: tests.py Project: iAmGhost/hue
def test_dump_config():
  c = make_logged_in_client()

  CANARY = "abracadabra"

  # Depending on the order of the conf.initialize() in settings, the set_for_testing() are not seen in the global settings variable
  clear = HIVE_SERVER_HOST.set_for_testing(CANARY)

  response1 = c.get(reverse('desktop.views.dump_config'))
  assert_true(CANARY in response1.content, response1.content)

  response2 = c.get(reverse('desktop.views.dump_config'), dict(private="true"))
  assert_true(CANARY in response2.content)

  # There are more private variables...
  assert_true(len(response1.content) < len(response2.content))

  clear()

  CANARY = "(localhost|127\.0\.0\.1):(50030|50070|50060|50075)"
  clear = proxy.conf.WHITELIST.set_for_testing(CANARY)

  response1 = c.get(reverse('desktop.views.dump_config'))
  assert_true(CANARY in response1.content)

  clear()

  # Malformed port per HUE-674
  CANARY = "asdfoijaoidfjaosdjffjfjaoojosjfiojdosjoidjfoa"
  clear = HIVE_SERVER_HOST.set_for_testing(CANARY)

  response1 = c.get(reverse('desktop.views.dump_config'))
  assert_true(CANARY in response1.content, response1.content)

  clear()

  CANARY = '/tmp/spacé.dat'
  finish = proxy.conf.WHITELIST.set_for_testing(CANARY)
  try:
    response = c.get(reverse('desktop.views.dump_config'))
    assert_true(CANARY in response.content, response.content)
  finally:
    finish()

  # Not showing some passwords
  response = c.get(reverse('desktop.views.dump_config'))
  assert_false('bind_password' in response.content)

  # Login as someone else
  client_not_me = make_logged_in_client(username='******', is_superuser=False, groupname='test')
  grant_access("not_me", "test", "desktop")

  response = client_not_me.get(reverse('desktop.views.dump_config'))
  assert_true("You must be a superuser" in response.content, response.content)

  os.environ["HUE_CONF_DIR"] = "/tmp/test_hue_conf_dir"
  resp = c.get(reverse('desktop.views.dump_config'))
  del os.environ["HUE_CONF_DIR"]
  assert_true('/tmp/test_hue_conf_dir' in resp.content, resp)
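
Note: both dump_config examples lean on the set_for_testing() idiom: it swaps in a test value and returns a zero-argument callable (bound to names like clear or finish) that restores the previous value. A self-contained sketch of that contract, with illustrative names only:

class ConfigEntry(object):
    def __init__(self, value):
        self.value = value

    def set_for_testing(self, new_value):
        # Swap the value in and hand back a restorer for the old one.
        old_value = self.value
        self.value = new_value
        def restore():
            self.value = old_value
        return restore


HIVE_HOST = ConfigEntry('real-host')
clear = HIVE_HOST.set_for_testing('abracadabra')
try:
    assert HIVE_HOST.value == 'abracadabra'
finally:
    clear()
assert HIVE_HOST.value == 'real-host'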
Example #14
  def setUp(self):
    self.client = make_logged_in_client(username="******", groupname="test", recreate=True, is_superuser=False)
    self.user = User.objects.get(username="******")
    grant_access("test", "test", "libsentry")

    self.api_v1 = MockSentryApiV1()
    self.api_v2 = MockSentryApiV2()
    self.checker = PrivilegeChecker(user=self.user, api_v1=self.api_v1, api_v2=self.api_v2)
Example #15
File: tests.py Project: erickt/hue
 def setUp(self):
   super(TestWithHadoop, self).setUp()
   # FIXME (HUE-2562): The tests unfortunately require superuser at the
   # moment, but should be rewritten to not need it.
   self.c = make_logged_in_client(is_superuser=True)
   grant_access("test", "test", "pig")
   self.user = User.objects.get(username='******')
   self.c.post(reverse('pig:install_examples'))
Example #16
File: s3fs_test.py Project: CaeserNieh/hue
  def setUpClass(cls):
    S3TestBase.setUpClass()
    if not cls.shouldSkip():
      cls.fs = S3FileSystem(cls.s3_connection)

      cls.c = make_logged_in_client(username='******', is_superuser=False)
      grant_access('test', 'test', 'filebrowser')
      add_to_group('test')
Example #17
  def test_upload_file(self):
    with tempfile.NamedTemporaryFile() as local_file:
      # Make sure we can upload larger than the UPLOAD chunk size
      file_size = UPLOAD_CHUNK_SIZE.get() * 2
      local_file.write('0' * file_size)
      local_file.flush()

      prefix = self.cluster.fs_prefix + '/test_upload_file'
      self.cluster.fs.mkdir(prefix)

      USER_NAME = 'test'
      HDFS_DEST_DIR = prefix + "/tmp/fb-upload-test"
      LOCAL_FILE = local_file.name
      HDFS_FILE = HDFS_DEST_DIR + '/' + os.path.basename(LOCAL_FILE)

      self.cluster.fs.do_as_superuser(self.cluster.fs.mkdir, HDFS_DEST_DIR)
      self.cluster.fs.do_as_superuser(self.cluster.fs.chown, HDFS_DEST_DIR, USER_NAME, USER_NAME)
      self.cluster.fs.do_as_superuser(self.cluster.fs.chmod, HDFS_DEST_DIR, 0700)

      stats = self.cluster.fs.stats(HDFS_DEST_DIR)
      assert_equal(stats['user'], USER_NAME)
      assert_equal(stats['group'], USER_NAME)

      # Upload the temporary file
      resp = self.c.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR, # GET param avoids infinite looping
                         dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
      response = json.loads(resp.content)

      assert_equal(0, response['status'], response)
      stats = self.cluster.fs.stats(HDFS_FILE)
      assert_equal(stats['user'], USER_NAME)
      assert_equal(stats['group'], USER_NAME)

      f = self.cluster.fs.open(HDFS_FILE)
      actual = f.read(file_size)
      expected = file(LOCAL_FILE).read()
      assert_equal(actual, expected, 'files do not match: %s != %s' % (len(actual), len(expected)))

      # Upload again; this fails because the file already exists
      resp = self.c.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
                         dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
      response = json.loads(resp.content)
      assert_equal(-1, response['status'], response)
      assert_true('already exists' in response['data'], response)

      # Upload as another user without permissions and fail
      not_me = make_logged_in_client("not_me", is_superuser=False)
      grant_access("not_me", "not_me", "filebrowser")
      try:
        resp = not_me.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
                           dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
        response = json.loads(resp.content)
        assert_equal(-1, response['status'], response)
        assert_true('Permission denied' in response['data'], response)
      except AttributeError:
        # Seems like a Django bug.
        # StopFutureHandlers() does not seem to work in test mode as it continues to MemoryFileUploadHandler after perm issue and so fails.
        pass
Example #18
File: views_test.py Project: QLGu/hue
def test_upload_file():
  """Test file upload"""
  cluster = pseudo_hdfs4.shared_cluster()

  try:
    USER_NAME = 'test'
    HDFS_DEST_DIR = "/tmp/fb-upload-test"
    LOCAL_FILE = __file__
    HDFS_FILE = HDFS_DEST_DIR + '/' + os.path.basename(__file__)

    cluster.fs.setuser(USER_NAME)
    client = make_logged_in_client(USER_NAME)

    cluster.fs.do_as_superuser(cluster.fs.mkdir, HDFS_DEST_DIR)
    cluster.fs.do_as_superuser(cluster.fs.chown, HDFS_DEST_DIR, USER_NAME, USER_NAME)
    cluster.fs.do_as_superuser(cluster.fs.chmod, HDFS_DEST_DIR, 0700)

    stats = cluster.fs.stats(HDFS_DEST_DIR)
    assert_equal(stats['user'], USER_NAME)
    assert_equal(stats['group'], USER_NAME)

    # Just upload the current python file
    resp = client.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR, # GET param avoids infinite looping
                       dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
    response = json.loads(resp.content)

    assert_equal(0, response['status'], response)
    stats = cluster.fs.stats(HDFS_FILE)
    assert_equal(stats['user'], USER_NAME)
    assert_equal(stats['group'], USER_NAME)

    f = cluster.fs.open(HDFS_FILE)
    actual = f.read()
    expected = file(LOCAL_FILE).read()
    assert_equal(actual, expected)

    # Upload again; this fails because the file already exists
    resp = client.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
                       dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
    response = json.loads(resp.content)
    assert_equal(-1, response['status'], response)
    assert_true('already exists' in response['data'], response)

    # Upload as another user without permissions and fail
    not_me = make_logged_in_client("not_me", is_superuser=False)
    grant_access("not_me", "not_me", "filebrowser")
    try:
      resp = not_me.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
                         dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
      response = json.loads(resp.content)
      assert_equal(-1, response['status'], response)
      assert_true('Permission denied' in response['data'], response)
    except AttributeError:
      # Seems like a Django bug.
      # StopFutureHandlers() does not seem to work in test mode as it continues to MemoryFileUploadHandler after perm issue and so fails.
      pass
  finally:
    cleanup_file(cluster, HDFS_DEST_DIR)
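
Note: both upload tests POST the destination twice, once as a query parameter (the inline comment says this avoids an upload-handler loop) and once as a form field, with the open file under the hdfs_file key. A hypothetical helper distilling that call, assuming a logged-in Django test client:

import json

def upload_to_hdfs(client, local_path, hdfs_dir):
    # status 0 in the returned dict means success; -1 carries an error in 'data'.
    with open(local_path) as fh:
        resp = client.post('/filebrowser/upload/file?dest=%s' % hdfs_dir,
                           dict(dest=hdfs_dir, hdfs_file=fh))
    return json.loads(resp.content)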
Example #19
File: tests.py Project: kthguru/hue
  def test_workflow_action_permissions(self):
    # Login as someone else
    client_not_me = make_logged_in_client(username='******', is_superuser=False, groupname='test')
    grant_access("not_me", "test", "oozie")

    action1 = Node.objects.get(name='action-name-1')

    # Edit
    finish = SHARE_JOBS.set_for_testing(True)
    try:
      response = client_not_me.get(reverse('oozie:edit_action', args=[action1.id]))
      assert_true('Permission denied' in response.content, response.content)
    finally:
      finish()

    # Edit
    finish = SHARE_JOBS.set_for_testing(True)
    try:
      response = client_not_me.post(reverse('oozie:edit_action', args=[action1.id]))
      assert_true('Permission denied' in response.content, response.content)
    finally:
      finish()

    # Delete
    finish = SHARE_JOBS.set_for_testing(True)
    try:
      response = client_not_me.post(reverse('oozie:delete_action', args=[action1.id]))
      assert_true('Permission denied' in response.content, response.content)
    finally:
      finish()

    action1.workflow.is_shared = True
    action1.workflow.save()

    # Edit
    finish = SHARE_JOBS.set_for_testing(True)
    try:
      response = client_not_me.get(reverse('oozie:edit_action', args=[action1.id]))
      assert_false('Permission denied' in response.content, response.content)
    finally:
      finish()

    # Edit
    finish = SHARE_JOBS.set_for_testing(True)
    try:
      response = client_not_me.post(reverse('oozie:edit_action', args=[action1.id]))
      assert_true('Not allowed' in response.content, response.content)
    finally:
      finish()

    # Delete
    finish = SHARE_JOBS.set_for_testing(True)
    try:
      response = client_not_me.post(reverse('oozie:delete_action', args=[action1.id]))
      assert_true('Not allowed' in response.content, response.content)
    finally:
      finish()
Example #20
  def setUp(self):
    self.client = make_logged_in_client(username="******", groupname="test", recreate=False, is_superuser=False)
    self.user = User.objects.get(username='******')

    add_to_group('test')
    grant_access("test", "test", "notebook")

    self.db = dbms.get(self.user, get_query_server_config())
    self.api = HS2Api(self.user)
Example #21
def test_useradmin_ldap_user_group_membership_sync():
  settings.MIDDLEWARE_CLASSES.append('useradmin.middleware.LdapSynchronizationMiddleware')

  reset_all_users()
  reset_all_groups()

  # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
  ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
  # Make sure LDAP groups exist or they won't sync
  import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
  import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)

  reset = []

  # Set to nonsensical value just to force new config usage.
  # Should continue to use cached connection.
  reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))

  try:
    # Import curly who is part of TestUsers and Test Administrators
    import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'curly', sync_groups=False, import_by_dn=False)

    # Set a password so that we can login
    user = User.objects.get(username='******')
    user.set_password('test')
    user.save()

    # Should have 0 groups
    assert_equal(0, user.groups.all().count())

    # Make an authenticated request as curly so that we can see call middleware.
    c = make_logged_in_client('curly', 'test', is_superuser=False)
    grant_access("curly", "test", "useradmin")
    response = c.get('/useradmin/users')

    # Refresh user groups
    user = User.objects.get(username='******')

    # Should have 3 groups now. 2 from LDAP and 1 from 'grant_access' call.
    assert_equal(3, user.groups.all().count(), user.groups.all())

    # Now remove a group and try again.
    old_group = ldap_access.CACHED_LDAP_CONN._instance.users['curly']['groups'].pop()

    # Make an authenticated request as curly so that we can see call middleware.
    response = c.get('/useradmin/users')

    # Refresh user groups
    user = User.objects.get(username='******')

    # Should have 2 groups now. 1 from LDAP and 1 from 'grant_access' call.
    assert_equal(3, user.groups.all().count(), user.groups.all())
  finally:
    settings.MIDDLEWARE_CLASSES.remove('useradmin.middleware.LdapSynchronizationMiddleware')

    for finish in reset:
      finish()
Example #22
  def setup_class(cls):
    cls.client = make_logged_in_client(username='******', is_superuser=False)
    cls.user = User.objects.get(username='******')
    cls.user = rewrite_user(cls.user)
    add_to_group('test')
    grant_access("test", "test", "metadata")

    if not is_live_cluster() or not has_navigator(cls.user):
      raise SkipTest
Example #23
File: tests.py Project: apanly/hue
  def setUp(self):
    self.client = make_logged_in_client(username="******")
    self.client_not_me = make_logged_in_client(username="******")

    self.user = User.objects.get(username="******")
    self.user_not_me = User.objects.get(username="******")

    grant_access(self.user.username, self.user.username, "desktop")
    grant_access(self.user_not_me.username, self.user_not_me.username, "desktop")
Example #24
File: test_base.py Project: rnirmal/hue
 def setup_class(cls):
     cls.db_name = get_db_prefix(name="hive")
     cls.cluster, shutdown = get_shared_beeswax_server(cls.db_name)
     cls.client = make_logged_in_client(username="******", is_superuser=False)
     add_to_group("test")
     grant_access("test", "test", "beeswax")
     # Weird redirection to avoid binding nonsense.
     cls.shutdown = [shutdown]
     cls.init_beeswax_db()
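
Note: the "weird redirection" comment is about Python's descriptor protocol: a plain function stored directly as a class attribute gets bound into a method at lookup time (so calling it through an instance passes the instance as an unwanted argument), while a function tucked inside a list comes back untouched. A tiny runnable sketch:

def shutdown():
    return 'down'

class Direct(object):
    cb = shutdown

class Wrapped(object):
    cb = [shutdown]

Wrapped.cb[0]()      # fine: list indexing does no descriptor binding
try:
    Direct().cb()    # TypeError: shutdown() takes no arguments (1 given)
except TypeError:
    pass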
Example #25
File: tests.py Project: 18600597055/hue
  def setup_class(cls):

    if not is_live_cluster():
      raise SkipTest('These tests can only run on a live cluster')

    cls.client = make_logged_in_client(username='******', is_superuser=False)
    cls.user = User.objects.get(username='******')
    add_to_group('test')
    grant_access("test", "test", "indexer")
Example #26
  def setUp(self):
    self.client = make_logged_in_client(username="******", recreate=True, is_superuser=False)
    self.client_not_me = make_logged_in_client(username="******", recreate=True, is_superuser=False)

    self.user = User.objects.get(username="******")
    self.user_not_me = User.objects.get(username="******")

    grant_access(self.user.username, self.user.username, "desktop")
    grant_access(self.user_not_me.username, self.user_not_me.username, "desktop")
Example #27
  def setup_class(cls):

    if not is_live_cluster():
      raise SkipTest()

    cls.client = make_logged_in_client(username='******', is_superuser=False)
    cls.user = User.objects.get(username='******')
    add_to_group('test')
    grant_access("test", "test", "libzookeeper")
Example #28
File: tests_doc2.py Project: antbell/hue
  def setUp(self):
    self.client = make_logged_in_client(username="******", groupname="doc2", recreate=True, is_superuser=False)
    self.user = User.objects.get(username="******")
    grant_access("doc2", "doc2", "beeswax")

    # Setup Home dir this way currently
    response = self.client.get('/desktop/api2/docs/')
    data = json.loads(response.content)

    assert_equal('/', data['path'], data)
Example #29
File: test_base.py Project: shobull/hue
 def setup_class(cls):
   cls.db_name = get_db_prefix(name='hive')
   cls.cluster, shutdown = get_shared_beeswax_server(cls.db_name)
   cls.client = make_logged_in_client(username='******', is_superuser=False)
   add_to_group('test', 'test')
   grant_access('test', 'test', 'beeswax')
   grant_access('test', 'test', 'metastore')
   # Weird redirection to avoid binding nonsense.
   cls.shutdown = [ shutdown ]
   cls.init_beeswax_db()
Example #30
  def setUp(self):
    self.c = make_logged_in_client(username='******', is_superuser=False)
    grant_access('test', 'test', 'filebrowser')
    add_to_group('test')
    self.user = User.objects.get(username='******')

    self.cluster = pseudo_hdfs4.shared_cluster()
    self.cluster.fs.setuser('test')
    self.prefix = self.cluster.fs_prefix + '/filebrowser'
    self.cluster.fs.do_as_user('test', self.cluster.fs.create_home_dir, '/user/test')
Example #31
 def setUp(self):
     self.cluster = pseudo_hdfs4.shared_cluster()
     self.cli = make_logged_in_client(username='******', is_superuser=True)
     grant_access('admin', 'admin', 'filebrowser')
     self.cluster.fs.setuser('admin')
Example #32
File: tests.py Project: thinker0/hue
def test_dump_config():
    c = make_logged_in_client()

    CANARY = "abracadabra"

    # Depending on the order of the conf.initialize() in settings, the set_for_testing() are not seen in the global settings variable
    clear = HIVE_SERVER_HOST.set_for_testing(CANARY)

    response1 = c.get(reverse('desktop.views.dump_config'))
    assert_true(CANARY in response1.content, response1.content)

    response2 = c.get(reverse('desktop.views.dump_config'),
                      dict(private="true"))
    assert_true(CANARY in response2.content)

    # There are more private variables...
    assert_true(len(response1.content) < len(response2.content))

    clear()

    CANARY = "(localhost|127\.0\.0\.1):(50030|50070|50060|50075)"
    clear = proxy.conf.WHITELIST.set_for_testing(CANARY)

    response1 = c.get(reverse('desktop.views.dump_config'))
    assert_true(CANARY in response1.content)

    clear()

    # Malformed port per HUE-674
    CANARY = "asdfoijaoidfjaosdjffjfjaoojosjfiojdosjoidjfoa"
    clear = HIVE_SERVER_HOST.set_for_testing(CANARY)

    response1 = c.get(reverse('desktop.views.dump_config'))
    assert_true(CANARY in response1.content, response1.content)

    clear()

    CANARY = '/tmp/spacé.dat'
    finish = proxy.conf.WHITELIST.set_for_testing(CANARY)
    try:
        response = c.get(reverse('desktop.views.dump_config'))
        assert_true(CANARY in response.content, response.content)
    finally:
        finish()

    # Not showing some passwords
    response = c.get(reverse('desktop.views.dump_config'))
    assert_false('bind_password' in response.content)

    # Login as someone else
    client_not_me = make_logged_in_client(username='******',
                                          is_superuser=False,
                                          groupname='test')
    grant_access("not_me", "test", "desktop")

    response = client_not_me.get(reverse('desktop.views.dump_config'))
    assert_true("You must be a superuser" in response.content,
                response.content)

    os.environ["HUE_CONF_DIR"] = "/tmp/test_hue_conf_dir"
    resp = c.get(reverse('desktop.views.dump_config'))
    del os.environ["HUE_CONF_DIR"]
    assert_true('/tmp/test_hue_conf_dir' in resp.content, resp)
Example #33
  def test_job(self):
    """
    Test new job views.

    The status of the jobs should be the same as the status reported back by oozie.
    In this case, all jobs should succeed.
    """
    # Clone design
    assert_equal(0, OozieDesign.objects.filter(owner__username=self.username).count())
    self.client.post('/jobsub/clone_design/%d' % self.sleep_design_id)
    assert_equal(1, OozieDesign.objects.filter(owner__username=self.username).count())

    # Run the sleep example, since it doesn't require user home directory
    design_id = OozieDesign.objects.get(owner__username=self.username).id
    response = self.client.post("/jobsub/submit_design/%d" % (design_id,),
      dict(map_sleep_time=1,
           num_maps=1,
           num_reduces=1,
           reduce_sleep_time=1),
      follow=True)
    oozie_jobid = response.context['jobid']
    job = OozieServerProvider.wait_until_completion(oozie_jobid, timeout=120, step=1)
    hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)

    # All jobs page and fetch job ID
    # Taking advantage of the fact new jobs are at the top of the list!
    response = self.client.get('/jobbrowser/jobs/')
    assert_true(hadoop_job_id in response.content)

    # Make sure job succeeded
    response = self.client.get('/jobbrowser/jobs/?state=completed')
    assert_true(hadoop_job_id in response.content)
    response = self.client.get('/jobbrowser/jobs/?state=failed')
    assert_false(hadoop_job_id in response.content)
    response = self.client.get('/jobbrowser/jobs/?state=running')
    assert_false(hadoop_job_id in response.content)
    response = self.client.get('/jobbrowser/jobs/?state=killed')
    assert_false(hadoop_job_id in response.content)

    # Check sharing permissions
    # Login as ourself
    finish = SHARE_JOBS.set_for_testing(True)
    try:
      response = self.client.get('/jobbrowser/jobs/?user=******')
      assert_true(hadoop_job_id in response.content)
    finally:
      finish()

    # Login as someone else
    client_not_me = make_logged_in_client('not_me', is_superuser=False, groupname='test')
    grant_access("not_me", "test", "jobbrowser")

    finish = SHARE_JOBS.set_for_testing(True)
    try:
      response = client_not_me.get('/jobbrowser/jobs/?user=******')
      assert_true(hadoop_job_id in response.content)
    finally:
      finish()

    finish = SHARE_JOBS.set_for_testing(False)
    try:
      response = client_not_me.get('/jobbrowser/jobs/?user=******')
      assert_false(hadoop_job_id in response.content)
    finally:
      finish()

    # Single job page
    response = self.client.get('/jobbrowser/jobs/%s' % hadoop_job_id)

    # Check some counters for single job.
    counters = response.context['job'].counters
    counters_file_bytes_written = counters['org.apache.hadoop.mapreduce.FileSystemCounter']['counters']['FILE_BYTES_WRITTEN']
    assert_true(counters_file_bytes_written['map'] > 0)
    assert_true(counters_file_bytes_written['reduce'] > 0)

    # We can't just check the complete contents of the python map because the
    # SLOTS_MILLIS_* entries have a variable number of milliseconds from
    # run-to-run.
    assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['TOTAL_LAUNCHED_MAPS']['total'], 1)
    assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['TOTAL_LAUNCHED_REDUCES']['total'], 1)
    assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['FALLOW_SLOTS_MILLIS_MAPS']['total'], 0)
    assert_equal(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['FALLOW_SLOTS_MILLIS_REDUCES']['total'], 0)
    assert_true(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['SLOTS_MILLIS_MAPS']['total'] > 0)
    assert_true(response.context['job'].counters['org.apache.hadoop.mapreduce.JobCounter']['counters']['SLOTS_MILLIS_REDUCES']['total'] > 0)

    # There should be 4 tasks for this job: cleanup, setup, map, reduce
    response = self.client.get('/jobbrowser/jobs/%s/tasks' % (hadoop_job_id,))
    assert_equal(4, len(response.context['page'].object_list))
    # Select by tasktype
    response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktype=reduce' % (hadoop_job_id,))
    assert_equal(1, len(response.context['page'].object_list))
    # Select by taskstate
    response = self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (hadoop_job_id,))
    assert_equal(4, len(response.context['page'].object_list))
    # Select by text
    response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktext=clean' % (hadoop_job_id,))
    assert_equal(1, len(response.context['page'].object_list))

    # Test job single logs page
    response = self.client.get('/jobbrowser/jobs/%s/single_logs' % (hadoop_job_id))
    assert_true('syslog' in response.content)
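
Note: the nested counter lookups in the example above imply this shape for the job counters structure (values here are illustrative only):

counters = {
    'org.apache.hadoop.mapreduce.FileSystemCounter': {
        'counters': {
            'FILE_BYTES_WRITTEN': {'map': 1024, 'reduce': 512, 'total': 1536},
        }
    }
}
assert counters['org.apache.hadoop.mapreduce.FileSystemCounter']['counters']['FILE_BYTES_WRITTEN']['map'] > 0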
Example #34
    def setUp(self):
        self.c = make_logged_in_client(is_superuser=False)
        grant_access("test", "test", "indexer")
        add_to_group("test")

        self.finish = ENABLE_NEW_INDEXER.set_for_testing(True)
Example #35
  def setUp(self):
    self.client = make_logged_in_client(username="******", groupname="empty", recreate=True, is_superuser=False)

    self.user = User.objects.get(username="******")

    grant_access("test", "empty", "impala")
Example #36
    def test_useradmin_ldap_user_group_membership_sync(self):
        settings.MIDDLEWARE_CLASSES.append(
            'useradmin.middleware.LdapSynchronizationMiddleware')

        try:
            # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
            ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
            # Make sure LDAP groups exist or they won't sync
            import_ldap_groups(ldap_access.CACHED_LDAP_CONN,
                               'TestUsers',
                               import_members=False,
                               import_members_recursive=False,
                               sync_users=False,
                               import_by_dn=False)
            import_ldap_groups(ldap_access.CACHED_LDAP_CONN,
                               'Test Administrators',
                               import_members=False,
                               import_members_recursive=False,
                               sync_users=False,
                               import_by_dn=False)

            # Import curly who is part of TestUsers and Test Administrators
            import_ldap_users(ldap_access.CACHED_LDAP_CONN,
                              'curly',
                              sync_groups=False,
                              import_by_dn=False)

            # Set a password so that we can login
            user = User.objects.get(username='******')
            user.set_password('test')
            user.save()

            # Should have 0 groups
            assert_equal(0, user.groups.all().count())

            # Make an authenticated request as curly so that we can see call middleware.
            c = make_logged_in_client('curly', 'test', is_superuser=False)
            grant_access("curly", "test", "useradmin")
            response = c.get('/useradmin/users')

            # Refresh user groups
            user = User.objects.get(username='******')

            # Should have 3 groups now. 2 from LDAP and 1 from 'grant_access' call.
            assert_equal(3, user.groups.all().count(), user.groups.all())

            # Now remove a group and try again.
            old_group = ldap_access.CACHED_LDAP_CONN._instance.users['curly'][
                'groups'].pop()

            # Make an authenticated request as curly so that we can see call middleware.
            response = c.get('/useradmin/users')

            # Refresh user groups
            user = User.objects.get(username='******')

            # Should have 2 groups now. 1 from LDAP and 1 from 'grant_access' call.
            assert_equal(3, user.groups.all().count(), user.groups.all())
        finally:
            settings.MIDDLEWARE_CLASSES.remove(
                'useradmin.middleware.LdapSynchronizationMiddleware')
Example #37
  def setUp(self):
    self.client = make_logged_in_client(username="******", groupname="default", recreate=True, is_superuser=False)

    self.user = rewrite_user(User.objects.get(username="******"))
    grant_access("test", "default", "notebook")
Example #38
    def setUp(self):
        self.client = make_logged_in_client(username="******",
                                            groupname="default",
                                            recreate=True,
                                            is_superuser=False)
        self.client_not_me = make_logged_in_client(username="******",
                                                   groupname="default",
                                                   recreate=True,
                                                   is_superuser=False)

        self.user = User.objects.get(username="******")
        self.user_not_me = User.objects.get(username="******")

        # Beware: Monkey patch HS2API Mock API
        if not hasattr(
                notebook.connectors.hiveserver2,
                'original_HS2Api'):  # Could not monkey patch base.get_api
            notebook.connectors.hiveserver2.original_HS2Api = notebook.connectors.hiveserver2.HS2Api
        notebook.connectors.hiveserver2.HS2Api = MockedApi

        originalCluster.get_hdfs()
        self.original_fs = originalCluster.FS_CACHE["default"]
        originalCluster.FS_CACHE["default"] = MockFs()

        grant_access("test", "default", "notebook")
        grant_access("test", "default", "beeswax")
        grant_access("test", "default", "hive")
        grant_access("not_perm_user", "default", "notebook")
        grant_access("not_perm_user", "default", "beeswax")
        grant_access("not_perm_user", "default", "hive")
        add_permission('test',
                       'has_adls',
                       permname='adls_access',
                       appname='filebrowser')
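
Note: examples #12 and #38 both guard their monkey patches with a save-once hasattr check, so a repeated setUp never overwrites the stashed original. Distilled below, with a hypothetical restore helper for tearDown:

import notebook.connectors.hiveserver2 as hs2

def patch_hs2_api(mock_cls):
    if not hasattr(hs2, 'original_HS2Api'):   # stash the original only once
        hs2.original_HS2Api = hs2.HS2Api
    hs2.HS2Api = mock_cls

def restore_hs2_api():
    if hasattr(hs2, 'original_HS2Api'):
        hs2.HS2Api = hs2.original_HS2Api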
Example #39
def test_job_design_cycle():
    """
  Tests for the "job design" CMS.
  Submission requires a cluster, so that's separate.
  """
    c = make_logged_in_client()

    # New should give us a form.
    response = c.get('/jobsub/new_design/java')
    assert_equal(
        1,
        response.content.count(
            'action="/jobsub/new_design/java" method="POST"'))

    # Streaming also:
    response = c.get('/jobsub/new_design/streaming')
    assert_equal(
        1,
        response.content.count(
            'action="/jobsub/new_design/streaming" method="POST"'))

    # Post back to create a new submission
    design_count = OozieDesign.objects.count()
    response = c.post(
        '/jobsub/new_design/java', {
            u'wf-name': [u'name-1'],
            u'wf-description': [u'description name-1'],
            u'action-args': [u'x y z'],
            u'action-main_class': [u'MyClass'],
            u'action-jar_path': [u'myfile.jar'],
            u'action-java_opts': [u''],
            u'action-archives': [u'[]'],
            u'action-job_properties': [u'[]'],
            u'action-files': [u'[]']
        })
    assert_equal(design_count + 1, OozieDesign.objects.count())
    job_id = OozieDesign.objects.get(name='name-1').id

    response = c.post(
        '/jobsub/new_design/mapreduce', {
            u'wf-name': [u'name-2'],
            u'wf-description': [u'description name-2'],
            u'action-args': [u'x y z'],
            u'action-jar_path': [u'myfile.jar'],
            u'action-archives': [u'[]'],
            u'action-job_properties': [u'[]'],
            u'action-files': [u'[]']
        })

    # Follow it
    edit_url = '/jobsub/edit_design/%d' % job_id
    response = c.get(edit_url)
    assert_true('x y z' in response.content, response.content)

    # Make an edit
    response = c.post(
        edit_url, {
            u'wf-name': [u'name-1'],
            u'wf-description': [u'description name-1'],
            u'action-args': [u'a b c'],
            u'action-main_class': [u'MyClass'],
            u'action-jar_path': [u'myfile.jar'],
            u'action-java_opts': [u''],
            u'action-archives': [u'[]'],
            u'action-job_properties': [u'[]'],
            u'action-files': [u'[]']
        })
    assert_true('a b c' in c.get(edit_url).content)

    # Try to post
    response = c.post(
        '/jobsub/new_design/java',
        dict(name='test2',
             jarfile='myfile.jar',
             arguments='x y z',
             submit='Save'))
    assert_false('This field is required' in response.content)

    # Now check list
    response = c.get('/jobsub/')
    for design in OozieDesign.objects.all():
        assert_true(design.name in response.content, response.content)

    # With some filters
    response = c.get('/jobsub/', dict(name='name-1'))
    assert_true('name-1' in response.content, response.content)
    assert_false('name-2' in response.content, response.content)

    response = c.get('/jobsub/', dict(owner='doesnotexist'))
    assert_false('doesnotexist' in response.content)

    response = c.get('/jobsub/', dict(owner='test', name='name-1'))
    assert_true('name-1' in response.content, response.content)
    assert_false('name-2' in response.content, response.content)

    response = c.get('/jobsub/', dict(name="name"))
    assert_true('name-1' in response.content, response.content)
    assert_true('name-2' in response.content, response.content)
    assert_false('doesnotexist' in response.content, response.content)

    # Combined filters
    response = c.get('/jobsub/', dict(owner="test", name="name-2"))
    assert_false('name-1' in response.content, response.content)
    assert_true('name-2' in response.content, response.content)
    assert_false('doesnotexist' in response.content, response.content)

    # Try delete
    job_id = OozieDesign.objects.get(name='name-1').id
    response = c.post('/jobsub/delete_design/%d' % job_id)
    assert_raises(OozieDesign.DoesNotExist, OozieDesign.objects.get, id=job_id)

    # Let's make sure we can't delete other people's designs.
    c.logout()
    c = make_logged_in_client('test2', is_superuser=False)
    grant_access('test2', 'test-grp', 'jobsub')

    not_mine = OozieDesign.objects.get(name='name-2')
    response = c.post('/jobsub/delete_design/%d' % not_mine.id)
    assert_true('Permission denied.' in response.content, response.content)
Example #40
  def setUp(self):
    self.client = make_logged_in_client(username="******", groupname="default", recreate=True, is_superuser=False)

    self.user = User.objects.get(username="******")

    grant_access(self.user.username, self.user.username, "desktop")
Example #41
File: tests.py Project: yhanwen/hue
    def test_job(self):
        """
    Test new job views.

    The status of the jobs should be the same as the status reported back by oozie.
    In this case, all jobs should succeed.
    """
        # Run the sleep example, since it doesn't require user home directory
        design_id = self.design.id
        response = self.client.post(reverse('oozie:submit_workflow',
                                            args=[design_id]),
                                    data={
                                        u'form-MAX_NUM_FORMS': [u''],
                                        u'form-INITIAL_FORMS': [u'1'],
                                        u'form-0-name':
                                        [u'REDUCER_SLEEP_TIME'],
                                        u'form-0-value': [u'1'],
                                        u'form-TOTAL_FORMS': [u'1']
                                    },
                                    follow=True)
        oozie_jobid = response.context['oozie_workflow'].id
        OozieServerProvider.wait_until_completion(oozie_jobid,
                                                  timeout=120,
                                                  step=1)
        hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
        hadoop_job_id_short = views.get_shorter_id(hadoop_job_id)

        # All jobs page and fetch job ID
        # Taking advantage of the fact new jobs are at the top of the list!
        response = self.client.get('/jobbrowser/jobs/?format=json')
        assert_true(hadoop_job_id_short in response.content, response.content)

        # Make sure job succeeded
        response = self.client.get(
            '/jobbrowser/jobs/?format=json&state=completed')
        assert_true(hadoop_job_id_short in response.content)
        response = self.client.get(
            '/jobbrowser/jobs/?format=json&state=failed')
        assert_false(hadoop_job_id_short in response.content)
        response = self.client.get(
            '/jobbrowser/jobs/?format=json&state=running')
        assert_false(hadoop_job_id_short in response.content)
        response = self.client.get(
            '/jobbrowser/jobs/?format=json&state=killed')
        assert_false(hadoop_job_id_short in response.content)

        # Test tracker page
        early_task_id = hadoop_job_id.replace('job', 'task') + '_m_000000'
        response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' %
                                   (hadoop_job_id, early_task_id))

        tracker_url = re.search('<a href="(/jobbrowser/trackers/.+?)"',
                                response.content).group(1)
        response = self.client.get(tracker_url)
        assert_true('Tracker at' in response.content)

        # Check sharing permissions
        # Login as ourself
        finish = SHARE_JOBS.set_for_testing(True)
        try:
            response = self.client.get('/jobbrowser/jobs/?format=json&user=')
            assert_true(hadoop_job_id_short in response.content)
        finally:
            finish()

        # Login as someone else
        client_not_me = make_logged_in_client('not_me',
                                              is_superuser=False,
                                              groupname='test')
        grant_access("not_me", "test", "jobbrowser")

        finish = SHARE_JOBS.set_for_testing(True)
        try:
            response = client_not_me.get('/jobbrowser/jobs/?format=json&user=')
            assert_true(hadoop_job_id_short in response.content)
        finally:
            finish()

        finish = SHARE_JOBS.set_for_testing(False)
        try:
            response = client_not_me.get('/jobbrowser/jobs/?format=json&user=')
            assert_false(hadoop_job_id_short in response.content)
        finally:
            finish()

        # Single job page
        response = self.client.get('/jobbrowser/jobs/%s' % hadoop_job_id)

        # Check some counters for single job.
        counters = response.context['job'].counters
        counters_file_bytes_written = counters[
            'org.apache.hadoop.mapreduce.FileSystemCounter']['counters'][
                'FILE_BYTES_WRITTEN']
        assert_true(counters_file_bytes_written['map'] > 0)
        assert_true(counters_file_bytes_written['reduce'] > 0)

        # We can't just check the complete contents of the python map because the
        # SLOTS_MILLIS_* entries have a variable number of milliseconds from
        # run-to-run.
        assert_equal(
            response.context['job'].
            counters['org.apache.hadoop.mapreduce.JobCounter']['counters']
            ['TOTAL_LAUNCHED_MAPS']['total'], 2L)
        assert_equal(
            response.context['job'].
            counters['org.apache.hadoop.mapreduce.JobCounter']['counters']
            ['TOTAL_LAUNCHED_REDUCES']['total'], 1L)
        assert_equal(
            response.context['job'].
            counters['org.apache.hadoop.mapreduce.JobCounter']['counters']
            ['FALLOW_SLOTS_MILLIS_MAPS']['total'], 0L)
        assert_equal(
            response.context['job'].
            counters['org.apache.hadoop.mapreduce.JobCounter']['counters']
            ['FALLOW_SLOTS_MILLIS_REDUCES']['total'], 0L)
        assert_true(response.context['job'].
                    counters['org.apache.hadoop.mapreduce.JobCounter']
                    ['counters']['SLOTS_MILLIS_MAPS']['total'] > 0)
        assert_true(response.context['job'].
                    counters['org.apache.hadoop.mapreduce.JobCounter']
                    ['counters']['SLOTS_MILLIS_REDUCES']['total'] > 0)

        # There should be 4 tasks for this job: cleanup, setup, map, reduce
        response = self.client.get('/jobbrowser/jobs/%s/tasks' %
                                   (hadoop_job_id, ))
        assert_equal(4, len(response.context['page'].object_list))
        # Select by tasktype
        response = self.client.get(
            '/jobbrowser/jobs/%s/tasks?tasktype=reduce' % (hadoop_job_id, ))
        assert_equal(1, len(response.context['page'].object_list))
        # Select by taskstate
        response = self.client.get(
            '/jobbrowser/jobs/%s/tasks?taskstate=succeeded' %
            (hadoop_job_id, ))
        assert_equal(4, len(response.context['page'].object_list))
        # Select by text
        response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktext=clean' %
                                   (hadoop_job_id, ))
        assert_equal(1, len(response.context['page'].object_list))

        # Test job single logs page
        response = self.client.get('/jobbrowser/jobs/%s/single_logs' %
                                   (hadoop_job_id))
        assert_true('syslog' in response.content)
        assert_true(
            '<div class="tab-pane active" id="logsSysLog">' in response.content
            or '<div class="tab-pane active" id="logsStdErr">'
            in response.content or  # Depending on Hadoop
            '<div class="tab-pane active" id="logsStdOut">'
            in response.content,  # For jenkins
            response.content)
Example #42
File: tests.py Project: yhanwen/hue
    def test_kill_job(self):
        """
    Test job in kill state.
    """
        # Run the sleep example, since it doesn't require user home directory
        design_id = self.design.id
        response = self.client.post(reverse('oozie:submit_workflow',
                                            args=[self.design.id]),
                                    data={
                                        u'form-MAX_NUM_FORMS': [u''],
                                        u'form-INITIAL_FORMS': [u'1'],
                                        u'form-0-name':
                                        [u'REDUCER_SLEEP_TIME'],
                                        u'form-0-value': [u'1'],
                                        u'form-TOTAL_FORMS': [u'1']
                                    },
                                    follow=True)
        oozie_jobid = response.context['oozie_workflow'].id

        # Wait for a job to be created and fetch job ID
        hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)

        client2 = make_logged_in_client('test_non_superuser',
                                        is_superuser=False,
                                        groupname='test')
        grant_access('test_non_superuser', 'test', 'jobbrowser')
        response = client2.post('/jobbrowser/jobs/%s/kill' % (hadoop_job_id, ))
        assert_equal(
            "Permission denied.  User test_non_superuser cannot delete user %s's job."
            % self.username, response.context["error"])

        # Make sure that the first map task succeeds before moving on
        # This will keep us from hitting timing-related failures
        first_mapper = 'm_000000'
        start = time.time()
        timeout_sec = 60
        while first_mapper not in \
            self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (hadoop_job_id,)).content:
            time.sleep(1)
            # If this assert fails, something has probably really failed
            assert_true(time.time() - start < timeout_sec,
                        "Timed out waiting for first mapper to complete")

        # Kill task
        self.client.post('/jobbrowser/jobs/%s/kill' % (hadoop_job_id, ))

        # It should say killed at some point
        response = self.client.get('/jobbrowser/jobs/%s?format=json' %
                                   (hadoop_job_id, ))
        html = response.content.lower()
        i = 0
        while 'killed' not in html and i < 10:
            time.sleep(5)
            response = self.client.get('/jobbrowser/jobs/%s?format=json' %
                                       (hadoop_job_id, ))
            html = response.content.lower()
            i += 1

        assert_true(views.get_shorter_id(hadoop_job_id) in html)
        assert_true('killed' in html, html)

        # Exercise select by taskstate
        self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=failed' %
                        (hadoop_job_id, ))
        self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' %
                        (hadoop_job_id, ))
        self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=running' %
                        (hadoop_job_id, ))
        self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=killed' %
                        (hadoop_job_id, ))

        # Test single task page
        late_task_id = hadoop_job_id.replace('job', 'task') + '_r_000000'
        response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' %
                                   (hadoop_job_id, late_task_id))
        assert_false('succeed' in response.content)
        assert_true('killed' in response.content)

        # The first task should've succeeded
        # We use a different method of checking success for this one
        early_task_id = hadoop_job_id.replace('job', 'task') + '_m_000000'
        response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' %
                                   (hadoop_job_id, early_task_id))
        assert_true('succeed' in response.content)
        assert_false('failed' in response.content)

        # Test single attempt page
        early_task_id = hadoop_job_id.replace('job', 'task') + '_m_000000'
        attempt_id = early_task_id.replace('task', 'attempt') + '_0'
        response = self.client.get(
            '/jobbrowser/jobs/%s/tasks/%s/attempts/%s/logs' %
            (hadoop_job_id, early_task_id, attempt_id))
        assert_true('syslog' in response.content)

        # Test dock jobs
        response = self.client.get('/jobbrowser/dock_jobs/')
        assert_false('completed' in response.content)
        assert_false('failed' in response.content)
Example #43
  def test_kill_job(self):
    """
    Test job in kill state.
    """
    # Clone design
    assert_equal(0, OozieDesign.objects.filter(owner__username=self.username).count())
    self.client.post('/jobsub/clone_design/%d' % self.sleep_design_id)
    assert_equal(1, OozieDesign.objects.filter(owner__username=self.username).count())

    # Run the sleep example, since it doesn't require user home directory
    design_id = OozieDesign.objects.get(owner__username=self.username).id
    response = self.client.post("/jobsub/submit_design/%d" % (design_id,),
      dict(map_sleep_time=1,
           num_maps=1,
           num_reduces=1,
           reduce_sleep_time=1),
      follow=True)
    oozie_jobid = response.context['jobid']

    # Wait for a job to be created and fetch job ID
    hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)

    client2 = make_logged_in_client('test_non_superuser', is_superuser=False, groupname='test')
    grant_access('test_non_superuser', 'test', 'jobbrowser')
    response = client2.post('/jobbrowser/jobs/%s/kill' % (hadoop_job_id,))
    assert_equal("Permission denied.  User test_non_superuser cannot delete user %s's job." % self.username, response.context["error"])

    # Make sure that the first map task succeeds before moving on
    # This will keep us from hitting timing-related failures
    first_mapper = hadoop_job_id.replace('job', 'task') + '_m_000000'
    start = time.time()
    timeout_sec = 60
    while first_mapper not in \
        self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (hadoop_job_id,)).content:
      time.sleep(1)
      # If this assert fails, something has probably really failed
      assert_true(time.time() - start < timeout_sec,
          "Timed out waiting for first mapper to complete")

    # Kill task
    self.client.post('/jobbrowser/jobs/%s/kill' % (hadoop_job_id,))

    # It should say killed
    response = self.client.get('/jobbrowser/jobs/%s' % (hadoop_job_id,))
    html = response.content.lower()
    assert_true(hadoop_job_id in html)
    assert_true('killed' in html)

    # Exercise select by taskstate
    self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=failed' % (hadoop_job_id,))
    self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (hadoop_job_id,))
    self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=running' % (hadoop_job_id,))
    self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=killed' % (hadoop_job_id,))

    # Test single task page
    late_task_id = hadoop_job_id.replace('job', 'task') + '_r_000000'
    response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, late_task_id))
    assert_false('succeed' in response.content)
    assert_true('killed' in response.content)

    # The first task should've succeeded
    # We use a different method of checking success for this one
    early_task_id = hadoop_job_id.replace('job', 'task') + '_m_000000'
    response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, early_task_id))
    assert_true('succeed' in response.content)
    assert_false('failed' in response.content)

    # Test single attempt page
    early_task_id = hadoop_job_id.replace('job', 'task') + '_m_000000'
    attempt_id = early_task_id.replace('task', 'attempt') + '_0'
    response = self.client.get('/jobbrowser/jobs/%s/tasks/%s/attempts/%s/logs' %
                          (hadoop_job_id, early_task_id, attempt_id))
    assert_true('syslog' in response.content)

    # Test dock jobs
    response = self.client.get('/jobbrowser/dock_jobs/')
    assert_false('completed' in response.content)
    assert_false('failed' in response.content)
Example #44
File: tests.py Project: igloosec/hue
 def setUp(self):
   self.c = make_logged_in_client(is_superuser=False)
   grant_access("test", "test", "pig")
   self.user = User.objects.get(username='******')