Example #1
0
def test_hdfs_full_copy():
  minicluster = pseudo_hdfs4.shared_cluster()
  minifs = minicluster.fs

  try:
    minifs.do_as_superuser(minifs.chmod, '/', 0777)
    minifs.mkdir('/copy_test')
    minifs.mkdir('/copy_test/src')
    minifs.mkdir('/copy_test/dest')

    # File to directory copy.
    # No guarantees on file permissions at the moment.
    data = "I will not make flatuent noises in class\n" * 2000
    minifs.create('/copy_test/src/file.txt', permission=0646, data=data)
    minifs.copy('/copy_test/src/file.txt', '/copy_test/dest')
    assert_true(minifs.exists('/copy_test/dest/file.txt'))

    # Directory to directory copy.
    # No guarantees on directory permissions at the moment.
    minifs.copy('/copy_test/src', '/copy_test/dest', True)
    assert_true(minifs.exists('/copy_test/dest/src'))

    # Copy directory to file should fail.
    try:
      minifs.copy('/copy_test/src', '/copy_test/dest/file.txt', True)
    except IOError, e:
      pass
    except Exception, e:
      raise
Example #2
0
def test_ensure_home_directory_sync_ldap_users_groups():
  """Syncing LDAP users with ensure_home_directory creates the HDFS home dir."""
  URL = reverse(sync_ldap_users_groups)

  reset_all_users()
  reset_all_groups()

  # Route LDAP calls through the in-memory test connection.
  ldap_access.CACHED_LDAP_CONN = LdapTestConnection()

  cluster = pseudo_hdfs4.shared_cluster()
  client = make_logged_in_client(cluster.superuser, is_superuser=True)
  cluster.fs.setuser(cluster.superuser)

  # A nonsensical server config forces new-config usage; the code should
  # still fall back to the cached connection installed above.
  finishers = [desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config())]

  try:
    client.post(reverse(add_ldap_users), dict(server='nonsense', username_pattern='curly', password1='test', password2='test'))
    assert_false(cluster.fs.exists('/user/curly'))

    # The sync request itself must succeed and create the home dir.
    assert_true(client.post(URL, dict(server='nonsense', ensure_home_directory=True)))
    assert_true(cluster.fs.exists('/user/curly'))
  finally:
    for finish in finishers:
      finish()

    if cluster.fs.exists('/user/curly'):
      cluster.fs.rmtree('/user/curly')
Example #3
0
  def setUpClass(cls):
    cls.cluster = pseudo_hdfs4.shared_cluster()
    cls.prefix = cls.cluster.fs_prefix + '/WebhdfsTests'

    cls.cluster.fs.setuser('test')
    cls.cluster.fs.mkdir(cls.prefix)
    cls.cluster.fs.chmod(cls.prefix, 01777)
Example #4
0
def test_config_validator_more():
  # TODO: Setup DN to not load the plugin, which is a common user error.

  # The mini cluster object itself is unused; spinning it up is what
  # produces the configuration this check runs against.
  minicluster = pseudo_hdfs4.shared_cluster()
  client = make_logged_in_client()

  finishers = (
    conf.MR_CLUSTERS["default"].HOST.set_for_testing("localhost"),
    conf.MR_CLUSTERS['default'].JT_THRIFT_PORT.set_for_testing(23),
  )
  saved_caches = cluster.clear_caches()
  try:
    resp = client.get('/debug/check_config')

    # All HDFS checks should pass; only the bogus JobTracker port fails.
    for message in ('Failed to access filesystem root',
                    'Failed to create',
                    'Failed to chown',
                    'Failed to delete'):
      assert_false(message in resp.content)
    assert_true('Failed to contact JobTracker plugin' in resp.content)
  finally:
    for finish in finishers:
      finish()
    cluster.restore_caches(saved_caches)
Example #5
0
def test_chown():
  """Chown endpoint basics, plus form-field visibility per user class."""
  cluster = pseudo_hdfs4.shared_cluster()

  # Only the Hadoop superuser really has carte blanche here.
  client = make_logged_in_client(cluster.superuser)
  cluster.fs.setuser(cluster.superuser)

  PATH = u"/test-chown-en-Español"
  cluster.fs.mkdir(PATH)
  client.post("/filebrowser/chown", dict(path=PATH, user="******", group="y"))
  stats = cluster.fs.stats(PATH)
  assert_equal("x", stats["user"])
  assert_equal("y", stats["group"])
  # user_other overrides the user dropdown.
  client.post("/filebrowser/chown", dict(path=PATH, user="******", user_other="z", group="y"))
  assert_equal("z", cluster.fs.stats(PATH)["user"])

  # A regular user's chown form should omit the "other" option, while the
  # superuser's form has all the fields it could dream of.
  PATH = '/filebrowser/chown-regular-user'
  cluster.fs.mkdir(PATH)
  cluster.fs.chown(PATH, 'chown_test', 'chown_test')
  resp = client.get('/filebrowser/chown', dict(path=PATH, user='******', group='chown_test'))
  assert_true('<option value="__other__"' in resp.content)
  client = make_logged_in_client('chown_test')
  resp = client.get('/filebrowser/chown', dict(path=PATH, user='******', group='chown_test'))
  assert_false('<option value="__other__"' in resp.content)
Example #6
0
def test_view_i18n():
    cluster = pseudo_hdfs4.shared_cluster()
    try:
        cluster.fs.setuser(cluster.superuser)
        cluster.fs.mkdir("/test-filebrowser/")

        # Test viewing files in different encodings
        content = u"pt-Olá en-hello ch-你好 ko-안녕 ru-Здравствуйте"
        view_helper(cluster, "utf-8", content)
        view_helper(cluster, "utf-16", content)

        content = u"你好-big5"
        view_helper(cluster, "big5", content)

        content = u"こんにちは-shift-jis"
        view_helper(cluster, "shift_jis", content)

        content = u"안녕하세요-johab"
        view_helper(cluster, "johab", content)

        # Test that the default view is home
        c = make_logged_in_client()
        response = c.get("/filebrowser/view/")
        assert_equal(response.context["path"], "/")
        cluster.fs.mkdir("/user/test")
        cluster.fs.chown("/user/test", "test", "test")
        response = c.get("/filebrowser/view/?default_to_home=1")
        assert_equal("http://testserver/filebrowser/view/user/test", response["location"])
    finally:
        try:
            cluster.fs.rmtree("/test-filebrowser/")
            cluster.fs.rmtree("/user/test")
        except Exception, ex:
            LOG.error("Failed to cleanup test directory: %s" % (ex,))
Example #7
0
def test_remove():
    cluster = pseudo_hdfs4.shared_cluster()

    try:
        client = make_logged_in_client(cluster.superuser)
        cluster.fs.setuser(cluster.superuser)

        prefix = "/test-delete"
        paths = ["/%s/%s" % (prefix, name) for name in ("1", "2", "3")]
        cluster.fs.mkdir(prefix)
        for path in paths:
            cluster.fs.mkdir(path)
        for path in paths:
            assert_true(cluster.fs.exists(path))

        # Remove a single directory.
        client.post("/filebrowser/rmtree", dict(path=[paths[0]]))
        assert_false(cluster.fs.exists(paths[0]))
        assert_true(cluster.fs.exists(paths[1]))
        assert_true(cluster.fs.exists(paths[2]))

        # Remove several directories in one request.
        client.post("/filebrowser/rmtree", dict(path=[paths[1], paths[2]]))
        for path in paths:
            assert_false(cluster.fs.exists(path))

    finally:
        try:
            cluster.fs.rmtree(prefix)  # Clean up
        except:
            pass  # Don't let cleanup errors mask earlier failures
Example #8
0
def test_edit_i18n():
    cluster = pseudo_hdfs4.shared_cluster()
    try:
        cluster.fs.setuser(cluster.superuser)
        cluster.fs.mkdir("/test-filebrowser/")

        # Test utf-8
        pass_1 = u"en-hello pt-Olá ch-你好 ko-안녕 ru-Здравствуйте"
        pass_2 = pass_1 + u"yi-העלא"
        edit_helper(cluster, "utf-8", pass_1, pass_2)

        # Test utf-16
        edit_helper(cluster, "utf-16", pass_1, pass_2)

        # Test cjk
        pass_1 = u"big5-你好"
        pass_2 = pass_1 + u"世界"
        edit_helper(cluster, "big5", pass_1, pass_2)

        pass_1 = u"shift_jis-こんにちは"
        pass_2 = pass_1 + u"世界"
        edit_helper(cluster, "shift_jis", pass_1, pass_2)

        pass_1 = u"johab-안녕하세요"
        pass_2 = pass_1 + u"세상"
        edit_helper(cluster, "johab", pass_1, pass_2)
    finally:
        try:
            cluster.fs.rmtree("/test-filebrowser/")
        except Exception, ex:
            LOG.error("Failed to remove tree /test-filebrowser: %s" % (ex,))
Example #9
0
def test_touch():
    cluster = pseudo_hdfs4.shared_cluster()
    cluster.fs.setuser("test")
    client = make_logged_in_client()

    try:
        prefix = "/tmp/test-filebrowser-touch/"
        cluster.fs.mkdir(prefix)

        # A multi-level relative name and an absolute name must be rejected
        # (500); only a bare relative name succeeds (200).
        attempts = [
            ("touch_fail/file", 500),
            ("/touch_file", 500),
            ("touch_file", 200),
        ]
        for name, expected_status in attempts:
            resp = client.post("/filebrowser/touch", dict(path=prefix, name=name))
            assert_equal(expected_status, resp.status_code)

        # Read the parent dir and make sure we created 'touch_file' only.
        response = client.get("/filebrowser/view" + prefix)
        file_listing = response.context["files"]
        assert_equal(3, len(file_listing))
        assert_equal(file_listing[2]["name"], "touch_file")

    finally:
        try:
            cluster.fs.rmtree(prefix)
        except:
            pass
Example #10
0
def test_trash():
  """rmtree trashes by default; the view's default_to_trash redirects into .Trash."""
  cluster = pseudo_hdfs4.shared_cluster()

  try:
    c = make_logged_in_client()
    USERNAME = '******'
    cluster.fs.setuser(USERNAME)

    # The user must own its own home dir for a per-user .Trash to be usable.
    cluster.fs.do_as_superuser(cluster.fs.chown, '/user/%s' % USERNAME, USERNAME, USERNAME)

    HOME_TRASH_DIR = '/user/%s/.Trash/Current/user/%s' % (USERNAME, USERNAME)
    prefix = '/tmp/test_trash'
    # NOTE(review): '/%s/1' % prefix yields a doubled leading slash
    # ('//tmp/test_trash/1') — presumably normalized by HDFS; confirm.
    PATH_1 = '/%s/1' % prefix
    cluster.fs.mkdir(prefix)
    cluster.fs.mkdir(PATH_1)

    # Start from a clean slate: purge any pre-existing home trash for real.
    c.post('/filebrowser/rmtree?skip_trash=true', dict(path=[HOME_TRASH_DIR]))

    # No trash folder
    response = c.get('/filebrowser/view/user/test?default_to_trash', follow=True)
    assert_equal([], response.redirect_chain)

    # Default rmtree (no skip_trash) moves PATH_1 into the trash.
    c.post('/filebrowser/rmtree', dict(path=[PATH_1]))

    # We have a trash folder so a redirect (Current not always there)
    response = c.get('/filebrowser/view/user/test?default_to_trash', follow=True)
    assert_true(any(['.Trash' in page for page, code in response.redirect_chain]), response.redirect_chain)

    # Purge the home trash again and re-check the redirect behaviour.
    c.post('/filebrowser/rmtree?skip_trash=true', dict(path=[HOME_TRASH_DIR]))

    # No home trash, just regular root trash
    response = c.get('/filebrowser/view/user/test?default_to_trash', follow=True)
    assert_true(any(['.Trash' in page for page, code in response.redirect_chain]), response.redirect_chain)
  finally:
    cleanup_tree(cluster, prefix)
Example #11
0
def test_hdfs_full_copy():
  minicluster = pseudo_hdfs4.shared_cluster()
  minifs = minicluster.fs
  minifs.setuser('test')

  prefix = minicluster.fs_prefix + '/copy_test'
  try:
    minifs.mkdir(prefix)
    minifs.mkdir(prefix + '/src')
    minifs.mkdir(prefix + '/dest')

    # File to directory copy.
    # No guarantees on file permissions at the moment.
    data = "I will not make flatulent noises in class\n" * 2000
    minifs.create(prefix + '/src/file.txt', permission=0646, data=data)
    minifs.copy(prefix + '/src/file.txt', prefix + '/dest')
    assert_true(minifs.exists(prefix + '/dest/file.txt'))

    # Directory to directory copy.
    # No guarantees on directory permissions at the moment.
    minifs.copy(prefix + '/src', prefix + '/dest', True)
    assert_true(minifs.exists(prefix + '/dest/src'))

    # Copy directory to file should fail.
    try:
      minifs.copy(prefix + '/src', prefix + '/dest/file.txt', True)
    except IOError:
      pass
    except Exception:
      raise
  finally:
    minifs.do_as_superuser(minifs.rmtree, prefix)
Example #12
0
File: tests.py Project: kthguru/hue
def test_ensure_home_directory_add_ldap_user():
    """Adding an LDAP user creates its HDFS home dir only when requested."""
    URL = reverse(add_ldap_user)

    reset_all_users()
    reset_all_groups()

    # Route LDAP calls through the in-memory test connection.
    ldap_access.CACHED_LDAP_CONN = LdapTestConnection()

    cluster = pseudo_hdfs4.shared_cluster()
    client = make_logged_in_client(cluster.superuser, is_superuser=True)
    cluster.fs.setuser(cluster.superuser)

    assert_true(client.get(URL))

    # Without ensure_home_directory, no home dir appears.
    response = client.post(URL, dict(username="******", password1="test", password2="test"))
    assert_true("/useradmin/users" in response["Location"])
    assert_false(cluster.fs.exists("/user/moe"))

    # With ensure_home_directory, the home dir is created.
    response = client.post(URL, dict(username="******", password1="test", password2="test", ensure_home_directory=True))
    assert_true("/useradmin/users" in response["Location"])
    assert_true(cluster.fs.exists("/user/curly"))

    # A rejected username yields a form error and creates nothing.
    response = client.post(URL, dict(username="******", password1="test", password2="test"))
    assert_true("Could not" in response.context["form"].errors["username"][0])
    assert_false(cluster.fs.exists("/user/bad_name"))

    # moe, who did not ask for a home directory, still has none.
    assert_false(cluster.fs.exists("/user/moe"))

    # Clean up
    cluster.fs.rmtree("/user/curly")
Example #13
0
def get_shared_beeswax_server(db_name='default'):
  """Start (at most once per process) a shared HiveServer2 and memoize it.

  Polls for an open session for up to ~30 seconds before giving up.
  NOTE(review): this excerpt ends right after the module globals are set;
  presumably the original goes on to return them — confirm against the
  full source.
  """
  global _SHARED_HIVE_SERVER
  global _SHARED_HIVE_SERVER_CLOSER
  if _SHARED_HIVE_SERVER is None:

    cluster = pseudo_hdfs4.shared_cluster()

    # On a live cluster there is nothing local to start, so the closer is
    # a no-op; otherwise start a mini HS2 and keep its closer.
    if is_live_cluster():
      def s():
        pass
    else:
      s = _start_mini_hs2(cluster)

    start = time.time()
    started = False
    sleep = 1

    make_logged_in_client()
    user = User.objects.get(username='******')
    query_server = get_query_server_config()
    db = dbms.get(user, query_server)

    # Poll until a session opens or the 30s budget is exhausted.
    while not started and time.time() - start <= 30:
      try:
        db.open_session(user)
        started = True
        break
      except Exception, e:
        LOG.info('HiveServer2 server could not be found after: %s' % e)
        time.sleep(sleep)

    if not started:
      raise Exception("Server took too long to come up.")

    # Memoize the cluster and its closer for subsequent callers.
    _SHARED_HIVE_SERVER, _SHARED_HIVE_SERVER_CLOSER = cluster, s
Example #14
0
def test_view_parquet():
  """Viewing a binary Parquet file renders its records as readable text."""
  cluster = pseudo_hdfs4.shared_cluster()
  try:
    c = make_logged_in_client()
    cluster.fs.setuser(cluster.superuser)
    # Start clean in case an earlier run left the directory behind.
    if cluster.fs.isdir("/test-parquet-filebrowser"):
      cluster.fs.rmtree('/test-parquet-filebrowser/')

    cluster.fs.mkdir('/test-parquet-filebrowser/')

    # Parquet file encoded as hex.
    # (A small nation/region table; decoded to raw bytes below.)
    test_data = "50415231150015d40115d4012c15321500150615080000020000003201000000000100000002000000030000000400000005000000060000000700000008000000090000000a0000000b0000000c0000000d0000000e0000000f000000100000001100000012000000130000001400000015000000160000001700000018000000150015b60415b6042c1532150015061508000002000000320107000000414c474552494109000000415247454e54494e41060000004252415a494c0600000043414e41444105000000454759505408000000455448494f504941060000004652414e4345070000004745524d414e5905000000494e44494109000000494e444f4e45534941040000004952414e0400000049524151050000004a4150414e060000004a4f5244414e050000004b454e5941070000004d4f524f43434f0a0000004d4f5a414d42495155450400000050455255050000004348494e4107000000524f4d414e49410c00000053415544492041524142494107000000564945544e414d060000005255535349410e000000554e49544544204b494e47444f4d0d000000554e4954454420535441544553150015d40115d4012c1532150015061508000002000000320100000000010000000100000001000000040000000000000003000000030000000200000002000000040000000400000002000000040000000000000000000000000000000100000002000000030000000400000002000000030000000300000001000000150015d61e15d61e2c153215001506150800000200000032013300000020686167676c652e206361726566756c6c792066696e616c206465706f736974732064657465637420736c796c7920616761694c000000616c20666f7865732070726f6d69736520736c796c79206163636f7264696e6720746f2074686520726567756c6172206163636f756e74732e20626f6c6420726571756573747320616c6f6e6b0000007920616c6f6e6773696465206f66207468652070656e64696e67206465706f736974732e206361726566756c6c79207370656369616c207061636b61676573206172652061626f7574207468652069726f6e696320666f726765732e20736c796c79207370656369616c20650000006561732068616e672069726f6e69632c2073696c656e74207061636b616765732e20736c796c7920726567756c6172207061636b616765732061726520667572696f75736c79206f76657220746865207469746865732e20666c756666696c7920626f6c6463000000792061626f766520746865206361726566756c6c7920756e757375616c207468656f646f6c697465732e2066696e616c206475676f75747
32061726520717569636b6c79206163726f73732074686520667572696f75736c7920726567756c617220641f00000076656e207061636b616765732077616b6520717569636b6c792e207265677526000000726566756c6c792066696e616c2072657175657374732e20726567756c61722c2069726f6e693a0000006c20706c6174656c6574732e20726567756c6172206163636f756e747320782d7261793a20756e757375616c2c20726567756c6172206163636f41000000737320657863757365732063616a6f6c6520736c796c79206163726f737320746865207061636b616765732e206465706f73697473207072696e742061726f756e7200000020736c796c792065787072657373206173796d70746f7465732e20726567756c6172206465706f7369747320686167676c6520736c796c792e206361726566756c6c792069726f6e696320686f636b657920706c617965727320736c65657020626c697468656c792e206361726566756c6c320000006566756c6c7920616c6f6e6773696465206f662074686520736c796c792066696e616c20646570656e64656e636965732e20420000006e6963206465706f7369747320626f6f73742061746f702074686520717569636b6c792066696e616c2072657175657374733f20717569636b6c7920726567756c61240000006f75736c792e2066696e616c2c20657870726573732067696674732063616a6f6c652061370000006963206465706f736974732061726520626c697468656c792061626f757420746865206361726566756c6c7920726567756c61722070615d0000002070656e64696e67206578637573657320686167676c6520667572696f75736c79206465706f736974732e2070656e64696e672c20657870726573732070696e746f206265616e732077616b6520666c756666696c79207061737420745a000000726e732e20626c697468656c7920626f6c6420636f7572747320616d6f6e672074686520636c6f73656c7920726567756c6172207061636b616765732075736520667572696f75736c7920626f6c6420706c6174656c6574733f2d000000732e2069726f6e69632c20756e757375616c206173796d70746f7465732077616b6520626c697468656c7920726a000000706c6174656c6574732e20626c697468656c792070656e64696e6720646570656e64656e636965732075736520666c756666696c79206163726f737320746865206576656e2070696e746f206265616e732e206361726566756c6c792073696c656e74206163636f756e5b0000006320646570656e64656e636965732e20667572696f75736c792065787072657373206e6f746f726e697320736c65657020736c796c7
920726567756c6172206163636f756e74732e20696465617320736c6565702e206465706f736f000000756c6172206173796d70746f746573206172652061626f75742074686520667572696f7573206d756c7469706c696572732e206578707265737320646570656e64656e63696573206e61672061626f7665207468652069726f6e6963616c6c792069726f6e6963206163636f756e744e00000074732e2073696c656e7420726571756573747320686167676c652e20636c6f73656c792065787072657373207061636b6167657320736c656570206163726f73732074686520626c697468656c792e00000068656c7920656e746963696e676c792065787072657373206163636f756e74732e206576656e2c2066696e616c204f00000020726571756573747320616761696e73742074686520706c6174656c65747320757365206e65766572206163636f7264696e6720746f2074686520717569636b6c7920726567756c61722070696e743d00000065616e7320626f6f7374206361726566756c6c79207370656369616c2072657175657374732e206163636f756e7473206172652e206361726566756c6c6e000000792066696e616c207061636b616765732e20736c6f7720666f7865732063616a6f6c6520717569636b6c792e20717569636b6c792073696c656e7420706c6174656c657473206272656163682069726f6e6963206163636f756e74732e20756e757375616c2070696e746f2062651502195c48016d15080015022502180a6e6174696f6e5f6b657900150c250218046e616d650015022502180a726567696f6e5f6b657900150c2502180b636f6d6d656e745f636f6c001632191c194c26081c1502190519180a6e6174696f6e5f6b65791500163216fa0116fa01260800002682021c150c19051918046e616d651500163216dc0416dc04268202000026de061c1502190519180a726567696f6e5f6b65791500163216fa0116fa0126de06000026d8081c150c190519180b636f6d6d656e745f636f6c1500163216fc1e16fc1e26d80800001600163200280a706172717565742d6d7200ea00000050415231"

    # Write the decoded binary data into HDFS.
    f = cluster.fs.open('/test-parquet-filebrowser/test-parquet.parquet', "w")
    f.write(test_data.decode('hex'))

    # autodetect: the view should recognize Parquet without an explicit type.
    response = c.get('/filebrowser/view/test-parquet-filebrowser/test-parquet.parquet')

    # 'FRANCE' is one of the nation names embedded in the file above.
    assert_true('FRANCE' in response.context['view']['contents'])

  finally:
    try:
      cluster.fs.rmtree('/test-parquet-filebrowser/')
    except:
      pass      # Don't let cleanup errors mask earlier failures
Example #15
0
def test_touch():
  cluster = pseudo_hdfs4.shared_cluster()
  cluster.fs.setuser('test')
  client = make_logged_in_client()

  try:
    prefix = '/tmp/test-filebrowser-touch/'
    cluster.fs.mkdir(prefix)

    # A multi-level name and an absolute name are rejected (500); a bare
    # relative name is created (200).
    attempts = [
      ('touch_fail/file', 500),
      ('/touch_file', 500),
      ('touch_file', 200),
    ]
    for name, expected_status in attempts:
      resp = client.post('/filebrowser/touch', dict(path=prefix, name=name))
      assert_equal(expected_status, resp.status_code)

    # Read the parent dir and make sure we created 'touch_file' only.
    response = client.get('/filebrowser/view' + prefix)
    file_listing = response.context['files']
    assert_equal(3, len(file_listing))
    assert_equal(file_listing[2]['name'], 'touch_file')

  finally:
    cleanup_tree(cluster, prefix)
Example #16
0
  def setup_class(cls):
    """Disabled suite: raising SkipTest here makes the runner skip every test.

    The lines below the raise are intentionally unreachable; they document
    the fixtures (shared cluster, client, shared Oozie server) the suite
    would set up if re-enabled.
    """
    raise SkipTest

    cls.cluster = pseudo_hdfs4.shared_cluster()
    cls.client = make_logged_in_client()
    cls.oozie, callback = cls._get_shared_oozie_server()
    cls.shutdown = [ callback ]
Example #17
0
def test_edit_i18n():
  cluster = pseudo_hdfs4.shared_cluster()
  try:
    cluster.fs.setuser(cluster.superuser)
    cluster.fs.mkdir('/test-filebrowser/')

    # Round-trip edits in several encodings: (encoding, initial, edited).
    pass_1 = u'en-hello pt-Olá ch-你好 ko-안녕 ru-Здравствуйте'
    pass_2 = pass_1 + u'yi-העלא'
    cases = [
      ('utf-8', pass_1, pass_2),
      ('utf-16', pass_1, pass_2),
      ('big5', u'big5-你好', u'big5-你好' + u'世界'),
      ('shift_jis', u'shift_jis-こんにちは', u'shift_jis-こんにちは' + u'世界'),
      ('johab', u'johab-안녕하세요', u'johab-안녕하세요' + u'세상'),
    ]
    for encoding, before, after in cases:
      edit_helper(cluster, encoding, before, after)
  finally:
    cleanup_tree(cluster, '/test-filebrowser/')
Example #18
0
def test_edit_i18n():
  cluster = pseudo_hdfs4.shared_cluster()
  try:
    cluster.fs.setuser(cluster.superuser)
    cluster.fs.mkdir('/test-filebrowser/')

    # Test utf-8
    pass_1 = u'en-hello pt-Olá ch-你好 ko-안녕 ru-Здравствуйте'
    pass_2 = pass_1 + u'yi-העלא'
    edit_helper(cluster, 'utf-8', pass_1, pass_2)

    # Test utf-16
    edit_helper(cluster, 'utf-16', pass_1, pass_2)

    # Test cjk
    pass_1 = u'big5-你好'
    pass_2 = pass_1 + u'世界'
    edit_helper(cluster, 'big5', pass_1, pass_2)

    pass_1 = u'shift_jis-こんにちは'
    pass_2 = pass_1 + u'世界'
    edit_helper(cluster, 'shift_jis', pass_1, pass_2)

    pass_1 = u'johab-안녕하세요'
    pass_2 = pass_1 + u'세상'
    edit_helper(cluster, 'johab', pass_1, pass_2)
  finally:
    try:
      cluster.fs.rmtree('/test-filebrowser/')
    except Exception, ex:
      LOG.error('Failed to remove tree /test-filebrowser: %s' % (ex,))
Example #19
0
def test_view_i18n():
  cluster = pseudo_hdfs4.shared_cluster()
  try:
    cluster.fs.setuser(cluster.superuser)
    cluster.fs.mkdir('/test-filebrowser/')

    # Test viewing files in different encodings
    content = u'pt-Olá en-hello ch-你好 ko-안녕 ru-Здравствуйте'
    view_helper(cluster, 'utf-8', content)
    view_helper(cluster, 'utf-16', content)

    content = u'你好-big5'
    view_helper(cluster, 'big5', content)

    content = u'こんにちは-shift-jis'
    view_helper(cluster, 'shift_jis', content)

    content = u'안녕하세요-johab'
    view_helper(cluster, 'johab', content)

    # Test that the default view is home
    c = make_logged_in_client()
    response = c.get('/filebrowser/view/')
    assert_equal(response.context['path'], '/')
    response = c.get('/filebrowser/view/?default_to_home=1')
    assert_equal("http://testserver/filebrowser/view/user/test", response["location"])
  finally:
    try:
      cluster.fs.rmtree('/test-filebrowser/')
    except Exception, ex:
      LOG.error('Failed to cleanup test directory: %s' % (ex,))
Example #20
0
  def get_shared_server(cls, username='******', language=settings.LANGUAGE_CODE):
    """Start (once per process) the shared Sqoop server and build its client.

    Polls the server with exponential backoff for up to 60 seconds.
    NOTE(review): this excerpt sets is_running but never releases
    service_lock or returns on the success path; presumably the function
    continues past the excerpt — confirm against the full source.
    """
    callback = lambda: None

    # Serialize server startup across concurrent test setup.
    service_lock.acquire()

    if not SqoopServerProvider.is_running:
      # Setup
      cluster = pseudo_hdfs4.shared_cluster()

      if is_live_cluster():
        # Nothing to start or restore on a live cluster.
        finish = ()
      else:
        LOG.info('\nStarting a Mini Sqoop. Requires "tools/jenkins/jenkins.sh" to be previously ran.\n')

        # Point the configured server URL at the locally started mini Sqoop.
        finish = (
          SERVER_URL.set_for_testing("http://%s:%s/sqoop" % (socket.getfqdn(), SqoopServerProvider.TEST_PORT)),
        )

        p = cls.start(cluster)

        # Kill the mini Sqoop at interpreter exit, using its pid file.
        def kill():
          with open(os.path.join(cluster._tmpdir, 'sqoop/sqoop.pid'), 'r') as pidfile:
            pid = pidfile.read()
            LOG.info("Killing Sqoop server (pid %s)." % pid)
            os.kill(int(pid), 9)
            p.wait()
        atexit.register(kill)

      start = time.time()
      started = False
      sleep = 0.01

      client = SqoopClient(SERVER_URL.get(), username, language)

      # Poll with exponential backoff until the server reports a version.
      while not started and time.time() - start < 60.0:
        try:
          LOG.info('Check Sqoop status...')
          version = client.get_version()
          if version:
            started = True
            break
          time.sleep(sleep)
          sleep *= 2
        except Exception, e:
          LOG.info('Sqoop server not started yet: %s' % e)
          time.sleep(sleep)
          sleep *= 2
          pass

      if not started:
        # Release the lock before failing so other callers are not deadlocked.
        service_lock.release()
        raise Exception("Sqoop server took too long to come up.")

      # Shutdown closure restores config overrides and stops the cluster.
      def shutdown():
        for f in finish:
          f()
        cluster.stop()
      callback = shutdown

      SqoopServerProvider.is_running = True
Example #21
0
def test_chown():
  """Chown endpoint: basic, user_other, recursive, and bulk-path forms."""
  cluster = pseudo_hdfs4.shared_cluster()

  # Only the Hadoop superuser really has carte blanche here
  c = make_logged_in_client(cluster.superuser)
  cluster.fs.setuser(cluster.superuser)

  PATH = u"/test-chown-en-Español"
  cluster.fs.mkdir(PATH)
  c.post("/filebrowser/chown", dict(path=[PATH], user="******", group="y"))
  assert_equal("x", cluster.fs.stats(PATH)["user"])
  assert_equal("y", cluster.fs.stats(PATH)["group"])
  # user_other overrides the user dropdown.
  c.post("/filebrowser/chown", dict(path=[PATH], user="******", user_other="z", group="y"))
  assert_equal("z", cluster.fs.stats(PATH)["user"])

  # Now check recursive
  SUBPATH = PATH + '/test'
  cluster.fs.mkdir(SUBPATH)
  c.post("/filebrowser/chown", dict(path=[PATH], user="******", group="y", recursive=True))
  assert_equal("x", cluster.fs.stats(SUBPATH)["user"])
  assert_equal("y", cluster.fs.stats(SUBPATH)["group"])
  c.post("/filebrowser/chown", dict(path=[PATH], user="******", user_other="z", group="y", recursive=True))
  assert_equal("z", cluster.fs.stats(SUBPATH)["user"])

  # Test bulk chown
  PATH_2 = u"/test-chown-en-Español2"
  # BUG FIX: PATH_3 previously duplicated PATH_2 ('...Español2' twice), so the
  # "bulk" request only ever touched one directory; use a distinct third path.
  PATH_3 = u"/test-chown-en-Español3"
  cluster.fs.mkdir(PATH_2)
  cluster.fs.mkdir(PATH_3)
  c.post("/filebrowser/chown", dict(path=[PATH_2, PATH_3], user="******", group="y", recursive=True))
  for bulk_path in (PATH_2, PATH_3):
    assert_equal("x", cluster.fs.stats(bulk_path)["user"])
    assert_equal("y", cluster.fs.stats(bulk_path)["group"])
Example #22
0
File: tests.py Project: gigfork/hue
def test_jobsub_setup():
  # User 'test' triggers the setup of the examples.
  # 'hue' home will be deleted, the examples installed in the new one
  # and 'test' will try to access them.
  cluster = pseudo_hdfs4.shared_cluster()
  cluster.fs.setuser('test')

  username = '******'
  home_dir = '/user/%s/' % username
  # Redirect the jobsub data dir into the (about-to-be-recreated) home dir.
  finish = conf.REMOTE_DATA_DIR.set_for_testing('%s/jobsub' % home_dir)

  try:
    data_dir = conf.REMOTE_DATA_DIR.get()
    # Wipe any pre-existing home dir as the superuser, then run as 'test'.
    cluster.fs.setuser(cluster.fs.superuser)
    if cluster.fs.exists(home_dir):
      cluster.fs.rmtree(home_dir)
    cluster.fs.setuser('test')

    # Install the examples.
    jobsub_setup.Command().handle()

    # Verify ownership and permissions of the recreated home dir.
    cluster.fs.setuser('test')
    stats = cluster.fs.stats(home_dir)
    assert_equal(stats['user'], username)
    assert_equal(oct(stats['mode']), '040755') #04 because is a dir

    # Data dir must be world-writable with the sticky bit (mode 1777).
    stats = cluster.fs.stats(data_dir)
    assert_equal(stats['user'], username)
    assert_equal(oct(stats['mode']), '041777')

    stats = cluster.fs.listdir_stats(data_dir)
    assert_equal(len(stats), 2) # 2 files inside
  finally:
    # Restore the original REMOTE_DATA_DIR configuration.
    finish()
Example #23
0
def test_upload():
  """Test file upload"""
  cluster = pseudo_hdfs4.shared_cluster()
  try:
    USER_NAME = cluster.fs.superuser
    cluster.fs.setuser(USER_NAME)
    DEST = "/tmp/fb-upload-test"
    client = make_logged_in_client(USER_NAME)

    # Upload this very source file and verify it round-trips intact.
    resp = client.post('/filebrowser/upload',
                       dict(dest=DEST, hdfs_file=file(__file__)))
    assert_true("View uploaded file" in resp.content)

    stats = cluster.fs.stats(DEST)
    assert_equal(stats['user'], USER_NAME)
    assert_equal(stats['group'], USER_NAME)

    uploaded = cluster.fs.open(DEST)
    assert_equal(uploaded.read(), file(__file__).read())
  finally:
    try:
      cluster.fs.remove(DEST)
    except Exception:
      pass
Example #24
0
File: tests.py Project: kthguru/hue
def test_ensure_home_directory():
    """Home dirs are created via the new-user and edit-user forms on demand."""
    reset_all_users()
    reset_all_groups()

    # Cluster and client for home directory creation
    cluster = pseudo_hdfs4.shared_cluster()
    client = make_logged_in_client(cluster.superuser, is_superuser=True, groupname="test1")
    cluster.fs.setuser(cluster.superuser)

    def check_home(path, owner):
        # A freshly-created home dir is owned by the user, mode 0755.
        dir_stat = cluster.fs.stats(path)
        assert_equal(owner, dir_stat.user)
        assert_equal(owner, dir_stat.group)
        assert_equal("40755", "%o" % dir_stat.mode)

    # Creating a user with ensure_home_directory makes the dir immediately.
    assert_false(cluster.fs.exists("/user/test1"))
    response = client.post(
        "/useradmin/users/new", dict(username="******", password1="test", password2="test", ensure_home_directory=True)
    )
    assert_true(cluster.fs.exists("/user/test1"))
    check_home("/user/test1", "test1")

    # Creating without it, then editing with it, creates the dir on edit.
    assert_false(cluster.fs.exists("/user/test2"))
    response = client.post("/useradmin/users/new", dict(username="******", password1="test", password2="test"))
    assert_false(cluster.fs.exists("/user/test2"))
    response = client.post(
        "/useradmin/users/edit/%s" % "test2",
        dict(username="******", password1="test", password2="test", ensure_home_directory=True),
    )
    assert_true(cluster.fs.exists("/user/test2"))
    check_home("/user/test2", "test2")
Example #25
0
def test_mkdir_singledir():
    cluster = pseudo_hdfs4.shared_cluster()
    cluster.fs.setuser("test")
    client = make_logged_in_client()

    try:
        prefix = "/tmp/test-filebrowser/"
        # mkdir must reject multi-level, '#'-containing and absolute names;
        # only the plain relative name should actually be created.
        # See https://issues.cloudera.org/browse/HUE-793.
        for name in ("fail/foo", "fail#bar", "/mkdir_singledir", "mkdir_singledir"):
            client.post("/filebrowser/mkdir", dict(path=prefix, name=name))

        # Read the parent dir and make sure we created 'mkdir_singledir' only.
        response = client.get("/filebrowser/view" + prefix)
        dir_listing = response.context["files"]
        assert_equal(3, len(dir_listing))
        assert_equal(dir_listing[2]["name"], "mkdir_singledir")

    finally:
        try:
            cluster.fs.rmtree(prefix)  # Clean up
        except:
            pass  # Don't let cleanup errors mask earlier failures
Example #26
0
def test_remove():
  """Check that /filebrowser/rmtree deletes one path, or several at once."""
  cluster = pseudo_hdfs4.shared_cluster()

  # Bug fix: the paths used to be defined inside the try block, so a failure
  # in make_logged_in_client()/setuser() raised a NameError on 'prefix' in the
  # finally clause, masking the original error.  Plain assignments cannot
  # raise, so defining them here is safe.
  prefix = '/test-delete'
  PATH_1 = '/%s/1' % prefix
  PATH_2 = '/%s/2' % prefix
  PATH_3 = '/%s/3' % prefix

  try:
    c = make_logged_in_client(cluster.superuser)
    cluster.fs.setuser(cluster.superuser)

    cluster.fs.mkdir(prefix)
    cluster.fs.mkdir(PATH_1)
    cluster.fs.mkdir(PATH_2)
    cluster.fs.mkdir(PATH_3)

    assert_true(cluster.fs.exists(PATH_1))
    assert_true(cluster.fs.exists(PATH_2))
    assert_true(cluster.fs.exists(PATH_3))

    # Removing one path must leave the siblings untouched.
    c.post('/filebrowser/rmtree', dict(path=[PATH_1]))
    assert_false(cluster.fs.exists(PATH_1))
    assert_true(cluster.fs.exists(PATH_2))
    assert_true(cluster.fs.exists(PATH_3))

    # Several paths can be removed in a single request.
    c.post('/filebrowser/rmtree', dict(path=[PATH_2, PATH_3]))
    assert_false(cluster.fs.exists(PATH_1))
    assert_false(cluster.fs.exists(PATH_2))
    assert_false(cluster.fs.exists(PATH_3))

  finally:
    cleanup_tree(cluster, prefix)
Example #27
0
def test_upload_zip():
  """Test archive upload.

  A .zip posted to upload/archive must be expanded in HDFS (archive itself
  discarded); the same file posted to upload/file must be stored verbatim.
  """
  cluster = pseudo_hdfs4.shared_cluster()

  try:
    USER_NAME = 'test'
    HDFS_DEST_DIR = "/tmp/fb-upload-test"
    ZIP_FILE = os.path.realpath('apps/filebrowser/src/filebrowser/test_data/test.zip')
    HDFS_ZIP_FILE = HDFS_DEST_DIR + '/test.zip'
    HDFS_UNZIPPED_FILE = HDFS_DEST_DIR + '/test'

    cluster.fs.setuser(USER_NAME)
    client = make_logged_in_client(USER_NAME)

    # Destination is owned by the test user and private (0700).
    cluster.fs.mkdir(HDFS_DEST_DIR)
    cluster.fs.chown(HDFS_DEST_DIR, USER_NAME)
    cluster.fs.chmod(HDFS_DEST_DIR, 0700)

    # Upload and unzip archive: only the extracted contents must remain.
    resp = client.post('/filebrowser/upload/archive?dest=%s' % HDFS_DEST_DIR,
                       dict(dest=HDFS_DEST_DIR, archive=file(ZIP_FILE)))
    response = json.loads(resp.content)
    assert_equal(0, response['status'], response)
    assert_false(cluster.fs.exists(HDFS_ZIP_FILE))
    assert_true(cluster.fs.isdir(HDFS_UNZIPPED_FILE))
    assert_true(cluster.fs.isfile(HDFS_UNZIPPED_FILE + '/test.txt'))

    # Upload archive as a plain file: kept as-is, not expanded.
    resp = client.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
                       dict(dest=HDFS_DEST_DIR, hdfs_file=file(ZIP_FILE)))
    response = json.loads(resp.content)
    assert_equal(0, response['status'], response)
    assert_true(cluster.fs.exists(HDFS_ZIP_FILE))
  finally:
    cleanup_file(cluster, HDFS_DEST_DIR)
Example #28
0
File: tests.py Project: hwl-py/hue
def test_ensure_home_directory():
  """Home dirs must be created for new/edited users when ensure_home_directory
  is set.

  NOTE: permanently disabled via the unconditional SkipTest below; everything
  after the raise is dead code kept for reference.
  """
  raise SkipTest

  reset_all_users()
  reset_all_groups()

  useradmin.conf.PASSWORD_POLICY.IS_ENABLED.set_for_testing(False)
  reset_password_policy()

  # Cluster and client for home directory creation
  cluster = pseudo_hdfs4.shared_cluster()
  c = make_logged_in_client(cluster.superuser, is_superuser=True, groupname='test1')
  cluster.fs.setuser(cluster.superuser)

  # Create a user with a home directory
  assert_false(cluster.fs.exists('/user/test1'))
  response = c.post('/useradmin/users/new', dict(username="******", password1='test', password2='test', ensure_home_directory=True))
  assert_true(cluster.fs.exists('/user/test1'))
  dir_stat = cluster.fs.stats('/user/test1')
  assert_equal('test1', dir_stat.user)
  assert_equal('test1', dir_stat.group)
  # 40755 octal = directory (04xxxx) with mode 0755.
  assert_equal('40755', '%o' % dir_stat.mode)

  # Create a user, then add their home directory
  assert_false(cluster.fs.exists('/user/test2'))
  response = c.post('/useradmin/users/new', dict(username="******", password1='test', password2='test'))
  assert_false(cluster.fs.exists('/user/test2'))
  response = c.post('/useradmin/users/edit/%s' % "test2", dict(username="******", password1='test', password2='test', password_old="test", ensure_home_directory=True))
  assert_true(cluster.fs.exists('/user/test2'))
  dir_stat = cluster.fs.stats('/user/test2')
  assert_equal('test2', dir_stat.user)
  assert_equal('test2', dir_stat.group)
  assert_equal('40755', '%o' % dir_stat.mode)
Example #29
0
def test_mkdir_singledir():
  """Check that /filebrowser/mkdir creates a single, relative directory only.

  Multi-level, '#'-containing and absolute names must all be rejected.
  """
  cluster = pseudo_hdfs4.shared_cluster()
  cluster.fs.setuser('test')
  c = make_logged_in_client()

  try:
    # We test that mkdir fails when a non-relative path is provided and a multi-level path is provided.
    success_path = 'mkdir_singledir'
    path_absolute = '/mkdir_singledir'
    path_fail = 'fail/foo'
    path_other_failure = 'fail#bar'
    prefix = '/tmp/test-filebrowser/'
    # Two of the following post requests should throw exceptions.
    # See https://issues.cloudera.org/browse/HUE-793.
    c.post('/filebrowser/mkdir', dict(path=prefix, name=path_fail))
    c.post('/filebrowser/mkdir', dict(path=prefix, name=path_other_failure))
    c.post('/filebrowser/mkdir', dict(path=prefix, name=path_absolute))
    c.post('/filebrowser/mkdir', dict(path=prefix, name=success_path))

    # Read the parent dir and make sure we created 'success_path' only.
    # (3 entries are expected in the listing; presumably '.' and '..' plus
    # the new dir -- TODO confirm against the view's listing semantics.)
    response = c.get('/filebrowser/view' + prefix)
    dir_listing = response.context['files']
    assert_equal(3, len(dir_listing))
    assert_equal(dir_listing[2]['name'], success_path)

  finally:
    cleanup_tree(cluster, prefix)
Example #30
0
def test_view_avro():
  """Check that /filebrowser/view autodetects and renders Avro files.

  Writes a real Avro container file into HDFS, views it (autodetect and with
  an offset), then checks that non-Avro content is not misdetected and that a
  wrong explicit compression produces a decompression error.
  """
  cluster = pseudo_hdfs4.shared_cluster()
  try:
    c = make_logged_in_client()
    cluster.fs.setuser(cluster.superuser)
    # Start from a clean scratch directory.
    if cluster.fs.isdir("/test-avro-filebrowser"):
      cluster.fs.rmtree('/test-avro-filebrowser/')

    cluster.fs.mkdir('/test-avro-filebrowser/')

    # Minimal record schema: one string field, one int field.
    test_schema = schema.parse("""
      {
        "name": "test",
        "type": "record",
        "fields": [
          { "name": "name", "type": "string" },
          { "name": "integer", "type": "int" }
        ]
      }
    """)

    # 'io' here is avro's io module (DatumWriter), not the stdlib io.
    f = cluster.fs.open('/test-avro-filebrowser/test-view.avro', "w")
    data_file_writer = datafile.DataFileWriter(f, io.DatumWriter(),
                                                writers_schema=test_schema,
                                                codec='deflate')
    dummy_datum = {
      'name': 'Test',
      'integer': 10,
    }
    data_file_writer.append(dummy_datum)
    data_file_writer.close()

    # autodetect
    response = c.get('/filebrowser/view/test-avro-filebrowser/test-view.avro')
    # (Note: we use eval here cause of an incompatibility issue between
    # the representation string of JSON dicts in simplejson vs. json)
    assert_equal(eval(response.context['view']['contents']), dummy_datum)

    # offsetting should work as well
    response = c.get('/filebrowser/view/test-avro-filebrowser/test-view.avro?offset=1')
    assert_equal('avro', response.context['view']['compression'])

    f = cluster.fs.open('/test-avro-filebrowser/test-view2.avro', "w")
    f.write("hello")
    f.close()

    # we shouldn't autodetect non avro files
    response = c.get('/filebrowser/view/test-avro-filebrowser/test-view2.avro')
    assert_equal(response.context['view']['contents'], "hello")

    # we should fail to do a bad thing if they specify compression when it's not set.
    response = c.get('/filebrowser/view/test-avro-filebrowser/test-view2.avro?compression=gzip')
    assert_true('Failed to decompress' in response.context['message'])

  finally:
    try:
      cluster.fs.rmtree('/test-avro-filebrowser/')
    except:
      pass      # Don't let cleanup errors mask earlier failures
Example #31
0
def test_view_access():
    """Viewing a forbidden or nonexistent path must yield a 'Cannot access' error."""
    cluster = pseudo_hdfs4.shared_cluster()
    NO_PERM_DIR = u'/test-no-perm'

    try:
        client = make_logged_in_client()
        cluster.fs.setuser(cluster.superuser)
        # Mode 700 makes the directory unreadable to the logged-in test user.
        cluster.fs.mkdir(NO_PERM_DIR, mode='700')

        # Same error for "exists but forbidden" and "does not exist".
        for url in ('/filebrowser/view/test-no-perm',
                    '/filebrowser/view/test-does-not-exist'):
            response = client.get(url)
            assert_true('Cannot access' in response.context['message'])
    finally:
        try:
            cluster.fs.rmtree(NO_PERM_DIR)
        except:
            pass  # Don't let cleanup errors mask earlier failures
Example #32
0
File: tests.py Project: mravi/hue
def test_ensure_home_directory_sync_ldap_users_groups():
    """Syncing LDAP users with ensure_home_directory must create home dirs."""
    URL = reverse(sync_ldap_users_groups)

    reset_all_users()
    reset_all_groups()

    # Use an in-memory LdapTestConnection instead of a live LDAP server.
    ldap_access.CACHED_LDAP_CONN = LdapTestConnection()

    cluster = pseudo_hdfs4.shared_cluster()
    client = make_logged_in_client(cluster.superuser, is_superuser=True)
    cluster.fs.setuser(cluster.superuser)

    # Importing the user alone must not create the home directory...
    client.post(
        reverse(add_ldap_users),
        dict(username_pattern='curly', password1='test', password2='test'))
    assert_false(cluster.fs.exists('/user/curly'))

    # ...but syncing with ensure_home_directory=True must.
    assert_true(client.post(URL, dict(ensure_home_directory=True)))
    assert_true(cluster.fs.exists('/user/curly'))
Example #33
0
def test_chown():
    """Check /filebrowser/chown: plain, 'other user', recursive and bulk chown."""
    cluster = pseudo_hdfs4.shared_cluster()

    # Only the Hadoop superuser really has carte blanche here
    c = make_logged_in_client(cluster.superuser)
    cluster.fs.setuser(cluster.superuser)

    # Non-ASCII path exercises unicode handling in the chown view.
    PATH = u"/test-chown-en-Español"
    cluster.fs.mkdir(PATH)
    c.post("/filebrowser/chown", dict(path=[PATH], user="******", group="y"))
    assert_equal("x", cluster.fs.stats(PATH)["user"])
    assert_equal("y", cluster.fs.stats(PATH)["group"])
    # 'user_other' carries a free-form name when it is not in the drop-down.
    c.post("/filebrowser/chown",
           dict(path=[PATH], user="******", user_other="z", group="y"))
    assert_equal("z", cluster.fs.stats(PATH)["user"])

    # Now check recursive
    SUBPATH = PATH + '/test'
    cluster.fs.mkdir(SUBPATH)
    c.post("/filebrowser/chown",
           dict(path=[PATH], user="******", group="y", recursive=True))
    assert_equal("x", cluster.fs.stats(SUBPATH)["user"])
    assert_equal("y", cluster.fs.stats(SUBPATH)["group"])
    c.post(
        "/filebrowser/chown",
        dict(path=[PATH],
             user="******",
             user_other="z",
             group="y",
             recursive=True))
    assert_equal("z", cluster.fs.stats(SUBPATH)["user"])

    # Test bulk chown.  Bug fix: PATH_3 used to duplicate PATH_2
    # ("...Español2" twice), so the bulk request only ever touched one
    # directory and the PATH_3 assertions were vacuous.
    PATH_2 = u"/test-chown-en-Español2"
    PATH_3 = u"/test-chown-en-Español3"
    cluster.fs.mkdir(PATH_2)
    cluster.fs.mkdir(PATH_3)
    c.post("/filebrowser/chown",
           dict(path=[PATH_2, PATH_3], user="******", group="y", recursive=True))
    assert_equal("x", cluster.fs.stats(PATH_2)["user"])
    assert_equal("y", cluster.fs.stats(PATH_2)["group"])
    assert_equal("x", cluster.fs.stats(PATH_3)["user"])
    assert_equal("y", cluster.fs.stats(PATH_3)["group"])
Example #34
0
    def test_login_home_creation_failure(self):
        """First login must still succeed even when the home-dir creation fails.

        A *file* is pre-created at /user/<name>, so the post-login home
        directory creation cannot succeed; login must nevertheless complete.
        """
        response = self.c.get('/accounts/login/')
        assert_equal(200, response.status_code, "Expected ok status.")
        assert_true(response.context['first_login_ever'])

        # Create home directory as a file in order to fail in the home creation later
        cluster = pseudo_hdfs4.shared_cluster()
        fs = cluster.fs
        assert_false(cluster.fs.exists("/user/%s" % self.test_username))
        fs.do_as_superuser(fs.create, "/user/%s" % self.test_username)

        response = self.c.post('/accounts/login/', {
            'username': self.test_username,
            'password': "******",
        },
                               follow=True)

        # Login succeeded and landed on the app despite the failed home dir.
        assert_equal(200, response.status_code, "Expected ok status.")
        assert_true('/beeswax' in response.content, response.content)
Example #35
0
def test_upload_tgz():
    """Test archive upload.

    A .tar.gz posted to upload/archive must be decompressed in HDFS (archive
    discarded); the same file posted to upload/file must be kept verbatim.
    """
    cluster = pseudo_hdfs4.shared_cluster()

    try:
        USER_NAME = 'test'
        HDFS_DEST_DIR = "/tmp/fb-upload-test"
        TGZ_FILE = os.path.realpath(
            'apps/filebrowser/src/filebrowser/test_data/test.tar.gz')
        HDFS_TGZ_FILE = HDFS_DEST_DIR + '/test.tar.gz'
        HDFS_DECOMPRESSED_FILE = HDFS_DEST_DIR + '/test'

        cluster.fs.setuser(USER_NAME)
        client = make_logged_in_client(USER_NAME)

        # Destination owned by the test user and private (0700).
        cluster.fs.mkdir(HDFS_DEST_DIR)
        cluster.fs.chown(HDFS_DEST_DIR, USER_NAME)
        cluster.fs.chmod(HDFS_DEST_DIR, 0700)

        # Upload and decompress archive: only the extracted tree must remain.
        resp = client.post(
            '/filebrowser/upload/archive?dest=%s' % HDFS_DEST_DIR,
            dict(dest=HDFS_DEST_DIR, archive=file(TGZ_FILE)))
        response = json.loads(resp.content)
        assert_equal(0, response['status'], response)
        assert_false(cluster.fs.exists(HDFS_TGZ_FILE))
        assert_true(cluster.fs.isdir(HDFS_DECOMPRESSED_FILE))
        assert_true(cluster.fs.isfile(HDFS_DECOMPRESSED_FILE + '/test.txt'))
        assert_equal(
            cluster.fs.read(HDFS_DECOMPRESSED_FILE + '/test.txt', 0, 4),
            "test")

        # Upload archive as a plain file: stored as-is.
        resp = client.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
                           dict(dest=HDFS_DEST_DIR, hdfs_file=file(TGZ_FILE)))
        response = json.loads(resp.content)
        assert_equal(0, response['status'], response)
        assert_true(cluster.fs.exists(HDFS_TGZ_FILE))
    finally:
        try:
            cluster.fs.remove(HDFS_DEST_DIR)
        except:
            pass
Example #36
0
def test_index():
    """The filebrowser index lands on the user's home dir when one exists,
    and falls back to '/' (with no home_directory) when it does not."""
    HOME_DIR = u'/user/test'
    NO_HOME_DIR = u'/user/no_home'

    client = make_logged_in_client()
    client_no_home = make_logged_in_client(username='******')
    cluster = pseudo_hdfs4.shared_cluster()

    if not cluster.fs.exists(HOME_DIR):
        cluster.fs.create_home_dir(HOME_DIR)
    assert_false(cluster.fs.exists(NO_HOME_DIR))

    # A user with a home directory starts there.
    response = client.get('/filebrowser', follow=True)
    assert_equal(HOME_DIR, response.context['path'])
    assert_equal(HOME_DIR, response.context['home_directory'])

    # A user without one is dropped at the filesystem root.
    response = client_no_home.get('/filebrowser', follow=True)
    assert_equal('/', response.context['path'])
    assert_equal(None, response.context['home_directory'])
Example #37
0
def test_rename():
    """Check /filebrowser/rename with a full destination path and with a bare
    name ("smart rename", resolved against the source's directory).

    Fixes: removed the unused local 'op'; the test directory is now cleaned
    up in a finally block, matching the sibling filebrowser tests.
    """
    cluster = pseudo_hdfs4.shared_cluster()

    c = make_logged_in_client(cluster.superuser)
    cluster.fs.setuser(cluster.superuser)

    PREFIX = u"/test-rename/"
    NAME = u"test-rename-before"
    NEW_NAME = u"test-rename-after"
    cluster.fs.mkdir(PREFIX + NAME)
    try:
        # test for full path rename
        c.post("/filebrowser/rename",
               dict(src_path=PREFIX + NAME, dest_path=PREFIX + NEW_NAME))
        assert_true(cluster.fs.exists(PREFIX + NEW_NAME))
        # test for smart rename
        c.post("/filebrowser/rename",
               dict(src_path=PREFIX + NAME, dest_path=NEW_NAME))
        assert_true(cluster.fs.exists(PREFIX + NEW_NAME))
    finally:
        try:
            cluster.fs.rmtree(PREFIX)  # Clean up
        except:
            pass  # Don't let cleanup errors mask earlier failures
Example #38
0
def test_hdfs_copy_from_local():
    minicluster = pseudo_hdfs4.shared_cluster()
    minifs = minicluster.fs

    olduser = minifs.setuser(minifs.superuser)
    minifs.chmod('/', 0777)
    minifs.setuser(olduser)

    path = os.path.join(tempfile.gettempdir(), 'copy_test_src')
    logging.info(path)

    data = "I will not make flatuent noises in class\n" * 2000
    f = open(path, 'w')
    f.write(data)
    f.close()

    minifs.copyFromLocal(path, '/copy_test_dst')
    actual = minifs.read('/copy_test_dst', 0, len(data) + 100)
    assert_equal(data, actual)
Example #39
0
File: tests.py Project: ozzie00/hue
def test_ensure_home_directory_add_ldap_users():
  """Importing LDAP users must create home dirs only when asked.

  Covers: plain import (no home dir), import with ensure_home_directory,
  an invalid username pattern, and a wildcard pattern.
  """
  URL = reverse(add_ldap_users)

  reset_all_users()
  reset_all_groups()

  # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
  ldap_access.CACHED_LDAP_CONN = LdapTestConnection()

  cluster = pseudo_hdfs4.shared_cluster()
  c = make_logged_in_client(cluster.superuser, is_superuser=True)
  cluster.fs.setuser(cluster.superuser)

  assert_true(c.get(URL))

  # Plain import: redirects to the user list, no home dir created.
  response = c.post(URL, dict(username_pattern='moe', password1='test', password2='test'))
  assert_true('/useradmin/users' in response['Location'])
  assert_false(cluster.fs.exists('/user/moe'))

  # Try same thing with home directory creation.
  response = c.post(URL, dict(username_pattern='curly', password1='test', password2='test', ensure_home_directory=True))
  assert_true('/useradmin/users' in response['Location'])
  assert_true(cluster.fs.exists('/user/curly'))

  # Invalid names are rejected with a form error and no home dir.
  response = c.post(URL, dict(username_pattern='bad_name', password1='test', password2='test'))
  assert_true('Could not' in response.context['form'].errors['username_pattern'][0])
  assert_false(cluster.fs.exists('/user/bad_name'))

  # See if moe, who did not ask for his home directory, has a home directory.
  assert_false(cluster.fs.exists('/user/moe'))

  # Try wild card now
  response = c.post(URL, dict(username_pattern='*r*', password1='test', password2='test', ensure_home_directory=True))
  assert_true('/useradmin/users' in response['Location'])
  assert_true(cluster.fs.exists('/user/curly'))
  assert_true(cluster.fs.exists('/user/larry'))
  assert_true(cluster.fs.exists('/user/otherguy'))

  # Clean up
  cluster.fs.rmtree('/user/curly')
  cluster.fs.rmtree('/user/larry')
  cluster.fs.rmtree('/user/otherguy')
Example #40
0
def test_ensure_home_directory():
    """Creating or editing a user with ensure_home_directory set must create
    an owned /user/<name> directory with mode 0755."""
    reset_all_users()
    reset_all_groups()

    # Cluster and client for home directory creation
    cluster = pseudo_hdfs4.shared_cluster()
    c = make_logged_in_client(cluster.superuser,
                              is_superuser=True,
                              groupname='test1')
    cluster.fs.setuser(cluster.superuser)

    def check_home_dir(path, name):
        # The dir must exist, be owned by <name>:<name>, and be a 0755 dir.
        assert_true(cluster.fs.exists(path))
        dir_stat = cluster.fs.stats(path)
        assert_equal(name, dir_stat.user)
        assert_equal(name, dir_stat.group)
        assert_equal('40755', '%o' % dir_stat.mode)

    # Creating the user with the flag set makes the home dir immediately.
    assert_false(cluster.fs.exists('/user/test1'))
    c.post(
        '/useradmin/users/new',
        dict(username="******",
             password1='test',
             password2='test',
             ensure_home_directory=True))
    check_home_dir('/user/test1', 'test1')

    # Creating without the flag, then editing with it, also makes the dir.
    assert_false(cluster.fs.exists('/user/test2'))
    c.post(
        '/useradmin/users/new',
        dict(username="******", password1='test', password2='test'))
    assert_false(cluster.fs.exists('/user/test2'))
    c.post(
        '/useradmin/users/edit/%s' % "test2",
        dict(username="******",
             password1='test',
             password2='test',
             ensure_home_directory=True))
    check_home_dir('/user/test2', 'test2')
Example #41
0
def test_hdfs_copy():
    """Check copyfile: contents overwrite the destination, permissions follow
    the source file."""
    minicluster = pseudo_hdfs4.shared_cluster()
    minifs = minicluster.fs

    copy_test_src = minicluster.fs_prefix + '/copy_test_src'
    copy_test_dst = minicluster.fs_prefix + '/copy_test_dst'
    try:
        data = "I will not make flatulent noises in class\n" * 2000
        minifs.create(copy_test_src, permission=0646, data=data)
        # Pre-existing destination data must be fully replaced.
        minifs.create(copy_test_dst, data="some initial data")

        minifs.copyfile(copy_test_src, copy_test_dst)
        # Read past the end to prove no stale bytes remain.
        actual = minifs.read(copy_test_dst, 0, len(data) + 100)
        assert_equal(data, actual)

        # The destination picks up the source's 0646 permission bits.
        sb = minifs.stats(copy_test_dst)
        assert_equal(0646, stat.S_IMODE(sb.mode))
    finally:
        minifs.do_as_superuser(minifs.rmtree, copy_test_src)
        minifs.do_as_superuser(minifs.rmtree, copy_test_dst)
Example #42
0
def test_chmod_sticky():
    """Check that /filebrowser/chmod can set and clear the sticky bit."""
    cluster = pseudo_hdfs4.shared_cluster()

    try:
        c = make_logged_in_client(cluster.superuser)
        cluster.fs.setuser(cluster.superuser)

        PATH = "/chmod_test"
        cluster.fs.mkdir(PATH)

        # Get current mode and make sure sticky bit is off.
        # expand_mode presumably returns a list of booleans, one per
        # permission flag, with the sticky bit last -- TODO confirm.
        mode = expand_mode(int(cluster.fs.stats(PATH)["mode"]))
        assert_equal(False, mode[-1])

        # Setup post data
        permissions = ('user_read', 'user_write', 'user_execute', 'group_read',
                       'group_write', 'group_execute', 'other_read',
                       'other_write', 'other_execute', 'sticky'
                       )  # Order matters!
        # Keep only the flags currently set, then force sticky on.
        permissions_dict = dict(filter(lambda x: x[1], zip(permissions, mode)))
        permissions_dict['sticky'] = True
        kwargs = {'path': [PATH]}
        kwargs.update(permissions_dict)

        # Set sticky bit, then check sticky bit is on in hdfs
        response = c.post("/filebrowser/chmod", kwargs)
        mode = expand_mode(int(cluster.fs.stats(PATH)["mode"]))
        assert_equal(True, mode[-1])

        # Unset sticky bit, then check sticky bit is off in hdfs
        del kwargs['sticky']
        response = c.post("/filebrowser/chmod", kwargs)
        mode = expand_mode(int(cluster.fs.stats(PATH)["mode"]))
        assert_equal(False, mode[-1])

    finally:
        try:
            cluster.fs.rmtree(PATH)  # Clean up
        except:
            pass  # Don't let cleanup errors mask earlier failures
Example #43
0
  def test_login_home_creation_failure(self):
    """LDAP login must still succeed when the home-dir creation fails.

    A *file* is pre-created at /user/<name> so that home directory creation
    on first login cannot succeed; the login itself must still complete.
    """
    response = self.c.get('/hue/accounts/login/')
    assert_equal(200, response.status_code, "Expected ok status.")
    assert_false(response.context[0]['first_login_ever'])

    # Create home directory as a file in order to fail in the home creation later
    cluster = pseudo_hdfs4.shared_cluster()
    fs = cluster.fs
    assert_false(self.cluster.fs.do_as_user(self.test_username, cluster.fs.exists, "/user/%s" % self.test_username))
    fs.do_as_superuser(fs.create, "/user/%s" % self.test_username)

    # Disable group sync so the login path stays on the home-dir code.
    finish = conf.LDAP.SYNC_GROUPS_ON_LOGIN.set_for_testing(False)
    try:
      response = self.c.post('/hue/accounts/login/', {
          'username': self.test_username,
          'password': "******",
          'server': "LDAP"
      }, follow=True)
      assert_equal(200, response.status_code, "Expected ok status.")
      assert_true('/about' in response.content, response.content)
    finally:
      finish()
Example #44
0
def test_upload_archive():
    """Test archive upload.

    A .zip posted to upload/archive must be expanded in HDFS (archive
    discarded); the same file posted to upload/file must be kept verbatim.
    """
    cluster = pseudo_hdfs4.shared_cluster()

    try:
        USER_NAME = 'test'
        HDFS_DEST_DIR = "/tmp/fb-upload-test"
        ZIP_FILE = os.path.realpath(
            'apps/filebrowser/src/filebrowser/test_data/test.zip')
        HDFS_ZIP_FILE = HDFS_DEST_DIR + '/test.zip'
        HDFS_UNZIPPED_FILE = HDFS_DEST_DIR + '/test'

        cluster.fs.setuser(USER_NAME)
        client = make_logged_in_client(USER_NAME)

        # Destination owned by the test user and private (0700).
        cluster.fs.mkdir(HDFS_DEST_DIR)
        cluster.fs.chown(HDFS_DEST_DIR, USER_NAME)
        cluster.fs.chmod(HDFS_DEST_DIR, 0700)

        # Upload and unzip archive: only the extracted contents must remain.
        resp = client.post('/filebrowser/upload/archive',
                           dict(dest=HDFS_DEST_DIR, archive=file(ZIP_FILE)))
        response = json.loads(resp.content)
        assert_equal(0, response['status'], response)
        assert_false(cluster.fs.exists(HDFS_ZIP_FILE))
        assert_true(cluster.fs.isdir(HDFS_UNZIPPED_FILE))
        assert_true(cluster.fs.isfile(HDFS_UNZIPPED_FILE + '/test.txt'))

        # Upload archive as a plain file: stored as-is, not expanded.
        resp = client.post('/filebrowser/upload/file',
                           dict(dest=HDFS_DEST_DIR, hdfs_file=file(ZIP_FILE)))
        response = json.loads(resp.content)
        assert_equal(0, response['status'], response)
        assert_true(cluster.fs.exists(HDFS_ZIP_FILE))
    finally:
        try:
            cluster.fs.remove(HDFS_DEST_DIR)
        except Exception, ex:
            pass  # Best-effort cleanup; 'ex' intentionally ignored.
Example #45
0
File: tests.py Project: yhanwen/hue
def test_live_jobtracker():
    """Smoke-test LiveJobTracker: its no-argument accessors must not raise
    and must all return truthy values."""
    minicluster = pseudo_hdfs4.shared_cluster()
    jt = minicluster.jt

    # Not covered here: task_tracker and get_job_counters (need arguments).
    for accessor in ('queues', 'cluster_status', 'all_task_trackers',
                     'active_trackers', 'blacklisted_trackers',
                     'running_jobs', 'completed_jobs', 'failed_jobs',
                     'all_jobs', 'get_current_time'):
        assert_true(getattr(jt, accessor)())
Example #46
0
def test_seek():
    """Test for DESKTOP-293 - ensure seek works in python2.4"""
    cluster = pseudo_hdfs4.shared_cluster()
    fs = cluster.fs
    fs.setuser(cluster.superuser)
    handle = fs.open("/fortest.txt", "w")
    try:
        handle.write("hello")
        handle.close()

        handle = fs.open("/fortest.txt", "r")
        # Absolute seeks from the start of the file.
        handle.seek(0, posixfile.SEEK_SET)
        assert_equals("he", handle.read(2))
        handle.seek(1, posixfile.SEEK_SET)
        assert_equals("el", handle.read(2))
        # Relative to the end of the file...
        handle.seek(-1, posixfile.SEEK_END)
        assert_equals("o", handle.read())
        # ...and relative to the current position.
        handle.seek(0, posixfile.SEEK_SET)
        handle.seek(2, posixfile.SEEK_CUR)
        assert_equals("ll", handle.read(2))
    finally:
        fs.remove("/fortest.txt")
Example #47
0
def test_copy_remote_dir():
    """Check copy_remote_dir: the tree is copied with the requested dir mode
    and the new owner applied to the directory and every file inside.

    NOTE(review): this test leaves /copy_remote_dir and /user/testcopy behind
    (no try/finally cleanup, unlike the sibling tests) -- consider adding one.
    """
    cluster = pseudo_hdfs4.shared_cluster()
    fs = cluster.fs
    fs.setuser(cluster.superuser)

    # Source tree: a directory with two small files.
    src_dir = '/copy_remote_dir'
    fs.mkdir(src_dir)
    f1 = fs.open("/copy_remote_dir/test_one.txt", "w")
    f1.write("foo")
    f1.close()
    f2 = fs.open("/copy_remote_dir/test_two.txt", "w")
    f2.write("bar")
    f2.close()

    new_owner = 'testcopy'
    new_owner_home = '/user/testcopy'
    new_owner_dir = new_owner_home + '/test-copy'
    fs.mkdir(new_owner_home)
    fs.chown(new_owner_home, new_owner, new_owner)

    fs.copy_remote_dir(src_dir, new_owner_dir, dir_mode=0755, owner=new_owner)

    # The copied directory carries the new owner/group and requested mode.
    dir_stat = fs.stats(new_owner_dir)
    assert_equals(new_owner, dir_stat.user)
    assert_equals(new_owner, dir_stat.group)
    assert_equals('40755', '%o' % dir_stat.mode)

    src_stat = fs.listdir_stats(src_dir)
    dest_stat = fs.listdir_stats(new_owner_dir)

    # Same file names on both sides (and the listing is non-empty).
    src_names = set([stat.name for stat in src_stat])
    dest_names = set([stat.name for stat in dest_stat])
    assert_true(src_names)
    assert_equals(src_names, dest_names)

    # Every copied file belongs to the new owner with mode 0644.
    for stat in dest_stat:
        assert_equals('testcopy', stat.user)
        assert_equals('testcopy', stat.group)
        assert_equals('100644', '%o' % stat.mode)
Example #48
0
def test_trash():
  """Check the filebrowser's default_to_trash redirect behaviour.

  With no trash folder the view stays put; once something has been moved to
  trash, viewing with ?default_to_trash redirects into a .Trash directory.
  """
  cluster = pseudo_hdfs4.shared_cluster()

  try:
    c = make_logged_in_client()
    USERNAME = '******'
    cluster.fs.setuser(USERNAME)

    cluster.fs.do_as_superuser(cluster.fs.chown, '/user/%s' % USERNAME, USERNAME, USERNAME)

    HOME_TRASH_DIR = '/user/%s/.Trash/Current/user/%s' % (USERNAME, USERNAME)
    prefix = '/tmp/test_trash'
    PATH_1 = '/%s/1' % prefix
    cluster.fs.mkdir(prefix)
    cluster.fs.mkdir(PATH_1)

    # Start from a clean state: purge any pre-existing trash.
    c.post('/filebrowser/rmtree?skip_trash=true', dict(path=[HOME_TRASH_DIR]))

    # No trash folder
    response = c.get('/filebrowser/view/user/test?default_to_trash', follow=True)
    assert_equal([], response.redirect_chain)

    # Deleting without skip_trash moves PATH_1 into the trash.
    c.post('/filebrowser/rmtree', dict(path=[PATH_1]))

    # We have a trash folder so a redirect (Current not always there)
    response = c.get('/filebrowser/view/user/test?default_to_trash', follow=True)
    assert_true(any(['.Trash' in page for page, code in response.redirect_chain]), response.redirect_chain)

    c.post('/filebrowser/rmtree?skip_trash=true', dict(path=[HOME_TRASH_DIR]))

    # No home trash, just regular root trash
    response = c.get('/filebrowser/view/user/test?default_to_trash', follow=True)
    assert_true(any(['.Trash' in page for page, code in response.redirect_chain]), response.redirect_chain)
  finally:
    try:
      cluster.fs.rmtree(prefix)     # Clean up
    except:
      pass      # Don't let cleanup errors mask earlier failures
Example #49
0
def test_hdfs_copy():
    """Check copyfile at the filesystem root: contents replace the
    destination and the source's permission bits are carried over."""
    minicluster = pseudo_hdfs4.shared_cluster()
    minifs = minicluster.fs

    try:
        # Open up '/' so the test user can create files directly under it.
        olduser = minifs.setuser(minifs.superuser)
        minifs.chmod('/', 0777)
        minifs.setuser(olduser)

        data = "I will not make flatuent noises in class\n" * 2000
        minifs.create('/copy_test_src', permission=0646, data=data)
        # Pre-existing destination data must be fully replaced.
        minifs.create('/copy_test_dst', data="some initial data")

        minifs.copyfile('/copy_test_src', '/copy_test_dst')
        # Read past the end to prove no stale bytes remain.
        actual = minifs.read('/copy_test_dst', 0, len(data) + 100)
        assert_equal(data, actual)

        # The destination picks up the source's 0646 permission bits.
        sb = minifs.stats('/copy_test_dst')
        assert_equal(0646, stat.S_IMODE(sb.mode))

    finally:
        minifs.do_as_superuser(minifs.rmtree, '/copy_test_src')
        minifs.do_as_superuser(minifs.rmtree, '/copy_test_dst')
Example #50
0
def test_seek_across_blocks():
    """Makes a file with a lot of blocks, seeks around"""
    cluster = pseudo_hdfs4.shared_cluster()
    fs = cluster.fs
    fs.setuser(cluster.superuser)
    # blocksize=1024 forces the ~78KB payload to span many HDFS blocks.
    fs.create("/fortest-blocks.txt", replication=1, blocksize=1024)
    writer = fs.open("/fortest-blocks.txt", "w")
    try:
        data = "abcdefghijklmnopqrstuvwxyz" * 3000
        writer.write(data)
        writer.close()

        # Re-open the file several times and hop to random offsets,
        # verifying the bytes read after each absolute seek.
        for _ in xrange(1, 10):
            reader = fs.open("/fortest-blocks.txt", "r")

            for _ in xrange(1, 100):
                offset = random.randint(0, len(data) - 1)
                reader.seek(offset, posixfile.SEEK_SET)
                assert_equals(data[offset:offset + 50], reader.read(50))
            reader.close()

    finally:
        fs.remove("/fortest-blocks.txt")
Example #51
0
def test_ensure_home_directory_sync_ldap_users_groups():
    """Syncing LDAP users with ensure_home_directory must create home dirs,
    even when the configured server name is nonsense (the cached test
    connection must be used instead of a real one)."""
    URL = reverse(sync_ldap_users_groups)

    reset_all_users()
    reset_all_groups()

    # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
    ldap_access.CACHED_LDAP_CONN = LdapTestConnection()

    cluster = pseudo_hdfs4.shared_cluster()
    c = make_logged_in_client(cluster.superuser, is_superuser=True)
    cluster.fs.setuser(cluster.superuser)

    reset = []

    # Set to nonsensical value just to force new config usage.
    # Should continue to use cached connection.
    reset.append(
        desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))

    try:
        # Importing the user alone must not create the home directory...
        c.post(
            reverse(add_ldap_users),
            dict(server='nonsense',
                 username_pattern='curly',
                 password1='test',
                 password2='test'))
        assert_false(cluster.fs.exists('/user/curly'))
        # ...but syncing with ensure_home_directory=True must.
        assert_true(
            c.post(URL, dict(server='nonsense', ensure_home_directory=True)))
        assert_true(cluster.fs.exists('/user/curly'))
    finally:
        # Restore the config overrides, then remove the created home dir.
        for finish in reset:
            finish()

        if cluster.fs.exists('/user/curly'):
            cluster.fs.rmtree('/user/curly')
Example #52
0
def test_chown():
    """Check fs.chown in non-recursive and recursive modes on a small tree."""
    # Create a test directory with
    # a subdirectory and a few files.
    dir1 = '/test'
    subdir1 = dir1 + '/test1'
    file1 = subdir1 + '/test1.txt'
    cluster = pseudo_hdfs4.shared_cluster()
    fs = cluster.fs
    fs.setuser(cluster.superuser)
    try:
        fs.mkdir(subdir1)
        f = fs.open(file1, "w")
        f.write("hello")
        f.close()

        # Check currrent owners are not user test
        LOG.info(str(fs.stats(dir1).__dict__))
        assert_not_equals('test', fs.stats(dir1).user)
        assert_not_equals('test', fs.stats(subdir1).user)
        assert_not_equals('test', fs.stats(file1).user)

        # Chown non-recursive: only the top directory changes owner.
        fs.chown(dir1, 'test', recursive=False)
        assert_equals('test', fs.stats(dir1).user)
        assert_not_equals('test', fs.stats(subdir1).user)
        assert_not_equals('test', fs.stats(file1).user)

        # Chown recursive: the whole subtree changes owner.
        fs.chown(dir1, 'test', recursive=True)
        assert_equals('test', fs.stats(dir1).user)
        assert_equals('test', fs.stats(subdir1).user)
        assert_equals('test', fs.stats(file1).user)
    finally:
        try:
            fs.rmtree(dir1)
        finally:
            # NOTE(review): 'finally: pass' is a no-op -- a cleanup failure
            # here still propagates.  Siblings use 'except: pass'; presumably
            # that was the intent here too -- confirm before changing.
            pass
Example #53
0
  def test_ensure_home_directory(self):
    """Home dirs must be created for new/edited users when
    ensure_home_directory is set.

    NOTE: permanently disabled via the unconditional SkipTest below; the body
    after the raise is dead code kept for reference.
    """
    raise SkipTest

    resets = [
      useradmin.conf.PASSWORD_POLICY.IS_ENABLED.set_for_testing(False),
    ]

    try:
      reset_password_policy()

      # Cluster and client for home directory creation
      cluster = pseudo_hdfs4.shared_cluster()
      c = make_logged_in_client(cluster.superuser, is_superuser=True, groupname='test1')
      cluster.fs.setuser(cluster.superuser)

      # Create a user with a home directory
      assert_false(cluster.fs.exists('/user/test1'))
      response = c.post('/useradmin/users/new', dict(username="******", password1='test', password2='test', ensure_home_directory=True))
      assert_true(cluster.fs.exists('/user/test1'))
      dir_stat = cluster.fs.stats('/user/test1')
      assert_equal('test1', dir_stat.user)
      assert_equal('test1', dir_stat.group)
      # 40755 octal = directory (04xxxx) with mode 0755.
      assert_equal('40755', '%o' % dir_stat.mode)

      # Create a user, then add their home directory
      assert_false(cluster.fs.exists('/user/test2'))
      response = c.post('/useradmin/users/new', dict(username="******", password1='test', password2='test'))
      assert_false(cluster.fs.exists('/user/test2'))
      response = c.post('/useradmin/users/edit/%s' % "test2", dict(username="******", password1='test', password2='test', password_old="test", ensure_home_directory=True))
      assert_true(cluster.fs.exists('/user/test2'))
      dir_stat = cluster.fs.stats('/user/test2')
      assert_equal('test2', dir_stat.user)
      assert_equal('test2', dir_stat.group)
      assert_equal('40755', '%o' % dir_stat.mode)
    finally:
      for reset in resets:
        reset()
Example #54
0
def test_chmod():
    """chmod on HDFS: non-recursive changes only the target directory,
    recursive changes the whole subtree."""
    # Layout: /test -> /test/test1 -> /test/test1/test1.txt
    root = '/test'
    child_dir = root + '/test1'
    child_file = child_dir + '/test1.txt'

    cluster = pseudo_hdfs4.shared_cluster()
    hdfs = cluster.fs
    hdfs.setuser(cluster.superuser)
    try:
        hdfs.mkdir(child_dir)
        handle = hdfs.open(child_file, "w")
        handle.write("hello")
        handle.close()

        # Establish a known baseline that is not 777 (666 for the file).
        hdfs.chmod(root, 0o1000, recursive=True)
        assert_equals(0o41000, hdfs.stats(root).mode)
        assert_equals(0o41000, hdfs.stats(child_dir).mode)
        assert_equals(0o100000, hdfs.stats(child_file).mode)

        # Non-recursive: only the top-level directory changes.
        hdfs.chmod(root, 0o1222, recursive=False)
        assert_equals(0o41222, hdfs.stats(root).mode)
        assert_equals(0o41000, hdfs.stats(child_dir).mode)
        assert_equals(0o100000, hdfs.stats(child_file).mode)

        # Recursive: the subdirectory and the file change too.
        hdfs.chmod(root, 0o1444, recursive=True)
        assert_equals(0o41444, hdfs.stats(root).mode)
        assert_equals(0o41444, hdfs.stats(child_dir).mode)
        assert_equals(0o100444, hdfs.stats(child_file).mode)
    finally:
        hdfs.rmtree(root)
Example #55
0
def test_config_validator_more():
    """check_config must not report filesystem failures against the shared cluster."""
    # TODO: Setup DN to not load the plugin, which is a common user error.

    # The mini cluster object itself is unused; shared_cluster() simply
    # installs the configuration that forms the test basis.
    pseudo_hdfs4.shared_cluster()
    client = make_logged_in_client()

    # Point the default MR cluster at deterministic values for the check.
    restorers = (
        conf.MR_CLUSTERS["default"].HOST.set_for_testing("localhost"),
        conf.MR_CLUSTERS['default'].JT_THRIFT_PORT.set_for_testing(23),
    )
    saved_caches = cluster.clear_caches()
    try:
        resp = client.get('/debug/check_config')

        # None of the HDFS sanity steps should have failed.
        for fragment in ('Failed to access filesystem root',
                         'Failed to create',
                         'Failed to chown',
                         'Failed to delete'):
            assert_false(fragment in resp.content)
    finally:
        for restore in restorers:
            restore()
        cluster.restore_caches(saved_caches)
Example #56
0
def get_shared_beeswax_server(db_name='default'):
    # Lazily start (at most once per process) a HiveServer2 shared across
    # tests, caching the backing cluster and its shutdown callable in the
    # module globals below.
    # NOTE(review): db_name is not referenced in this visible portion of the
    # function -- confirm how (or whether) it is used further down.
    global _SHARED_HIVE_SERVER
    global _SHARED_HIVE_SERVER_CLOSER
    if _SHARED_HIVE_SERVER is None:

        cluster = pseudo_hdfs4.shared_cluster()

        if is_live_cluster():

            # Live cluster: HS2 is already running, so the closer is a no-op.
            def s():
                pass
        else:
            # Mini cluster: start our own HS2; s() will shut it down later.
            s = _start_mini_hs2(cluster)

        start = time.time()
        started = False
        sleep = 1

        make_logged_in_client()
        user = User.objects.get(username='******')
        query_server = get_query_server_config()
        db = dbms.get(user, query_server)

        # Poll for up to 30 seconds until a session can be opened against HS2,
        # retrying after `sleep` seconds on each failure.
        while not started and time.time() - start <= 30:
            try:
                db.open_session(user)
                started = True
                break
            except Exception, e:
                LOG.info('HiveServer2 server could not be found after: %s' % e)
                time.sleep(sleep)

        if not started:
            raise Exception("Server took too long to come up.")

        # Cache for subsequent callers.
        _SHARED_HIVE_SERVER, _SHARED_HIVE_SERVER_CLOSER = cluster, s
Example #57
0
def test_view_avro():
    """Filebrowser view: avro files are autodetected and decoded, offsets work,
    non-avro files are left alone, and a wrong forced codec fails cleanly."""
    cluster = pseudo_hdfs4.shared_cluster()
    try:
        client = make_logged_in_client()
        cluster.fs.setuser(cluster.superuser)

        # Start from an empty scratch directory.
        if cluster.fs.isdir("/test-avro-filebrowser"):
            cluster.fs.rmtree('/test-avro-filebrowser/')
        cluster.fs.mkdir('/test-avro-filebrowser/')

        test_schema = schema.parse("""
      {
        "name": "test",
        "type": "record",
        "fields": [
          { "name": "name", "type": "string" },
          { "name": "integer", "type": "int" }
        ]
      }
    """)

        # Write a single-record avro fixture.
        dummy_datum = {'name': 'Test', 'integer': 10}
        out = cluster.fs.open('/test-avro-filebrowser/test-view.avro', "w")
        writer = datafile.DataFileWriter(
            out, io.DatumWriter(), writers_schema=test_schema, codec='deflate')
        writer.append(dummy_datum)
        writer.close()

        # Autodetection should decode the record.
        response = client.get('/filebrowser/view/test-avro-filebrowser/test-view.avro')
        # (Note: we use eval here cause of an incompatibility issue between
        # the representation string of JSON dicts in simplejson vs. json)
        assert_equal(eval(response.context['view']['contents']), dummy_datum)

        # Offsetting should work as well.
        response = client.get('/filebrowser/view/test-avro-filebrowser/test-view.avro?offset=1')
        assert_equal('avro', response.context['view']['compression'])

        # A plain-text file must NOT be autodetected as avro...
        plain = cluster.fs.open('/test-avro-filebrowser/test-view2.avro', "w")
        plain.write("hello")
        plain.close()
        response = client.get('/filebrowser/view/test-avro-filebrowser/test-view2.avro')
        assert_equal(response.context['view']['contents'], "hello")

        # ...and forcing a codec it was not written with must fail cleanly.
        response = client.get('/filebrowser/view/test-avro-filebrowser/test-view2.avro?compression=gzip')
        assert_true('Failed to decompress' in response.context['message'])

    finally:
        try:
            cluster.fs.rmtree('/test-avro-filebrowser/')
        except:
            pass  # Don't let cleanup errors mask earlier failures
Example #58
0
def test_view_snappy_compressed():
    """Filebrowser snappy support: decompress valid fixtures, reject non-snappy
    content when snappy is forced, and honor MAX_SNAPPY_DECOMPRESSION_SIZE.
    """
    if not snappy_installed():
        raise SkipTest
    import snappy

    cluster = pseudo_hdfs4.shared_cluster()
    finish = []  # config restorers to undo in the finally block
    try:
        c = make_logged_in_client()
        cluster.fs.setuser(cluster.superuser)

        # Start from an empty fixture directory.
        if cluster.fs.isdir('/tmp/test-snappy-filebrowser'):
            cluster.fs.rmtree('/tmp/test-snappy-filebrowser')
        # BUG FIX: this previously created '/tmp/test-snappy-avro-filebrowser/',
        # which is not the directory the fixtures below are written into.
        cluster.fs.mkdir('/tmp/test-snappy-filebrowser/')

        f = cluster.fs.open('/tmp/test-snappy-filebrowser/test-view.snappy',
                            "w")
        f.write(
            snappy.compress(
                'This is a test of the emergency broadcasting system.'))
        f.close()

        f = cluster.fs.open(
            '/tmp/test-snappy-filebrowser/test-view.stillsnappy', "w")
        f.write(
            snappy.compress(
                'The broadcasters of your area in voluntary cooperation with the FCC and other authorities.'
            ))
        f.close()

        # Plain, uncompressed content.
        f = cluster.fs.open('/tmp/test-snappy-filebrowser/test-view.notsnappy',
                            "w")
        f.write('foobar')
        f.close()

        # Forcing snappy on non-snappy content must fail cleanly.
        response = c.get(
            '/filebrowser/view/tmp/test-snappy-filebrowser/test-view.notsnappy?compression=snappy'
        )
        assert_true('Failed to decompress' in response.context['message'],
                    response)

        # Snappy compressed succeed.
        response = c.get(
            '/filebrowser/view/tmp/test-snappy-filebrowser/test-view.snappy')
        assert_equal('snappy', response.context['view']['compression'])
        assert_equal(response.context['view']['contents'],
                     'This is a test of the emergency broadcasting system.',
                     response)

        # Snappy compressed succeed (filename without the .snappy suffix).
        response = c.get(
            '/filebrowser/view/tmp/test-snappy-filebrowser/test-view.stillsnappy'
        )
        assert_equal('snappy', response.context['view']['compression'])
        assert_equal(
            response.context['view']['contents'],
            'The broadcasters of your area in voluntary cooperation with the FCC and other authorities.',
            response)

        # Above the configured decompression cap the request must be refused.
        finish.append(MAX_SNAPPY_DECOMPRESSION_SIZE.set_for_testing(1))
        response = c.get(
            '/filebrowser/view/tmp/test-snappy-filebrowser/test-view.stillsnappy?compression=snappy'
        )
        assert_true(
            'File size is greater than allowed max snappy decompression size of 1'
            in response.context['message'], response)

    finally:
        for done in finish:
            done()
        try:
            # BUG FIX: this previously removed '/test-snappy-avro-filebrowser/'
            # (wrong name and missing /tmp), leaving the real fixture directory
            # behind after every run.
            cluster.fs.rmtree('/tmp/test-snappy-filebrowser/')
        except:
            pass  # Don't let cleanup errors mask earlier failures
Example #59
0
def test_view_snappy_compressed_avro():
    """Snappy-wrapped avro files are reported as 'snappy_avro' and decoded,
    and oversized files are refused per MAX_SNAPPY_DECOMPRESSION_SIZE."""
    if not snappy_installed():
        raise SkipTest
    import snappy

    cluster = pseudo_hdfs4.shared_cluster()
    finish = []  # config restorers to undo in the finally block
    try:
        client = make_logged_in_client()
        cluster.fs.setuser(cluster.superuser)

        # Start from an empty scratch directory.
        if cluster.fs.isdir("/test-snappy-avro-filebrowser"):
            cluster.fs.rmtree('/test-snappy-avro-filebrowser/')
        cluster.fs.mkdir('/test-snappy-avro-filebrowser/')

        test_schema = schema.parse("""
      {
        "name": "test",
        "type": "record",
        "fields": [
          { "name": "name", "type": "string" },
          { "name": "integer", "type": "int" }
        ]
      }
    """)

        # Cannot use StringIO with datafile writer!
        dummy_datum = {'name': 'Test', 'integer': 10}
        out = cluster.fs.open('/test-snappy-avro-filebrowser/test-view.avro', "w")
        writer = datafile.DataFileWriter(
            out, io.DatumWriter(), writers_schema=test_schema, codec='deflate')
        writer.append(dummy_datum)
        writer.close()

        # Snappy-compress the whole avro file into a sibling fixture.
        src = cluster.fs.open('/test-snappy-avro-filebrowser/test-view.avro', 'r')
        dst = cluster.fs.open('/test-snappy-avro-filebrowser/test-view.compressed.avro', "w")
        dst.write(snappy.compress(src.read()))
        dst.close()
        src.close()

        # Forcing snappy_avro on a plain avro file must fail cleanly.
        response = client.get(
            '/filebrowser/view/test-snappy-avro-filebrowser/test-view.avro?compression=snappy_avro')
        assert_true('Failed to decompress' in response.context['message'], response)

        # Detected automatically, and when requested explicitly.
        for url in (
                '/filebrowser/view/test-snappy-avro-filebrowser/test-view.compressed.avro',
                '/filebrowser/view/test-snappy-avro-filebrowser/test-view.compressed.avro?compression=snappy_avro'):
            response = client.get(url)
            assert_equal('snappy_avro', response.context['view']['compression'])
            assert_equal(eval(response.context['view']['contents']), dummy_datum, response)

        # Requesting plain 'avro' should also decompress the snappy wrapper.
        response = client.get(
            '/filebrowser/view/test-snappy-avro-filebrowser/test-view.compressed.avro?compression=avro')
        assert_equal('snappy_avro', response.context['view']['compression'])
        assert_equal(eval(response.context['view']['contents']), dummy_datum, response)

        # Above the decompression size cap the request must be refused.
        finish.append(MAX_SNAPPY_DECOMPRESSION_SIZE.set_for_testing(1))
        response = client.get(
            '/filebrowser/view/test-snappy-avro-filebrowser/test-view.avro?compression=snappy_avro')
        assert_true(
            'File size is greater than allowed max snappy decompression size of 1'
            in response.context['message'], response)

    finally:
        for done in finish:
            done()
        try:
            cluster.fs.rmtree('/test-snappy-avro-filebrowser/')
        except:
            pass  # Don't let cleanup errors mask earlier failures
Example #60
0
def test_listdir_sort_and_filter():
    """Filebrowser directory listing: pagination, sortby (name/size/mtime),
    filtering, and their combinations."""
    cluster = pseudo_hdfs4.shared_cluster()
    c = make_logged_in_client(cluster.superuser)
    cluster.fs.setuser(cluster.superuser)

    BASE = '/test_sort_and_filter'
    FUNNY_NAME = u'greek-Ελληνικά'

    def browse(query):
        # Fetch the file listing for BASE with the given query string.
        return c.get('/filebrowser/view' + BASE + query).context['files']

    try:
        cluster.fs.mkdir(BASE)
        # Ten files of strictly increasing size ("foo", "foofoo", ...).
        for i in range(1, 11):
            cluster.fs.create(cluster.fs.join(BASE, str(i)), data="foo" * i)

        # Plus one directory with a non-ascii name, created last.
        cluster.fs.mkdir(cluster.fs.join(BASE, FUNNY_NAME))

        # 13 entries in total, counting '.' and '..'.
        expect = ['.', '..', FUNNY_NAME] + [str(i) for i in range(1, 11)]

        # Pagination ('.' and '..' are prepended to every page).
        assert_equal(len(expect), len(browse('?pagesize=20')))
        assert_equal(12, len(browse('?pagesize=10')))
        assert_equal(12, len(browse('?pagesize=10&pagenum=1')))
        assert_equal(3, len(browse('?pagesize=10&pagenum=2')))

        # Sorting by name (ascending is the default).
        assert_equal(sorted(expect), [f['name'] for f in browse('?sortby=name')])
        assert_equal(sorted(expect),
                     [f['name'] for f in browse('?sortby=name&descending=false')])

        listing = browse('?sortby=name&descending=true')
        assert_equal(".", listing[0]['name'])
        assert_equal("..", listing[1]['name'])
        assert_equal(FUNNY_NAME, listing[2]['name'])

        # Sorting by size matches creation order since sizes strictly increase.
        assert_equal(expect, [f['name'] for f in browse('?sortby=size')])

        # Sorting by mtime: the funny-named directory was created last.
        listing = browse('?sortby=mtime')
        assert_equal(".", listing[0]['name'])
        assert_equal("..", listing[1]['name'])
        assert_equal(FUNNY_NAME, listing[-1]['name'])

        # Substring filter.
        assert_equal(['.', '..', '1', '10'],
                     [f['name'] for f in browse('?filter=1')])
        assert_equal(['.', '..', FUNNY_NAME],
                     [f['name'] for f in browse('?filter=' + FUNNY_NAME)])

        # Filter combined with sorting.
        assert_equal(['.', '..', '10', '1'],
                     [f['name'] for f in browse('?filter=1&sortby=name&descending=true')])

        # Filter + sorting + pagination together.
        assert_equal(['.', '..', '1'],
                     [f['name'] for f in
                      browse('?filter=1&sortby=name&descending=true&pagesize=1&pagenum=2')])
    finally:
        try:
            cluster.fs.rmtree(BASE)
        except:
            pass  # Don't let cleanup errors mask earlier failures