Code example #1
 def setup(cls, S3_AWS_ACCESS_KEY=None, S3_AWS_SECRET=None):
     Machine.installPackageWithPip(packages="awscli",
                                   hosts=Hadoop.getAllNodes(),
                                   logoutput=True)
     aws_home = "/root/.aws"
     if not os.path.exists(aws_home):
         Machine.makedirs(ADMIN_USER, GATEWAY_NODE, aws_home, ADMIN_PWD)
         util.writeToFile("[default]\nregion = us-west-2\noutput=json",
                          os.path.join(ARTIFACTS_DIR, "config"))
         if S3_AWS_ACCESS_KEY:
             cls._aws_access_key_id = S3_AWS_ACCESS_KEY
         else:
             cls._aws_access_key_id = Config.get('machine',
                                                 'S3_AWS_ACCESS_KEY')
         if S3_AWS_SECRET:
             cls._aws_secret_access_key = S3_AWS_SECRET
         else:
             cls._aws_secret_access_key = Config.get(
                 'machine', 'S3_AWS_SECRET')
         util.writeToFile(
             "[default]\naws_access_key_id = %s\naws_secret_access_key = %s"
             % (cls._aws_access_key_id, cls._aws_secret_access_key),
             os.path.join(ARTIFACTS_DIR, "credentials"))
         Machine.runas(
             ADMIN_USER,
             "chown  %s '%s/config'" % (ADMIN_USER, ARTIFACTS_DIR),
             GATEWAY_NODE, ADMIN_PWD)
         Machine.runas(
             ADMIN_USER,
             "chown  %s '%s/credentials'" % (ADMIN_USER, ARTIFACTS_DIR),
             GATEWAY_NODE, ADMIN_PWD)
         Machine.copy(os.path.join(ARTIFACTS_DIR, "config"), aws_home,
                      ADMIN_USER, ADMIN_PWD)
         Machine.copy(os.path.join(ARTIFACTS_DIR, "credentials"), aws_home,
                      ADMIN_USER, ADMIN_PWD)
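A note on the example above: it leans on beaver's Machine, Config, and util helpers plus module constants such as ADMIN_USER, GATEWAY_NODE, and ARTIFACTS_DIR. A minimal stdlib-only sketch of the same idea, assuming the access key and secret are passed in directly and the files are written for the current user rather than pushed to a gateway node, could look like this:

import os
import shutil


def write_aws_cli_config(access_key, secret_key, staging_dir, region="us-west-2"):
    # Stage the AWS CLI config and credentials files, then copy them into
    # ~/.aws -- the same flow setup() drives through util.writeToFile and
    # Machine.copy above.
    aws_home = os.path.expanduser("~/.aws")
    if not os.path.exists(aws_home):
        os.makedirs(aws_home)

    config_path = os.path.join(staging_dir, "config")
    creds_path = os.path.join(staging_dir, "credentials")
    with open(config_path, "w") as f:
        f.write("[default]\nregion = %s\noutput=json" % region)
    with open(creds_path, "w") as f:
        f.write("[default]\naws_access_key_id = %s\naws_secret_access_key = %s"
                % (access_key, secret_key))

    shutil.copy(config_path, os.path.join(aws_home, "config"))
    shutil.copy(creds_path, os.path.join(aws_home, "credentials"))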
Code example #2
File: ruStorm.py  Project: thakkardharmik/beaver
    def setup_storm_hdfs_topology(cls, useStandaloneCmd):
        storm_version = Storm.getVersion(useStandaloneCmd=True)
        with open(HDFS_CONFIG_FILE, 'w') as file_obj:
            if Hadoop.isSecure():
                file_obj.write('hdfs.keytab.file: ' +
                               Machine.getHeadlessUserKeytab(
                                   user=HADOOPQA_USER) + '\n')
                file_obj.write('hdfs.kerberos.principal: ' +
                               Machine.get_user_principal(user=HADOOPQA_USER) +
                               '\n')

        HDFS.createDirectory("/tmp/mySeqTopology", HDFS_USER, "777", False)
        HDFS.createDirectory("/tmp/dest", HDFS_USER, "777", False)
        HDFS.createDirectory("/tmp/dest2", HDFS_USER, "777", False)
        HDFS.createDirectory("/tmp/foo", HDFS_USER, "777", False)
        HDFS.createDirectory("/tmp/trident", HDFS_USER, "777", False)
        HDFS.createDirectory("/tmp/trident-seq", HDFS_USER, "777", False)

        Machine.copy(JAVA_HDFS_SRC_DIR,
                     LOCAL_HDFS_WORK_DIR,
                     user=None,
                     passwd=None)
        if not Machine.isWindows():
            (exit_code, _) = Maven.run('package',
                                       cwd=LOCAL_HDFS_WORK_DIR,
                                       env={
                                           HADOOP_VERSION_MAVEN_PARAMETER:
                                           HADOOP_VERSION,
                                           STORM_VERSION_MAVEN_PARAMETER:
                                           storm_version,
                                           HADOOP_CONF_MAVEN_PARAMETER:
                                           HADOOP_CONF,
                                           HDFS_FILE_MAVEN_PARAMETER:
                                           HDFS_FILE,
                                           HADOOP_CORE_MAVEN_PARAMETER:
                                           HADOOP_CONF,
                                           CORE_FILE_MAVEN_PARAMETER:
                                           CORE_FILE,
                                           PUBLIC_REPO_MAVEN_PARAMETER:
                                           Maven.getPublicRepoUrl()
                                       })
        else:
            filepath = os.path.join(MOD_CONF_PATH, "core-site.xml")
            (exit_code, _) = Maven.run(
                'package -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s'
                % (HADOOP_VERSION_MAVEN_PARAMETER, HADOOP_VERSION,
                   STORM_VERSION_MAVEN_PARAMETER, storm_version,
                   HADOOP_CONF_MAVEN_PARAMETER, HADOOP_CONF,
                   HDFS_FILE_MAVEN_PARAMETER, HDFS_FILE,
                   HADOOP_CORE_MAVEN_PARAMETER, HADOOP_CONF,
                   CORE_FILE_MAVEN_PARAMETER, CORE_FILE,
                   PUBLIC_REPO_MAVEN_PARAMETER, Maven.getPublicRepoUrl()),
                cwd=LOCAL_HDFS_WORK_DIR)
        ruAssert("Storm", exit_code == 0,
                 "[StormHDFSSetup] maven package command failed")
Code example #3
File: hiveutils.py  Project: thakkardharmik/beaver
def startLLAPWithChaosMonkey(interval='300'):
    hive_changes = {'tez-site.xml': {'tez.am.task.max.failed.attempts': '0'}}
    Hive.modifyConfig(hive_changes, services=['hiveserver2'])

    AMBARI_AGENT_TMP_DIR = '/var/lib/ambari-agent/tmp'
    ARTIFACTS_DIR = Config.getEnv('ARTIFACTS_DIR')
    LLAP_START_USER = Config.get('hive', 'HIVE_USER')
    dirs = [
        name for name in os.listdir(AMBARI_AGENT_TMP_DIR) if os.path.isdir(os.path.join(AMBARI_AGENT_TMP_DIR, name))
    ]

    llap_dirs = [d for d in dirs if d.startswith('llap-slider')]

    if len(llap_dirs) < 1:
        logger.info("Could not find llap dir under %s" % AMBARI_AGENT_TMP_DIR)
        Hive.startService(services=['hiveserver2'])
    else:
        llap_dir = llap_dirs[-1]

        resourceConfig = os.path.join(AMBARI_AGENT_TMP_DIR, llap_dir, 'resources.json')
        tmpResourceConfig = os.path.join(ARTIFACTS_DIR, 'resources.json')
        propertyMap = [(["components", "LLAP"], {"yarn.container.failure.threshold": "1000"})]
        util.writePropertiesToConfigJSONFileMulti(resourceConfig, tmpResourceConfig, propertyMap)
        Machine.copy(tmpResourceConfig, resourceConfig, user=Machine.getAdminUser(), passwd=Machine.getAdminPasswd())

        appConfig = os.path.join(AMBARI_AGENT_TMP_DIR, llap_dir, 'appConfig.json')
        tmpAppConfig = os.path.join(ARTIFACTS_DIR, 'appConfig.json')
        propertyMap = [
            (
                ["global"], {
                    "internal.chaos.monkey.probability.containerfailure": "10000",
                    "internal.chaos.monkey.interval.seconds": interval,
                    "internal.chaos.monkey.enabled": "True"
                }
            )
        ]
        util.writePropertiesToConfigJSONFileMulti(appConfig, tmpAppConfig, propertyMap)
        Machine.copy(tmpAppConfig, appConfig, user=Machine.getAdminUser(), passwd=Machine.getAdminPasswd())

        llapShellScript = os.path.join(AMBARI_AGENT_TMP_DIR, llap_dir, 'run.sh')
        exit_code, stdout = Machine.runas(LLAP_START_USER, llapShellScript)
        if exit_code != 0:
            logger.info("LLAP Shell Script failed to run successfully with %d" % exit_code)

        for i in range(10):
            time.sleep(30)
            logger.info("@%d: Check if LLAP cluster is successfully deployed" % i)
            exit_code, stdout = Machine.runas(LLAP_START_USER, 'slider status llap0')
            if exit_code == 0:
                break
            elif i == 9:
                logger.info("LLAP cluster failed to deploy")
Code example #4
    def setup_capacity_scheduler(cls, components):
        """
        Setup yarn capacity scheduler based on components.
        This API is not called during setup_module.
        :param components: list of components
        :type components: list of str
        :return: None
        """
        if RuSetup._defaultQueue:
            components.append("default")

        logger.info("*** setup_capacity_scheduler ***")
        if RuSetup._skipQueue is not None:
            logger.info("Components do not have a queue: " +
                        str(RuSetup._skipQueue))
            components = list(set(components) - RuSetup._skipQueue)
        logger.info("components = %s" % components)
        numComponents = len(components)
        percentPerQueue = 100.0 / numComponents
        percentPerQueueStr = "{0:0.2f}".format(percentPerQueue)
        xmlDict = {}
        rootQueues = ",".join(components)
        xmlDict["yarn.scheduler.capacity.root.queues"] = rootQueues
        for component in components:
            xmlDict["yarn.scheduler.capacity.root.%s.capacity" %
                    component] = percentPerQueueStr
            xmlDict["yarn.scheduler.capacity.root.%s.user-limit-factor" %
                    component] = 1
            xmlDict["yarn.scheduler.capacity.root.%s.maximum-capacity" %
                    component] = percentPerQueueStr
            xmlDict["yarn.scheduler.capacity.root.%s.state" %
                    component] = "RUNNING"
            xmlDict["yarn.scheduler.capacity.root.%s.acl_submit_jobs" %
                    component] = "*"
            xmlDict["yarn.scheduler.capacity.root.%s.acl_administer_jobs" %
                    component] = "*"
        util.dumpTextString(xmlDict, "====== PLANNED QUEUES ======",
                            "==================")
        master_capacity_file = os.path.join(Config.getEnv("WORKSPACE"),
                                            "tests", "rolling_upgrade", "yarn",
                                            "data", "capacity-scheduler.xml")
        modified_capacity_file = os.path.join(Config.getEnv("ARTIFACTS_DIR"),
                                              "capacity-scheduler.xml")
        Machine.copy(master_capacity_file, modified_capacity_file)
        util.writePropertiesToConfigXMLFile(modified_capacity_file,
                                            modified_capacity_file, xmlDict)
        #util.dumpText(modified_capacity_file, "====== capacity-scheduler.xml ======", "==================")
        if RuSetup._defaultQueue:
            components.remove("default")
        return modified_capacity_file
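As a concrete illustration with a hypothetical component list of ["storm", "hive"], no skip set, and the default queue disabled, each queue gets 100 / 2 = 50.00 percent and the planned xmlDict comes out roughly as:

{
    "yarn.scheduler.capacity.root.queues": "storm,hive",
    "yarn.scheduler.capacity.root.storm.capacity": "50.00",
    "yarn.scheduler.capacity.root.storm.user-limit-factor": 1,
    "yarn.scheduler.capacity.root.storm.maximum-capacity": "50.00",
    "yarn.scheduler.capacity.root.storm.state": "RUNNING",
    "yarn.scheduler.capacity.root.storm.acl_submit_jobs": "*",
    "yarn.scheduler.capacity.root.storm.acl_administer_jobs": "*",
    # ...plus the same six per-queue properties for "hive", also at 50.00
}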
Code example #5
File: ruStorm.py  Project: thakkardharmik/beaver
    def setup_storm_hbase_topology(cls, useStandaloneCmd):
        from beaver.component.hbase import HBase

        storm_version = Storm.getVersion(useStandaloneCmd=True)

        Machine.copy(JAVA_HBASE_SRC_DIR,
                     LOCAL_HBASE_WORK_DIR,
                     user=None,
                     passwd=None)

        if Machine.isWindows():
            (_, _) = Maven.run(
                'package -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s'
                % (HADOOP_VERSION_MAVEN_PARAMETER, HADOOP_VERSION,
                   STORM_VERSION_MAVEN_PARAMETER, storm_version,
                   HBASE_CONF_MAVEN_PARAMETER, HBASE_CONF,
                   HBASE_FILE_MAVEN_PARAMETER, HBASE_FILE,
                   HADOOP_CONF_MAVEN_PARAMETER, HADOOP_CONF,
                   HDFS_FILE_MAVEN_PARAMETER, HDFS_FILE,
                   CORE_FILE_MAVEN_PARAMETER, CORE_FILE,
                   PUBLIC_REPO_MAVEN_PARAMETER, Maven.getPublicRepoUrl()),
                cwd=LOCAL_HBASE_WORK_DIR)
        else:
            (_, _) = Maven.run('package',
                               cwd=LOCAL_HBASE_WORK_DIR,
                               env={
                                   HADOOP_VERSION_MAVEN_PARAMETER:
                                   HADOOP_VERSION,
                                   STORM_VERSION_MAVEN_PARAMETER:
                                   storm_version,
                                   HBASE_CONF_MAVEN_PARAMETER:
                                   HBASE_CONF,
                                   HBASE_FILE_MAVEN_PARAMETER:
                                   HBASE_FILE,
                                   HADOOP_CONF_MAVEN_PARAMETER:
                                   HADOOP_CONF,
                                   HDFS_FILE_MAVEN_PARAMETER:
                                   HDFS_FILE,
                                   CORE_FILE_MAVEN_PARAMETER:
                                   CORE_FILE,
                                   PUBLIC_REPO_MAVEN_PARAMETER:
                                   Maven.getPublicRepoUrl()
                               })

        exit_code, stdout = HBase.createTable(HBASE_TABLE_NAME, "cf", True,
                                              None)
        ruAssert("Storm", exit_code == 0)
        grant_cmd = "grant '%s', 'RWCXA', '%s', 'cf'" % (Storm.getStormUser(),
                                                         HBASE_TABLE_NAME)
        exit_code, stdout = HBase.runShellCmds([grant_cmd])
Code example #6
File: sliderhbase.py  Project: thakkardharmik/beaver
 def write_hbase_site(cls, hbase_site):
     tmpHBaseConfFile = os.path.join(Config.getEnv('ARTIFACTS_DIR'),
                                     "hbase-site.xml")
     with open(tmpHBaseConfFile, "w") as fout:
         fout.write(hbase_site)
     propertyMap = {'hbase.tmp.dir': '/tmp/hbase-tmp'}
     util.writePropertiesToConfigXMLFile(tmpHBaseConfFile, tmpHBaseConfFile,
                                         propertyMap)
     hbase_conf = Config.get('hbase', 'HBASE_CONF_DIR')
     hbaseConfFile = os.path.join(hbase_conf, "hbase-site.xml")
     Machine.copy(tmpHBaseConfFile,
                  hbaseConfFile,
                  user=Machine.getAdminUser(),
                  passwd=Machine.getAdminPasswd())
     return tmpHBaseConfFile
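util.writePropertiesToConfigXMLFile (also used in examples #4 and #8) merges name/value pairs into a Hadoop-style <configuration> file. A hypothetical xml.etree sketch of that behaviour, assuming the helper simply adds or overwrites <property> entries, is:

import xml.etree.ElementTree as ET


def write_properties_to_hadoop_xml(src, dest, props):
    # Merge a dict of name/value pairs into a Hadoop-style <configuration>
    # file, overwriting existing <property> entries and appending new ones.
    tree = ET.parse(src)
    root = tree.getroot()
    existing = {p.findtext("name"): p for p in root.findall("property")}
    for name, value in props.items():
        prop = existing.get(name)
        if prop is None:
            prop = ET.SubElement(root, "property")
            ET.SubElement(prop, "name").text = name
        value_el = prop.find("value")
        if value_el is None:
            value_el = ET.SubElement(prop, "value")
        value_el.text = str(value)
    tree.write(dest)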
Code example #7
File: ruStorm.py  Project: thakkardharmik/beaver
 def setup_storm_jdbc_topology(cls, useStandaloneCmd):
     from beaver.dbutil import MySQL
     MySQL.createUserWithAllPriveleges(STORM_JDBC_TEST_USER,
                                       STORM_JDBC_TEST_PASSWD)
     storm_version = Storm.getVersion(useStandaloneCmd=True)
     with open(HDFS_CONFIG_FILE, 'w') as file_obj:
         if Hadoop.isSecure():
             file_obj.write('hdfs.keytab.file: ' +
                            Machine.getHeadlessUserKeytab(
                                user=HADOOPQA_USER) + '\n')
             file_obj.write('hdfs.kerberos.principal: ' +
                            Machine.get_user_principal(user=HADOOPQA_USER) +
                            '\n')
     Machine.copy(JAVA_JDBC_SRC_DIR,
                  LOCAL_JDBC_WORK_DIR,
                  user=None,
                  passwd=None)
     post_fenton_opt = " -DpostFenton=true" if Storm.isAfterFenton() else ""
     package_cmd = 'package ' + post_fenton_opt
     (exit_code, _) = Maven.run(package_cmd,
                                cwd=LOCAL_JDBC_WORK_DIR,
                                env={
                                    HADOOP_VERSION_MAVEN_PARAMETER:
                                    HADOOP_VERSION,
                                    STORM_VERSION_MAVEN_PARAMETER:
                                    storm_version,
                                    HADOOP_CONF_MAVEN_PARAMETER:
                                    HADOOP_CONF,
                                    HDFS_FILE_MAVEN_PARAMETER:
                                    HDFS_FILE,
                                    CORE_FILE_MAVEN_PARAMETER:
                                    CORE_FILE,
                                    PUBLIC_REPO_MAVEN_PARAMETER:
                                    Maven.getPublicRepoUrl()
                                })
     ruAssert("Storm", exit_code == 0,
              "[StormJDBCSetup] maven package command failed")
Code example #8
File: ruSlider.py  Project: thakkardharmik/beaver
    def smoke_test_setup(cls):
        '''
        Setup required to run Smoke test
        '''
        from beaver.component.slider import Slider
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        HADOOPQA_USER = Config.get('hadoop', 'HADOOPQA_USER')
        UpgradePerNode.reportProgress("###  Starting set up for Slider smoke test  ####")
        if Hadoop.isSecure():
            keytabFile = Machine.getHeadlessUserKeytab(HADOOPQA_USER)
            kinitloc = Machine.which("kinit", "root")
            cmd = "%s  -k -t %s %s" % (kinitloc, keytabFile, Machine.get_user_principal(HADOOPQA_USER))
            exit_code, stdout = Machine.run(cmd)
            if exit_code != 0:
                UpgradePerNode.reportProgress("###  smoke test setup for Slider failed due to kinit failed  ####")

        # Local directory in artifacts that we'll run tests from
        # it is possible the download_source() will fail
        try:
            cls._LOCAL_WORK_DIR = Slider.download_source(useHDPBaseRepoFile=False, isRUcluster=True)
            logger.info("Local work dir = %s" % cls._LOCAL_WORK_DIR)
        except TypeError as err:
            UpgradePerNode.reportProgress("[FAILED][Slider][Smoke] Slider Source download fail.")
            return
        if not cls._LOCAL_WORK_DIR:
            UpgradePerNode.reportProgress("[FAILED][Slider][Smoke] Slider Source Code missing.")
            return
        # Local conf directory with modified conf for tests
        if not os.path.exists(cls._LOCAL_CONF_DIR):
            os.makedirs(cls._LOCAL_CONF_DIR)
        slider_conf = os.path.join(Slider.getSliderHome(), 'conf')
        logger.info("slider_conf = %s" % slider_conf)
        logger.info("os path exist slider_conf = %s" % os.path.exists(slider_conf))
        if os.path.exists(slider_conf):
            Machine.copy(
                os.path.join(slider_conf, 'log4j.properties'), os.path.join(cls._LOCAL_CONF_DIR, 'log4j.properties')
            )
            Machine.copy(
                os.path.join(slider_conf, 'slider-env.sh'), os.path.join(cls._LOCAL_CONF_DIR, 'slider-env.sh')
            )
        else:
            UpgradePerNode.reportProgress("[FAILED][Slider][Smoke] Slider Conf %s missing" % slider_conf)
            return

        if Hadoop.isSecure():
            util.writePropertiesToConfigXMLFile(
                os.path.join(Slider.getSliderHome(), 'conf', 'slider-client.xml'),
                os.path.join(cls._LOCAL_CONF_DIR, 'slider-client.xml'), {
                    "slider.funtest.enabled": "true",
                    "slider.test.agent.enabled": "true",
                    "HADOOP_CONF_DIR": Config.get('hadoop', 'HADOOP_CONF'),
                    "slider.am.keytab.local.path": Machine.getHeadlessUserKeytab(HADOOPQA_USER),
                    "slider.keytab.principal.name": Machine.get_user_principal(HADOOPQA_USER)
                }
            )
        else:
            util.writePropertiesToConfigXMLFile(
                os.path.join(Slider.getSliderHome(), 'conf', 'slider-client.xml'),
                os.path.join(cls._LOCAL_CONF_DIR, 'slider-client.xml'), {
                    "slider.funtest.enabled": "true",
                    "HADOOP_CONF_DIR": Config.get('hadoop', 'HADOOP_CONF'),
                    "slider.test.agent.enabled": "true"
                }
            )

        logger.info("Local work dir = %s" % cls._LOCAL_WORK_DIR)
        # Check that the precondition is met and the source is available
        if not os.path.exists(cls._LOCAL_WORK_DIR) or not os.path.exists(os.path.join(cls._LOCAL_WORK_DIR, 'pom.xml')):
            logger.info('Slider source does not appear to exist at %s' % (cls._LOCAL_WORK_DIR))
            UpgradePerNode.reportProgress(
                "###  Slider source does not appear to exist at %s ####" % (cls._LOCAL_WORK_DIR)
            )
        logger.info("Local work dir = %s" % cls._LOCAL_WORK_DIR)
        if cls._LOCAL_WORK_DIR is None:
            logger.info("ERROR: cls._LOCAL_WORK_DIR is None")
        # Install first so isolated modules can be tested
        exit_code, stdout = Maven.run(
            "clean install -DskipTests "
            "-Dhadoop.version=%s "
            "-Dprivate.repo.url=%s " % (Hadoop.getVersion(), Maven.getPublicRepoUrl()),
            cwd=cls._LOCAL_WORK_DIR
        )
        if exit_code != 0:
            UpgradePerNode.reportProgress("### Error installing Slider source : %d: %s ####" % (exit_code, stdout))
        else:
            UpgradePerNode.reportProgress("### Slider source install passed ####")
Code example #9
File: ruStorm.py  Project: thakkardharmik/beaver
    def setup_storm_hive_topology(cls, useStandaloneCmd):
        from beaver.component.hive import Hive

        storm_version = Storm.getVersion(useStandaloneCmd=True)
        hive_version = Hive.getVersion()
        global HIVE_METASTORE_URI
        global HIVE_HOST
        global HIVE_PORT
        global HIVE_WAREHOUSE_DIR
        HIVE_METASTORE_URI = Hive.getConfigValue(
            "hive.metastore.uris", defaultValue="thrift://localhost:9083")
        HIVE_WAREHOUSE_DIR = Hive.getConfigValue(
            "hive.metastore.warehouse.dir",
            defaultValue="/apps/hive/warehouse")
        HIVE_HOST = Hive.getHiveHost()
        HIVE_PORT = Hive.getMetastoreThriftPort()
        if Storm.isDalorBeyond():
            JAVA_HIVE_SRC_DIR = os.path.join(Config.getEnv('WORKSPACE'),
                                             'tests', 'rolling_upgrade',
                                             'Storm', '2_3', 'storm-hive',
                                             'java')
        else:
            JAVA_HIVE_SRC_DIR = os.path.join(Config.getEnv('WORKSPACE'),
                                             'tests', 'rolling_upgrade',
                                             'Storm', '2_2', 'storm-hive',
                                             'java')
        # hive.txn.manager and hive.support.concurrency are set through ambari as per bug-40500
        #logger.info("Restart Hive")
        #changes = {'hive-site.xml': {'hive.txn.manager': 'org.apache.hadoop.hive.ql.lockmgr.DbTxnManager',
        #                             'hive.support.concurrency': 'true'}}
        #Hive.modifyConfig(changes, services=['metastore'], restartService=True)
        logger.info("Create test database in Hive")

        exit_code, stdout = Hive.runQuery(
            cls.get_set_queue_cmd(useStandaloneCmd) +
            " drop database if exists stormdb cascade; \
                                               create database stormdb;")
        ruAssert("Storm", exit_code == 0,
                 "[StormHiveSetup] Failed to create test database" + stdout)
        HDFS.chmod(runasUser=HDFS.getHDFSUser(),
                   perm=777,
                   directory=HIVE_WAREHOUSE_DIR + "/" + DATABASE_NAME + ".db")
        #copy tests/storm/storm-hive/java to artifacts/storm-hive-tests
        logger.info("JAVA_SRC_DIR " + JAVA_HIVE_SRC_DIR)
        logger.info("LOCAL_WORK_DIR " + LOCAL_HIVE_WORK_DIR)
        Machine.copy(JAVA_HIVE_SRC_DIR,
                     LOCAL_HIVE_WORK_DIR,
                     user=None,
                     passwd=None)
        #mvn package
        if Machine.isWindows():
            (_, _) = Maven.run(
                'package -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s' %
                (HADOOP_VERSION_MAVEN_PARAMETER, HADOOP_VERSION,
                 STORM_VERSION_MAVEN_PARAMETER, storm_version,
                 HIVE_VERSION_MAVEN_PARAMETER,
                 hive_version, PUBLIC_REPO_MAVEN_PARAMETER,
                 Maven.getPublicRepoUrl(), CORE_FILE_MAVEN_PARAMETER,
                 CORE_FILE, HADOOP_CORE_MAVEN_PARAMETER, HADOOP_CONF,
                 HIVE_CORE_MAVEN_PARAMETER, HIVE_CORE_DIR,
                 HIVE_FILE_MAVEN_PARAMETER, HIVE_FILE),
                cwd=LOCAL_HIVE_WORK_DIR)
        else:
            (_, _) = Maven.run('package',
                               cwd=LOCAL_HIVE_WORK_DIR,
                               env={
                                   HADOOP_VERSION_MAVEN_PARAMETER:
                                   HADOOP_VERSION,
                                   STORM_VERSION_MAVEN_PARAMETER:
                                   storm_version,
                                   HIVE_VERSION_MAVEN_PARAMETER:
                                   hive_version,
                                   PUBLIC_REPO_MAVEN_PARAMETER:
                                   Maven.getPublicRepoUrl(),
                                   CORE_FILE_MAVEN_PARAMETER:
                                   CORE_FILE,
                                   HADOOP_CONF_MAVEN_PARAMETER:
                                   HADOOP_CONF,
                                   HDFS_FILE_MAVEN_PARAMETER:
                                   HDFS_FILE,
                                   HADOOP_CORE_MAVEN_PARAMETER:
                                   HADOOP_CONF,
                                   HIVE_CORE_MAVEN_PARAMETER:
                                   HIVE_CORE_DIR,
                                   HIVE_FILE_MAVEN_PARAMETER:
                                   HIVE_FILE
                               })
        create_table_q = "use %s; \
          drop table if exists %s; \
          create table %s (id int, name string, phone string, street string) \
          partitioned by (city string, state string) \
          clustered by (id) into %s buckets \
          stored as orc \
          tblproperties ('transactional'='true');" % (
            DATABASE_NAME, HIVE_TABLE_NAME, HIVE_TABLE_NAME, "5")

        exit_code, stdout = Hive.runQuery(
            cls.get_set_queue_cmd(useStandaloneCmd) + create_table_q)
        ruAssert(
            "Storm", exit_code == 0,
            "[StormHiveSetup] Failed to create test table userdata_partitioned"
        )
        HDFS.chmod(runasUser=HDFS.getHDFSUser(),
                   perm=777,
                   directory=HIVE_WAREHOUSE_DIR + "/" + DATABASE_NAME +
                   ".db/" + HIVE_TABLE_NAME)