Example #1
 @classmethod
 def getZooLogDir(cls, logoutput=False):
     '''
     Returns Zookeeper log directory (String).
     '''
     matchObjList = None
     zkNode = None
     if Machine.isHumboldt():
         try:
             from beaver.component.hbase import HBase
             # get some Zookeeper node from the HBase master list
             hmaster_nodes = HBase.getAllMasterNodes()
             if hmaster_nodes:
                 zkNode = hmaster_nodes[0]
             if zkNode:
                 Machine.copyToLocal(None, zkNode, ZOOKEEPER_ENV_FILE,
                                     Machine.getTempDir())
                 REMOTE_ZOOKEEPER_ENV_FILE = os.path.join(
                     Machine.getTempDir(), 'zookeeper-env.sh')
                 matchObjList = util.findMatchingPatternInFile(
                     REMOTE_ZOOKEEPER_ENV_FILE,
                     "export ZOO_LOG_DIR=(.*)",
                     return0Or1=False)
         except Exception:
             pass
     if not matchObjList:
         #gateway should have the config file.
         matchObjList = util.findMatchingPatternInFile(
             ZOOKEEPER_ENV_FILE,
             "export ZOO_LOG_DIR=(.*)",
             return0Or1=False)
     returnValue = None
     if matchObjList:
         returnValue = matchObjList[0].group(1)
     if logoutput:
         logger.info("Zookeeper.getZooLogDir returns %s", returnValue)
     return returnValue
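
The lookup above hinges on util.findMatchingPatternInFile, a beaver helper that scans a file for a regex and returns the match objects. As a rough illustration of that step, here is a minimal, self-contained sketch using only the standard library; find_matching_pattern_in_file is a hypothetical stand-in, not the beaver implementation:

import re

def find_matching_pattern_in_file(path, pattern):
    # Return one re.Match object per line that matches the pattern.
    matches = []
    regex = re.compile(pattern)
    with open(path) as f:
        for line in f:
            m = regex.search(line)
            if m:
                matches.append(m)
    return matches

# Pull the log directory out of a local copy of zookeeper-env.sh:
# matchObjList = find_matching_pattern_in_file('/tmp/zookeeper-env.sh',
#                                              r"export ZOO_LOG_DIR=(.*)")
# log_dir = matchObjList[0].group(1) if matchObjList else None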
Example #2
 @classmethod
 def get_config_value(cls, *keys):
     streamline_host = cls.get_hosts()[0]
     streamline_yaml = "/etc/streamline/conf/streamline.yaml"
     destination_path = os.path.join(Machine.getTempDir(), "local_streamline.yaml")
     Machine.copyToLocal(user="******", host=streamline_host, srcpath=streamline_yaml, destpath=destination_path)
     import yaml
     with open(destination_path) as f:
         # safe_load avoids executing arbitrary YAML tags from the remote file
         yaml_obj = yaml.safe_load(f)
     # walk the nested keys: get_config_value("a", "b") -> yaml_obj["a"]["b"]
     for akey in keys:
         yaml_obj = yaml_obj.get(akey)
     return yaml_obj
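
The interesting part of get_config_value is the nested-key walk over the parsed YAML. A minimal, self-contained sketch of that pattern (assuming PyYAML is installed; the file name and keys below are illustrative):

import yaml  # PyYAML, assumed available

def get_nested_value(yaml_path, *keys):
    # get_nested_value(path, 'a', 'b') is equivalent to data['a']['b'],
    # but stops with None as soon as a key is missing.
    with open(yaml_path) as f:
        node = yaml.safe_load(f)
    for key in keys:
        if not isinstance(node, dict):
            return None
        node = node.get(key)
    return node

# e.g. get_nested_value("local_streamline.yaml", "registries", "port")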
Example #3
def verifyLogMessageInServiceLog(text, service, timestamp=0, dateTimeFormat=None):
    '''
    Returns a truthy match result when the given log message appears in the
    service log after the given timestamp; returns None when the service log
    or the search text is missing.
    '''
    hiveLog = Hive.getServiceLog(service)
    if not hiveLog or not text:
        return None
    hiveHost = Hive.getHiveHost(service)
    destlog = os.path.join(Config.getEnv('ARTIFACTS_DIR'), 'tmp-%d.log' % int(999999 * random.random()))
    Machine.copyToLocal(None, hiveHost, hiveLog, destlog)
    return util.findMatchingPatternInFileAfterTimestamp(destlog, text, timestamp, dateTimeFormat=dateTimeFormat)
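
util.findMatchingPatternInFileAfterTimestamp does the actual filtering here: only log lines stamped at or after the given time count as matches. A rough, self-contained sketch of that idea (the 19-character timestamp prefix and the default format are assumptions, not the beaver helper's real signature):

import re
import time
from datetime import datetime

def find_pattern_after_timestamp(path, text, timestamp,
                                 dateTimeFormat="%Y-%m-%d %H:%M:%S"):
    # Return True if `text` matches a line whose leading timestamp is at or
    # after `timestamp` (seconds since the epoch).
    pattern = re.compile(text)
    with open(path) as f:
        for line in f:
            try:
                stamp = datetime.strptime(line[:19], dateTimeFormat)
            except ValueError:
                continue  # line does not start with a timestamp
            if time.mktime(stamp.timetuple()) >= timestamp and pattern.search(line):
                return True
    return False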
Example #4
 @classmethod
 def get_local_job_summary_logs(cls, component):
     """
     Copy job summary logs to the local artifacts dir [artifacts/jobsummary_<host>.log]
     parameter: component : component name for which log collection is taking place
     return: list of local copies of the job summary log
     Note: some components, such as HA and Falcon, need special handling
           because there are multiple job summary log files
     """
     LocalJobSummaryLogs = []
     try:
         if component == FALCON_STR:
             from beaver.component.falcon import Falcon  # pylint: disable=redefined-outer-name
             host1 = Falcon.get_cluster_1_masters()['rm']
             host2 = Falcon.get_cluster_2_masters()['rm']
             host3 = Falcon.get_cluster_3_masters()['rm']
             for host in [host1, host2, host3]:
                 JobSummaryLog = ComponentLogUtil.MAPRED_getJobSummaryLogFile(
                     host)
                 LocalJobSummaryLog = os.path.join(
                     Config.getEnv('ARTIFACTS_DIR'),
                     "jobsummary_" + host + ".log")
                 Machine.copyToLocal(None, host, JobSummaryLog,
                                     LocalJobSummaryLog, None)
                 if Machine.pathExists(None,
                                       None,
                                       LocalJobSummaryLog,
                                       passwd=None):
                     LocalJobSummaryLogs.append(LocalJobSummaryLog)
         else:
             for host in ComponentLogUtil.YARN_getRMHANodes():
                 JobSummaryLog = ComponentLogUtil.MAPRED_getJobSummaryLogFile(
                     host)
                 LocalJobSummaryLog = os.path.join(
                     Config.getEnv('ARTIFACTS_DIR'),
                     "jobsummary_" + host + ".log")
                 Machine.copyToLocal(Machine.getAdminUser(), host,
                                     JobSummaryLog, LocalJobSummaryLog,
                                     Machine.getAdminPasswd())
                 Machine.chmod("777",
                               LocalJobSummaryLog,
                               user=Machine.getAdminUser(),
                               passwd=Machine.getAdminPasswd())
                 if Machine.pathExists(Machine.getAdminUser(),
                                       None,
                                       LocalJobSummaryLog,
                                       passwd=Machine.getAdminPasswd()):
                     LocalJobSummaryLogs.append(LocalJobSummaryLog)
         return LocalJobSummaryLogs
     except Exception as e:
         logger.info("Exception occurs at job_summary_log collection %s", e)
         tb = traceback.format_exc()
         logger.info(tb)
         return LocalJobSummaryLogs
Example #5
    @classmethod
    def Zookeeper_getZooLogDir(cls, logoutput=True):
        try:
            from beaver.component.zookeeper import ZOOKEEPER_ENV_FILE
            matchObjList = None
            if Machine.isHumboldt():
                for node in cls.Zookeeper_getZKHosts():
                    try:
                        Machine.copyToLocal(None, node, ZOOKEEPER_ENV_FILE,
                                            Machine.getTempDir())
                        REMOTE_ZOOKEEPER_ENV_FILE = os.path.join(
                            Machine.getTempDir(), 'zookeeper-env.sh')
                        matchObjList = util.findMatchingPatternInFile(
                            REMOTE_ZOOKEEPER_ENV_FILE,
                            "export ZOO_LOG_DIR=(.*)",
                            return0Or1=False)
                        if matchObjList:
                            returnValue = matchObjList[0].group(1)
                            if returnValue == "${LOG_DIR}":
                                continue
                            else:
                                break
                    except Exception:
                        pass

            if not matchObjList:
                try:
                    matchObjList = util.findMatchingPatternInFile(
                        ZOOKEEPER_ENV_FILE,
                        "export ZOO_LOG_DIR=(.*)",
                        return0Or1=False)
                except Exception as e:
                    logger.info(
                        "Exception occured during Zookeeper_getZooLogDir() call: %s",
                        str(e))
                    if Machine.isWindows():
                        logger.info("Using the default zookeer log dir")
                        winZKLogDir = "D:\\hadoop\\logs\\zookeeper"
                        return winZKLogDir

            returnValue = None
            if matchObjList:
                returnValue = matchObjList[0].group(1)
                if returnValue == "${LOG_DIR}":
                    return None
            return returnValue
        except Exception:
            if logoutput:
                logger.error(
                    "Exception occured during Zookeeper_getZooLogDir() call")
                logger.error(traceback.format_exc())
            return None
Example #6
 @classmethod
 def smoke_test_setup(cls):
     '''
     Setup required to run the smoke test.
     Creates a new topology file to check that the service registry does not
     persist after an upgrade.
     '''
     Machine.copyToLocal(user=Machine.getAdminUser(),
                         host=KNOX_HOST,
                         srcpath=os.path.join(KNOX_TOPOLOGY, 'default.xml'),
                         destpath=os.path.join(KNOX_TOPOLOGY,
                                               'default_new.xml'),
                         passwd=Machine.getAdminPasswd())
     time.sleep(15)
     #Knox.setupHttpRequestsVerboseLogging()
     cls.setupRanger()
     logger.info("TODO")
Example #7
 @classmethod
 def getConfigValue(cls, propertyValue, defaultValue=None):
     _localHiveConf = os.path.join(Config.getEnv('ARTIFACTS_DIR'), cls._localConfDir)
     if not os.path.exists(_localHiveConf):
         admin_user = Machine.getAdminUser()
         Machine.copyToLocal(admin_user, cls.getHiveHost(), cls._configDir, _localHiveConf)
         Machine.chmod('777', _localHiveConf, recursive=True, user=admin_user)
     if propertyValue in cls._modifiedConfigs:
         return cls._modifiedConfigs[propertyValue]
     value = util.getPropertyValueFromConfigXMLFile(
         os.path.join(_localHiveConf, 'hiveserver2-site.xml'), propertyValue
     )
     if value is None or value == '':
         value = util.getPropertyValueFromConfigXMLFile(
             os.path.join(_localHiveConf, 'hive-site.xml'), propertyValue, defaultValue=defaultValue
         )
     return value
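
The lookup order here is the key detail: in-memory overrides win, then hiveserver2-site.xml, then hive-site.xml, then the caller's default. Distilled into a small sketch over plain dicts (all names illustrative):

def lookup_config(prop, modified, hs2_site, hive_site, default=None):
    # modified overrides take precedence over either XML file
    if prop in modified:
        return modified[prop]
    value = hs2_site.get(prop)
    if value in (None, ''):
        value = hive_site.get(prop, default)
    return value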
Example #8
 @classmethod
 def retrieveGatewaySiteConfig(cls):
     # Retrieve the remote file to a local temp file.
     timestamp = str(time.time())
     tempConfFile = os.path.join(Machine.getTempDir(),
                                 "gateway-site.xml_%s" % timestamp)
     remoteConfFile = os.path.join(Config.get('knox', 'KNOX_CONF'),
                                   'gateway-site.xml')
     Machine.copyToLocal(user=None,
                         host=KNOX_HOST,
                         srcpath=remoteConfFile,
                         destpath=tempConfFile)
     assert os.stat(
         tempConfFile
     ).st_size > 0, "Failed to copy Knox config from remote host, file empty %s" % tempConfFile
     # Parse the local temp file
     conf = util.readDictFromConfigXMLFile(tempConfFile)
     # Delete the local temp file
     Machine.rm(filepath=tempConfFile, user=None, host=None)
     assert not os.path.isfile(
         tempConfFile
     ), "Failed to delete temp Knox config file %s" & tempConfFile
     # Return the dict
     return conf
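
util.readDictFromConfigXMLFile parses the Hadoop-style <configuration> XML into a name/value dict. A minimal stand-in using the standard library, under the assumption that the file follows the usual <property><name>/<value> layout:

import xml.etree.ElementTree as ET

def read_dict_from_config_xml(path):
    # {property name: property value} for a Hadoop-style configuration file
    conf = {}
    root = ET.parse(path).getroot()
    for prop in root.findall('property'):
        name = prop.findtext('name')
        if name is not None:
            conf[name] = prop.findtext('value')
    return conf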
Example #9
def modifyConfigRemote(changes, OriginalConfDir, ConfDir, nodes, id=None):  # pylint: disable=redefined-builtin
    '''
    Modifies hadoop config or config with similar structure.
    Returns None.

    Linux:
    1. Create tmpModifyConfDir_<time> in artifacts dir based on source config directory in gateway
    2. Modify contents in created directory.
    3. Copy the directory to /tmp/hadoopConf in target machines

    '''
    _backuploc = getBackupConfigLocation(id=id)
    tmpConfDir = os.path.join(
        ARTIFACTS_DIR,
        'tmpModifyConfDir_' + str(int(round(time.time() * 1000))))
    Config.set(PYTHON_CONFIG_NAME,
               TMP_CONF_DIR_VAR,
               tmpConfDir,
               overwrite=True)
    tmpConfDir = Config.get(PYTHON_CONFIG_NAME, TMP_CONF_DIR_VAR)
    for node in nodes:
        Machine.rm(Machine.getAdminUser(), node, ConfDir, isdir=True)
        Machine.rm(Machine.getAdminUser(),
                   Machine.getfqdn(),
                   tmpConfDir,
                   isdir=True)
        logger.info("*** COPY ORIGINAL CONFIGS FROM REMOTE TO LOCAL ***")
        Machine.copyToLocal(None, node, OriginalConfDir, tmpConfDir)
        #if node == Machine.getfqdn():
        #   Machine.copy(OriginalConfDir,tmpConfDir)
        for filename, values in changes.items():
            filepath = os.path.join(tmpConfDir, filename)
            if os.path.isfile(filepath):
                logger.info("Modifying file locally: %s", filepath)
                _fname, fext = os.path.splitext(filepath)
                if fext == ".xml":
                    util.writePropertiesToConfigXMLFile(
                        filepath, filepath, values)
                elif fext == ".json":
                    util.writePropertiesToConfigJSONFile(
                        filepath, filepath, values, ["global"],
                        "site.hbase-site.")
                elif fext == ".properties":
                    util.writePropertiesToFile(filepath, filepath, values)
                elif fext == ".cfg":
                    util.writePropertiesToFile(filepath, filepath, values)
                elif fext == ".conf":
                    util.writePropertiesToConfFile(filepath, filepath, values)
                elif fext == ".sh":
                    text = ""
                    for value in values:
                        text += "\n" + value
                    util.writeToFile(text, filepath, isAppend=True)
                elif fext == ".yaml":
                    text = ""
                    for k, v in values.iteritems():
                        text += k + " : " + v
                    util.writeToFile(text, filepath, isAppend=True)
                elif fext == ".cmd":
                    text = ""
                    for value in values:
                        text += "\n" + value
                    util.writeToFile(text, filepath, isAppend=True)
                elif fext is None or fext == "" or fext == ".include":
                    text = ""
                    isFirst = True
                    for value in values:
                        if isFirst:
                            text += value
                            isFirst = False
                        else:
                            text += "\n" + value
                    util.writeToFile(text, filepath, isAppend=True)
        logger.info("****** Copy back the configs to remote ******")
        #if node!=Machine.getfqdn():
        Machine.copyFromLocal(None, node, tmpConfDir, ConfDir)
        Machine.chmod('777', ConfDir, recursive=True, host=node)
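
Most of the per-extension branches above delegate to small rewrite helpers such as util.writePropertiesToFile. As a rough sketch of what such a helper might do for .properties/.cfg files (a hypothetical stand-in; note the original calls it with infile == outfile, which this version tolerates by reading everything before writing):

def write_properties_to_file(infile, outfile, changes):
    remaining = dict(changes)
    lines = []
    with open(infile) as f:
        for line in f:
            key = line.split('=', 1)[0].strip()
            if key in remaining:
                # replace the value of an existing key
                lines.append("%s=%s\n" % (key, remaining.pop(key)))
            else:
                lines.append(line)
    # append keys that were not present in the original file
    for key, value in remaining.items():
        lines.append("%s=%s\n" % (key, value))
    with open(outfile, 'w') as f:
        f.writelines(lines)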
Example #10
def generateTestReportConf(infile, outfile, results, startTime, endTime):
    config = ConfigParser()
    config.optionxform = str
    config.read(infile)

    if config.has_section(SECTION):
        # set the version to 2.0 so new keys are processed
        config.set(SECTION, 'report_version', '2.0')
        # Store the original component value; some test-suite runs (e.g.
        # HiveServer2Concurr, Sqoop) change it for reporting, but we need to
        # preserve it so archived logs stay unique
        config.set(SECTION, "TESTSUITE_COMPONENT", config.get(SECTION, "COMPONENT"))
        for option, value in config.items(SECTION):
            try:
                if ((option != "SECURE" and value != "")
                        or (Config.getEnv("HDP_STACK_INSTALLED").lower() == "false" and value != "")):
                    continue
                elif option == "BUILD_ID":
                    # if REPO_URL is not set, set the BUILD_ID to 0
                    # otherwise get the BUILD_ID from the file
                    if config.get(SECTION, "REPO_URL") == "" or not config.has_option(SECTION, "REPO_URL"):
                        config.set(SECTION, option, 0)
                    else:
                        config.set(SECTION, option, getBuildId(config.get(SECTION, "REPO_URL")))
                elif option == "HOSTNAME":
                    config.set(SECTION, option, socket.getfqdn())
                elif option == "COMPONENT_VERSION":
                    if not config.has_option(SECTION, "COMPONENT") or config.get(SECTION, "COMPONENT") == "":
                        config.set(SECTION, "COMPONENT", "Hadoop")
                    if "ambarieu-hdf" in config.get(SECTION, "COMPONENT"):
                        config.set(SECTION, option, getComponentVersion(config.get(SECTION, "COMPONENT")))
                    elif "ambari" in config.get(SECTION, "COMPONENT"):
                        config.set(SECTION, option, getComponentVersion("Ambari"))
                    else:
                        config.set(SECTION, option, getComponentVersion(config.get(SECTION, "COMPONENT")))
                elif option == "OS":
                    if Machine.isWindows():
                        cmd = 'powershell (Get-WmiObject -class Win32_OperatingSystem).Caption'
                        _exit_code, stdout = Machine.runasDeprecated(
                            user=Machine.getAdminUser(), cmd=cmd, passwd=Machine.getAdminPasswd()
                        )
                        config.set(SECTION, option, stdout)
                        continue

                    osname = platform.dist()[0]
                    # hack to check for Oracle OS, since Python
                    # cannot tell the difference
                    if os.path.exists('/etc/oracle-release'):
                        osname = 'oracle'
                    ver = platform.dist()[1]
                    # Need a hack for SLES as python cannot determine 11.1 vs 11.3
                    if osname.lower() == 'suse':
                        # read the file /etc/SuSE-release and determine the patch version.
                        f = open('/etc/SuSE-release', 'r')
                        txt = f.read()
                        f.close()
                        # get the patch level. For example
                        # PATCHLEVEL = 3
                        m = re.search('PATCHLEVEL = (.*)', txt, re.MULTILINE)
                        # if you find a match append to the version string
                        if m and m.group(1):
                            ver = '%s.%s' % (ver, m.group(1))

                    arch = platform.architecture()[0]

                    if os.path.exists('/etc/os-release'):
                        try:
                            f = open('/etc/os-release', 'r')
                            txt = f.read()
                            f.close()
                            m = re.search('NAME="(.*)"', txt, re.MULTILINE)
                            if m and m.group(1):
                                if m.group(1) == "Amazon Linux":
                                    osname = "amazonlinux"
                                    m = re.search('VERSION="(.*)"', txt, re.MULTILINE)
                                    if m and m.group(1):
                                        ver = m.group(1)
                                        if "2 (2017.12)" in ver:
                                            ver = "2"
                                    # the amzn ami which qe team is using is of 64 bit
                                    arch = "64bit"
                        except Exception:
                            logger.error(traceback.format_exc())

                    config.set(SECTION, option, '%s-%s-%s' % (osname, ver, arch))
                elif option == "HDP_STACK":
                    if "ambari" in config.get(SECTION, "COMPONENT"):
                        from beaver.component.ambari import Ambari
                        hdpVersion = Ambari.getHDPVersion()
                        if hdpVersion and hdpVersion[0] in ('1', '2'):
                            config.set(SECTION, option, "h" + hdpVersion[0])
                        else:
                            config.set(SECTION, option, 'h2')
                    else:
                        hadoopVersion = getComponentVersion("Hadoop")
                        if hadoopVersion and hadoopVersion[0] in ('1', '2'):
                            config.set(SECTION, option, "h" + hadoopVersion[0])
                elif option == "TDE":
                    from beaver.component.hadoop import HDFS2
                    if HDFS2.isKMSEnabled():
                        config.set(SECTION, option, "on")
                    else:
                        config.set(SECTION, option, "off")

                elif option == "SECURE":
                    if "ambari" in config.get(SECTION, "COMPONENT"):
                        from beaver.component.ambari import Ambari

                        secure_str = str(Ambari.isSecure()).lower()
                        config.set(SECTION, option, secure_str)
                    else:
                        from beaver.component.hadoop import Hadoop
                        secure_str = str(Hadoop.isSecure()).lower()
                        if config.get(SECTION, "COMPONENT") == "HiveServer2Concurr":
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            if Hadoop.isSecure():
                                config.set(SECTION, "hs2_authentication", "Kerberos")
                            else:
                                config.set(SECTION, "hs2_authentication", "Unsecure")
                            config.set(SECTION, "hs2_transport", "Binary")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrHTTP":
                            if Hadoop.isEncrypted():
                                secure_str += "-http-en"
                            else:
                                secure_str += "-http"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            if Hadoop.isSecure():
                                config.set(SECTION, "hs2_authentication", "Kerberos")
                            else:
                                config.set(SECTION, "hs2_authentication", "Unsecure")
                            config.set(SECTION, "hs2_transport", "HTTP")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrLDAP":
                            if Hadoop.isEncrypted():
                                secure_str += "-ldap-en"
                            else:
                                secure_str += "-ldap"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            config.set(SECTION, "hs2_authentication", "LDAP")
                            config.set(SECTION, "hs2_transport", "Binary")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrLDAPHTTP":
                            if Hadoop.isEncrypted():
                                secure_str += "-ldap-http-en"
                            else:
                                secure_str += "-ldap-http"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            config.set(SECTION, "hs2_authentication", "LDAP")
                            config.set(SECTION, "hs2_transport", "HTTP")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrSSL":
                            if Hadoop.isEncrypted():
                                secure_str += "-ssl-en"
                            else:
                                secure_str += "-ssl"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            config.set(SECTION, "hs2_authentication", "Unsecure")
                            config.set(SECTION, "hs2_transport", "Binary")
                            config.set(SECTION, "hs2_ssl", "true")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrSSLHTTP":
                            if Hadoop.isEncrypted():
                                secure_str += "-ssl-http-en"
                            else:
                                secure_str += "-ssl-http"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            config.set(SECTION, "hs2_authentication", "Unsecure")
                            config.set(SECTION, "hs2_transport", "HTTP")
                            config.set(SECTION, "hs2_ssl", "true")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrTPUser":
                            if Hadoop.isEncrypted():
                                secure_str += "-tpuser-en"
                            else:
                                secure_str += "-tpuser"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            config.set(SECTION, "hs2_authentication", "Kerberos")
                            config.set(SECTION, "hs2_transport", "Binary")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "true")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrLongRunning":
                            if Hadoop.isEncrypted():
                                secure_str += "-longrun-en"
                            else:
                                secure_str += "-longrun"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            if Hadoop.isSecure():
                                config.set(SECTION, "hs2_authentication", "Kerberos")
                            else:
                                config.set(SECTION, "hs2_authentication", "Unsecure")
                            config.set(SECTION, "hs2_transport", "Binary")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "SqoopDb2":
                            config.set(SECTION, "COMPONENT", "Sqoop")
                        else:
                            if Hadoop.isEncrypted():
                                secure_str += '-en'
                        config.set(SECTION, option, secure_str)
                elif option == "BLOB":
                    pass
                elif option == "RAN":
                    # don't count skipped: RAN = passed + failed + aborted
                    config.set(SECTION, option, results[0] + len(results[1]) + results[3])
                elif option == "PASS":
                    config.set(SECTION, option, results[0])
                elif option == "FAIL":
                    config.set(SECTION, option, len(results[1]))
                elif option == "SKIPPED":
                    config.set(SECTION, option, results[2])
                elif option == "ABORTED":
                    config.set(SECTION, option, results[3])
                elif option == "FAILED_TESTS":
                    failedTests = ",".join(results[1])
                    failureSummary = ReportHelper.getFailureSummary(failedTests)
                    config.set(SECTION, "FAILURE_SUMMARY", failureSummary)
                    tmpFailedTests = ReportHelper.getGroupedFailedTests(failedTests)
                    config.set(SECTION, option, ReportHelper.getMergedFailedTests(tmpFailedTests, failureSummary))
                elif option == "NUM_OF_DATANODES":
                    if "ambari" in config.get(SECTION, "COMPONENT"):
                        config.set(SECTION, option, "N/A")
                    else:
                        from beaver.component.hadoop import HDFS

                        config.set(SECTION, option, HDFS.getDatanodeCount())
                elif option == "BUILD_URL":
                    if 'BUILD_URL' in os.environ:
                        config.set(SECTION, option, os.environ['BUILD_URL'])
                elif option == "HDP_RELEASE":
                    # If RU/RB, we must override HDP_RELEASE
                    #   (we can't fix this with product front. Discussed in BUG-31369.)
                    if config.get(SECTION, "TESTSUITE_COMPONENT").lower() in ["rollingupgrade", "rollback",
                                                                              "rollingupgrade-ha", "rollback-ha"]:
                        config.set(SECTION, option, "dal")
                    else:
                        config.set(SECTION, option, getRepoId(config.get(SECTION, "REPO_URL")))
                elif option == "JDK":
                    config.set(SECTION, option, Machine.getJDK())
                elif option == "DB":
                    if not config.has_option(SECTION, "COMPONENT") or config.get(SECTION, "COMPONENT") == "":
                        config.set(SECTION, "COMPONENT", "Hadoop")
                    config.set(SECTION, option, getDatabaseFlavor(config.get(SECTION, "COMPONENT")))
            except Exception as error:
                logger.error("ERROR processing option: %s", option)
                logger.error("Exception: %s", error)
        # make sure Hadoop is installed before appending Tez to the component name
        if Config.getEnv("HDP_STACK_INSTALLED").lower() == "true" and config.has_option(SECTION, "COMPONENT"):
            if "ambari" in config.get(SECTION, "COMPONENT"):
                kerberos_server_type = 'n/a'
                from beaver.component.ambari import Ambari
                if Ambari.isSecure():
                    kerberos_server_type = 'mit'
                config.set(SECTION, 'kerberos_server_type', kerberos_server_type)
            else:
                from beaver.component.hadoop import Hadoop, HDFS
                from beaver.component.slider import Slider

                # set execution_framework. New columns for dashboard v2
                # TODO: This needs to be improved to be component specific.
                if Hadoop.isTez():
                    if Slider.isInstalled():
                        config.set(SECTION, 'execution_framework', 'tez-slider')
                    else:
                        config.set(SECTION, 'execution_framework', 'tez')
                else:
                    if Slider.isInstalled():
                        config.set(SECTION, 'execution_framework', 'mr-slider')
                    else:
                        config.set(SECTION, 'execution_framework', 'mr')
                # set wire_encryption
                # TODO: This needs to be improved to be component specific.
                if Hadoop.isEncrypted():
                    config.set(SECTION, 'wire_encryption', 'true')
                else:
                    config.set(SECTION, 'wire_encryption', 'false')
                # set kerberos_server_type
                kerberos_server_type = 'n/a'
                if Hadoop.isSecure():
                    kerberos_server_type = 'mit'
                    # add a check for AD
                    if Machine.isLinux():
                        gateway = Config.get("machine", "GATEWAY")
                        Machine.copyToLocal(Machine.getAdminUser(), gateway, '/etc/krb5.conf', '/tmp/krb5.conf')
                        f = open('/tmp/krb5.conf', 'r')
                        txt = f.read()
                        f.close()
                        # Find all admin_server entries in krb5.conf (with ports, if any)
                        p = re.compile('admin_server = ((?!FILE).*)')
                        admin_server_list_with_ports = p.findall(txt)
                        admin_server_list = []
                        for admin_server_with_port in admin_server_list_with_ports:
                            admin_server_list.append(admin_server_with_port.split(':')[0])
                        # If there is more than one admin server, first check
                        # whether one of them is an AD host; then, to make sure
                        # not all of them are AD hosts, check that at least one
                        # admin server is not in the AD hosts list.
                        if len(admin_server_list) > 1:
                            for ad_host in AD_HOSTS_LIST:
                                if ad_host in admin_server_list:
                                    for admin_server in admin_server_list:
                                        if admin_server not in AD_HOSTS_LIST:
                                            kerberos_server_type = 'ad+mit'
                                            break
                        else:
                            for ad_host in AD_HOSTS_LIST:
                                if ad_host in admin_server_list:
                                    kerberos_server_type = 'ad'
                                    break
                config.set(SECTION, 'kerberos_server_type', kerberos_server_type)

                try:
                    from beaver.component.xa import Xa
                    # set argus. New column for dashboard v2
                    if Xa.isArgus():
                        config.set(SECTION, 'argus', 'true')
                    else:
                        config.set(SECTION, 'argus', 'false')
                except Exception as error:
                    logger.error("ERROR processing argus")
                    logger.error("Exception: %s", error)

                #set TDE
                if HDFS.isKMSEnabled():
                    config.set(SECTION, 'tde', 'true')
                else:
                    config.set(SECTION, 'tde', 'false')

        config.set(SECTION, 'START_TIME', startTime)
        config.set(SECTION, 'END_TIME', endTime)
        coverage_summary_file = os.path.join(Config.getEnv('ARTIFACTS_DIR'), "coverage_summary.json")
        if os.path.exists(coverage_summary_file):
            with open(coverage_summary_file, "r") as fp:
                coverage_summary = json.load(fp)
            for key, value in coverage_summary.items():
                config.set(SECTION, key, value)
        with open(outfile, 'w') as fp:
            config.write(fp)

    elif config.has_section('SANDBOX'):
        out_config = ConfigParser()
        out_config.optionxform = str
        out_config.add_section(SECTION)

        sb_type = config.get('SANDBOX', 'vm_env')
        out_config.set(SECTION, 'BUILD_ID', '0')
        ova_uri = ''
        if sb_type == 'VBOX':
            ova_uri = config.get(sb_type, 'vbox_ova_uri')
        elif sb_type == 'FUSION':
            ova_uri = config.get(sb_type, 'fus_ova_uri')
        elif sb_type == 'HYPERV':
            ova_uri = config.get(sb_type, 'hyperv_ova_uri')
        out_config.set(SECTION, 'REPO_URL', ova_uri)
        sb_host = ''
        if os.name != 'nt':
            sb_host = os.popen("hostname -f").read().strip()
            sb_host = sb_host + '(' + os.popen("ifconfig en0 | grep 'inet ' | awk -F ' ' '{print $2}'"
                                               ).read().strip() + ')'
        else:
            sb_host = 'Kiev local host'
        out_config.set(SECTION, 'HOSTNAME', sb_host)

        out_config.set(SECTION, 'HDP_STACK', "h" + (config.get('VERSIONS', 'hadoop_version')[0]))
        out_config.set(SECTION, 'COMPONENT', 'SANDBOX')
        out_config.set(SECTION, 'TESTSUITE_COMPONENT', 'SANDBOX')

        if sb_type == 'HYPERV':
            sb_ver = 'hyper-v'
        else:
            tmp = ['%20', 'Hortonworks', 'VirtualBox', '.ova', 'VMware', '_']
            sb_ver = ova_uri.split('/')[5]
            for rstr in tmp:
                sb_ver = sb_ver.replace(rstr, '')

        out_config.set(SECTION, 'COMPONENT_VERSION', sb_ver)
        out_config.set(SECTION, 'CHECKSUM', 'N/A')

        ver_num = os.popen("sw_vers | grep 'ProductVersion:' | awk -F ' ' '{print $2}'").read().strip()
        if sb_type == 'HYPERV':
            out_config.set(SECTION, 'OS', 'Windows 8.1')
        else:
            out_config.set(SECTION, 'OS', 'MAC OS X ' + ver_num)
        out_config.set(SECTION, 'SECURE', 'false')
        out_config.set(SECTION, 'TYPE', 'System, UI')
        out_config.set(SECTION, 'BLOB', 'N/A')
        out_config.set(SECTION, 'PKG', 'OVA')
        if sb_type == 'VBOX':
            out_config.set(SECTION, 'INSTALLER', 'Oracle VirtualBox')
        elif sb_type == 'FUSION':
            out_config.set(SECTION, 'INSTALLER', 'VMWare Fusion')
        elif sb_type == 'HYPERV':
            out_config.set(SECTION, 'INSTALLER', 'Windows Hyper-V')
        out_config.set(SECTION, 'RAN', results[0] + len(results[1]) + results[3])
        out_config.set(SECTION, 'PASS', results[0])
        out_config.set(SECTION, 'FAIL', len(results[1]))
        out_config.set(SECTION, 'SKIPPED', results[2])
        out_config.set(SECTION, 'ABORTED', results[3])
        out_config.set(SECTION, 'FAILED_DEPENDENCY', 'N/A')
        out_config.set(SECTION, 'FAILED_TESTS', ",".join(results[1]))

        out_config.set(SECTION, 'NUM_OF_DATANODES', '1')
        out_config.set(SECTION, 'HDP_RELEASE', ova_uri.split('/')[4])
        out_config.set(SECTION, 'JDK', '1.6.0_51')
        out_config.set(SECTION, 'DB', 'N/A')
        out_config.set(SECTION, 'BROWSER', config.get('SANDBOX', 'test_browser'))

        with open(outfile, 'w') as fp:
            out_config.write(fp)
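
The OS branch in Example #10 reads /etc/os-release with ad-hoc regexes. The same information can be pulled into a dict with a few lines of standard Python; a self-contained sketch of that detection step:

def parse_os_release(path='/etc/os-release'):
    # e.g. {'NAME': 'Amazon Linux', 'VERSION': '2 (2017.12)', ...}
    info = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or '=' not in line:
                continue
            key, _, value = line.partition('=')
            info[key] = value.strip('"')
    return info

# release = parse_os_release()
# if release.get('NAME') == 'Amazon Linux':
#     osname = 'amazonlinux'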