Ejemplo n.º 1
0
def getNameNodeURL(nameservice2=False):
    """Return the NameNode web UI base URL.

    Uses https and the NameNode https address when wire encryption is on,
    plain http otherwise.

    nameservice2 -- pass True to target the second nameservice (federated/HA).
    """
    if Hadoop.isEncrypted():
        scheme = "https"
        address = HDFS.getNamenodeHttpsAddress(nameservice2)
    else:
        scheme = "http"
        address = HDFS.getNamenodeHttpAddress(nameservice2)
    baseUrl = "%s://%s" % (scheme, address)
    logger.info("URL being returned is - %s" % baseUrl)
    return baseUrl
Ejemplo n.º 2
0
    def __init__(self, host='localhost', port=None, isHttps=False):
        """Build the Ambari REST endpoint URLs and credentials for a host.

        host    -- Ambari server hostname (default 'localhost').
        port    -- Ambari port; when None it is derived from the cluster's
                   encryption setting (8443/https vs 8080/http).
        isHttps -- force an https base URL even when a port was supplied.
        """
        if port is None:
            # NOTE(review): 'and' binds tighter than 'or', so this reads as
            # isEncrypted OR (is_ambari_encrypted AND not Humboldt). Confirm
            # that "(A or B) and not Humboldt" was not the intent.
            if (Hadoop.isEncrypted() or Ambari.is_ambari_encrypted() and Machine.isHumboldt() == False):
                port = 8443
                isHttps = True
            else:
                port = 8080
        if isHttps or self.isCloudbreak():
            self.baseUrl = 'https://' + host
        else:
            self.baseUrl = 'http://' + host

        # Cloudbreak fronts Ambari under an '/ambari' path instead of a port.
        if self.isCloudbreak():
            self.baseUrl = self.baseUrl + '/ambari'
        else:
            self.baseUrl = self.baseUrl + ':' + str(port)

        if Machine.isHumboldt():
            # NOTE(review): this overwrites baseUrl with the gateway host,
            # discarding the scheme/port selected above — verify intended.
            self.username_password = Config.get('ambari', 'AMBARI_USERNAME', 'admin') + ':HdpCli123!'
            ambari_gateway = Config.get('machine', 'GATEWAY').replace("-ssh", "")
            self.baseUrl = 'https://%s' % ambari_gateway
        elif Machine.getInstaller() == 'cloudbreak':
            self.username_password = Config.get('ambari', 'AMBARI_USERNAME', 'admin') + ':cloudbreak1'
        else:
            self.username_password = Config.get('ambari', 'AMBARI_USERNAME', 'admin'
                                                ) + ':' + Config.get('ambari', 'AMBARI_PASSWORD', 'admin')

        # Pre-computed endpoints used by the API helpers.
        self.urlLogin = self.baseUrl + '#/login'
        self.urlGetClusters = self.baseUrl + '/api/v1/clusters'
        self.urlGetAmbClusters = self.baseUrl + '/api/v1/services'
        self.urlConfig = '/configurations'
        # Holds original config values so changes can be reverted later.
        self.backupDataJson = dict()
        self.logger = logging.getLogger(__name__)
Ejemplo n.º 3
0
def getAmbariURL():
    """Return the Ambari server URL for the local host.

    Scheme and port follow the cluster's wire-encryption setting:
    https://host:8443 when encrypted, http://host:8080 otherwise.
    """
    ambariHost = socket.getfqdn()
    scheme, port = ("https", "8443") if Hadoop.isEncrypted() else ("http", "8080")
    baseUrl = "%s://%s:%s" % (scheme, ambariHost, port)
    logger.info("URL being returned is - %s" % baseUrl)
    return baseUrl
Ejemplo n.º 4
0
 def getBaseUrl(self):
     """Return the Grafana HBase-tuning dashboard URL and cache it on self.base_url.

     https is used when wire encryption is on or on Humboldt clusters.
     """
     from beaver.component.ambari import Ambari
     grafana_host = Ambari.getHostsForComponent('METRICS_GRAFANA')[0]
     scheme = "https" if Hadoop.isEncrypted() or Machine.isHumboldt() else "http"
     self.base_url = "%s://%s:3000/dashboard/db/hbase-tuning" % (scheme, grafana_host)
     return self.base_url
Ejemplo n.º 5
0
    def ensure_jns_have_new_txn(cls, nodes, last_tx_id):
        """Poll every JournalNode's JMX endpoint until each reports a
        LastWrittenTxId >= last_tx_id, or a 3-minute timeout expires.

        nodes      -- iterable of JournalNode hostnames.
        last_tx_id -- transaction id every JN must have reached (int).

        Fails via ruAssert when the timeout is hit before all JNs catch up.
        """
        num_of_jns = len(nodes)
        # node -> last LastWrittenTxId value fetched from that node's JMX
        actual_tx_ids = {}
        jns_updated = 0
        protocol = 'http'
        jn_port = '8480'
        if Hadoop.isEncrypted():
            # Wire-encrypted clusters expose the JN servlet on https/8481.
            protocol = 'https'
            jn_port = '8481'

        # time out of 3 mins
        time_out = 3 * 60
        # stop time for 10s
        step_time = 10

        itr = int(time_out / step_time)

        for i in range(itr):
            logger.info(
                '******************** Check if all Journal Nodes are updated Iteration %s or %s *************************'
                % (i + 1, itr))
            for node in nodes:
                # if all JNS are updated break
                if jns_updated == num_of_jns:
                    return

                try:
                    # if JN is already ahead skip it
                    if actual_tx_ids[node] and int(
                            actual_tx_ids[node]) >= last_tx_id:
                        continue
                except KeyError:
                    # Node not polled yet -- fall through and fetch it below.
                    pass

                # other wise get the data and compare it
                url = '%s://%s:%s/jmx' % (protocol, node, jn_port)
                actual_tx_ids[node] = util.getJMXData(
                    url, 'Hadoop:service=JournalNode,name=Journal-',
                    'LastWrittenTxId')
                # NOTE(review): this logs the *target* tx id for every node,
                # not the value just fetched -- actual_tx_ids[node] may have
                # been intended here. Confirm before changing.
                logger.info(
                    '******************** JN: %s LAST TX ID: %s *************************'
                    % (node, last_tx_id))
                if int(actual_tx_ids[node]) >= last_tx_id:
                    jns_updated += 1

            # if all JNS are updated break
            if jns_updated == num_of_jns:
                return

            time.sleep(step_time)

        # Timed out: assert that every JN caught up on the final pass.
        ruAssert("HDFS", jns_updated == num_of_jns)
Ejemplo n.º 6
0
    def getComponnetsToTest(cls, compFile, depFile):
        '''
        Get the components that are being tested according to depFile
        '''
        # Load the component config and remember the queue-related settings.
        conf = RuSetup.readJson(compFile)
        RuSetup._skipQueue = set(conf[RuSetup.CONF_SKIP_QUEUE])
        RuSetup._defaultQueue = conf[RuSetup.CONF_DEFAULT_QUEUE]

        # Standalone runs list their components explicitly; otherwise derive
        # the set from the dependency file.
        if conf[RuSetup.CONF_STANDALONE]:
            components = set(conf[RuSetup.CONF_COMPONENTS_TEST])
        else:
            components = set(RuSetup.getComponentsAffected(compFile, depFile))

        # skip tests according to cluster settings
        if not HDFS.isHAEnabled():
            logger.info("Skip HDFS since HA is not enabled")
            components.discard("hdfs")

        # as discussed in Ru standup for 11/13, enabling storm-slider for non HA cluster and storm standalone for HA cluster
        components.discard("storm-slider" if YARN.isHAEnabled() else "storm")

        # Knox and Falcon are not exercised on wire-encrypted clusters.
        if Hadoop.isEncrypted():
            components.discard("knox")
            components.discard("falcon")

        if Hadoop.isTez():
            logger.info("Add tez since Hadoop.isTez()")
            components.add("tez")
        else:
            logger.info(
                "Make sure tez is not in the list since Hadoop.isTez() is false"
            )
            components.discard("tez")

        # Note: component.xa is always available, even if xa is not installed
        # So this line should work even if the cluster does not have xa installed
        from beaver.component.xa import Xa
        if Xa.isArgusInstalled():
            logger.info("Add argus since argus is there")
            components.add("argus")
        else:
            logger.info(
                "Make sure argus is not in the list since it's not available")
            components.discard("argus")

        return list(components)
Ejemplo n.º 7
0
def enable_knox_SSO(prop_file, knox_sso_enable_prop, provide_url_prop,
                    public_key_prop, browser_prop):
    """Enable KnoxSSO for a service via Ambari's configs.py.

    prop_file            -- Ambari config-type to modify.
    knox_sso_enable_prop -- property toggling SSO on (set to true).
    provide_url_prop     -- property receiving the KnoxSSO provider URL.
    public_key_prop      -- property receiving the Knox public certificate.
    browser_prop         -- optional user-agent whitelist property; skipped
                            when falsy.
    """
    base_cmd = "/var/lib/ambari-server/resources/scripts/configs.py --action set --host %s --cluster %s --config-type %s --unsafe" % (
        CONF['AMBARI_HOST'], CLUSTER_NAME, prop_file)
    if Hadoop.isEncrypted():
        # Ambari itself listens on https/8443 when wire encryption is on.
        base_cmd = "%s --port 8443" % (base_cmd)
        base_cmd = "%s --protocol https" % (base_cmd)

    def run_on_ambari(cmd):
        # Every config change runs as the admin user on the Ambari host.
        Machine.runas(user=Machine.getAdminUser(),
                      cmd=cmd,
                      host=CONF['AMBARI_HOST'],
                      cwd=None,
                      env=None,
                      logoutput=True,
                      passwd=Machine.getAdminPasswd())

    run_on_ambari("%s --key  %s --value true" % (base_cmd, knox_sso_enable_prop))

    # get FQDN+".com" and then set.
    run_on_ambari(" %s --key %s --value %s" %
                  (base_cmd, provide_url_prop, KNOXSSO_PROVIDER_URL))

    logger.info("Knox Host: %s Knox Port: %s" %
                (CONF['KNOX_HOST'], CONF['KNOX_PORT']))
    KNOX_CERT = get_knox_cert()
    run_on_ambari("%s --key %s --value '%s'" %
                  (base_cmd, public_key_prop, KNOX_CERT))

    if browser_prop:
        run_on_ambari("%s --key %s --value '%s'" %
                      (base_cmd, browser_prop, "Mozilla,Chrome,Opera"))
Ejemplo n.º 8
0
def restoreRangerHost():
    """Reset the HOST entry in the knox-sso Test.properties to the direct Ranger URL."""
    logger.info(
        "============================== Restoring Ranger Host ============================="
    )
    # Scheme and port follow wire encryption; the replacement text also
    # carries the closing quote of the sed expression and the target file.
    if Hadoop.isEncrypted():
        prefix, suffix = "HOST=https:\/\/", ":6182/' "
    else:
        prefix, suffix = "HOST=http:\/\/", ":6080/' "
    orig_url = prefix + CONF['XA_ADMIN_HOST'] + suffix + admin_prop_loc
    # Replace only the first HOST= line in the properties file.
    Machine.runas(user=Machine.getAdminUser(),
                  cmd="sed -i '0,/^HOST.*/s//%s" % (orig_url),
                  host=CONF['AMBARI_HOST'],
                  cwd=None,
                  env=None,
                  logoutput=True,
                  passwd=Machine.getAdminPasswd())
Ejemplo n.º 9
0
def setupLocalLoginRangerSSO():
    """Point the HOST entry in Test.properties at Ranger's /locallogin page."""
    logger.info(
        "============================== Setup Local Login Ranger SSO ============================="
    )
    # Scheme and port follow wire encryption; the replacement text also
    # carries the closing quote of the sed expression and the target file.
    if Hadoop.isEncrypted():
        prefix, suffix = "HOST=https:\/\/", ":6182\/locallogin/' "
    else:
        prefix, suffix = "HOST=http:\/\/", ":6080\/locallogin/' "
    ranger_url = prefix + CONF['XA_ADMIN_HOST'] + suffix + admin_prop_loc

    # Replace only the first HOST= line in the properties file.
    Machine.runas(user=Machine.getAdminUser(),
                  cmd="sed -i '0,/^HOST.*/s//%s" % (ranger_url),
                  host=CONF['AMBARI_HOST'],
                  cwd=None,
                  env=None,
                  logoutput=True,
                  passwd=Machine.getAdminPasswd())
Ejemplo n.º 10
0
def disableAtlasKnoxSSO():
    """Set atlas.sso.knox.enabled=false via Ambari and restart Atlas."""
    logger.info(
        "============================== %s.%s =============================" %
        (__name__, sys._getframe().f_code.co_name))
    base_cmd = "/var/lib/ambari-server/resources/scripts/configs.py --action set --host %s --cluster %s --config-type %s --unsafe " % (
        CONF['AMBARI_HOST'], CLUSTER_NAME, "application-properties")
    if Hadoop.isEncrypted():
        # Ambari listens on https/8443 when wire encryption is enabled.
        base_cmd = base_cmd + " --port 8443"
        base_cmd = base_cmd + " --protocol https"
    set_cmd = "%s --key  %s --value false" % (base_cmd, "atlas.sso.knox.enabled")
    Machine.runas(user=Machine.getAdminUser(),
                  cmd=set_cmd,
                  host=CONF['AMBARI_HOST'],
                  cwd=None,
                  env=None,
                  logoutput=True,
                  passwd=Machine.getAdminPasswd())
    logger.info("Atlas restart")
    newAmbariUtil.restart_service("ATLAS")
    logger.info("Atlas restart after disabling SSO.")
Ejemplo n.º 11
0
def setup_for_ranger():
    """Record the Ranger admin host/port in CONF and compute the sed
    replacement string used later to restore the HOST line in Test.properties.

    No-op when Ranger is not installed.
    """
    global ranger_orig_url, admin_prop_loc
    if isRangerInstalled():
        # policymgr_external_url looks like scheme://host[:port] -- keep host.
        external_url = Ambari.getConfig(
            'admin-properties', webURL=ambari_url)['policymgr_external_url']
        CONF['XA_ADMIN_HOST'] = external_url.split('//', 1)[1].split(':', 1)[0]
        # Ranger's port key differs for http vs https.
        admin_site = Ambari.getConfig('ranger-admin-site', webURL=ambari_url)
        if Hadoop.isEncrypted():
            CONF['XA_ADMIN_PORT'] = admin_site['ranger.service.https.port']
        else:
            CONF['XA_ADMIN_PORT'] = admin_site['ranger.service.http.port']
        admin_prop_loc = os.path.join(_workspace, _artifact,
                                      "xaagents-knoxsso", "knox-sso-ui",
                                      "Test.properties")
        # Trailing "' " closes the sed expression this string is spliced into.
        ranger_orig_url = "HOST=http://" + CONF['XA_ADMIN_HOST'] + ":" + str(
            CONF['XA_ADMIN_PORT']) + "' " + admin_prop_loc
Ejemplo n.º 12
0
def setup_KnoxSSO_form_module():
    """Configure Knox for form-based SSO.

    Pushes knoxssoform.xml as the knoxsso-topology content and
    defaultknoxssoform.xml as the default topology content via Ambari's
    configs.py; on Ranger clusters also points Test.properties back at
    the direct Ranger URL and marks KNOX_AUTH=form; finally restarts
    Knox and its demo LDAP.
    """
    logger.info(
        "============================== %s.%s =============================" %
        (__name__, sys._getframe().f_code.co_name))
    #read knoxssoform.xml(has whitelist) and set using configs.sh
    #read defaultknoxssoform.xml, read and set
    #restart Knox & ldap
    knoxsso_topo_file = os.path.join(_workspace, "data", "knox",
                                     "knoxssoform.xml")
    # Flatten the XML and swap double quotes for single quotes so it survives
    # being embedded in the single-quoted shell argument below.
    with open(knoxsso_topo_file, 'r') as myfile:
        knoxsso_topo_form = myfile.read().replace('\n', '').replace('"', '\'')

    knoxsso_topo_default = os.path.join(_workspace, "data", "knox",
                                        "defaultknoxssoform.xml")
    with open(knoxsso_topo_default, 'r') as myfile:
        knoxsso_topo_def = myfile.read().replace('\n', '').replace('"', '\'')
    # Ambari endpoint scheme/port depend on wire encryption.
    if Hadoop.isEncrypted():
        port = "8443"
        protocol = "https"
    else:
        port = "8080"
        protocol = "http"
    # Set the knoxsso-topology content (form-based SSO).
    Machine.runas(
        user=Machine.getAdminUser(),
        cmd=
        "/var/lib/ambari-server/resources/scripts/configs.py --port %s --protocol %s --action set --host %s --cluster %s --config-type knoxsso-topology --key content --value '%s' --unsafe"
        %
        (port, protocol, CONF['AMBARI_HOST'], CLUSTER_NAME, knoxsso_topo_form),
        host=CONF['AMBARI_HOST'],
        cwd=None,
        env=None,
        logoutput=True,
        passwd=Machine.getAdminPasswd())
    # Set the default topology content.
    Machine.runas(
        user=Machine.getAdminUser(),
        cmd=
        "/var/lib/ambari-server/resources/scripts/configs.py --port %s --protocol %s --action set --host %s --cluster %s --config-type topology --key content --value '%s' --unsafe"
        %
        (port, protocol, CONF['AMBARI_HOST'], CLUSTER_NAME, knoxsso_topo_def),
        host=CONF['AMBARI_HOST'],
        cwd=None,
        env=None,
        logoutput=True,
        passwd=Machine.getAdminPasswd())
    if isRangerInstalled():
        # Restore the first HOST= line in Test.properties to the direct
        # Ranger URL computed by setup_for_ranger().
        Machine.runas(user=Machine.getAdminUser(),
                      cmd="sed -i '0,/^HOST.*/s//%s" % ranger_orig_url,
                      host=CONF['AMBARI_HOST'],
                      cwd=None,
                      env=None,
                      logoutput=True,
                      passwd=Machine.getAdminPasswd())
        # Mark the auth mode so the UI tests drive the form login flow.
        Machine.runas(user=Machine.getAdminUser(),
                      cmd="sed -i '0,/^KNOX_AUTH.*/s//KNOX_AUTH=form/' %s" %
                      admin_prop_loc,
                      host=CONF['AMBARI_HOST'],
                      cwd=None,
                      env=None,
                      logoutput=True,
                      passwd=Machine.getAdminPasswd())
    newAmbariUtil.restart_service("KNOX")
    Knox.restartLdap()
    logger.info("Knox Restarted after Form based Auth setup")
Ejemplo n.º 13
0
def setupKnoxProxyAtlas(scheme, atlas_host, atlas_port):
    """Point the Knox 'ui' proxy topology at the given Atlas endpoint.

    Rewrites the ATLAS service url in data/knox/ui.xml, copies the topology
    to the Knox host, fixes ownership/permissions, and on encrypted clusters
    imports the Atlas TLS certificate into both the JVM cacerts and the Knox
    gateway keystore.

    scheme     -- 'http' or 'https' for the Atlas endpoint.
    atlas_host -- Atlas server hostname.
    atlas_port -- Atlas server port.
    """
    logger.info(
        "============================== %s.%s =============================" %
        (__name__, sys._getframe().f_code.co_name))
    #Copy ui.xml to topologies after replacing hostnames
    knoxproxy_topo_file = os.path.join(_workspace, "data", "knox", "ui.xml")

    atlas_host_with_port = "%s://%s:%s" % (scheme, atlas_host, atlas_port)

    tree = ET.parse(knoxproxy_topo_file)

    doc = tree.getroot()

    service_nodes = doc.findall('service')

    # For each <service>, remember the <role> text and rewrite the <url>
    # of the ATLAS service in place.
    for node in service_nodes:
        for params in node:
            if params.tag == "role":
                param_text = params.text
                print "param_text" + param_text
            # NOTE(review): if a <url> precedes the first <role>, param_text
            # is unbound here (NameError); this relies on <role> appearing
            # first inside every <service> -- confirm against ui.xml.
            if params.tag == "url" and "ATLAS" in param_text:
                params.text = atlas_host_with_port

    # Overwrites ui.xml in the workspace with the rewritten topology.
    tree.write(knoxproxy_topo_file)

    # SCP ui.xml to Knox Host
    knox_proxy_topo = os.path.join(_workspace, "data", "knox", "ui.xml")
    Machine.runas(
        user=Machine.getAdminUser(),
        cmd=
        "scp -o StrictHostKeyChecking=no -r -i /root/ec2-keypair %s root@%s:%s"
        % (knox_proxy_topo, CONF['KNOX_HOST'], KNOX_TOPOLOGY_DIR),
        host=CONF['AMBARI_HOST'],
        cwd=None,
        env=None,
        logoutput=True,
        passwd=Machine.getAdminPasswd())
    # Knox only loads topologies owned by knox with sane permissions.
    Machine.runas(user=Machine.getAdminUser(),
                  cmd="chown knox:knox %sui.xml" % (KNOX_TOPOLOGY_DIR),
                  host=CONF['KNOX_HOST'],
                  cwd=None,
                  env=None,
                  logoutput=True,
                  passwd=Machine.getAdminPasswd())
    Machine.runas(user=Machine.getAdminUser(),
                  cmd="chmod 644 %sui.xml" % (KNOX_TOPOLOGY_DIR),
                  host=CONF['KNOX_HOST'],
                  cwd=None,
                  env=None,
                  logoutput=True,
                  passwd=Machine.getAdminPasswd())

    if Hadoop.isEncrypted():

        # get the atlas cert
        Machine.runas(
            user=Machine.getAdminUser(),
            cmd=
            "openssl s_client -connect %s:%s < /dev/null | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > /tmp/knoxatlas.crt"
            % (atlas_host, atlas_port),
            host=CONF['KNOX_HOST'],
            cwd=None,
            env=None,
            logoutput=True,
            passwd=Machine.getAdminPasswd())

        # import it into java key store

        Machine.runas(
            user=Machine.getAdminUser(),
            cmd=
            "%s/jre/bin/keytool -import -alias knoxsso -keystore %s/jre/lib/security/cacerts -storepass changeit -file /tmp/knoxatlas.crt -noprompt"
            % (JAVA_HOME, JAVA_HOME),
            host=CONF['KNOX_HOST'],
            cwd=None,
            env=None,
            logoutput=True,
            passwd=Machine.getAdminPasswd())

        # import it into knox key store
        Machine.runas(
            user=Machine.getAdminUser(),
            cmd=
            "%s/jre/bin/keytool -import -alias knoxsso -storepass %s -keystore %sgateway.jks -file /tmp/knoxatlas.crt -noprompt"
            % (JAVA_HOME, KNOX_TRUSTSTORE_PASSWORD, KNOX_KEYSTORE_PATH),
            host=CONF['KNOX_HOST'],
            cwd=None,
            env=None,
            logoutput=True,
            passwd=Machine.getAdminPasswd())
Ejemplo n.º 14
0
def setupRangerKnoxSSO():
    """Enable KnoxSSO for Ranger through Ambari's configs.py.

    Sets ranger.sso.enabled, ranger.sso.providerurl and ranger.sso.publicKey
    in ranger-admin-site, records KNOX_SSO_ENABLED=true in the UI-test
    properties file, then restarts Ranger. No-op unless Ranger is installed.
    """
    logger.info(
        "============================== %s.%s =============================" %
        (__name__, sys._getframe().f_code.co_name))
    #Set up Ranger for KnoxSSO from Ambari interface
    #Step 1: set "ranger.sso.enabled" : "true",
    #/var/lib/ambari-server/resources/scripts/configs.sh set knoxsso-kpandey-erietp-1.novalocal cl1 ranger-admin-site 'ranger.sso.enabled' 'true'
    #Step 2: set "ranger.sso.providerurl" : "https://<knox_gateway_ip>:8443/gateway/knoxsso/api/v1/websso",
    # e.g. "ranger.sso.providerurl" : "https://knoxsso-kpandey-erietp-1.novalocal:8443/gateway/knoxsso/api/v1/websso"
    #Step 3: set "ranger.sso.publicKey" : < Content of cert.pem between BEGIN & END CERTIFICATE >
    #Step 4: Restart Ranger
    logger.info("Knox Host: %s Knox Port: %s" %
                (CONF['KNOX_HOST'], CONF['KNOX_PORT']))

    #Machine.runas(user=Machine.getAdminUser(), cmd="keytool -export -alias gateway-identity -storepass %s -rfc -file %sknoxcert.pem -keystore %sgateway.jks" %(KNOX_TRUSTSTORE_PASSWORD,KNOX_KEYSTORE_PATH,KNOX_KEYSTORE_PATH), host=CONF['KNOX_HOST'], cwd=None, env=None, logoutput=True, passwd=Machine.getAdminPasswd())
    #Machine.runas(user=Machine.getAdminUser(), cmd="yum -y install dos2unix",host=CONF['AMBARI_HOST'], cwd=None, env=None, logoutput=True, passwd=Machine.getAdminPasswd())
    #Machine.runas(user=Machine.getAdminUser(), cmd="scp -o StrictHostKeyChecking=no -r %sknoxcert.pem root@%s:/tmp/" %(KNOX_KEYSTORE_PATH,CONF['AMBARI_HOST']),host=CONF['KNOX_HOST'], cwd=None, env=None, logoutput=True, passwd=Machine.getAdminPasswd())
    #Machine.runas(user=Machine.getAdminUser(), cmd="dos2unix /tmp/knoxcert.pem",host=CONF['AMBARI_HOST'], cwd=None, env=None, logoutput=True, passwd=Machine.getAdminPasswd())
    #cert_loc = "/tmp/knoxcert.pem"

    #with open(cert_loc, 'r') as mfile:
    #    cert=mfile.read()

    # Knox public certificate to publish as ranger.sso.publicKey.
    KNOX_CERT = get_knox_cert()
    base_cmd = "/var/lib/ambari-server/resources/scripts/configs.py --action set --host %s --cluster %s --config-type %s --unsafe" % (
        CONF['AMBARI_HOST'], CLUSTER_NAME, "ranger-admin-site")
    if Hadoop.isEncrypted():
        # Ambari listens on https/8443 when wire encryption is enabled.
        base_cmd = "%s --port 8443" % (base_cmd)
        base_cmd = "%s --protocol https" % (base_cmd)

    if isRangerInstalled():
        Machine.runas(user=Machine.getAdminUser(),
                      cmd="%s --key ranger.sso.enabled --value true" %
                      (base_cmd),
                      host=CONF['AMBARI_HOST'],
                      cwd=None,
                      env=None,
                      logoutput=True,
                      passwd=Machine.getAdminPasswd())
        #get FQDN+".com" and then set.
        Machine.runas(user=Machine.getAdminUser(),
                      cmd="%s --key ranger.sso.providerurl --value %s" %
                      (base_cmd, KNOXSSO_PROVIDER_URL),
                      host=CONF['AMBARI_HOST'],
                      cwd=None,
                      env=None,
                      logoutput=True,
                      passwd=Machine.getAdminPasswd())
        Machine.runas(user=Machine.getAdminUser(),
                      cmd="%s --key ranger.sso.publicKey --value %s" %
                      (base_cmd, KNOX_CERT),
                      host=CONF['AMBARI_HOST'],
                      cwd=None,
                      env=None,
                      logoutput=True,
                      passwd=Machine.getAdminPasswd())
        # Update KNOX_SSO_ENABLED in Test.properties, appending the line if
        # it is not already present.
        Machine.runas(
            user=Machine.getAdminUser(),
            cmd=
            "grep -q '^KNOX_SSO_ENABLED' %s && sed -i 's/^KNOX_SSO_ENABLED.*/KNOX_SSO_ENABLED=true/' %s || echo 'KNOX_SSO_ENABLED=true' >> %s"
            % (admin_prop_loc, admin_prop_loc, admin_prop_loc),
            host=CONF['AMBARI_HOST'],
            cwd=None,
            env=None,
            logoutput=True,
            passwd=Machine.getAdminPasswd())
        newAmbariUtil.restart_service("RANGER")
        logger.info("Ranger restart after SSO setup.")
 def getweburl(cls, host):
     """Return the Ambari web URL for *host*.

     https://host:8443 when wire encryption is on, http://host:8080 otherwise.
     """
     scheme, port = ("https", "8443") if Hadoop.isEncrypted() else ("http", "8080")
     return "%s://%s:%s" % (scheme, host, port)
Ejemplo n.º 16
0
def setup_KnoxSSO_basic_module():
    """Configure Knox for basic-auth SSO.

    Pushes knoxssobasic.xml as the knoxsso-topology content and
    defaultknoxssobasic.xml as the default topology content via Ambari's
    configs.py, restarts Knox and its demo LDAP, and on Ranger clusters
    marks KNOX_AUTH=basic in the UI-test properties file.
    """
    logger.info(
        "============================== %s.%s =============================" %
        (__name__, sys._getframe().f_code.co_name))
    #Step 1: Import Knox Certificate into Java Keystore
    # $ openssl s_client -connect knoxsso-kpandey-erietp-1.novalocal:8443 < /dev/null | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > knoxssoRanger.crt
    # keytool -import -alias knoxsso-kpandey-erietp-1.novalocal -keystore /usr/jdk64/jdk1.8.0_40/jre/lib/security/cacerts -storepass changeit -file knoxssoRanger.crt -noprompt
    #Step 2: Set "Advanced knoxsso-topology" with contents from knoxssobasic.xml in resources directory using configs.sh
    #Step 3: Update "knoxsso-topology" property with KnoxSSO.xml using configs.sh

    knoxsso_topo_file = os.path.join(_workspace, "data", "knox",
                                     "knoxssobasic.xml")
    # Swap double quotes for single quotes so the XML survives being embedded
    # in the single-quoted shell argument below (newlines are kept here).
    with open(knoxsso_topo_file, 'r') as myfile:
        #knoxsso_topo_form=myfile.read().replace('\n', '').replace('"','\'')
        knoxsso_topo_basic = myfile.read().replace('"', '\'')
    knoxsso_topo_default = os.path.join(_workspace, "data", "knox",
                                        "defaultknoxssobasic.xml")
    with open(knoxsso_topo_default, 'r') as myfile:
        knoxsso_topo_def = myfile.read().replace('"', '\'')

    # Ambari endpoint scheme/port depend on wire encryption.
    if Hadoop.isEncrypted():
        port = "8443"
        protocol = "https"
    else:
        port = "8080"
        protocol = "http"
    # Set the knoxsso-topology content (basic auth).
    Machine.runas(
        user=Machine.getAdminUser(),
        cmd=
        "/var/lib/ambari-server/resources/scripts/configs.py --port %s --protocol %s --action set --host %s --cluster %s --config-type knoxsso-topology --key content --value '%s' --unsafe"
        % (port, protocol, CONF['AMBARI_HOST'], CLUSTER_NAME,
           knoxsso_topo_basic),
        host=CONF['AMBARI_HOST'],
        cwd=None,
        env=None,
        logoutput=True,
        passwd=Machine.getAdminPasswd())
    # Set the default topology content.
    Machine.runas(
        user=Machine.getAdminUser(),
        cmd=
        "/var/lib/ambari-server/resources/scripts/configs.py --port %s --protocol %s --action set --host %s --cluster %s --config-type topology --key content --value '%s' --unsafe"
        %
        (port, protocol, CONF['AMBARI_HOST'], CLUSTER_NAME, knoxsso_topo_def),
        host=CONF['AMBARI_HOST'],
        cwd=None,
        env=None,
        logoutput=True,
        passwd=Machine.getAdminPasswd())
    newAmbariUtil.restart_service("KNOX")
    Knox.restartLdap()

    if isRangerInstalled():
        # Mark the auth mode so the UI tests drive the basic-auth flow.
        Machine.runas(user=Machine.getAdminUser(),
                      cmd="sed -i '0,/^KNOX_AUTH.*/s//KNOX_AUTH=basic/' %s" %
                      admin_prop_loc,
                      host=CONF['AMBARI_HOST'],
                      cwd=None,
                      env=None,
                      logoutput=True,
                      passwd=Machine.getAdminPasswd())

    logger.info("Knox Restarted after Basic Auth setup")
    Knox.logConfig(CONF)
Ejemplo n.º 17
0
# Derived Knox gateway endpoints built from the configured proto/host/port.
CONF['KNOX_BASE_URL'] = "%s://%s:%s/gateway" % (
    CONF['KNOX_PROTO'], CONF['KNOX_HOST'], CONF['KNOX_PORT'])
CONF['KNOX_TOPO_URL'] = "%s/%s" % (CONF['KNOX_BASE_URL'], CONF['KNOX_TOPO'])
CONF['KNOX_WEBHDFS_URL'] = "%s/%s/webhdfs/v1/" % (CONF['KNOX_BASE_URL'],
                                                  CONF['KNOX_TOPO'])
# WebHDFS endpoint that bypasses Knox, for comparison tests.
CONF['DIRECT_WEBHDFS_URL'] = "http://%s/webhdfs/v1/" % (
    HDFS.getNamenodeHttpAddress())
CONF['RANGER_KNOX_POLICY'] = None

#CONF['SRC_DIR'] = os.path.join(Config.getEnv('WORKSPACE'), 'tests', 'knox', 'knox_2')
# OpenStack hostnames need a ".com" suffix to resolve externally.
knox_host = CONF['KNOX_HOST']
if Machine.isOpenStack():
    knox_host = knox_host + ".com"
KNOXSSO_PROVIDER_URL = "%s://%s:%s/gateway/knoxsso/api/v1/websso" % (
    CONF['KNOX_PROTO'], knox_host, CONF['KNOX_PORT'])
CLUSTER_NAME = Ambari.getClusterName(is_enc=Hadoop.isEncrypted())
# Knox keystore constants (password value redacted in this copy).
KNOX_TRUSTSTORE_PASSWORD = "******"
KNOX_KEYSTORE_PATH = "/usr/hdp/current/knox-server/data/security/keystores/"
KNOX_TOPOLOGY_DIR = "/etc/knox/conf/topologies/"

JAVA_HOME = Config.get("machine", "QA_CODE_JAVA_HOME")

_workspace = Config.getEnv('WORKSPACE')
_artifact = Config.getEnv('ARTIFACTS_DIR')

# NOTE(review): 'global' at module level is a no-op; apicorelib is presumably
# assigned elsewhere -- confirm before removing.
global apicorelib
# Location of the ambari deploy code checked out next to the workspace.
DEPLOY_CODE_DIR = os.path.join(Config.getEnv('WORKSPACE'), '..',
                               'ambari_deploy')
uifrm_folder = "uifrm_old/uifrm"
amb_prop_file = os.path.join(DEPLOY_CODE_DIR, uifrm_folder,
                             'ambari.properties')
Ejemplo n.º 18
0
# Knox installation settings pulled from the test configuration.
KNOX_HOME = Config.get('knox', 'KNOX_HOME')
KNOX_HOST = Config.get('knox', 'KNOX_HOST').split(',')[
    0]  # Just use the first Knox instance in the list for now.
KNOX_USER = Config.get('knox', 'KNOX_USER')
KNOX_CONF = Config.get('knox', 'KNOX_CONF')
knox_host_list = [Config.get('knox', 'KNOX_HOST')]  #Initialized with Knox Host

proxy_enabled = 'no'
#HDC Config
if Config.hasSection('hdc'):
    proxy_enabled = Config.get('hdc', 'USE_CLI')

#get list of hosts running Knox Gateway
if proxy_enabled == 'no':
    # Ambari API helper; scheme/port follow wire encryption except on Humboldt.
    if (Hadoop.isEncrypted() and Machine.isHumboldt() == False):
        newAmbariUtil = AmbariAPIUtil(port=8443, isHttps=True)
    else:
        newAmbariUtil = AmbariAPIUtil(port=8080, isHttps=False)

if ((Machine.isHumboldt() == False) and (proxy_enabled == 'no')):
    # If multiple Knox Hosts, get the List using Ambari API
    knoxHosts = newAmbariUtil.getComponentHosts('KNOX', 'KNOX_GATEWAY')
    if knoxHosts is not None:
        knox_host_list = knoxHosts.split(",")
    logger.info("List of Hosts running Knox Gateway * knoxHosts: %s " %
                knoxHosts)

if (len(KNOX_HOST.split()) == 1):
    # If only one Knox Host, get the info from Config (takes care of HDC and Knox Proxy setup)
    logger.info("Knox Host is running on * KNOX_HOST = %s " % KNOX_HOST)
Ejemplo n.º 19
0
def setup_KnoxSSO_basic_wlist_module():
    """Configure Knox for basic-auth SSO with a whitelist topology.

    Pushes knoxssobasicwlist.xml as the knoxsso-topology content and
    defaultknoxssobasic.xml as the default topology content via Ambari's
    configs.py; on Ranger clusters also points Test.properties at the
    KnoxSSO provider URL and marks KNOX_AUTH=basic. Restarts Knox (twice,
    with an LDAP restart in between) along the way.
    """
    logger.info(
        "============================== %s.%s =============================" %
        (__name__, sys._getframe().f_code.co_name))
    knoxsso_topo_file = os.path.join(_workspace, "data", "knox",
                                     "knoxssobasicwlist.xml")
    # Swap double quotes for single quotes so the XML survives being embedded
    # in the single-quoted shell argument below.
    with open(knoxsso_topo_file, 'r') as myfile:
        #knoxsso_topo_form=myfile.read().replace('\n', '').replace('"','\'')
        knoxsso_topo_basic = myfile.read().replace('"', '\'')
    knoxsso_topo_default = os.path.join(_workspace, "data", "knox",
                                        "defaultknoxssobasic.xml")
    with open(knoxsso_topo_default, 'r') as myfile:
        knoxsso_topo_def = myfile.read().replace('"', '\'')

    # Ambari endpoint scheme/port depend on wire encryption.
    if Hadoop.isEncrypted():
        port = "8443"
        protocol = "https"
    else:
        port = "8080"
        protocol = "http"
    # Set the knoxsso-topology content (basic auth + whitelist).
    Machine.runas(
        user=Machine.getAdminUser(),
        cmd=
        "/var/lib/ambari-server/resources/scripts/configs.py --port %s --protocol %s --action set --host %s --cluster %s --config-type knoxsso-topology --key content --value '%s' --unsafe"
        % (port, protocol, CONF['AMBARI_HOST'], CLUSTER_NAME,
           knoxsso_topo_basic),
        host=CONF['AMBARI_HOST'],
        cwd=None,
        env=None,
        logoutput=True,
        passwd=Machine.getAdminPasswd())
    # Set the default topology content.
    Machine.runas(
        user=Machine.getAdminUser(),
        cmd=
        "/var/lib/ambari-server/resources/scripts/configs.py --port %s --protocol %s --action set --host %s --cluster %s --config-type topology --key content --value '%s' --unsafe"
        %
        (port, protocol, CONF['AMBARI_HOST'], CLUSTER_NAME, knoxsso_topo_def),
        host=CONF['AMBARI_HOST'],
        cwd=None,
        env=None,
        logoutput=True,
        passwd=Machine.getAdminPasswd())
    newAmbariUtil.restart_service("KNOX")
    Knox.restartLdap()

    if isRangerInstalled():
        # Replacement text carries the closing quote of the sed expression
        # plus the target properties file, as elsewhere in this module.
        sso_prov_url = "PROVIDERURL=https:\/\/" + CONF[
            'KNOX_HOST'] + ":8443\/gateway\/knoxsso\/api\/v1\/websso/' " + admin_prop_loc
        Machine.runas(user=Machine.getAdminUser(),
                      cmd="sed -i '0,/^PROVIDERURL.*/s//%s" % (sso_prov_url),
                      host=CONF['AMBARI_HOST'],
                      cwd=None,
                      env=None,
                      logoutput=True,
                      passwd=Machine.getAdminPasswd())
        # Mark the auth mode so the UI tests drive the basic-auth flow.
        Machine.runas(user=Machine.getAdminUser(),
                      cmd="sed -i '0,/^KNOX_AUTH.*/s//KNOX_AUTH=basic/' %s" %
                      admin_prop_loc,
                      host=CONF['AMBARI_HOST'],
                      cwd=None,
                      env=None,
                      logoutput=True,
                      passwd=Machine.getAdminPasswd())

    newAmbariUtil.restart_service("KNOX")
    logger.info("Knox Restarted after whitelist setup")
Ejemplo n.º 20
0
def generateTestReportConf(infile, outfile, results, startTime, endTime):
    """Generate the test-report config file consumed by the results dashboard.

    Reads *infile* (an ini-style report template), fills in environment,
    cluster and result metadata, and writes the finished report to *outfile*.
    Two input layouts are handled:

    - a file containing ``SECTION`` (regular HDP/Ambari test runs): each empty
      option is populated from the environment/cluster, then extra dashboard-v2
      columns are added and the updated config is written out;
    - a file containing a ``SANDBOX`` section (sandbox VM runs): a fresh report
      config is built from the sandbox metadata.

    :param infile: path to the input report template config file
    :param outfile: path where the finished report config is written
    :param results: tuple of (passCount, failedTestsList, skippedCount,
                    abortedCount)
    :param startTime: run start time, stored verbatim in the report
    :param endTime: run end time, stored verbatim in the report
    """
    config = ConfigParser()
    config.optionxform = str
    config.read(infile)

    if config.has_section(SECTION):
        # set the version to 2.0 so new keys are processed
        config.set(SECTION, 'report_version', '2.0')
        # Stores the original component value, some testsuite runs like HiveServer2Concurr, Sqoop
        # change this for reporting, but we need to preserve for log archiving for uniqueness
        config.set(SECTION, "TESTSUITE_COMPONENT", config.get(SECTION, "COMPONENT"))
        for option, value in config.items(SECTION):
            try:
                # Skip options that already carry a value; SECURE is always
                # reprocessed so the HS2 suffix logic below can run.
                if ((option != "SECURE" and value != "")
                        or (Config.getEnv("HDP_STACK_INSTALLED").lower() == "false" and value != "")):
                    continue
                elif option == "BUILD_ID":
                    # if REPO_URL is not set, set the BUILD_ID to 0
                    # otherwise get the BUILD_ID from the file
                    # NOTE: has_option must be checked BEFORE get(), otherwise
                    # a missing REPO_URL raises NoOptionError and BUILD_ID is
                    # never set.
                    if not config.has_option(SECTION, "REPO_URL") or config.get(SECTION, "REPO_URL") == "":
                        config.set(SECTION, option, 0)
                    else:
                        config.set(SECTION, option, getBuildId(config.get(SECTION, "REPO_URL")))
                elif option == "HOSTNAME":
                    config.set(SECTION, option, socket.getfqdn())
                elif option == "COMPONENT_VERSION":
                    if not config.has_option(SECTION, "COMPONENT") or config.get(SECTION, "COMPONENT") == "":
                        config.set(SECTION, "COMPONENT", "Hadoop")
                    # ambarieu-hdf components report their own version; other
                    # ambari* components report the Ambari version.
                    if "ambarieu-hdf" in config.get(SECTION, "COMPONENT"):
                        config.set(SECTION, option, getComponentVersion(config.get(SECTION, "COMPONENT")))
                    elif "ambari" in config.get(SECTION, "COMPONENT"):
                        config.set(SECTION, option, getComponentVersion("Ambari"))
                    else:
                        config.set(SECTION, option, getComponentVersion(config.get(SECTION, "COMPONENT")))
                elif option == "OS":
                    if Machine.isWindows():
                        cmd = 'powershell (Get-WmiObject -class Win32_OperatingSystem).Caption'
                        _exit_code, stdout = Machine.runasDeprecated(
                            user=Machine.getAdminUser(), cmd=cmd, passwd=Machine.getAdminPasswd()
                        )
                        config.set(SECTION, option, stdout)
                        continue

                    osname = platform.dist()[0]
                    # hack to check for oracle os as there
                    # is no diff for python
                    if os.path.exists('/etc/oracle-release'):
                        osname = 'oracle'
                    ver = platform.dist()[1]
                    # Need a hack for SLES as python cannot determine 11.1 vs 11.3
                    if osname.lower() == 'suse':
                        # read the file /etc/SuSE-release and determine the patch version.
                        with open('/etc/SuSE-release', 'r') as f:
                            txt = f.read()
                        # get the patch level. For example
                        # PATCHLEVEL = 3
                        m = re.search('PATCHLEVEL = (.*)', txt, re.MULTILINE)
                        # if you find a match append to the version string
                        if m and m.group(1):
                            ver = '%s.%s' % (ver, m.group(1))

                    arch = platform.architecture()[0]

                    if os.path.exists('/etc/os-release'):
                        try:
                            with open('/etc/os-release', 'r') as f:
                                txt = f.read()
                            m = re.search('NAME="(.*)"', txt, re.MULTILINE)
                            if m and m.group(1):
                                if m.group(1) == "Amazon Linux":
                                    osname = "amazonlinux"
                                    m = re.search('VERSION="(.*)"', txt, re.MULTILINE)
                                    if m and m.group(1):
                                        ver = m.group(1)
                                        if "2 (2017.12)" in ver:
                                            ver = "2"
                                    # the amzn ami which qe team is using is of 64 bit
                                    arch = "64bit"
                        except Exception:
                            logger.error(traceback.format_exc())

                    config.set(SECTION, option, '%s-%s-%s' % (osname, ver, arch))
                elif option == "HDP_STACK":
                    if "ambari" in config.get(SECTION, "COMPONENT"):
                        from beaver.component.ambari import Ambari
                        hdpVersion = Ambari.getHDPVersion()
                        if hdpVersion and hdpVersion[0] in ('1', '2'):
                            config.set(SECTION, option, "h" + hdpVersion[0])
                        else:
                            config.set(SECTION, option, 'h2')
                    else:
                        hadoopVersion = getComponentVersion("Hadoop")
                        if hadoopVersion and hadoopVersion[0] in ('1', '2'):
                            config.set(SECTION, option, "h" + hadoopVersion[0])
                elif option == "TDE":
                    from beaver.component.hadoop import HDFS2
                    if HDFS2.isKMSEnabled():
                        config.set(SECTION, option, "on")
                    else:
                        config.set(SECTION, option, "off")

                elif option == "SECURE":
                    if "ambari" in config.get(SECTION, "COMPONENT"):
                        from beaver.component.ambari import Ambari

                        config.set(SECTION, option, str(Ambari.isSecure()).lower())
                        secure_str = str(Ambari.isSecure()).lower()
                    else:
                        from beaver.component.hadoop import Hadoop
                        secure_str = str(Hadoop.isSecure()).lower()
                        # For the HiveServer2 concurrency suites, SECURE also
                        # encodes the transport/auth flavor as a suffix and
                        # extra hs2_* columns are filled in for the dashboard.
                        if config.get(SECTION, "COMPONENT") == "HiveServer2Concurr":
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            if Hadoop.isSecure():
                                config.set(SECTION, "hs2_authentication", "Kerberos")
                            else:
                                config.set(SECTION, "hs2_authentication", "Unsecure")
                            config.set(SECTION, "hs2_transport", "Binary")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrHTTP":
                            if Hadoop.isEncrypted():
                                secure_str += "-http-en"
                            else:
                                secure_str += "-http"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            if Hadoop.isSecure():
                                config.set(SECTION, "hs2_authentication", "Kerberos")
                            else:
                                config.set(SECTION, "hs2_authentication", "Unsecure")
                            config.set(SECTION, "hs2_transport", "HTTP")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrLDAP":
                            if Hadoop.isEncrypted():
                                secure_str += "-ldap-en"
                            else:
                                secure_str += "-ldap"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            config.set(SECTION, "hs2_authentication", "LDAP")
                            config.set(SECTION, "hs2_transport", "Binary")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrLDAPHTTP":
                            if Hadoop.isEncrypted():
                                secure_str += "-ldap-http-en"
                            else:
                                secure_str += "-ldap-http"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            config.set(SECTION, "hs2_authentication", "LDAP")
                            config.set(SECTION, "hs2_transport", "HTTP")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrSSL":
                            if Hadoop.isEncrypted():
                                secure_str += "-ssl-en"
                            else:
                                secure_str += "-ssl"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            config.set(SECTION, "hs2_authentication", "Unsecure")
                            config.set(SECTION, "hs2_transport", "Binary")
                            config.set(SECTION, "hs2_ssl", "true")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrSSLHTTP":
                            if Hadoop.isEncrypted():
                                secure_str += "-ssl-http-en"
                            else:
                                secure_str += "-ssl-http"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            config.set(SECTION, "hs2_authentication", "Unsecure")
                            config.set(SECTION, "hs2_transport", "HTTP")
                            config.set(SECTION, "hs2_ssl", "true")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrTPUser":
                            if Hadoop.isEncrypted():
                                secure_str += "-tpuser-en"
                            else:
                                secure_str += "-tpuser"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            config.set(SECTION, "hs2_authentication", "Kerberos")
                            config.set(SECTION, "hs2_transport", "Binary")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "true")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrLongRunning":
                            if Hadoop.isEncrypted():
                                secure_str += "-longrun-en"
                            else:
                                secure_str += "-longrun"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            if Hadoop.isSecure():
                                config.set(SECTION, "hs2_authentication", "Kerberos")
                            else:
                                config.set(SECTION, "hs2_authentication", "Unsecure")
                            config.set(SECTION, "hs2_transport", "Binary")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "SqoopDb2":
                            # reported as plain Sqoop; TESTSUITE_COMPONENT keeps
                            # the original name for log archiving
                            config.set(SECTION, "COMPONENT", "Sqoop")
                        else:
                            if Hadoop.isEncrypted():
                                secure_str += '-en'
                        config.set(SECTION, option, secure_str)
                elif option == "BLOB":
                    pass
                elif option == "RAN":
                    # dont add skipped, just pass + fail + aborted
                    config.set(SECTION, option, results[0] + len(results[1]) + results[3])
                elif option == "PASS":
                    config.set(SECTION, option, results[0])
                elif option == "FAIL":
                    config.set(SECTION, option, len(results[1]))
                elif option == "SKIPPED":
                    config.set(SECTION, option, results[2])
                elif option == "ABORTED":
                    config.set(SECTION, option, results[3])
                elif option == "FAILED_TESTS":
                    failedTests = ",".join(results[1])
                    failureSummary = ReportHelper.getFailureSummary(failedTests)
                    config.set(SECTION, "FAILURE_SUMMARY", failureSummary)
                    tmpFailedTests = ReportHelper.getGroupedFailedTests(failedTests)
                    config.set(SECTION, option, ReportHelper.getMergedFailedTests(tmpFailedTests, failureSummary))
                elif option == "NUM_OF_DATANODES":
                    if "ambari" in config.get(SECTION, "COMPONENT"):
                        config.set(SECTION, option, "N/A")
                    else:
                        from beaver.component.hadoop import HDFS

                        config.set(SECTION, option, HDFS.getDatanodeCount())
                elif option == "BUILD_URL":
                    if 'BUILD_URL' in os.environ:
                        config.set(SECTION, option, os.environ['BUILD_URL'])
                elif option == "HDP_RELEASE":
                    # If RU/RB, we must override HDP_RELEASE
                    #   (we can't fix this with product front. Discussed in BUG-31369.)
                    if config.get(SECTION, "TESTSUITE_COMPONENT").lower() in ["rollingupgrade", "rollback",
                                                                              "rollingupgrade-ha", "rollback-ha"]:
                        config.set(SECTION, option, "dal")
                    else:
                        config.set(SECTION, option, getRepoId(config.get(SECTION, "REPO_URL")))
                elif option == "JDK":
                    config.set(SECTION, option, Machine.getJDK())
                elif option == "DB":
                    if not config.has_option(SECTION, "COMPONENT") or config.get(SECTION, "COMPONENT") == "":
                        config.set(SECTION, "COMPONENT", "Hadoop")
                    config.set(SECTION, option, getDatabaseFlavor(config.get(SECTION, "COMPONENT")))
            except Exception as error:
                # best-effort: a failure on one option must not abort the report
                logger.error("ERROR processing option: %s", option)
                logger.error("Exception: %s", error)
        # make sure Hadoop is installed before append Tez to the component name
        if Config.getEnv("HDP_STACK_INSTALLED").lower() == "true" and config.has_option(SECTION, "COMPONENT"):
            if "ambari" in config.get(SECTION, "COMPONENT"):
                kerberos_server_type = 'n/a'
                from beaver.component.ambari import Ambari
                if Ambari.isSecure():
                    kerberos_server_type = 'mit'
                config.set(SECTION, 'kerberos_server_type', kerberos_server_type)
            else:
                from beaver.component.hadoop import Hadoop, HDFS
                from beaver.component.slider import Slider

                # set execution_framework. New columns for dashboard v2
                # TODO: This needs to be improved to be component specific.
                if Hadoop.isTez():
                    if Slider.isInstalled():
                        config.set(SECTION, 'execution_framework', 'tez-slider')
                    else:
                        config.set(SECTION, 'execution_framework', 'tez')
                else:
                    if Slider.isInstalled():
                        config.set(SECTION, 'execution_framework', 'mr-slider')
                    else:
                        config.set(SECTION, 'execution_framework', 'mr')
                # set wire_encryption
                # TODO: This needs to be improved to be component specific.
                if Hadoop.isEncrypted():
                    config.set(SECTION, 'wire_encryption', 'true')
                else:
                    config.set(SECTION, 'wire_encryption', 'false')
                # set kerberos_server_type
                kerberos_server_type = 'n/a'
                if Hadoop.isSecure():
                    kerberos_server_type = 'mit'
                    # add a check for AD
                    if Machine.isLinux():
                        gateway = Config.get("machine", "GATEWAY")
                        Machine.copyToLocal(Machine.getAdminUser(), gateway, '/etc/krb5.conf', '/tmp/krb5.conf')
                        with open('/tmp/krb5.conf', 'r') as f:
                            txt = f.read()
                        #Finding all the admin_server in the krb5.conf with ports, if any
                        p = re.compile('admin_server = ((?!FILE).*)')
                        admin_server_list_with_ports = p.findall(txt)
                        admin_server_list = []
                        for admin_server_with_port in admin_server_list_with_ports:
                            admin_server_list.append(admin_server_with_port.split(':')[0])
                        #If len is greater than 1, first checking if one of the admin server is AD host,
                        #  than to ensure that not all the hosts are AD hosts, checking if one of the admin
                        #  server is not in AD Hosts Lists.
                        if len(admin_server_list) > 1:
                            for ad_host in AD_HOSTS_LIST:
                                if ad_host in admin_server_list:
                                    for admin_server in admin_server_list:
                                        if admin_server not in AD_HOSTS_LIST:
                                            kerberos_server_type = 'ad+mit'
                                            break
                        else:
                            for ad_host in AD_HOSTS_LIST:
                                if ad_host in admin_server_list:
                                    kerberos_server_type = 'ad'
                                    break
                config.set(SECTION, 'kerberos_server_type', kerberos_server_type)

                try:
                    from beaver.component.xa import Xa
                    # set argus. New column for dashboard v2
                    if Xa.isArgus():
                        config.set(SECTION, 'argus', 'true')
                    else:
                        config.set(SECTION, 'argus', 'false')
                except Exception as error:
                    logger.error("ERROR processing argus")
                    logger.error("Exception: %s", error)

                #set TDE
                if HDFS.isKMSEnabled():
                    config.set(SECTION, 'tde', 'true')
                else:
                    config.set(SECTION, 'tde', 'false')

        config.set(SECTION, 'START_TIME', startTime)
        config.set(SECTION, 'END_TIME', endTime)
        coverage_summary_file = os.path.join(Config.getEnv('ARTIFACTS_DIR'), "coverage_summary.json")
        if os.path.exists(coverage_summary_file):
            with open(coverage_summary_file, "r") as fp:
                json_str = "\n".join(fp.readlines())
            coverage_summary = json.loads(json_str)
            for key, value in coverage_summary.items():
                config.set(SECTION, key, value)
        # use a context manager so the report file is flushed and closed
        with open(outfile, 'w') as report_fp:
            config.write(report_fp)

    elif config.has_section('SANDBOX'):
        out_config = ConfigParser()
        out_config.optionxform = str
        out_config.add_section(SECTION)

        sb_type = config.get('SANDBOX', 'vm_env')
        out_config.set(SECTION, 'BUILD_ID', '0')
        ova_uri = ''
        if sb_type == 'VBOX':
            ova_uri = config.get(sb_type, 'vbox_ova_uri')
        elif sb_type == 'FUSION':
            ova_uri = config.get(sb_type, 'fus_ova_uri')
        if sb_type == 'HYPERV':
            ova_uri = config.get(sb_type, 'hyperv_ova_uri')
        out_config.set(SECTION, 'REPO_URL', ova_uri)
        sb_host = ''
        if os.name != 'nt':
            sb_host = os.popen("hostname -f").read().strip()
            sb_host = sb_host + '(' + os.popen("ifconfig en0 | grep 'inet ' | awk -F ' ' '{print $2}'"
                                               ).read().strip() + ')'
        else:
            sb_host = 'Kiev local host'
        out_config.set(SECTION, 'HOSTNAME', sb_host)

        out_config.set(SECTION, 'HDP_STACK', "h" + (config.get('VERSIONS', 'hadoop_version')[0]))
        out_config.set(SECTION, 'COMPONENT', 'SANDBOX')
        out_config.set(SECTION, 'TESTSUITE_COMPONENT', 'SANDBOX')

        if sb_type == 'HYPERV':
            sb_ver = 'hyper-v'
        else:
            # derive the sandbox version from the OVA file name segment of
            # the URI, stripping vendor/packaging noise
            tmp = ['%20', 'Hortonworks', 'VirtualBox', '.ova', 'VMware', '_']
            sb_ver = ova_uri.split('/')[5]
            for rstr in tmp:
                sb_ver = sb_ver.replace(rstr, '')

        out_config.set(SECTION, 'COMPONENT_VERSION', sb_ver)
        out_config.set(SECTION, 'CHECKSUM', 'N/A')

        ver_num = os.popen("sw_vers | grep 'ProductVersion:' | awk -F ' ' '{print $2}'").read().strip()
        if sb_type == 'HYPERV':
            out_config.set(SECTION, 'OS', 'Windows 8.1')
        else:
            out_config.set(SECTION, 'OS', 'MAC OS X ' + ver_num)
        out_config.set(SECTION, 'SECURE', 'false')
        out_config.set(SECTION, 'TYPE', 'System, UI')
        out_config.set(SECTION, 'BLOB', 'N/A')
        out_config.set(SECTION, 'PKG', 'OVA')
        if sb_type == 'VBOX':
            out_config.set(SECTION, 'INSTALLER', 'Oracle VirtualBox')
        elif sb_type == 'FUSION':
            out_config.set(SECTION, 'INSTALLER', 'VMWare Fusion')
        elif sb_type == 'HYPERV':
            out_config.set(SECTION, 'INSTALLER', 'Windows Hyper-V')
        out_config.set(SECTION, 'RAN', results[0] + len(results[1]) + results[3])
        out_config.set(SECTION, 'PASS', results[0])
        out_config.set(SECTION, 'FAIL', len(results[1]))
        out_config.set(SECTION, 'SKIPPED', results[2])
        out_config.set(SECTION, 'ABORTED', results[3])
        out_config.set(SECTION, 'FAILED_DEPENDENCY', 'N/A')
        out_config.set(SECTION, 'FAILED_TESTS', ",".join(results[1]))

        out_config.set(SECTION, 'NUM_OF_DATANODES', '1')
        out_config.set(SECTION, 'HDP_RELEASE', ova_uri.split('/')[4])
        out_config.set(SECTION, 'JDK', '1.6.0_51')
        out_config.set(SECTION, 'DB', 'N/A')
        out_config.set(SECTION, 'BROWSER', config.get('SANDBOX', 'test_browser'))

        with open(outfile, 'w') as report_fp:
            out_config.write(report_fp)