Exemplo n.º 1
0
def run():
    """ install Trafodion dependencies

    Installs required RPM packages on this node, either from a temporary
    local yum repo (offline mode) or from the public repos plus EPEL.
    """

    dbcfgs = json.loads(dbcfgs_json)

    node_list = dbcfgs['node_list'].split(',')

    # 'Y' means install from the installer's local repo instead of the network
    offline = dbcfgs['offline_mode'] == 'Y'

    if offline:
        # point yum at the temporary local repository served by the installer
        repo_content = LOCAL_REPO_PTR % (dbcfgs['repo_ip'],
                                         dbcfgs['repo_http_port'])
        with open(REPO_FILE, 'w') as f:
            f.write(repo_content)

    # pdsh/protobuf come from EPEL, so enable it for online installs
    if not offline and not os.path.exists(EPEL_FILE):
        run_cmd('yum install -y epel-release')

    package_list = [
        'apr',
        'apr-util',
        'expect',
        'gzip',
        'libiodbc-devel',
        'lzo',
        'lzop',
        'pdsh',  # epel
        'perl-DBD-SQLite',
        'perl-Params-Validate',
        'perl-Time-HiRes',
        'protobuf',  # epel
        'sqlite',
        'snappy',
        'unixODBC-devel',
        'unzip'
    ]

    if dbcfgs['ldap_security'].upper() == 'Y':
        package_list += ['openldap-clients']

    # only install packages not already present on this node
    all_pkg_list = run_cmd('rpm -qa')
    for pkg in package_list:
        if pkg in all_pkg_list:
            print('Package %s had already been installed' % pkg)
        else:
            print('Installing %s ...' % pkg)
            if offline:
                run_cmd(
                    'yum install -y --disablerepo=\* --enablerepo=traflocal %s'
                    % pkg)
            else:
                run_cmd('yum install -y %s' % pkg)

    # pdsh should not exist on single node; the 'traf_shadow' key is only
    # present when running in adding-node mode, so skip the removal then
    if len(node_list) == 1 and 'traf_shadow' not in dbcfgs:
        cmd_output('yum remove -y pdsh')

    # remove temp repo file
    if offline: os.remove(REPO_FILE)
Exemplo n.º 2
0
def run():
    """ start trafodion instance

    Starts the instance with sqstart, runs 'initialize trafodion' (or its
    upgrade variant), then optionally configures LDAP security users.
    """
    dbcfgs = json.loads(dbcfgs_json)

    print('Starting trafodion')
    run_cmd('sqstart')

    tmp_file = '/tmp/initialize.out'
    if 'upgrade' in dbcfgs and dbcfgs['upgrade'].upper() == 'Y':
        print('Initialize trafodion upgrade')
        run_cmd('echo "initialize trafodion, upgrade;" | sqlci > %s' % tmp_file)
        init_output = cmd_output('cat %s' % tmp_file)
        if 'ERROR' in init_output:
            err('Failed to upgrade initialize trafodion:\n %s' % init_output)
    else:
        print('Initialize trafodion')
        run_cmd('echo "initialize trafodion;" | sqlci > %s' % tmp_file)
        init_output = cmd_output('cat %s' % tmp_file)
        # skip error 1392
        # ERROR[1392] Trafodion is already initialized on this system. No action is needed.
        if 'ERROR' in init_output and not '1392' in init_output:
            err('Failed to initialize trafodion:\n %s' % init_output)

    if dbcfgs['ldap_security'] == 'Y':
        # BUGFIX: the quotes around the user name must reach the shell as \"
        # (Python literal \\\"); the previous bare \" terminated the shell's
        # double-quoted echo string early and broke the sqlci statement
        run_cmd('echo "initialize authorization; alter user DB__ROOT set external name \\\"%s\\\";" | sqlci > %s' % (dbcfgs['db_root_user'], tmp_file))
        if 'db_admin_user' in dbcfgs:
            run_cmd('echo "alter user DB__ADMIN set external name \\\"%s\\\";" | sqlci >> %s' % (dbcfgs['db_admin_user'], tmp_file))

        secure_output = cmd_output('cat %s' % tmp_file)
        if 'ERROR' in secure_output:
            err('Failed to setup security for trafodion:\n %s' % secure_output)

    run_cmd('rm %s' % tmp_file)
    print('Start trafodion successfully.')
Exemplo n.º 3
0
def run():
    """ install Trafodion dependencies """

    dbcfgs = json.loads(dbcfgs_json)
    nodes = dbcfgs['node_list'].split(',')
    offline = dbcfgs['offline_mode'] == 'Y'

    if offline:
        # write a temporary yum repo file pointing at the local repo server
        with open(REPO_FILE, 'w') as repo_file:
            repo_file.write(LOCAL_REPO_PTR % (dbcfgs['repo_ip'], dbcfgs['repo_http_port']))
    elif not os.path.exists(EPEL_FILE):
        # pdsh and protobuf are EPEL packages
        run_cmd('yum install -y epel-release')

    pkgs = ['apr', 'apr-util', 'expect', 'gzip', 'libiodbc-devel', 'lzo',
            'lzop',
            'pdsh',      # from epel
            'perl-DBD-SQLite', 'perl-Params-Validate', 'perl-Time-HiRes',
            'protobuf',  # from epel
            'sqlite', 'snappy', 'unixODBC-devel', 'unzip']

    if dbcfgs['ldap_security'].upper() == 'Y':
        pkgs.append('openldap-clients')

    installed = run_cmd('rpm -qa')
    for pkg in pkgs:
        if pkg not in installed:
            print('Installing %s ...' % pkg)
            if offline:
                run_cmd('yum install -y --disablerepo=\* --enablerepo=traflocal %s' % pkg)
            else:
                run_cmd('yum install -y %s' % pkg)
        else:
            print('Package %s had already been installed' % pkg)

    # pdsh is not wanted on a single-node install (the traf_shadow key is
    # present when running in adding-node mode)
    if len(nodes) == 1 and 'traf_shadow' not in dbcfgs:
        cmd_output('yum remove -y pdsh')

    if offline:
        # clean up the temporary repo file
        os.remove(REPO_FILE)
Exemplo n.º 4
0
def run():
    """ prepare Trafodion HDFS directories and HBase privileges """
    hdfs_bin = DEF_HDFS_BIN

    dbcfgs = json.loads(dbcfgs_json)
    distro = dbcfgs['distro']

    if 'CDH' in distro:
        # prefer the parcel layout's hdfs binary when the parcel dir exists
        if os.path.exists(PARCEL_HBASE_LIB):
            hdfs_bin = PARCEL_HDFS_BIN
    elif 'APACHE' in distro:
        hdfs_bin = dbcfgs['hadoop_home'] + '/bin/hdfs'

    traf_loc = '/user/trafodion'
    traf_user = dbcfgs['traf_user']
    hdfs_user = dbcfgs['hdfs_user']
    hbase_user = dbcfgs['hbase_user']
    hbase_group = cmd_output('%s groups %s | cut -d" " -f3' %
                             (hdfs_bin, hbase_user))

    # all dfs operations run as the HDFS superuser, in this exact order
    dfs_cmds = [
        '%s dfsadmin -safemode wait' % hdfs_bin,
        '%s dfs -mkdir -p %s/{trafodion_backups,bulkload,lobs} /hbase/archive' % (hdfs_bin, traf_loc),
        '%s dfs -chown -R %s:%s /hbase/archive' % (hdfs_bin, hbase_user, hbase_user),
        '%s dfs -chown -R %s:%s %s %s/{trafodion_backups,bulkload,lobs}' % (hdfs_bin, traf_user, traf_user, traf_loc, traf_loc),
        '%s dfs -chmod 0755 %s' % (hdfs_bin, traf_loc),
        '%s dfs -chmod 0750 %s/{trafodion_backups,bulkload,lobs}' % (hdfs_bin, traf_loc),
        '%s dfs -chgrp %s %s/bulkload' % (hdfs_bin, hbase_group, traf_loc),
        '%s dfs -setfacl -R -m user:%s:rwx /hbase/archive' % (hdfs_bin, traf_user),
        '%s dfs -setfacl -R -m default:user:%s:rwx /hbase/archive' % (hdfs_bin, traf_user),
        '%s dfs -setfacl -R -m mask::rwx /hbase/archive' % hdfs_bin,
    ]
    for dfs_cmd in dfs_cmds:
        run_cmd_as_user(hdfs_user, dfs_cmd)

    # Grant all privileges to the Trafodion principal in HBase
    if dbcfgs['secure_hadoop'] == 'Y':
        run_cmd('echo "grant \'%s\', \'RWXC\'" | %s su - %s -s /bin/bash -c "hbase shell" > /tmp/hbase_shell.out'
                % (traf_user, get_sudo_prefix(), hbase_user))
        if int(cmd_output('grep -c ERROR /tmp/hbase_shell.out')):
            err('Failed to grant HBase privileges to %s' % traf_user)
        run_cmd('rm /tmp/hbase_shell.out')
Exemplo n.º 5
0
def run():
    """ start trafodion instance """
    dbcfgs = json.loads(dbcfgs_json)

    print('Starting trafodion')
    traf_home = os.environ['TRAF_HOME']
    # newer releases ship trafstart; otherwise fall back to legacy sqstart
    if os.path.exists('%s/sql/scripts/trafstart' % traf_home):
        run_cmd('trafstart')
    else:
        run_cmd('sqstart')

    # unique temp file name per invocation
    tmp_file = '/tmp/initialize.out.' + str(int(time.time()))
    print('Initialize trafodion')
    run_cmd('echo "initialize trafodion;" | sqlci > %s' % tmp_file)
    init_output = cmd_output('cat %s' % tmp_file)

    if '1392' in init_output or '1395' in init_output:
        # errors 1392/1395: already initialized; upgrade metadata if outdated
        run_cmd('echo "get version of metadata;" | sqlci > %s' % tmp_file)
        meta_current = cmd_output('grep \'Metadata is current\' %s | wc -l' % tmp_file)
        if meta_current != "1":
            print('Initialize trafodion, upgrade')
            run_cmd('echo "initialize trafodion, upgrade;" | sqlci > %s' % tmp_file)

        # update system library procedures and functions
        run_cmd('echo "initialize trafodion, upgrade library management;" | sqlci > %s' % tmp_file)
        library_output = cmd_output('cat %s' % tmp_file)
        if 'ERROR' in library_output:
            err('Failed to initialize trafodion, upgrade library management:\n %s' % library_output)
    elif 'ERROR' in init_output:
        # any other error is fatal
        err('Failed to initialize trafodion:\n %s' % init_output)

    run_cmd('rm -rf %s' % tmp_file)
    if dbcfgs['ldap_security'] == 'Y':
        run_cmd('echo "initialize authorization; alter user DB__ROOT set external name \\\"%s\\\";" | sqlci > %s' % (dbcfgs['db_root_user'], tmp_file))

        secure_output = cmd_output('cat %s' % tmp_file)
        if 'ERROR' in secure_output:
            err('Failed to setup security for trafodion:\n %s' % secure_output)

    run_cmd('rm -rf %s' % tmp_file)
    if os.path.exists('%s/sql/scripts/connstart' % traf_home):
        run_cmd('connstart')

    print('Start trafodion successfully.')
Exemplo n.º 6
0
 def get_hive(self):
     """ get Hive status """
     # 'which hive' output contains 'no hive' when the binary is absent
     if 'no hive' in cmd_output('which hive'):
         return NA
     return OK
Exemplo n.º 7
0
    def get_default_java(self):
        """ get default java version

        Scan the common JDK install locations and return the path of the
        preferred JDK (1.8 first, then 1.7), or NA when none is found.
        """
        jdk_path = glob('/usr/java/*') + \
                   glob('/usr/jdk64/*') + \
                   glob('/usr/lib/jvm/java-*-openjdk.x86_64')

        jdk_list = {}  # {jdk_version: jdk_path}
        for path in jdk_path:
            jdk_ver = cmd_output('%s/bin/javac -version' % path)

            try:
                main_ver, sub_ver = re.search(r'(\d\.\d\.\d)_(\d+)',
                                              jdk_ver).groups()
                # don't support JDK version less than 1.7.0_65
                if main_ver == '1.7.0' and int(sub_ver) < 65:
                    continue
                jdk_list[main_ver] = path
            except AttributeError:
                # javac output didn't match the version pattern; skip it
                continue

        if not jdk_list:
            return NA
        # use JDK1.8 first
        if '1.8.0' in jdk_list:
            return jdk_list['1.8.0']
        elif '1.7.0' in jdk_list:
            return jdk_list['1.7.0']
        # NOTE(review): implicitly returns None when only other JDK versions
        # were detected -- confirm callers treat None the same as NA
Exemplo n.º 8
0
    def get_default_java(self):
        """ get default java version """
        candidates = (glob('/usr/java/*')
                      + glob('/usr/jdk64/*')
                      + glob('/usr/lib/jvm/java-*-openjdk.x86_64'))

        found = {}  # jdk main version -> install path
        for jdk_home in candidates:
            version_text = cmd_output('%s/bin/javac -version' % jdk_home)
            match = re.search(r'(\d\.\d\.\d)_(\d+)', version_text)
            if match is None:
                continue
            main_ver, sub_ver = match.groups()
            # JDK versions below 1.7.0_65 are not supported
            if main_ver == '1.7.0' and int(sub_ver) < 65:
                continue
            found[main_ver] = jdk_home

        if not found:
            return NA
        else:
            # prefer JDK 1.8 over 1.7
            if '1.8.0' in found:
                return found['1.8.0']
            elif '1.7.0' in found:
                return found['1.7.0']
Exemplo n.º 9
0
 def get_hive(self):
     """ get Hive status """
     # hive counts as available when 'which' can resolve it
     lookup = cmd_output('which hive')
     return NA if 'no hive' in lookup else OK
Exemplo n.º 10
0
    def get_hbase(self):
        """ get HBase version """
        if 'hbase_home' in self.dbcfgs:
            # apache distro: hbase lives under the configured home dir
            hbase_ver = cmd_output('%s/bin/hbase version | head -n1' % self.dbcfgs['hbase_home'])
        else:
            hbase_ver = cmd_output('hbase version | head -n1')

        support_hbase_ver = self.version.get_version('hbase')
        match = re.search(r'HBase (\d\.\d)', hbase_ver)
        if match is None:
            # version banner not recognized
            return NA
        hbase_ver = match.groups()[0]
        if hbase_ver not in support_hbase_ver:
            return NS
        return hbase_ver
Exemplo n.º 11
0
 def get_traf_status(self):
     """ get trafodion running status """
     # count "monitor COLD" processes; any hit means trafodion is up
     running = int(cmd_output('ps -ef|grep -v grep|grep -c "monitor COLD"')) > 0
     return 'Running' if running else 'Stopped'
Exemplo n.º 12
0
 def get_traf_status(self):
     """ get trafodion running status """
     mon_count = cmd_output('ps -ef|grep -v grep|grep -c "monitor COLD"')
     # no "monitor COLD" process means the instance is down
     if int(mon_count) <= 0:
         return 'Stopped'
     return 'Running'
Exemplo n.º 13
0
 def get_firewall_status(self):
     """ get firewall running status """
     # any iptables rule line beyond the headers means the firewall is active
     rules = cmd_output('iptables -nL|grep -vE "(Chain|target)"').strip()
     return 'Running' if rules else 'Stopped'
Exemplo n.º 14
0
    def get_hbase(self):
        """ get HBase version """
        try:
            # apache distro keeps hbase under a configured home directory
            hbase_home = self.dbcfgs['hbase_home']
            version_banner = cmd_output('%s/bin/hbase version | head -n1' % hbase_home)
        except KeyError:
            version_banner = cmd_output('hbase version | head -n1')

        supported = self.version.get_version('hbase')
        try:
            detected = re.search(r'HBase (\d\.\d)', version_banner).groups()[0]
        except AttributeError:
            # banner did not contain a recognizable version
            return NA
        if detected not in supported:
            return NS
        return detected
Exemplo n.º 15
0
 def get_firewall_status(self):
     """ get firewall running status """
     remaining = cmd_output(
         'iptables -nL|grep -vE "(Chain|target)"').strip()
     # an empty rule listing means no firewall rules are loaded
     if not remaining:
         return 'Stopped'
     return 'Running'
Exemplo n.º 16
0
 def get_home_dir(self):
     """ get the parent directory of the trafodion user's home dir """
     if 'traf_user' not in self.dbcfgs:
         return ''
     traf_user = self.dbcfgs['traf_user']
     # strip the trailing /<user> component from the passwd home entry
     return cmd_output(
         "getent passwd %s | awk -F: '{print $6}' | sed 's/\/%s//g'" %
         (traf_user, traf_user))
Exemplo n.º 17
0
def run():
    """ install Trafodion dependencies

    Installs pdsh (from a local repo in offline mode, or from an EPEL
    mirror matching the OS release) plus the required RPM packages.
    """

    dbcfgs = json.loads(dbcfgs_json)

    if dbcfgs['offline_mode'] == 'Y':
        print('Installing pdsh in offline mode ...')

        # setup temp local repo
        repo_content = LOCAL_REPO_PTR % (dbcfgs['repo_ip'],
                                         dbcfgs['repo_port'])
        with open(REPO_FILE, 'w') as f:
            f.write(repo_content)

        run_cmd(
            'yum install -y --disablerepo=\* --enablerepo=traflocal pdsh-rcmd-ssh pdsh'
        )
    else:
        pdsh_installed = cmd_output('rpm -qa|grep -c pdsh')
        if pdsh_installed == '0':
            release = platform.release()
            # release looks like '...el6.x86_64' / '...el7.x86_64';
            # BUGFIX: escape the dot so it cannot match an arbitrary char
            releasever, arch = re.search(r'el(\d)\.(\w+)', release).groups()

            if releasever == '7':
                pdsh_pkg = 'http://mirrors.neusoft.edu.cn/epel/7/%s/p/pdsh-2.31-1.el7.%s.rpm' % (
                    arch, arch)
            elif releasever == '6':
                pdsh_pkg = 'http://mirrors.neusoft.edu.cn/epel/6/%s/pdsh-2.26-4.el6.%s.rpm' % (
                    arch, arch)
            else:
                # presumably err() aborts, so pdsh_pkg is always bound
                # below this point -- TODO confirm
                err('Unsupported Linux version')

            print('Installing pdsh ...')
            run_cmd('yum install -y %s' % pdsh_pkg)

    package_list = [
        'apr', 'apr-util', 'expect', 'gzip', 'libiodbc-devel', 'lzo', 'lzop',
        'openldap-clients', 'perl-DBD-SQLite', 'perl-Params-Validate',
        'perl-Time-HiRes', 'sqlite', 'snappy', 'unixODBC-devel', 'unzip'
    ]

    # install whatever is not already present on this node
    all_pkg_list = run_cmd('rpm -qa')
    for pkg in package_list:
        if pkg in all_pkg_list:
            print('Package %s had already been installed' % pkg)
        else:
            print('Installing %s ...' % pkg)
            if dbcfgs['offline_mode'] == 'Y':
                run_cmd(
                    'yum install -y --disablerepo=\* --enablerepo=traflocal %s'
                    % pkg)
            else:
                run_cmd('yum install -y %s' % pkg)

    # remove temp repo file
    if dbcfgs['offline_mode'] == 'Y':
        os.remove(REPO_FILE)
Exemplo n.º 18
0
def run():
    """ start trafodion instance """
    dbcfgs = json.loads(dbcfgs_json)

    print('Starting trafodion')
    traf_home = os.environ['TRAF_HOME']
    # use trafstart when the release ships it, else fall back to sqstart
    if os.path.exists('%s/sql/scripts/trafstart' % traf_home):
        run_cmd('trafstart')
    else:
        run_cmd('sqstart')

    # per-invocation temp file name
    tmp_file = '/tmp/initialize.out.' + str(int(time.time()))
    print('Initialize trafodion')
    run_cmd('echo "initialize trafodion;" | sqlci > %s' % tmp_file)
    init_output = cmd_output('cat %s' % tmp_file)

    if '1392' in init_output or '1395' in init_output:
        # errors 1392/1395: already initialized; only upgrade the metadata
        # when it is not current
        run_cmd('echo "get version of metadata;" | sqlci > %s' % tmp_file)
        meta_current = cmd_output('grep \'Metadata is current\' %s | wc -l' % tmp_file)
        if meta_current != "1":
            print('Initialize trafodion, upgrade')
            run_cmd('echo "initialize trafodion, upgrade;" | sqlci > %s' % tmp_file)
    elif 'ERROR' in init_output:
        err('Failed to initialize trafodion:\n %s' % init_output)

    run_cmd('rm -rf %s' % tmp_file)
    if dbcfgs['ldap_security'] == 'Y':
        run_cmd('echo "initialize authorization; alter user DB__ROOT set external name \\\"%s\\\";" | sqlci > %s' % (dbcfgs['db_root_user'], tmp_file))

        secure_output = cmd_output('cat %s' % tmp_file)
        if 'ERROR' in secure_output:
            err('Failed to setup security for trafodion:\n %s' % secure_output)

    run_cmd('rm -rf %s' % tmp_file)
    if os.path.exists('%s/sql/scripts/connstart' % traf_home):
        run_cmd('connstart')

    print('Start trafodion successfully.')
def run():
    """ start trafodion instance

    Starts the instance with sqstart, runs 'initialize trafodion' (or its
    upgrade variant), then optionally configures LDAP security users.
    """
    dbcfgs = json.loads(dbcfgs_json)

    print('Starting trafodion')
    run_cmd('sqstart')

    tmp_file = '/tmp/initialize.out'
    if 'upgrade' in dbcfgs and dbcfgs['upgrade'].upper() == 'Y':
        print('Initialize trafodion upgrade')
        run_cmd('echo "initialize trafodion, upgrade;" | sqlci > %s' %
                tmp_file)
        init_output = cmd_output('cat %s' % tmp_file)
        if 'ERROR' in init_output:
            err('Failed to upgrade initialize trafodion:\n %s' % init_output)
    else:
        print('Initialize trafodion')
        run_cmd('echo "initialize trafodion;" | sqlci > %s' % tmp_file)
        init_output = cmd_output('cat %s' % tmp_file)
        # skip error 1392
        # ERROR[1392] Trafodion is already initialized on this system. No action is needed.
        if 'ERROR' in init_output and not '1392' in init_output:
            err('Failed to initialize trafodion:\n %s' % init_output)

    if dbcfgs['ldap_security'] == 'Y':
        # BUGFIX: the quotes around the user name must reach the shell as \"
        # (Python literal \\\"); the previous bare \" terminated the shell's
        # double-quoted echo string early and broke the sqlci statement
        run_cmd(
            'echo "initialize authorization; alter user DB__ROOT set external name \\\"%s\\\";" | sqlci > %s'
            % (dbcfgs['db_root_user'], tmp_file))
        if 'db_admin_user' in dbcfgs:
            run_cmd(
                'echo "alter user DB__ADMIN set external name \\\"%s\\\";" | sqlci >> %s'
                % (dbcfgs['db_admin_user'], tmp_file))

        secure_output = cmd_output('cat %s' % tmp_file)
        if 'ERROR' in secure_output:
            err('Failed to setup security for trafodion:\n %s' % secure_output)

    run_cmd('rm %s' % tmp_file)
    print('Start trafodion successfully.')
Exemplo n.º 20
0
def run():
    """ prepare Trafodion HDFS locations and HBase access rights """
    hdfs_bin = DEF_HDFS_BIN

    dbcfgs = json.loads(dbcfgs_json)
    distro = dbcfgs['distro']

    if 'CDH' in distro:
        # CDH parcel layout overrides the default hdfs location
        if os.path.exists(PARCEL_HBASE_LIB):
            hdfs_bin = PARCEL_HDFS_BIN
    elif 'APACHE' in distro:
        hdfs_bin = dbcfgs['hadoop_home'] + '/bin/hdfs'

    traf_loc = '/user/trafodion'
    traf_user = dbcfgs['traf_user']
    hdfs_user = dbcfgs['hdfs_user']
    hbase_user = dbcfgs['hbase_user']
    hbase_group = cmd_output('%s groups %s | cut -d" " -f3' % (hdfs_bin, hbase_user))

    def hdfs(cmd):
        # every dfs operation runs as the HDFS superuser
        run_cmd_as_user(hdfs_user, cmd)

    hdfs('%s dfsadmin -safemode wait' % hdfs_bin)
    hdfs('%s dfs -mkdir -p %s/{trafodion_backups,bulkload,lobs} /hbase/archive' % (hdfs_bin, traf_loc))
    hdfs('%s dfs -chown -R %s:%s /hbase/archive' % (hdfs_bin, hbase_user, hbase_user))
    hdfs('%s dfs -chown -R %s:%s %s %s/{trafodion_backups,bulkload,lobs}' % (hdfs_bin, traf_user, traf_user, traf_loc, traf_loc))
    hdfs('%s dfs -chmod 0755 %s' % (hdfs_bin, traf_loc))
    hdfs('%s dfs -chmod 0750 %s/{trafodion_backups,bulkload,lobs}' % (hdfs_bin, traf_loc))
    hdfs('%s dfs -chgrp %s %s/bulkload' % (hdfs_bin, hbase_group, traf_loc))
    hdfs('%s dfs -setfacl -R -m user:%s:rwx /hbase/archive' % (hdfs_bin, traf_user))
    hdfs('%s dfs -setfacl -R -m default:user:%s:rwx /hbase/archive' % (hdfs_bin, traf_user))
    hdfs('%s dfs -setfacl -R -m mask::rwx /hbase/archive' % hdfs_bin)

    # Grant all privileges to the Trafodion principal in HBase
    if dbcfgs['secure_hadoop'] == 'Y':
        run_cmd('echo "grant \'%s\', \'RWXC\'" | %s su - %s -s /bin/bash -c "hbase shell" > /tmp/hbase_shell.out' % (traf_user, get_sudo_prefix(), hbase_user))
        has_err = cmd_output('grep -c ERROR /tmp/hbase_shell.out')
        if int(has_err):
            err('Failed to grant HBase privileges to %s' % traf_user)
        run_cmd('rm /tmp/hbase_shell.out')
Exemplo n.º 21
0
def run():
    """ create Trafodion HDFS directories and grant HBase privileges """
    hdfs_bin = '/usr/bin/hdfs'

    dbcfgs = json.loads(dbcfgs_json)
    DISTRO = dbcfgs['distro']

    if 'CDH' in DISTRO:
        parcel_lib = '/opt/cloudera/parcels/CDH/lib/hbase/lib'
        if os.path.exists(parcel_lib):
            # parcel-based CDH install: use the parcel's hdfs binary
            hdfs_bin = '/opt/cloudera/parcels/CDH/bin/hdfs'
    elif 'APACHE' in DISTRO:
        hdfs_bin = dbcfgs['hadoop_home'] + '/bin/hdfs'

    traf_loc = '/user/trafodion'
    traf_user = dbcfgs['traf_user']
    hdfs_user = dbcfgs['hdfs_user']
    hbase_user = dbcfgs['hbase_user']

    run_cmd_as_user(hdfs_user, '%s dfsadmin -safemode wait' % hdfs_bin)
    run_cmd_as_user(
        hdfs_user,
        '%s dfs -mkdir -p %s/{trafodion_backups,bulkload,lobs} /bulkload /lobs /hbase/archive /hbase-staging'
        % (hdfs_bin, traf_loc))
    run_cmd_as_user(
        hdfs_user, '%s dfs -chown -R %s:%s /hbase/archive /hbase-staging' %
        (hdfs_bin, hbase_user, hbase_user))
    run_cmd_as_user(
        hdfs_user,
        '%s dfs -chown -R %s:%s %s/{trafodion_backups,bulkload,lobs} /bulkload /lobs'
        % (hdfs_bin, traf_user, traf_user, traf_loc))
    run_cmd_as_user(
        hdfs_user, '%s dfs -setfacl -R -m user:%s:rwx /hbase/archive' %
        (hdfs_bin, traf_user))
    run_cmd_as_user(
        hdfs_user, '%s dfs -setfacl -R -m default:user:%s:rwx /hbase/archive' %
        (hdfs_bin, traf_user))
    run_cmd_as_user(
        hdfs_user, '%s dfs -setfacl -R -m mask::rwx /hbase/archive' % hdfs_bin)

    # Grant all privileges to the Trafodion principal in HBase
    if dbcfgs['secure_hadoop'] == 'Y':
        # BUGFIX: pipe the grant statement into the hbase shell via echo;
        # previously 'grant' was executed as a shell command and never
        # reached hbase (compare the working secure-grant in sibling scripts)
        run_cmd(
            'echo "grant \'%s\', \'RWXC\'" | sudo -u %s hbase shell > /tmp/hbase_shell.out'
            % (traf_user, hbase_user))
        has_err = cmd_output('grep -c ERROR /tmp/hbase_shell.out')
        if int(has_err):
            err('Failed to grant HBase privileges to %s' % traf_user)
        run_cmd('rm /tmp/hbase_shell.out')
Exemplo n.º 22
0
    def check_java(self):
        """ check JDK version

        Verify the configured java_home holds a supported javac and
        err() out otherwise.
        """
        jdk_path = self.dbcfgs['java_home']
        jdk_ver = cmd_output('%s/bin/javac -version' % jdk_path)
        try:
            # e.g. 'javac 1.7.0_65' -> ('1.7', '65');
            # BUGFIX: escape the dot between major and patch digits so it
            # cannot match an arbitrary character
            jdk_ver, sub_ver = re.search(r'javac (\d\.\d)\.\d_(\d+)', jdk_ver).groups()
        except AttributeError:
            err('No JDK found')

        if self.dbcfgs['req_java8'] == 'Y': # only allow JDK1.8
            support_java = '1.8'
        else:
            support_java = self.version.get_version('java')

        if jdk_ver == '1.7' and int(sub_ver) < 65:
            err('Unsupported JDK1.7 version, sub version should be higher than 65')
        if jdk_ver not in support_java:
            err('Unsupported JDK version %s, supported version: %s' % (jdk_ver, support_java))
Exemplo n.º 23
0
    def check_java(self):
        """ check JDK version """
        javac_out = cmd_output('%s/bin/javac -version' % self.dbcfgs['java_home'])
        try:
            # parse e.g. 'javac 1.7.0_65' into main and sub version
            main_ver, sub_ver = re.search(r'javac (\d\.\d).\d_(\d+)', javac_out).groups()
        except AttributeError:
            err('No JDK found')

        # JDK 1.8 may be mandated by configuration
        if self.dbcfgs['req_java8'] == 'Y':
            allowed = '1.8'
        else:
            allowed = self.version.get_version('java')

        if main_ver == '1.7' and int(sub_ver) < 65:
            err('Unsupported JDK1.7 version, sub version should be higher than 65')
        if main_ver not in allowed:
            err('Unsupported JDK version %s, supported version: %s' % (main_ver, allowed))
Exemplo n.º 24
0
def run():
    """ create Trafodion HDFS directories and grant HBase privileges """
    hdfs_bin = "/usr/bin/hdfs"

    dbcfgs = json.loads(dbcfgs_json)
    DISTRO = dbcfgs["distro"]

    if "CDH" in DISTRO:
        parcel_lib = "/opt/cloudera/parcels/CDH/lib/hbase/lib"
        if os.path.exists(parcel_lib):
            # parcel-based CDH install: use the parcel's hdfs binary
            hdfs_bin = "/opt/cloudera/parcels/CDH/bin/hdfs"
    elif "APACHE" in DISTRO:
        hdfs_bin = dbcfgs["hadoop_home"] + "/bin/hdfs"

    traf_loc = "/user/trafodion"
    traf_user = dbcfgs["traf_user"]
    hdfs_user = dbcfgs["hdfs_user"]
    hbase_user = dbcfgs["hbase_user"]

    run_cmd_as_user(hdfs_user, "%s dfsadmin -safemode wait" % hdfs_bin)
    run_cmd_as_user(
        hdfs_user,
        "%s dfs -mkdir -p %s/{trafodion_backups,bulkload,lobs} /hbase/archive /hbase-staging" % (hdfs_bin, traf_loc),
    )
    run_cmd_as_user(
        hdfs_user, "%s dfs -chown -R %s:%s /hbase/archive /hbase-staging" % (hdfs_bin, hbase_user, hbase_user)
    )
    run_cmd_as_user(
        hdfs_user,
        "%s dfs -chown -R %s:%s %s/{trafodion_backups,bulkload,lobs}" % (hdfs_bin, traf_user, traf_user, traf_loc),
    )
    run_cmd_as_user(hdfs_user, "%s dfs -setfacl -R -m user:%s:rwx /hbase/archive" % (hdfs_bin, traf_user))
    run_cmd_as_user(hdfs_user, "%s dfs -setfacl -R -m default:user:%s:rwx /hbase/archive" % (hdfs_bin, traf_user))
    run_cmd_as_user(hdfs_user, "%s dfs -setfacl -R -m mask::rwx /hbase/archive" % hdfs_bin)

    # Grant all privileges to the Trafodion principal in HBase
    if dbcfgs["secure_hadoop"] == "Y":
        # BUGFIX: echo the grant statement into the hbase shell; the bare
        # 'grant ... | sudo ...' form executed 'grant' as a shell command
        # instead of sending it to hbase
        run_cmd("echo \"grant '%s', 'RWXC'\" | sudo -u %s hbase shell > /tmp/hbase_shell.out" % (traf_user, hbase_user))
        has_err = cmd_output("grep -c ERROR /tmp/hbase_shell.out")
        if int(has_err):
            err("Failed to grant HBase privileges to %s" % traf_user)
        run_cmd("rm /tmp/hbase_shell.out")
Exemplo n.º 25
0
def run():
    """ node-level Trafodion setup

    Applies kernel settings, prepares scratch directories, deploys the
    distro-specific HBase trx jars and writes the trafodion sudoers file.
    """
    dbcfgs = json.loads(dbcfgs_json)

    TRAF_HOME = cmd_output('cat /etc/default/useradd |grep HOME |cut -d "=" -f 2').strip()
    TRAF_USER = dbcfgs['traf_user']
    SQ_ROOT = '%s/%s/%s-%s' % (TRAF_HOME, TRAF_USER, dbcfgs['traf_basename'], dbcfgs['traf_version'])

    TRAF_VER = dbcfgs['traf_version']
    DISTRO = dbcfgs['distro']
    TRAF_LIB_PATH = SQ_ROOT + '/export/lib'
    SCRATCH_LOCS = dbcfgs['scratch_locs'].split(',')

    SUDOER_FILE = '/etc/sudoers.d/trafodion'
    # BUGFIX: removed a stray trailing double quote after /usr/bin/hbase
    # which made the generated sudoers entry invalid
    SUDOER_CFG = """
## Allow trafodion id to run commands needed for backup and restore
%%%s ALL =(hbase) NOPASSWD: /usr/bin/hbase
""" % TRAF_USER

    ### kernel settings ###
    # NOTE(review): '2>&1 > /dev/null' discards stdout only; stderr still
    # reaches the console -- confirm that is intended
    run_cmd('sysctl -w kernel.pid_max=65535 2>&1 > /dev/null')
    run_cmd('echo "kernel.pid_max=65535" >> /etc/sysctl.conf')

    ### create and set permission for scratch file dir ###
    for loc in SCRATCH_LOCS:
        # don't set permission for HOME folder
        if not os.path.exists(loc):
            run_cmd('mkdir -p %s' % loc)
        if TRAF_HOME not in loc:
            run_cmd('chmod 777 %s' % loc)

    ### copy jar files ###
    hbase_lib_path = '/usr/lib/hbase/lib'
    if 'CDH' in DISTRO:
        parcel_lib = '/opt/cloudera/parcels/CDH/lib/hbase/lib'
        if os.path.exists(parcel_lib): hbase_lib_path = parcel_lib
    elif 'HDP' in DISTRO:
        hbase_lib_path = '/usr/hdp/current/hbase-regionserver/lib'
    elif 'APACHE' in DISTRO:
        hbase_home = dbcfgs['hbase_home']
        hbase_lib_path = hbase_home + '/lib'
        # for apache distro, get hbase version from cmdline
        hbase_ver = cmd_output('%s/bin/hbase version | head -n1' % hbase_home)
        hbase_ver = re.search(r'HBase (\d\.\d)', hbase_ver).groups()[0]
        DISTRO += hbase_ver

    # map minor releases that share the same trx jar build
    distro, v1, v2 = re.search(r'(\w+)-*(\d)\.(\d)', DISTRO).groups()
    if distro == 'CDH':
        if v2 == '6': v2 = '5'
        if v2 == '8': v2 = '7'
    elif distro == 'HDP':
        if v2 == '4': v2 = '3'

    hbase_trx_jar = 'hbase-trx-%s%s_%s-%s.jar' % (distro.lower(), v1, v2, TRAF_VER)
    traf_hbase_trx_path = '%s/%s' % (TRAF_LIB_PATH, hbase_trx_jar)
    hbase_trx_path = '%s/%s' % (hbase_lib_path, hbase_trx_jar)
    if not os.path.exists(traf_hbase_trx_path):
        err('Cannot find HBase trx jar \'%s\' for your Hadoop distribution' % hbase_trx_jar)

    # upgrade mode, check if existing trx jar doesn't match the new trx jar file
    if 'upgrade' in dbcfgs and dbcfgs['upgrade'].upper() == 'Y':
        if not os.path.exists(hbase_trx_path):
            err('The trx jar \'%s\' doesn\'t exist in hbase lib path, cannot do upgrade, please do regular install' % hbase_trx_jar)
    else:
        # remove old trx and trafodion-utility jar files
        run_cmd('rm -rf %s/{hbase-trx-*,trafodion-utility-*}' % hbase_lib_path)

        # copy new ones
        run_cmd('cp %s %s' % (traf_hbase_trx_path, hbase_lib_path))
        run_cmd('cp %s/trafodion-utility-* %s' % (TRAF_LIB_PATH, hbase_lib_path))

    # set permission
    run_cmd('chmod +r %s/{hbase-trx-*,trafodion-utility-*}' % hbase_lib_path)

    if dbcfgs['dcs_ha'] == 'Y':
        # set trafodion sudoer file for specific cmds
        SUDOER_CFG += """
## Trafodion Floating IP commands
Cmnd_Alias IP = /sbin/ip
Cmnd_Alias ARP = /sbin/arping

## Allow Trafodion id to run commands needed to configure floating IP
%%%s ALL = NOPASSWD: IP, ARP
""" % TRAF_USER

    ### write trafodion sudoer file ###
    with open(SUDOER_FILE, 'w') as f:
        f.write(SUDOER_CFG)
Exemplo n.º 26
0
def run():
    """ Configure DCS/REST components and generate SSL certificates.

    Reads zookeeper settings from hbase-site.xml, writes DCS server/master
    files, patches dcs-env.sh, trafci and sqenvcom.sh, then updates
    dcs-site.xml / rest-site.xml and runs sqcertgen.
    """
    dbcfgs = json.loads(dbcfgs_json)

    sq_root = os.environ['MY_SQROOT']
    traf_ver = dbcfgs['traf_version']
    hbase_xml_file = dbcfgs['hbase_xml_file']

    dcs_conf_dir = '%s/dcs-%s/conf' % (sq_root, traf_ver)
    dcs_srv_file = dcs_conf_dir + '/servers'
    dcs_master_file = dcs_conf_dir + '/master'
    dcs_bkmaster_file = dcs_conf_dir + '/backup-masters'
    dcs_env_file = dcs_conf_dir + '/dcs-env.sh'
    dcs_site_file = dcs_conf_dir + '/dcs-site.xml'
    rest_site_file = '%s/rest-%s/conf/rest-site.xml' % (sq_root, traf_ver)
    trafci_file = sq_root + '/trafci/bin/trafci'
    sqenv_file = sq_root + '/sqenvcom.sh'

    ### dcs setting ###
    # one "<node> <server count>" line per node
    nodes = dbcfgs['node_list'].split(',')
    dcs_cnt = dbcfgs['dcs_cnt_per_node']
    write_file(dcs_srv_file,
               ''.join(['%s %s\n' % (node, dcs_cnt) for node in nodes]))

    ### modify dcs config files ###
    # first node acts as DCS master
    dcs_master = nodes[0]
    append_file(dcs_master_file, dcs_master)

    # register install dirs in sqenvcom.sh
    append_file(sqenv_file, 'export DCS_INSTALL_DIR=%s/dcs-%s' % (sq_root, traf_ver))
    append_file(sqenv_file, 'export REST_INSTALL_DIR=%s/rest-%s' % (sq_root, traf_ver))

    # zookeeper is managed externally, not by DCS
    mod_file(dcs_env_file,
             {'.*DCS_MANAGES_ZK=.*': 'export DCS_MANAGES_ZK=false'})

    # point trafci at the DCS master
    mod_file(trafci_file, {'HNAME=.*': 'HNAME=%s:23400' % dcs_master})

    # pull zookeeper quorum/port from hbase-site.xml
    net_interface = cmd_output(
        'netstat -rn | grep "^0.0.0.0" | awk \'{print $8}\'').strip()
    hb = ParseXML(hbase_xml_file)
    zk_hosts = hb.get_property('hbase.zookeeper.quorum')
    zk_port = hb.get_property('hbase.zookeeper.property.clientPort')

    p = ParseXML(dcs_site_file)
    p.add_property('dcs.zookeeper.property.clientPort', zk_port)
    p.add_property('dcs.zookeeper.quorum', zk_hosts)
    p.add_property('dcs.dns.interface', net_interface)

    if dbcfgs['dcs_ha'] == 'Y':
        # floating-IP settings replace the plain dns interface property
        p.add_property('dcs.master.floating.ip', 'true')
        p.add_property('dcs.master.floating.ip.external.interface',
                       net_interface)
        p.add_property('dcs.master.floating.ip.external.ip.address',
                       dbcfgs['dcs_floating_ip'])
        p.rm_property('dcs.dns.interface')

        # backup masters run on the configured backup nodes
        write_file(dcs_bkmaster_file, dbcfgs['dcs_backup_nodes'])

    p.write_xml()

    ### rest setting ###
    p = ParseXML(rest_site_file)
    p.add_property('rest.zookeeper.property.clientPort', zk_port)
    p.add_property('rest.zookeeper.quorum', zk_hosts)
    p.write_xml()

    ### run sqcertgen ###
    run_cmd('sqcertgen')
Exemplo n.º 27
0
 def get_ext_interface(self):
     """ Return the name of the interface used by the default (0.0.0.0) route. """
     route_info = cmd_output('netstat -rn | grep "^0.0.0.0" | awk \'{print $8}\'')
     return route_info.strip()
Exemplo n.º 28
0
def run():
    """ install Trafodion dependencies

    In offline mode a temporary local yum repo is written and used for all
    installs (and removed at the end); otherwise packages come from the
    configured public repos, with pdsh fetched from an EPEL mirror if needed.
    """
    dbcfgs = json.loads(dbcfgs_json)
    # hoist the repeated offline_mode test into a single flag
    offline = dbcfgs['offline_mode'] == 'Y'

    if offline:
        print('Installing pdsh in offline mode ...')

        # setup temp local repo
        repo_content = LOCAL_REPO_PTR % (dbcfgs['repo_ip'], dbcfgs['repo_port'])
        with open(REPO_FILE, 'w') as f:
            f.write(repo_content)

        run_cmd('yum install -y --disablerepo=\* --enablerepo=traflocal pdsh-rcmd-ssh pdsh')
    else:
        pdsh_installed = cmd_output('rpm -qa|grep -c pdsh')
        if pdsh_installed == '0':
            release = platform.release()
            # e.g. '3.10.0-1160.el7.x86_64' -> ('7', 'x86_64'); the dot is
            # escaped so only a literal '.' separates version and arch
            releasever, arch = re.search(r'el(\d)\.(\w+)', release).groups()

            if releasever == '7':
                pdsh_pkg = 'http://mirrors.neusoft.edu.cn/epel/7/%s/p/pdsh-2.31-1.el7.%s.rpm' % (arch, arch)
            elif releasever == '6':
                pdsh_pkg = 'http://mirrors.neusoft.edu.cn/epel/6/%s/pdsh-2.26-4.el6.%s.rpm' % (arch, arch)
            else:
                # err() aborts the script, so pdsh_pkg is always bound below
                err('Unsupported Linux version')

            print('Installing pdsh ...')
            run_cmd('yum install -y %s' % pdsh_pkg)

    package_list = [
        'apr',
        'apr-util',
        'expect',
        'gzip',
        'libiodbc-devel',
        'lzo',
        'lzop',
        'openldap-clients',
        'perl-DBD-SQLite',
        'perl-Params-Validate',
        'perl-Time-HiRes',
        'sqlite',
        'snappy',
        'unixODBC-devel',
        'unzip'
    ]

    all_pkg_list = run_cmd('rpm -qa')
    for pkg in package_list:
        if pkg in all_pkg_list:
            print('Package %s had already been installed' % pkg)
        else:
            print('Installing %s ...' % pkg)
            if offline:
                run_cmd('yum install -y --disablerepo=\* --enablerepo=traflocal %s' % pkg)
            else:
                run_cmd('yum install -y %s' % pkg)

    # remove temp repo file
    if offline:
        os.remove(REPO_FILE)
Exemplo n.º 29
0
def run():
    """ setup Kerberos security

    Creates the trafodion principal and keytab via kadmin, obtains initial
    tickets for the hdfs/hbase service users, and appends an auto-kinit
    snippet to the trafodion user's .bashrc.
    """
    dbcfgs = json.loads(dbcfgs_json)

    distro = dbcfgs['distro']
    admin_principal = dbcfgs['admin_principal']
    admin_passwd = dbcfgs['kdcadmin_pwd']
    kdc_server = dbcfgs['kdc_server']
    cluster_name = dbcfgs['cluster_name']
    # maxlife = dbcfgs['max_lifetime']
    # max_renewlife = dbcfgs['max_renew_lifetime']
    maxlife = '24hours'
    max_renewlife = '7days'
    kadmin_cmd = 'kadmin -p %s -w %s -s %s -q' % (admin_principal,
                                                  admin_passwd, kdc_server)

    host_name = socket.getfqdn()
    traf_user = dbcfgs['traf_user']
    hdfs_user = '******'
    hbase_user = '******'
    # realm is taken from the admin principal, e.g. 'admin@EXAMPLE.COM'
    realm = re.match('.*@(.*)', admin_principal).groups()[0]
    traf_keytab_dir = '/etc/%s/keytab' % traf_user
    traf_keytab = '%s/%s.keytab' % (traf_keytab_dir, traf_user)
    traf_principal = '%s/%s@%s' % (traf_user, host_name, realm)
    hbase_principal = '%s/%s@%s' % (hbase_user, host_name, realm)

    ### setting start ###
    print('Checking KDC server connection')
    run_cmd('%s listprincs' % kadmin_cmd)

    # create principals and keytabs for trafodion user
    principal_exists = cmd_output('%s listprincs | grep -c %s' %
                                  (kadmin_cmd, traf_principal))
    if int(principal_exists) == 0:  # not exist
        run_cmd('%s \'addprinc -randkey %s\'' % (kadmin_cmd, traf_principal))
        # Adjust principal's maxlife and maxrenewlife
        run_cmd(
            '%s \'modprinc -maxlife %s -maxrenewlife %s\' %s >/dev/null 2>&1' %
            (kadmin_cmd, maxlife, max_renewlife, traf_principal))

    run_cmd('mkdir -p %s' % traf_keytab_dir)

    # TODO: need skip add keytab if exist?
    print('Create keytab file for trafodion user')
    run_cmd('%s \'ktadd -k %s %s\'' %
            (kadmin_cmd, traf_keytab, traf_principal))
    run_cmd('chown %s %s' % (traf_user, traf_keytab))
    run_cmd('chmod 400 %s' % traf_keytab)

    # create principals for hdfs/hbase user
    print('Create principals for hdfs/hbase user')
    if 'CDH' in distro:
        hdfs_keytab = cmd_output(
            'find /var/run/cloudera-scm-agent/process/ -name hdfs.keytab | head -n 1'
        )
        hbase_keytab = cmd_output(
            'find /var/run/cloudera-scm-agent/process/ -name hbase.keytab | head -n 1'
        )
        hdfs_principal = '%s/%s@%s' % (hdfs_user, host_name, realm)
    elif 'HDP' in distro:
        hdfs_keytab = '/etc/security/keytabs/hdfs.headless.keytab'
        hbase_keytab = '/etc/security/keytabs/hbase.service.keytab'
        hdfs_principal = '%s-%s@%s' % (hdfs_user, cluster_name, realm)
    else:
        # previously fell through with hdfs_keytab/hbase_keytab unbound and
        # raised NameError below; fail fast with a clear message instead
        err('Unsupported Hadoop distro \'%s\' for Kerberos setup' % distro)

    sudo_prefix = get_sudo_prefix()
    kinit_cmd_ptr = '%s su - %s -s /bin/bash -c "kinit -kt %s %s"'
    run_cmd(kinit_cmd_ptr %
            (sudo_prefix, hdfs_user, hdfs_keytab, hdfs_principal))
    run_cmd(kinit_cmd_ptr %
            (sudo_prefix, hbase_user, hbase_keytab, hbase_principal))

    print('Done creating principals and keytabs')

    kinit_bashrc = """

# ---------------------------------------------------------------
# if needed obtain and cache the Kerberos ticket-granting ticket
# start automatic ticket renewal process
# ---------------------------------------------------------------
klist -s >/dev/null 2>&1
if [[ $? -eq 1 ]]; then
    kinit -kt %s %s >/dev/null 2>&1
fi

# ---------------------------------------------------------------
# Start trafodion kerberos ticket manager process
# ---------------------------------------------------------------
$TRAF_HOME/sql/scripts/krb5service start >/dev/null 2>&1
""" % (traf_keytab, traf_principal)

    traf_bashrc = '/home/%s/.bashrc' % traf_user
    with open(traf_bashrc, 'a') as f:
        f.write(kinit_bashrc)
Exemplo n.º 30
0
def run():
    """ Per-node Trafodion setup: kernel tuning, bashrc/init script install,
    scratch dirs, HBase trx jar deployment and sudoers file generation. """
    dbcfgs = json.loads(dbcfgs_json)

    # user-supplied home dir overrides the system default
    home_dir = get_default_home()
    if dbcfgs.has_key('home_dir'):
        home_dir = dbcfgs['home_dir']

    traf_user = dbcfgs['traf_user']
    traf_dirname = dbcfgs['traf_dirname']
    traf_home = '%s/%s/%s' % (home_dir, traf_user, traf_dirname)

    traf_ver = dbcfgs['traf_version']
    distro = dbcfgs['distro']
    traf_lib_path = traf_home + '/export/lib'
    scratch_locs = dbcfgs['scratch_locs'].split(',')

    # NOTE(review): the trailing '"' after %s/bin/hbase looks like a typo in
    # the sudoers rule — confirm against sudoers syntax
    SUDOER_CFG = """
## Allow trafodion id to run commands needed for backup and restore
%%%s ALL =(hbase) NOPASSWD: %s/bin/hbase"
""" % (traf_user, DEF_HBASE_HOME)

    ### kernel settings ###
    run_cmd('sysctl -w kernel.pid_max=65535 2>&1 > /dev/null')
    run_cmd('echo "kernel.pid_max=65535" >> /etc/sysctl.conf')

    ### copy trafodion bashrc ###
    bashrc_template = '%s/sysinstall/home/trafodion/.bashrc' % traf_home
    bashrc_file = '%s/%s/.bashrc' % (home_dir, traf_user)
    # backup orig bashrc
    if os.path.exists(bashrc_file):
        run_cmd('cp -f %s %s.bak' % (bashrc_file, bashrc_file))
    run_cmd('cp -f %s %s' % (bashrc_template, bashrc_file))
    run_cmd('chown -R %s:%s %s*' % (traf_user, traf_user, bashrc_file))

    ### copy init script ###
    init_script = '%s/sysinstall/etc/init.d/trafodion' % traf_home
    if os.path.exists(init_script):
        run_cmd('cp -rf %s /etc/init.d/' % init_script)
        run_cmd('chkconfig --add trafodion')
        # NOTE(review): runlevels 0 and 6 are shutdown/reboot — presumably
        # this hooks a clean stop of trafodion; confirm intent
        run_cmd('chkconfig --level 06 trafodion on')

    ### create and set permission for scratch file dir ###
    for loc in scratch_locs:
        # expand any shell variables
        locpath = cmd_output('source %s ; echo %s' % (TRAF_CFG_FILE,loc))
        if not os.path.exists(locpath):
            run_cmd('mkdir -p %s' % locpath)
            run_cmd('chown %s %s' % (traf_user,locpath))

    ### copy jar files ###
    hbase_lib_path = dbcfgs['hbase_lib_path']
    if 'APACHE' in distro:
        distro += dbcfgs['hbase_ver']

    # split e.g. 'CDH-5.4' into ('CDH', '5', '4')
    distro, v1, v2 = re.search(r'(\w+)-*(\d)\.(\d)', distro).groups()
    # map minor versions that share the same trx jar build
    if distro == 'CDH':
        if v2 == '6': v2 = '5'
        if v2 == '8': v2 = '7'
    elif distro == 'HDP':
        if v2 == '4': v2 = '3'

    hbase_trx_jar = 'hbase-trx-%s%s_%s-%s.jar' % (distro.lower(), v1, v2, traf_ver)
    traf_hbase_trx_path = '%s/%s' % (traf_lib_path, hbase_trx_jar)
    hbase_trx_path = '%s/%s' % (hbase_lib_path, hbase_trx_jar)
    if not os.path.exists(traf_hbase_trx_path):
        err('Cannot find HBase trx jar \'%s\' for your Hadoop distribution' % hbase_trx_jar)

    # reinstall mode, check if existing trx jar doesn't match the new trx jar file
    if dbcfgs.has_key('reinstall') and dbcfgs['reinstall'].upper() == 'Y':
        if not os.path.exists(hbase_trx_path):
            err('The trx jar \'%s\' doesn\'t exist in hbase lib path, cannot do reinstall, please do regular install' % hbase_trx_jar)
    else:
        # remove old trx and trafodion-utility jar files
        run_cmd('rm -rf %s/{hbase-trx-*,trafodion-utility-*}' % hbase_lib_path)

        # copy new ones
        run_cmd('cp %s %s' % (traf_hbase_trx_path, hbase_lib_path))
        run_cmd('cp %s/trafodion-utility-* %s' % (traf_lib_path, hbase_lib_path))

    # set permission
    run_cmd('chmod +r %s/{hbase-trx-*,trafodion-utility-*}' % hbase_lib_path)

    if dbcfgs['dcs_ha'] == 'Y':
        # set trafodion sudoer file for specific cmds
        SUDOER_CFG += """
## Trafodion Floating IP commands
Cmnd_Alias IP = /sbin/ip
Cmnd_Alias ARP = /sbin/arping

## Allow Trafodion id to run commands needed to configure floating IP
%%%s ALL = NOPASSWD: IP, ARP
""" % traf_user

    ### write trafodion sudoer file ###
    with open(TRAF_SUDOER_FILE, 'w') as f:
        f.write(SUDOER_CFG)
Exemplo n.º 31
0
 def get_home_dir(self):
     """ Return the parent dir of the trafodion user's home, or '' if unset. """
     if not self.dbcfgs.has_key('traf_user'):
         return ''
     traf_user = self.dbcfgs['traf_user']
     # getent yields the home dir; sed strips the '/<user>' component
     return cmd_output("getent passwd %s | awk -F: '{print $6}' | sed 's/\/%s//g'" % (traf_user, traf_user))
Exemplo n.º 32
0
def run():
    """ create trafodion user, bashrc, setup passwordless SSH """
    dbcfgs = json.loads(dbcfgs_json)

    # map distro id to the hadoop flavor name used in bashrc
    DISTRO = dbcfgs['distro']
    if 'CDH' in DISTRO:
        hadoop_type = 'cloudera'
    elif 'HDP' in DISTRO:
        hadoop_type = 'hortonworks'
    elif 'APACHE' in DISTRO:
        hadoop_type = 'apache'

    TRAF_USER = dbcfgs['traf_user']
    TRAF_PWD = dbcfgs['traf_pwd']
    TRAF_GROUP = TRAF_USER
    # default home base taken from /etc/default/useradd (usually /home)
    TRAF_HOME = cmd_output('cat /etc/default/useradd |grep HOME |cut -d "=" -f 2').strip()
    TRAF_USER_DIR = '%s/%s' % (TRAF_HOME, TRAF_USER)
    SQ_ROOT = '%s/%s-%s' % (TRAF_USER_DIR, dbcfgs['traf_basename'], dbcfgs['traf_version'])

    KEY_FILE = '/tmp/id_rsa'
    AUTH_KEY_FILE = '%s/.ssh/authorized_keys' % TRAF_USER_DIR
    SSH_CFG_FILE = '%s/.ssh/config' % TRAF_USER_DIR
    BASHRC_TEMPLATE = '%s/bashrc.template' % TMP_DIR
    BASHRC_FILE = '%s/.bashrc' % TRAF_USER_DIR
    ULIMITS_FILE = '/etc/security/limits.d/%s.conf' % TRAF_USER
    HSPERFDATA_FILE = '/tmp/hsperfdata_trafodion'

    # create trafodion user and group
    if not cmd_output('getent group %s' % TRAF_GROUP):
        run_cmd('groupadd %s > /dev/null 2>&1' % TRAF_GROUP)

    if not cmd_output('getent passwd %s' % TRAF_USER):
        # NOTE(review): TRAF_PWD is interpolated into a shell command —
        # special characters in the password would break/inject; confirm
        run_cmd('useradd --shell /bin/bash -m %s -g %s --password "$(openssl passwd %s)"' % (TRAF_USER, TRAF_GROUP, TRAF_PWD))
    elif not os.path.exists(TRAF_USER_DIR):
        run_cmd('mkdir -p %s' % TRAF_USER_DIR)
        run_cmd('chmod 700 %s' % TRAF_USER_DIR)

    # set ssh key
    run_cmd_as_user(TRAF_USER, 'echo -e "y" | ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')
    # the key is generated in copy_file script running on the installer node
    run_cmd('cp %s{,.pub} %s/.ssh/' % (KEY_FILE, TRAF_USER_DIR))

    run_cmd_as_user(TRAF_USER, 'cat ~/.ssh/id_rsa.pub > %s' % AUTH_KEY_FILE)
    run_cmd('chmod 644 %s' % AUTH_KEY_FILE)

    # disable host key prompts for intra-cluster ssh
    ssh_cfg = 'StrictHostKeyChecking=no\nNoHostAuthenticationForLocalhost=yes\n'
    with open(SSH_CFG_FILE, 'w') as f:
        f.write(ssh_cfg)
    run_cmd('chmod 600 %s' % SSH_CFG_FILE)

    run_cmd('chown -R %s:%s %s/.ssh/' % (TRAF_USER, TRAF_GROUP, TRAF_USER_DIR))

    # set bashrc: substitute placeholders in the template
    nodes = dbcfgs['node_list'].split(',')
    change_items = {
        '{{ java_home }}': dbcfgs['java_home'],
        '{{ sq_home }}': SQ_ROOT,
        '{{ hadoop_type }}': hadoop_type,
        '{{ node_list }}': ' '.join(nodes),
        '{{ node_count }}': str(len(nodes)),
        '{{ enable_ha }}': dbcfgs['enable_ha'],
        '{{ my_nodes }}': ' -w ' + ' -w '.join(nodes)
    }

    mod_file(BASHRC_TEMPLATE, change_items)

    if 'APACHE' in DISTRO:
        # apache distro has no parcel/package layout, so export paths directly
        bashrc_content = """
export HADOOP_PREFIX=%s
export HBASE_HOME=%s
export PATH=$PATH:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin:$HBASE_HOME/bin
        """ % (dbcfgs['hadoop_home'], dbcfgs['hbase_home'])
        append_file(BASHRC_TEMPLATE, bashrc_content, position='HADOOP_TYPE')

    # backup bashrc if exsits
    if os.path.exists(BASHRC_FILE):
        run_cmd('cp %s %s.bak' % ((BASHRC_FILE,) *2))

    # copy bashrc to trafodion's home
    run_cmd('cp %s %s' % (BASHRC_TEMPLATE, BASHRC_FILE))
    run_cmd('chown -R %s:%s %s*' % (TRAF_USER, TRAF_GROUP, BASHRC_FILE))

    # set ulimits for trafodion user
    ulimits_config = '''
# Trafodion settings
%s   soft   core unlimited
%s   hard   core unlimited
%s   soft   memlock unlimited
%s   hard   memlock unlimited
%s   soft   nofile 32768
%s   hard   nofile 65536
%s   soft   nproc 100000
%s   hard   nproc 100000
%s   soft nofile 8192
%s   hard nofile 65535
hbase soft nofile 8192
''' % ((TRAF_USER,) * 10)

    with open(ULIMITS_FILE, 'w') as f:
        f.write(ulimits_config)

    # change permission for hsperfdata
    if os.path.exists(HSPERFDATA_FILE):
        run_cmd('chown -R %s:%s %s' % (TRAF_USER, TRAF_GROUP, HSPERFDATA_FILE))

    # clean up unused key file at the last step
    run_cmd('rm -rf %s{,.pub}' % KEY_FILE)

    print 'Setup trafodion user successfully!'
Exemplo n.º 33
0
def run():
    """ Configure DCS/REST: server list, master, env files, site xmls,
    then generate certificates with sqcertgen. """
    dbcfgs = json.loads(dbcfgs_json)

    SQ_ROOT = os.environ['MY_SQROOT']
    TRAF_VER = dbcfgs['traf_version']
    HBASE_XML_FILE = dbcfgs['hbase_xml_file']

    DCS_INSTALL_ENV = 'export DCS_INSTALL_DIR=%s/dcs-%s' % (SQ_ROOT, TRAF_VER)
    REST_INSTALL_ENV = 'export REST_INSTALL_DIR=%s/rest-%s' % (SQ_ROOT, TRAF_VER)

    DCS_CONF_DIR = '%s/dcs-%s/conf' % (SQ_ROOT, TRAF_VER)
    DCS_SRV_FILE = DCS_CONF_DIR + '/servers'
    DCS_MASTER_FILE = DCS_CONF_DIR + '/master'
    DCS_BKMASTER_FILE = DCS_CONF_DIR + '/backup-masters'
    DCS_ENV_FILE = DCS_CONF_DIR + '/dcs-env.sh'
    DCS_SITE_FILE = DCS_CONF_DIR + '/dcs-site.xml'
    REST_SITE_FILE = '%s/rest-%s/conf/rest-site.xml' % (SQ_ROOT, TRAF_VER)
    TRAFCI_FILE = SQ_ROOT + '/trafci/bin/trafci'
    SQENV_FILE = SQ_ROOT + '/sqenvcom.sh'

    ### dcs setting ###
    # servers: one "<node> <count>" line per node
    nodes = dbcfgs['node_list'].split(',')
    dcs_cnt = dbcfgs['dcs_cnt_per_node']
    dcs_servers = ''
    for node in nodes:
        dcs_servers += '%s %s\n' % (node, dcs_cnt)

    write_file(DCS_SRV_FILE, dcs_servers)

    ### modify dcs config files ###
    # modify master: first node acts as DCS master
    dcs_master = nodes[0]
    append_file(DCS_MASTER_FILE, dcs_master)

    # modify sqenvcom.sh
    append_file(SQENV_FILE, DCS_INSTALL_ENV)
    append_file(SQENV_FILE, REST_INSTALL_ENV)

    # modify dcs-env.sh: zookeeper is managed externally
    mod_file(DCS_ENV_FILE, {'.*DCS_MANAGES_ZK=.*':'export DCS_MANAGES_ZK=false'})

    # modify trafci: point it at the DCS master
    mod_file(TRAFCI_FILE, {'HNAME=.*':'HNAME=%s:23400' % dcs_master})

    # modify dcs-site.xml with zookeeper settings from hbase-site.xml
    net_interface = cmd_output('netstat -rn | grep "^0.0.0.0" | awk \'{print $8}\'').strip()
    hb = ParseXML(HBASE_XML_FILE)
    zk_hosts = hb.get_property('hbase.zookeeper.quorum')
    zk_port = hb.get_property('hbase.zookeeper.property.clientPort')

    p = ParseXML(DCS_SITE_FILE)
    p.add_property('dcs.zookeeper.property.clientPort', zk_port)
    p.add_property('dcs.zookeeper.quorum', zk_hosts)
    p.add_property('dcs.dns.interface', net_interface)

    if dbcfgs['dcs_ha'] == 'Y':
        # HA mode: floating-IP properties replace the plain dns interface
        dcs_floating_ip = dbcfgs['dcs_floating_ip']
        dcs_backup_nodes = dbcfgs['dcs_backup_nodes']
        p.add_property('dcs.master.floating.ip', 'true')
        p.add_property('dcs.master.floating.ip.external.interface', net_interface)
        p.add_property('dcs.master.floating.ip.external.ip.address', dcs_floating_ip)
        p.rm_property('dcs.dns.interface')

        # modify backup_master
        write_file(DCS_BKMASTER_FILE, dcs_backup_nodes)

    p.write_xml()

    ### rest setting ###
    p = ParseXML(REST_SITE_FILE)
    p.add_property('rest.zookeeper.property.clientPort', zk_port)
    p.add_property('rest.zookeeper.quorum', zk_hosts)
    p.write_xml()

    ### run sqcertgen ###
    run_cmd('sqcertgen')
Exemplo n.º 34
0
 def get_ext_interface(self):
     """ Return the interface used by the default (0.0.0.0) route. """
     cmd = 'netstat -rn | grep "^0.0.0.0" | awk \'{print $8}\''
     return cmd_output(cmd).strip()
Exemplo n.º 35
0
 def get_rootdisk_free(self):
     """ Return the free space df reports for the root filesystem. """
     return cmd_output('df -h|grep "\/$" | awk \'{print $4}\'').strip()
Exemplo n.º 36
0
 def __init__(self, dbcfgs):
     """ Snapshot system information (cpu, memory, sysctl) for later checks. """
     # raw command output captured once at construction time
     self.CPUINFO = cmd_output('cat /proc/cpuinfo')
     self.MEMINFO = cmd_output('cat /proc/meminfo')
     self.SYSCTLINFO = cmd_output('sysctl -a')
     self.version = Version()
     # config dict consulted by other methods (e.g. get_home_dir)
     self.dbcfgs = dbcfgs
Exemplo n.º 37
0
 def get_ext_interface(self):
     """ Return the interface of the default route, without trailing newline. """
     # strip the trailing newline of the shell pipeline output, matching the
     # netstat-based variants of this method elsewhere in the file
     return cmd_output('ip route |grep default|awk \'{print $5}\'').strip()
Exemplo n.º 38
0
def run():
    """ create trafodion user, bashrc, setup passwordless SSH """
    dbcfgs = json.loads(dbcfgs_json)

    # map distro id to the hadoop flavor name exported in trafodion_config
    DISTRO = dbcfgs['distro']
    if 'CDH' in DISTRO:
        hadoop_type = 'cloudera'
    elif 'HDP' in DISTRO:
        hadoop_type = 'hortonworks'
    elif 'APACHE' in DISTRO:
        hadoop_type = 'apache'

    TRAF_USER = dbcfgs['traf_user']
    TRAF_GROUP = TRAF_USER
    # default home base taken from /etc/default/useradd (usually /home)
    HOME_DIR = cmd_output('cat /etc/default/useradd |grep HOME |cut -d "=" -f 2').strip()
    # customize trafodion home dir
    if dbcfgs.has_key('home_dir') and dbcfgs['home_dir']:
        HOME_DIR = dbcfgs['home_dir']

    TRAF_USER_DIR = '%s/%s' % (HOME_DIR, TRAF_USER)
    TRAF_DIRNAME = dbcfgs['traf_dirname']
    TRAF_HOME = '%s/%s' % (TRAF_USER_DIR, TRAF_DIRNAME)

    TRAFODION_CFG_DIR = '/etc/trafodion/'
    TRAFODION_CFG_FILE = '/etc/trafodion/trafodion_config'
    HBASE_XML_FILE = dbcfgs['hbase_xml_file']
    KEY_FILE = '/tmp/id_rsa'
    AUTH_KEY_FILE = '%s/.ssh/authorized_keys' % TRAF_USER_DIR
    SSH_CFG_FILE = '%s/.ssh/config' % TRAF_USER_DIR
    ULIMITS_FILE = '/etc/security/limits.d/%s.conf' % TRAF_USER
    HSPERFDATA_FILE = '/tmp/hsperfdata_trafodion'

    # create trafodion user and group
    if not cmd_output('getent group %s' % TRAF_GROUP):
        run_cmd('groupadd %s > /dev/null 2>&1' % TRAF_GROUP)

    if not cmd_output('getent passwd %s' % TRAF_USER):
        TRAF_PWD = dbcfgs['traf_pwd']
        run_cmd('useradd --shell /bin/bash -m %s -g %s --home %s --password "$(openssl passwd %s)"' % (TRAF_USER, TRAF_GROUP, TRAF_USER_DIR, TRAF_PWD))
    elif not os.path.exists(TRAF_USER_DIR):
        run_cmd('mkdir -p %s' % TRAF_USER_DIR)
        run_cmd('chmod 700 %s' % TRAF_USER_DIR)

    # set ssh key
    run_cmd_as_user(TRAF_USER, 'echo -e "y" | ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')
    # the key is generated in copy_file script running on the installer node
    run_cmd('cp %s{,.pub} %s/.ssh/' % (KEY_FILE, TRAF_USER_DIR))

    run_cmd_as_user(TRAF_USER, 'cat ~/.ssh/id_rsa.pub > %s' % AUTH_KEY_FILE)
    run_cmd('chmod 644 %s' % AUTH_KEY_FILE)

    # disable host key prompts for intra-cluster ssh
    ssh_cfg = 'StrictHostKeyChecking=no\nNoHostAuthenticationForLocalhost=yes\n'
    with open(SSH_CFG_FILE, 'w') as f:
        f.write(ssh_cfg)
    run_cmd('chmod 600 %s' % SSH_CFG_FILE)

    run_cmd('chown -R %s:%s %s/.ssh/' % (TRAF_USER, TRAF_GROUP, TRAF_USER_DIR))

    # pull zookeeper settings from hbase-site.xml
    hb = ParseXML(HBASE_XML_FILE)
    zk_nodes = hb.get_property('hbase.zookeeper.quorum')
    zk_port = hb.get_property('hbase.zookeeper.property.clientPort')
    # set trafodion_config
    nodes = dbcfgs['node_list'].split(',')
    trafodion_config = """
export TRAF_HOME="%s"
export MY_SQROOT=$TRAF_HOME # for compatibility
export JAVA_HOME="%s"
export NODE_LIST="%s"
export MY_NODES="%s"
export node_count="%s"
export HADOOP_TYPE="%s"
export ENABLE_HA="%s"
export ZOOKEEPER_NODES="%s"
export ZOOKEEPER_PORT="%s"
export SECURE_HADOOP="%s"
""" % (TRAF_HOME, dbcfgs['java_home'], ' '.join(nodes), ' -w ' + ' -w '.join(nodes),
       str(len(nodes)), hadoop_type, dbcfgs['enable_ha'], zk_nodes, zk_port, dbcfgs['secure_hadoop'])

    run_cmd('mkdir -p %s' % TRAFODION_CFG_DIR)
    write_file(TRAFODION_CFG_FILE, trafodion_config)

    if 'APACHE' in DISTRO:
        # apache distro has no parcel/package layout, so export paths directly
        extra_config = """
export HADOOP_PREFIX=%s
export HBASE_HOME=%s
export PATH=$PATH:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin:$HBASE_HOME/bin
        """ % (dbcfgs['hadoop_home'], dbcfgs['hbase_home'])
        append_file(TRAFODION_CFG_FILE, extra_config)

    # set permission
    run_cmd('chown -R %s:%s %s*' % (TRAF_USER, TRAF_GROUP, TRAFODION_CFG_DIR))


    # set ulimits for trafodion user
    ulimits_config = '''
# Trafodion settings
%s   soft   core unlimited
%s   hard   core unlimited
%s   soft   memlock unlimited
%s   hard   memlock unlimited
%s   soft   nofile 32768
%s   hard   nofile 65536
%s   soft   nproc 100000
%s   hard   nproc 100000
%s   soft nofile 8192
%s   hard nofile 65535
''' % ((TRAF_USER,) * 10)

    write_file(ULIMITS_FILE, ulimits_config)

    # change permission for hsperfdata
    if os.path.exists(HSPERFDATA_FILE):
        run_cmd('chown -R %s:%s %s' % (TRAF_USER, TRAF_GROUP, HSPERFDATA_FILE))

    # clean up unused key file at the last step
    run_cmd('rm -rf %s{,.pub}' % KEY_FILE)

    print 'Setup trafodion user successfully!'
Exemplo n.º 39
0
def run():
    """ Add-node setup: create trafodion user, unpack copied binaries,
    install config/jars/init script and write sudoers + ulimits. """
    dbcfgs = json.loads(dbcfgs_json)

    # sanity-check prerequisites copied/configured by the installer
    if not os.path.exists(dbcfgs['hbase_lib_path']):
        err('Cannot find HBase lib folder')
    if not os.path.exists(dbcfgs['java_home']):
        err('Cannot find Java, please set the JAVA_HOME on the new nodes to: %s'
            % dbcfgs['java_home'])

    # user-supplied home dir overrides the system default
    home_dir = get_default_home()
    if dbcfgs.has_key('home_dir'):
        home_dir = dbcfgs['home_dir']

    traf_user = dbcfgs['traf_user']
    traf_home = dbcfgs['traf_home']
    traf_user_dir = '%s/%s' % (home_dir, traf_user)

    traf_ver = dbcfgs['traf_version']
    #    scratch_locs = dbcfgs['scratch_locs'].split(',')

    # NOTE(review): the trailing '"' after %s/bin/hbase looks like a typo in
    # the sudoers rule — confirm against sudoers syntax
    SUDOER_CFG = """
## Allow trafodion id to run commands needed for backup and restore
%%%s ALL =(hbase) NOPASSWD: %s/bin/hbase"
""" % (traf_user, DEF_HBASE_HOME)

    ### add trafodion user ###
    # create trafodion user and group
    if cmd_output('getent passwd %s' % traf_user):
        print 'user [%s] exists' % traf_user
        # trafodion user exists, set actual trafodion group
        traf_group = cmd_output('id -ng %s' % traf_user)
    else:
        # default trafodion group
        traf_group = traf_user
        if not cmd_output('getent group %s' % traf_group):
            run_cmd('groupadd %s' % traf_group)
        # pre-hashed password copied from an existing node
        traf_shadow = dbcfgs['traf_shadow']
        print 'Adding user [%s]' % traf_user
        run_cmd(
            'useradd --shell /bin/bash -m %s -g %s --home %s --password "%s"' %
            (traf_user, traf_group, traf_user_dir, traf_shadow))
        print 'Added user [%s]' % traf_user

    if not os.path.exists(traf_user_dir):
        run_cmd('mkdir -p %s' % traf_user_dir)
        run_cmd('chmod 700 %s' % traf_user_dir)

    ### untar the copied trafoion binaries ###
    TRAF_PKG_FILE = '/tmp/traf_bin.tar.gz'
    run_cmd('mkdir -p %s' % traf_home)
    run_cmd('mkdir -p /etc/trafodion')
    run_cmd('tar xf %s -C %s' % (TRAF_PKG_FILE, traf_home))

    # files staged in /tmp by the installer node
    run_cmd('mv -f /tmp/trafodion_config %s' % TRAF_CFG_FILE)
    run_cmd('cp -rf /tmp/.ssh %s/..' % traf_home)
    run_cmd('mv -f /tmp/hbase-trx-* %s' % dbcfgs['hbase_lib_path'])
    run_cmd('mv -f /tmp/trafodion-utility-* %s' % dbcfgs['hbase_lib_path'])

    ### copy trafodion bashrc ###
    bashrc_template = '%s/sysinstall/home/trafodion/.bashrc' % traf_home
    bashrc_file = '%s/%s/.bashrc' % (home_dir, traf_user)
    # backup orig bashrc
    if os.path.exists(bashrc_file):
        run_cmd('cp -f %s %s.bak' % (bashrc_file, bashrc_file))
    run_cmd('cp -f %s %s' % (bashrc_template, bashrc_file))

    # set permission
    run_cmd('chmod 700 %s/../.ssh' % traf_home)
    cmd_output('chmod 600 %s/../.ssh/{id_rsa,config,authorized_keys}' %
               traf_home)
    run_cmd('chmod 777 %s' % TRAF_CFG_FILE)
    run_cmd('chown -R %s:%s /etc/trafodion' % (traf_user, traf_group))
    run_cmd('chmod +r %s/{hbase-trx-*,trafodion-utility-*}' %
            dbcfgs['hbase_lib_path'])
    run_cmd('chown -R %s:%s %s' % (traf_user, traf_group, traf_user_dir))

    ### modify CLUSTERNAME ###
    mod_file(TRAF_CFG_FILE,
             {'CLUSTERNAME=.*': 'CLUSTERNAME=%s' % socket.gethostname()})

    ### kernel settings ###
    run_cmd('echo "kernel.pid_max=65535" >> /etc/sysctl.conf')
    run_cmd('echo "kernel.msgmnb=65536" >> /etc/sysctl.conf')
    run_cmd('echo "kernel.msgmax=65536" >> /etc/sysctl.conf')
    run_cmd('/sbin/sysctl -p /etc/sysctl.conf 2>&1 > /dev/null')

    ### copy init script ###
    init_script = '%s/sysinstall/etc/init.d/trafodion' % traf_home
    if os.path.exists(init_script):
        run_cmd('cp -rf %s /etc/init.d/' % init_script)
        run_cmd('chkconfig --add trafodion')
        run_cmd('chkconfig --level 06 trafodion on')

    ### create and set permission for scratch file dir ###
#    for loc in scratch_locs:
#        # don't set permission for HOME folder
#        if not os.path.exists(loc):
#            run_cmd('mkdir -p %s' % loc)
#        if home_dir not in loc:
#            run_cmd('chmod 777 %s' % loc)

    if dbcfgs['enable_ha'] == 'true':
        # set trafodion sudoer file for specific cmds
        SUDOER_CFG += """
## Trafodion Floating IP commands
Cmnd_Alias IP = /sbin/ip
Cmnd_Alias ARP = /sbin/arping

## Allow Trafodion id to run commands needed to configure floating IP
%%%s ALL = NOPASSWD: IP, ARP
""" % traf_user

    ### write trafodion sudoer file ###
    with open(TRAF_SUDOER_FILE, 'w') as f:
        f.write(SUDOER_CFG)

    # set ulimits for trafodion user
    ulimits_file = '/etc/security/limits.d/%s.conf' % traf_user
    ulimits_config = '''
# Trafodion settings
%s   soft   core unlimited
%s   hard   core unlimited
%s   soft   memlock unlimited
%s   hard   memlock unlimited
%s   soft   nofile 32768
%s   hard   nofile 65536
%s   soft   nproc 100000
%s   hard   nproc 100000
''' % ((traf_user, ) * 8)

    write_file(ulimits_file, ulimits_config)
Exemplo n.º 40
0
def run():
    """ Per-node Trafodion setup: kernel tuning, bashrc/init script install,
    scratch dirs, HBase trx jar deployment and sudoers file generation. """
    dbcfgs = json.loads(dbcfgs_json)

    # default home base taken from /etc/default/useradd (usually /home)
    HOME_DIR = cmd_output(
        'sudo cat /etc/default/useradd |grep HOME |cut -d "=" -f 2').strip()
    if dbcfgs.has_key('home_dir'):
        HOME_DIR = dbcfgs['home_dir']

    TRAF_USER = dbcfgs['traf_user']
    TRAF_DIRNAME = dbcfgs['traf_dirname']
    TRAF_HOME = '%s/%s/%s' % (HOME_DIR, TRAF_USER, TRAF_DIRNAME)

    TRAF_VER = dbcfgs['traf_version']
    DISTRO = dbcfgs['distro']
    TRAF_LIB_PATH = TRAF_HOME + '/export/lib'
    SCRATCH_LOCS = dbcfgs['scratch_locs'].split(',')

    SUDOER_FILE = '/etc/sudoers.d/trafodion'
    # NOTE(review): the trailing '"' after /usr/bin/hbase looks like a typo in
    # the sudoers rule — confirm against sudoers syntax
    SUDOER_CFG = """
## Allow trafodion id to run commands needed for backup and restore
%%%s ALL =(hbase) NOPASSWD: /usr/bin/hbase"
""" % TRAF_USER

    ### kernel settings ###
    run_cmd('sysctl -w kernel.pid_max=65535 2>&1 > /dev/null')
    run_cmd('echo "kernel.pid_max=65535" >> /etc/sysctl.conf')

    ### copy trafodion bashrc ###
    BASHRC_TEMPLATE = '%s/sysinstall/home/trafodion/.bashrc' % TRAF_HOME
    BASHRC_FILE = '%s/%s/.bashrc' % (HOME_DIR, TRAF_USER)
    run_cmd('cp -f %s %s' % (BASHRC_TEMPLATE, BASHRC_FILE))
    run_cmd('chown -R %s:%s %s*' % (TRAF_USER, TRAF_USER, BASHRC_FILE))

    ### copy init script ###
    init_script = '%s/sysinstall/etc/init.d/trafodion' % TRAF_HOME
    if os.path.exists(init_script):
        run_cmd('cp -rf %s /etc/init.d/' % init_script)
        run_cmd('chkconfig --add trafodion')
        run_cmd('chkconfig --level 06 trafodion on')

    ### create and set permission for scratch file dir ###
    for loc in SCRATCH_LOCS:
        # don't set permission for HOME folder
        if not os.path.exists(loc):
            run_cmd('mkdir -p %s' % loc)
        if HOME_DIR not in loc:
            run_cmd('chmod 777 %s' % loc)

    ### copy jar files ###
    hbase_lib_path = dbcfgs['hbase_lib_path']
    if 'APACHE' in DISTRO:
        hbase_home = dbcfgs['hbase_home']
        hbase_lib_path = hbase_home + '/lib'
        # for apache distro, get hbase version from cmdline
        hbase_ver = cmd_output('%s/bin/hbase version | head -n1' % hbase_home)
        hbase_ver = re.search(r'HBase (\d\.\d)', hbase_ver).groups()[0]
        DISTRO += hbase_ver

    # split e.g. 'CDH-5.4' into ('CDH', '5', '4')
    distro, v1, v2 = re.search(r'(\w+)-*(\d)\.(\d)', DISTRO).groups()
    # map minor versions that share the same trx jar build
    if distro == 'CDH':
        if v2 == '6': v2 = '5'
        if v2 == '8': v2 = '7'
    elif distro == 'HDP':
        if v2 == '4': v2 = '3'

    hbase_trx_jar = 'hbase-trx-%s%s_%s-%s.jar' % (distro.lower(), v1, v2,
                                                  TRAF_VER)
    traf_hbase_trx_path = '%s/%s' % (TRAF_LIB_PATH, hbase_trx_jar)
    hbase_trx_path = '%s/%s' % (hbase_lib_path, hbase_trx_jar)
    if not os.path.exists(traf_hbase_trx_path):
        err('Cannot find HBase trx jar \'%s\' for your Hadoop distribution' %
            hbase_trx_jar)

    # reinstall mode, check if existing trx jar doesn't match the new trx jar file
    if dbcfgs.has_key('reinstall') and dbcfgs['reinstall'].upper() == 'Y':
        if not os.path.exists(hbase_trx_path):
            err('The trx jar \'%s\' doesn\'t exist in hbase lib path, cannot do reinstall, please do regular install'
                % hbase_trx_jar)
    else:
        # remove old trx and trafodion-utility jar files
        run_cmd('rm -rf %s/{hbase-trx-*,trafodion-utility-*}' % hbase_lib_path)

        # copy new ones
        run_cmd('cp %s %s' % (traf_hbase_trx_path, hbase_lib_path))
        run_cmd('cp %s/trafodion-utility-* %s' %
                (TRAF_LIB_PATH, hbase_lib_path))

    # set permission
    run_cmd('chmod +r %s/{hbase-trx-*,trafodion-utility-*}' % hbase_lib_path)

    if dbcfgs['dcs_ha'] == 'Y':
        # set trafodion sudoer file for specific cmds
        SUDOER_CFG += """
## Trafodion Floating IP commands
Cmnd_Alias IP = /sbin/ip
Cmnd_Alias ARP = /sbin/arping

## Allow Trafodion id to run commands needed to configure floating IP
%%%s ALL = NOPASSWD: IP, ARP
""" % TRAF_USER

    ### write trafodion sudoer file ###
    with open(SUDOER_FILE, 'w') as f:
        f.write(SUDOER_CFG)
Exemplo n.º 41
0
 def get_ext_interface(self):
     """ Return the interface of the default route, without trailing newline. """
     # strip the trailing newline of the shell pipeline output, matching the
     # netstat-based variants of this method elsewhere in the file
     return cmd_output('ip route |grep default|awk \'{print $5}\'').strip()
Exemplo n.º 42
0
def run():
    """ Per-node Trafodion setup: kernel settings, trafodion user's bashrc,
    init script, scratch directories, HBase trx jar deployment and the
    trafodion sudoers file.
    """
    dbcfgs = json.loads(dbcfgs_json)

    home_dir = get_default_home()
    if dbcfgs.has_key('home_dir'):
        home_dir = dbcfgs['home_dir']

    traf_user = dbcfgs['traf_user']
    traf_dirname = dbcfgs['traf_dirname']
    traf_home = '%s/%s/%s' % (home_dir, traf_user, traf_dirname)

    traf_ver = dbcfgs['traf_version']
    distro = dbcfgs['distro']
    traf_lib_path = traf_home + '/export/lib'
    scratch_locs = dbcfgs['scratch_locs'].split(',')

    # BUGFIX: the original line ended with a stray '"' after "%s/bin/hbase",
    # which is invalid sudoers syntax and would make visudo reject the file.
    SUDOER_CFG = """
## Allow trafodion id to run commands needed for backup and restore
%%%s ALL =(hbase) NOPASSWD: %s/bin/hbase
""" % (traf_user, DEF_HBASE_HOME)

    ### kernel settings ###
    run_cmd('sysctl -w kernel.pid_max=65535 2>&1 > /dev/null')
    run_cmd('echo "kernel.pid_max=65535" >> /etc/sysctl.conf')

    ### copy trafodion bashrc ###
    bashrc_template = '%s/sysinstall/home/trafodion/.bashrc' % traf_home
    bashrc_file = '%s/%s/.bashrc' % (home_dir, traf_user)
    # backup orig bashrc
    if os.path.exists(bashrc_file):
        run_cmd('cp -f %s %s.bak' % (bashrc_file, bashrc_file))
    run_cmd('cp -f %s %s' % (bashrc_template, bashrc_file))
    run_cmd('chown -R %s:%s %s*' % (traf_user, traf_user, bashrc_file))

    ### copy init script ###
    init_script = '%s/sysinstall/etc/init.d/trafodion' % traf_home
    if os.path.exists(init_script):
        run_cmd('cp -rf %s /etc/init.d/' % init_script)
        run_cmd('chkconfig --add trafodion')
        run_cmd('chkconfig --level 06 trafodion on')

    ### create and set permission for scratch file dir ###
    for loc in scratch_locs:
        # expand any shell variables
        locpath = cmd_output('source %s ; echo %s' % (TRAF_CFG_FILE, loc))
        if not os.path.exists(locpath):
            run_cmd('mkdir -p %s' % locpath)
            run_cmd('chown %s %s' % (traf_user, locpath))

    ### copy jar files ###
    hbase_lib_path = dbcfgs['hbase_lib_path']
    if 'APACHE' in distro:
        distro += dbcfgs['hbase_ver']

    # normalize e.g. 'CDH-5.6' -> ('CDH', '5', '6'); some minor versions
    # reuse the trx jar built for an earlier minor release
    distro, v1, v2 = re.search(r'(\w+)-*(\d)\.(\d)', distro).groups()
    if distro == 'CDH':
        if v2 == '6': v2 = '5'
        if v2 == '8': v2 = '7'
    elif distro == 'HDP':
        if v2 == '4': v2 = '3'

    hbase_trx_jar = 'hbase-trx-%s%s_%s-%s.jar' % (distro.lower(), v1, v2,
                                                  traf_ver)
    traf_hbase_trx_path = '%s/%s' % (traf_lib_path, hbase_trx_jar)
    hbase_trx_path = '%s/%s' % (hbase_lib_path, hbase_trx_jar)
    if not os.path.exists(traf_hbase_trx_path):
        err('Cannot find HBase trx jar \'%s\' for your Hadoop distribution' %
            hbase_trx_jar)

    # reinstall mode, check if existing trx jar doesn't match the new trx jar file
    if dbcfgs.has_key('reinstall') and dbcfgs['reinstall'].upper() == 'Y':
        if not os.path.exists(hbase_trx_path):
            err('The trx jar \'%s\' doesn\'t exist in hbase lib path, cannot do reinstall, please do regular install'
                % hbase_trx_jar)
    else:
        # remove old trx and trafodion-utility jar files
        run_cmd('rm -rf %s/{hbase-trx-*,trafodion-utility-*}' % hbase_lib_path)

        # copy new ones
        run_cmd('cp %s %s' % (traf_hbase_trx_path, hbase_lib_path))
        run_cmd('cp %s/trafodion-utility-* %s' %
                (traf_lib_path, hbase_lib_path))

    # set permission
    run_cmd('chmod +r %s/{hbase-trx-*,trafodion-utility-*}' % hbase_lib_path)

    if dbcfgs['dcs_ha'] == 'Y':
        # set trafodion sudoer file for specific cmds
        SUDOER_CFG += """
## Trafodion Floating IP commands
Cmnd_Alias IP = /sbin/ip
Cmnd_Alias ARP = /sbin/arping

## Allow Trafodion id to run commands needed to configure floating IP
%%%s ALL = NOPASSWD: IP, ARP
""" % traf_user

    ### write trafodion sudoer file ###
    with open(TRAF_SUDOER_FILE, 'w') as f:
        f.write(SUDOER_CFG)
Exemplo n.º 43
0
def run():
    """ create trafodion user, bashrc, setup passwordless SSH """
    dbcfgs = json.loads(dbcfgs_json)

    distro = dbcfgs['distro']
    if 'CDH' in distro:
        hadoop_type = 'cloudera'
    elif 'HDP' in distro:
        hadoop_type = 'hortonworks'
    elif 'APACHE' in distro:
        hadoop_type = 'apache'

    home_dir = get_default_home()
    # customize trafodion home dir
    if dbcfgs.has_key('home_dir') and dbcfgs['home_dir']:
        home_dir = dbcfgs['home_dir']

    traf_user = dbcfgs['traf_user']
    traf_user_dir = '%s/%s' % (home_dir, traf_user)
    traf_dirname = dbcfgs['traf_dirname']
    traf_home = '%s/%s' % (traf_user_dir, traf_dirname)

    hbase_xml_file = dbcfgs['hbase_xml_file']
    auth_key_file = '%s/.ssh/authorized_keys' % traf_user_dir
    ssh_cfg_file = '%s/.ssh/config' % traf_user_dir
    ulimits_file = '/etc/security/limits.d/%s.conf' % traf_user

    # create trafodion user and group
    if cmd_output('getent passwd %s' % traf_user):
        # trafodion user exists, set actual trafodion group
        traf_group = cmd_output('id -ng %s' % traf_user)
    else:
        # default trafodion group
        traf_group = traf_user
        if not cmd_output('getent group %s' % traf_group):
            run_cmd('groupadd %s > /dev/null 2>&1' % traf_group)
        traf_pwd = dbcfgs['traf_pwd']
        run_cmd('useradd --shell /bin/bash -m %s -g %s --home %s --password "$(openssl passwd %s)"' % (traf_user, traf_group, traf_user_dir, traf_pwd))
    # hbase group is generally either hbase or hadoop, depending on distro
    if cmd_output('getent group hbase'):
        cmd_output('/usr/sbin/usermod -a -G hbase %s' % traf_user)
    if cmd_output('getent group hadoop'):
        cmd_output('/usr/sbin/usermod -a -G hadoop %s' % traf_user)
    if cmd_output('getent group hive'):
        cmd_output('/usr/sbin/usermod -a -G hive %s' % traf_user)

    if not os.path.exists(traf_user_dir):
        run_cmd('mkdir -p %s' % traf_user_dir)
        run_cmd('chmod 700 %s' % traf_user_dir)

    # set ssh key
    run_cmd_as_user(traf_user, 'echo -e "y" | ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')
    # the key is generated in copy_file script running on the installer node
    run_cmd('cp %s{,.pub} %s/.ssh/' % (SSHKEY_FILE, traf_user_dir))

    run_cmd_as_user(traf_user, 'cat ~/.ssh/id_rsa.pub > %s' % auth_key_file)
    run_cmd('chmod 644 %s' % auth_key_file)

    ssh_cfg = 'StrictHostKeyChecking=no\nNoHostAuthenticationForLocalhost=yes\n'
    with open(ssh_cfg_file, 'w') as f:
        f.write(ssh_cfg)
    run_cmd('chmod 600 %s' % ssh_cfg_file)

    run_cmd('chown -R %s:%s %s/.ssh/' % (traf_user, traf_group, traf_user_dir))

    hb = ParseXML(hbase_xml_file)
    zk_nodes = hb.get_property('hbase.zookeeper.quorum')
    zk_port = hb.get_property('hbase.zookeeper.property.clientPort')
    # set trafodion_config
    nodes = dbcfgs['node_list'].split(',')
    trafodion_config = """
export TRAF_HOME="%s"
export TRAF_VAR=$TRAF_HOME/tmp
export MY_SQROOT=$TRAF_HOME # for compatibility
export JAVA_HOME="%s"
export node_count="%s"
export HADOOP_TYPE="%s"
export ENABLE_HA="%s"
export ZOOKEEPER_NODES="%s"
export ZOOKEEPER_PORT="%s"
export SECURE_HADOOP="%s"
export CLUSTERNAME="%s"
""" % (traf_home, dbcfgs['java_home'], str(len(nodes)), hadoop_type, dbcfgs['enable_ha'],
       zk_nodes, zk_port, dbcfgs['secure_hadoop'], socket.gethostname())

    # save additonal configs for elastic
    trafodion_config += """
export hbase_xml_file="%s"
export hbase_lib_path="%s"
export traf_user="******"
export traf_version="%s"
export dcs_cnt_per_node="%s"
""" % (dbcfgs['hbase_xml_file'], dbcfgs['hbase_lib_path'], dbcfgs['traf_user'], dbcfgs['traf_version'], dbcfgs['dcs_cnt_per_node'])

    run_cmd('mkdir -p %s' % TRAF_CFG_DIR)
    write_file(TRAF_CFG_FILE, trafodion_config)

    if 'APACHE' in distro:
        extra_config = """
export HADOOP_PREFIX=%s
export HBASE_HOME=%s
export HIVE_HOME=%s
export PATH=$PATH:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin:$HBASE_HOME/bin
        """ % (dbcfgs['hadoop_home'], dbcfgs['hbase_home'], dbcfgs['hive_home'])
        append_file(TRAFODION_CFG_FILE, extra_config)

    # set permission
    run_cmd('chown -R %s:%s %s*' % (traf_user, traf_group, TRAF_CFG_DIR))

    # set ulimits for trafodion user
    ulimits_config = '''
# Trafodion settings
%s   soft   core unlimited
%s   hard   core unlimited
%s   soft   memlock unlimited
%s   hard   memlock unlimited
%s   soft   nofile 32768
%s   hard   nofile 65536
%s   soft   nproc 100000
%s   hard   nproc 100000
%s   soft nofile 8192
%s   hard nofile 65535
''' % ((traf_user,) * 10)

    write_file(ulimits_file, ulimits_config)

    # change permission for hsperfdata
    if os.path.exists(TRAF_HSPERFDATA_FILE):
        run_cmd('chown -R %s:%s %s' % (traf_user, traf_group, TRAF_HSPERFDATA_FILE))

    # clean up unused key file at the last step
    run_cmd('rm -rf %s{,.pub}' % SSHKEY_FILE)

    print 'Setup trafodion user successfully!'
Exemplo n.º 44
0
def run():
    """ create trafodion user, bashrc, setup passwordless SSH """
    dbcfgs = json.loads(dbcfgs_json)

    distro = dbcfgs['distro']
    if 'CDH' in distro:
        hadoop_type = 'cloudera'
    elif 'HDP' in distro:
        hadoop_type = 'hortonworks'
    elif 'APACHE' in distro:
        hadoop_type = 'apache'

    home_dir = get_default_home()
    # customize trafodion home dir
    if dbcfgs.has_key('home_dir') and dbcfgs['home_dir']:
        home_dir = dbcfgs['home_dir']

    traf_user = dbcfgs['traf_user']
    traf_user_dir = '%s/%s' % (home_dir, traf_user)
    traf_dirname = dbcfgs['traf_dirname']
    traf_home = '%s/%s' % (traf_user_dir, traf_dirname)
    traf_log = dbcfgs['traf_log']
    traf_var = dbcfgs['traf_var']

    hbase_xml_file = dbcfgs['hbase_xml_file']
    auth_key_file = '%s/.ssh/authorized_keys' % traf_user_dir
    ssh_cfg_file = '%s/.ssh/config' % traf_user_dir
    ulimits_file = '/etc/security/limits.d/%s.conf' % traf_user

    # create trafodion user and group
    if cmd_output('getent passwd %s' % traf_user):
        # trafodion user exists, set actual trafodion group
        traf_group = cmd_output('id -ng %s' % traf_user)
    else:
        # default trafodion group
        traf_group = traf_user
        if not cmd_output('getent group %s' % traf_group):
            run_cmd('groupadd %s > /dev/null 2>&1' % traf_group)
        traf_pwd = dbcfgs['traf_pwd']
        run_cmd('useradd --shell /bin/bash -m %s -g %s --home %s --password "$(openssl passwd %s)"' % (traf_user, traf_group, traf_user_dir, traf_pwd))
    # hbase group is generally either hbase or hadoop, depending on distro
    if cmd_output('getent group hbase'):
        cmd_output('/usr/sbin/usermod -a -G hbase %s' % traf_user)
    if cmd_output('getent group hadoop'):
        cmd_output('/usr/sbin/usermod -a -G hadoop %s' % traf_user)
    if cmd_output('getent group hive'):
        cmd_output('/usr/sbin/usermod -a -G hive %s' % traf_user)

    if not os.path.exists(traf_user_dir):
        run_cmd('mkdir -p %s' % traf_user_dir)
        run_cmd('chmod 700 %s' % traf_user_dir)

    # set ssh key
    run_cmd_as_user(traf_user, 'echo -e "y" | ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')
    # the key is generated in copy_file script running on the installer node
    run_cmd('cp %s{,.pub} %s/.ssh/' % (SSHKEY_FILE, traf_user_dir))

    run_cmd_as_user(traf_user, 'cat ~/.ssh/id_rsa.pub > %s' % auth_key_file)
    run_cmd('chmod 644 %s' % auth_key_file)

    ssh_cfg = 'StrictHostKeyChecking=no\nNoHostAuthenticationForLocalhost=yes\n'
    with open(ssh_cfg_file, 'w') as f:
        f.write(ssh_cfg)
    run_cmd('chmod 600 %s' % ssh_cfg_file)

    run_cmd('chown -R %s:%s %s/.ssh/' % (traf_user, traf_group, traf_user_dir))

    hb = ParseXML(hbase_xml_file)
    zk_nodes = hb.get_property('hbase.zookeeper.quorum')
    zk_port = hb.get_property('hbase.zookeeper.property.clientPort')
    # set trafodion_config
    nodes = dbcfgs['node_list'].split(',')
    trafodion_config = """
export TRAF_HOME="%s"
export TRAF_VAR="%s"
export TRAF_CONF="%s"
export TRAF_LOG="%s"
export JAVA_HOME="%s"
export node_count="%s"
export HADOOP_TYPE="%s"
export ENABLE_HA="%s"
export ZOOKEEPER_NODES="%s"
export ZOOKEEPER_PORT="%s"
export SECURE_HADOOP="%s"
export CLUSTERNAME="%s"
""" % (traf_home, traf_var, TRAF_CFG_DIR, traf_log, dbcfgs['java_home'], str(len(nodes)), hadoop_type, dbcfgs['enable_ha'],
       zk_nodes, zk_port, dbcfgs['secure_hadoop'], socket.gethostname())

    # save additonal configs for elastic
    trafodion_config += """
export hbase_xml_file="%s"
export hbase_lib_path="%s"
export traf_user="******"
export traf_version="%s"
export dcs_cnt_per_node="%s"
""" % (dbcfgs['hbase_xml_file'], dbcfgs['hbase_lib_path'], dbcfgs['traf_user'], dbcfgs['traf_version'], dbcfgs['dcs_cnt_per_node'])

    # save additonal configs for multi instance support
    trafodion_config += """
export TRAF_CLUSTER_NAME="%s"
export TRAF_INSTANCE_NAME="%s"
export TRAF_CLUSTER_ID="%s"
export TRAF_INSTANCE_ID="%s"
export TRAF_ROOT_ZNODE="/%s"
""" % (dbcfgs['cluster_name'], dbcfgs['traf_instance_name'], dbcfgs['traf_cluster_id'], dbcfgs['traf_instance_id'], dbcfgs['traf_user'])

    run_cmd('mkdir -p %s' % TRAF_CFG_DIR)
    write_file(TRAF_CFG_FILE, trafodion_config)

    if 'APACHE' in distro:
        extra_config = """
export HADOOP_PREFIX=%s
export HBASE_HOME=%s
export HIVE_HOME=%s
export PATH=$PATH:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin:$HBASE_HOME/bin
        """ % (dbcfgs['hadoop_home'], dbcfgs['hbase_home'], dbcfgs['hive_home'])
        append_file(TRAFODION_CFG_FILE, extra_config)

    # set permission
    run_cmd('chown -R %s:%s %s*' % (traf_user, traf_group, TRAF_CFG_DIR))

    # set ulimits for trafodion user
    ulimits_config = '''
# Trafodion settings
%s   soft   core unlimited
%s   hard   core unlimited
%s   soft   memlock unlimited
%s   hard   memlock unlimited
%s   soft   nofile 32768
%s   hard   nofile 65536
%s   soft   nproc 100000
%s   hard   nproc 100000
%s   soft nofile 8192
%s   hard nofile 65535
''' % ((traf_user,) * 10)

    write_file(ulimits_file, ulimits_config)

    # change permission for hsperfdata
    if os.path.exists(TRAF_HSPERFDATA_FILE):
        run_cmd('chown -R %s:%s %s' % (traf_user, traf_group, TRAF_HSPERFDATA_FILE))

    # clean up unused key file at the last step
    run_cmd('rm -rf %s{,.pub}' % SSHKEY_FILE)

    print 'Setup trafodion user successfully!'
Exemplo n.º 45
0
 def get_rootdisk_free(self):
     """Return the free space (df -h human-readable form) on the root filesystem."""
     # 'df -h' row for "/" only; field 4 is the Avail column.
     return cmd_output('df -h|grep "\/$" | awk \'{print $4}\'').strip()
Exemplo n.º 46
0
def run():
    """ create trafodion user, bashrc, setup passwordless SSH """
    dbcfgs = json.loads(dbcfgs_json)

    # map the Hadoop distro string to the type name used in bashrc
    DISTRO = dbcfgs['distro']
    if 'CDH' in DISTRO:
        hadoop_type = 'cloudera'
    elif 'HDP' in DISTRO:
        hadoop_type = 'hortonworks'
    elif 'APACHE' in DISTRO:
        hadoop_type = 'apache'
    # NOTE(review): hadoop_type stays unbound for any other distro value --
    # presumably 'distro' is validated upstream; confirm.

    TRAF_USER = dbcfgs['traf_user']
    TRAF_PWD = dbcfgs['traf_pwd']
    TRAF_GROUP = TRAF_USER
    # default home prefix from the system's useradd config (usually /home)
    TRAF_HOME = cmd_output(
        'cat /etc/default/useradd |grep HOME |cut -d "=" -f 2').strip()
    TRAF_USER_DIR = '%s/%s' % (TRAF_HOME, TRAF_USER)
    # versioned install root under the trafodion user's home
    SQ_ROOT = '%s/%s-%s' % (TRAF_USER_DIR, dbcfgs['traf_basename'],
                            dbcfgs['traf_version'])

    KEY_FILE = '/tmp/id_rsa'
    AUTH_KEY_FILE = '%s/.ssh/authorized_keys' % TRAF_USER_DIR
    SSH_CFG_FILE = '%s/.ssh/config' % TRAF_USER_DIR
    BASHRC_TEMPLATE = '%s/bashrc.template' % TMP_DIR
    BASHRC_FILE = '%s/.bashrc' % TRAF_USER_DIR
    ULIMITS_FILE = '/etc/security/limits.d/%s.conf' % TRAF_USER
    HSPERFDATA_FILE = '/tmp/hsperfdata_trafodion'

    # create trafodion user and group (idempotent: skip what already exists)
    if not cmd_output('getent group %s' % TRAF_GROUP):
        run_cmd('groupadd %s > /dev/null 2>&1' % TRAF_GROUP)

    if not cmd_output('getent passwd %s' % TRAF_USER):
        run_cmd(
            'useradd --shell /bin/bash -m %s -g %s --password "$(openssl passwd %s)"'
            % (TRAF_USER, TRAF_GROUP, TRAF_PWD))
    elif not os.path.exists(TRAF_USER_DIR):
        # user exists but home dir is missing -- recreate it
        run_cmd('mkdir -p %s' % TRAF_USER_DIR)
        run_cmd('chmod 700 %s' % TRAF_USER_DIR)

    # set ssh key
    run_cmd_as_user(TRAF_USER,
                    'echo -e "y" | ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')
    # the key is generated in copy_file script running on the installer node
    run_cmd('cp %s{,.pub} %s/.ssh/' % (KEY_FILE, TRAF_USER_DIR))

    # authorize the node's own key so passwordless ssh to self works
    run_cmd_as_user(TRAF_USER, 'cat ~/.ssh/id_rsa.pub > %s' % AUTH_KEY_FILE)
    run_cmd('chmod 644 %s' % AUTH_KEY_FILE)

    # disable host-key prompts for intra-cluster ssh
    ssh_cfg = 'StrictHostKeyChecking=no\nNoHostAuthenticationForLocalhost=yes\n'
    with open(SSH_CFG_FILE, 'w') as f:
        f.write(ssh_cfg)
    run_cmd('chmod 600 %s' % SSH_CFG_FILE)

    run_cmd('chown -R %s:%s %s/.ssh/' % (TRAF_USER, TRAF_GROUP, TRAF_USER_DIR))

    # set bashrc: substitute placeholders in the template in place
    nodes = dbcfgs['node_list'].split(',')
    change_items = {
        '{{ java_home }}': dbcfgs['java_home'],
        '{{ sq_home }}': SQ_ROOT,
        '{{ hadoop_type }}': hadoop_type,
        '{{ node_list }}': ' '.join(nodes),
        '{{ node_count }}': str(len(nodes)),
        '{{ enable_ha }}': dbcfgs['enable_ha'],
        # pdsh-style node list, e.g. " -w n1 -w n2"
        '{{ my_nodes }}': ' -w ' + ' -w '.join(nodes)
    }

    mod_file(BASHRC_TEMPLATE, change_items)

    if 'APACHE' in DISTRO:
        # vanilla Apache has no distro-managed env, add hadoop paths manually
        bashrc_content = """
export HADOOP_PREFIX=%s
export HBASE_HOME=%s
export PATH=$PATH:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin:$HBASE_HOME/bin
        """ % (dbcfgs['hadoop_home'], dbcfgs['hbase_home'])
        append_file(BASHRC_TEMPLATE, bashrc_content, position='HADOOP_TYPE')

    # backup bashrc if exsits
    if os.path.exists(BASHRC_FILE):
        run_cmd('cp %s %s.bak' % ((BASHRC_FILE, ) * 2))

    # copy bashrc to trafodion's home
    run_cmd('cp %s %s' % (BASHRC_TEMPLATE, BASHRC_FILE))
    run_cmd('chown -R %s:%s %s*' % (TRAF_USER, TRAF_GROUP, BASHRC_FILE))

    # set ulimits for trafodion user
    ulimits_config = '''
# Trafodion settings
%s   soft   core unlimited
%s   hard   core unlimited
%s   soft   memlock unlimited
%s   hard   memlock unlimited
%s   soft   nofile 32768
%s   hard   nofile 65536
%s   soft   nproc 100000
%s   hard   nproc 100000
%s   soft nofile 8192
%s   hard nofile 65535
hbase soft nofile 8192
''' % ((TRAF_USER, ) * 10)

    with open(ULIMITS_FILE, 'w') as f:
        f.write(ulimits_config)

    # change permission for hsperfdata
    if os.path.exists(HSPERFDATA_FILE):
        run_cmd('chown -R %s:%s %s' % (TRAF_USER, TRAF_GROUP, HSPERFDATA_FILE))

    # clean up unused key file at the last step
    run_cmd('rm -rf %s{,.pub}' % KEY_FILE)

    print 'Setup trafodion user successfully!'
Exemplo n.º 47
0
 def __init__(self, dbcfgs):
     """Snapshot system information used by the subsequent checks."""
     self.dbcfgs = dbcfgs
     self.version = Version()
     # capture /proc and sysctl output once up front
     self.CPUINFO = cmd_output('cat /proc/cpuinfo')
     self.MEMINFO = cmd_output('cat /proc/meminfo')
     self.SYSCTLINFO = cmd_output('sysctl -a')
Exemplo n.º 48
0
def run():
    """ setup Kerberos security """
    dbcfgs = json.loads(dbcfgs_json)

    distro = dbcfgs['distro']
    admin_principal = dbcfgs['admin_principal']
    admin_passwd = dbcfgs['kdcadmin_pwd']
    kdc_server = dbcfgs['kdc_server']
    cluster_name = dbcfgs['cluster_name']
    # maxlife = dbcfgs['max_lifetime']
    # max_renewlife = dbcfgs['max_renew_lifetime']
    maxlife = '24hours'
    max_renewlife = '7days'
    kadmin_cmd = 'kadmin -p %s -w %s -s %s -q' % (admin_principal, admin_passwd, kdc_server)

    host_name = socket.getfqdn()
    traf_user = dbcfgs['traf_user']
    hdfs_user = '******'
    hbase_user = '******'
    realm = re.match('.*@(.*)', admin_principal).groups()[0]
    traf_keytab_dir = '/etc/%s/keytab' % traf_user
    traf_keytab = '%s/%s.keytab' % (traf_keytab_dir, traf_user)
    traf_principal = '%s/%s@%s' % (traf_user, host_name, realm)
    hbase_principal = '%s/%s@%s' % (hbase_user, host_name, realm)

    ### setting start ###
    print 'Checking KDC server connection'
    run_cmd('%s listprincs' % kadmin_cmd)

    # create principals and keytabs for trafodion user
    principal_exists = cmd_output('%s listprincs | grep -c %s' % (kadmin_cmd, traf_principal))
    if int(principal_exists) == 0: # not exist
        run_cmd('%s \'addprinc -randkey %s\'' % (kadmin_cmd, traf_principal))
        # Adjust principal's maxlife and maxrenewlife
        run_cmd('%s \'modprinc -maxlife %s -maxrenewlife %s\' %s >/dev/null 2>&1' % (kadmin_cmd, maxlife, max_renewlife, traf_principal))

    run_cmd('mkdir -p %s' % traf_keytab_dir)

    # TODO: need skip add keytab if exist?
    print 'Create keytab file for trafodion user'
    run_cmd('%s \'ktadd -k %s %s\'' % (kadmin_cmd, traf_keytab, traf_principal))
    run_cmd('chown %s %s' % (traf_user, traf_keytab))
    run_cmd('chmod 400 %s' % traf_keytab)

    # create principals for hdfs/hbase user
    print 'Create principals for hdfs/hbase user'
    if 'CDH' in distro:
        hdfs_keytab = cmd_output('find /var/run/cloudera-scm-agent/process/ -name hdfs.keytab | head -n 1')
        hbase_keytab = cmd_output('find /var/run/cloudera-scm-agent/process/ -name hbase.keytab | head -n 1')
        hdfs_principal = '%s/%s@%s' % (hdfs_user, host_name, realm)
    elif 'HDP' in distro:
        hdfs_keytab = '/etc/security/keytabs/hdfs.headless.keytab'
        hbase_keytab = '/etc/security/keytabs/hbase.service.keytab'
        hdfs_principal = '%s-%s@%s' % (hdfs_user, cluster_name, realm)

    sudo_prefix = get_sudo_prefix()
    kinit_cmd_ptr = '%s su - %s -s /bin/bash -c "kinit -kt %s %s"'
    run_cmd(kinit_cmd_ptr % (sudo_prefix, hdfs_user, hdfs_keytab, hdfs_principal))
    run_cmd(kinit_cmd_ptr % (sudo_prefix, hbase_user, hbase_keytab, hbase_principal))

    print 'Done creating principals and keytabs'

    kinit_bashrc = """

# ---------------------------------------------------------------
# if needed obtain and cache the Kerberos ticket-granting ticket
# start automatic ticket renewal process
# ---------------------------------------------------------------
klist -s >/dev/null 2>&1
if [[ $? -eq 1 ]]; then
    kinit -kt %s %s >/dev/null 2>&1
fi

# ---------------------------------------------------------------
# Start trafodion kerberos ticket manager process
# ---------------------------------------------------------------
$TRAF_HOME/sql/scripts/krb5service start >/dev/null 2>&1
""" % (traf_keytab, traf_principal)

    traf_bashrc = '/home/%s/.bashrc' % traf_user
    with open(traf_bashrc, 'a') as f:
        f.write(kinit_bashrc)
Exemplo n.º 49
0
 def check_ssh_pam(self):
     """Verify that sshd_config enables PAM ('UsePAM yes'); abort otherwise."""
     pam_enabled = cmd_output('grep "^UsePAM yes" %s' % SSH_CONFIG_FILE)
     if not pam_enabled:
         err('\'UsePAM\' should be set to \'yes\' in %s' % SSH_CONFIG_FILE)
Exemplo n.º 50
0
 def check_ssh_pam(self):
     """Ensure PAM is enabled in the sshd configuration file."""
     # grep prints the matching line; empty output means UsePAM is not 'yes'
     if cmd_output('grep "^UsePAM yes" %s' % SSH_CONFIG_FILE) == '':
         err('\'UsePAM\' should be set to \'yes\' in %s' % SSH_CONFIG_FILE)