Example #1
def run(pwd):
    """ gen ssh key on local and copy to all nodes
        copy traf package file from local to all nodes
    """
    dbcfgs = json.loads(dbcfgs_json)
    hosts = dbcfgs['node_list'].split(',')
    traf_package = dbcfgs['traf_package']

    # save db configs to a tmp file and copy to all trafodion nodes
    dbcfgs_file = '/tmp/dbcfgs'
    p = ParseJson(dbcfgs_file)
    # remove passwords before saving the config file
    try:
        dbcfgs.pop('mgr_pwd')
        dbcfgs.pop('traf_pwd')
        dbcfgs.pop('kdcadmin_pwd')
    except KeyError:
        pass
    p.save(dbcfgs)

    key_file = '/tmp/id_rsa'
    run_cmd('sudo -n rm -rf %s*' % key_file)
    run_cmd('sudo -n echo -e "y" | ssh-keygen -t rsa -N "" -f %s' % key_file)

    files = [key_file, key_file+'.pub', traf_package, dbcfgs_file]

    remote_insts = [Remote(h, pwd=pwd) for h in hosts]
    threads = [Thread(target=r.copy, args=(files, '/tmp')) for r in remote_insts]
    for thread in threads: thread.start()
    for thread in threads: thread.join()
    for r in remote_insts:
        if r.rc != 0: err('Failed to copy files to %s' % r.host)
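
The helpers used above (ParseJson, Remote, run_cmd, err) live in the installer's common module, which this example page does not show. Below is a minimal sketch of the interfaces the snippet assumes; the implementations are hypothetical and for illustration only.

import subprocess
import sys

def run_cmd(cmd):
    # assumed helper: run a shell command locally and return its output
    return subprocess.check_output(cmd, shell=True)

def err(msg):
    # assumed helper: print an error message and stop
    sys.exit(msg)

class Remote(object):
    # assumed helper: wraps scp/ssh to a single host and records the
    # return code of the last operation in self.rc
    def __init__(self, host, user='', pwd=''):
        self.host, self.user, self.pwd = host, user, pwd
        self.rc = 0

    def copy(self, files, dest_dir):
        self.rc = subprocess.call(
            ['scp'] + list(files) + ['%s:%s' % (self.host, dest_dir)])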
Example #2
def run():
    dbcfgs = json.loads(dbcfgs_json)
    if 'APACHE' in dbcfgs['distro']:
        modcfgs = ParseJson(MODCFG_FILE).load()
        MOD_CFGS = modcfgs['MOD_CFGS']

        hdfs_xml_file = dbcfgs['hdfs_xml_file']
        hbase_xml_file = dbcfgs['hbase_xml_file']

        hbasexml = ParseXML(hbase_xml_file)
        for key, value in MOD_CFGS['hbase-site'].items():
            hbasexml.add_property(key, value)
        hbasexml.write_xml()

        hdfsxml = ParseXML(hdfs_xml_file)
        for key, value in MOD_CFGS['hdfs-site'].items():
            hdfsxml.add_property(key, value)
        hdfsxml.write_xml()

        print 'Apache Hadoop modification completed'
        first_node = dbcfgs['first_rsnode']
        local_host = socket.gethostname()
        if first_node in local_host:
            hadoop_home = dbcfgs['hadoop_home']
            hbase_home = dbcfgs['hbase_home']
            # stop HBase first, then HDFS
            run_cmd(hbase_home + '/bin/stop-hbase.sh')
            run_cmd(hadoop_home + '/sbin/stop-dfs.sh')
            # start HDFS first, then HBase
            run_cmd(hadoop_home + '/sbin/start-dfs.sh')
            run_cmd(hbase_home + '/bin/start-hbase.sh')

            print 'Apache Hadoop restart completed'
    else:
        print 'no apache distribution found, skipping'
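
The MOD_CFGS mapping read from MODCFG_FILE drives both loops above: each top-level key names a Hadoop configuration file and maps to the properties to add to it. A hypothetical fragment, with illustrative property names only (the real values ship with the installer):

MOD_CFGS = {
    'hbase-site': {
        'hbase.snapshot.enabled': 'true',
    },
    'hdfs-site': {
        'dfs.namenode.acls.enabled': 'true',
    },
}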
Example #3
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# @@@ END COPYRIGHT @@@

### this script should be run on local node ###

import time
import sys
import json
from constants import MODCFG_FILE
from common import ParseHttp, ParseJson, err, info, retry

modcfgs = ParseJson(MODCFG_FILE).load()

MOD_CFGS = modcfgs['MOD_CFGS']
HBASE_MASTER_CONFIG = modcfgs['HBASE_MASTER_CONFIG']
HBASE_RS_CONFIG = modcfgs['HBASE_RS_CONFIG']
HDFS_CONFIG = modcfgs['HDFS_CONFIG']
ZK_CONFIG = modcfgs['ZK_CONFIG']

CLUSTER_URL_PTR = '%s/api/v1/clusters/%s'
RESTART_URL_PTR = CLUSTER_URL_PTR + '/commands/restart'
RESTART_SRV_URL_PTR = CLUSTER_URL_PTR + '/services/%s/commands/restart'
SRVCFG_URL_PTR = CLUSTER_URL_PTR + '/services/%s/config'
RSGRP_BASEURL_PTR = '%s/api/v6/clusters/%s/services/%s/roleConfigGroups'
DEPLOY_CFG_URL_PTR = '%s/api/v6/clusters/%s/commands/deployClientConfig'
CMD_STAT_URL_PTR = '%s/api/v1/commands/%s'
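
The *_URL_PTR constants are printf-style templates for the cluster manager's REST endpoints; they are expanded with the base URL, cluster name and service name before the HTTP requests are issued. A hypothetical expansion (host and names are made up):

base_url = 'http://cm-host:7180'
cluster = 'mycluster'
service = 'hbase'

srvcfg_url = SRVCFG_URL_PTR % (base_url, cluster, service)
# -> http://cm-host:7180/api/v1/clusters/mycluster/services/hbase/config
restart_url = RESTART_SRV_URL_PTR % (base_url, cluster, service)
# -> http://cm-host:7180/api/v1/clusters/mycluster/services/hbase/commands/restart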
Example #4
def run(dbcfgs, options, mode='install', pwd=''):
    """ main entry
        mode: install/discover
    """
    STAT_FILE = mode + '.status'
    LOG_FILE = '%s/logs/%s_%s.log' % (INSTALLER_LOC, mode,
                                      time.strftime('%Y%m%d_%H%M'))
    logger = get_logger(LOG_FILE)

    verbose = bool(getattr(options, 'verbose', False))
    upgrade = bool(getattr(options, 'upgrade', False))
    user = getattr(options, 'user', '') or ''
    threshold = getattr(options, 'fork', None) or 10

    script_output = []  # script output array
    conf = ParseJson(SCRCFG_FILE).load()
    script_cfgs = conf[mode]

    dbcfgs_json = json.dumps(dbcfgs)
    hosts = dbcfgs['node_list'].split(',')

    # collect scripts that can be skipped for this run
    skipped_scripts = []
    if upgrade:
        skipped_scripts += [
            'hadoop_mods', 'apache_mods', 'apache_restart', 'traf_dep',
            'traf_kerberos'
        ]

    if dbcfgs['secure_hadoop'] == 'N':
        skipped_scripts += ['traf_kerberos']

    if dbcfgs['traf_start'].upper() == 'N':
        skipped_scripts += ['traf_start']

    if dbcfgs['ldap_security'].upper() == 'N':
        skipped_scripts += ['traf_ldap']

    if 'APACHE' in dbcfgs['distro']:
        skipped_scripts += ['hadoop_mods']
    else:
        skipped_scripts += ['apache_mods', 'apache_restart']

    # write an ssh config file on the installer node to skip known-hosts verification
    SSH_CFG_FILE = os.environ['HOME'] + '/.ssh/config'
    ssh_cfg = 'StrictHostKeyChecking=no\nNoHostAuthenticationForLocalhost=yes\n'
    with open(SSH_CFG_FILE, 'w') as f:
        f.write(ssh_cfg)
    run_cmd('chmod 600 %s' % SSH_CFG_FILE)

    def run_local_script(script, json_string, req_pwd):
        cmd = '%s/%s \'%s\'' % (INSTALLER_LOC, script, json_string)

        # pass the ssh password to sub scripts which need SSH password
        if req_pwd: cmd += ' ' + pwd

        if verbose: print cmd

        # stdout on screen
        p = subprocess.Popen(cmd, stderr=subprocess.PIPE, shell=True)
        stdout, stderr = p.communicate()

        rc = p.returncode
        if rc != 0:
            msg = 'Failed to run \'%s\'' % script
            if stderr:
                msg += ': ' + stderr
                print stderr
            logger.error(msg)
            state_fail('localhost: Script [%s]' % script)
            exit(rc)
        else:
            state_ok('Script [%s]' % script)
            logger.info('Script [%s] ran successfully!' % script)

        return stdout

    # run sub scripts
    try:
        remote_instances = []
        if mode == 'discover':
            remote_instances = [
                RemoteRun(host, logger, user=user, pwd=pwd, quiet=True)
                for host in hosts
            ]
        else:
            remote_instances = [
                RemoteRun(host, logger, user=user, pwd=pwd) for host in hosts
            ]
        first_instance = remote_instances[0]
        for instance in remote_instances:
            if instance.host == dbcfgs['first_rsnode']:
                first_rs_instance = instance
                break

        logger.info(' ***** %s Start *****' % mode)
        for cfg in script_cfgs:
            script = cfg['script']
            node = cfg['node']
            desc = cfg['desc']
            # run as the trafodion user only when the script config asks for it
            run_user = ''
            if cfg.get('run_as_traf') == 'yes':
                run_user = dbcfgs['traf_user']

            # pass the ssh password only to scripts that request it
            req_pwd = cfg.get('req_pwd') == 'yes'

            status = Status(STAT_FILE, script)
            if status.get_status():
                msg = 'Script [%s] had already been executed' % script
                state_skip(msg)
                logger.info(msg)
                continue

            if script.split('.')[0] in skipped_scripts:
                continue
            else:
                print '\nTASK: %s %s' % (desc, (83 - len(desc)) * '*')

            #TODO: timeout exit
            if node == 'local':
                run_local_script(script, dbcfgs_json, req_pwd)
            elif node == 'first':
                first_instance.run_script(script,
                                          run_user,
                                          dbcfgs_json,
                                          verbose=verbose)
            elif node == 'first_rs':
                first_rs_instance.run_script(script,
                                             run_user,
                                             dbcfgs_json,
                                             verbose=verbose)
            elif node == 'all':
                num_insts = len(remote_instances)
                if num_insts > threshold:
                    # split the instances into chunks of at most `threshold` hosts
                    piece = num_insts // threshold
                    parted_remote_instances = [
                        remote_instances[threshold * i:threshold * (i + 1)]
                        for i in range(piece)
                    ]
                    parted_remote_instances.append(
                        remote_instances[threshold * piece:])
                else:
                    parted_remote_instances = [remote_instances]

                for parted_remote_inst in parted_remote_instances:
                    threads = [
                        Thread(target=r.run_script,
                               args=(script, run_user, dbcfgs_json, verbose))
                        for r in parted_remote_inst
                    ]
                    for t in threads:
                        t.start()
                    for t in threads:
                        t.join()

                    if sum([r.rc for r in parted_remote_inst]) != 0:
                        err_m(
                            'Script failed to run on one or more nodes, exiting ...\nCheck log file %s for details.'
                            % LOG_FILE)

                    script_output += [{
                        r.host: r.stdout.strip()
                    } for r in parted_remote_inst]

            else:
                # should never get here: unknown node type in the script config
                err_m('Invalid configuration for %s' % SCRCFG_FILE)

            status.set_status()
    except KeyboardInterrupt:
        err_m('User quit')

    # remove status file if all scripts run successfully
    os.remove(STAT_FILE)

    return script_output
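
A hypothetical invocation sketch for run(): the Options class mirrors the attributes the function probes, and the dbcfgs dict carries only the keys this function reads. Every value below is a made-up example, not an installer default.

class Options(object):
    verbose = False
    upgrade = False
    user = 'installer'
    fork = 10

dbcfgs = {
    'node_list': 'node1,node2',
    'first_rsnode': 'node1',
    'distro': 'CDH',
    'secure_hadoop': 'N',
    'traf_start': 'Y',
    'ldap_security': 'N',
    'traf_user': 'trafodion',
}

script_output = run(dbcfgs, Options(), mode='install', pwd='ssh_password')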