Example No. 1
def handle_dir(dir, hash_check, is_last_iteration):
  audio_files, _ = get_audio_files(dir)
  hash_actual = calc_album_hash(dir) # todo: handle NotAlbumException
  if hash_check:
    assert type(hash_check) == str, type(hash_check)
    assert type(hash_actual) == str, type(hash_actual)
    assert hash_check == hash_actual, '%s vs %s' % (hash_check, hash_actual)

  db_album_file = os.path.join(DB_ROOT, NAROD_VOLUME, hash_actual)
  if os.path.exists(db_album_file):
    #append_file(FAILURE_LIST, '%s %s' % (hash1, dir))
    print ''
    print 'WARNING! db_album_file already exists: %s' % db_album_file
    print 'No zip, no upload'
    print ''
  else:
    archive_file = os.path.join(ARCHIVE_TEMP_DIR, 'arkiv.narod.ru %s.zip' % hash_actual)
    print 'zipping into %s...' % archive_file
    if ACTUAL_ZIP_AND_UPLOAD:
      archive_dir(dir, archive_file)

    sys.stdout.flush()

    narod_url = do_upload(archive_file)
    if narod_url:
      if narod_url.find(hash_actual) < 0:
        raise Exception('uploaded url does not contain hash! %s in %s' % (hash_actual, narod_url))
      if ACTUAL_ZIP_AND_UPLOAD:
        album_obj = scan_album_from_dir(dir)
        album_obj['url'] = narod_url
        album_obj['total_size'] = os.stat(archive_file).st_size
        #print 'WARNING! db file not written!'
        save_db_album(DB_ROOT, NAROD_VOLUME, album_obj)
      append_file(SUCCESS_LIST, '%s %s' % (hash_actual, dir))

      if not is_last_iteration and ACTUAL_ZIP_AND_UPLOAD:
        print 'sleeping for %s seconds' % SLEEP_SEC
        sleep(SLEEP_SEC)

    else:
      append_file(FAILURE_LIST, '%s %s' % (hash_actual, dir))

    if os.path.exists(archive_file):
      os.remove(archive_file)
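
All of these examples call an append_file helper that is defined elsewhere in their projects. As a point of reference, a minimal sketch of what such a helper is assumed to look like (open in append mode, write one line) is shown below; the real implementations may differ in encoding and newline handling.

def append_file(path, text):
    # assumed behavior: append one line, creating the file if it does not exist
    with open(path, 'a') as f:
        f.write(text + '\n')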
Example No. 2
def error(e):
    print e
    common.append_file(ERROR_PATH,
                       '\n[' + common.now_time() + ']' + str(e) + '\n')
Example No. 3
def log(c):
    print c
    common.append_file(LOG_PATH,
                       '\n[' + common.now_time() + ']' + str(c) + '\n')
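
Examples 2 and 3 wrap that helper into timestamped error/activity loggers. common.now_time is not shown, but it is assumed to return a formatted timestamp, roughly like this hypothetical sketch:

import time

def now_time():
    # assumed format; the real common.now_time may use a different layout
    return time.strftime('%Y-%m-%d %H:%M:%S')

# e.g. log('scan started') would append a line such as:
# [2017-05-01 12:00:00]scan started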
Example No. 4
def run():
    """ create trafodion user, bashrc, setup passwordless SSH """
    dbcfgs = json.loads(dbcfgs_json)

    DISTRO = dbcfgs['distro']
    if 'CDH' in DISTRO:
        hadoop_type = 'cloudera'
    elif 'HDP' in DISTRO:
        hadoop_type = 'hortonworks'
    elif 'APACHE' in DISTRO:
        hadoop_type = 'apache'

    TRAF_USER = dbcfgs['traf_user']
    TRAF_PWD = dbcfgs['traf_pwd']
    TRAF_GROUP = TRAF_USER
    TRAF_HOME = cmd_output(
        'cat /etc/default/useradd |grep HOME |cut -d "=" -f 2').strip()
    TRAF_USER_DIR = '%s/%s' % (TRAF_HOME, TRAF_USER)
    SQ_ROOT = '%s/%s-%s' % (TRAF_USER_DIR, dbcfgs['traf_basename'],
                            dbcfgs['traf_version'])

    KEY_FILE = '/tmp/id_rsa'
    AUTH_KEY_FILE = '%s/.ssh/authorized_keys' % TRAF_USER_DIR
    SSH_CFG_FILE = '%s/.ssh/config' % TRAF_USER_DIR
    BASHRC_TEMPLATE = '%s/bashrc.template' % TMP_DIR
    BASHRC_FILE = '%s/.bashrc' % TRAF_USER_DIR
    ULIMITS_FILE = '/etc/security/limits.d/%s.conf' % TRAF_USER
    HSPERFDATA_FILE = '/tmp/hsperfdata_trafodion'

    # create trafodion user and group
    if not cmd_output('getent group %s' % TRAF_GROUP):
        run_cmd('groupadd %s > /dev/null 2>&1' % TRAF_GROUP)

    if not cmd_output('getent passwd %s' % TRAF_USER):
        run_cmd(
            'useradd --shell /bin/bash -m %s -g %s --password "$(openssl passwd %s)"'
            % (TRAF_USER, TRAF_GROUP, TRAF_PWD))
    elif not os.path.exists(TRAF_USER_DIR):
        run_cmd('mkdir -p %s' % TRAF_USER_DIR)
        run_cmd('chmod 700 %s' % TRAF_USER_DIR)

    # set ssh key
    run_cmd_as_user(TRAF_USER,
                    'echo -e "y" | ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')
    # the key is generated by the copy_file script running on the installer node
    run_cmd('cp %s{,.pub} %s/.ssh/' % (KEY_FILE, TRAF_USER_DIR))

    run_cmd_as_user(TRAF_USER, 'cat ~/.ssh/id_rsa.pub > %s' % AUTH_KEY_FILE)
    run_cmd('chmod 644 %s' % AUTH_KEY_FILE)

    ssh_cfg = 'StrictHostKeyChecking=no\nNoHostAuthenticationForLocalhost=yes\n'
    with open(SSH_CFG_FILE, 'w') as f:
        f.write(ssh_cfg)
    run_cmd('chmod 600 %s' % SSH_CFG_FILE)

    run_cmd('chown -R %s:%s %s/.ssh/' % (TRAF_USER, TRAF_GROUP, TRAF_USER_DIR))

    # set bashrc
    nodes = dbcfgs['node_list'].split(',')
    change_items = {
        '{{ java_home }}': dbcfgs['java_home'],
        '{{ sq_home }}': SQ_ROOT,
        '{{ hadoop_type }}': hadoop_type,
        '{{ node_list }}': ' '.join(nodes),
        '{{ node_count }}': str(len(nodes)),
        '{{ enable_ha }}': dbcfgs['enable_ha'],
        '{{ my_nodes }}': ' -w ' + ' -w '.join(nodes)
    }

    mod_file(BASHRC_TEMPLATE, change_items)

    if 'APACHE' in DISTRO:
        bashrc_content = """
export HADOOP_PREFIX=%s
export HBASE_HOME=%s
export PATH=$PATH:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin:$HBASE_HOME/bin
        """ % (dbcfgs['hadoop_home'], dbcfgs['hbase_home'])
        append_file(BASHRC_TEMPLATE, bashrc_content, position='HADOOP_TYPE')

    # back up bashrc if it exists
    if os.path.exists(BASHRC_FILE):
        run_cmd('cp %s %s.bak' % ((BASHRC_FILE, ) * 2))

    # copy bashrc to trafodion's home
    run_cmd('cp %s %s' % (BASHRC_TEMPLATE, BASHRC_FILE))
    run_cmd('chown -R %s:%s %s*' % (TRAF_USER, TRAF_GROUP, BASHRC_FILE))

    # set ulimits for trafodion user
    ulimits_config = '''
# Trafodion settings
%s   soft   core unlimited
%s   hard   core unlimited
%s   soft   memlock unlimited
%s   hard   memlock unlimited
%s   soft   nofile 32768
%s   hard   nofile 65536
%s   soft   nproc 100000
%s   hard   nproc 100000
%s   soft nofile 8192
%s   hard nofile 65535
hbase soft nofile 8192
''' % ((TRAF_USER, ) * 10)

    with open(ULIMITS_FILE, 'w') as f:
        f.write(ulimits_config)

    # change permission for hsperfdata
    if os.path.exists(HSPERFDATA_FILE):
        run_cmd('chown -R %s:%s %s' % (TRAF_USER, TRAF_GROUP, HSPERFDATA_FILE))

    # clean up unused key file at the last step
    run_cmd('rm -rf %s{,.pub}' % KEY_FILE)

    print 'Setup trafodion user successfully!'
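
Example 4 builds the user's .bashrc from a template: mod_file replaces {{ placeholder }} tokens, and for Apache distros append_file(..., position='HADOOP_TYPE') is assumed to insert the extra exports after the line matching that marker rather than at the end of the file. A rough, hypothetical sketch of a mod_file that treats the keys as regular expressions (which also covers the plain-text placeholders used here):

import re

def mod_file(path, change_items):
    # hypothetical helper: apply each pattern -> replacement pair in place
    with open(path, 'r') as f:
        content = f.read()
    for pattern, value in change_items.items():
        content = re.sub(pattern, value, content)
    with open(path, 'w') as f:
        f.write(content)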
Example No. 5
def run():
    """ create trafodion user, bashrc, setup passwordless SSH """
    dbcfgs = json.loads(dbcfgs_json)

    distro = dbcfgs['distro']
    if 'CDH' in distro:
        hadoop_type = 'cloudera'
    elif 'HDP' in distro:
        hadoop_type = 'hortonworks'
    elif 'APACHE' in distro:
        hadoop_type = 'apache'

    home_dir = get_default_home()
    # customize trafodion home dir
    if dbcfgs.has_key('home_dir') and dbcfgs['home_dir']:
        home_dir = dbcfgs['home_dir']

    traf_user = dbcfgs['traf_user']
    traf_user_dir = '%s/%s' % (home_dir, traf_user)
    traf_dirname = dbcfgs['traf_dirname']
    traf_home = '%s/%s' % (traf_user_dir, traf_dirname)
    traf_log = dbcfgs['traf_log']
    traf_var = dbcfgs['traf_var']

    hbase_xml_file = dbcfgs['hbase_xml_file']
    auth_key_file = '%s/.ssh/authorized_keys' % traf_user_dir
    ssh_cfg_file = '%s/.ssh/config' % traf_user_dir
    ulimits_file = '/etc/security/limits.d/%s.conf' % traf_user

    # create trafodion user and group
    if cmd_output('getent passwd %s' % traf_user):
        # trafodion user exists, set actual trafodion group
        traf_group = cmd_output('id -ng %s' % traf_user)
    else:
        # default trafodion group
        traf_group = traf_user
        if not cmd_output('getent group %s' % traf_group):
            run_cmd('groupadd %s > /dev/null 2>&1' % traf_group)
        traf_pwd = dbcfgs['traf_pwd']
        run_cmd('useradd --shell /bin/bash -m %s -g %s --home %s --password "$(openssl passwd %s)"' % (traf_user, traf_group, traf_user_dir, traf_pwd))
    # hbase group is generally either hbase or hadoop, depending on distro
    if cmd_output('getent group hbase'):
        cmd_output('/usr/sbin/usermod -a -G hbase %s' % traf_user)
    if cmd_output('getent group hadoop'):
        cmd_output('/usr/sbin/usermod -a -G hadoop %s' % traf_user)
    if cmd_output('getent group hive'):
        cmd_output('/usr/sbin/usermod -a -G hive %s' % traf_user)

    if not os.path.exists(traf_user_dir):
        run_cmd('mkdir -p %s' % traf_user_dir)
        run_cmd('chmod 700 %s' % traf_user_dir)

    # set ssh key
    run_cmd_as_user(traf_user, 'echo -e "y" | ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')
    # the key is generated by the copy_file script running on the installer node
    run_cmd('cp %s{,.pub} %s/.ssh/' % (SSHKEY_FILE, traf_user_dir))

    run_cmd_as_user(traf_user, 'cat ~/.ssh/id_rsa.pub > %s' % auth_key_file)
    run_cmd('chmod 644 %s' % auth_key_file)

    ssh_cfg = 'StrictHostKeyChecking=no\nNoHostAuthenticationForLocalhost=yes\n'
    with open(ssh_cfg_file, 'w') as f:
        f.write(ssh_cfg)
    run_cmd('chmod 600 %s' % ssh_cfg_file)

    run_cmd('chown -R %s:%s %s/.ssh/' % (traf_user, traf_group, traf_user_dir))

    hb = ParseXML(hbase_xml_file)
    zk_nodes = hb.get_property('hbase.zookeeper.quorum')
    zk_port = hb.get_property('hbase.zookeeper.property.clientPort')
    # set trafodion_config
    nodes = dbcfgs['node_list'].split(',')
    trafodion_config = """
export TRAF_HOME="%s"
export TRAF_VAR="%s"
export TRAF_CONF="%s"
export TRAF_LOG="%s"
export JAVA_HOME="%s"
export node_count="%s"
export HADOOP_TYPE="%s"
export ENABLE_HA="%s"
export ZOOKEEPER_NODES="%s"
export ZOOKEEPER_PORT="%s"
export SECURE_HADOOP="%s"
export CLUSTERNAME="%s"
""" % (traf_home, traf_var, TRAF_CFG_DIR, traf_log, dbcfgs['java_home'], str(len(nodes)), hadoop_type, dbcfgs['enable_ha'],
       zk_nodes, zk_port, dbcfgs['secure_hadoop'], socket.gethostname())

    # save additional configs for elastic
    trafodion_config += """
export hbase_xml_file="%s"
export hbase_lib_path="%s"
export traf_user="******"
export traf_version="%s"
export dcs_cnt_per_node="%s"
""" % (dbcfgs['hbase_xml_file'], dbcfgs['hbase_lib_path'], dbcfgs['traf_user'], dbcfgs['traf_version'], dbcfgs['dcs_cnt_per_node'])

    # save additional configs for multi instance support
    trafodion_config += """
export TRAF_CLUSTER_NAME="%s"
export TRAF_INSTANCE_NAME="%s"
export TRAF_CLUSTER_ID="%s"
export TRAF_INSTANCE_ID="%s"
export TRAF_ROOT_ZNODE="/%s"
""" % (dbcfgs['cluster_name'], dbcfgs['traf_instance_name'], dbcfgs['traf_cluster_id'], dbcfgs['traf_instance_id'], dbcfgs['traf_user'])

    run_cmd('mkdir -p %s' % TRAF_CFG_DIR)
    write_file(TRAF_CFG_FILE, trafodion_config)

    if 'APACHE' in distro:
        extra_config = """
export HADOOP_PREFIX=%s
export HBASE_HOME=%s
export HIVE_HOME=%s
export PATH=$PATH:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin:$HBASE_HOME/bin
        """ % (dbcfgs['hadoop_home'], dbcfgs['hbase_home'], dbcfgs['hive_home'])
        append_file(TRAF_CFG_FILE, extra_config)

    # set permission
    run_cmd('chown -R %s:%s %s*' % (traf_user, traf_group, TRAF_CFG_DIR))

    # set ulimits for trafodion user
    ulimits_config = '''
# Trafodion settings
%s   soft   core unlimited
%s   hard   core unlimited
%s   soft   memlock unlimited
%s   hard   memlock unlimited
%s   soft   nofile 32768
%s   hard   nofile 65536
%s   soft   nproc 100000
%s   hard   nproc 100000
%s   soft nofile 8192
%s   hard nofile 65535
''' % ((traf_user,) * 10)

    write_file(ulimits_file, ulimits_config)

    # change permission for hsperfdata
    if os.path.exists(TRAF_HSPERFDATA_FILE):
        run_cmd('chown -R %s:%s %s' % (traf_user, traf_group, TRAF_HSPERFDATA_FILE))

    # clean up unused key file at the last step
    run_cmd('rm -rf %s{,.pub}' % SSHKEY_FILE)

    print 'Setup trafodion user successfully!'
Example No. 6
def run():
    """ create trafodion user, bashrc, setup passwordless SSH """
    dbcfgs = json.loads(dbcfgs_json)

    DISTRO = dbcfgs['distro']
    if 'CDH' in DISTRO:
        hadoop_type = 'cloudera'
    elif 'HDP' in DISTRO:
        hadoop_type = 'hortonworks'
    elif 'APACHE' in DISTRO:
        hadoop_type = 'apache'

    TRAF_USER = dbcfgs['traf_user']
    TRAF_GROUP = TRAF_USER
    HOME_DIR = cmd_output('cat /etc/default/useradd |grep HOME |cut -d "=" -f 2').strip()
    # customize trafodion home dir
    if dbcfgs.has_key('home_dir') and dbcfgs['home_dir']:
        HOME_DIR = dbcfgs['home_dir']

    TRAF_USER_DIR = '%s/%s' % (HOME_DIR, TRAF_USER)
    TRAF_DIRNAME = dbcfgs['traf_dirname']
    TRAF_HOME = '%s/%s' % (TRAF_USER_DIR, TRAF_DIRNAME)

    TRAFODION_CFG_DIR = '/etc/trafodion/'
    TRAFODION_CFG_FILE = '/etc/trafodion/trafodion_config'
    HBASE_XML_FILE = dbcfgs['hbase_xml_file']
    KEY_FILE = '/tmp/id_rsa'
    AUTH_KEY_FILE = '%s/.ssh/authorized_keys' % TRAF_USER_DIR
    SSH_CFG_FILE = '%s/.ssh/config' % TRAF_USER_DIR
    ULIMITS_FILE = '/etc/security/limits.d/%s.conf' % TRAF_USER
    HSPERFDATA_FILE = '/tmp/hsperfdata_trafodion'

    # create trafodion user and group
    if not cmd_output('getent group %s' % TRAF_GROUP):
        run_cmd('groupadd %s > /dev/null 2>&1' % TRAF_GROUP)

    if not cmd_output('getent passwd %s' % TRAF_USER):
        TRAF_PWD = dbcfgs['traf_pwd']
        run_cmd('useradd --shell /bin/bash -m %s -g %s --home %s --password "$(openssl passwd %s)"' % (TRAF_USER, TRAF_GROUP, TRAF_USER_DIR, TRAF_PWD))
    elif not os.path.exists(TRAF_USER_DIR):
        run_cmd('mkdir -p %s' % TRAF_USER_DIR)
        run_cmd('chmod 700 %s' % TRAF_USER_DIR)

    # set ssh key
    run_cmd_as_user(TRAF_USER, 'echo -e "y" | ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')
    # the key is generated by the copy_file script running on the installer node
    run_cmd('cp %s{,.pub} %s/.ssh/' % (KEY_FILE, TRAF_USER_DIR))

    run_cmd_as_user(TRAF_USER, 'cat ~/.ssh/id_rsa.pub > %s' % AUTH_KEY_FILE)
    run_cmd('chmod 644 %s' % AUTH_KEY_FILE)

    ssh_cfg = 'StrictHostKeyChecking=no\nNoHostAuthenticationForLocalhost=yes\n'
    with open(SSH_CFG_FILE, 'w') as f:
        f.write(ssh_cfg)
    run_cmd('chmod 600 %s' % SSH_CFG_FILE)

    run_cmd('chown -R %s:%s %s/.ssh/' % (TRAF_USER, TRAF_GROUP, TRAF_USER_DIR))

    hb = ParseXML(HBASE_XML_FILE)
    zk_nodes = hb.get_property('hbase.zookeeper.quorum')
    zk_port = hb.get_property('hbase.zookeeper.property.clientPort')
    # set trafodion_config
    nodes = dbcfgs['node_list'].split(',')
    trafodion_config = """
export TRAF_HOME="%s"
export MY_SQROOT=$TRAF_HOME # for compatibility
export JAVA_HOME="%s"
export NODE_LIST="%s"
export MY_NODES="%s"
export node_count="%s"
export HADOOP_TYPE="%s"
export ENABLE_HA="%s"
export ZOOKEEPER_NODES="%s"
export ZOOKEEPER_PORT="%s"
export SECURE_HADOOP="%s"
""" % (TRAF_HOME, dbcfgs['java_home'], ' '.join(nodes), ' -w ' + ' -w '.join(nodes),
       str(len(nodes)), hadoop_type, dbcfgs['enable_ha'], zk_nodes, zk_port, dbcfgs['secure_hadoop'])

    run_cmd('mkdir -p %s' % TRAFODION_CFG_DIR)
    write_file(TRAFODION_CFG_FILE, trafodion_config)

    if 'APACHE' in DISTRO:
        extra_config = """
export HADOOP_PREFIX=%s
export HBASE_HOME=%s
export PATH=$PATH:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin:$HBASE_HOME/bin
        """ % (dbcfgs['hadoop_home'], dbcfgs['hbase_home'])
        append_file(TRAFODION_CFG_FILE, extra_config)

    # set permission
    run_cmd('chown -R %s:%s %s*' % (TRAF_USER, TRAF_GROUP, TRAFODION_CFG_DIR))


    # set ulimits for trafodion user
    ulimits_config = '''
# Trafodion settings
%s   soft   core unlimited
%s   hard   core unlimited
%s   soft   memlock unlimited
%s   hard   memlock unlimited
%s   soft   nofile 32768
%s   hard   nofile 65536
%s   soft   nproc 100000
%s   hard   nproc 100000
%s   soft nofile 8192
%s   hard nofile 65535
''' % ((TRAF_USER,) * 10)

    write_file(ULIMITS_FILE, ulimits_config)

    # change permission for hsperfdata
    if os.path.exists(HSPERFDATA_FILE):
        run_cmd('chown -R %s:%s %s' % (TRAF_USER, TRAF_GROUP, HSPERFDATA_FILE))

    # clean up unused key file at the last step
    run_cmd('rm -rf %s{,.pub}' % KEY_FILE)

    print 'Setup trafodion user successfully!'
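
The installer examples drive everything through three shell helpers: run_cmd (fail on non-zero exit), cmd_output (capture stdout) and run_cmd_as_user (run under su). They are not part of these snippets; a minimal sketch of the assumed behavior, in the same Python 2 style as the examples:

import subprocess

def run_cmd(cmd):
    # assumed: run a shell command, raise on a non-zero exit code,
    # and return its stdout (some examples above use the return value)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    out, _ = p.communicate()
    if p.returncode != 0:
        raise Exception('command failed (rc=%d): %s\n%s' % (p.returncode, cmd, out))
    return out.strip()

def cmd_output(cmd):
    # assumed: capture the command's output without raising on failure;
    # callers above simply test for an empty result
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    out, _ = p.communicate()
    return out.strip()

def run_cmd_as_user(user, cmd):
    # assumed: re-run the command under su as the given user
    run_cmd("su - %s -c '%s'" % (user, cmd))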
Example No. 7
def run():
    """ create trafodion user, bashrc, setup passwordless SSH """
    dbcfgs = json.loads(dbcfgs_json)

    DISTRO = dbcfgs['distro']
    if 'CDH' in DISTRO:
        hadoop_type = 'cloudera'
    elif 'HDP' in DISTRO:
        hadoop_type = 'hortonworks'
    elif 'APACHE' in DISTRO:
        hadoop_type = 'apache'

    TRAF_USER = dbcfgs['traf_user']
    TRAF_PWD = dbcfgs['traf_pwd']
    TRAF_GROUP = TRAF_USER
    TRAF_HOME = cmd_output('cat /etc/default/useradd |grep HOME |cut -d "=" -f 2').strip()
    TRAF_USER_DIR = '%s/%s' % (TRAF_HOME, TRAF_USER)
    SQ_ROOT = '%s/%s-%s' % (TRAF_USER_DIR, dbcfgs['traf_basename'], dbcfgs['traf_version'])

    KEY_FILE = '/tmp/id_rsa'
    AUTH_KEY_FILE = '%s/.ssh/authorized_keys' % TRAF_USER_DIR
    SSH_CFG_FILE = '%s/.ssh/config' % TRAF_USER_DIR
    BASHRC_TEMPLATE = '%s/bashrc.template' % TMP_DIR
    BASHRC_FILE = '%s/.bashrc' % TRAF_USER_DIR
    ULIMITS_FILE = '/etc/security/limits.d/%s.conf' % TRAF_USER
    HSPERFDATA_FILE = '/tmp/hsperfdata_trafodion'

    # create trafodion user and group
    if not cmd_output('getent group %s' % TRAF_GROUP):
        run_cmd('groupadd %s > /dev/null 2>&1' % TRAF_GROUP)

    if not cmd_output('getent passwd %s' % TRAF_USER):
        run_cmd('useradd --shell /bin/bash -m %s -g %s --password "$(openssl passwd %s)"' % (TRAF_USER, TRAF_GROUP, TRAF_PWD))
    elif not os.path.exists(TRAF_USER_DIR):
        run_cmd('mkdir -p %s' % TRAF_USER_DIR)
        run_cmd('chmod 700 %s' % TRAF_USER_DIR)

    # set ssh key
    run_cmd_as_user(TRAF_USER, 'echo -e "y" | ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')
    # the key is generated by the copy_file script running on the installer node
    run_cmd('cp %s{,.pub} %s/.ssh/' % (KEY_FILE, TRAF_USER_DIR))

    run_cmd_as_user(TRAF_USER, 'cat ~/.ssh/id_rsa.pub > %s' % AUTH_KEY_FILE)
    run_cmd('chmod 644 %s' % AUTH_KEY_FILE)

    ssh_cfg = 'StrictHostKeyChecking=no\nNoHostAuthenticationForLocalhost=yes\n'
    with open(SSH_CFG_FILE, 'w') as f:
        f.write(ssh_cfg)
    run_cmd('chmod 600 %s' % SSH_CFG_FILE)

    run_cmd('chown -R %s:%s %s/.ssh/' % (TRAF_USER, TRAF_GROUP, TRAF_USER_DIR))

    # set bashrc
    nodes = dbcfgs['node_list'].split(',')
    change_items = {
        '{{ java_home }}': dbcfgs['java_home'],
        '{{ sq_home }}': SQ_ROOT,
        '{{ hadoop_type }}': hadoop_type,
        '{{ node_list }}': ' '.join(nodes),
        '{{ node_count }}': str(len(nodes)),
        '{{ enable_ha }}': dbcfgs['enable_ha'],
        '{{ my_nodes }}': ' -w ' + ' -w '.join(nodes)
    }

    mod_file(BASHRC_TEMPLATE, change_items)

    if 'APACHE' in DISTRO:
        bashrc_content = """
export HADOOP_PREFIX=%s
export HBASE_HOME=%s
export PATH=$PATH:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin:$HBASE_HOME/bin
        """ % (dbcfgs['hadoop_home'], dbcfgs['hbase_home'])
        append_file(BASHRC_TEMPLATE, bashrc_content, position='HADOOP_TYPE')

    # back up bashrc if it exists
    if os.path.exists(BASHRC_FILE):
        run_cmd('cp %s %s.bak' % ((BASHRC_FILE,) * 2))

    # copy bashrc to trafodion's home
    run_cmd('cp %s %s' % (BASHRC_TEMPLATE, BASHRC_FILE))
    run_cmd('chown -R %s:%s %s*' % (TRAF_USER, TRAF_GROUP, BASHRC_FILE))

    # set ulimits for trafodion user
    ulimits_config = '''
# Trafodion settings
%s   soft   core unlimited
%s   hard   core unlimited
%s   soft   memlock unlimited
%s   hard   memlock unlimited
%s   soft   nofile 32768
%s   hard   nofile 65536
%s   soft   nproc 100000
%s   hard   nproc 100000
%s   soft nofile 8192
%s   hard nofile 65535
hbase soft nofile 8192
''' % ((TRAF_USER,) * 10)

    with open(ULIMITS_FILE, 'w') as f:
        f.write(ulimits_config)

    # change permission for hsperfdata
    if os.path.exists(HSPERFDATA_FILE):
        run_cmd('chown -R %s:%s %s' % (TRAF_USER, TRAF_GROUP, HSPERFDATA_FILE))

    # clean up unused key file at the last step
    run_cmd('rm -rf %s{,.pub}' % KEY_FILE)

    print 'Setup trafodion user successfully!'
Example No. 8
def main_update():
  """ update given mp3's with md5 of their audio content """

  ERROR_LOG = 'c:\\temp\\mp3hash.errors.m3u'
  BAD_MP3_LOG = 'c:\\temp\\mp3hash.incorrect.m3u'
  GENERAL_LOG = 'c:\\temp\\mp3hash.general.log'

  def do_mp3(file):
    append_file(GENERAL_LOG, 'File %s ...' % file)
    h_tag, recheck = read_mp3_audio_hash_tag(file)
    do_write = True
    if h_tag:
      do_write = False
      if not check_hex_digest(h_tag):
        append_file(ERROR_LOG, 'ERROR: invalid hash %s stored in file %s' % (h_tag, file))
      elif recheck:
        do_write = True
    if do_write:
      h_123 = calc_mp3_audio_hash_mpg123(file)
      if h_123:
        assert check_hex_digest(h_123)
        tmp = tempfile.NamedTemporaryFile(delete=False).name
        try:
          # mp3 tagging is dangerous so do a test write first
          shutil.copy(file, tmp)
          write_mp3_audio_hash_tag(tmp, h_123)

          h_re = calc_mp3_audio_hash_mpg123(tmp)
          if h_re == h_123:
            write_mp3_audio_hash_tag(file, h_123)
          else:
            append_file(ERROR_LOG,
              u'# ERROR: file got different hash sum after tagging! Original file not affected: %s' % file)
            append_file(ERROR_LOG, win32api.GetShortPathName(file))
        finally:
          if os.path.exists(tmp):
            os.remove(tmp)
      else:
        append_file(BAD_MP3_LOG, "# %s" % file)
        append_file(BAD_MP3_LOG, win32api.GetShortPathName(file))

  def collect_mp3s(root_dir):
    files = []
    if os.path.isfile(root_dir) and os.path.splitext(root_dir)[1].lower() == '.m3u':
      with open(root_dir, 'r') as f:
        for fname in f:
          fname = fname.strip()
          if os.path.isfile(fname):
            files.append(fname)
    elif os.path.isdir(root_dir):
      for dir, subdirs, subfiles in os.walk(unicode(root_dir)):
        for file in glob.glob1(dir, u'*.mp3'):
          files.append(os.path.join(dir, file))
    else:
      assert False, root_dir
    return files

  root_dir = sys.argv[2]

  empty_files(ERROR_LOG, BAD_MP3_LOG, GENERAL_LOG)

  print 'collecting mp3 names...'
  files = collect_mp3s(root_dir)
  print '%d mp3 files at all' % len(files)
  for cnt, file in enumerate(files):
    try:
      if cnt % 20 == 0:
        pct = float(cnt) / len(files) * 100
        sys.stdout.write('%.1f%%' % pct)
      else:
        sys.stdout.write('.')

      if os.path.exists(file):
        # file may disappear after its name has been collected
        do_mp3(file)

    except KeyboardInterrupt:
      print >> sys.stderr, 'KeyboardInterrupt happened'
      return
    except BaseException, msg:
      print >> sys.stderr, 'exception: %s' % msg
      append_file(ERROR_LOG, '# exception: %s' % msg)
      append_file(ERROR_LOG, '# %s' % file)
      append_file(ERROR_LOG, win32api.GetShortPathName(file))
      with open(GENERAL_LOG, 'a+') as log:
        traceback.print_exc(file=log)
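
Example 8 hashes only the decoded audio stream, so retagging an mp3 does not change its hash. calc_mp3_audio_hash_mpg123 is not shown; one way such a function could work (an assumed sketch, relying on an mpg123 binary on PATH, with a hypothetical name) is to pipe the decoded PCM through md5:

import hashlib
import subprocess

def calc_audio_md5(mp3_path):
    # decode the mp3 to raw PCM on stdout with mpg123 and md5 the stream;
    # returns None if decoding fails (mirrors the "bad mp3" branch above)
    p = subprocess.Popen(['mpg123', '-q', '-s', mp3_path],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    md5 = hashlib.md5()
    for chunk in iter(lambda: p.stdout.read(65536), b''):
        md5.update(chunk)
    p.communicate()
    if p.returncode != 0:
        return None
    return md5.hexdigest()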
Example No. 9
  def do_mp3(file):
    append_file(GENERAL_LOG, 'File %s ...' % file)
    h_tag, recheck = read_mp3_audio_hash_tag(file)
    do_write = True
    if h_tag:
      do_write = False
      if not check_hex_digest(h_tag):
        append_file(ERROR_LOG, 'ERROR: invalid hash %s stored in file %s' % (h_tag, file))
      elif recheck:
        do_write = True
    if do_write:
      h_123 = calc_mp3_audio_hash_mpg123(file)
      if h_123:
        assert check_hex_digest(h_123)
        tmp = tempfile.NamedTemporaryFile(delete=False).name
        try:
          # mp3 tagging is dangerous so do a test write first
          shutil.copy(file, tmp)
          write_mp3_audio_hash_tag(tmp, h_123)

          h_re = calc_mp3_audio_hash_mpg123(tmp)
          if h_re == h_123:
            write_mp3_audio_hash_tag(file, h_123)
          else:
            append_file(ERROR_LOG,
              u'# ERROR: file got different hash sum after tagging! Original file not affected: %s' % file)
            append_file(ERROR_LOG, win32api.GetShortPathName(file))
        finally:
          if os.path.exists(tmp):
            os.remove(tmp)
      else:
        append_file(BAD_MP3_LOG, "# %s" % file)
        append_file(BAD_MP3_LOG, win32api.GetShortPathName(file))
Example No. 10
def run():
    dbcfgs = json.loads(dbcfgs_json)

    hbase_xml_file = dbcfgs['hbase_xml_file']

    dcs_conf_dir = '%s/dcs' % (TRAF_CFG_DIR)
    dcs_srv_file = dcs_conf_dir + '/servers'
    dcs_master_file = dcs_conf_dir + '/masters'
    dcs_site_file = dcs_conf_dir + '/dcs-site.xml'
    rest_site_file = '%s/rest/rest-site.xml' % (TRAF_CFG_DIR)

    ### dcs setting ###
    # servers
    nodes = dbcfgs['node_list'].split(',')
    dcs_cnt = dbcfgs['dcs_cnt_per_node']
    dcs_servers = ''
    for node in nodes:
        dcs_servers += '%s %s\n' % (node, dcs_cnt)

    write_file(dcs_srv_file, dcs_servers)

    ### modify dcs config files ###
    # modify master
    dcs_master = nodes[0]
    append_file(dcs_master_file, dcs_master+'\n')

    # modify dcs-site.xml
    net_interface = run_cmd('ip route |grep default|awk \'{print $5}\'')
    hb = ParseXML(hbase_xml_file)
    zk_hosts = hb.get_property('hbase.zookeeper.quorum')
    zk_port = hb.get_property('hbase.zookeeper.property.clientPort')

    p = ParseXML(dcs_site_file)
    p.add_property('dcs.zookeeper.property.clientPort', zk_port)
    p.add_property('dcs.zookeeper.quorum', zk_hosts)
    p.add_property('dcs.dns.interface', net_interface)

    if dbcfgs['dcs_ha'] == 'Y':
        dcs_floating_ip = dbcfgs['dcs_floating_ip']
        dcs_backup_nodes = dbcfgs['dcs_backup_nodes']
        p.add_property('dcs.master.floating.ip', 'true')
        p.add_property('dcs.master.floating.ip.external.interface', net_interface)
        p.add_property('dcs.master.floating.ip.external.ip.address', dcs_floating_ip)
        p.rm_property('dcs.dns.interface')

        # set DCS_MASTER_FLOATING_IP ENV for trafci
        dcs_floating_ip_cfg = 'export DCS_MASTER_FLOATING_IP=%s' % dcs_floating_ip
        append_file(TRAF_CFG_FILE, dcs_floating_ip_cfg)

        # modify master with backup master host
        for dcs_backup_node in dcs_backup_nodes.split(','):
            append_file(dcs_master_file, dcs_backup_node)

    p.write_xml()

    ### rest setting ###
    p = ParseXML(rest_site_file)
    p.add_property('rest.zookeeper.property.clientPort', zk_port)
    p.add_property('rest.zookeeper.quorum', zk_hosts)
    p.write_xml()

    ### run sqcertgen ###
    run_cmd('sqcertgen')
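
Examples 10 and 12-14 read the HBase site file and edit the DCS/REST site files through a ParseXML helper whose implementation is not included here. The subset of its interface used (get_property, add_property, rm_property, write_xml) could be approximated with xml.etree, as in this hypothetical stand-in:

import xml.etree.ElementTree as ET

class SimpleHadoopXML(object):
    # hypothetical stand-in for the ParseXML helper used in these examples;
    # assumes the usual <configuration><property><name/><value/></property> layout
    def __init__(self, path):
        self.path = path
        self.tree = ET.parse(path)
        self.root = self.tree.getroot()

    def get_property(self, name):
        for prop in self.root.findall('property'):
            if prop.findtext('name') == name:
                return prop.findtext('value')
        return ''

    def add_property(self, name, value):
        prop = ET.SubElement(self.root, 'property')
        ET.SubElement(prop, 'name').text = name
        ET.SubElement(prop, 'value').text = value

    def rm_property(self, name):
        for prop in self.root.findall('property'):
            if prop.findtext('name') == name:
                self.root.remove(prop)

    def write_xml(self):
        self.tree.write(self.path)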
Example No. 11
def run():
    """ create trafodion user, bashrc, setup passwordless SSH """
    dbcfgs = json.loads(dbcfgs_json)

    distro = dbcfgs['distro']
    if 'CDH' in distro:
        hadoop_type = 'cloudera'
    elif 'HDP' in distro:
        hadoop_type = 'hortonworks'
    elif 'APACHE' in distro:
        hadoop_type = 'apache'

    home_dir = get_default_home()
    # customize trafodion home dir
    if dbcfgs.has_key('home_dir') and dbcfgs['home_dir']:
        home_dir = dbcfgs['home_dir']

    traf_user = dbcfgs['traf_user']
    traf_user_dir = '%s/%s' % (home_dir, traf_user)
    traf_dirname = dbcfgs['traf_dirname']
    traf_home = '%s/%s' % (traf_user_dir, traf_dirname)

    hbase_xml_file = dbcfgs['hbase_xml_file']
    auth_key_file = '%s/.ssh/authorized_keys' % traf_user_dir
    ssh_cfg_file = '%s/.ssh/config' % traf_user_dir
    ulimits_file = '/etc/security/limits.d/%s.conf' % traf_user

    # create trafodion user and group
    if cmd_output('getent passwd %s' % traf_user):
        # trafodion user exists, set actual trafodion group
        traf_group = cmd_output('id -ng %s' % traf_user)
    else:
        # default trafodion group
        traf_group = traf_user
        if not cmd_output('getent group %s' % traf_group):
            run_cmd('groupadd %s > /dev/null 2>&1' % traf_group)
        traf_pwd = dbcfgs['traf_pwd']
        run_cmd('useradd --shell /bin/bash -m %s -g %s --home %s --password "$(openssl passwd %s)"' % (traf_user, traf_group, traf_user_dir, traf_pwd))
    # hbase group is generally either hbase or hadoop, depending on distro
    if cmd_output('getent group hbase'):
        cmd_output('/usr/sbin/usermod -a -G hbase %s' % traf_user)
    if cmd_output('getent group hadoop'):
        cmd_output('/usr/sbin/usermod -a -G hadoop %s' % traf_user)
    if cmd_output('getent group hive'):
        cmd_output('/usr/sbin/usermod -a -G hive %s' % traf_user)

    if not os.path.exists(traf_user_dir):
        run_cmd('mkdir -p %s' % traf_user_dir)
        run_cmd('chmod 700 %s' % traf_user_dir)

    # set ssh key
    run_cmd_as_user(traf_user, 'echo -e "y" | ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')
    # the key is generated by the copy_file script running on the installer node
    run_cmd('cp %s{,.pub} %s/.ssh/' % (SSHKEY_FILE, traf_user_dir))

    run_cmd_as_user(traf_user, 'cat ~/.ssh/id_rsa.pub > %s' % auth_key_file)
    run_cmd('chmod 644 %s' % auth_key_file)

    ssh_cfg = 'StrictHostKeyChecking=no\nNoHostAuthenticationForLocalhost=yes\n'
    with open(ssh_cfg_file, 'w') as f:
        f.write(ssh_cfg)
    run_cmd('chmod 600 %s' % ssh_cfg_file)

    run_cmd('chown -R %s:%s %s/.ssh/' % (traf_user, traf_group, traf_user_dir))

    hb = ParseXML(hbase_xml_file)
    zk_nodes = hb.get_property('hbase.zookeeper.quorum')
    zk_port = hb.get_property('hbase.zookeeper.property.clientPort')
    # set trafodion_config
    nodes = dbcfgs['node_list'].split(',')
    trafodion_config = """
export TRAF_HOME="%s"
export TRAF_VAR=$TRAF_HOME/tmp
export MY_SQROOT=$TRAF_HOME # for compatibility
export JAVA_HOME="%s"
export node_count="%s"
export HADOOP_TYPE="%s"
export ENABLE_HA="%s"
export ZOOKEEPER_NODES="%s"
export ZOOKEEPER_PORT="%s"
export SECURE_HADOOP="%s"
export CLUSTERNAME="%s"
""" % (traf_home, dbcfgs['java_home'], str(len(nodes)), hadoop_type, dbcfgs['enable_ha'],
       zk_nodes, zk_port, dbcfgs['secure_hadoop'], socket.gethostname())

    # save additional configs for elastic
    trafodion_config += """
export hbase_xml_file="%s"
export hbase_lib_path="%s"
export traf_user="******"
export traf_version="%s"
export dcs_cnt_per_node="%s"
""" % (dbcfgs['hbase_xml_file'], dbcfgs['hbase_lib_path'], dbcfgs['traf_user'], dbcfgs['traf_version'], dbcfgs['dcs_cnt_per_node'])

    run_cmd('mkdir -p %s' % TRAF_CFG_DIR)
    write_file(TRAF_CFG_FILE, trafodion_config)

    if 'APACHE' in distro:
        extra_config = """
export HADOOP_PREFIX=%s
export HBASE_HOME=%s
export HIVE_HOME=%s
export PATH=$PATH:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin:$HBASE_HOME/bin
        """ % (dbcfgs['hadoop_home'], dbcfgs['hbase_home'], dbcfgs['hive_home'])
        append_file(TRAF_CFG_FILE, extra_config)

    # set permission
    run_cmd('chown -R %s:%s %s*' % (traf_user, traf_group, TRAF_CFG_DIR))

    # set ulimits for trafodion user
    ulimits_config = '''
# Trafodion settings
%s   soft   core unlimited
%s   hard   core unlimited
%s   soft   memlock unlimited
%s   hard   memlock unlimited
%s   soft   nofile 32768
%s   hard   nofile 65536
%s   soft   nproc 100000
%s   hard   nproc 100000
%s   soft nofile 8192
%s   hard nofile 65535
''' % ((traf_user,) * 10)

    write_file(ulimits_file, ulimits_config)

    # change permission for hsperfdata
    if os.path.exists(TRAF_HSPERFDATA_FILE):
        run_cmd('chown -R %s:%s %s' % (traf_user, traf_group, TRAF_HSPERFDATA_FILE))

    # clean up unused key file at the last step
    run_cmd('rm -rf %s{,.pub}' % SSHKEY_FILE)

    print 'Setup trafodion user successfully!'
Example No. 12
def run():
    dbcfgs = json.loads(dbcfgs_json)

    TRAF_HOME = os.environ['TRAF_HOME']
    TRAF_VER = dbcfgs['traf_version']
    HBASE_XML_FILE = dbcfgs['hbase_xml_file']

    DCS_CONF_DIR = '%s/dcs-%s/conf' % (TRAF_HOME, TRAF_VER)
    DCS_SRV_FILE = DCS_CONF_DIR + '/servers'
    DCS_MASTER_FILE = DCS_CONF_DIR + '/master'
    DCS_BKMASTER_FILE = DCS_CONF_DIR + '/backup-masters'
    DCS_SITE_FILE = DCS_CONF_DIR + '/dcs-site.xml'
    REST_SITE_FILE = '%s/rest-%s/conf/rest-site.xml' % (TRAF_HOME, TRAF_VER)

    ### dcs setting ###
    # servers
    nodes = dbcfgs['node_list'].split(',')
    dcs_cnt = dbcfgs['dcs_cnt_per_node']
    dcs_servers = ''
    for node in nodes:
        dcs_servers += '%s %s\n' % (node, dcs_cnt)

    write_file(DCS_SRV_FILE, dcs_servers)

    ### modify dcs config files ###
    # modify master
    dcs_master = nodes[0]
    append_file(DCS_MASTER_FILE, dcs_master)

    # modify dcs-site.xml
    net_interface = run_cmd('ip route |grep default|awk \'{print $5}\'')
    hb = ParseXML(HBASE_XML_FILE)
    zk_hosts = hb.get_property('hbase.zookeeper.quorum')
    zk_port = hb.get_property('hbase.zookeeper.property.clientPort')

    p = ParseXML(DCS_SITE_FILE)
    p.add_property('dcs.zookeeper.property.clientPort', zk_port)
    p.add_property('dcs.zookeeper.quorum', zk_hosts)
    p.add_property('dcs.dns.interface', net_interface)

    if dbcfgs['dcs_ha'] == 'Y':
        dcs_floating_ip = dbcfgs['dcs_floating_ip']
        dcs_backup_nodes = dbcfgs['dcs_backup_nodes']
        p.add_property('dcs.master.floating.ip', 'true')
        p.add_property('dcs.master.floating.ip.external.interface',
                       net_interface)
        p.add_property('dcs.master.floating.ip.external.ip.address',
                       dcs_floating_ip)
        p.rm_property('dcs.dns.interface')

        # modify backup_master
        write_file(DCS_BKMASTER_FILE, dcs_backup_nodes)

    p.write_xml()

    ### rest setting ###
    p = ParseXML(REST_SITE_FILE)
    p.add_property('rest.zookeeper.property.clientPort', zk_port)
    p.add_property('rest.zookeeper.quorum', zk_hosts)
    p.write_xml()

    ### run sqcertgen ###
    run_cmd('sqcertgen')
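
write_file, used by the DCS examples to (re)create the servers, backup-masters and trafodion_config files, is assumed to simply truncate and rewrite the target:

def write_file(path, content):
    # assumed behavior: overwrite the file with exactly the given content
    with open(path, 'w') as f:
        f.write(content)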
Example No. 13
def run():
    dbcfgs = json.loads(dbcfgs_json)

    SQ_ROOT = os.environ['MY_SQROOT']
    TRAF_VER = dbcfgs['traf_version']
    HBASE_XML_FILE = dbcfgs['hbase_xml_file']

    DCS_INSTALL_ENV = 'export DCS_INSTALL_DIR=%s/dcs-%s' % (SQ_ROOT, TRAF_VER)
    REST_INSTALL_ENV = 'export REST_INSTALL_DIR=%s/rest-%s' % (SQ_ROOT,
                                                               TRAF_VER)

    DCS_CONF_DIR = '%s/dcs-%s/conf' % (SQ_ROOT, TRAF_VER)
    DCS_SRV_FILE = DCS_CONF_DIR + '/servers'
    DCS_MASTER_FILE = DCS_CONF_DIR + '/master'
    DCS_BKMASTER_FILE = DCS_CONF_DIR + '/backup-masters'
    DCS_ENV_FILE = DCS_CONF_DIR + '/dcs-env.sh'
    DCS_SITE_FILE = DCS_CONF_DIR + '/dcs-site.xml'
    REST_SITE_FILE = '%s/rest-%s/conf/rest-site.xml' % (SQ_ROOT, TRAF_VER)
    TRAFCI_FILE = SQ_ROOT + '/trafci/bin/trafci'
    SQENV_FILE = SQ_ROOT + '/sqenvcom.sh'

    ### dcs setting ###
    # servers
    nodes = dbcfgs['node_list'].split(',')
    dcs_cnt = dbcfgs['dcs_cnt_per_node']
    dcs_servers = ''
    for node in nodes:
        dcs_servers += '%s %s\n' % (node, dcs_cnt)

    write_file(DCS_SRV_FILE, dcs_servers)

    ### modify dcs config files ###
    # modify master
    dcs_master = nodes[0]
    append_file(DCS_MASTER_FILE, dcs_master)

    # modify sqenvcom.sh
    append_file(SQENV_FILE, DCS_INSTALL_ENV)
    append_file(SQENV_FILE, REST_INSTALL_ENV)

    # modify dcs-env.sh
    mod_file(DCS_ENV_FILE,
             {'.*DCS_MANAGES_ZK=.*': 'export DCS_MANAGES_ZK=false'})

    # modify trafci
    mod_file(TRAFCI_FILE, {'HNAME=.*': 'HNAME=%s:23400' % dcs_master})

    # modify dcs-site.xml
    net_interface = cmd_output(
        'netstat -rn | grep "^0.0.0.0" | awk \'{print $8}\'').strip()
    hb = ParseXML(HBASE_XML_FILE)
    zk_hosts = hb.get_property('hbase.zookeeper.quorum')
    zk_port = hb.get_property('hbase.zookeeper.property.clientPort')

    p = ParseXML(DCS_SITE_FILE)
    p.add_property('dcs.zookeeper.property.clientPort', zk_port)
    p.add_property('dcs.zookeeper.quorum', zk_hosts)
    p.add_property('dcs.dns.interface', net_interface)

    if dbcfgs['dcs_ha'] == 'Y':
        dcs_floating_ip = dbcfgs['dcs_floating_ip']
        dcs_backup_nodes = dbcfgs['dcs_backup_nodes']
        p.add_property('dcs.master.floating.ip', 'true')
        p.add_property('dcs.master.floating.ip.external.interface',
                       net_interface)
        p.add_property('dcs.master.floating.ip.external.ip.address',
                       dcs_floating_ip)
        p.rm_property('dcs.dns.interface')

        # modify backup_master
        write_file(DCS_BKMASTER_FILE, dcs_backup_nodes)

    p.write_xml()

    ### rest setting ###
    p = ParseXML(REST_SITE_FILE)
    p.add_property('rest.zookeeper.property.clientPort', zk_port)
    p.add_property('rest.zookeeper.quorum', zk_hosts)
    p.write_xml()

    ### run sqcertgen ###
    run_cmd('sqcertgen')
Example No. 14
def run():
    dbcfgs = json.loads(dbcfgs_json)

    SQ_ROOT = os.environ['MY_SQROOT']
    TRAF_VER = dbcfgs['traf_version']
    HBASE_XML_FILE = dbcfgs['hbase_xml_file']

    DCS_INSTALL_ENV = 'export DCS_INSTALL_DIR=%s/dcs-%s' % (SQ_ROOT, TRAF_VER)
    REST_INSTALL_ENV = 'export REST_INSTALL_DIR=%s/rest-%s' % (SQ_ROOT, TRAF_VER)

    DCS_CONF_DIR = '%s/dcs-%s/conf' % (SQ_ROOT, TRAF_VER)
    DCS_SRV_FILE = DCS_CONF_DIR + '/servers'
    DCS_MASTER_FILE = DCS_CONF_DIR + '/master'
    DCS_BKMASTER_FILE = DCS_CONF_DIR + '/backup-masters'
    DCS_ENV_FILE = DCS_CONF_DIR + '/dcs-env.sh'
    DCS_SITE_FILE = DCS_CONF_DIR + '/dcs-site.xml'
    REST_SITE_FILE = '%s/rest-%s/conf/rest-site.xml' % (SQ_ROOT, TRAF_VER)
    TRAFCI_FILE = SQ_ROOT + '/trafci/bin/trafci'
    SQENV_FILE = SQ_ROOT + '/sqenvcom.sh'

    ### dcs setting ###
    # servers
    nodes = dbcfgs['node_list'].split(',')
    dcs_cnt = dbcfgs['dcs_cnt_per_node']
    dcs_servers = ''
    for node in nodes:
        dcs_servers += '%s %s\n' % (node, dcs_cnt)

    write_file(DCS_SRV_FILE, dcs_servers)

    ### modify dcs config files ###
    # modify master
    dcs_master = nodes[0]
    append_file(DCS_MASTER_FILE, dcs_master)

    # modify sqenvcom.sh
    append_file(SQENV_FILE, DCS_INSTALL_ENV)
    append_file(SQENV_FILE, REST_INSTALL_ENV)

    # modify dcs-env.sh
    mod_file(DCS_ENV_FILE, {'.*DCS_MANAGES_ZK=.*':'export DCS_MANAGES_ZK=false'})

    # modify trafci
    mod_file(TRAFCI_FILE, {'HNAME=.*':'HNAME=%s:23400' % dcs_master})

    # modify dcs-site.xml
    net_interface = cmd_output('netstat -rn | grep "^0.0.0.0" | awk \'{print $8}\'').strip()
    hb = ParseXML(HBASE_XML_FILE)
    zk_hosts = hb.get_property('hbase.zookeeper.quorum')
    zk_port = hb.get_property('hbase.zookeeper.property.clientPort')

    p = ParseXML(DCS_SITE_FILE)
    p.add_property('dcs.zookeeper.property.clientPort', zk_port)
    p.add_property('dcs.zookeeper.quorum', zk_hosts)
    p.add_property('dcs.dns.interface', net_interface)

    if dbcfgs['dcs_ha'] == 'Y':
        dcs_floating_ip = dbcfgs['dcs_floating_ip']
        dcs_backup_nodes = dbcfgs['dcs_backup_nodes']
        p.add_property('dcs.master.floating.ip', 'true')
        p.add_property('dcs.master.floating.ip.external.interface', net_interface)
        p.add_property('dcs.master.floating.ip.external.ip.address', dcs_floating_ip)
        p.rm_property('dcs.dns.interface')

        # modify backup_master
        write_file(DCS_BKMASTER_FILE, dcs_backup_nodes)

    p.write_xml()

    ### rest setting ###
    p = ParseXML(REST_SITE_FILE)
    p.add_property('rest.zookeeper.property.clientPort', zk_port)
    p.add_property('rest.zookeeper.quorum', zk_hosts)
    p.write_xml()

    ### run sqcertgen ###
    run_cmd('sqcertgen')