Example #1
def install_zeppelin():
    import params
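    # Idempotent install: create Zeppelin's conf/log/pid directories, then
    # download, extract, and symlink the tarball only when neither the
    # versioned directory nor the install-dir link already exists.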
    Directory(
        [params.conf_dir, params.zeppelin_log_dir, params.zeppelin_pid_dir],
        owner=params.zeppelin_user,
        group=params.zeppelin_group,
        create_parents=True,
        mode=0755)
    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir) or not os.path.exists(
                              params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        Execute('wget ' + params.download_url + ' -O /tmp/' + params.filename,
                user=params.zeppelin_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir +
                ' ' + params.install_dir)
        Execute(' rm -rf ' + params.install_dir + '/conf')
        Execute('ln -s ' + params.conf_dir + ' ' + params.install_dir +
                '/conf')
        Execute("echo 'export PATH=%s/bin:$PATH'>/etc/profile.d/zeeplin.sh" %
                params.install_dir)
        Execute('chown -R %s:%s %s/%s' %
                (params.zeppelin_user, params.zeppelin_group,
                 Script.get_stack_root(), params.version_dir))
        Execute(
            'chown -R %s:%s %s' %
            (params.zeppelin_user, params.zeppelin_group, params.install_dir))
        Execute('/bin/rm -f /tmp/' + params.filename)
Example #2
def install_elasticjob():
    import params
    Directory([params.conf_dir],
              owner=params.elasticjob_user,
              group=params.user_group,
              mode=0775,
              create_parents=True)
    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir) or not os.path.exists(
                              params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        Execute('wget ' + params.download_url + ' -O /tmp/' + params.filename,
                user=params.elasticjob_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir +
                ' ' + params.install_dir)
        Execute(' cp -r ' + params.install_dir + '/conf/* ' + params.conf_dir +
                ' && rm -rf ' + params.install_dir + '/conf')
        Execute('ln -s ' + params.conf_dir + ' ' + params.install_dir +
                '/conf')
        Execute('chown -R %s:%s %s/%s' %
                (params.elasticjob_user, params.user_group, params.stack_root,
                 params.version_dir))
        Execute(
            'chown -R %s:%s %s' %
            (params.elasticjob_user, params.user_group, params.install_dir))
        Execute('/bin/rm -f /tmp/' + params.filename)
Example #3
def install_angel():
    import params
    Directory(
        [params.angel_conf_dir, params.angel_log_dir, params.angel_run_dir],
        owner=params.angel_user,
        group=params.user_group,
        mode=0775,
        create_parents=True)
    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir) or not os.path.exists(
                              params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        Execute('wget ' + params.download_url + ' -O /tmp/' + params.filename,
                user=params.angel_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir +
                ' ' + params.install_dir)
        Execute('cp -r ' + params.install_dir + '/conf/* ' +
                params.angel_conf_dir + ' ; rm -rf ' + params.install_dir +
                '/conf')
        Execute('ln -s ' + params.angel_conf_dir + ' ' + params.install_dir +
                '/conf')
        Execute("echo 'export PATH=%s/bin:$PATH'>/etc/profile.d/angel.sh" %
                params.install_dir)
        Execute('chown -R %s:%s %s/%s' %
                (params.angel_user, params.user_group, params.stack_root,
                 params.version_dir))
        Execute('chown -R %s:%s %s' %
                (params.angel_user, params.user_group, params.install_dir))
        Execute('/bin/rm -f /tmp/' + params.filename)
Example #4
def install_ozone():
    import params
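    # Same download/extract/symlink pattern as above; additionally relinks
    # etc/hadoop to the shared Hadoop conf dir, creates a world-writable logs
    # directory, and hands bin/container-executor to root, as the secure YARN
    # container executor expects.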
    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir) or not os.path.exists(
                              params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        Execute('/bin/rm -f /tmp/' + params.filename)
        Execute('wget ' + params.download_url + ' -O /tmp/' + params.filename,
                user=params.hdfs_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir +
                ' ' + params.install_dir)
        Execute(' rm -rf ' + params.install_dir + '/etc/hadoop')
        Execute('ln -s ' + params.hadoop_conf_dir + ' ' + params.install_dir +
                '/etc/hadoop')
        Execute('mkdir ' + params.install_dir + '/logs && chmod 777 ' +
                params.install_dir + '/logs')
        Execute("echo 'export PATH=%s/bin:$PATH'>/etc/profile.d/ozone.sh" %
                params.install_dir)
        Execute('chown -R %s:%s %s/%s' %
                (params.hdfs_user, params.user_group, params.stack_root,
                 params.version_dir))
        Execute('chown -R %s:%s %s' %
                (params.hdfs_user, params.user_group, params.install_dir))
        Execute('chmod -R 755 %s/%s' % (params.stack_root, params.version_dir))
        Execute('chown root:%s %s/bin/container-executor' %
                (params.user_group, params.install_dir))

        Execute('/bin/rm -f /tmp/' + params.filename)
Example #5
def install_alluxio():
    import params
    Directory([
        params.log_dir, params.hdd_dirs, params.journal_dir,
        params.underfs_addr, params.pid_dir
    ],
              owner=params.alluxio_user,
              group=params.user_group,
              mode=0775,
              create_parents=True)
    if not os.path.exists(Script.get_stack_root() + '/' + params.version_dir) or not os.path.exists(
            params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' + params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        Execute(
            'wget ' + params.download_url + ' -O /tmp/' + params.filename,
            user=params.alluxio_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C  ' + Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir + ' ' + params.install_dir)
        Execute('rm -rf %s' % params.install_dir + '/conf')
        Execute('ln -s ' + params.conf_dir + ' ' + params.install_dir +
                '/conf')
        Execute('chown -R %s:%s %s/%s' % (params.alluxio_user, params.user_group, Script.get_stack_root(),params.version_dir))
        Execute('chown -R %s:%s %s' % (params.alluxio_user, params.user_group,
                                       params.install_dir))
Example #6
def install_sqoop():
    import params
    Directory([params.sqoop_conf_dir, '/var/run/sqoop', '/var/log/sqoop'],
              owner=params.sqoop_user,
              group=params.user_group,
              mode=0775,
              create_parents=True)
    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir) or not os.path.exists(
                              params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        Execute('wget ' + params.download_url + ' -O /tmp/' + params.filename,
                user=params.sqoop_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir +
                ' ' + params.install_dir)
        Execute('ln -s ' + params.sqoop_conf_dir + ' ' + params.install_dir +
                '/etc')
        Execute('chown -R %s:%s %s/%s' %
                (params.sqoop_user, params.user_group, Script.get_stack_root(),
                 params.version_dir))
        Execute('chown -R %s:%s %s' %
                (params.sqoop_user, params.user_group, params.install_dir))
Example #7
def install_geoserver():
    import params
    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir) or not os.path.exists(
                              params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.version_dir)
        Execute('rm -rf %s' % params.install_dir, ignore_failures=True)
        Execute('wget ' + params.download_url + ' -O /tmp/' + params.filename,
                user=params.geoserver_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir +
                ' ' + params.install_dir)
        Execute('cp -r ' + params.install_dir + '/conf/* ' + params.conf_dir)
        Execute(' rm -rf ' + params.install_dir + '/conf')
        Execute('ln -s ' + params.conf_dir + ' ' + params.install_dir +
                '/conf')
        Execute('mkdir ' + params.install_dir + '/logs && chmod 777 ' +
                params.install_dir + '/logs')
        Execute("echo 'export PATH=%s/bin:$PATH'>/etc/profile.d/geoserver.sh" %
                params.install_dir)
        Execute('chown -R %s:%s %s/%s' %
                (params.geoserver_user, params.geoserver_group,
                 params.stack_root, params.version_dir))
        Execute('chown -R %s:%s %s' %
                (params.geoserver_user, params.geoserver_group,
                 params.install_dir))
        Execute('/bin/rm -f /tmp/' + params.filename)
Example #8
def install_tomcat():
    import params
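    # Note: this Tomcat install runs under the Tez service account and writes
    # its PATH export to /etc/profile.d/tez.sh.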
    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir_tomcat) or not os.path.exists(
                              params.install_dir_tomcat):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.version_dir_tomcat)
        Execute('rm -rf %s' % params.install_dir_tomcat)
        Execute('wget ' + params.download_url_tomcat + ' -O /tmp/' +
                params.filename_tomcat,
                user=params.tez_user)
        Execute('tar -zxf /tmp/' + params.filename_tomcat + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' +
                params.version_dir_tomcat + ' ' + params.install_dir_tomcat)

        Execute("echo 'export PATH=%s/bin:$PATH'>/etc/profile.d/tez.sh" %
                params.install_dir_tomcat)
        Execute('chown -R %s:%s %s/%s' %
                (params.tez_user, params.user_group, params.stack_root,
                 params.version_dir_tomcat))
        Execute(
            'chown -R %s:%s %s' %
            (params.tez_user, params.user_group, params.install_dir_tomcat))
        Execute('/bin/rm -f /tmp/' + params.filename_tomcat)
Example #9
def install_hbase():
    import params
    Directory([params.etc_prefix_dir, params.hbase_zookeeper_data_dir],
              owner=params.hbase_user,
              group=params.user_group,
              mode=0775,
              create_parents=True)
    install_hbase_share_lib()
    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir) or not os.path.exists(
                              params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        Execute('wget ' + params.download_url + ' -O /tmp/' + params.filename,
                user=params.hbase_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir +
                ' ' + params.install_dir)
        Execute(' rm -rf ' + params.install_dir + '/conf')
        Execute('ln -s ' + params.etc_prefix_dir + ' ' + params.install_dir +
                '/conf')
        Execute("echo 'export PATH=%s/bin:$PATH'>/etc/profile.d/hbase.sh" %
                params.install_dir)
        Execute('chown -R %s:%s %s/%s' %
                (params.hbase_user, params.user_group, params.stack_root,
                 params.version_dir))
        Execute('chown -R %s:%s %s' %
                (params.hbase_user, params.user_group, params.install_dir))
        Execute('/bin/rm -f /tmp/' + params.filename)
Example #10
def install_registry():
    import params
    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir) or not os.path.exists(
                              params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        Execute('wget ' + params.download_url + ' -O /tmp/' + params.filename,
                user=params.registry_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir +
                ' ' + params.install_dir)
        Execute(' mkdir -p ' + params.conf_dir + ' && cp -r ' +
                params.install_dir + '/conf/* ' + params.conf_dir)
        Execute(' rm -rf ' + params.install_dir + '/conf')
        Execute('ln -s ' + params.conf_dir + ' ' + params.install_dir +
                '/conf')

        Execute('chown -R %s:%s %s/%s' %
                (params.registry_user, params.user_group,
                 Script.get_stack_root(), params.version_dir))
        Execute('chown -R %s:%s %s' %
                (params.registry_user, params.user_group, params.install_dir))
        Execute('/bin/rm -f /tmp/' + params.filename)
Example #11
def install_knox():
    import params
    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir) or not os.path.exists(
                              params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        Execute('wget ' + params.download_url + ' -O /tmp/' + params.filename,
                user=params.knox_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir +
                ' ' + params.install_dir)
        Execute(' mkdir -p ' + params.knox_conf_dir + ' && cp -r ' +
                params.install_dir + '/conf/* ' + params.knox_conf_dir)
        Execute(' rm -rf ' + params.install_dir + '/conf')
        Execute('ln -s ' + params.knox_conf_dir + ' ' + params.install_dir +
                '/conf')

        Execute(' rm -rf ' + params.install_dir + '/pids')
        Execute('ln -s ' + params.knox_pid_dir + ' ' + params.install_dir +
                '/pids')

        Execute("echo 'export PATH=%s/bin:$PATH'>/etc/profile.d/knox.sh" %
                params.install_dir)
        Execute('chown -R %s:%s %s/%s' %
                (params.knox_user, params.knox_group, Script.get_stack_root(),
                 params.version_dir))
        Execute('chown -R %s:%s %s' %
                (params.knox_user, params.knox_group, params.install_dir))
        Execute('/bin/rm -f /tmp/' + params.filename)
Example #12
def install_opentsdb():
    import params
    Directory(
        [params.conf_dir, params.opentsdb_pid_dir, params.opentsdb_log_dir],
        owner=params.opentsdb_user,
        group=params.opentsdb_group,
        mode=0775,
        create_parents=True)
    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir) or not os.path.exists(
                              params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        Execute('wget ' + params.download_url + ' -O /tmp/' + params.filename,
                user=params.opentsdb_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir +
                ' ' + params.install_dir)
        Execute('chown -R %s:%s %s/%s' %
                (params.opentsdb_user, params.opentsdb_group,
                 Script.get_stack_root(), params.version_dir))
        Execute(
            'chown -R %s:%s %s' %
            (params.opentsdb_user, params.opentsdb_group, params.install_dir))
        Execute('/bin/rm -f /tmp/' + params.filename)
        Execute(params.install_dir + '/tools/create_table.sh')
Example #13
def install_flinksql():
    import params
    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.streamsql_version_dir) or not os.path.exists(
                              params.streamsql_install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.streamsql_version_dir)
        Execute('rm -rf %s' % params.streamsql_install_dir,
                ignore_failures=True)
        Execute('wget ' + params.streamsql_download_url + ' -O /tmp/' +
                params.streamsql_filename,
                user=params.flink_user)
        Execute('tar -zxf /tmp/' + params.streamsql_filename + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' +
                params.streamsql_version_dir + ' ' +
                params.streamsql_install_dir)
        Execute("echo 'export PATH=%s/bin:$PATH'>/etc/profile.d/streamsql.sh" %
                params.streamsql_install_dir)
        Execute('chown -R %s:%s %s/%s' %
                (params.flink_user, params.flink_group, params.stack_root,
                 params.streamsql_version_dir))
        Execute('chown -R %s:%s %s' % (params.flink_user, params.flink_group,
                                       params.streamsql_install_dir))
        Execute('/bin/rm -f /tmp/' + params.streamsql_filename)
Example #14
def install_presto():
    install_presto_plugin()
    import params
    Directory([
        params.conf_dir, params.data_dir, params.log_dir, params.pid_dir,
        params.conf_dir + '/catalog'
    ],
              owner=params.presto_user,
              group=params.user_group,
              mode=0775,
              create_parents=True)
    if not os.path.exists(Script.get_stack_root() + '/' + params.version_dir) or not os.path.exists(
            params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' + params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        Execute(
            'wget ' + params.download_url + ' -O /tmp/' + params.filename,
            user=params.presto_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C  ' + Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir + ' ' + params.install_dir)
        Execute('rm -rf ' + params.install_dir + '/etc')
        Execute('ln -s ' + params.conf_dir + ' ' + params.install_dir + '/etc')
        Execute('chown -R %s:%s %s/%s' % (params.presto_user, params.user_group, Script.get_stack_root(),params.version_dir))
        Execute('chown -R %s:%s %s' % (params.presto_user, params.user_group,
                                       params.install_dir))
        Execute('/bin/rm -rf /etc/presto/catalog /tmp/' + params.filename)
Example #15
def install_druid():
    import params
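    # Unlike the other installers here, the 'rm -rf' cleanup steps are left
    # commented out, so a re-run extracts over any existing version directory.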
    Directory([params.druid_conf_dir],
              owner=params.druid_user,
              group=params.user_group,
              mode=0775,
              create_parents=True)
    print(params.download_url, params.version_dir,
          Script.get_stack_root() + '/' + params.version_dir,
          params.install_dir)
    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir) or not os.path.exists(
                              params.install_dir):
        #Execute('rm -rf %s' % Script.get_stack_root() + '/' + params.version_dir)
        #Execute('rm -rf %s' % params.install_dir)
        Execute('wget ' + params.download_url + ' -O /tmp/' + params.filename,
                user=params.druid_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C  ' +
                Script.get_stack_root())
        Execute('rm -rf ' + params.install_dir + '/conf/druid')
        Execute('ln -s ' + params.druid_conf_dir + ' ' + params.install_dir +
                '/conf/druid')
        Execute("echo 'export PATH=%s/bin:$PATH'>/etc/profile.d/druid.sh" %
                params.install_dir)
        Execute('chown -R %s:%s %s' %
                (params.druid_user, params.user_group, params.install_dir))
        Execute('/bin/rm -f /tmp/' + params.filename)

    install_extension()
Example #16
def install_webadmin():
    import params
    Directory([params.log_dir, params.conf_dir],
              owner='nobody',
              group='nobody',
              mode=0755,
              create_parents=True)
    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir_admin) or not os.path.exists(
                              params.install_dir_admin):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.version_dir_admin)
        Execute('rm -rf %s' % params.install_dir_admin)
        Execute('/bin/rm -f /tmp/' + params.filename_admin)
        Execute('wget ' + params.download_url_admin + ' -O /tmp/' +
                params.filename_admin,
                user='******')
        Execute('tar -zxvf /tmp/' + params.filename_admin + ' -C  ' +
                Script.get_stack_root())
        Execute('chown -R %s:%s %s/%s' %
                ('nobody', 'nobody', Script.get_stack_root(),
                 params.version_dir_admin))
        Execute('chown -R %s:%s %s' %
                ('nobody', 'nobody', params.install_dir_admin))

        Execute('wget ' + params.download_url_soar + ' -O /usr/sbin/soar',
                user='******')
        Execute('chmod a+x /usr/sbin/soar')
Example #17
def install_livy():
    import params
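    # Gated on params.has_livyserver in addition to the usual existence check,
    # so the tarball is only fetched on hosts that run a Livy server.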
    Directory([params.livy2_conf],
              owner=params.livy2_user,
              group=params.user_group,
              mode=0775,
              create_parents=True)
    if (not os.path.exists(Script.get_stack_root() + '/' +
                           params.livy_version_dir)
            or not os.path.exists(
                params.livy_install_dir)) and params.has_livyserver:
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.livy_version_dir)
        Execute('rm -rf %s' % params.livy_install_dir)
        Execute('wget ' + params.livy_download_url + ' -O /tmp/' +
                params.livy_filename,
                user=params.livy2_user)
        Execute('tar -zxf /tmp/' + params.livy_filename + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' +
                params.livy_version_dir + ' ' + params.livy_install_dir)
        Execute(' rm -rf ' + params.livy_install_dir + '/conf')
        Execute('ln -s ' + params.livy2_conf + ' ' + params.livy_install_dir +
                '/conf')
        Execute('chown -R %s:%s %s/%s' %
                (params.livy2_user, params.livy2_group,
                 Script.get_stack_root(), params.livy_version_dir))
        Execute(
            'chown -R %s:%s %s' %
            (params.livy2_user, params.livy2_group, params.livy_install_dir))
        Execute('/bin/rm -f /tmp/' + params.livy_filename)
Example #18
def install_hadoop():
    import params
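    # Prepare the HDFS log, limits.d, and Hadoop conf directories up front;
    # the share-lib setup below runs on every invocation, not just installs.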
    Directory(
        params.hdfs_log_dir,
        owner=params.hdfs_user,
        group=params.user_group,
        create_parents=True,
        mode=0755)

    Directory(
        params.limits_conf_dir,
        create_parents=True,
        owner='root',
        group='root')

    Directory(
        params.hadoop_conf_dir,
        create_parents=True,
        owner='root',
        group='root')

    install_hadoop_share_lib()

    if not os.path.exists(Script.get_stack_root() + '/' + params.version_dir) or not os.path.exists(
            params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' + params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        Execute('/bin/rm -f /tmp/' + params.filename)
        Execute(
            'wget ' + params.download_url + ' -O /tmp/' + params.filename,
            user=params.hdfs_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C  ' + Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir + ' ' + params.install_dir)
        Execute(' rm -rf ' + params.install_dir + '/etc/hadoop')
        Execute('ln -s ' + params.hadoop_conf_dir + ' ' + params.install_dir +
                '/etc/hadoop')
        Execute('mkdir ' + params.install_dir + '/logs && chmod 777 ' +
                params.install_dir + '/logs')
        Execute("echo 'export PATH=%s/bin:$PATH'>/etc/profile.d/hadoop.sh" %
                params.install_dir)
        Execute('chown -R %s:%s %s/%s' %
                (params.hdfs_user, params.user_group, params.stack_root, params.version_dir))
        Execute('chown -R %s:%s %s' % (params.hdfs_user, params.user_group,
                                       params.install_dir))
        Execute('chmod -R 755 %s/%s' % (params.stack_root, params.version_dir))
        Execute('chown root:%s %s/bin/container-executor' %
                (params.user_group, params.install_dir))

        Execute('/bin/rm -f /tmp/' + params.filename)
Example #19
def setup_extensions_hive():
    import params
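    # Read the Hive custom-extension settings from hive-site, then refresh the
    # local extension dir for HiveServer2/client roles: always clean it, and
    # re-download from HDFS only while extensions are enabled.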

    hive_custom_extensions_enabled = default(
        "/configurations/hive-site/hive.custom-extensions.enabled", False)
    hive_custom_extensions_owner = default(
        "/configurations/hive-site/hive.custom-extensions.owner",
        params.hdfs_user)
    hive_custom_extensions_hdfs_dir = DEFAULT_HADOOP_HIVE_EXTENSION_DIR.format(
        params.major_stack_version)

    hive_custom_extensions_local_dir = "{0}/current/ext/hive".format(
        Script.get_stack_root())

    impacted_components = ['HIVE_SERVER', 'HIVE_CLIENT']
    role = params.config.get('role', '')

    # Run copying for HIVE_SERVER and HIVE_CLIENT
    if params.current_service == 'HIVE' and role in impacted_components:
        clean_extensions(hive_custom_extensions_local_dir)
        if hive_custom_extensions_enabled:
            download_extensions(hive_custom_extensions_owner,
                                params.user_group,
                                hive_custom_extensions_hdfs_dir,
                                hive_custom_extensions_local_dir)
Example #20
def setup_extensions():
    import params

    # Hadoop Custom extensions
    hadoop_custom_extensions_enabled = default(
        "/configurations/core-site/hadoop.custom-extensions.enabled", False)
    hadoop_custom_extensions_services = default(
        "/configurations/core-site/hadoop.custom-extensions.services", "")
    hadoop_custom_extensions_owner = default(
        "/configurations/core-site/hadoop.custom-extensions.owner",
        params.hdfs_user)
    hadoop_custom_extensions_services = [
        service.strip().upper()
        for service in hadoop_custom_extensions_services.split(",")
    ]
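    # YARN hosts always receive the Hadoop extensions, regardless of the
    # configured service list.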
    hadoop_custom_extensions_services.append("YARN")
    hadoop_custom_extensions_hdfs_dir = "/iop/ext/{0}/hadoop".format(
        params.stack_version_formatted)
    hadoop_custom_extensions_local_dir = "{0}/current/ext/hadoop".format(
        Script.get_stack_root())

    if params.current_service in hadoop_custom_extensions_services:
        clean_extensions(hadoop_custom_extensions_local_dir)
        if hadoop_custom_extensions_enabled:
            download_extensions(hadoop_custom_extensions_owner,
                                params.user_group,
                                hadoop_custom_extensions_hdfs_dir,
                                hadoop_custom_extensions_local_dir)

    setup_extensions_hive()

    hbase_custom_extensions_services = []
    hbase_custom_extensions_services.append("HBASE")
    if params.current_service in hbase_custom_extensions_services:
        setup_hbase_extensions()
Example #21
def setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file, xa_audit_db_password,
                                ssl_truststore_password, ssl_keystore_password, component_user, component_group, java_home):
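  # Build the ranger_credential_helper.py invocation (NiFi ships its plugin
  # under ext/ranger), store the audit-DB and SSL store passwords in the JCEKS
  # credential file, then restrict the file to the component user and group.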

  stack_root = Script.get_stack_root()
  service_name = str(service_name).lower()
  cred_lib_path = format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/install/lib/*')
  cred_setup_prefix = (format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/ranger_credential_helper.py'), '-l', cred_lib_path)

  if service_name == 'nifi':
    cred_lib_path = format('{stack_root}/{stack_version}/{service_name}/ext/ranger/install/lib/*')
    cred_setup_prefix = (format('{stack_root}/{stack_version}/{service_name}/ext/ranger/scripts/ranger_credential_helper.py'), '-l', cred_lib_path)

  if audit_db_is_enabled:
    cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'auditDBCred', '-v', PasswordString(xa_audit_db_password), '-c', '1')
    Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True)

  cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'sslKeyStore', '-v', PasswordString(ssl_keystore_password), '-c', '1')
  Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True)

  cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'sslTrustStore', '-v', PasswordString(ssl_truststore_password), '-c', '1')
  Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True)

  File(credential_file,
    owner = component_user,
    group = component_group,
    mode = 0640
  )
Example #22
def get_package_dirs():
  """
  Get package dir mappings
  :return:
  """
  stack_name = default("/hostLevelParams/stack_name", None)
  if stack_name is None:
    raise Fail("The stack name is not present in the command. Packages for conf-select tool cannot be loaded.")

  stack_packages_config = default("/configurations/cluster-env/stack_packages", None)
  if stack_packages_config is None:
    raise Fail("The stack packages are not defined on the command. Unable to load packages for the conf-select tool")

  data = json.loads(stack_packages_config)

  if stack_name not in data:
    raise Fail(
      "Cannot find conf-select packages for the {0} stack".format(stack_name))

  conf_select_key = "conf-select"
  data = data[stack_name]
  if conf_select_key not in data:
    raise Fail(
      "There are no conf-select packages defined for this command for the {0} stack".format(stack_name))

  package_dirs = data[conf_select_key]

  stack_root = Script.get_stack_root()
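  # Substitute the real stack root into each package's current_dir template.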
  for package_name, directories in package_dirs.iteritems():
    for dir in directories:
      current_dir = dir['current_dir']
      current_dir =  current_dir.format(stack_root)
      dir['current_dir'] = current_dir

  return package_dirs
Example #23
    def actionexecute(self, env):
        num_errors = 0

        # Parse parameters
        config = Script.get_config()

        try:
            command_repository = CommandRepository(config['repositoryFile'])
        except KeyError:
            raise Fail(
                "The command repository indicated by 'repositoryFile' was not found"
            )

        repo_rhel_suse = config['configurations']['cluster-env'][
            'repo_suse_rhel_template']
        repo_ubuntu = config['configurations']['cluster-env'][
            'repo_ubuntu_template']
        template = repo_rhel_suse if OSCheck.is_redhat_family(
        ) or OSCheck.is_suse_family() else repo_ubuntu

        # Handle a SIGTERM and SIGINT gracefully
        signal.signal(signal.SIGTERM, self.abort_handler)
        signal.signal(signal.SIGINT, self.abort_handler)

        self.repository_version = command_repository.version_string

        # Select dict that contains parameters
        try:
            package_list = json.loads(config['roleParams']['package_list'])
            stack_id = config['roleParams']['stack_id']
        except KeyError:
            pass

        self.stack_name = Script.get_stack_name()
        if self.stack_name is None:
            raise Fail("Cannot determine the stack name")

        self.stack_root_folder = Script.get_stack_root()
        if self.stack_root_folder is None:
            raise Fail("Cannot determine the stack's root directory")

        if self.repository_version is None:
            raise Fail("Cannot determine the repository version to install")

        self.repository_version = self.repository_version.strip()

        try:
            if 0 == len(command_repository.repositories):
                Logger.warning(
                    "Repository list is empty. Ambari may not be managing the repositories for {0}."
                    .format(self.repository_version))
            else:
                Logger.info(
                    "Will install packages for repository version {0}".format(
                        self.repository_version))
                create_repo_files(template, command_repository)
        except Exception, err:
            Logger.logger.exception(
                "Cannot install repository files. Error: {0}".format(str(err)))
            num_errors += 1
Example #24
def get_hadoop_dir_for_stack_version(target, stack_version):
    """
  Return the hadoop shared directory for the provided stack version. This is necessary
  when folder paths of downgrade-source stack-version are needed after <stack-selector-tool>.
  :target: the target directory
  :stack_version: stack version to get hadoop dir for
  """

    stack_root = Script.get_stack_root()
    if not target in HADOOP_DIR_DEFAULTS:
        raise Fail("Target {0} not defined".format(target))

    hadoop_dir = HADOOP_DIR_DEFAULTS[target]

    formatted_stack_version = format_stack_version(stack_version)
    if formatted_stack_version and check_stack_feature(
            StackFeature.ROLLING_UPGRADE, formatted_stack_version):
        # home uses a different template
        if target == "home":
            hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(
                stack_root, stack_version, "hadoop")
        else:
            hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, stack_version,
                                                    "hadoop", target)

    return hadoop_dir
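(For reference: HADOOP_DIR_TEMPLATE and HADOOP_HOME_DIR_TEMPLATE above are module-level constants. A plausible shape, assumed here rather than copied from the source, is:

    HADOOP_HOME_DIR_TEMPLATE = "{0}/{1}/{2}"      # <stack-root>/<version>/hadoop
    HADOOP_DIR_TEMPLATE = "{0}/{1}/{2}/{3}"       # <stack-root>/<version>/hadoop/<target>

so "home" simply omits the trailing target segment.)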
Example #25
def select_all(version_to_select):
    """
  Executes <stack-selector-tool> on every component for the specified version. If the value passed in is a
  stack version such as "2.3", then this will find the latest installed version which
  could be "2.3.0.0-9999". If a version is specified instead, such as 2.3.0.0-1234, it will use
  that exact version.
  :param version_to_select: the version to <stack-selector-tool> on, such as "2.3" or "2.3.0.0-1234"
  """
    stack_root = Script.get_stack_root()
    (stack_selector_name, stack_selector_path,
     stack_selector_package) = stack_tools.get_stack_tool(
         stack_tools.STACK_SELECTOR_NAME)
    # it's an error, but it shouldn't really stop anything from working
    if version_to_select is None:
        Logger.error(
            format(
                "Unable to execute {stack_selector_name} after installing because there was no version specified"
            ))
        return

    Logger.info("Executing {0} set all on {1}".format(stack_selector_name,
                                                      version_to_select))
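    # Resolve the newest installed build matching the requested version via
    # '<selector> versions | grep | tail -1', then run 'set all' against it.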

    command = format(
        '{sudo} {stack_selector_path} set all `ambari-python-wrap {stack_selector_path} versions | grep ^{version_to_select} | tail -1`'
    )
    only_if_command = format('ls -d {stack_root}/{version_to_select}*')
    Execute(command, only_if=only_if_command)
Example #26
def get_hadoop_conf_dir():
    """
  Return the hadoop shared conf directory which should be used for the command's component. The
  directory including the component's version is tried first, but if that doesn't exist,
  this will fallback to using "current".
  """
    stack_root = Script.get_stack_root()
    stack_version = Script.get_stack_version()

    hadoop_conf_dir = os.path.join(os.path.sep, "etc", "hadoop", "conf")
    if check_stack_feature(StackFeature.CONFIG_VERSIONING, stack_version):
        # read the desired version from the component map and use that for building the hadoop home
        version = component_version.get_component_repository_version()
        if version is None:
            version = default("/commandParams/version", None)

        hadoop_conf_dir = os.path.join(stack_root, str(version), "hadoop",
                                       "conf")
        if version is None or sudo.path_isdir(hadoop_conf_dir) is False:
            hadoop_conf_dir = os.path.join(stack_root, "current",
                                           "hadoop-client", "conf")

        Logger.info("Using hadoop conf dir: {0}".format(hadoop_conf_dir))

    return hadoop_conf_dir
Example #27
def get_hadoop_dir(target):
  """
  Return the hadoop shared directory which should be used for the command's component. The
  directory including the component's version is tried first, but if that doesn't exist,
  this will fallback to using "current".

  :target: the target directory
  """
  stack_root = Script.get_stack_root()
  stack_version = Script.get_stack_version()

  if not target in HADOOP_DIR_DEFAULTS:
    raise Fail("Target {0} not defined".format(target))

  hadoop_dir = HADOOP_DIR_DEFAULTS[target]

  formatted_stack_version = format_stack_version(stack_version)

  if stack_features.check_stack_feature(StackFeature.ROLLING_UPGRADE, formatted_stack_version):
    # read the desired version from the component map and use that for building the hadoop home
    version = component_version.get_component_repository_version()
    if version is None:
      version = default("/commandParams/version", None)

    # home uses a different template
    if target == "home":
      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, version, "hadoop")
      if version is None or sudo.path_isdir(hadoop_dir) is False:
        hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, "current", "hadoop-client")
    else:
      hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, version, "hadoop", target)
      if version is None or sudo.path_isdir(hadoop_dir) is False:
        hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, "current", "hadoop-client", target)

  return hadoop_dir
Example #28
def setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file, xa_audit_db_password,
                                ssl_truststore_password, ssl_keystore_password, component_user, component_group, java_home, cred_lib_path_override = None, cred_setup_prefix_override = None):

  stack_root = Script.get_stack_root()
  service_name = str(service_name).lower()

  if cred_lib_path_override is not None:
    cred_lib_path = cred_lib_path_override
  else:
    cred_lib_path = format('{stack_root}/ranger-{service_name}-plugin/install/lib/*')

  if cred_setup_prefix_override is not None:
    cred_setup_prefix = cred_setup_prefix_override
  else:
    cred_setup_prefix = (format('{stack_root}/ranger-{service_name}-plugin/ranger_credential_helper.py'), '-l', cred_lib_path)

  if audit_db_is_enabled:
    cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'auditDBCred', '-v', PasswordString(xa_audit_db_password), '-c', '1')
    Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True)

  cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'sslKeyStore', '-v', PasswordString(ssl_keystore_password), '-c', '1')
  Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True)

  cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'sslTrustStore', '-v', PasswordString(ssl_truststore_password), '-c', '1')
  Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True)

  File(credential_file,
    owner = component_user,
    group = component_group,
    mode = 0640
  )
Example #29
def setup_hbase_extensions():
    import params

    # HBase Custom extensions
    hbase_custom_extensions_enabled = default(
        "/configurations/hbase-site/hbase.custom-extensions.enabled", False)
    hbase_custom_extensions_owner = default(
        "/configurations/hbase-site/hbase.custom-extensions.owner",
        params.hdfs_user)
    hbase_custom_extensions_hdfs_dir = get_config_formatted_value(
        default("/configurations/hbase-site/hbase.custom-extensions.root",
                DEFAULT_HADOOP_HBASE_EXTENSION_DIR))
    hbase_custom_extensions_local_dir = "{0}/ext/hbase".format(
        Script.get_stack_root())

    impacted_components = [
        'HBASE_MASTER', 'HBASE_REGIONSERVER', 'PHOENIX_QUERY_SERVER'
    ]
    role = params.config.get('role', '')

    if role in impacted_components:
        clean_extensions(hbase_custom_extensions_local_dir)
        if hbase_custom_extensions_enabled:
            download_extensions(hbase_custom_extensions_owner,
                                params.user_group,
                                hbase_custom_extensions_hdfs_dir,
                                hbase_custom_extensions_local_dir)
Example #30
def execute(configurations={}, parameters={}, host_name=None):
  """
  Checks if the stack selector such as hdp-select can find versions installed on this host. E.g.,
  hdp-select versions
  Returns a tuple containing the result code and a pre-formatted result label

  Keyword arguments:
  configurations (dictionary): a mapping of configuration key to value
  parameters (dictionary): a mapping of script parameter key to value
  host_name (string): the name of this host where the alert is running
  """
  msg = []
  try:
    if configurations is None:
      return (RESULT_STATE_UNKNOWN, ['There were no configurations supplied to the script.'])

    # Check required properties
    if STACK_TOOLS not in configurations:
      return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(STACK_TOOLS)])

    stack_name = Script.get_stack_name()

    # Of the form,
    # { "HDP" : { "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] } }
    stack_tools_str = configurations[STACK_TOOLS]

    if stack_tools_str is None:
      return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script and the value is null'.format(STACK_TOOLS)])

    distro_select = "unknown-distro-select"
    try:
      stack_tools = json.loads(stack_tools_str)
      stack_tools = stack_tools[stack_name]
      distro_select = stack_tools["stack_selector"][0]
    except:
      pass

    # This may not exist if the host does not contain any stack components,
    # or only contains components like Ambari Metrics and SmartSense
    stack_root_dir = Script.get_stack_root()

    if os.path.isdir(stack_root_dir):
      (code, out, versions) = unsafe_get_stack_versions()

      if code == 0:
        msg.append("{0} ".format(distro_select))
        if versions is not None and type(versions) is list and len(versions) > 0:
          msg.append("reported the following versions: {0}".format(", ".join(versions)))
        return (RESULT_STATE_OK, ["\n".join(msg)])
      else:
        msg.append("{0} could not properly read {1}. Check this directory for unexpected contents.".format(distro_select, stack_root_dir))
        if out is not None:
          msg.append(out)

        return (RESULT_STATE_CRITICAL, ["\n".join(msg)])
    else:
      msg.append("No stack root {0} to check.".format(stack_root_dir))
      return (RESULT_STATE_OK, ["\n".join(msg)])
  except Exception, e:
    return (RESULT_STATE_CRITICAL, [e.message])
Example #31
def get_tarball_paths(name,
                      use_upgrading_version_during_upgrade=True,
                      custom_source_file=None,
                      custom_dest_file=None):
    """
  For a given tarball name, get the source and destination paths to use.
  :param name: Tarball name
  :param use_upgrading_version_during_upgrade:
  :param custom_source_file: If specified, use this source path instead of the default one from the map.
  :param custom_dest_file: If specified, use this destination path instead of the default one from the map.
  :return: A tuple of (success status, source path, destination path)
  """
    stack_name = Script.get_stack_name()

    if not stack_name:
        Logger.error(
            "Cannot copy {0} tarball to HDFS because stack name could not be determined."
            .format(str(name)))
        return (False, None, None)

    stack_version = get_current_version(use_upgrading_version_during_upgrade)
    if not stack_version:
        Logger.error(
            "Cannot copy {0} tarball to HDFS because stack version could be be determined."
            .format(str(name)))
        return (False, None, None)

    stack_root = Script.get_stack_root()
    if not stack_root:
        Logger.error(
            "Cannot copy {0} tarball to HDFS because stack root could be be determined."
            .format(str(name)))
        return (False, None, None)

    if name is None or name.lower() not in TARBALL_MAP:
        Logger.error(
            "Cannot copy tarball to HDFS because {0} is not supported in stack {1} for this operation."
            .format(str(name), str(stack_name)))
        return (False, None, None)
    (source_file, dest_file) = TARBALL_MAP[name.lower()]

    if custom_source_file is not None:
        source_file = custom_source_file

    if custom_dest_file is not None:
        dest_file = custom_dest_file

    source_file = source_file.replace(STACK_NAME_PATTERN, stack_name.lower())
    dest_file = dest_file.replace(STACK_NAME_PATTERN, stack_name.lower())

    source_file = source_file.replace(STACK_ROOT_PATTERN, stack_root.lower())
    dest_file = dest_file.replace(STACK_ROOT_PATTERN, stack_root.lower())

    source_file = source_file.replace(STACK_VERSION_PATTERN, stack_version)
    dest_file = dest_file.replace(STACK_VERSION_PATTERN, stack_version)

    return (True, source_file, dest_file)
Example #32
def setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list):
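  # Symlink every plugin jar into each component's lib dir; the not_if/only_if
  # guards keep the operation idempotent and skip jars that are absent.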

  stack_root = Script.get_stack_root()
  jar_files = os.listdir(format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/lib'))

  for jar_file in jar_files:
    for component in component_list:
      Execute(('ln','-sf',format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/lib/{jar_file}'),format('{stack_root}/current/{component}/lib/{jar_file}')),
      not_if=format('ls {stack_root}/current/{component}/lib/{jar_file}'),
      only_if=format('ls {stack_root}/{stack_version}/ranger-{service_name}-plugin/lib/{jar_file}'),
      sudo=True)
Example #33
def get_package_dirs():
  """
  Get package dir mappings
  :return:
  """
  stack_root = Script.get_stack_root()
  package_dirs = copy.deepcopy(_PACKAGE_DIRS)
  for package_name, directories in package_dirs.iteritems():
    for dir in directories:
      current_dir = dir['current_dir']
      current_dir = current_dir.replace(STACK_ROOT_PATTERN, stack_root)
      dir['current_dir'] = current_dir
  return package_dirs
Example #34
def get_hadoop_dir(target, force_latest_on_upgrade=False):
  """
  Return the hadoop shared directory in the following override order
  1. Use default for 2.1 and lower
  2. If 2.2 and higher, use <stack-root>/current/hadoop-client/{target}
  3. If 2.2 and higher AND for an upgrade, use <stack-root>/<version>/hadoop/{target}.
  However, if the upgrade has not yet invoked <stack-selector-tool>, return the current
  version of the component.
  :target: the target directory
  :force_latest_on_upgrade: if True, then this will return the "current" directory
  without the stack version built into the path, such as <stack-root>/current/hadoop-client
  """
  stack_root = Script.get_stack_root()
  stack_version = Script.get_stack_version()

  if not target in HADOOP_DIR_DEFAULTS:
    raise Fail("Target {0} not defined".format(target))

  hadoop_dir = HADOOP_DIR_DEFAULTS[target]

  formatted_stack_version = format_stack_version(stack_version)
  if formatted_stack_version and  check_stack_feature(StackFeature.ROLLING_UPGRADE, formatted_stack_version):
    # home uses a different template
    if target == "home":
      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, "current", "hadoop-client")
    else:
      hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, "current", "hadoop-client", target)

    # if we are not forcing "current" for HDP 2.2, then attempt to determine
    # if the exact version needs to be returned in the directory
    if not force_latest_on_upgrade:
      stack_info = _get_upgrade_stack()

      if stack_info is not None:
        stack_version = stack_info[1]

        # determine if <stack-selector-tool> has been run and if not, then use the current
        # hdp version until this component is upgraded
        current_stack_version = get_role_component_current_stack_version()
        if current_stack_version is not None and stack_version != current_stack_version:
          stack_version = current_stack_version

        if target == "home":
          # home uses a different template
          hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop")
        else:
          hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop", target)

  return hadoop_dir
Example #35
def get_tarball_paths(name, use_upgrading_version_during_upgrade=True, custom_source_file=None, custom_dest_file=None):
  """
  For a given tarball name, get the source and destination paths to use.
  :param name: Tarball name
  :param use_upgrading_version_during_upgrade:
  :param custom_source_file: If specified, use this source path instead of the default one from the map.
  :param custom_dest_file: If specified, use this destination path instead of the default one from the map.
  :return: A tuple of (success status, source path, destination path)
  """
  stack_name = Script.get_stack_name()

  if not stack_name:
    Logger.error("Cannot copy {0} tarball to HDFS because stack name could not be determined.".format(str(name)))
    return (False, None, None)

  stack_version = get_current_version(use_upgrading_version_during_upgrade)
  if not stack_version:
    Logger.error("Cannot copy {0} tarball to HDFS because stack version could be be determined.".format(str(name)))
    return (False, None, None)

  stack_root = Script.get_stack_root()
  if not stack_root:
    Logger.error("Cannot copy {0} tarball to HDFS because stack root could be be determined.".format(str(name)))
    return (False, None, None)

  if name is None or name.lower() not in TARBALL_MAP:
    Logger.error("Cannot copy tarball to HDFS because {0} is not supported in stack {1} for this operation.".format(str(name), str(stack_name)))
    return (False, None, None)
  (source_file, dest_file) = TARBALL_MAP[name.lower()]

  if custom_source_file is not None:
    source_file = custom_source_file

  if custom_dest_file is not None:
    dest_file = custom_dest_file

  source_file = source_file.replace(STACK_NAME_PATTERN, stack_name.lower())
  dest_file = dest_file.replace(STACK_NAME_PATTERN, stack_name.lower())

  source_file = source_file.replace(STACK_ROOT_PATTERN, stack_root.lower())
  dest_file = dest_file.replace(STACK_ROOT_PATTERN, stack_root.lower())

  source_file = source_file.replace(STACK_VERSION_PATTERN, stack_version)
  dest_file = dest_file.replace(STACK_VERSION_PATTERN, stack_version)

  return (True, source_file, dest_file)
Example #36
def get_stack_version_before_install(component_name):
  """
  Works in a similar way to '<stack-selector-tool> status component',
  but also works for not yet installed packages.
  
  Note: won't work if doing initial install.
  """
  stack_root = Script.get_stack_root()
  component_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, "current", component_name)
  stack_selector_name = stack_tools.get_stack_tool_name(stack_tools.STACK_SELECTOR_NAME)
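  # <stack-root>/current/<component> is a symlink into the versioned tree; the
  # version is recovered as the name of the link target's parent directory.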
  if os.path.islink(component_dir):
    stack_version = os.path.basename(os.path.dirname(os.readlink(component_dir)))
    match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', stack_version)
    if match is None:
      Logger.info('Failed to get extracted version with {0} in method get_stack_version_before_install'.format(stack_selector_name))
      return None # lazy fail
    return stack_version
  else:
    return None
Example #37
def select_all(version_to_select):
  """
  Executes <stack-selector-tool> on every component for the specified version. If the value passed in is a
  stack version such as "2.3", then this will find the latest installed version which
  could be "2.3.0.0-9999". If a version is specified instead, such as 2.3.0.0-1234, it will use
  that exact version.
  :param version_to_select: the version to <stack-selector-tool> on, such as "2.3" or "2.3.0.0-1234"
  """
  stack_root = Script.get_stack_root()
  (stack_selector_name, stack_selector_path, stack_selector_package) = stack_tools.get_stack_tool(stack_tools.STACK_SELECTOR_NAME)
  # it's an error, but it shouldn't really stop anything from working
  if version_to_select is None:
    Logger.error(format("Unable to execute {stack_selector_name} after installing because there was no version specified"))
    return

  Logger.info("Executing {0} set all on {1}".format(stack_selector_name, version_to_select))

  command = format('{sudo} {stack_selector_path} set all `ambari-python-wrap {stack_selector_path} versions | grep ^{version_to_select} | tail -1`')
  only_if_command = format('ls -d {stack_root}/{version_to_select}*')
  Execute(command, only_if = only_if_command)
Example #38
def get_hadoop_dir_for_stack_version(target, stack_version):
  """
  Return the hadoop shared directory for the provided stack version. This is necessary
  when folder paths of downgrade-source stack-version are needed after <stack-selector-tool>.
  :target: the target directory
  :stack_version: stack version to get hadoop dir for
  """

  stack_root = Script.get_stack_root()
  if not target in HADOOP_DIR_DEFAULTS:
    raise Fail("Target {0} not defined".format(target))

  hadoop_dir = HADOOP_DIR_DEFAULTS[target]

  formatted_stack_version = format_stack_version(stack_version)
  if formatted_stack_version and  check_stack_feature(StackFeature.ROLLING_UPGRADE, formatted_stack_version):
    # home uses a different template
    if target == "home":
      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop")
    else:
      hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop", target)

  return hadoop_dir
Example #39
def get_hadoop_conf_dir(force_latest_on_upgrade=False):
  """
  Gets the shared hadoop conf directory using:
  1.  Start with /etc/hadoop/conf
  2.  When the stack is greater than HDP-2.2, use <stack-root>/current/hadoop-client/conf
  3.  Only when doing a RU and HDP-2.3 or higher, use the value as computed
      by <conf-selector-tool>.  This is in the form <stack-root>/VERSION/hadoop/conf to make sure
      the configs are written in the correct place. However, if the component itself has
      not yet been upgraded, it should use the hadoop configs from the prior version.
      This will perform a <stack-selector-tool> status to determine which version to use.
  :param force_latest_on_upgrade: if True, force the returned path to always be that of
  the upgrade-target version, even if <stack-selector-tool> has not been called yet. This
  is primarily used by hooks like before-ANY to ensure that hadoop environment
  configurations are written to the correct location, since they are written out
  before <stack-selector-tool>/<conf-selector-tool> have been called.
  """
  hadoop_conf_dir = "/etc/hadoop/conf"
  stack_name = None
  stack_root = Script.get_stack_root()
  stack_version = Script.get_stack_version()
  version = None
  allow_setting_conf_select_symlink = False

  if not Script.in_stack_upgrade():
    # During normal operation, use the symlinked conf dir whenever the stack supports it
    if stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version):
      hadoop_conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")

    if stack_version and check_stack_feature(StackFeature.CONFIG_VERSIONING, stack_version):
      hadoop_conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")
      stack_name = default("/hostLevelParams/stack_name", None)
      version = default("/commandParams/version", None)

      if stack_name and version:
        version = str(version)
        allow_setting_conf_select_symlink = True
  else:
    # During an upgrade/downgrade, which can be a Rolling or Express Upgrade, need to calculate it based on the version
    '''
    Whenever upgrading to HDP 2.2, or downgrading back to 2.2, need to use /etc/hadoop/conf
    Whenever upgrading to HDP 2.3, or downgrading back to 2.3, need to use a versioned hadoop conf dir

    Type__|_Source_|_Target_|_Direction_____________|_Comment_____________________________________________________________
    Normal|        | 2.2    |                       | Use /etc/hadoop/conf
    Normal|        | 2.3    |                       | Use /etc/hadoop/conf, which should be a symlink to <stack-root>/current/hadoop-client/conf
    EU    | 2.1    | 2.3    | Upgrade               | Use versioned <stack-root>/current/hadoop-client/conf
          |        |        | No Downgrade Allowed  | Invalid
    EU/RU | 2.2    | 2.2.*  | Any                   | Use <stack-root>/current/hadoop-client/conf
    EU/RU | 2.2    | 2.3    | Upgrade               | Use <stack-root>/$version/hadoop/conf, which should be a symlink destination
          |        |        | Downgrade             | Use <stack-root>/current/hadoop-client/conf
    EU/RU | 2.3    | 2.3.*  | Any                   | Use <stack-root>/$version/hadoop/conf, which should be a symlink destination
    '''

    # The "stack_version" is the desired stack, e.g., 2.2 or 2.3
    # In an RU, it is always the desired stack, and doesn't change even during the Downgrade!
    # In an RU Downgrade from HDP 2.3 to 2.2, the first thing we do is 
    # rm /etc/[component]/conf and then mv /etc/[component]/conf.backup /etc/[component]/conf
    if stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version):
      hadoop_conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")

      # This contains the "version", including the build number, that is actually used during a stack upgrade and
      # is the version upgrading/downgrading to.
      stack_info = stack_select._get_upgrade_stack()

      if stack_info is not None:
        stack_name = stack_info[0]
        version = stack_info[1]
      else:
        raise Fail("Unable to get parameter 'version'")
      
      Logger.info("In the middle of a stack upgrade/downgrade for Stack {0} and destination version {1}, determining which hadoop conf dir to use.".format(stack_name, version))
      # This is the version either upgrading or downgrading to.
      if version and check_stack_feature(StackFeature.CONFIG_VERSIONING, version):
        # Determine if <stack-selector-tool> has been run and if not, then use the current
        # hdp version until this component is upgraded.
        if not force_latest_on_upgrade:
          current_stack_version = stack_select.get_role_component_current_stack_version()
          if current_stack_version is not None and version != current_stack_version:
            version = current_stack_version
            stack_selector_name = stack_tools.get_stack_tool_name(stack_tools.STACK_SELECTOR_NAME)
            Logger.info("{0} has not yet been called to update the symlink for this component, "
                        "keep using version {1}".format(stack_selector_name, current_stack_version))

        # Only change the hadoop_conf_dir path, don't <conf-selector-tool> this older version
        hadoop_conf_dir = os.path.join(stack_root, version, "hadoop", "conf")
        Logger.info("Hadoop conf dir: {0}".format(hadoop_conf_dir))

        allow_setting_conf_select_symlink = True

  if allow_setting_conf_select_symlink:
    # If not in the middle of an upgrade and on HDP 2.3 or higher, or if
    # upgrading stack to version 2.3.0.0 or higher (which may be upgrade or downgrade), then consider setting the
    # symlink for /etc/hadoop/conf.
    # If a host does not have any HDFS or YARN components (e.g., only ZK), then it will not contain /etc/hadoop/conf
    # Therefore, any calls to <conf-selector-tool> will fail.
    # For that reason, if the hadoop conf directory exists, then make sure it is set.
    if os.path.exists(hadoop_conf_dir):
      conf_selector_name = stack_tools.get_stack_tool_name(stack_tools.CONF_SELECTOR_NAME)
      Logger.info("The hadoop conf dir {0} exists, will call {1} on it for version {2}".format(
              hadoop_conf_dir, conf_selector_name, version))
      select(stack_name, "hadoop", version)

  Logger.info("Using hadoop conf dir: {0}".format(hadoop_conf_dir))
  return hadoop_conf_dir
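A minimal sketch of a typical caller, such as a before-ANY hook that forces the
upgrade-target path so hadoop-env.sh lands in the right place before the selector tools
run; params.hdfs_user, params.user_group, and params.hadoop_env_sh_template are assumed
names, not part of the example:

# Hypothetical before-ANY hook usage: force the upgrade-target conf path.
hadoop_conf_dir = get_hadoop_conf_dir(force_latest_on_upgrade=True)
File(os.path.join(hadoop_conf_dir, "hadoop-env.sh"),
     owner=params.hdfs_user,      # assumed param
     group=params.user_group,     # assumed param
     content=InlineTemplate(params.hadoop_env_sh_template))  # assumed param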
Example #40
# a map of the Ambari role to the component name
# for use with <stack-root>/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
  'SPARK2_JOBHISTORYSERVER' : 'spark2-historyserver',
  'SPARK2_CLIENT' : 'spark2-client',
  'SPARK2_THRIFTSERVER' : 'spark2-thriftserver'
}

component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SPARK2_CLIENT")
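# Assumed semantics of Script.get_component_from_role (not shown in this example): it looks
# up the role the current command is running for in SERVER_ROLE_DIRECTORY_MAP and falls back
# to the entry for the given default role ("SPARK2_CLIENT" -> "spark2-client") when the role
# is not in the map.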

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)

spark_conf = '/etc/spark2/conf'
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")

if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
  hadoop_home = stack_select.get_hadoop_dir("home")
  spark_conf = format("{stack_root}/current/{component_directory}/conf")
  spark_log_dir = config['configurations']['spark2-env']['spark_log_dir']