Example 1
    def initdb(self, env):
        import params
        # materialize the bundled init.sql onto the host, then run it through mysql
        File('/tmp/wherehows_init.sql',
             content=StaticFile("init.sql"),
             mode=0755)
        # mysql expects the password attached to -p with no intervening space
        Execute('mysql -u ' + params.mysql_user + ' -p' +
                params.mysql_password + ' </tmp/wherehows_init.sql')
Example 2
    def configure(self, env):
        import params
        env.set_params(params)
        File(params.install_dir_graphite_web + '/conf/dashboard.conf',
             content=StaticFile("dashboard.conf"),
             mode=0755)
        File(params.install_dir_graphite_web + '/conf/graphite.wsgi',
             content=StaticFile("graphite.wsgi"),
             mode=0755)
        File(params.install_dir_graphite_web + '/conf/graphTemplates.conf',
             content=StaticFile("graphTemplates.conf"),
             mode=0755)
        File(params.install_dir_graphite_web +
             '/webapp/graphite/local_settings.py',
             content=StaticFile("graphite_settings.py"),
             mode=0755)
Example 3
def update_credential_provider_path(config, config_type, dest_provider_path,
                                    file_owner, file_group):
    """
  Copies the JCEKS file for the specified config from the default location to the given location,
  and sets the ACLs for the specified owner and group. Also updates the config type's configuration
  hadoop credential store provider with the copied file name.
  :param config: configurations['configurations'][config_type]
  :param config_type: Like hive-site, oozie-site, etc.
  :param dest_provider_path: The full path to the file where the JCEKS provider file is to be copied to.
  :param file_owner: File owner
  :param file_group: Group
  :return: A copy of the config that was modified or the input config itself if nothing was modified.
  """
    # Get the path to the provider <config_type>.jceks
    if HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME in config:
        provider_path = config[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME]
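        # strip the 'jceks://file' scheme prefix to get the local filesystem path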
        src_provider_path = provider_path[len('jceks://file'):]
        File(dest_provider_path,
             owner=file_owner,
             group=file_group,
             mode=0640,
             content=StaticFile(src_provider_path))
        # make a copy of the config dictionary since it is read-only
        config_copy = config.copy()
        # overwrite the provider path with the path specified
        config_copy[
            HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME] = 'jceks://file{0}'.format(
                dest_provider_path)
        return config_copy
    return config
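A minimal sketch of a call site, with a hypothetical config dict and destination path (not from the original source):

    hive_site_config = config['configurations']['hive-site']
    updated = update_credential_provider_path(
        hive_site_config,                  # hypothetical
        'hive-site',
        '/etc/hive/conf/hive-site.jceks',  # hypothetical destination
        'hive',
        'hadoop')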
Example 4
    def configure(self, env):
        import params
        env.set_params(params)
        File('/etc/soar.yaml',
             content=InlineTemplate(params.soar_content),
             mode=0755,
             owner='mysql',
             group='mysql')
        File('/opt/archery/archery/settings.py',
             content=params.settings_content,
             mode=0755,
             owner='mysql',
             group='mysql')
        File('/etc/init.d/archery', content=params.init_content, mode=0755)
        File('/etc/pyxbackup.cnf',
             content=InlineTemplate(params.backup_content),
             mode=0755,
             owner='mysql',
             group='mysql')
        Execute(
            "echo 'px0nwi7Kbf25fkUaKwUdmG+eDmg7YZt9' > /etc/pyxbackup_encrypt.key",
            user='******')
        File('/usr/sbin/pyxbackup',
             content=StaticFile('pyxbackup.py'),
             mode=0755,
             owner='mysql',
             group='mysql')
Example 5
def backup_dir_contents(dir_path, backup_folder_suffix):
    import params
    backup_destination_path = params.tmp_dir + os.path.normpath(
        dir_path) + backup_folder_suffix
    Directory(
        backup_destination_path,
        mode=0755,
        cd_access='a',
        owner=params.kafka_user,
        group=params.user_group,
        create_parents=True,
        recursive_ownership=True,
    )
    # Safely copy top-level contents to backup folder
    for file in os.listdir(dir_path):
        if os.path.isdir(os.path.join(dir_path, file)):
            Execute(('cp', '-r', os.path.join(dir_path,
                                              file), backup_destination_path),
                    sudo=True)
            Execute(("chown", "-R", format("{kafka_user}:{user_group}"),
                     os.path.join(backup_destination_path, file)),
                    sudo=True)
        else:
            File(os.path.join(backup_destination_path, file),
                 owner=params.kafka_user,
                 content=StaticFile(os.path.join(dir_path, file)))

    return backup_destination_path
Example 6
    def service_check(self, env):
        import params
        env.set_params(params)

        unique = get_unique_id_and_date()
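        # unique id keeps the WordCount topology name from colliding with earlier runs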

        File(
            "/tmp/wordCount.jar",
            content=StaticFile("wordCount.jar"),
            owner=params.storm_user)

        cmd = ""
        if params.nimbus_seeds_supported:
            # Because this command is guaranteed to run on one of the hosts with storm client, there is no need
            # to specify "-c nimbus.seeds={nimbus_seeds}"
            cmd = format(
                "storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount{unique}")
        elif params.nimbus_host is not None:
            cmd = format(
                "storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount{unique} -c nimbus.host={nimbus_host}")

        Execute(
            cmd,
            logoutput=True,
            path=params.storm_bin_dir,
            user=params.storm_user)

        Execute(
            format("storm kill WordCount{unique}"),
            path=params.storm_bin_dir,
            user=params.storm_user)
Example 7
    def service_check(self, env):
        import params
        env.set_params(params)

        File(params.zk_smoke_out, action="delete")

        File(format("{tmp_dir}/zkSmoke.sh"),
             mode=0755,
             content=StaticFile('zkSmoke.sh'))

        if params.security_enabled:
            smokeUserKeytab = params.smoke_user_keytab
            smokeUserPrincipal = params.smokeuser_principal
        else:
            smokeUserKeytab = "no_keytab"
            smokeUserPrincipal = "no_principal"

        cmd_quorum = format(
            "{tmp_dir}/zkSmoke.sh {zk_cli_shell} {smokeuser} {config_dir} {client_port} "
            "{security_enabled} {kinit_path_local} {smokeUserKeytab} {smokeUserPrincipal} {zk_smoke_out}"
        )

        Execute(cmd_quorum,
                tries=3,
                try_sleep=5,
                path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
                logoutput=True)
Example 8
    def init(self, env):
        import params
        env.set_params(params)
        Execute(
            "curl -XPUT 'http://" + params.hostname +
            ":9200/_template/index_template' -H 'Content-Type: application/json' -d '"
            + params.index_template_content + "'")
        Execute(
            "curl -XPUT 'http://" + params.hostname +
            ":9200/_ilm/policy/hot-warm-cold-delete-60days' -H 'Content-Type: application/json' -d '"
            + params.policy_content + "'")

        File('/tmp/license.json',
             mode=0755,
             content=StaticFile('license.json'))

        Execute(
            "curl -XPUT -u admin:admin -H 'Content-Type: application/json' 'http://"
            + params.hostname +
            ":9200/_xpack/license?acknowledge=true' -d @/tmp/license.json")

        # render the template before concatenating; passing the InlineTemplate
        # object itself into string concatenation would raise a TypeError
        Execute(
            "curl -XPUT 'http://" + params.hostname +
            ":9200/_snapshot/hdfs_backup' -H 'Content-Type: application/json' -d '"
            + InlineTemplate(params.hdfs_backup_content).get_content() + "'")
Example 9
def StaticFile(name, **kwargs):
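    # test shim: stubs sudo file access so the real StaticFile always reads dummy content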
    with RMFTestCase.env:
        from resource_management.core.source import StaticFile
        from resource_management.core import sudo
        sudo.path_isfile = lambda path: True
        sudo.read_file = lambda path: 'dummy_output'
        return StaticFile(name, **kwargs)
Example 10
def create_topology_script():
    import params

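    # only materialize the script when its target directory exists (see only_if below)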
    File(params.net_topology_script_file_path,
         content=StaticFile('topology_script.py'),
         mode=0755,
         only_if=format("test -d {net_topology_script_dir}"))
Example 11
  def action_create(self):
    with Environment.get_instance_copy() as env:
      with tempfile.NamedTemporaryFile() as tmpf:
        repo_file_name = format("{repo_file_name}.list", repo_file_name=self.resource.repo_file_name)
        repo_file_path = format("{repo_dir}/{repo_file_name}", repo_dir=self.repo_dir)

        new_content = InlineTemplate(self.resource.repo_template, package_type=self.package_type,
                                      base_url=self.resource.base_url,
                                      components=' '.join(self.resource.components)).get_content()
        old_content = ''
        if self.resource.append_to_file and os.path.isfile(repo_file_path):
          old_content = sudo.read_file(repo_file_path) + '\n'

        File(tmpf.name, content=old_content+new_content)

        if not os.path.isfile(repo_file_path) or not filecmp.cmp(tmpf.name, repo_file_path):
          File(repo_file_path,
               content = StaticFile(tmpf.name)
          )
          
          update_cmd_formatted = [format(x) for x in self.update_cmd]
          # this is time expensive
          retcode, out = checked_call(update_cmd_formatted, sudo=True)
          
          # add public keys for new repos
          missing_pkeys = set(re.findall(self.missing_pkey_regex, out))
          for pkey in missing_pkeys:
            Execute(format(self.add_pkey_cmd),
                    timeout = 15, # in case we are on the host w/o internet (using localrepo), we should ignore hanging
                    ignore_failures = True
            )
Example 12
def setup_solr_client(config,
                      custom_log4j=True,
                      custom_log_location=None,
                      log4jcontent=None):
    solr_client_dir = '/usr/lib/ambari-infra-solr-client'
    solr_client_log_dir = default(
        '/configurations/infra-solr-env/infra_solr_client_log_dir',
        '/var/log/ambari-infra-solr-client'
    ) if custom_log_location is None else custom_log_location
    solr_client_log = format("{solr_client_log_dir}/solr-client.log")

    Directory(solr_client_log_dir,
              mode=0755,
              cd_access='a',
              create_parents=True)
    Directory(solr_client_dir,
              mode=0755,
              cd_access='a',
              create_parents=True,
              recursive_ownership=True)
    solrCliFilename = format("{solr_client_dir}/solrCloudCli.sh")
    File(solrCliFilename, mode=0755, content=StaticFile(solrCliFilename))
    if custom_log4j:
        # use custom log4j content only, when infra is not installed on the cluster
        solr_client_log4j_content = config['configurations'][
            'infra-solr-client-log4j'][
                'content'] if log4jcontent is None else log4jcontent
        File(format("{solr_client_dir}/log4j.properties"),
             content=InlineTemplate(solr_client_log4j_content),
             mode=0644)
    else:
        File(format("{solr_client_dir}/log4j.properties"), mode=0644)

    File(solr_client_log, mode=0664, content='')
Example 13
def setup_configs():
    """
  Creates configs for services HDFS mapred
  """
    import params

    if params.has_namenode or params.dfs_type == 'HCFS':
        if os.path.exists(params.hadoop_conf_dir):
            File(
                params.task_log4j_properties_location,
                content=StaticFile("task-log4j.properties"),
                mode=0755)

        if os.path.exists(
                os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
            File(
                os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
                owner=params.hdfs_user,
                group=params.user_group)
        if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
            File(
                os.path.join(params.hadoop_conf_dir, 'masters'),
                owner=params.hdfs_user,
                group=params.user_group)

    generate_include_file()
Example 14
def mysql_configure():
    from params import params

    # required for running hive
    replace_bind_address = ('sed', '-i',
                            's|^bind-address[ \t]*=.*|bind-address = 0.0.0.0|',
                            params.mysql_configname)
    Execute(
        replace_bind_address,
        sudo=True,
    )

    # this also will start mysql-server
    mysql_users.mysql_adduser()

    File(params.mysql_create_geoip_path,
         mode=0755,
         content=StaticFile('createMysqlGeoIp.sh'))

    geoip_setup_cmd = format(
        "bash -x {mysql_create_geoip_path} {daemon_name} {geoip_ddl} {geoip_url}"
    )
    Execute(
        geoip_setup_cmd,
        tries=3,
        try_sleep=5,
        path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
    )
Example 15
def hcat_service_check():
    import params
    unique = get_unique_id_and_date()
    output_file = format("{hive_apps_whs_dir}/hcatsmoke{unique}")
    test_cmd = format("fs -test -e {output_file}")

    if params.security_enabled:
        kinit_cmd = format(
            "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; "
        )
    else:
        kinit_cmd = ""

    File(format("{tmp_dir}/hcatSmoke.sh"),
         content=StaticFile("hcatSmoke.sh"),
         mode=0755)

    prepare_cmd = format(
        "{kinit_cmd}env JAVA_HOME={java64_home} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} prepare {purge_tables}"
    )

    exec_path = params.execute_path
    if params.version and params.stack_root:
        upgrade_hive_bin = format("{stack_root}/{version}/hive/bin")
        exec_path = os.environ[
            'PATH'] + os.pathsep + params.hadoop_bin_dir + os.pathsep + upgrade_hive_bin

    Execute(
        prepare_cmd,
        tries=3,
        user=params.smokeuser,
        try_sleep=5,
        path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
        logoutput=True)

    if params.security_enabled:
        Execute(
            format(
                "{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"
            ),
            user=params.hdfs_user,
        )

    ExecuteHadoop(test_cmd,
                  user=params.hdfs_user,
                  logoutput=True,
                  conf_dir=params.hadoop_conf_dir,
                  bin_dir=params.execute_path)

    cleanup_cmd = format(
        "{kinit_cmd} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} cleanup {purge_tables}"
    )

    Execute(
        cleanup_cmd,
        tries=3,
        user=params.smokeuser,
        try_sleep=5,
        path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
        logoutput=True)
Example 16
    def service_check(self, env):
        import params

        env.set_params(params)

        component_type = 'hs'
        # both branches of the original assigned the same value, so the
        # History Server web UI address is used whether or not SSL is enabled
        component_address = params.hs_webui_address

        validateStatusFileName = "validateYarnComponentStatusWindows.py"
        validateStatusFilePath = os.path.join(
            os.path.dirname(params.hadoop_home), "temp",
            validateStatusFileName)
        python_executable = sys.executable
        validateStatusCmd = "{0} {1} {2} -p {3} -s {4}".format(
            python_executable, validateStatusFilePath, component_type,
            component_address, params.hadoop_ssl_enabled)

        if params.security_enabled:
            kinit_cmd = "{0} -kt {1} {2};".format(params.kinit_path_local,
                                                  params.smoke_user_keytab,
                                                  params.smokeuser)
            smoke_cmd = kinit_cmd + validateStatusCmd
        else:
            smoke_cmd = validateStatusCmd

        File(validateStatusFilePath,
             content=StaticFile(validateStatusFileName))

        Execute(smoke_cmd, tries=3, try_sleep=5, logoutput=True)
Example 17
def setup_metastore():
    import params

    if params.hive_metastore_site_supported:
        hivemetastore_site_config = get_config("hivemetastore-site")
        if hivemetastore_site_config:
            XmlConfig("hivemetastore-site.xml",
                      conf_dir=params.hive_server_conf_dir,
                      configurations=params.config['configurations']
                      ['hivemetastore-site'],
                      configuration_attributes=params.
                      config['configuration_attributes']['hivemetastore-site'],
                      owner=params.hive_user,
                      group=params.user_group,
                      mode=0600)

    File(os.path.join(params.hive_server_conf_dir,
                      "hadoop-metrics2-hivemetastore.properties"),
         owner=params.hive_user,
         group=params.user_group,
         content=Template("hadoop-metrics2-hivemetastore.properties.j2"),
         mode=0600)

    File(params.start_metastore_path,
         mode=0755,
         content=StaticFile('startMetastore.sh'))

    if not is_empty(params.hive_exec_scratchdir):
        dirPathStr = urlparse(params.hive_exec_scratchdir).path
        pathComponents = dirPathStr.split("/")
        if dirPathStr.startswith("/tmp") and len(pathComponents) > 2:
            Directory(params.hive_exec_scratchdir,
                      owner=params.hive_user,
                      create_parents=True,
                      mode=0777)
Example 18
def install_graphouse():
    import params
    # the original referenced a bare `graphouse_log_dir` (a NameError);
    # params.log_dir, used for the log symlink below, is assumed here
    Directory([params.graphouse_conf_dir, params.log_dir],
              owner=params.graphite_user,
              group=params.user_group,
              mode=0775,
              cd_access="a",
              create_parents=True)

    File('/tmp/init_clickhouse.sql',
         content=StaticFile("init_clickhouse.sql"),
         mode=0755)
    # TODO: execute init_clickhouse.sql against ClickHouse

    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir) or not os.path.exists(
                              params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        Execute('wget ' + params.download_url + ' -O /tmp/' + params.filename,
                user=params.graphite_user)
        Execute('tar -xf /tmp/' + params.filename + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir +
                ' ' + params.install_dir)

        Execute(' rm -rf ' + params.install_dir + '/conf')
        Execute('ln -s ' + params.graphouse_conf_dir + ' ' +
                params.install_dir + '/conf')

        Execute(' rm -rf ' + params.install_dir + '/log')
        Execute('ln -s ' + params.log_dir + ' ' + params.install_dir + '/log')

        Execute("echo 'export PATH=%s/bin:$PATH'>/etc/profile.d/graphouse.sh" %
                params.install_dir)
        Execute('chown -R %s:%s %s/%s' %
                (params.graphite_user, params.user_group, params.stack_root,
                 params.version_dir))
        Execute('chown -R %s:%s %s' %
                (params.graphite_user, params.user_group, params.install_dir))
        Execute('/bin/rm -f /tmp/' + params.filename)

        File('/etc/init.d/graphouse',
             content=StaticFile("graphouse.init"),
             mode=0755)
        Execute('chkconfig graphouse on')
Example 19
    def configure(self, env):
        import params
        import status_params
        env.set_params(params)
        env.set_params(status_params)
        self.create_zeppelin_log_dir(env)

        # write out zeppelin-site.xml
        XmlConfig(
            "zeppelin-site.xml",
            conf_dir=params.conf_dir,
            configurations=params.config['configurations']['zeppelin-config'],
            owner=params.zeppelin_user,
            group=params.zeppelin_group)
        # write out zeppelin-env.sh
        env_content = InlineTemplate(params.zeppelin_env_content)
        File(format("{params.conf_dir}/zeppelin-env.sh"),
             content=env_content,
             owner=params.zeppelin_user,
             group=params.zeppelin_group)

        # write out shiro.ini
        shiro_ini_content = InlineTemplate(params.shiro_ini_content)
        File(format("{params.conf_dir}/shiro.ini"),
             content=shiro_ini_content,
             owner=params.zeppelin_user,
             group=params.zeppelin_group)

        # write out log4j.properties
        File(format("{params.conf_dir}/log4j.properties"),
             content=params.log4j_properties_content,
             owner=params.zeppelin_user,
             group=params.zeppelin_group)

        # copy hive-site.xml only if Spark 1.x is installed
        if 'spark-defaults' in params.config['configurations']:
            File(format("{params.conf_dir}/hive-site.xml"),
                 content=StaticFile("/etc/spark/conf/hive-site.xml"),
                 owner=params.zeppelin_user,
                 group=params.zeppelin_group)

        if len(params.hbase_master_hosts) > 0:
            # copy hbase-site.xml
            File(format("{params.conf_dir}/hbase-site.xml"),
                 content=StaticFile("/etc/hbase/conf/hbase-site.xml"),
                 owner=params.zeppelin_user,
                 group=params.zeppelin_group)
Example 20
    def install(self, env):
        import jkg_toree_params as params
        self.install_packages(env)

        # Create user and group if they don't exist
        helpers.create_linux_user(params.user, params.group)

        # Create directories used by the service and service user
        Directory([
            params.home_dir, params.jkg_pid_dir, params.log_dir,
            params.spark_config_dir
        ],
                  mode=0755,
                  create_parents=True,
                  owner=params.user,
                  group=params.group,
                  recursive_ownership=True)

        if os.path.exists(params.py_venv_pathprefix):
            Logger.warning(
                "Virtualenv path prefix {0} to be used for JNBG service might already exist. "
                "This is unexpected if the service or service component is being installed on the node for the first time. "
                "It could indicate remnants from a prior installation.".format(
                    params.py_venv_pathprefix))

        # Setup bash scripts for execution
        for sh_script in params.sh_scripts:
            File(params.sh_scripts_dir + os.sep + sh_script,
                 content=StaticFile(sh_script),
                 mode=0750)
        for sh_script in params.sh_scripts_user:
            File(params.sh_scripts_dir + os.sep + sh_script,
                 content=StaticFile(sh_script),
                 mode=0755)

        # Run install commands for JKG defined in params
        for command in params.jkg_commands:
            Execute(command, logoutput=True)

        # Run install commands for Toree defined in params
        for command in params.toree_commands:
            Execute(command, logoutput=True)

        # Run setup commands for log4j
        for command in params.log4j_setup_commands:
            Execute(command, logoutput=True)
Example 21
def accumulo_StaticFile(name, dest_conf_dir):
    import params

    File(format("{dest_conf_dir}/{name}"),
         mode=0644,
         group=params.user_group,
         owner=params.accumulo_user,
         content=StaticFile(name))
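A sketch of a call, assuming a bundled resource name and a hypothetical destination directory:

    accumulo_StaticFile('auditLog.xml', params.server_conf_dir)  # both arguments hypothetical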
Example 22
def setup_metastore():
    import params

    if params.hive_metastore_site_supported:
        hivemetastore_site_config = get_config("hivemetastore-site")
        if hivemetastore_site_config:
            XmlConfig("hivemetastore-site.xml",
                      conf_dir=params.hive_server_conf_dir,
                      configurations=params.config['configurations']
                      ['hivemetastore-site'],
                      configuration_attributes=params.
                      config['configuration_attributes']['hivemetastore-site'],
                      owner=params.hive_user,
                      group=params.user_group,
                      mode=0600)

    File(os.path.join(params.hive_server_conf_dir,
                      "hadoop-metrics2-hivemetastore.properties"),
         owner=params.hive_user,
         group=params.user_group,
         content=Template("hadoop-metrics2-hivemetastore.properties.j2"),
         mode=0600)

    File(params.start_metastore_path,
         mode=0755,
         content=StaticFile('startMetastore.sh'))
    if params.init_metastore_schema:
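        # the !p conversion masks the password in the logged command line (see the note below)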
        create_schema_cmd = format(
            "export HIVE_CONF_DIR={hive_server_conf_dir} ; "
            "{hive_schematool_bin}/schematool -initSchema "
            "-dbType {hive_metastore_db_type} "
            "-userName {hive_metastore_user_name} "
            "-passWord {hive_metastore_user_passwd!p} -verbose")

        check_schema_created_cmd = as_user(
            format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                   "{hive_schematool_bin}/schematool -info "
                   "-dbType {hive_metastore_db_type} "
                   "-userName {hive_metastore_user_name} "
                   "-passWord {hive_metastore_user_passwd!p} -verbose"),
            params.hive_user)

        # HACK: in cases with quoted passwords and as_user (which does the quoting as well) !p won't work for hiding passwords.
        # Fixing it with the hack below:
        quoted_hive_metastore_user_passwd = quote_bash_args(
            quote_bash_args(params.hive_metastore_user_passwd))
        if quoted_hive_metastore_user_passwd[0] == "'" and quoted_hive_metastore_user_passwd[-1] == "'" \
            or quoted_hive_metastore_user_passwd[0] == '"' and quoted_hive_metastore_user_passwd[-1] == '"':
            quoted_hive_metastore_user_passwd = quoted_hive_metastore_user_passwd[
                1:-1]
        Logger.sensitive_strings[repr(check_schema_created_cmd)] = repr(
            check_schema_created_cmd.replace(
                format("-passWord {quoted_hive_metastore_user_passwd}"),
                "-passWord " + utils.PASSWORDS_HIDE_STRING))

        Execute(create_schema_cmd,
                not_if=check_schema_created_cmd,
                user=params.hive_user)
Example 23
    def elasticsearch_template_install(self, env):
        import params
        env.set_params(params)

        File(params.bro_index_path,
             mode=0755,
             content=StaticFile('bro_index.template'))

        File(params.snort_index_path,
             mode=0755,
             content=StaticFile('snort_index.template'))

        File(params.yaf_index_path,
             mode=0755,
             content=StaticFile('yaf_index.template'))

        File(params.error_index_path,
             mode=0755,
             content=StaticFile('error_index.template'))

        File(params.meta_index_path,
             mode=0755,
             content=StaticFile('meta_index.mapping'))

        bro_cmd = ambari_format(
            'curl -s -XPOST http://{es_http_url}/_template/bro_index -d @{bro_index_path}'
        )
        Execute(bro_cmd, logoutput=True)
        snort_cmd = ambari_format(
            'curl -s -XPOST http://{es_http_url}/_template/snort_index -d @{snort_index_path}'
        )
        Execute(snort_cmd, logoutput=True)
        yaf_cmd = ambari_format(
            'curl -s -XPOST http://{es_http_url}/_template/yaf_index -d @{yaf_index_path}'
        )
        Execute(yaf_cmd, logoutput=True)
        error_cmd = ambari_format(
            'curl -s -XPOST http://{es_http_url}/_template/error_index -d @{error_index_path}'
        )
        Execute(error_cmd, logoutput=True)
        meta_cmd = ambari_format(
            'curl -s -XPOST http://{es_http_url}/metaalerts -d @{meta_index_path}'
        )
        Execute(meta_cmd, logoutput=True)
Example 24
    def install(self, env):
        import py_client_params as params
        from jkg_toree_params import user, group, sh_scripts_dir, sh_scripts, sh_scripts_user

        # Setup bash scripts for execution
        for sh_script in sh_scripts:
            File(sh_scripts_dir + os.sep + sh_script,
                 content=StaticFile(sh_script),
                 mode=0750)
        for sh_script in sh_scripts_user:
            File(sh_scripts_dir + os.sep + sh_script,
                 content=StaticFile(sh_script),
                 mode=0755)

        self.install_packages(env)
        self.configure(env)

        # Run install commands for Python client defined in params
        for command in params.commands:
            Execute(command, logoutput=True)
Example 25
def setup_symlink(kafka_managed_dir, kafka_ambari_managed_dir):
    import params
    backup_folder_path = None
    backup_folder_suffix = "_tmp"
    if kafka_ambari_managed_dir != kafka_managed_dir:
        if os.path.exists(
                kafka_managed_dir) and not os.path.islink(kafka_managed_dir):

            # Back up existing data before deleting: the config may be switched to/from the
            # default location repeatedly, and the contents (e.g. historic logs) may still be relevant
            backup_folder_path = backup_dir_contents(kafka_managed_dir,
                                                     backup_folder_suffix)

            Directory(kafka_managed_dir, action="delete", create_parents=True)

        elif os.path.islink(kafka_managed_dir) and os.path.realpath(
                kafka_managed_dir) != kafka_ambari_managed_dir:
            Link(kafka_managed_dir, action="delete")

        if not os.path.islink(kafka_managed_dir):
            Link(kafka_managed_dir, to=kafka_ambari_managed_dir)

    elif os.path.islink(
            kafka_managed_dir
    ):  # If config is changed and coincides with the kafka managed dir, remove the symlink and physically create the folder
        Link(kafka_managed_dir, action="delete")

        Directory(
            kafka_managed_dir,
            mode=0755,
            cd_access='a',
            owner=params.kafka_user,
            group=params.user_group,
            create_parents=True,
            recursive_ownership=True,
        )

    if backup_folder_path:
        # Restore backed-up files to the relevant dirs if needed; triggered only when changing to/from the default path
        for file in os.listdir(backup_folder_path):
            if os.path.isdir(os.path.join(backup_folder_path, file)):
                Execute(('cp', '-r', os.path.join(backup_folder_path,
                                                  file), kafka_managed_dir),
                        sudo=True)
                Execute(("chown", "-R", format("{kafka_user}:{user_group}"),
                         os.path.join(kafka_managed_dir, file)),
                        sudo=True)
            else:
                File(os.path.join(kafka_managed_dir, file),
                     owner=params.kafka_user,
                     content=StaticFile(os.path.join(backup_folder_path,
                                                     file)))

        # Clean up backed up folder
        Directory(backup_folder_path, action="delete", create_parents=True)
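A sketch of a typical call, with hypothetical param names for the Kafka log dirs:

    setup_symlink(params.kafka_managed_log_dir, params.kafka_ambari_managed_log_dir)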
Example 26
    def install(self, env):
        self.install_packages(env)
        Execute(
            'cd /usr/lib/python2.7/site-packages/webvirtmgr;python manage.py syncdb --noinput'
        )
        File('/tmp/createsuperuser.py',
             mode=0755,
             content=StaticFile('createsuperuser.py'))
        Execute(
            'cd /usr/lib/python2.7/site-packages/webvirtmgr;cat /tmp/createsuperuser.py | python manage.py shell --plain'
        )
Example 27
def setup_solr_client(config,
                      custom_log4j=True,
                      custom_log_location=None,
                      log4jcontent=None):
    solr_client_dir = '/usr/lib/ambari-infra-solr-client'
    solr_client_log_dir = default_config(
        config,
        '/configurations/infra-solr-client-log4j/infra_solr_client_log_dir',
        '/var/log/ambari-infra-solr-client'
    ) if custom_log_location is None else custom_log_location
    solr_client_log = format("{solr_client_log_dir}/solr-client.log")
    solr_client_log_maxfilesize = default_config(
        config,
        'configurations/infra-solr-client-log4j/infra_client_log_maxfilesize',
        80)
    solr_client_log_maxbackupindex = default_config(
        config,
        'configurations/infra-solr-client-log4j/infra_client_log_maxbackupindex',
        60)

    Directory(solr_client_log_dir,
              mode=0755,
              cd_access='a',
              create_parents=True)
    Directory(solr_client_dir,
              mode=0755,
              cd_access='a',
              create_parents=True,
              recursive_ownership=True)
    solrCliFilename = format("{solr_client_dir}/solrCloudCli.sh")
    File(solrCliFilename, mode=0755, content=StaticFile(solrCliFilename))
    if custom_log4j:
        # use custom log4j content only, when infra is not installed on the cluster
        solr_client_log4j_content = config['configurations'][
            'infra-solr-client-log4j'][
                'content'] if log4jcontent is None else log4jcontent
        context = {
            'solr_client_log': solr_client_log,
            'solr_client_log_maxfilesize': solr_client_log_maxfilesize,
            'solr_client_log_maxbackupindex': solr_client_log_maxbackupindex
        }
        template = JinjaEnvironment(
            line_statement_prefix='%',
            variable_start_string="{{",
            variable_end_string="}}").from_string(solr_client_log4j_content)

        File(format("{solr_client_dir}/log4j.properties"),
             content=template.render(context),
             mode=0644)
    else:
        File(format("{solr_client_dir}/log4j.properties"), mode=0644)

    File(solr_client_log, mode=0664, content='')
Example 28
def copy_toolkit_scripts(toolkit_files_dir, toolkit_tmp_dir, user, group,
                         upgrade_type, service):
    import params

    if service == NIFI:
        run_ca_tmp_script = os.path.join(toolkit_tmp_dir, 'run_ca.sh')
        new_run_ca_tmp_script = StaticFile("run_ca.sh")

        if not sudo.path_isfile(run_ca_tmp_script) or sudo.read_file(
                run_ca_tmp_script) != new_run_ca_tmp_script:
            File(format(run_ca_tmp_script),
                 content=new_run_ca_tmp_script,
                 mode=0755,
                 owner=user,
                 group=group)

    if not params.stack_support_nifi_toolkit_package:
        nifiToolkitDirFilesPath = None
        nifiToolkitDirTmpPath = None

        Logger.info("Toolkit files dir is " + toolkit_files_dir)
        Logger.info("Toolkit tmp dir is " + toolkit_tmp_dir)

        for dir in os.listdir(toolkit_files_dir):
            if dir.startswith('nifi-toolkit-'):
                nifiToolkitDirFilesPath = os.path.join(toolkit_files_dir, dir)
                nifiToolkitDirTmpPath = os.path.join(toolkit_tmp_dir, dir)

        if not sudo.path_isdir(nifiToolkitDirTmpPath) or upgrade_type is not None:
            os.system("\cp -r " + nifiToolkitDirFilesPath + " " +
                      toolkit_tmp_dir)
            Directory(nifiToolkitDirTmpPath,
                      owner=user,
                      group=group,
                      create_parents=False,
                      recursive_ownership=True,
                      cd_access="a",
                      mode=0755)
            os.system("\/var/lib/ambari-agent/ambari-sudo.sh chmod -R 755 " +
                      nifiToolkitDirTmpPath)
    else:
        Logger.info("Changing owner of package files")
        package_toolkit_dir = os.path.join(params.stack_root, 'current',
                                           'nifi-toolkit')
        Directory(package_toolkit_dir,
                  owner=user,
                  group=group,
                  create_parents=False,
                  recursive_ownership=True,
                  cd_access="a",
                  mode=0755,
                  recursion_follow_links=True)
Example 29
    def action_create(self):
        with tempfile.NamedTemporaryFile() as tmpf:
            with tempfile.NamedTemporaryFile() as old_repo_tmpf:
                for repo_file_path, repo_file_content in RepositoryProvider.repo_files_content.iteritems(
                ):
                    repo_file_content = repo_file_content.strip()

                    File(
                        tmpf.name,
                        content=repo_file_content,
                        owner=os_utils.current_user(),
                    )

                    if os.path.isfile(repo_file_path):
                        # a copy of old repo file, which will be readable by current user
                        File(
                            old_repo_tmpf.name,
                            content=StaticFile(repo_file_path),
                            owner=os_utils.current_user(),
                        )

                    if not os.path.isfile(repo_file_path) or not filecmp.cmp(
                            tmpf.name, old_repo_tmpf.name):
                        Logger.info(
                            format(
                                "Rewriting {repo_file_path} since it has changed."
                            ))
                        File(repo_file_path, content=StaticFile(tmpf.name))

                        try:
                            self.update(repo_file_path)
                        except:
                            # remove created file or else ambari will consider that update was successful and skip repository operations
                            File(
                                repo_file_path,
                                action="delete",
                            )
                            raise

        RepositoryProvider.repo_files_content.clear()
Example 30
    def service_check(self, env):
        import params
        env.set_params(params)

        File(format("{tmp_dir}/janusgraphSmoke.groovy"),
             content=StaticFile("janusgraphSmoke.groovy"),
             mode=0755)

        if params.security_enabled:
            kinit_cmd = format(
                "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};"
            )
            Execute(kinit_cmd, user=params.smokeuser)
        secure = ""
        if params.janusgraph_server_ssl == "true":
            secure = "-k"
        if params.janusgraph_server_ssl_key_cert_file:
            secure = "--cacert " + params.janusgraph_server_ssl_key_cert_file.split(
                ":")[1]
            grepresult = """ | grep 99"""
        if len(params.janusgraph_server_simple_authenticator) > 0:
            grepresult = ""

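        # when grepresult is set, the response to the Gremlin expression "100-1" is checked for the result 99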
        headers = """ -XPOST -Hcontent-type:application/json -d '{"gremlin":"100-1"}' """
        http = "http://"
        if params.janusgraph_server_ssl == "true":
            http = "https://"

        janusgraph_server_host = http + format("{janusgraph_host}")
        janusgraph_port = format("{janusgraph_server_port}")
        cmd = "curl " + secure + headers + janusgraph_server_host + ":" + janusgraph_port + grepresult
        gremlin_bin = params.janusgraph_bin_dir + "/gremlin.sh"

        Execute(
            (cmd),
            tries=40,
            try_sleep=5,
            path=format(
                '{janusgraph_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
            ),
            user=params.smokeuser,
            logoutput=True)

        Execute(
            format("{gremlin_bin} {tmp_dir}/janusgraphSmoke.groovy"),
            tries=3,
            try_sleep=5,
            path=format(
                '{janusgraph_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
            ),
            user=params.smokeuser,
            logoutput=True)