  def test_static_file_absolute_path(self, join_mock, is_file_mock):
    """
    Testing StaticFile source with absolute path
    """
    sudo.read_file = lambda path: 'content'
    is_file_mock.return_value = True

    with Environment("/base") as env:
      static_file = StaticFile("/absolute/path/file")
      content = static_file.get_content()

    self.assertEqual('content', content)
    self.assertEqual(is_file_mock.call_count, 1)
    self.assertEqual(join_mock.call_count, 0)
  def test_static_file_relative_path(self, join_mock, is_file_mock):
    """
    Testing StaticFile source with relative path
    """
    sudo.read_file = lambda path: 'content'
    is_file_mock.return_value = True

    with Environment("/base") as env:
      static_file = StaticFile("relative/path/file")
      content = static_file.get_content()

    self.assertEqual('content', content)
    self.assertEqual(is_file_mock.call_count, 1)
    self.assertEqual(join_mock.call_count, 1)
    join_mock.assert_called_with('/base', 'files', 'relative/path/file')
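The final assertion above pins down the lookup rule these tests exercise: an absolute StaticFile name is used as-is, while a relative name is resolved against the files directory under the environment's base path. A minimal sketch of that resolution (illustrative only, not the exact Ambari internals):

import os

def resolve_static_file(base_dir, name):
  # absolute names bypass the environment; relative names live under <base_dir>/files
  if os.path.isabs(name):
    return name
  return os.path.join(base_dir, 'files', name)

assert resolve_static_file('/base', 'relative/path/file') == '/base/files/relative/path/file'
assert resolve_static_file('/base', '/absolute/path/file') == '/absolute/path/file'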
Example #3
  def test_static_file_absolute_path(self, join_mock, open_mock):
    """
    Testing StaticFile source with absolute path
    """
    file_mock = MagicMock(name = 'file_mock')
    file_mock.__enter__.return_value = file_mock
    file_mock.read.return_value = 'content'
    open_mock.return_value = file_mock

    with Environment("/base") as env:
      static_file = StaticFile("/absolute/path/file")
      content = static_file.get_content()

    self.assertEqual('content', content)
    self.assertEqual(file_mock.read.call_count, 1)
    self.assertEqual(join_mock.call_count, 0)
Example #4
    def elasticsearch_template_install(self, env):
        from params import params
        env.set_params(params)
        Logger.info("Installing Elasticsearch index templates")
        metron_service.check_indexer_parameters()

        commands = IndexingCommands(params)
        for template_name, template_path in commands.get_templates().iteritems():

            # install the index template
            File(template_path,
                 mode=0755,
                 content=StaticFile("{0}.template".format(template_name)))
            cmd = "curl -s -XPOST http://{0}/_template/{1} -d @{2}"
            Execute(cmd.format(params.es_http_url, template_name,
                               template_path),
                    logoutput=True)
Example #5
def backup_dir_contents(dir_path, backup_folder_suffix):
  import params
  backup_destination_path = params.tmp_dir + os.path.normpath(dir_path)+backup_folder_suffix
  Directory(backup_destination_path,
            mode=0755,
            cd_access='a',
            owner=params.kafka_user,
            group=params.user_group,
            recursive=True
  )
  set_dir_ownership(backup_destination_path)
  # Safely copy top-level contents to backup folder
  for file in os.listdir(dir_path):
    File(os.path.join(backup_destination_path, file),
         owner=params.kafka_user,
         content = StaticFile(os.path.join(dir_path,file)))

  return backup_destination_path
Example #6
    def action_create(self):
        with Environment.get_instance_copy() as env:
            with tempfile.NamedTemporaryFile() as tmpf:
                repo_file_name = format(
                    "{repo_file_name}.list",
                    repo_file_name=self.resource.repo_file_name)
                repo_file_path = format("{repo_dir}/{repo_file_name}",
                                        repo_dir=self.repo_dir)
                repo_template = os.path.join(
                    os.path.dirname(os.path.realpath(__file__)), '..',
                    REPO_TEMPLATE_FOLDER, self.resource.repo_template)

                new_content = Template(
                    repo_template,
                    package_type=self.package_type,
                    base_url=self.resource.base_url,
                    components=' '.join(
                        self.resource.components)).get_content()
                old_content = ''
                if self.resource.append_to_file and os.path.isfile(
                        repo_file_path):
                    with open(repo_file_path) as repo_file:
                        old_content = repo_file.read() + '\n'

                File(tmpf.name, content=old_content + new_content)

                if not os.path.isfile(repo_file_path) or not filecmp.cmp(
                        tmpf.name, repo_file_path):
                    File(repo_file_path, content=StaticFile(tmpf.name))

                    update_cmd_formatted = [format(x) for x in self.update_cmd]
                    # this is time expensive
                    retcode, out = checked_call(update_cmd_formatted,
                                                sudo=True)

                    # add public keys for new repos
                    missing_pkeys = set(
                        re.findall(self.missing_pkey_regex, out))
                    for pkey in missing_pkeys:
                        Execute(
                            format(self.add_pkey_cmd),
                            timeout=15,  # in case the host has no internet access (using a local repo), don't hang indefinitely
                            ignore_failures=True)
Example #7
def setup_symlink(kafka_managed_dir, kafka_ambari_managed_dir):
  import params
  backup_folder_path = None
  backup_folder_suffix = "_tmp"
  if kafka_ambari_managed_dir != kafka_managed_dir:
    if os.path.exists(kafka_managed_dir) and not os.path.islink(kafka_managed_dir):

      # Backup existing data before deleting it, since the config may be switched to/from the default location repeatedly and the directory may hold relevant contents (historic logs)
      backup_folder_path = backup_dir_contents(kafka_managed_dir, backup_folder_suffix)

      Directory(kafka_managed_dir,
                action="delete",
                create_parents=True)

    elif os.path.islink(kafka_managed_dir) and os.path.realpath(kafka_managed_dir) != kafka_ambari_managed_dir:
      Link(kafka_managed_dir,
           action="delete")

    if not os.path.islink(kafka_managed_dir):
      Link(kafka_managed_dir,
           to=kafka_ambari_managed_dir)

  elif os.path.islink(kafka_managed_dir): # If config is changed and coincides with the kafka managed dir, remove the symlink and physically create the folder
    Link(kafka_managed_dir,
         action="delete")

    Directory(kafka_managed_dir,
              mode=0755,
              cd_access='a',
              owner=params.kafka_user,
              group=params.user_group,
              create_parents=True)

  if backup_folder_path:
    # Restore backed up files to current relevant dirs if needed - will be triggered only when changing to/from default path;
    for file in os.listdir(backup_folder_path):
      File(os.path.join(kafka_managed_dir,file),
           owner=params.kafka_user,
           content = StaticFile(os.path.join(backup_folder_path,file)))

    # Clean up backed up folder
    Directory(backup_folder_path,
              action="delete",
              create_parents=True)
Example #8
def create_topology_script():
    import params

    path = params.net_topology_script_file_path
    parent_dir = os.path.dirname(path)
    # only create the parent directory and set its permission if it does not exist
    if not os.path.exists(parent_dir):
        Directory(parent_dir,
                  create_parents=True,
                  owner=params.hdfs_user,
                  group=params.user_group)

    # installing the topology script to the specified location
    File(
        path,
        content=StaticFile('topology_script.py'),
        mode=0755,
        only_if=format("test -d {net_topology_script_dir}"),
    )
Example #9
def setup_metastore():
  import params

  if params.hive_metastore_site_supported:
    hivemetastore_site_config = get_config("hivemetastore-site")
    if hivemetastore_site_config:
      XmlConfig("hivemetastore-site.xml",
                conf_dir=params.hive_server_conf_dir,
                configurations=params.config['configurations']['hivemetastore-site'],
                configuration_attributes=params.config['configurationAttributes']['hivemetastore-site'],
                owner=params.hive_user,
                group=params.user_group,
                mode=0600)

  File(os.path.join(params.hive_server_conf_dir, "hadoop-metrics2-hivemetastore.properties"),
       owner=params.hive_user,
       group=params.user_group,
       content=Template("hadoop-metrics2-hivemetastore.properties.j2"),
       mode=0600
  )

  File(params.start_metastore_path,
       mode=0755,
       content=StaticFile('startMetastore.sh')
  )

  if params.hive_repl_cmrootdir is not None and params.hive_repl_cmrootdir.strip() != "":
    params.HdfsResource(params.hive_repl_cmrootdir,
                        type = "directory",
                        action = "create_on_execute",
                        owner = params.hive_user,
                        group=params.user_group,
                        mode = 01777)
  if params.hive_repl_rootdir is not None and params.hive_repl_rootdir.strip() != "":
    params.HdfsResource(params.hive_repl_rootdir,
                        type = "directory",
                        action = "create_on_execute",
                        owner = params.hive_user,
                        group=params.user_group,
                        mode = 0700)
  params.HdfsResource(None, action="execute")

  generate_logfeeder_input_config('hive', Template("input.config-hive.json.j2", extra_imports=[default]))
Example #10
def setup_metastore():
    import params

    if params.hive_metastore_site_supported:
        hivemetastore_site_config = get_config("hivemetastore-site")
        if hivemetastore_site_config:
            XmlConfig("hivemetastore-site.xml",
                      conf_dir=params.hive_server_conf_dir,
                      configurations=params.config['configurations']['hivemetastore-site'],
                      configuration_attributes=params.config['configuration_attributes']['hivemetastore-site'],
                      owner=params.hive_user,
                      group=params.user_group,
                      mode=0600)

    File(os.path.join(params.hive_server_conf_dir,
                      "hadoop-metrics2-hivemetastore.properties"),
         owner=params.hive_user,
         group=params.user_group,
         content=Template("hadoop-metrics2-hivemetastore.properties.j2"),
         mode=0600)

    File(params.start_metastore_path,
         mode=0755,
         content=StaticFile('startMetastore.sh'))

    if params.hive_repl_cmrootdir:
        params.HdfsResource(params.hive_repl_cmrootdir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            group=params.user_group,
                            mode=01777)
    if params.hive_repl_rootdir:
        params.HdfsResource(params.hive_repl_rootdir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            group=params.user_group,
                            mode=0700)
    if params.hive_repl_cmrootdir or params.hive_repl_rootdir:
        params.HdfsResource(None, action="execute")
Example #11
def setup_solr_client(config,
                      custom_log4j=True,
                      custom_log_location=None,
                      log4jcontent=None):
    solr_client_dir = '/usr/lib/ambari-infra-solr-client'
    solr_client_log_dir = default_config(
        config,
        '/configurations/infra-solr-client-log4j/infra_solr_client_log_dir',
        '/var/log/ambari-infra-solr-client'
    ) if custom_log_location is None else custom_log_location
    solr_client_log = format("{solr_client_log_dir}/solr-client.log")
    solr_client_log_maxfilesize = default_config(
        config,
        'configurations/infra-solr-client-log4j/infra_client_log_maxfilesize',
        80)
    solr_client_log_maxbackupindex = default_config(
        config,
        'configurations/infra-solr-client-log4j/infra_client_log_maxbackupindex',
        60)

    Directory(solr_client_log_dir,
              mode=0755,
              cd_access='a',
              create_parents=True)
    Directory(solr_client_dir,
              mode=0755,
              cd_access='a',
              create_parents=True,
              recursive_ownership=True)
    solrCliFilename = format("{solr_client_dir}/solrCloudCli.sh")
    File(solrCliFilename, mode=0755, content=StaticFile(solrCliFilename))
    if custom_log4j:
        # use custom log4j content only, when infra is not installed on the cluster
        solr_client_log4j_content = (config['configurations']['infra-solr-client-log4j']['content']
                                     if log4jcontent is None else log4jcontent)
        File(format("{solr_client_dir}/log4j.properties"),
             content=InlineTemplate(solr_client_log4j_content),
             mode=0644)
    else:
        File(format("{solr_client_dir}/log4j.properties"), mode=0644)

    File(solr_client_log, mode=0664, content='')
Example #12
def install_azkaban():
    import params
    Directory([params.conf_dir, params.log_dir],
              owner=params.azkaban_user,
              group=params.user_group,
              mode=0755,
              create_parents=True)
    File(params.install_dir_executor +
         '/lib/azkaban-ldap-usermanager-1.2.1-SNAPSHOT.jar',
         content=StaticFile("azkaban-ldap-usermanager-1.2.1-SNAPSHOT.jar"),
         mode=0755)

    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir_executor) or not os.path.exists(
                              params.install_dir_executor):
        Execute('rm -rf %s/%s' % (Script.get_stack_root(), params.version_dir_executor))
        Execute('rm -rf %s' % params.install_dir_executor)
        Execute('wget ' + params.download_url_executor + ' -O /tmp/' +
                params.filename_executor,
                user=params.azkaban_user)
        Execute('tar -zxf /tmp/' + params.filename_executor + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' +
                params.version_dir_executor + ' ' +
                params.install_dir_executor)
        Execute(' cp -r ' + params.install_dir_executor + '/conf/* ' +
                params.conf_dir)
        Execute(' rm -rf ' + params.install_dir_executor + '/conf')
        Execute('ln -s ' + params.conf_dir + ' ' +
                params.install_dir_executor + '/conf')
        Execute('ln -s ' + params.log_dir + ' ' + params.install_dir_executor +
                '/logs/azkaban')

        Execute("echo 'export PATH=%s/bin:$PATH'>/etc/profile.d/azkaban.sh" %
                params.install_dir_executor)
        Execute('chown -R %s:%s %s/%s' %
                (params.azkaban_user, params.user_group,
                 Script.get_stack_root(), params.version_dir_executor))
        Execute('chown -R %s:%s %s' % (params.azkaban_user, params.user_group,
                                       params.install_dir_executor))
        Execute('/bin/rm -f /tmp/' + params.filename_executor)
Example #13
def set_uid(user, user_dirs):
    """
  user_dirs - comma separated directories
  """
    import params

    File(
        format("{tmp_dir}/changeUid.sh"),
        content=StaticFile("changeToSecureUid.sh"),
        mode=0555)
    ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
    uid = get_uid(user, return_existing=True)
    Execute(
        format(
            "{tmp_dir}/changeUid.sh {user} {user_dirs} {new_uid}",
            new_uid=0 if uid is None else uid),
        not_if=format(
            "(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"
        ))
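A minimal call sketch for the helper above (the user name and directories are hypothetical; user_dirs is the comma-separated list the docstring describes):

set_uid('kafka', '/var/log/kafka,/var/lib/kafka')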
Example #14
    def service_check(self, env):
        import params
        env.set_params(params)

        unique = get_unique_id_and_date()

        File("/tmp/wordCount.jar", content=StaticFile("wordCount.jar"))

        cmd = format(
            "storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount{unique} -c nimbus.host={nimbus_host}"
        )

        Execute(cmd,
                logoutput=True,
                path=params.storm_bin_dir,
                user=params.storm_user)

        Execute(format("storm kill WordCount{unique}"),
                path=params.storm_bin_dir,
                user=params.storm_user)
Example #15
    def service_check(self, env):
        import params
        env.set_params(params)

        temp_dir = os.path.join(os.path.dirname(params.knox_home), "temp")
        validateKnoxFileName = "validateKnoxStatus.py"
        validateKnoxFilePath = os.path.join(temp_dir, validateKnoxFileName)
        python_executable = sys.executable
        validateStatusCmd = "%s %s -p %s -n %s" % (
            python_executable, validateKnoxFilePath, params.knox_host_port,
            params.knox_host_name)

        print "Test connectivity to knox server"

        File(validateKnoxFilePath, content=StaticFile(validateKnoxFileName))

        Execute(validateStatusCmd,
                tries=3,
                try_sleep=5,
                timeout=5,
                logoutput=True)
Example #16
def update_credential_provider_path(config,
                                    config_type,
                                    dest_provider_path,
                                    file_owner,
                                    file_group,
                                    use_local_jceks=False):
    """
  Copies the JCEKS file for the specified config from the default location to the given location,
  and sets the ACLs for the specified owner and group. Also updates the config type's configuration
  hadoop credential store provider with the copied file name.
  :param config: configurations['configurations'][config_type]
  :param config_type: Like hive-site, oozie-site, etc.
  :param dest_provider_path: The full path to the file where the JCEKS provider file is to be copied to.
  :param file_owner: File owner
  :param file_group: Group
  :return: A copy of the config that was modified or the input config itself if nothing was modified.
  """
    # Get the path to the provider <config_type>.jceks
    if HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME in config:
        provider_path = config[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME]
        src_provider_path = provider_path[len('jceks://file'):]
        File(dest_provider_path,
             owner=file_owner,
             group=file_group,
             mode=0640,
             content=StaticFile(src_provider_path))
        # make a copy of the config dictionary since it is read-only
        config_copy = config.copy()
        # overwrite the provider path with the path specified
        if use_local_jceks:
            config_copy[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME] = \
                'localjceks://file{0}'.format(dest_provider_path)
        else:
            config_copy[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME] = \
                'jceks://file{0}'.format(dest_provider_path)
        return config_copy
    return config
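A usage sketch for the helper above, assuming the hive-site config carries a hadoop credential provider entry (the path, owner, and group values below are hypothetical):

hive_site = update_credential_provider_path(
    params.config['configurations']['hive-site'],
    'hive-site',
    '/etc/hive/conf/hive-site.jceks',
    params.hive_user,
    params.user_group)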
Example #17
def oozie_smoke_shell_file(file_name):
    import params

    File(format("{tmp_dir}/{file_name}"),
         content=StaticFile(file_name),
         mode=0755)

    if params.security_enabled:
        sh_cmd = format(
            "{tmp_dir}/{file_name} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local} {smokeuser_principal}"
        )
    else:
        sh_cmd = format(
            "{tmp_dir}/{file_name} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled}"
        )

    Execute(format("{tmp_dir}/{file_name}"),
            command=sh_cmd,
            path=params.execute_path,
            tries=3,
            try_sleep=5,
            logoutput=True)
Example #18
def mysql_deluser():
  import params
  
  File(params.mysql_deluser_path,
       mode=0755,
       content=StaticFile('removeMysqlUser.sh')
  )
  hive_server_host = format("{hive_server_host}")
  hive_metastore_host = format("{hive_metastore_host}")

  del_hiveserver_cmd = "bash -x {mysql_deluser_path} {daemon_name} {hive_metastore_user_name} {hive_server_host}"
  del_metastore_cmd = "bash -x {mysql_deluser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_host}"
  if (hive_server_host == hive_metastore_host):
    cmd = format(del_hiveserver_cmd)
  else:
    cmd = format(del_hiveserver_cmd + ";" + del_metastore_cmd)
  Execute(cmd,
          tries=3,
          try_sleep=5,
          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
  )
Example #19
    def elasticsearch_template_install(self, env):
        from params import params
        env.set_params(params)
        Logger.info("Installing Elasticsearch index templates")

        try:
            metron_service.check_indexer_parameters()
            commands = IndexingCommands(params)
            for template_name, template_path in commands.get_templates().iteritems():
                # install the index template
                File(template_path, mode=0755, content=StaticFile("{0}.template".format(template_name)))
                cmd = "curl -s -XPOST http://{0}/_template/{1} -d @{2}"
                Execute(
                  cmd.format(params.es_http_url, template_name, template_path),
                  logoutput=True)
            return True

        except Exception as e:
            msg = "WARNING: Elasticsearch index templates could not be installed.  " \
                  "Is Elasticsearch running?  Will reattempt install on next start.  error={0}"
            Logger.warning(msg.format(e))
            return False
Example #20
def mysql_adduser():
  import params
  
  File(params.mysql_adduser_path,
       mode=0755,
       content=StaticFile('addMysqlUser.sh')
  )
  hive_server_host = format("{hive_server_host}")
  hive_metastore_host = format("{hive_metastore_host}")

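  # the "!p" conversion below marks the password value so Ambari masks it in logged command lines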
  add_metastore_cmd = "bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_metastore_host}"
  add_hiveserver_cmd = "bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_server_host}"
  if (hive_server_host == hive_metastore_host):
    cmd = format(add_hiveserver_cmd)
  else:
    cmd = format(add_hiveserver_cmd + ";" + add_metastore_cmd)
  Execute(cmd,
          tries=3,
          try_sleep=5,
          logoutput=False,
          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
  )
Example #21
    def install(self, env):
        self.install_packages(env)
        self.configure(env)
        import params

        File(
            '/etc/openldap/schema/cisco.schema',
            mode=0755,
            content=StaticFile('cisco.schema'))
        File(
            '/etc/openldap/schema/tacacs.schema',
            mode=0755,
            content=StaticFile('tacacs.schema'))
        File(
            '/etc/openldap/schema/radius.schema',
            mode=0755,
            content=StaticFile('radius.schema'))
        File(
            '/etc/openldap/schema/dnsdomain2.schema',
            mode=0755,
            content=StaticFile('dnsdomain2.schema'))
        File(
            '/etc/openldap/schema/public_key.schema',
            mode=0755,
            content=StaticFile('public_key.schema'))

        File(
            '/etc/httpd/conf.d/self-service-password.conf',
            mode=0755,
            content=StaticFile('self-service-password.conf'))

        setup_file = '/usr/sbin/ldap_setup.sh'
        File(setup_file, mode=0755, content=StaticFile('setup.sh'))

        Execute('echo Running /usr/sbin/ldap_setup.sh')

        # run setup script which has simple shell setup
        Execute(setup_file + ' ' + params.ldap_password + ' ' +
                params.ldap_adminuser + ' ' + params.ldap_domain + ' ' +
                params.ldap_ldifdir + ' ' + params.ldap_ou + ' "' +
                params.binddn + '" >> ' + params.stack_log)
Example #22
def install_graphite_web():
    import params
    Directory([params.graphite_conf_dir, params.log_dir, params.pid_dir],
              owner=params.graphite_user,
              group=params.user_group,
              mode=0775,
              cd_access="a",
              create_parents=True)

    if not os.path.exists(params.install_dir_graphite_web):
        Execute('wget ' + params.download_url_graphite_web + ' -O /tmp/' +
                params.filename_graphite_web,
                user=params.graphite_user)
        Execute('tar -xf /tmp/' + params.filename_graphite_web + ' -C  ' +
                Script.get_stack_root())

        Execute(' rm -rf ' + params.install_dir_graphite_web + '/conf')
        Execute('ln -s ' + params.graphite_conf_dir + ' ' +
                params.install_dir_graphite_web + '/conf')

        Execute('chown -R %s:%s %s' % (params.graphite_user, params.user_group,
                                       params.install_dir_graphite_web))
        Execute('/bin/rm -f /tmp/' + params.filename_graphite_web)

        File(params.install_dir_graphite_web + '/webapp/graphite/graphouse.py',
             content=StaticFile("graphouse.py"),
             mode=0755)

        File('/usr/lib/systemd/system/graphite-web.service',
             content=params.graphite_web_systemd_content,
             mode=0755)
        File('/etc/sysconfig/memcached',
             content=params.memcached_content,
             mode=0755)
        Execute('systemctl daemon-reload')
        Execute('systemctl enable graphite-web')
        Execute('systemctl enable memcached')
        Execute('systemctl start memcached')
Example #23
    def service_check(self, env):
        import params
        env.set_params(params)

        yarn_exe = os_utils.quote_path(
            os.path.join(params.yarn_home, "bin", "yarn.cmd"))

        run_yarn_check_cmd = "cmd /C %s node -list" % yarn_exe

        component_type = 'rm'
        if params.hadoop_ssl_enabled:
            component_address = params.rm_webui_https_address
        else:
            component_address = params.rm_webui_address

        #temp_dir = os.path.abspath(os.path.join(params.hadoop_home, os.pardir)), "/tmp"
        temp_dir = os.path.join(os.path.dirname(params.hadoop_home), "temp")
        validateStatusFileName = "validateYarnComponentStatusWindows.py"
        validateStatusFilePath = os.path.join(temp_dir, validateStatusFileName)
        python_executable = sys.executable
        validateStatusCmd = "%s %s %s -p %s -s %s" % (
            python_executable, validateStatusFilePath, component_type,
            component_address, params.hadoop_ssl_enabled)

        if params.security_enabled:
            kinit_cmd = "%s -kt %s %s;" % (params.kinit_path_local,
                                           params.smoke_user_keytab,
                                           params.smokeuser)
            smoke_cmd = kinit_cmd + ' ' + validateStatusCmd
        else:
            smoke_cmd = validateStatusCmd

        File(validateStatusFilePath,
             content=StaticFile(validateStatusFileName))

        Execute(smoke_cmd, tries=3, try_sleep=5, logoutput=True)

        Execute(run_yarn_check_cmd, logoutput=True)
Example #24
def get_uid(user, return_existing=False):
    """
  Tries to get UID for username. It will try to find UID in custom properties in *cluster_env* and, if *return_existing=True*,
  it will try to return UID of existing *user*.

  :param user: username to get UID for
  :param return_existing: return UID for existing user
  :return:
  """
    import params
    user_str = str(user) + "_uid"
    service_env = [
        serviceEnv for serviceEnv in params.config['configurations']
        if user_str in params.config['configurations'][serviceEnv]
    ]

    if service_env and params.config['configurations'][service_env[0]][user_str]:
        service_env_str = str(service_env[0])
        uid = params.config['configurations'][service_env_str][user_str]
        if len(service_env) > 1:
            Logger.warning(
                "Multiple values found for %s, using %s" % (user_str, uid))
        return uid
    else:
        if return_existing:
            # pick up existing UID or try to find available UID in /etc/passwd, see changeToSecureUid.sh for more info
            if user == params.smoke_user:
                return None
            File(
                format("{tmp_dir}/changeUid.sh"),
                content=StaticFile("changeToSecureUid.sh"),
                mode=0555)
            code, newUid = shell.call(format("{tmp_dir}/changeUid.sh {user}"))
            return int(newUid)
        else:
            # do not return the UID for an existing user; used in the User resource call to let the OS choose the UID for us
            return None
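A short usage sketch (the user name is hypothetical): when a <user>_uid property is present in one of the config types, that value wins; otherwise, with return_existing=True, the UID is read back from the OS via changeToSecureUid.sh.

uid = get_uid('hive', return_existing=True)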
Example #25
def install_graphite_api():
    import params
    Directory([
        params.graphite_conf_dir, params.log_dir, params.pid_dir,
        '/srv/graphite'
    ],
              owner=params.graphite_user,
              group=params.user_group,
              mode=0775,
              cd_access="a",
              create_parents=True)

    if not os.path.exists(params.install_dir_graphite_api):
        Execute('wget ' + params.download_url_graphite_api + ' -O /tmp/' +
                params.filename_graphite_api,
                user=params.graphite_user)
        Execute('tar -xf /tmp/' + params.filename_graphite_api + ' -C  ' +
                Script.get_stack_root())

        Execute('chown -R %s:%s %s' % (params.graphite_user, params.user_group,
                                       params.install_dir_graphite_api))
        Execute('/bin/rm -f /tmp/' + params.filename_graphite_api)

        File(
            params.install_dir_graphite_api +
            '/lib/python3.6/site-packages/graphite_api/finders/graphouse_api.py',
            content=StaticFile("graphouse_api.py"),
            mode=0755)
        File('/usr/lib/systemd/system/graphite-api.service',
             content=params.graphite_api_systemd_content,
             mode=0755)
        File('/etc/sysconfig/memcached',
             content=params.memcached_content,
             mode=0755)
        Execute('systemctl daemon-reload')
        Execute('systemctl enable graphite-api')
        Execute('systemctl enable memcached')
        Execute('systemctl start memcached')
Example #26
def update_credential_provider_path(config_type, dest_provider_path):
  import params

  # Get the path to the provider <config_type>.jceks
  if HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME in params.config['configurations'][config_type]:
    provider_paths = params.config['configurations'][config_type][HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME].split(',')
    for path_index in range(len(provider_paths)):
      provider_path = provider_paths[path_index]
      if config_type == os.path.splitext(os.path.basename(provider_path))[0]:
        src_provider_path = provider_path[len('jceks://file'):]
        Logger.info('src_provider_path={0}, dest_provider_path={1}'.format(src_provider_path, dest_provider_path))
        File(dest_provider_path,
          owner = params.oozie_user,
          group = params.user_group,
          mode = 0640,
          content = StaticFile(src_provider_path)
        )
        provider_paths[path_index] = 'jceks://file{0}'.format(dest_provider_path)
        # make a copy of the config dictionary since it cannot be modified
        config = params.config['configurations'][config_type].copy()
        config[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME] = ','.join(provider_paths)
        return config
    return params.config['configurations'][config_type]
Example #27
    def service_check(self, env):
        import params
        env.set_params(params)

        File( format("{tmp_dir}/titanSmoke.groovy"),
              content = StaticFile("titanSmoke.groovy"),
              mode = 0755
              )

        if params.iop_stack_version != "" and compare_versions(params.iop_stack_version, '4.2') >= 0:
            if params.security_enabled:
                kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
                Execute(kinit_cmd,
                        user=params.smokeuser
                        )

            Execute(format("gremlin {tmp_dir}/titanSmoke.groovy"),
                    tries     = 3,
                    try_sleep = 5,
                    path      = format('{titan_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
                    user      = params.smokeuser,
                    logoutput = True
                    )
Example #28
def setup_configs():
    """
  Creates configs for services HDFS mapred
  """
    import params

    if params.has_namenode:
        if os.path.exists(params.hadoop_conf_dir):
            File(
                params.task_log4j_properties_location,
                content=StaticFile("task-log4j.properties"),
                mode=0755)

        if os.path.exists(
                os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
            File(
                os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
                owner=params.hdfs_user,
                group=params.user_group)
        if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
            File(
                os.path.join(params.hadoop_conf_dir, 'masters'),
                owner=params.hdfs_user,
                group=params.user_group)
Example #29
def backup_dir_contents(dir_path, backup_folder_suffix):
  import params
  backup_destination_path = params.tmp_dir + os.path.normpath(dir_path)+backup_folder_suffix
  Directory(backup_destination_path,
            mode=0755,
            cd_access='a',
            owner=params.kafka_user,
            group=params.user_group,
            create_parents = True,
            recursive_ownership = True,
  )
  # Safely copy top-level contents to backup folder
  for file in os.listdir(dir_path):
    if os.path.isdir(os.path.join(dir_path, file)):
      Execute(('cp', '-r', os.path.join(dir_path, file), backup_destination_path),
              sudo=True)
      Execute(("chown", "-R", format("{kafka_user}:{user_group}"), os.path.join(backup_destination_path, file)),
              sudo=True)
    else:
      File(os.path.join(backup_destination_path, file),
         owner=params.kafka_user,
         content = StaticFile(os.path.join(dir_path,file)))

  return backup_destination_path
Example #30
    def install(self, env):
        File('/tmp/libvirt-bootstrap.sh',
             mode=0755,
             content=StaticFile('libvirt-bootstrap.sh'))
        Execute('/tmp/libvirt-bootstrap.sh')
        Execute('virsh net-destroy default')
        Execute('virsh net-undefine default')
        Execute('systemctl restart libvirtd')

        import params
        Directory([params.data_dir],
                  owner='root',
                  group='root',
                  mode=0775,
                  create_parents=True)

        Execute('virsh pool-define-as --name dir_pool dir --target ' +
                params.data_dir)
        Execute('virsh pool-autostart dir_pool')
        Execute('virsh pool-build dir_pool')
        Execute('virsh pool-start dir_pool')

        if params.enable_kvm:
            File('/tmp/lvm_pool.xml', mode=0755, content=params.lvm_pool)
            Execute('virsh pool-define /tmp/lvm_pool.xml')
            Execute('virsh pool-build storage_pool')
            Execute('virsh pool-autostart storage_pool')

        #https://www.ovirt.org/documentation/how-to/networking/bonding-vlan-bridge/
        if params.enable_net:
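            # NOTE: this reuses params.lvm_pool as the file content; a dedicated network XML definition was likely intended here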
            File('/tmp/network.xml', mode=0755, content=params.lvm_pool)
            Execute('virsh iface-define /tmp/network.xml')
            Execute('virsh iface-start bond0')

        Execute('wget ' + params.base_os_image_url + ' -O ' + params.data_dir +
                '/base.qcow2')
Example #31
def fill_conf_dir(component_conf_dir):
  import params
  hive_client_conf_path = os.path.realpath(format("{stack_root}/current/{component_directory}/conf"))
  component_conf_dir = os.path.realpath(component_conf_dir)
  mode_identified_for_file = 0644 if component_conf_dir == hive_client_conf_path else 0600
  mode_identified_for_dir = 0755 if component_conf_dir == hive_client_conf_path else 0700
  Directory(component_conf_dir,
            owner=params.hive_user,
            group=params.user_group,
            create_parents = True,
            mode=mode_identified_for_dir
  )

  XmlConfig("mapred-site.xml",
            conf_dir=component_conf_dir,
            configurations=params.config['configurations']['mapred-site'],
            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=mode_identified_for_file)


  File(format("{component_conf_dir}/hive-default.xml.template"),
       owner=params.hive_user,
       group=params.user_group,
       mode=mode_identified_for_file
  )

  File(format("{component_conf_dir}/hive-env.sh.template"),
       owner=params.hive_user,
       group=params.user_group,
       mode=mode_identified_for_file
  )

  # Create hive-log4j.properties and hive-exec-log4j.properties
  # in /etc/hive/conf and not in /etc/hive2/conf
  if params.log4j_version == '1':
    log4j_exec_filename = 'hive-exec-log4j.properties'
    if (params.log4j_exec_props != None):
      File(format("{component_conf_dir}/{log4j_exec_filename}"),
           mode=mode_identified_for_file,
           group=params.user_group,
           owner=params.hive_user,
           content=params.log4j_exec_props
      )
    elif os.path.exists(format("{component_conf_dir}/{log4j_exec_filename}.template")):
      File(format("{component_conf_dir}/{log4j_exec_filename}"),
           mode=mode_identified_for_file,
           group=params.user_group,
           owner=params.hive_user,
           content=StaticFile(format("{component_conf_dir}/{log4j_exec_filename}.template"))
      )

    log4j_filename = 'hive-log4j.properties'
    if (params.log4j_props != None):
      File(format("{component_conf_dir}/{log4j_filename}"),
           mode=mode_identified_for_file,
           group=params.user_group,
           owner=params.hive_user,
           content=InlineTemplate(params.log4j_props)
      )
    elif os.path.exists(format("{component_conf_dir}/{log4j_filename}.template")):
      File(format("{component_conf_dir}/{log4j_filename}"),
           mode=mode_identified_for_file,
           group=params.user_group,
           owner=params.hive_user,
           content=StaticFile(format("{component_conf_dir}/{log4j_filename}.template"))
      )
    pass # if params.log4j_version == '1'
Example #32
def storm(name=None):
    import params
    import os
    import sys

    Directory(
        params.log_dir,
        owner=params.storm_user,
        group=params.user_group,
        mode=0777,
        create_parents=True,
        cd_access="a",
    )

    Directory(
        [params.pid_dir, params.local_dir],
        owner=params.storm_user,
        group=params.user_group,
        create_parents=True,
        cd_access="a",
        mode=0755,
    )

    Directory(
        params.conf_dir,
        group=params.user_group,
        create_parents=True,
        cd_access="a",
    )

    File(format("{conf_dir}/config.yaml"),
         content=Template("config.yaml.j2"),
         owner=params.storm_user,
         group=params.user_group)

    File(params.conf_dir + "/jmxetric-conf.xml",
         content=StaticFile("jmxetric-conf.xml"),
         owner=params.storm_user)
    File(params.storm_lib_dir + "/gmetric4j-1.0.3.jar",
         content=StaticFile("gmetric4j-1.0.3.jar"),
         owner=params.storm_user)
    File(params.storm_lib_dir + "/jmxetric-1.0.4.jar",
         content=StaticFile("jmxetric-1.0.4.jar"),
         owner=params.storm_user)
    File(params.storm_lib_dir + "/oncrpc-1.0.7.jar",
         content=StaticFile("oncrpc-1.0.7.jar"),
         owner=params.storm_user)

    configurations = params.config['configurations']['storm-site']

    File(format("{conf_dir}/storm.yaml"),
         content=yaml_config_template(configurations),
         owner=params.storm_user,
         group=params.user_group)

    File(format("{conf_dir}/storm-env.sh"),
         owner=params.storm_user,
         content=InlineTemplate(params.storm_env_sh_template))

    # Generate atlas-application.properties.xml file and symlink the hook jars
    if params.enable_atlas_hook:
        script_path = os.path.realpath(__file__).split(
            '/services')[0] + '/hooks/before-INSTALL/scripts/atlas'
        sys.path.append(script_path)
        from setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook, setup_atlas_jar_symlinks
        atlas_hook_filepath = os.path.join(params.conf_dir,
                                           params.atlas_hook_filename)
        setup_atlas_hook(SERVICE.STORM,
                         params.storm_atlas_application_properties,
                         atlas_hook_filepath, params.storm_user,
                         params.user_group)
        storm_extlib_dir = os.path.join(params.storm_component_home_dir,
                                        "extlib")
        setup_atlas_jar_symlinks("storm", storm_extlib_dir)

    if params.has_metric_collector:
        File(format("{conf_dir}/storm-metrics2.properties"),
             owner=params.storm_user,
             group=params.user_group,
             content=Template("storm-metrics2.properties.j2"))

        # Remove symlinks. They can be there, if you doing upgrade from HDP < 2.2 to HDP >= 2.2
        Link(format("{storm_lib_dir}/ambari-metrics-storm-sink.jar"),
             action="delete")
        # On old HDP 2.1 versions, this symlink may also exist and break EU to newer versions
        Link("/usr/lib/storm/lib/ambari-metrics-storm-sink.jar",
             action="delete")

        sink_jar = params.sink_jar

        Execute(format(
            "{sudo} ln -s {sink_jar} {storm_lib_dir}/ambari-metrics-storm-sink.jar"
        ),
                not_if=format(
                    "ls {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
                only_if=format("ls {sink_jar}"))

    if params.storm_logs_supported:
        Directory(params.log4j_dir,
                  owner=params.storm_user,
                  group=params.user_group,
                  mode=0755,
                  create_parents=True)

        File(format("{log4j_dir}/cluster.xml"),
             owner=params.storm_user,
             content=InlineTemplate(params.storm_cluster_log4j_content))
        File(format("{log4j_dir}/worker.xml"),
             owner=params.storm_user,
             content=InlineTemplate(params.storm_worker_log4j_content))

    if params.security_enabled:
        TemplateConfig(format("{conf_dir}/storm_jaas.conf"),
                       owner=params.storm_user)

        TemplateConfig(format("{conf_dir}/client_jaas.conf"),
                       owner=params.storm_user)
        minRuid = configurations['_storm.min.ruid'] if configurations.has_key('_storm.min.ruid') else ''

        min_user_ruid = int(minRuid) if minRuid.isdigit() else _find_real_user_min_uid()

        File(format("{conf_dir}/worker-launcher.cfg"),
             content=Template("worker-launcher.cfg.j2",
                              min_user_ruid=min_user_ruid),
             owner='root',
             group=params.user_group)
Example #33
def fill_conf_dir(component_conf_dir):
  import params

  Directory(component_conf_dir,
            owner=params.hive_user,
            group=params.user_group,
            create_parents = True
  )

  XmlConfig("mapred-site.xml",
            conf_dir=component_conf_dir,
            configurations=params.config['configurations']['mapred-site'],
            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)


  File(format("{component_conf_dir}/hive-default.xml.template"),
       owner=params.hive_user,
       group=params.user_group
  )

  File(format("{component_conf_dir}/hive-env.sh.template"),
       owner=params.hive_user,
       group=params.user_group
  )

  # Create hive-log4j.properties and hive-exec-log4j.properties
  # in /etc/hive/conf and not in /etc/hive2/conf
  if params.log4j_version == '1':
    log4j_exec_filename = 'hive-exec-log4j.properties'
    if (params.log4j_exec_props != None):
      File(format("{component_conf_dir}/{log4j_exec_filename}"),
           mode=0644,
           group=params.user_group,
           owner=params.hive_user,
           content=params.log4j_exec_props
      )
    elif os.path.exists(format("{component_conf_dir}/{log4j_exec_filename}.template")):
      File(format("{component_conf_dir}/{log4j_exec_filename}"),
           mode=0644,
           group=params.user_group,
           owner=params.hive_user,
           content=StaticFile(format("{component_conf_dir}/{log4j_exec_filename}.template"))
      )

    log4j_filename = 'hive-log4j.properties'
    if (params.log4j_props != None):
      File(format("{component_conf_dir}/{log4j_filename}"),
           mode=0644,
           group=params.user_group,
           owner=params.hive_user,
           content=params.log4j_props
      )
    elif os.path.exists(format("{component_conf_dir}/{log4j_filename}.template")):
      File(format("{component_conf_dir}/{log4j_filename}"),
           mode=0644,
           group=params.user_group,
           owner=params.hive_user,
           content=StaticFile(format("{component_conf_dir}/{log4j_filename}.template"))
      )
    pass # if params.log4j_version == '1'