Example 1
    def update_atlas_simple_authz(self, env):
        import params
        env.set_params(params)
        if params.upgrade_direction == Direction.UPGRADE:
            orchestration = stack_select.PACKAGE_SCOPE_STANDARD
            summary = upgrade_summary.get_upgrade_summary()

            if summary is not None:
                orchestration = summary.orchestration
                if orchestration is None:
                    raise Fail(
                        "The upgrade summary does not contain an orchestration type"
                    )

                if orchestration.upper() in stack_select._PARTIAL_ORCHESTRATION_SCOPES:
                    orchestration = stack_select.PACKAGE_SCOPE_PATCH

            stack_select_packages = stack_select.get_packages(
                orchestration,
                service_name="ATLAS",
                component_name="ATLAS_SERVER")
            if stack_select_packages is None:
                raise Fail("Unable to get packages for stack-select")

            Logger.info(
                "ATLAS_SERVER component will be stack-selected to version {0} using a {1} orchestration"
                .format(params.version, orchestration.upper()))

            for stack_select_package_name in stack_select_packages:
                stack_select.select(stack_select_package_name, params.version)

            # make sure the Atlas home directory is owned by the Atlas user after the switch
            Directory(
                format('{metadata_home}/'),
                owner=params.metadata_user,
                group=params.user_group,
                recursive_ownership=True,
            )

            target_version = upgrade_summary.get_target_version('ATLAS')
            update_atlas_simple_authz_script = os.path.join(
                format('{stack_root}'), target_version, 'atlas', 'bin',
                'atlas_update_simple_auth_json.py')
            update_atlas_simple_authz_command = format(
                'source {params.conf_dir}/atlas-env.sh ; {update_atlas_simple_authz_script} {conf_dir}'
            )
            Execute(
                update_atlas_simple_authz_command,
                only_if=format("test -e {update_atlas_simple_authz_script}"),
                user=params.metadata_user)
            atlas_simple_auth_policy_file = os.path.join(
                format('{conf_dir}'), 'atlas-simple-authz-policy.json')
            File(atlas_simple_auth_policy_file,
                 group=params.user_group,
                 owner=params.metadata_user,
                 only_if=format("test -e {atlas_simple_auth_policy_file}"),
                 mode=0644)
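Note: throughout these examples, format() is the Ambari resource_management formatter, not str.format: it resolves the names in braces against the caller's local and global scope, including dotted lookups such as {params.version}. A minimal sketch of that behavior, assuming an environment where resource_management is importable (conf_dir is a made-up local):

from resource_management.libraries.functions.format import format

conf_dir = '/etc/atlas/conf'
print(format('source {conf_dir}/atlas-env.sh'))
# -> source /etc/atlas/conf/atlas-env.sh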
Example 2
def jdbc_connector():
    import params
    from urllib2 import HTTPError
    from resource_management import Fail
    for jar_name in params.sqoop_jdbc_drivers_dict:
        if not jar_name or 'mysql' in jar_name:
            continue
        downloaded_custom_connector = format("{sqoop_lib}/{jar_name}")
        custom_connector_to_remove = format(
            "{sqoop_lib}/" +
            str(params.sqoop_jdbc_drivers_to_remove[jar_name]))
        jdbc_driver_label = params.sqoop_jdbc_drivers_name_dict[jar_name]
        driver_curl_source = format("{jdk_location}/{jar_name}")
        environment = {"no_proxy": format("{ambari_server_hostname}")}
        try:
            if custom_connector_to_remove and os.path.isfile(
                    custom_connector_to_remove):
                File(custom_connector_to_remove, action='delete')

            File(
                downloaded_custom_connector,
                content=DownloadSource(driver_curl_source),
                mode=0644,
            )
        except HTTPError:
            error_string = format("Could not download {driver_curl_source}\n\
                 Please upload the JDBC driver to the server by running:\n\
                 ambari-server setup --jdbc-db={jdbc_driver_label} --jdbc-driver=<PATH TO DRIVER>\n\
                 at {ambari_server_hostname}")
            raise Fail(error_string)
Example 3
  def service_check(self, env):
    import params

    env.set_params(params)

    for i in xrange(0, self.ATLAS_CONNECT_TRIES):
      try:
        conn = httplib.HTTPConnection(params.metadata_host,
                                      int(params.metadata_port))
        conn.request("GET", format("http://{params.metadata_host}:{params.metadata_port}/"))
        break  # connected; stop retrying
      except (httplib.HTTPException, socket.error) as ex:
        if i < self.ATLAS_CONNECT_TRIES - 1:
          time.sleep(self.ATLAS_CONNECT_TIMEOUT)
          Logger.info("Connection failed. Next retry in %s seconds."
                      % (self.ATLAS_CONNECT_TIMEOUT))
          continue
        else:
          raise Fail("Service check has failed.")

    resp = conn.getresponse()
    if resp.status == 200:
      Logger.info('Atlas server up and running')
    else:
      Logger.debug('Atlas server not running')
      raise ComponentIsNotRunning()
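The loop above retries the HTTP probe a fixed number of times and only raises after the last attempt. The same pattern, stripped of the Ambari wrappers, can be sketched with only the Python 2 standard library (names are illustrative):

import httplib
import socket
import time

def probe(host, port, tries=3, wait_seconds=5):
    """Return True once GET / succeeds, False after `tries` failed attempts."""
    for attempt in xrange(tries):
        try:
            conn = httplib.HTTPConnection(host, port)
            conn.request("GET", "/")
            return conn.getresponse().status == 200
        except (httplib.HTTPException, socket.error):
            if attempt < tries - 1:
                time.sleep(wait_seconds)
    return False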
Example 4
def jdbc_connector():
  import params
  from urllib2 import HTTPError
  from resource_management import Fail
  for jar_name in params.sqoop_jdbc_drivers_dict:
    if 'mysql-connector-java.jar' in jar_name:
      continue
    downloaded_custom_connector = format("{sqoop_lib}/{jar_name}")
    jdbc_symlink_remote = params.sqoop_jdbc_drivers_dict[jar_name]
    jdbc_driver_label = params.sqoop_jdbc_drivers_name_dict[jar_name]
    driver_curl_source = format("{jdk_location}/{jdbc_symlink_remote}")
    try:
      File(downloaded_custom_connector,
           content = DownloadSource(driver_curl_source),
           mode = 0644,
      )
    except HTTPError:
      error_string = format("Could not download {driver_curl_source}\n\
                 Please upload the JDBC driver to the server by running:\n\
                 ambari-server setup --jdbc-db={jdbc_driver_label} --jdbc-driver=<PATH TO DRIVER>\n\
                 at {ambari_server_hostname}")
      raise Fail(error_string)
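In both versions of jdbc_connector, DownloadSource fetches the driver over HTTP from the Ambari server, which is why a failed fetch surfaces as urllib2.HTTPError. For readers without the resource_management library, a rough standard-library equivalent of the download step (the function name and error handling are illustrative, not Ambari's implementation):

import shutil
import urllib2

def download_jar(url, dest_path):
    # urllib2.urlopen raises urllib2.HTTPError on 4xx/5xx responses
    response = urllib2.urlopen(url)
    with open(dest_path, "wb") as dest:
        shutil.copyfileobj(response, dest)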
                 
Example 5
    def test_attribute_try_sleep_tries(self, popen_mock, time_mock):
        expected_call = "call('Retrying after %d seconds. Reason: %s', 1, 'Fail')"

        subproc_mock = MagicMock()
        subproc_mock.returncode = 0
        subproc_mock.communicate.side_effect = [Fail("Fail"), ["1"]]
        popen_mock.return_value = subproc_mock

        with Environment("/") as env:
            Execute('echo "1"', tries=2, try_sleep=10)

        time_mock.assert_called_once_with(10)
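The test works because side_effect accepts an iterable: mock raises any exception instance it yields and returns other values in order, so the first communicate() call fails and the second succeeds, driving exactly one retry. A self-contained illustration of that mock behavior (standalone, not tied to the Ambari test harness):

from mock import MagicMock

m = MagicMock()
m.side_effect = [ValueError("boom"), "ok"]

try:
    m()                # first call raises ValueError("boom")
except ValueError:
    pass
assert m() == "ok"     # second call returns the next item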
Example 6
 def check_no_symlink_to_version(self, structured_output, version):
     files = os.listdir(stack_root_current)
     for file_name in files:
         # os.path.join is safe whether or not stack_root_current has a trailing slash
         if version in os.path.realpath(os.path.join(stack_root_current, file_name)):
             message = "{0} contains symlink to version for remove! {1}".format(
                 stack_root_current, version)
             structured_output["remove_previous_stacks"] = {
                 "exit_code": -1,
                 "message": message,
             }
             self.put_structured_out(structured_output)
             raise Fail(message)
Example 7
    def check_datanode_shutdown(self, hdfs_binary):
        """
    Checks that a DataNode is down by running "hdfs dfsamin getDatanodeInfo"
    several times, pausing in between runs. Once the DataNode stops responding
    this method will return, otherwise it will raise a Fail(...) and retry
    automatically.
    The stack defaults for retrying for HDFS are also way too slow for this
    command; they are set to wait about 45 seconds between client retries. As
    a result, a single execution of dfsadmin will take 45 seconds to retry and
    the DataNode may be marked as dead, causing problems with HBase.
    https://issues.apache.org/jira/browse/HDFS-8510 tracks reducing the
    times for ipc.client.connect.retry.interval. In the meantime, override them
    here, but only for RU.
    :param hdfs_binary: name/path of the HDFS binary to use
    :return:
    """
        import params

        # override stock retry timeouts since after 30 seconds, the datanode is
        # marked as dead and can affect HBase during RU
        dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
        command = format(
            '{dfsadmin_base_command} -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo {dfs_dn_ipc_address}'
        )

        is_datanode_deregistered = False
        try:
            shell.checked_call(command, user=params.hdfs_user, tries=1)
        except Exception:
            # a non-zero exit code (Fail) means the DataNode no longer responds
            is_datanode_deregistered = True

        if not is_datanode_deregistered:
            Logger.info(
                "DataNode has not yet deregistered from the NameNode...")
            raise Fail(
                'DataNode has not yet deregistered from the NameNode...')

        Logger.info("DataNode has successfully shutdown.")
        return True
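The docstring notes that raising Fail triggers an automatic retry; in Ambari this is typically arranged by wrapping the method with the retry decorator from resource_management. A sketch of that wiring (the times/sleep_time values here are illustrative, not taken from this excerpt):

from resource_management.core.exceptions import Fail
from resource_management.libraries.functions.decorator import retry

@retry(times=30, sleep_time=10, err_class=Fail)
def check_datanode_shutdown(hdfs_binary):
    ...  # body as above; keeps raising Fail until the DataNode deregisters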
Example 8

def setup_conf_dir(name=None):  # 'master', 'tserver', 'monitor', 'gc', 'tracer', or 'client'
  import params

  # check if confdir is a link
  if not os.path.exists(params.conf_dir) or not os.path.islink(params.conf_dir):
    raise Fail("confdir {} must be a symlink".format(params.conf_dir))

  if name == 'client':
    dest_conf_dir = params.conf_dir

    # create a site file for client processes
    configs = {}
    configs.update(params.config['configurations']['accumulo-site'])
    if "instance.secret" in configs:
      configs.pop("instance.secret")
    if "trace.token.property.password" in configs:
      configs.pop("trace.token.property.password")
    XmlConfig("accumulo-site.xml",
              conf_dir = dest_conf_dir,
              configurations = configs,
              configuration_attributes=params.config['configurationAttributes']['accumulo-site'],
              owner = params.accumulo_user,
              group = params.user_group,
              mode = 0644
    )

    # create env file
    File(format("{dest_conf_dir}/accumulo-env.sh"),
         mode=0644,
         group=params.user_group,
         owner=params.accumulo_user,
         content=InlineTemplate(params.env_sh_template)
    )
  else:
    dest_conf_dir = params.server_conf_dir
    # create server conf directory
    Directory( params.server_conf_dir,
               mode=0700,
               owner = params.accumulo_user,
               group = params.user_group,
               create_parents = True
    )
    # create a site file for server processes
    configs = {}
    configs.update(params.config['configurations']['accumulo-site'])
    configs["instance.secret"] = str(params.config['configurations']['accumulo-env']['instance_secret'])
    configs["trace.token.property.password"] = str(params.trace_password)
    XmlConfig( "accumulo-site.xml",
               conf_dir = dest_conf_dir,
               configurations = configs,
               configuration_attributes=params.config['configurationAttributes']['accumulo-site'],
               owner = params.accumulo_user,
               group = params.user_group,
               mode = 0600
    )

    # create pid dir
    Directory( params.pid_dir,
               owner = params.accumulo_user,
               group = params.user_group,
               create_parents = True,
               cd_access = "a",
               mode = 0755,
    )

    # create log dir
    Directory( params.log_dir,
               owner = params.accumulo_user,
               group = params.user_group,
               create_parents = True,
               cd_access = "a",
               mode = 0755,
    )

    # create env file
    File(format("{dest_conf_dir}/accumulo-env.sh"),
         mode=0644,
         group=params.user_group,
         owner=params.accumulo_user,
         content=InlineTemplate(params.server_env_sh_template)
    )

    if params.security_enabled:
      accumulo_TemplateConfig("accumulo_jaas.conf", dest_conf_dir)

  # create client.conf file
  configs = {}
  if 'client' in params.config['configurations']:
    configs.update(params.config['configurations']['client'])
  configs["instance.name"] = params.instance_name
  configs["instance.zookeeper.host"] = params.config['configurations']['accumulo-site']['instance.zookeeper.host']
  copy_site_property(configs, 'instance.rpc.sasl.enabled')
  copy_site_property(configs, 'rpc.sasl.qop')
  copy_site_property(configs, 'rpc.useJsse')
  copy_site_property(configs, 'instance.rpc.ssl.clientAuth')
  copy_site_property(configs, 'instance.rpc.ssl.enabled')
  copy_site_property(configs, 'instance.zookeeper.timeout')
  copy_site_property(configs, 'trace.span.receivers')
  copy_site_property(configs, 'trace.zookeeper.path')
  for key,value in params.config['configurations']['accumulo-site'].iteritems():
    if key.startswith("trace.span.receiver."):
      configs[key] = value
  PropertiesFile(format("{dest_conf_dir}/client.conf"),
                 properties = configs,
                 owner = params.accumulo_user,
                 group = params.user_group
  )

  # create log4j.properties files
  if params.log4j_props is not None:
    File(format("{dest_conf_dir}/log4j.properties"),
         mode=0644,
         group=params.user_group,
         owner=params.accumulo_user,
         content=params.log4j_props
    )
  else:
    File(format("{dest_conf_dir}/log4j.properties"),
         mode=0644,
         group=params.user_group,
         owner=params.accumulo_user
    )

  # create logging configuration files
  accumulo_TemplateConfig("auditLog.xml", dest_conf_dir)
  accumulo_TemplateConfig("generic_logger.xml", dest_conf_dir)
  accumulo_TemplateConfig("monitor_logger.xml", dest_conf_dir)
  accumulo_StaticFile("accumulo-metrics.xml", dest_conf_dir)

  # create host files
  accumulo_TemplateConfig("tracers", dest_conf_dir)
  accumulo_TemplateConfig("gc", dest_conf_dir)
  accumulo_TemplateConfig("monitor", dest_conf_dir)
  accumulo_TemplateConfig("slaves", dest_conf_dir)
  accumulo_TemplateConfig("masters", dest_conf_dir)

  # metrics configuration
  if params.has_metric_collector:
    accumulo_TemplateConfig( "hadoop-metrics2-accumulo.properties", dest_conf_dir)

  # other server setup
  if name == 'master':
    params.HdfsResource(format("/user/{params.accumulo_user}"),
                         type="directory",
                         action="create_on_execute",
                         owner=params.accumulo_user,
                         mode=0700
    )
    params.HdfsResource(format("{params.parent_dir}"),
                         type="directory",
                         action="create_on_execute",
                         owner=params.accumulo_user,
                         mode=0700
    )
    params.HdfsResource(None, action="execute")
    if params.security_enabled and params.has_secure_user_auth:
      Execute( format("{params.kinit_cmd} "
                      "{params.daemon_script} init "
                      "--user {params.accumulo_principal_name} "
                      "--instance-name {params.instance_name} "
                      "--clear-instance-name "
                      ">{params.log_dir}/accumulo-init.out "
                      "2>{params.log_dir}/accumulo-init.err"),
               not_if=as_user(format("{params.kinit_cmd} "
                                     "{params.hadoop_bin_dir}/hadoop --config "
                                     "{params.hadoop_conf_dir} fs -stat "
                                     "{params.instance_volumes}"),
                              params.accumulo_user),
               logoutput=True,
               user=params.accumulo_user)
    else:
      passfile = format("{params.exec_tmp_dir}/pass")
      try:
        File(passfile,
             mode=0600,
             group=params.user_group,
             owner=params.accumulo_user,
             content=InlineTemplate('{{root_password}}\n'
                                    '{{root_password}}\n\n')
        )
        Execute( format("cat {passfile} | {params.daemon_script} init "
                        "--instance-name {params.instance_name} "
                        "--clear-instance-name "
                        ">{params.log_dir}/accumulo-init.out "
                        "2>{params.log_dir}/accumulo-init.err"),
                 not_if=as_user(format("{params.kinit_cmd} "
                                       "{params.hadoop_bin_dir}/hadoop --config "
                                       "{params.hadoop_conf_dir} fs -stat "
                                       "{params.instance_volumes}"),
                                params.accumulo_user),
                 logoutput=True,
                 user=params.accumulo_user)
      finally:
        File(passfile, action = "delete")

  if name == 'tracer':
    if params.security_enabled and params.has_secure_user_auth:
      Execute( format("{params.kinit_cmd} "
                      "{params.daemon_script} init --reset-security "
                      "--user {params.accumulo_principal_name} "
                      "--password NA "
                      ">{params.log_dir}/accumulo-reset.out "
                      "2>{params.log_dir}/accumulo-reset.err"),
               not_if=as_user(format("{params.kinit_cmd} "
                                     "{params.daemon_script} shell -e "
                                     "\"userpermissions -u "
                                     "{params.accumulo_principal_name}\" | "
                                     "grep System.CREATE_TABLE"),
                              params.accumulo_user),
               user=params.accumulo_user)
      create_user(params.smokeuser_principal, params.smoke_test_password)
    else:
      # do not try to reset security in nonsecure mode, for now
      # Execute( format("{params.daemon_script} init --reset-security "
      #                 "--user root "
      #                 ">{params.log_dir}/accumulo-reset.out "
      #                 "2>{params.log_dir}/accumulo-reset.err"),
      #          not_if=as_user(format("cat {rpassfile} | "
      #                                "{params.daemon_script} shell -e "
      #                                "\"userpermissions -u root\" | "
      #                                "grep System.CREATE_TABLE"),
      #                         params.accumulo_user),
      #          user=params.accumulo_user)
      create_user(params.smoke_test_user, params.smoke_test_password)
    create_user(params.trace_user, params.trace_password)
    rpassfile = format("{params.exec_tmp_dir}/pass0")
    cmdfile = format("{params.exec_tmp_dir}/resetcmds")
    try:
      File(cmdfile,
           mode=0600,
           group=params.user_group,
           owner=params.accumulo_user,
           content=InlineTemplate('grant -t trace -u {{trace_user}} Table.ALTER_TABLE\n'
                                  'grant -t trace -u {{trace_user}} Table.READ\n'
                                  'grant -t trace -u {{trace_user}} Table.WRITE\n\n')
      )
      if params.security_enabled and params.has_secure_user_auth:
        Execute( format("{params.kinit_cmd} {params.daemon_script} shell -f "
                        "{cmdfile}"),
                 only_if=as_user(format("{params.kinit_cmd} "
                                        "{params.daemon_script} shell "
                                        "-e \"table trace\""),
                                 params.accumulo_user),
                 not_if=as_user(format("{params.kinit_cmd} "
                                       "{params.daemon_script} shell "
                                       "-e \"userpermissions -u "
                                       "{params.trace_user} | "
                                       "grep Table.READ | grep trace"),
                                params.accumulo_user),
                 user=params.accumulo_user)
      else:
        File(rpassfile,
             mode=0600,
             group=params.user_group,
             owner=params.accumulo_user,
             content=InlineTemplate('{{root_password}}\n\n')
        )
        Execute( format("cat {rpassfile} | {params.daemon_script} shell -f "
                        "{cmdfile} -u root"),
                 only_if=as_user(format("cat {rpassfile} | "
                                       "{params.daemon_script} shell -u root "
                                       "-e \"table trace\""),
                                params.accumulo_user),
                 not_if=as_user(format("cat {rpassfile} | "
                                       "{params.daemon_script} shell -u root "
                                       "-e \"userpermissions -u "
                                       "{params.trace_user} | "
                                       "grep Table.READ | grep trace"),
                                params.accumulo_user),
                 user=params.accumulo_user)
    finally:
      try_remove(rpassfile)
      try_remove(cmdfile)
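copy_site_property, used above while assembling client.conf, is defined elsewhere in the Accumulo scripts and not shown in this excerpt. A minimal sketch consistent with how it is called here (an assumption, not quoted from the source):

def copy_site_property(configs, name):
    # copy a property from accumulo-site into the client configs, if present
    import params
    site_configs = params.config['configurations']['accumulo-site']
    if name in site_configs:
        configs[name] = site_configs[name]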
Example 9
def metadata():
    import params

    Directory([params.pid_dir],
              mode=0755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              recursive=True
    )

    Directory(params.conf_dir,
              mode=0755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              recursive=True
    )

    Directory(params.log_dir,
              mode=0755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              recursive=True
    )

    Directory(params.data_dir,
              mode=0755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              recursive=True
    )

    Directory(params.expanded_war_dir,
              mode=0755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              recursive=True
    )

    metadata_war_file = format('{params.metadata_home}/server/webapp/metadata.war')
    if not os.path.isfile(metadata_war_file):
        raise Fail("Unable to copy {0} because it does not exist".format(metadata_war_file))

    Logger.info("Copying {0} to {1}".format(metadata_war_file, params.expanded_war_dir))
    shutil.copy2(metadata_war_file, params.expanded_war_dir)

    File(format('{conf_dir}/application.properties'),
         content=InlineTemplate(params.application_properties_content),
         mode=0644,
         owner=params.metadata_user,
         group=params.user_group
    )

    File(format("{conf_dir}/metadata-env.sh"),
         owner=params.metadata_user,
         group=params.user_group,
         mode=0755,
         content=InlineTemplate(params.metadata_env_content)
    )

    File(format("{conf_dir}/log4j.xml"),
         mode=0644,
         owner=params.metadata_user,
         group=params.user_group,
         content=StaticFile('log4j.xml')
    )