Example #1
    def actionexecute(self, env):
        config = Script.get_config()
        tmp_dir = Script.get_tmp_dir()
        report_file_handler_dict = {}

        #print "CONFIG: " + str(config)

        check_execute_list = config['commandParams']['check_execute_list']
        structured_output = {}

        # check each of the commands; if an unknown exception wasn't handled
        # by the functions, then produce a generic exit_code : 1
        if CHECK_JAVA_HOME in check_execute_list:
            try:
                java_home_check_structured_output = self.execute_java_home_available_check(
                    config)
                structured_output[
                    CHECK_JAVA_HOME] = java_home_check_structured_output
            except Exception, exception:
                print "There was an unexpected error while checking for the Java home location: " + str(
                    exception)
                structured_output[CHECK_JAVA_HOME] = {
                    "exit_code": 1,
                    "message": str(exception)
                }
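For orientation, a minimal sketch of how an actionexecute hook like the one above is wired up as an Ambari custom action script. The class name and constant are illustrative assumptions; execute() and put_structured_out() are the standard Script entry points the other examples here rely on.

from resource_management import Script

CHECK_JAVA_HOME = "java_home_check"  # assumed constant name


class CheckHost(Script):  # hypothetical class name
    def actionexecute(self, env):
        structured_output = {}
        # ... per-check logic as in the example above ...
        self.put_structured_out(structured_output)


if __name__ == "__main__":
    # execute() parses the command JSON passed by the Ambari agent and
    # dispatches to actionexecute()
    CheckHost().execute()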
Example #2
def install_streamline():
    import params
    Directory([params.conf_dir],
              owner=params.streamline_user,
              group=params.user_group,
              mode=0775,
              create_parents=True)
    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir) or not os.path.exists(
                              params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        Execute('wget ' + params.download_url + ' -O /tmp/' + params.filename,
                user=params.streamline_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir +
                ' ' + params.install_dir)
        Execute('/bin/rm -rf ' + params.install_dir + '/conf')
        Execute('ln -s ' + params.conf_dir + ' ' + params.install_dir +
                '/conf')
        Execute('chown -R %s:%s %s/%s' %
                (params.streamline_user, params.user_group, params.stack_root,
                 params.version_dir))
        Execute(
            'chown -R %s:%s %s' %
            (params.streamline_user, params.user_group, params.install_dir))
        Execute('/bin/rm -f /tmp/' + params.filename)
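The chained '+' concatenation above is easy to get wrong; a hedged sketch of the download step using resource_management's format() helper instead, which resolves {name} placeholders from the caller's scope (the same helper the params modules below use):

from resource_management import Execute, format


def download_streamline():  # hypothetical helper; same params module assumed
    import params
    download_url = params.download_url
    filename = params.filename
    # one interpolation point per value instead of chained '+' concatenation
    Execute(format('wget {download_url} -O /tmp/{filename}'),
            user=params.streamline_user)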
Example #3
    def actionexecute(self, env):
        Logger.info("Host checks started.")
        config = Script.get_config()
        tmp_dir = Script.get_tmp_dir()
        report_file_handler_dict = {}

        #print "CONFIG: " + str(config)

        check_execute_list = config['commandParams']['check_execute_list']
        if check_execute_list == '*BEFORE_CLEANUP_HOST_CHECKS*':
            check_execute_list = BEFORE_CLEANUP_HOST_CHECKS
        structured_output = {}

        Logger.info("Check execute list: " + str(check_execute_list))

        # check each of the commands; if an unknown exception wasn't handled
        # by the functions, then produce a generic exit_code : 1
        if CHECK_JAVA_HOME in check_execute_list:
            try:
                java_home_check_structured_output = self.execute_java_home_available_check(
                    config)
                structured_output[
                    CHECK_JAVA_HOME] = java_home_check_structured_output
            except Exception, exception:
                Logger.exception(
                    "There was an unexpected error while checking for the Java home location: "
                    + str(exception))
                structured_output[CHECK_JAVA_HOME] = {
                    "exit_code": 1,
                    "message": str(exception)
                }
Example #4
def get_pid_file():
  """
  Fetches the pid file, which will be used to get the status of the HAWQ Master, Standby
  or Segments
  """

  config = Script.get_config()
  
  component_name = config['componentName']
  component = "master" if component_name in ["HAWQMASTER", "HAWQSTANDBY"] else "segment"
  hawq_pid_file = os.path.join(hawq_constants.hawq_pid_dir, "hawq-{0}.pid".format(component))

  File(hawq_pid_file, action='delete')
  utils.create_dir_as_hawq_user(hawq_constants.hawq_pid_dir)

  #Get hawq_master_directory or hawq_segment_directory value from hawq-site.xml depending 
  #on the component
  hawq_site_directory_property = "hawq_{0}_directory".format(component)
  
  #hawq-site content from Ambari server will not be available when the 
  #command type is STATUS_COMMAND. Hence, reading it directly from the local file
  postmaster_pid_file = os.path.join(common.get_local_hawq_site_property(
      hawq_site_directory_property), hawq_constants.postmaster_pid_filename)

  pid = ""
  if os.path.exists(postmaster_pid_file):
    with open(postmaster_pid_file, 'r') as fh:
      pid = fh.readline().strip()

  if not pid:
    raise Fail("Failed to fetch pid from {0}".format(postmaster_pid_file))

  File(hawq_pid_file, content=pid, owner=hawq_constants.hawq_user, group=hawq_constants.hawq_user)

  return hawq_pid_file
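A short sketch of how the pid file produced by get_pid_file() might be consumed for a status check; os.kill(pid, 0) probes process existence without delivering a signal. The function name is an assumption and not part of the HAWQ scripts, while ComponentIsNotRunning is the standard resource_management exception for this case.

import os

from resource_management.core.exceptions import ComponentIsNotRunning


def check_hawq_process(hawq_pid_file):  # hypothetical consumer
    with open(hawq_pid_file, 'r') as fh:
        pid = int(fh.readline().strip())
    try:
        os.kill(pid, 0)  # signal 0: existence check only, nothing is sent
    except OSError:
        raise ComponentIsNotRunning()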
Example #5
  def actionexecute(self, env):
    config = Script.get_config()
    structured_output = {}


    try:
      repo_info_json = config['hostLevelParams']['repo_info']
      repo_info_dict = json.loads(repo_info_json)

      for item in repo_info_dict["repositories"]:
        base_url = item["base_url"]
        repo_name = item["repo_name"]
        repo_id = item["repo_id"]

        repo_rhel_suse = config['configurations']['cluster-env']['repo_suse_rhel_template']
        repo_ubuntu = config['configurations']['cluster-env']['repo_ubuntu_template']

        template = repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else repo_ubuntu
        ubuntu_components = [repo_name] + self.UBUNTU_REPO_COMPONENTS_POSTFIX

        Repository(repo_id,
                 action = "create",
                 base_url = base_url,
                 mirror_list = None,
                 repo_file_name = repo_name,
                 repo_template = template,
                 components = ubuntu_components, # ubuntu specific
        )
        structured_output["repo_update"] = {"exit_code" : 0, "message": format("Repository files successfully updated!")}
    except Exception, exception:
      Logger.logger.exception("ERROR: There was an unexpected error while updating repositories")
      raise Fail("Failed to update repo files!")
Example #6
  def actionexecute(self, env):
    config = Script.get_config()
    structured_output = {}


    try:
      repo_info = config['repositoryFile']

      for item in repo_info["repositories"]:
        base_url = item["baseUrl"]
        repo_name = item["repoName"]
        repo_id = item["repoId"]
        distribution = item["distribution"] if "distribution" in item else None
        components = item["components"] if "components" in item else None

        repo_rhel_suse = config['configurations']['cluster-env']['repo_suse_rhel_template']
        repo_ubuntu = config['configurations']['cluster-env']['repo_ubuntu_template']

        template = repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else repo_ubuntu
        ubuntu_components = [distribution if distribution else repo_name] + \
                            [components.replace(",", " ") if components else self.UBUNTU_REPO_COMPONENTS_POSTFIX]

        Repository(repo_id,
                 action = "create",
                 base_url = base_url,
                 mirror_list = None,
                 repo_file_name = repo_name,
                 repo_template = template,
                 components = ubuntu_components, # ubuntu specific
        )
        structured_output["repo_update"] = {"exit_code" : 0, "message": format("Repository files successfully updated!")}
    except Exception, exception:
      Logger.logger.exception("ERROR: There was an unexpected error while updating repositories")
      raise Fail("Failed to update repo files!")
Example #7
def get_pid_file():
  """
  Fetches the pid file, which will be used to get the status of the HAWQ Master, Standby
  or Segments
  """

  config = Script.get_config()
  
  component_name = config['componentName']
  component = "master" if component_name in ["HAWQMASTER", "HAWQSTANDBY"] else "segment"
  hawq_pid_file = os.path.join(hawqconstants.hawq_pid_dir, "hawq-{0}.pid".format(component))

  File(hawq_pid_file, action='delete')
  utils.create_dir_as_hawq_user(hawqconstants.hawq_pid_dir)

  #Get hawq_master_directory or hawq_segment_directory value from hawq-site.xml depending 
  #on the component
  hawq_site_directory_property = "hawq_{0}_directory".format(component)
  
  #hawq-site content from Ambari server will not be available when the 
  #command type is STATUS_COMMAND. Hence, reading it directly from the local file
  postmaster_pid_file = os.path.join(common.get_local_hawq_site_property(
      hawq_site_directory_property), hawqconstants.postmaster_pid_filename)

  pid = ""
  if os.path.exists(postmaster_pid_file):
    with open(postmaster_pid_file, 'r') as fh:
      pid = fh.readline().strip()

  if not pid:
    raise Fail("Failed to fetch pid from {0}".format(postmaster_pid_file))

  File(hawq_pid_file, content=pid, owner=hawqconstants.hawq_user, group=hawqconstants.hawq_user)

  return hawq_pid_file
Example #8
  def actionexecute(self, env):
    config = Script.get_config()
    tmp_dir = Script.get_tmp_dir()

    #print "CONFIG: " + str(config)

    check_execute_list = config['commandParams']['check_execute_list']
    structured_output = {}

    # check each of the commands; if an unknown exception wasn't handled
    # by the functions, then produce a generic exit_code : 1
    if CHECK_JAVA_HOME in check_execute_list:
      try:
        java_home_check_structured_output = self.execute_java_home_available_check(config)
        structured_output[CHECK_JAVA_HOME] = java_home_check_structured_output
      except Exception, exception:
        print "There was an unexpected error while checking for the Java home location: " + str(exception)
        structured_output[CHECK_JAVA_HOME] = {"exit_code" : 1, "message": str(exception)}
Example #9
  def actionexecute(self, env):
    config = Script.get_config()
    params = config['commandParams']

    validation_passed = self.check_users(params) and self.check_directories(params)

    if validation_passed:
      print 'All configurations validated!'
    else:
      self.fail_with_error('Configurations validation failed!')
Example #10
    def start(self, env):  # parse the service's configuration parameters
        config = Script.get_config()
        AMBARI_USER = config['configurations']['ambari-server-env']['AMBARI_USER']
        AMBARI_USER_PWD = config['configurations']['ambari-server-env']['AMBARI_USER_PASSWORD']
        AMBARI_SERVER_HOST = config['configurations']['ambari-server-env']['AMBARI_SERVER_HOST']
        AMBARI_WEB_LISTEN_PORT = config['configurations']['ambari-server-env']['AMBARI_WEB_LISTEN_PORT']
        print "Ambari User: " + AMBARI_USER + "\nAmbari user password: " + AMBARI_USER_PWD + "\nServer: " + AMBARI_SERVER_HOST + "\nListen port: " + str(AMBARI_WEB_LISTEN_PORT)
        cmd = "mkdir -p /var/run/guoqingyao"
        os.system(cmd)
        print "start the service"
Example #12
def post_metrics_to_collector(ams_metrics_post_url,
                              metric_collector_host,
                              metric_collector_port,
                              metric_collector_https_enabled,
                              metric_json,
                              headers,
                              ca_certs,
                              tries=1,
                              connect_timeout=10):
    for i in xrange(0, tries):
        try:
            Logger.info("Generated metrics for host %s :\n%s" %
                        (metric_collector_host, metric_json))

            Logger.info("Connecting (POST) to %s:%s%s" %
                        (metric_collector_host, metric_collector_port,
                         ams_metrics_post_url))
            conn = network.get_http_connection(
                metric_collector_host,
                int(metric_collector_port),
                metric_collector_https_enabled,
                ca_certs,
                ssl_version=Script.get_force_https_protocol_value())
            conn.request("POST", ams_metrics_post_url, metric_json, headers)

            response = conn.getresponse()
            Logger.info(
                "Http response for host %s: %s %s" %
                (metric_collector_host, response.status, response.reason))
        except (httplib.HTTPException, socket.error) as ex:
            if i < tries - 1:  #range/xrange returns items from start to end-1
                time.sleep(connect_timeout)
                Logger.info(
                    "Connection failed for host %s. Next retry in %s seconds."
                    % (metric_collector_host, connect_timeout))
                continue
            else:
                raise Fail("Metrics were not saved. Connection failed.")

        data = response.read()
        Logger.info("Http data: %s" % data)
        conn.close()

        if response.status == 200:
            Logger.info("Metrics were saved.")
            break
        else:
            Logger.info("Metrics were not saved.")
            if i < tries - 1:  #range/xrange returns items from start to end-1
                time.sleep(tries)
                Logger.info("Next retry in %s seconds." % (tries))
            else:
                raise Fail(
                    "Metrics were not saved. POST request status: %s %s \n%s" %
                    (response.status, response.reason, data))
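An illustrative call to the helper above; the endpoint, host, and port are placeholder values, and the payload would normally be rendered from a Template as in the service-check example further down.

headers = {"Content-type": "application/json"}
metric_json = '{"metrics": []}'  # placeholder payload
post_metrics_to_collector("/ws/v1/timeline/metrics",  # assumed AMS endpoint
                          "collector.example.com",    # placeholder host
                          "6188",                     # placeholder port
                          False,                      # HTTPS disabled
                          metric_json,
                          headers,
                          None,                       # ca_certs unused over plain HTTP
                          tries=3,
                          connect_timeout=10)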
Example #13
    def actionexecute(self, env):
        config = Script.get_config()
        structured_output = {}
        version = config['commandParams']['version']
        self.stack_tool_package = stack_tools.get_stack_tool_package(
            stack_tools.STACK_SELECTOR_NAME)

        versions_to_remove = self.get_lower_versions(version)

        for low_version in versions_to_remove:
            self.remove_stack_version(structured_output, low_version)
Example #14
  def actionexecute(self, env):
    config = Script.get_config()
    structured_output = {}
    cmd = self.get_clearcache_cmd()

    Logger.info("Clearing repository cache")
    code, output = shell.call(cmd, sudo = True)
    if 0 == code:
      structured_output["clear_repocache"] = {"exit_code" : 0, "message": format("Repository cache successfully cleared!")}
    else:
      structured_output["clear_repocache"] = {"exit_code": code, "message": "Failed to clear repository cache! {0}".format(str(output))}
    self.put_structured_out(structured_output)
Example #15
    def actionexecute(self, env):
        config = Script.get_config()
        packages_to_remove = config['roleParams']['package_list'].split(',')
        structured_output = {'success': [], 'failure': []}

        for package_name in packages_to_remove:
            try:
                Package(package_name,
                        action='remove',
                        ignore_dependencies=True)
                Logger.info('Removed {0}'.format(package_name))
                structured_output['success'].append(package_name)
            except Exception, e:
                Logger.exception('Failed to remove {0}: {1}'.format(
                    package_name, str(e)))
                structured_output['failure'].append(package_name)
Example #16
    def rebalancehdfs(self, env):
        from ambari_commons.os_windows import UserHelper, run_os_command_impersonated
        import params
        env.set_params(params)

        hdfs_username, hdfs_domain = UserHelper.parse_user_name(
            params.hdfs_user, ".")

        name_node_parameters = json.loads(params.name_node_params)
        threshold = name_node_parameters['threshold']
        _print("Starting balancer with threshold = %s\n" % threshold)

        def calculateCompletePercent(first, current):
            return 1.0 - current.bytesLeftToMove / first.bytesLeftToMove

        def startRebalancingProcess(threshold):
            rebalanceCommand = 'hdfs balancer -threshold %s' % threshold
            return ['cmd', '/C', rebalanceCommand]

        command = startRebalancingProcess(threshold)
        basedir = os.path.join(env.config.basedir, 'scripts')

        _print("Executing command %s\n" % command)

        parser = hdfs_rebalance.HdfsParser()
        returncode, stdout, err = run_os_command_impersonated(
            ' '.join(command), hdfs_username,
            Script.get_password(params.hdfs_user), hdfs_domain)

        for line in stdout.split('\n'):
            _print('[balancer] %s %s' % (str(datetime.now()), line))
            pl = parser.parseLine(line)
            if pl:
                res = pl.toJson()
                res['completePercent'] = calculateCompletePercent(
                    parser.initialLine, pl)

                self.put_structured_out(res)
            elif parser.state == 'PROCESS_FINISED':
                _print('[balancer] %s %s' %
                       (str(datetime.now()), 'Process is finished'))
                self.put_structured_out({'completePercent': 1})
                break

        if returncode != None and returncode != 0:
            raise Fail(
                'Hdfs rebalance process exited with error. See the log output')
Example #17
    def actionexecute(self, env):
        config = Script.get_config()
        structured_output = {}

        try:
            repo_info_json = config['hostLevelParams']['repo_info']
            repo_info_dict = json.loads(repo_info_json)

            for item in repo_info_dict["repositories"]:
                base_url = item["base_url"]
                repo_name = item["repo_name"]
                repo_id = item["repo_id"]

                repo_rhel_suse = config['configurations']['cluster-env'][
                    'repo_suse_rhel_template']
                repo_ubuntu = config['configurations']['cluster-env'][
                    'repo_ubuntu_template']

                template = repo_rhel_suse if OSCheck.is_suse_family(
                ) or OSCheck.is_redhat_family() else repo_ubuntu
                ubuntu_components = [repo_name
                                     ] + self.UBUNTU_REPO_COMPONENTS_POSTFIX

                Repository(
                    repo_id,
                    action="create",
                    base_url=base_url,
                    mirror_list=None,
                    repo_file_name=repo_name,
                    repo_template=template,
                    components=ubuntu_components,  # ubuntu specific
                )
                structured_output["repo_update"] = {
                    "exit_code": 0,
                    "message": format("Repository files successfully updated!")
                }
        except Exception, exception:
            Logger.logger.exception(
                "ERROR: There was an unexpected error while updating repositories"
            )
            raise Fail("Failed to update repo files!")
Example #18
 def start(self, env):  # parse the service's configuration parameters
     config = Script.get_config()
     AMBARI_USER = config['configurations']['ambari-server-env']['AMBARI_USER']
     AMBARI_USER_PWD = config['configurations']['ambari-server-env']['AMBARI_USER_PASSWORD']
     AMBARI_SERVER_HOST = config['configurations']['ambari-server-env']['AMBARI_SERVER_HOST']
     AMBARI_WEB_LISTEN_PORT = config['configurations']['ambari-server-env']['AMBARI_WEB_LISTEN_PORT']
     print "Ambari User: " + AMBARI_USER + "\nAmbari user password: " + AMBARI_USER_PWD + "\nServer: " + AMBARI_SERVER_HOST + "\nListen port: " + str(AMBARI_WEB_LISTEN_PORT)
     cmd = "mkdir -p /var/run/guoqingyao"
     os.system(cmd)
     print "start the service"

 def stop(self, env):
     cmd = "rm -rf /var/run/guoqingyao"
     os.system(cmd)
     print "stop the service"

 def status(self, env):
     cmd = "echo 'check one time' > /tmp/my.log"
     os.system(cmd)
     cmd = "ls /var/run/guoqingyao"
     result = os.system(cmd)
     if result != 0:
         print "The component is not running"
Example #19
 def create_30_config_version(self, env):
     package_name = 'registry'
     stack_root = Script.get_stack_root()
     current_dir = "{0}/current/registry/conf".format(stack_root)
     directories = [{
         "conf_dir": "/etc/registry/conf",
         "current_dir": current_dir
     }]
     stack_version = stack_select.get_stack_version_before_install(
         package_name)
     conf_dir = "/etc/registry/conf"
     if stack_version:
         try:
             #Check if broken symbolic links issue exists
             os.stat(conf_dir)
             conf_select.convert_conf_directories_to_symlinks(
                 package_name, stack_version, directories)
             cp_cmd = as_sudo([
                 "cp", "-a", "-f", "/etc/registry/conf.backup/.",
                 "/etc/registry/conf"
             ])
             Execute(cp_cmd, logoutput=True)
         except OSError as e:
             Logger.warning(
                 "Detected broken symlink : {0}. Attempting to repair.".
                 format(str(e)))
             #removing symlink conf directory
             sudo.unlink(conf_dir)
             #make conf dir again
             sudo.makedirs(conf_dir, 0755)
             #copy all files
             for files in glob.glob("/etc/registry/conf.backup/*"):
                 cp_cmd = as_sudo(["cp", "-r", files, conf_dir])
                 Execute(cp_cmd, logoutput=True)
             conf_select.convert_conf_directories_to_symlinks(
                 package_name, stack_version, directories)
Example #20
  def rebalancehdfs(self, env):
    from ambari_commons.os_windows import UserHelper, run_os_command_impersonated
    import params
    env.set_params(params)

    hdfs_username, hdfs_domain = UserHelper.parse_user_name(params.hdfs_user, ".")

    name_node_parameters = json.loads( params.name_node_params )
    threshold = name_node_parameters['threshold']
    _print("Starting balancer with threshold = %s\n" % threshold)

    def calculateCompletePercent(first, current):
      return 1.0 - current.bytesLeftToMove/first.bytesLeftToMove

    def startRebalancingProcess(threshold):
      rebalanceCommand = 'hdfs balancer -threshold %s' % threshold
      return ['cmd', '/C', rebalanceCommand]

    command = startRebalancingProcess(threshold)
    basedir = os.path.join(env.config.basedir, 'scripts')

    _print("Executing command %s\n" % command)

    parser = hdfs_rebalance.HdfsParser()
    returncode, stdout, err = run_os_command_impersonated(' '.join(command), hdfs_username, Script.get_password(params.hdfs_user), hdfs_domain)

    for line in stdout.split('\n'):
      _print('[balancer] %s %s' % (str(datetime.now()), line ))
      pl = parser.parseLine(line)
      if pl:
        res = pl.toJson()
        res['completePercent'] = calculateCompletePercent(parser.initialLine, pl)

        self.put_structured_out(res)
      elif parser.state == 'PROCESS_FINISED' :
        _print('[balancer] %s %s' % (str(datetime.now()), 'Process is finished' ))
        self.put_structured_out({'completePercent' : 1})
        break

    if returncode != None and returncode != 0:
      raise Fail('Hdfs rebalance process exited with error. See the log output')
Example #21
 def __init__(self, nodeType=None):
     Script.__init__(self)
     self.nodeType = nodeType
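A constructor like this lets one script module serve several node types; a hedged sketch of how subclasses might use it, with all class names illustrative:

from resource_management import Script


class BaseComponentScript(Script):  # mirrors the constructor above
    def __init__(self, nodeType=None):
        Script.__init__(self)
        self.nodeType = nodeType


class MasterScript(BaseComponentScript):
    def __init__(self):
        BaseComponentScript.__init__(self, nodeType='master')


class SegmentScript(BaseComponentScript):
    def __init__(self):
        BaseComponentScript.__init__(self, nodeType='segment')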
Example #22
    def service_check_for_single_host(self, metric_collector_host, params):
        random_value1 = random.random()
        headers = {"Content-type": "application/json"}
        ca_certs = os.path.join(params.ams_monitor_conf_dir,
                                params.metric_truststore_ca_certs)

        current_time = int(time.time()) * 1000
        metric_json = Template('smoketest_metrics.json.j2',
                               hostname=params.hostname,
                               random1=random_value1,
                               current_time=current_time).get_content()
        try:
            post_metrics_to_collector(
                self.AMS_METRICS_POST_URL, metric_collector_host,
                params.metric_collector_port,
                params.metric_collector_https_enabled, metric_json, headers,
                ca_certs, self.AMS_CONNECT_TRIES, self.AMS_CONNECT_TIMEOUT)

            get_metrics_parameters = {
                "metricNames": "AMBARI_METRICS.SmokeTest.FakeMetric",
                "appId": "amssmoketestfake",
                "hostname": params.hostname,
                "startTime": current_time - 60000,
                "endTime": current_time + 61000,
                "precision": "seconds",
                "grouped": "false",
            }
            encoded_get_metrics_parameters = urllib.urlencode(
                get_metrics_parameters)

            Logger.info(
                "Connecting (GET) to %s:%s%s" %
                (metric_collector_host, params.metric_collector_port,
                 self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters))
            for i in xrange(0, self.AMS_READ_TRIES):
                conn = network.get_http_connection(
                    metric_collector_host,
                    int(params.metric_collector_port),
                    params.metric_collector_https_enabled,
                    ca_certs,
                    ssl_version=Script.get_force_https_protocol_value())
                conn.request(
                    "GET",
                    self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters)
                response = conn.getresponse()
                Logger.info(
                    "Http response for host %s : %s %s" %
                    (metric_collector_host, response.status, response.reason))

                data = response.read()
                Logger.info("Http data: %s" % data)
                conn.close()

                if response.status == 200:
                    Logger.info("Metrics were retrieved from host %s" %
                                metric_collector_host)
                else:
                    raise Fail(
                        "Metrics were not retrieved from host %s. GET request status: %s %s \n%s"
                        % (metric_collector_host, response.status,
                           response.reason, data))
                data_json = json.loads(data)

                def floats_eq(f1, f2, delta):
                    return abs(f1 - f2) < delta

                values_are_present = False
                for metrics_data in data_json["metrics"]:
                    if (str(current_time) in metrics_data["metrics"] and
                            str(current_time + 1000) in metrics_data["metrics"]
                            and floats_eq(
                                metrics_data["metrics"][str(current_time)],
                                random_value1, 0.0000001) and floats_eq(
                                    metrics_data["metrics"][str(current_time +
                                                                1000)],
                                    current_time, 1)):
                        Logger.info(
                            "Values %s and %s were found in the response from host %s."
                            % (random_value1, current_time,
                               metric_collector_host))
                        values_are_present = True
                        break
                        pass

                if not values_are_present:
                    if i < self.AMS_READ_TRIES - 1:  #range/xrange returns items from start to end-1
                        Logger.info(
                            "Values weren't stored yet. Retrying in %s seconds."
                            % (self.AMS_READ_TIMEOUT))
                        time.sleep(self.AMS_READ_TIMEOUT)
                    else:
                        raise Fail(
                            "Values %s and %s were not found in the response."
                            % (random_value1, current_time))
                else:
                    break
                    pass
        except Fail as ex:
            Logger.warning(
                "Ambari Metrics service check failed on collector host %s. Reason : %s"
                % (metric_collector_host, str(ex)))
            raise Fail(
                "Ambari Metrics service check failed on collector host %s. Reason : %s"
                % (metric_collector_host, str(ex)))
Example #23
from resource_management import Script

config = Script.get_config()['configurations']

TERMS_ACCEPTED = config['chorus-env']['chorus.termsaccepted'] == 'yes'
SECURITY_SALT = '' if config['chorus-env'][
    'chorus.security.salt'] == 'generate' else config['chorus-env'][
        'chorus.security.salt']

INSTALLER_PATH = config['chorus-env']['chorus.installation.installerpath']
INSTALLATION_DIRECTORY = config['chorus-env']['chorus.installation.directory']
DATA_DIRECTORY = config['chorus-env']['chorus.installation.datadirectory']

SERVER_PORT = config['chorus-env']['chorus.server.port']
SERVER_TIMEOUT = config['chorus-env']['chorus.server.timeout']
DEFAULT_PREVIEW_ROW_LIMIT = config['chorus-env'][
    'chorus.server.defaultpreviewrowlimit']
EXECUTION_TIMEOUT = config['chorus-env']['chorus.server.executiontimeout']
LOG_LEVEL = config['chorus-env']['chorus.server.loglevel']
MAIL_ENABLED = config['chorus-env']['chorus.server.mailenabled']

minimum_memory = config['chorus-tuning']['chorus.minimum_memory']
maximum_memory = config['chorus-tuning']['chorus.maximum_memory']
young_heap_size = config['chorus-tuning']['chorus.young_heap_size']
max_perm_size = config['chorus-tuning']['chorus.max_perm_size']
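Module-level parameter files like this one are conventionally consumed with a bare "import params" from the service scripts, as in the install_streamline() example above. A sketch, with the class name assumed:

from resource_management import Script, Fail

import params  # the module defined above


class ChorusServer(Script):  # hypothetical service script
    def install(self, env):
        if not params.TERMS_ACCEPTED:
            raise Fail("chorus.termsaccepted must be 'yes' before installing")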
Example #24
import re

import os
from resource_management import Script, format, Package, Execute, Fail
from resource_management.core.logger import Logger
from resource_management.libraries.functions import stack_tools
from resource_management.libraries.functions.stack_select import get_stack_versions
from ambari_commons.repo_manager import ManagerFactory

CURRENT_ = "/current/"
stack_root = Script.get_stack_root()
stack_root_current = stack_root + CURRENT_


class RemovePreviousStacks(Script):
    def actionexecute(self, env):
        config = Script.get_config()
        structured_output = {}
        version = config['commandParams']['version']
        self.stack_tool_package = stack_tools.get_stack_tool_package(
            stack_tools.STACK_SELECTOR_NAME)

        versions_to_remove = self.get_lower_versions(version)
        self.pkg_provider = ManagerFactory.get()

        for low_version in versions_to_remove:
Example #25
import os
from resource_management import Script
from resource_management.libraries.functions import get_kinit_path, format
from resource_management.libraries.functions.default import default

config = Script.get_config()

conf_dir = os.environ[
    'METADATA_CONF'] if 'METADATA_CONF' in os.environ else '/etc/atlas/conf'
pid_dir = config['configurations']['metadata-env']['metadata_pid_dir']
pid_file = format("{pid_dir}/metadata.pid")
metadata_user = config['configurations']['metadata-env']['metadata_user']

# Security related/required params
hostname = config['hostname']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path(
    default('/configurations/kerberos-env/executable_search_paths', None))
tmp_dir = Script.get_tmp_dir()
Example #26
import os
import sys
from resource_management import format_stack_version, Script
from resource_management.libraries.functions import format
from resource_management.libraries.functions.default import default

import status_params

# server configurations
config = Script.get_config()
stack_root = Script.get_stack_root()

cluster_name = config['clusterName']

# security enabled
security_enabled = status_params.security_enabled

if security_enabled:
  _hostname_lowercase = config['hostname'].lower()
  _atlas_principal_name = config['configurations']['application-properties']['atlas.authentication.principal']
  atlas_jaas_principal = _atlas_principal_name.replace('_HOST',_hostname_lowercase)
  atlas_keytab_path = config['configurations']['application-properties']['atlas.authentication.keytab']

stack_name = status_params.stack_name
Example #27
import os

from resource_management import Script, StackFeature
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.stack_features import check_stack_feature, get_stack_feature_version

config = Script.get_config()
stack_root = Script.get_stack_root()

default_conf_file = "application.properties"

# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)

if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT,
                       version_for_stack_feature_checks):
    default_conf_file = "atlas-application.properties"

conf_file = default("/configurations/atlas-env/metadata_conf_file",
                    default_conf_file)
conf_dir = format("{stack_root}/current/atlas-server/conf")
pid_dir = default("/configurations/atlas-env/metadata_pid_dir",
Example #28
import os
from resource_management import Script
from resource_management.libraries.functions import get_kinit_path, format
from resource_management.libraries.functions.default import default


config = Script.get_config()

conf_dir = os.environ['METADATA_CONF'] if 'METADATA_CONF' in os.environ else '/etc/atlas/conf'
pid_dir = config['configurations']['atlas-env']['metadata_pid_dir']
pid_file = format("{pid_dir}/atlas.pid")
metadata_user = config['configurations']['atlas-env']['metadata_user']

# Security related/required params
hostname = config['hostname']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
tmp_dir = Script.get_tmp_dir()
Example #29
        prop_name = "atlas.server.address." + id
        prop_value = curr_hostname + ":" + metadata_port
        additional_props[prop_name] = prop_value
        i += 1

    # This may override the existing property
    if i == 1 or (i > 1 and is_atlas_ha_enabled is False):
        additional_props["atlas.server.ha.enabled"] = "false"
    elif i > 1:
        additional_props["atlas.server.ha.enabled"] = "true"

    return additional_props


# server configurations
config = Script.get_config()
exec_tmp_dir = Script.get_tmp_dir()

# Needed since this is an Atlas Hook service.
cluster_name = config['clusterName']

if security_enabled:
    _hostname_lowercase = config['hostname'].lower()
    _atlas_principal_name = config['configurations']['application-properties'][
        'atlas.authentication.principal']
    atlas_jaas_principal = _atlas_principal_name.replace(
        '_HOST', _hostname_lowercase)
    atlas_keytab_path = config['configurations']['application-properties'][
        'atlas.authentication.keytab']

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
Example #30
import os
import functools
from resource_management import Script
from resource_management.libraries.functions.default import default
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import conf_select
try:
    from resource_management.libraries.functions import hdp_select as hadoop_select
except ImportError:
    from resource_management.libraries.functions import phd_select as hadoop_select

config = Script.get_config()


def __get_component_host(component):
    """
  Returns the first host where the given component is deployed, None if the component is not deployed
  """
    component_host = None
    if component in config['clusterHostInfo'] and len(
            config['clusterHostInfo'][component]) > 0:
        component_host = config['clusterHostInfo'][component][0]
    return component_host


hostname = config['hostname']
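The helper is typically called at import time to resolve well-known clusterHostInfo keys, alongside assignments like the one above; the key names below are common Ambari entries, shown here as assumptions.

# hedged example lookups; available keys depend on the deployed services
namenode_host = __get_component_host('namenode_host')
hive_server_host = __get_component_host('hive_server_host')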
Example #31
import os
import sys
from resource_management import format_hdp_stack_version, Script
from resource_management.libraries.functions import format
from resource_management.libraries.functions.default import default

import status_params

# server configurations
config = Script.get_config()

# security enabled
security_enabled = status_params.security_enabled

stack_name = default("/hostLevelParams/stack_name", None)

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)

# hdp version
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

metadata_home = os.environ['METADATA_HOME_DIR'] if 'METADATA_HOME_DIR' in os.environ else '/usr/hdp/current/atlas-server'
metadata_bin = format("{metadata_home}/bin")