Code Example #1
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params

    env.set_params(params)
    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
      Logger.info("Executing Spark2 Client Stack Upgrade pre-restart")
      conf_select.select(params.stack_name, "spark", params.version)
      stack_select.select("spark2-client", params.version)
Code Example #2
File: spark_client.py Project: gbasehd/GBase-Ambari
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params

        env.set_params(params)
        if params.version and compare_versions(
                format_stack_version(params.version), '4.0.0.0') >= 0:
            conf_select.select(params.stack_name, "spark", params.version)
            stack_select.select("spark-client", params.version)
Code Example #3
File: nfsgateway.py Project: zhujiajunup/ambari
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)

        if params.stack_version_formatted and check_stack_feature(
                StackFeature.NFS, params.stack_version_formatted):
            conf_select.select(params.stack_name, "hadoop", params.version)
            stack_select.select("hadoop-hdfs-nfs3", params.version)
Code Example #4
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Stack Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
      conf_select.select(params.stack_name, "hadoop", params.version)
      stack_select.select("hadoop-hdfs-secondarynamenode", params.version)
Code Example #5
File: datanode.py Project: padmapriyanitt/ambari-1
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing DataNode Stack Upgrade pre-restart")
        import params
        env.set_params(params)
        if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE,
                                                  params.version):
            conf_select.select(params.stack_name, "hadoop", params.version)
            stack_select.select("hadoop-hdfs-datanode", params.version)
Code Example #6
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params
    env.set_params(params)

    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
      conf_select.select(params.stack_name, "tez", params.version)
      conf_select.select(params.stack_name, "hadoop", params.version)
      stack_select.select("hadoop-client", params.version)
Code Example #7
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params

        env.set_params(params)
        Logger.info("Executing Spark Thrift Server Stack Upgrade pre-restart")
        # TODO, change to "spark" after RPM switches the name
        conf_select.select(params.stack_name, "spark2", params.version)
        stack_select.select("spark2-thriftserver", params.version)
Code Example #8
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params

    env.set_params(params)
    if params.version and check_stack_feature(StackFeature.SPARK_THRIFTSERVER, params.version):
      Logger.info("Executing Spark Thrift Server Stack Upgrade pre-restart")
      conf_select.select(params.stack_name, "spark", params.version)
      stack_select.select("spark-thriftserver", params.version)
Code Example #9
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Stack Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, format_stack_version(params.version)):
      conf_select.select(params.stack_name, "zookeeper", params.version)
      stack_select.select("zookeeper-server", params.version)
Code Example #10
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing WebHCat Stack Upgrade pre-restart")
        import params
        env.set_params(params)

        if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE,
                                                  params.version):
            # webhcat has no conf, but uses hadoop home, so verify that regular hadoop conf is set
            stack_select.select("hive-webhcat", params.version)
Code Example #11
File: nifi_ca.py Project: wang7x/dfhz_hdp_mpack
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Stack Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, format_stack_version(params.version)):
        stack_select.select("nifi-toolkit", params.version)
    if params.version and check_stack_feature(StackFeature.CONFIG_VERSIONING, params.version):
        conf_select.select(params.stack_name, "nifi-toolkit", params.version)
Code Example #12
File: livy_server.py Project: zhujiajunup/ambari
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params

    env.set_params(params)
    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
      Logger.info("Executing Livy Server Stack Upgrade pre-restart")
      # TODO, change to "spark" and "livy" after RPM switches the name
      conf_select.select(params.stack_name, "spark2", params.version)
      stack_select.select("livy2-server", params.version)
Code Example #13
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)

        if params.stack_supports_ranger_tagsync:
            Logger.info("Executing Ranger Tagsync Stack Upgrade pre-restart")
            conf_select.select(params.stack_name, "ranger-tagsync",
                               params.version)
            stack_select.select("ranger-tagsync", params.version)
Code Example #14
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing Stack Upgrade pre-restart")
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_stack_version(params.version), '4.0.0.0') >= 0:
            conf_select.select(params.stack_name, "zookeeper", params.version)
            stack_select.select("zookeeper-server", params.version)
Code Example #15
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params

        env.set_params(params)
        if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE,
                                                  params.version):
            Logger.info("Executing Livy Server Stack Upgrade pre-restart")
            conf_select.select(params.stack_name, "spark", params.version)
            stack_select.select("livy-server", params.version)
Code Example #16
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)

        if params.stack_version_formatted and check_stack_feature(
                StackFeature.PHOENIX, params.stack_version_formatted):
            # phoenix uses hbase configs
            conf_select.select(params.stack_name, "hbase", params.version)
            stack_select.select("phoenix-server", params.version)
Code Example #17
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing Stack Upgrade post-restart")
        import params
        env.set_params(params)

        if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE,
                                                  params.version):
            conf_select.select(params.stack_name, "hadoop", params.version)
            stack_select.select("hadoop-yarn-resourcemanager", params.version)
Code Example #18
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params

        env.set_params(params)
        if params.version and compare_versions(
                format_stack_version(params.version), '2.2.0.0') >= 0:
            Logger.info("Executing Spark Client Stack Upgrade pre-restart")
            conf_select.select(params.stack_name, "spark", params.version)
            stack_select.select("spark-client", params.version)
Code Example #19
File: nodemanager.py Project: gauravn7/ambari
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing NodeManager Stack Upgrade pre-restart")
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_stack_version(params.version), '2.2.0.0') >= 0:
            conf_select.select(params.stack_name, "hadoop", params.version)
            stack_select.select("hadoop-yarn-nodemanager", params.version)
Code Example #20
    def update_atlas_simple_authz(self, env):
        import params
        env.set_params(params)
        if params.upgrade_direction == Direction.UPGRADE:
            # Default to a standard (full) orchestration, then narrow to a
            # patch scope if the upgrade summary says only part of the stack
            # is being upgraded.
            orchestration = stack_select.PACKAGE_SCOPE_STANDARD
            summary = upgrade_summary.get_upgrade_summary()

            if summary is not None:
                orchestration = summary.orchestration
                if orchestration is None:
                    raise Fail(
                        "The upgrade summary does not contain an orchestration type"
                    )

                if orchestration.upper() in stack_select._PARTIAL_ORCHESTRATION_SCOPES:
                    orchestration = stack_select.PACKAGE_SCOPE_PATCH

            # Resolve the stack-select packages that belong to ATLAS_SERVER
            # under the chosen orchestration scope.
            stack_select_packages = stack_select.get_packages(
                orchestration,
                service_name="ATLAS",
                component_name="ATLAS_SERVER")
            if stack_select_packages is None:
                raise Fail("Unable to get packages for stack-select")

            Logger.info(
                "ATLAS_SERVER component will be stack-selected to version {0} using a {1} orchestration"
                .format(params.version, orchestration.upper()))

            # Point each resolved package's symlink at the target version.
            for stack_select_package_name in stack_select_packages:
                stack_select.select(stack_select_package_name, params.version)

            # Re-assert ownership of the Atlas home directory after the switch.
            Directory(
                format('{metadata_home}/'),
                owner=params.metadata_user,
                group=params.user_group,
                recursive_ownership=True,
            )

            # Run the simple-authz migration script shipped with the target
            # version, if it is present.
            target_version = upgrade_summary.get_target_version('ATLAS')
            update_atlas_simple_authz_script = os.path.join(
                format('{stack_root}'), target_version, 'atlas', 'bin',
                'atlas_update_simple_auth_json.py')
            update_atlas_simple_authz_command = format(
                'source {params.conf_dir}/atlas-env.sh ; {update_atlas_simple_authz_script} {conf_dir}'
            )
            Execute(
                update_atlas_simple_authz_command,
                only_if=format("test -e {update_atlas_simple_authz_script}"),
                user=params.metadata_user)

            # Fix ownership and permissions on the generated policy file.
            atlas_simple_auth_policy_file = os.path.join(
                format('{conf_dir}'), 'atlas-simple-authz-policy.json')
            File(atlas_simple_auth_policy_file,
                 group=params.user_group,
                 owner=params.metadata_user,
                 only_if=format("test -e {atlas_simple_auth_policy_file}"),
                 mode=0644)
Code Example #21
File: pig_client.py Project: gauravn7/ambari
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_stack_version(params.version), '2.2.0.0') >= 0:
            conf_select.select(params.stack_name, "pig", params.version)
            conf_select.select(params.stack_name, "hadoop", params.version)
            stack_select.select("hadoop-client",
                                params.version)  # includes pig-client
Code Example #22
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing WebHCat Stack Upgrade pre-restart")
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_stack_version(params.version), '4.1.0.0') >= 0:
            # webhcat has no conf, but uses hadoop home, so verify that regular hadoop conf is set
            conf_select.select(params.stack_name, "hadoop", params.version)
            stack_select.select("hive-webhcat", params.version)
Code Example #23
File: namenode.py Project: surpass/ambari
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Stack Upgrade pre-restart")
    import params
    env.set_params(params)

    # When downgrading an Express Upgrade, the first thing we do is to revert the symlinks.
    # Therefore, we cannot call this code in that scenario.
    if upgrade_type != constants.UPGRADE_TYPE_NON_ROLLING or params.upgrade_direction != Direction.DOWNGRADE:
      conf_select.select(params.stack_name, "hadoop", params.version)

    stack_select.select("hadoop-hdfs-namenode", params.version)
Code Example #24
File: metadata_server.py Project: csivaguru/ambari
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)

        # TODO: Add ATLAS_CONFIG_VERSIONING stack feature and uncomment this code when config versioning for Atlas is supported
        #if params.version and check_stack_feature(StackFeature.ATLAS_CONFIG_VERSIONING, params.version):
        #  conf_select.select(params.stack_name, "atlas", params.version)

        if params.version and check_stack_feature(
                StackFeature.ATLAS_ROLLING_UPGRADE, params.version):
            stack_select.select("atlas-server", params.version)
Code Example #25
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing Metastore Rolling Upgrade pre-restart")
        import params
        env.set_params(params)

        if Script.is_stack_greater_or_equal("4.1.0.0"):
            self.upgrade_schema(env)

        if params.version and compare_versions(
                format_stack_version(params.version), '4.0.0.0') >= 0:
            conf_select.select(params.stack_name, "hive", params.version)
            stack_select.select("hive-metastore", params.version)
Code Example #26
File: accumulo_client.py Project: gauravn7/ambari
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params
    env.set_params(params)

    # this function should not execute if the version can't be determined or
    # is not at least HDP 2.2.0.0
    if Script.is_stack_less_than("2.2"):
      return

    Logger.info("Executing Accumulo Client Upgrade pre-restart")
    conf_select.select(params.stack_name, "accumulo", params.version)
    stack_select.select("accumulo-client", params.version)
Code Example #27
File: hive_server.py Project: gbasehd/GBase-Ambari
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing HiveServer2 Rolling Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
      conf_select.select(params.stack_name, "hive", params.version)
      stack_select.select("hive-server2", params.version)
      #Execute(format("stack-select set hive-server2 {version}"))
      resource_created = copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
      if resource_created:
        params.HdfsResource(None, action="execute")
Code Example #28
File: historyserver.py Project: gbasehd/GBase-Ambari
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Stack Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
      conf_select.select(params.stack_name, "hadoop", params.version)
      stack_select.select("hadoop-mapreduce-historyserver", params.version)
      # MC Hammer said, "Can't touch this"
      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.host_sys_prepped)
      copy_to_hdfs("slider", params.user_group, params.hdfs_user, skip=params.host_sys_prepped)
      params.HdfsResource(None, action="execute")
Code Example #29
File: oozie_client.py Project: tiger7456/ambari-chs
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)

        # this function should not execute if the version can't be determined or
        # the stack does not support rolling upgrade
        if not (params.version and check_stack_feature(
                StackFeature.ROLLING_UPGRADE, params.version)):
            return

        Logger.info("Executing Oozie Client Stack Upgrade pre-restart")
        conf_select.select(params.stack_name, "oozie", params.version)
        stack_select.select("oozie-client", params.version)
Code Example #30
File: ranger_admin.py Project: glenraynor/ambari
    def set_pre_start(self, env):
        import params
        env.set_params(params)

        upgrade_stack = stack_select._get_upgrade_stack()
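        # Note: _get_upgrade_stack() (the private helper called above) returns
        # a (stack_name, stack_version) pair while an upgrade is in progress
        # and None otherwise, hence the guard that follows.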
        if upgrade_stack is None:
            raise Fail('Unable to determine the stack and stack version')

        stack_name = upgrade_stack[0]
        stack_version = upgrade_stack[1]

        stack_select.select("ranger-admin", stack_version)
        conf_select.select(stack_name, "ranger-admin", stack_version)
Code Example #31
File: slider_client.py Project: tiger7456/ambari-chs
  def pre_upgrade_restart(self, env,  upgrade_type=None):
    import params
    env.set_params(params)

    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
      conf_select.select(params.stack_name, "slider", params.version)
      stack_select.select("slider-client", params.version)

      # also set all of the hadoop clients since slider client is upgraded as
      # part of the final "CLIENTS" group and we need to ensure that
      # hadoop-client is also set
      conf_select.select(params.stack_name, "hadoop", params.version)
      stack_select.select("hadoop-client", params.version)
Code Example #32
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params

    env.set_params(params)
    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
      Logger.info("Executing Spark2 Job History Server Stack Upgrade pre-restart")
      conf_select.select(params.stack_name, "spark2", params.version)
      stack_select.select("spark2-historyserver", params.version)

      # Spark 1.3.1.2.3 and higher (first shipped in HDP 2.3) has no dependency on Tez,
      # so the Tez tarball only needs to be copied for older stack versions.
      if params.version and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.version):
        resource_created = copy_to_hdfs(
          "tez",
          params.user_group,
          params.hdfs_user,
          host_sys_prepped=params.host_sys_prepped)
        if resource_created:
          params.HdfsResource(None, action="execute")