def configure(self, env): import params env.set_params(params) hdfs("namenode") hdfs_binary = self.get_hdfs_binary() namenode(action="configure", hdfs_binary=hdfs_binary, env=env)
def start(self, env, rolling_restart=False):
  import params
  env.set_params(params)
  self.configure(env)
  setup_ranger_hdfs()
  namenode(action="start", rolling_restart=rolling_restart, env=env)
def start(self, env, upgrade_type=None):
  import params
  env.set_params(params)
  self.configure(env)
  hdfs_binary = self.get_hdfs_binary()
  # "not params.hdfs_tmp_dir" already covers None, so no separate None check is needed
  if not params.hdfs_tmp_dir or params.hdfs_tmp_dir.lower() == 'null':
    Logger.error("WARNING: HDFS tmp dir property (hdfs_tmp_dir) is empty or invalid. "
                 "Ambari will change permissions for the folder on a regular basis.")
  namenode(action="start", hdfs_binary=hdfs_binary, upgrade_type=upgrade_type,
           upgrade_suspended=params.upgrade_suspended, env=env)
  # after starting NN in an upgrade, touch the marker file - but only do this for certain
  # upgrade types - not all upgrades actually tell NN about the upgrade (like HOU)
  if upgrade_type in (constants.UPGRADE_TYPE_ROLLING, constants.UPGRADE_TYPE_NON_ROLLING):
    # place a file on the system indicating that we've submitted the command that
    # instructs NN that it is now part of an upgrade
    namenode_upgrade.create_upgrade_marker()
def configure(self, env): import params env.set_params(params) hdfs() namenode(action="configure") pass
def configure(self, env): import params env.set_params(params) hdfs("namenode") hdfs_binary = self.get_hdfs_binary() namenode(action="configure", hdfs_binary=hdfs_binary, env=env) Execute('ln -sf /usr/lib/hadoop/libexec/ /usr/lib/hadoop-hdfs/libexec')
def start(self, env, upgrade_type=None): import params env.set_params(params) self.configure(env) hdfs_binary = self.get_hdfs_binary() namenode(action="start", hdfs_binary=hdfs_binary, upgrade_type=upgrade_type, env=env)
def stop(self, env):
  import params
  env.set_params(params)
  hdfs_binary = self.get_hdfs_binary()
  if params.dfs_ha_automatic_failover_enabled:
    initiate_safe_zkfc_failover()
  namenode(action="stop", hdfs_binary=hdfs_binary, env=env)
def stop(self, env, rolling_restart=False):
  import params
  env.set_params(params)
  if rolling_restart and params.dfs_ha_enabled:
    if params.dfs_ha_automatic_failover_enabled:
      initiate_safe_zkfc_failover()
    else:
      raise Fail("Rolling Upgrade - dfs.ha.automatic-failover.enabled must be enabled to perform a rolling restart")
  namenode(action="stop", rolling_restart=rolling_restart, env=env)
def stop(self, env, upgrade_type=None):
  import params
  env.set_params(params)
  hdfs_binary = self.get_hdfs_binary()
  if upgrade_type == constants.UPGRADE_TYPE_ROLLING and params.dfs_ha_enabled:
    if params.dfs_ha_automatic_failover_enabled:
      initiate_safe_zkfc_failover()
    else:
      raise Fail("Rolling Upgrade - dfs.ha.automatic-failover.enabled must be enabled to perform a rolling restart")
  namenode(action="stop", hdfs_binary=hdfs_binary, upgrade_type=upgrade_type, env=env)
def stop(self, env, upgrade_type=None): import params env.set_params(params) hdfs_binary = self.get_hdfs_binary() if upgrade_type == "rolling" and params.dfs_ha_enabled: if params.dfs_ha_automatic_failover_enabled: initiate_safe_zkfc_failover() else: raise Fail("Rolling Upgrade - dfs.ha.automatic-failover.enabled must be enabled to perform a rolling restart") namenode(action="stop", hdfs_binary=hdfs_binary, upgrade_type=upgrade_type, env=env)
def start(self, env):
  import params
  env.set_params(params)
  install_hadoop()
  self.configure(env)
  hdfs_binary = self.get_hdfs_binary()
  namenode(action="start", hdfs_binary=hdfs_binary,
           upgrade_suspended=params.upgrade_suspended, env=env)
def start(self, env, rolling_restart=False): import params env.set_params(params) self.configure(env) setup_ranger_hdfs() namenode(action="start", rolling_restart=rolling_restart, env=env) HiveDirInit().createHiveDir() Links(params.new_hdfs_namenode_data_path, params.hdfs_namenode_data_paths) Links(params.new_hdfs_log_path, params.hdfs_log_path)
def start(self, env, upgrade_type=None):
  import params
  env.set_params(params)
  # Additional ln execution added because the previous one in the install function
  # doesn't seem to work
  Execute(('ln', '-sf', params.hadoop_conf_dir, params.hadoop_home), sudo=True)
  self.configure(env)
  hdfs_binary = self.get_hdfs_binary()
  namenode(action="start", hdfs_binary=hdfs_binary, upgrade_type=upgrade_type, env=env)
def start(self, env, upgrade_type=None): import params env.set_params(params) self.configure(env) hdfs_binary = self.get_hdfs_binary() namenode(action="start", hdfs_binary=hdfs_binary, upgrade_type=upgrade_type, upgrade_suspended=params.upgrade_suspended, env=env) # after starting NN in an upgrade, touch the marker file if upgrade_type is not None: # place a file on the system indicating that we've submitting the command that # instructs NN that it is now part of an upgrade namenode_upgrade.create_upgrade_marker()
def stop(self, env, upgrade_type=None): import params env.set_params(params) if upgrade_type == "rolling" and params.dfs_ha_enabled: if params.dfs_ha_automatic_failover_enabled: failover_namenode() else: raise Fail( "Rolling Upgrade - dfs.ha.automatic-failover.enabled must be enabled to perform a rolling restart" ) namenode(action="stop", upgrade_type=upgrade_type, env=env)
def start(self, env, upgrade_type=None): import params env.set_params(params) self.configure(env) hdfs_binary = self.get_hdfs_binary() namenode(action="start", hdfs_binary=hdfs_binary, upgrade_type=upgrade_type, upgrade_suspended=params.upgrade_suspended, env=env) # after starting NN in an upgrade, touch the marker file - but only do this for certain # upgrade types - not all upgrades actually tell NN about the upgrade (like HOU) if upgrade_type in (constants.UPGRADE_TYPE_ROLLING, constants.UPGRADE_TYPE_NON_ROLLING): # place a file on the system indicating that we've submitting the command that # instructs NN that it is now part of an upgrade namenode_upgrade.create_upgrade_marker()
def decommission(self, env): import params env.set_params(params) namenode(action="decommission") pass
def stop(self, env): import params env.set_params(params) namenode(action="stop")
def status(self, env): #import params #env.set_params(params) namenode(action="status") pass
def start(self, env, upgrade_type=None): import params env.set_params(params) self.configure(env) namenode(action="start", upgrade_type=upgrade_type, env=env)
def start(self, env): import params env.set_params(params) self.configure(env) namenode(action="start")
def status(self, env): import status_params env.set_params(status_params) namenode(action="status", env=env)
def decommission(self, env): import params env.set_params(params) hdfs_binary = self.get_hdfs_binary() namenode(action="decommission", hdfs_binary=hdfs_binary)
def start(self, env, rolling_restart=False): import params env.set_params(params) self.configure(env) namenode(action="start", rolling_restart=rolling_restart, env=env)
def status(self, env): import status_params env.set_params(status_params) namenode(action="status", rolling_restart=False, env=env)
def refresh_nodes(self, env): import params env.set_params(params) namenode(action="refresh_nodes")