def execute_command(self, command):
  '''
  Executes commands of type EXECUTION_COMMAND

  Posts an IN_PROGRESS report, runs the command through the custom service
  orchestrator with an optional time-bounded retry loop, then reports the
  final COMPLETED/FAILED status (including recovery-manager state updates
  and applied configuration tags) back via commandStatuses.

  command: dict describing the command; keys read here include
    'clusterName', 'commandId', 'commandType', 'role', 'taskId',
    'commandParams', 'hostLevelParams', and (optionally) 'configurationTags'.
  '''
  clusterName = command['clusterName']
  commandId = command['commandId']
  # Background commands return early after launch; auto-execution commands
  # use separate temp-file names and never retry (see below).
  isCommandBackground = command['commandType'] == self.BACKGROUND_EXECUTION_COMMAND
  isAutoExecuteCommand = command['commandType'] == self.AUTO_EXECUTION_COMMAND
  message = "Executing command with id = {commandId} for role = {role} of " \
            "cluster {cluster}.".format(
            commandId = str(commandId), role=command['role'],
            cluster=clusterName)
  logger.info(message)

  taskId = command['taskId']
  # Preparing 'IN_PROGRESS' report
  in_progress_status = self.commandStatuses.generate_report_template(command)
  # The path of the files that contain the output log and error log use a prefix that the agent advertises to the
  # server. The prefix is defined in agent-config.ini
  if not isAutoExecuteCommand:
    in_progress_status.update({
      'tmpout': self.tmpdir + os.sep + 'output-' + str(taskId) + '.txt',
      'tmperr': self.tmpdir + os.sep + 'errors-' + str(taskId) + '.txt',
      'structuredOut' : self.tmpdir + os.sep + 'structured-out-' + str(taskId) + '.json',
      'status': self.IN_PROGRESS_STATUS
    })
  else:
    # auto-execution commands get 'auto_'-prefixed files so they do not
    # clobber the output of a regular command with the same task id
    in_progress_status.update({
      'tmpout': self.tmpdir + os.sep + 'auto_output-' + str(taskId) + '.txt',
      'tmperr': self.tmpdir + os.sep + 'auto_errors-' + str(taskId) + '.txt',
      'structuredOut' : self.tmpdir + os.sep + 'auto_structured-out-' + str(taskId) + '.json',
      'status': self.IN_PROGRESS_STATUS
    })
  self.commandStatuses.put_command_status(command, in_progress_status)

  # Retry bookkeeping: retryDuration is a remaining time budget in seconds;
  # the loop condition 'retryDuration >= 0' guarantees at least one attempt.
  numAttempts = 0
  retryDuration = 0  # even with 0 allow one attempt
  retryAble = False
  delay = 1
  if 'commandParams' in command:
    if 'max_duration_for_retries' in command['commandParams']:
      retryDuration = int(command['commandParams']['max_duration_for_retries'])
    if 'command_retry_enabled' in command['commandParams']:
      retryAble = command['commandParams']['command_retry_enabled'] == "true"
  if isAutoExecuteCommand:
    retryAble = False

  logger.debug("Command execution metadata - retry enabled = {retryAble}, max retry duration (sec) = {retryDuration}".
               format(retryAble=retryAble, retryDuration=retryDuration))
  while retryDuration >= 0:
    numAttempts += 1
    # Only measure wall-clock time when retries are enabled; otherwise
    # start/end stay at 0/1 so retryDuration drops below 0 and the loop ends.
    start = 0
    if retryAble:
      start = int(time.time())
    # running command
    commandresult = self.customServiceOrchestrator.runCommand(command,
                                                             in_progress_status['tmpout'],
                                                             in_progress_status['tmperr'],
                                                             override_output_files=numAttempts == 1,
                                                             retry=numAttempts > 1)
    end = 1
    if retryAble:
      end = int(time.time())
    retryDuration -= (end - start)

    # dumping results
    if isCommandBackground:
      # background command: result is reported asynchronously elsewhere
      return
    else:
      if commandresult['exitcode'] == 0:
        status = self.COMPLETED_STATUS
      else:
        status = self.FAILED_STATUS

    if status != self.COMPLETED_STATUS and retryAble == True and retryDuration > 0:
      delay = self.get_retry_delay(delay)
      if delay > retryDuration:
        delay = retryDuration
      retryDuration -= delay  # allow one last attempt
      logger.info("Retrying command id {cid} after a wait of {delay}".format(cid=taskId, delay=delay))
      time.sleep(delay)
      continue
    else:
      break

  roleResult = self.commandStatuses.generate_report_template(command)
  roleResult.update({
    'stdout': commandresult['stdout'],
    'stderr': commandresult['stderr'],
    'exitCode': commandresult['exitcode'],
    'status': status,
  })

  # empty output is normalized to the literal string 'None' for the server
  if roleResult['stdout'] == '':
    roleResult['stdout'] = 'None'
  if roleResult['stderr'] == '':
    roleResult['stderr'] = 'None'

  # let ambari know name of custom command
  if command['hostLevelParams'].has_key('custom_command'):
    roleResult['customCommand'] = command['hostLevelParams']['custom_command']

  if 'structuredOut' in commandresult:
    roleResult['structuredOut'] = str(json.dumps(commandresult['structuredOut']))
  else:
    roleResult['structuredOut'] = ''

  # let recovery manager know the current state
  if status == self.COMPLETED_STATUS:
    if self.controller.recovery_manager.enabled() and command.has_key('roleCommand') \
        and self.controller.recovery_manager.configured_for_recovery(command['role']):
      if command['roleCommand'] == self.ROLE_COMMAND_START:
        self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.LIVE_STATUS)
        self.controller.recovery_manager.update_config_staleness(command['role'], False)
        logger.info("After EXECUTION_COMMAND (START), current state of " + command['role'] + " to " +
                    self.controller.recovery_manager.get_current_status(command['role']) )
      elif command['roleCommand'] == self.ROLE_COMMAND_STOP or command['roleCommand'] == self.ROLE_COMMAND_INSTALL:
        self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.DEAD_STATUS)
        logger.info("After EXECUTION_COMMAND (STOP/INSTALL), current state of " + command['role'] + " to " +
                    self.controller.recovery_manager.get_current_status(command['role']) )
      elif command['roleCommand'] == self.ROLE_COMMAND_CUSTOM_COMMAND:
        # only a RESTART custom command implies the component is live again
        if command['hostLevelParams'].has_key('custom_command') and \
            command['hostLevelParams']['custom_command'] == self.CUSTOM_COMMAND_RESTART:
          self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.LIVE_STATUS)
          self.controller.recovery_manager.update_config_staleness(command['role'], False)
          logger.info("After EXECUTION_COMMAND (RESTART), current state of " + command['role'] + " to " +
                      self.controller.recovery_manager.get_current_status(command['role']) )
      pass

    # let ambari know that configuration tags were applied
    configHandler = ActualConfigHandler(self.config, self.configTags)
    #update
    if command.has_key('forceRefreshConfigTags') and len(command['forceRefreshConfigTags']) > 0 :
      forceRefreshConfigTags = command['forceRefreshConfigTags']
      logger.info("Got refresh additional component tags command")

      for configTag in forceRefreshConfigTags :
        configHandler.update_component_tag(command['role'], configTag, command['configurationTags'][configTag])

      roleResult['customCommand'] = self.CUSTOM_COMMAND_RESTART  # force restart for component to evict stale_config on server side
      command['configurationTags'] = configHandler.read_actual_component(command['role'])

    if command.has_key('configurationTags'):
      configHandler.write_actual(command['configurationTags'])
      roleResult['configurationTags'] = command['configurationTags']
    component = {'serviceName':command['serviceName'],'componentName':command['role']}
    # Persist per-component tags on START, on INSTALL of client components,
    # and on a RESTART custom command.
    if command.has_key('roleCommand') and \
        (command['roleCommand'] == self.ROLE_COMMAND_START or \
        (command['roleCommand'] == self.ROLE_COMMAND_INSTALL \
        and component in LiveStatus.CLIENT_COMPONENTS) or \
        (command['roleCommand'] == self.ROLE_COMMAND_CUSTOM_COMMAND and \
        command['hostLevelParams'].has_key('custom_command') and \
        command['hostLevelParams']['custom_command'] == self.CUSTOM_COMMAND_RESTART)):
      configHandler.write_actual_component(command['role'], command['configurationTags'])
      if command['hostLevelParams'].has_key('clientsToUpdateConfigs') and \
          command['hostLevelParams']['clientsToUpdateConfigs']:
        configHandler.write_client_components(command['serviceName'], command['configurationTags'],
                                              command['hostLevelParams']['clientsToUpdateConfigs'])
      roleResult['configurationTags'] = configHandler.read_actual_component(command['role'])

  self.commandStatuses.put_command_status(command, roleResult)
def execute_command(self, command):
  '''
  Executes commands of type EXECUTION_COMMAND

  Runs the command via the custom service orchestrator inside a
  time-budgeted retry loop, detects cancellation (SIGTERM/SIGKILL exit
  codes), optionally echoes command output into the agent log, updates the
  recovery manager on both success and INSTALL failure, and finally reports
  the result through commandStatuses.
  '''
  clusterName = command['clusterName']
  commandId = command['commandId']
  isCommandBackground = command['commandType'] == self.BACKGROUND_EXECUTION_COMMAND
  isAutoExecuteCommand = command['commandType'] == self.AUTO_EXECUTION_COMMAND
  message = "Executing command with id = {commandId}, taskId = {taskId} for role = {role} of " \
            "cluster {cluster}.".format(
            commandId = str(commandId), taskId = str(command['taskId']),
            role=command['role'], cluster=clusterName)
  logger.info(message)

  taskId = command['taskId']
  # Preparing 'IN_PROGRESS' report
  in_progress_status = self.commandStatuses.generate_report_template(command)
  # The path of the files that contain the output log and error log use a prefix that the agent advertises to the
  # server. The prefix is defined in agent-config.ini
  if not isAutoExecuteCommand:
    in_progress_status.update({
      'tmpout': self.tmpdir + os.sep + 'output-' + str(taskId) + '.txt',
      'tmperr': self.tmpdir + os.sep + 'errors-' + str(taskId) + '.txt',
      'structuredOut': self.tmpdir + os.sep + 'structured-out-' + str(taskId) + '.json',
      'status': self.IN_PROGRESS_STATUS
    })
  else:
    # auto-execution commands use 'auto_'-prefixed temp files
    in_progress_status.update({
      'tmpout': self.tmpdir + os.sep + 'auto_output-' + str(taskId) + '.txt',
      'tmperr': self.tmpdir + os.sep + 'auto_errors-' + str(taskId) + '.txt',
      'structuredOut': self.tmpdir + os.sep + 'auto_structured-out-' + str(taskId) + '.json',
      'status': self.IN_PROGRESS_STATUS
    })
  self.commandStatuses.put_command_status(command, in_progress_status)

  numAttempts = 0
  retryDuration = 0  # even with 0 allow one attempt
  retryAble = False
  delay = 1
  # output logging can be suppressed per-command via commandParams.log_output
  log_command_output = True
  if 'commandParams' in command and 'log_output' in command['commandParams'] and "false" == command['commandParams']['log_output']:
    log_command_output = False

  if 'commandParams' in command:
    if 'max_duration_for_retries' in command['commandParams']:
      retryDuration = int(command['commandParams']['max_duration_for_retries'])
    if 'command_retry_enabled' in command['commandParams']:
      retryAble = command['commandParams']['command_retry_enabled'] == "true"
  if isAutoExecuteCommand:
    retryAble = False

  logger.info("Command execution metadata - taskId = {taskId}, retry enabled = {retryAble}, max retry duration (sec) = {retryDuration}, log_output = {log_command_output}"
              .format(taskId=taskId, retryAble=retryAble, retryDuration=retryDuration, log_command_output=log_command_output))
  command_canceled = False
  while retryDuration >= 0:
    numAttempts += 1
    # start/end stay at 0/1 when retries are disabled, so retryDuration
    # goes negative after one pass and the loop exits
    start = 0
    if retryAble:
      start = int(time.time())
    # running command
    commandresult = self.customServiceOrchestrator.runCommand(command,
                                                             in_progress_status['tmpout'],
                                                             in_progress_status['tmperr'],
                                                             override_output_files=numAttempts == 1,
                                                             retry=numAttempts > 1)
    end = 1
    if retryAble:
      end = int(time.time())
    retryDuration -= (end - start)

    # dumping results
    if isCommandBackground:
      logger.info("Command is background command, quit retrying. Exit code: {exitCode}, retryAble: {retryAble}, retryDuration (sec): {retryDuration}, last delay (sec): {delay}"
                  .format(cid=taskId, exitCode=commandresult['exitcode'], retryAble=retryAble, retryDuration=retryDuration, delay=delay))
      return
    else:
      if commandresult['exitcode'] == 0:
        status = self.COMPLETED_STATUS
      else:
        status = self.FAILED_STATUS
        # a negative exit code equal to -SIGTERM/-SIGKILL means the process
        # was killed, i.e. the command was canceled — stop retrying
        if (commandresult['exitcode'] == -signal.SIGTERM) or (commandresult['exitcode'] == -signal.SIGKILL):
          logger.info('Command with taskId = {cid} was canceled!'.format(cid=taskId))
          command_canceled = True
          break

    if status != self.COMPLETED_STATUS and retryAble and retryDuration > 0:
      delay = self.get_retry_delay(delay)
      if delay > retryDuration:
        delay = retryDuration
      retryDuration -= delay  # allow one last attempt
      commandresult['stderr'] += "\n\nCommand failed. Retrying command execution ...\n\n"
      logger.info("Retrying command with taskId = {cid} after a wait of {delay}"
                  .format(cid=taskId, delay=delay))
      command['commandBeingRetried'] = "true"
      time.sleep(delay)
      continue
    else:
      logger.info("Quit retrying for command with taskId = {cid}. Status: {status}, retryAble: {retryAble}, retryDuration (sec): {retryDuration}, last delay (sec): {delay}"
                  .format(cid=taskId, status=status, retryAble=retryAble, retryDuration=retryDuration, delay=delay))
      break

  # do not fail task which was rescheduled from server
  # (if the canceled task is back in the queue, skip reporting the failure)
  if command_canceled:
    with self.lock:
      with self.commandQueue.mutex:
        for com in self.commandQueue.queue:
          if com['taskId'] == command['taskId']:
            logger.info('Command with taskId = {cid} was rescheduled by server. '
                        'Fail report on cancelled command won\'t be sent with heartbeat.'
                        .format(cid=taskId))
            return

  # final result to stdout
  commandresult['stdout'] += '\n\nCommand completed successfully!\n' if status == self.COMPLETED_STATUS else '\n\nCommand failed after ' + str(numAttempts) + ' tries\n'
  logger.info('Command with taskId = {cid} completed successfully!'.format(cid=taskId) if status == self.COMPLETED_STATUS else 'Command with taskId = {cid} failed after {attempts} tries'.format(cid=taskId, attempts=numAttempts))

  roleResult = self.commandStatuses.generate_report_template(command)
  roleResult.update({
    'stdout': commandresult['stdout'],
    'stderr': commandresult['stderr'],
    'exitCode': commandresult['exitcode'],
    'status': status,
  })

  # echo command output into the agent log when enabled both globally
  # (agent config) and per-command (log_command_output)
  if self.config.has_option("logging","log_command_executes") \
      and int(self.config.get("logging", "log_command_executes")) == 1 \
      and log_command_output:

    if roleResult['stdout'] != '':
      logger.info("Begin command output log for command with id = " + str(command['taskId']) + ", role = " + command['role'] + ", roleCommand = " + command['roleCommand'])
      self.log_command_output(roleResult['stdout'], str(command['taskId']))
      logger.info("End command output log for command with id = " + str(command['taskId']) + ", role = " + command['role'] + ", roleCommand = " + command['roleCommand'])

    if roleResult['stderr'] != '':
      logger.info("Begin command stderr log for command with id = " + str(command['taskId']) + ", role = " + command['role'] + ", roleCommand = " + command['roleCommand'])
      self.log_command_output(roleResult['stderr'], str(command['taskId']))
      logger.info("End command stderr log for command with id = " + str(command['taskId']) + ", role = " + command['role'] + ", roleCommand = " + command['roleCommand'])

  # empty output is normalized to the literal string 'None' for the server
  if roleResult['stdout'] == '':
    roleResult['stdout'] = 'None'
  if roleResult['stderr'] == '':
    roleResult['stderr'] = 'None'

  # let ambari know name of custom command
  if command['hostLevelParams'].has_key('custom_command'):
    roleResult['customCommand'] = command['hostLevelParams']['custom_command']

  if 'structuredOut' in commandresult:
    roleResult['structuredOut'] = str(json.dumps(commandresult['structuredOut']))
  else:
    roleResult['structuredOut'] = ''

  # let recovery manager know the current state
  if status == self.COMPLETED_STATUS:
    if self.controller.recovery_manager.enabled() and command.has_key('roleCommand') \
        and self.controller.recovery_manager.configured_for_recovery(command['role']):
      if command['roleCommand'] == self.ROLE_COMMAND_START:
        self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.LIVE_STATUS)
        self.controller.recovery_manager.update_config_staleness(command['role'], False)
        logger.info("After EXECUTION_COMMAND (START), with taskId=" + str(command['taskId']) +
                    ", current state of " + command['role'] + " to " +
                    self.controller.recovery_manager.get_current_status(command['role']))
      elif command['roleCommand'] == self.ROLE_COMMAND_STOP or command['roleCommand'] == self.ROLE_COMMAND_INSTALL:
        self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.DEAD_STATUS)
        logger.info("After EXECUTION_COMMAND (STOP/INSTALL), with taskId=" + str(command['taskId']) +
                    ", current state of " + command['role'] + " to " +
                    self.controller.recovery_manager.get_current_status(command['role']))
      elif command['roleCommand'] == self.ROLE_COMMAND_CUSTOM_COMMAND:
        # only a RESTART custom command marks the component live again
        if command['hostLevelParams'].has_key('custom_command') and \
            command['hostLevelParams']['custom_command'] == self.CUSTOM_COMMAND_RESTART:
          self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.LIVE_STATUS)
          self.controller.recovery_manager.update_config_staleness(command['role'], False)
          logger.info("After EXECUTION_COMMAND (RESTART), current state of " + command['role'] + " to " +
                      self.controller.recovery_manager.get_current_status(command['role']))
    pass

    # let ambari know that configuration tags were applied
    configHandler = ActualConfigHandler(self.config, self.configTags)
    #update
    if 'commandParams' in command:
      command_params = command['commandParams']
      # forceRefreshConfigTags here is a comma-separated string, hence split
      if command_params and command_params.has_key('forceRefreshConfigTags') and len(command_params['forceRefreshConfigTags']) > 0:
        forceRefreshConfigTags = command_params['forceRefreshConfigTags'].split(',')
        logger.info("Got refresh additional component tags command")

        for configTag in forceRefreshConfigTags:
          configHandler.update_component_tag(command['role'], configTag, command['configurationTags'][configTag])

        roleResult['customCommand'] = self.CUSTOM_COMMAND_RESTART  # force restart for component to evict stale_config on server side
        command['configurationTags'] = configHandler.read_actual_component(command['role'])
    if command.has_key('configurationTags'):
      configHandler.write_actual(command['configurationTags'])
      roleResult['configurationTags'] = command['configurationTags']
    component = {
      'serviceName': command['serviceName'],
      'componentName': command['role']
    }
    # Persist per-component tags on START, on INSTALL of client components,
    # and on RESTART/START custom commands.
    if 'roleCommand' in command and \
        (command['roleCommand'] == self.ROLE_COMMAND_START or
         (command['roleCommand'] == self.ROLE_COMMAND_INSTALL and component in LiveStatus.CLIENT_COMPONENTS) or
         (command['roleCommand'] == self.ROLE_COMMAND_CUSTOM_COMMAND and
          'custom_command' in command['hostLevelParams'] and
          command['hostLevelParams']['custom_command'] in (self.CUSTOM_COMMAND_RESTART, self.CUSTOM_COMMAND_START))):
      configHandler.write_actual_component(command['role'], command['configurationTags'])
      if 'clientsToUpdateConfigs' in command['hostLevelParams'] and command['hostLevelParams']['clientsToUpdateConfigs']:
        configHandler.write_client_components(command['serviceName'], command['configurationTags'],
                                              command['hostLevelParams']['clientsToUpdateConfigs'])
      roleResult['configurationTags'] = configHandler.read_actual_component(command['role'])
  elif status == self.FAILED_STATUS:
    # a failed INSTALL is tracked so recovery does not try to start it
    if self.controller.recovery_manager.enabled() and command.has_key('roleCommand') \
        and self.controller.recovery_manager.configured_for_recovery(command['role']):
      if command['roleCommand'] == self.ROLE_COMMAND_INSTALL:
        self.controller.recovery_manager.update_current_status(command['role'], self.controller.recovery_manager.INSTALL_FAILED)
        logger.info("After EXECUTION_COMMAND (INSTALL), with taskId=" + str(command['taskId']) +
                    ", current state of " + command['role'] + " to " +
                    self.controller.recovery_manager.get_current_status(command['role']))

  self.commandStatuses.put_command_status(command, roleResult)
def execute_command(self, command):
  '''
  Executes commands of type EXECUTION_COMMAND

  Posts an IN_PROGRESS report, runs the command once through the custom
  service orchestrator (no retries in this variant), and reports the final
  COMPLETED/FAILED status, structured output, and applied configuration
  tags back through commandStatuses.

  command: dict describing the command; keys read here include
    'clusterName', 'commandId', 'commandType', 'role', 'taskId',
    'hostLevelParams', and (optionally) 'configurationTags' /
    'forceRefreshConfigTags'.
  '''
  clusterName = command['clusterName']
  commandId = command['commandId']
  # background commands return right after launch; results come later
  isCommandBackground = command['commandType'] == self.BACKGROUND_EXECUTION_COMMAND
  message = "Executing command with id = {commandId} for role = {role} of " \
            "cluster {cluster}.".format(
            commandId = str(commandId), role=command['role'],
            cluster=clusterName)
  logger.info(message)
  logger.debug(pprint.pformat(command))

  taskId = command['taskId']
  # Preparing 'IN_PROGRESS' report
  in_progress_status = self.commandStatuses.generate_report_template(command)
  # The path of the files that contain the output log and error log use a prefix that the agent advertises to the
  # server. The prefix is defined in agent-config.ini
  in_progress_status.update({
    'tmpout': self.tmpdir + os.sep + 'output-' + str(taskId) + '.txt',
    'tmperr': self.tmpdir + os.sep + 'errors-' + str(taskId) + '.txt',
    'structuredOut': self.tmpdir + os.sep + 'structured-out-' + str(taskId) + '.json',
    'status': self.IN_PROGRESS_STATUS
  })
  self.commandStatuses.put_command_status(command, in_progress_status)

  # running command
  commandresult = self.customServiceOrchestrator.runCommand(command,
                                                            in_progress_status['tmpout'],
                                                            in_progress_status['tmperr'])

  # dumping results
  if isCommandBackground:
    return
  else:
    status = self.COMPLETED_STATUS if commandresult['exitcode'] == 0 else self.FAILED_STATUS

  roleResult = self.commandStatuses.generate_report_template(command)
  roleResult.update({
    'stdout': commandresult['stdout'],
    'stderr': commandresult['stderr'],
    'exitCode': commandresult['exitcode'],
    'status': status,
  })

  # empty output is normalized to the literal string 'None' for the server
  if roleResult['stdout'] == '':
    roleResult['stdout'] = 'None'
  if roleResult['stderr'] == '':
    roleResult['stderr'] = 'None'

  # let ambari know name of custom command
  # NOTE: dict.has_key() is Python-2-only (removed in Python 3); the
  # equivalent 'in' test is used throughout this method instead.
  if 'custom_command' in command['hostLevelParams']:
    roleResult['customCommand'] = command['hostLevelParams']['custom_command']

  if 'structuredOut' in commandresult:
    roleResult['structuredOut'] = str(json.dumps(commandresult['structuredOut']))
  else:
    roleResult['structuredOut'] = ''

  # let ambari know that configuration tags were applied
  if status == self.COMPLETED_STATUS:
    configHandler = ActualConfigHandler(self.config, self.configTags)
    #update
    if 'forceRefreshConfigTags' in command and len(command['forceRefreshConfigTags']) > 0:
      forceRefreshConfigTags = command['forceRefreshConfigTags']
      logger.info("Got refresh additional component tags command")

      for configTag in forceRefreshConfigTags:
        configHandler.update_component_tag(command['role'], configTag, command['configurationTags'][configTag])

      roleResult['customCommand'] = self.CUSTOM_COMMAND_RESTART  # force restart for component to evict stale_config on server side
      command['configurationTags'] = configHandler.read_actual_component(command['role'])

    if 'configurationTags' in command:
      configHandler.write_actual(command['configurationTags'])
      roleResult['configurationTags'] = command['configurationTags']
    component = {
      'serviceName': command['serviceName'],
      'componentName': command['role']
    }
    # Persist per-component tags on START, on INSTALL of client components,
    # and on a RESTART custom command.
    if 'roleCommand' in command and \
        (command['roleCommand'] == self.ROLE_COMMAND_START or \
        (command['roleCommand'] == self.ROLE_COMMAND_INSTALL \
        and component in LiveStatus.CLIENT_COMPONENTS) or \
        (command['roleCommand'] == self.ROLE_COMMAND_CUSTOM_COMMAND and \
        'custom_command' in command['hostLevelParams'] and \
        command['hostLevelParams']['custom_command'] == self.CUSTOM_COMMAND_RESTART)):
      configHandler.write_actual_component(command['role'], command['configurationTags'])
      if 'clientsToUpdateConfigs' in command['hostLevelParams'] and \
          command['hostLevelParams']['clientsToUpdateConfigs']:
        configHandler.write_client_components(command['serviceName'], command['configurationTags'],
                                              command['hostLevelParams']['clientsToUpdateConfigs'])
      roleResult['configurationTags'] = configHandler.read_actual_component(command['role'])

  self.commandStatuses.put_command_status(command, roleResult)
def execute_command(self, command):
  '''
  Executes commands of type EXECUTION_COMMAND

  Posts an IN_PROGRESS report, runs the command through the custom service
  orchestrator with an attempt-count-bounded retry loop (this variant
  retries up to command_retry_max_attempt_count times, not for a time
  budget), then reports the final COMPLETED/FAILED status and applied
  configuration tags back via commandStatuses.
  '''
  clusterName = command['clusterName']
  commandId = command['commandId']
  isCommandBackground = command['commandType'] == self.BACKGROUND_EXECUTION_COMMAND
  isAutoExecuteCommand = command['commandType'] == self.AUTO_EXECUTION_COMMAND
  message = "Executing command with id = {commandId} for role = {role} of " \
            "cluster {cluster}.".format(
            commandId = str(commandId), role=command['role'],
            cluster=clusterName)
  logger.info(message)

  taskId = command['taskId']
  # Preparing 'IN_PROGRESS' report
  in_progress_status = self.commandStatuses.generate_report_template(command)
  # The path of the files that contain the output log and error log use a prefix that the agent advertises to the
  # server. The prefix is defined in agent-config.ini
  if not isAutoExecuteCommand:
    in_progress_status.update({
      'tmpout': self.tmpdir + os.sep + 'output-' + str(taskId) + '.txt',
      'tmperr': self.tmpdir + os.sep + 'errors-' + str(taskId) + '.txt',
      'structuredOut': self.tmpdir + os.sep + 'structured-out-' + str(taskId) + '.json',
      'status': self.IN_PROGRESS_STATUS
    })
  else:
    # auto-execution commands use 'auto_'-prefixed temp files
    in_progress_status.update({
      'tmpout': self.tmpdir + os.sep + 'auto_output-' + str(taskId) + '.txt',
      'tmperr': self.tmpdir + os.sep + 'auto_errors-' + str(taskId) + '.txt',
      'structuredOut': self.tmpdir + os.sep + 'auto_structured-out-' + str(taskId) + '.json',
      'status': self.IN_PROGRESS_STATUS
    })
  self.commandStatuses.put_command_status(command, in_progress_status)

  # maxAttempts defaults to 1 (a single attempt, no retry)
  numAttempts = 0
  maxAttempts = 1
  retryAble = False
  delay = 1
  if 'commandParams' in command:
    if 'command_retry_max_attempt_count' in command['commandParams']:
      maxAttempts = int(command['commandParams']['command_retry_max_attempt_count'])
    if 'command_retry_enabled' in command['commandParams']:
      retryAble = command['commandParams']['command_retry_enabled'] == "true"

  logger.debug("Command execution metadata - retry enabled = {retryAble}, max attempt count = {maxAttemptCount}"
               .format(retryAble=retryAble, maxAttemptCount=maxAttempts))
  while numAttempts < maxAttempts:
    numAttempts += 1
    # running command
    commandresult = self.customServiceOrchestrator.runCommand(command,
                                                              in_progress_status['tmpout'],
                                                              in_progress_status['tmperr'],
                                                              override_output_files=numAttempts == 1,
                                                              retry=numAttempts > 1)

    # dumping results
    if isCommandBackground:
      # background command: result is reported asynchronously elsewhere
      return
    else:
      status = self.COMPLETED_STATUS if commandresult['exitcode'] == 0 else self.FAILED_STATUS

    if status != self.COMPLETED_STATUS and retryAble == True and maxAttempts > numAttempts:
      delay = self.get_retry_delay(delay)
      logger.info("Retrying command id {cid} after a wait of {delay}".format(cid=taskId, delay=delay))
      time.sleep(delay)
      continue
    else:
      break

  roleResult = self.commandStatuses.generate_report_template(command)
  roleResult.update({
    'stdout': commandresult['stdout'],
    'stderr': commandresult['stderr'],
    'exitCode': commandresult['exitcode'],
    'status': status,
  })

  # empty output is normalized to the literal string 'None' for the server
  if roleResult['stdout'] == '':
    roleResult['stdout'] = 'None'
  if roleResult['stderr'] == '':
    roleResult['stderr'] = 'None'

  # let ambari know name of custom command
  if command['hostLevelParams'].has_key('custom_command'):
    roleResult['customCommand'] = command['hostLevelParams']['custom_command']

  if 'structuredOut' in commandresult:
    roleResult['structuredOut'] = str(json.dumps(commandresult['structuredOut']))
  else:
    roleResult['structuredOut'] = ''

  # let ambari know that configuration tags were applied
  if status == self.COMPLETED_STATUS:
    configHandler = ActualConfigHandler(self.config, self.configTags)
    #update
    if command.has_key('forceRefreshConfigTags') and len(command['forceRefreshConfigTags']) > 0:
      forceRefreshConfigTags = command['forceRefreshConfigTags']
      logger.info("Got refresh additional component tags command")

      for configTag in forceRefreshConfigTags:
        configHandler.update_component_tag(command['role'], configTag, command['configurationTags'][configTag])

      roleResult['customCommand'] = self.CUSTOM_COMMAND_RESTART  # force restart for component to evict stale_config on server side
      command['configurationTags'] = configHandler.read_actual_component(command['role'])

    if command.has_key('configurationTags'):
      configHandler.write_actual(command['configurationTags'])
      roleResult['configurationTags'] = command['configurationTags']
    component = {
      'serviceName': command['serviceName'],
      'componentName': command['role']
    }
    # Persist per-component tags on START, on INSTALL of client components,
    # and on a RESTART custom command.
    if command.has_key('roleCommand') and \
        (command['roleCommand'] == self.ROLE_COMMAND_START or \
        (command['roleCommand'] == self.ROLE_COMMAND_INSTALL \
        and component in LiveStatus.CLIENT_COMPONENTS) or \
        (command['roleCommand'] == self.ROLE_COMMAND_CUSTOM_COMMAND and \
        command['hostLevelParams'].has_key('custom_command') and \
        command['hostLevelParams']['custom_command'] == self.CUSTOM_COMMAND_RESTART)):
      configHandler.write_actual_component(command['role'], command['configurationTags'])
      if command['hostLevelParams'].has_key('clientsToUpdateConfigs') and \
          command['hostLevelParams']['clientsToUpdateConfigs']:
        configHandler.write_client_components(command['serviceName'], command['configurationTags'],
                                              command['hostLevelParams']['clientsToUpdateConfigs'])
      roleResult['configurationTags'] = configHandler.read_actual_component(command['role'])

  self.commandStatuses.put_command_status(command, roleResult)
def execute_command(self, command):
  '''
  Executes commands of type EXECUTION_COMMAND

  Older dual-format variant: dispatches V1-format commands to a
  per-invocation PuppetExecutor and everything else to the custom service
  orchestrator, then reports COMPLETED/FAILED status and applied
  configuration tags back via commandStatuses. No retry logic in this
  variant.
  '''
  clusterName = command['clusterName']
  commandId = command['commandId']
  # command format decides which executor runs the command (see below)
  command_format = self.determine_command_format_version(command)

  message = "Executing command with id = {commandId} for role = {role} of " \
            "cluster {cluster}. Command format={command_format}".format(
            commandId = str(commandId), role=command['role'],
            cluster=clusterName, command_format=command_format)
  logger.info(message)
  logger.debug(pprint.pformat(command))

  taskId = command['taskId']
  # Preparing 'IN_PROGRESS' report
  in_progress_status = self.commandStatuses.generate_report_template(command)
  in_progress_status.update({
    'tmpout': self.tmpdir + os.sep + 'output-' + str(taskId) + '.txt',
    'tmperr': self.tmpdir + os.sep + 'errors-' + str(taskId) + '.txt',
    'structuredOut' : self.tmpdir + os.sep + 'structured-out-' + str(taskId) + '.json',
    'status': self.IN_PROGRESS_STATUS
  })
  self.commandStatuses.put_command_status(command, in_progress_status)

  # running command
  if command_format == self.COMMAND_FORMAT_V1:
    # Create a new instance of executor for the current thread
    puppetExecutor = PuppetExecutor.PuppetExecutor(
      self.config.get('puppet', 'puppetmodules'),
      self.config.get('puppet', 'puppet_home'),
      self.config.get('puppet', 'facter_home'),
      self.config.get('agent', 'prefix'), self.config)
    commandresult = puppetExecutor.runCommand(command, in_progress_status['tmpout'],
                                              in_progress_status['tmperr'])
  else:
    commandresult = self.customServiceOrchestrator.runCommand(command,
                                                              in_progress_status['tmpout'],
                                                              in_progress_status['tmperr'])
  # dumping results
  status = self.COMPLETED_STATUS
  if commandresult['exitcode'] != 0:
    status = self.FAILED_STATUS
  roleResult = self.commandStatuses.generate_report_template(command)
  # assume some puppet plumbing to run these commands
  roleResult.update({
    'stdout': commandresult['stdout'],
    'stderr': commandresult['stderr'],
    'exitCode': commandresult['exitcode'],
    'status': status,
  })
  # empty output is normalized to the literal string 'None' for the server
  if roleResult['stdout'] == '':
    roleResult['stdout'] = 'None'
  if roleResult['stderr'] == '':
    roleResult['stderr'] = 'None'

  # let ambari know name of custom command
  if command['hostLevelParams'].has_key('custom_command'):
    roleResult['customCommand'] = command['hostLevelParams']['custom_command']

  # NOTE: this variant stringifies structuredOut with str(), not json.dumps
  if 'structuredOut' in commandresult:
    roleResult['structuredOut'] = str(commandresult['structuredOut'])
  else:
    roleResult['structuredOut'] = ''

  # let ambari know that configuration tags were applied
  if status == self.COMPLETED_STATUS:
    configHandler = ActualConfigHandler(self.config, self.configTags)
    if command.has_key('configurationTags'):
      configHandler.write_actual(command['configurationTags'])
      roleResult['configurationTags'] = command['configurationTags']
    component = {'serviceName':command['serviceName'],'componentName':command['role']}
    # Persist per-component tags on START, on INSTALL of client components,
    # and on a RESTART custom command.
    if command.has_key('roleCommand') and \
        (command['roleCommand'] == self.ROLE_COMMAND_START or \
        (command['roleCommand'] == self.ROLE_COMMAND_INSTALL \
        and component in LiveStatus.CLIENT_COMPONENTS) or \
        (command['roleCommand'] == self.ROLE_COMMAND_CUSTOM_COMMAND and \
        command['hostLevelParams'].has_key('custom_command') and \
        command['hostLevelParams']['custom_command'] == self.CUSTOM_COMMAND_RESTART)):
      configHandler.write_actual_component(command['role'], command['configurationTags'])
      configHandler.write_client_components(command['serviceName'], command['configurationTags'])
      roleResult['configurationTags'] = configHandler.read_actual_component(command['role'])

  self.commandStatuses.put_command_status(command, roleResult)
def execute_command(self, command):
  '''
  Executes commands of type EXECUTION_COMMAND.

  Runs the command through the custom service orchestrator, retrying failed
  attempts for up to commandParams['max_duration_for_retries'] seconds when
  commandParams['command_retry_enabled'] is "true". Publishes an IN_PROGRESS
  report before running and a final COMPLETED/FAILED report afterwards;
  background commands return immediately after launch without a final report.
  Auto-execution commands write to auto_* tmp files and never retry.
  '''
  clusterName = command['clusterName']
  commandId = command['commandId']
  isCommandBackground = command['commandType'] == self.BACKGROUND_EXECUTION_COMMAND
  isAutoExecuteCommand = command['commandType'] == self.AUTO_EXECUTION_COMMAND
  message = "Executing command with id = {commandId}, taskId = {taskId} for role = {role} of " \
            "cluster {cluster}.".format(
            commandId = str(commandId), taskId = str(command['taskId']),
            role=command['role'], cluster=clusterName)
  logger.info(message)

  taskId = command['taskId']
  # Preparing 'IN_PROGRESS' report
  in_progress_status = self.commandStatuses.generate_report_template(command)
  # The path of the files that contain the output log and error log use a prefix that the agent advertises to the
  # server. The prefix is defined in agent-config.ini
  if not isAutoExecuteCommand:
    in_progress_status.update({
      'tmpout': self.tmpdir + os.sep + 'output-' + str(taskId) + '.txt',
      'tmperr': self.tmpdir + os.sep + 'errors-' + str(taskId) + '.txt',
      'structuredOut' : self.tmpdir + os.sep + 'structured-out-' + str(taskId) + '.json',
      'status': self.IN_PROGRESS_STATUS
    })
  else:
    # Auto-execution commands use a distinct 'auto_' prefix so they do not
    # clobber the output files of regular commands with the same task id.
    in_progress_status.update({
      'tmpout': self.tmpdir + os.sep + 'auto_output-' + str(taskId) + '.txt',
      'tmperr': self.tmpdir + os.sep + 'auto_errors-' + str(taskId) + '.txt',
      'structuredOut' : self.tmpdir + os.sep + 'auto_structured-out-' + str(taskId) + '.json',
      'status': self.IN_PROGRESS_STATUS
    })
  self.commandStatuses.put_command_status(command, in_progress_status)

  # Retry bookkeeping. retryDuration is a remaining time budget in seconds.
  numAttempts = 0
  retryDuration = 0  # even with 0 allow one attempt
  retryAble = False
  delay = 1
  if 'commandParams' in command:
    if 'max_duration_for_retries' in command['commandParams']:
      retryDuration = int(command['commandParams']['max_duration_for_retries'])
    if 'command_retry_enabled' in command['commandParams']:
      retryAble = command['commandParams']['command_retry_enabled'] == "true"
  if isAutoExecuteCommand:
    # Auto-execution commands are never retried.
    retryAble = False

  logger.debug("Command execution metadata - retry enabled = {retryAble}, "
               "max retry duration (sec) = {retryDuration}".
               format(retryAble=retryAble, retryDuration=retryDuration))
  # Loop condition is >= 0, so even a zero budget yields one attempt.
  while retryDuration >= 0:
    numAttempts += 1
    start = 0
    if retryAble:
      start = int(time.time())
    # running command; on the first attempt output files are overwritten,
    # subsequent attempts run in retry mode.
    commandresult = self.customServiceOrchestrator.runCommand(command,
      in_progress_status['tmpout'], in_progress_status['tmperr'],
      override_output_files=numAttempts == 1, retry=numAttempts > 1)
    end = 1
    if retryAble:
      end = int(time.time())
    # Charge the elapsed execution time against the retry budget
    # (when not retryAble this subtracts 1, which only ends the loop).
    retryDuration -= (end - start)

    # dumping results
    if isCommandBackground:
      # Background command keeps running asynchronously; no final report
      # is published from here.
      return
    else:
      if commandresult['exitcode'] == 0:
        status = self.COMPLETED_STATUS
      else:
        status = self.FAILED_STATUS

    if status != self.COMPLETED_STATUS and retryAble and retryDuration > 0:
      delay = self.get_retry_delay(delay)
      if delay > retryDuration:
        delay = retryDuration
      retryDuration -= delay  # allow one last attempt
      logger.info("Retrying command id {cid} after a wait of {delay}".format(cid=taskId, delay=delay))
      time.sleep(delay)
      continue
    else:
      break

  # Build the final report from the last attempt's result.
  roleResult = self.commandStatuses.generate_report_template(command)
  roleResult.update({
    'stdout': commandresult['stdout'],
    'stderr': commandresult['stderr'],
    'exitCode': commandresult['exitcode'],
    'status': status,
  })

  # Optionally mirror command output into the agent log
  # (gated by [logging] log_command_executes in the agent config).
  if self.config.has_option("logging","log_command_executes") and int(self.config.get("logging", "log_command_executes")) == 1:
    if roleResult['stdout'] != '':
      logger.info("Begin command output log for command with id = " + str(command['taskId']) + ", role = " + command['role'] + ", roleCommand = " + command['roleCommand'])
      logger.info(roleResult['stdout'])
      logger.info("End command output log for command with id = " + str(command['taskId']) + ", role = " + command['role'] + ", roleCommand = " + command['roleCommand'])
    if roleResult['stderr'] != '':
      logger.info("Begin command stderr log for command with id = " + str(command['taskId']) + ", role = " + command['role'] + ", roleCommand = " + command['roleCommand'])
      logger.info(roleResult['stderr'])
      logger.info("End command stderr log for command with id = " + str(command['taskId']) + ", role = " + command['role'] + ", roleCommand = " + command['roleCommand'])

  # The server treats empty output as missing; report the literal 'None'.
  if roleResult['stdout'] == '':
    roleResult['stdout'] = 'None'
  if roleResult['stderr'] == '':
    roleResult['stderr'] = 'None'

  # let ambari know name of custom command
  if command['hostLevelParams'].has_key('custom_command'):
    roleResult['customCommand'] = command['hostLevelParams']['custom_command']

  if 'structuredOut' in commandresult:
    roleResult['structuredOut'] = str(json.dumps(commandresult['structuredOut']))
  else:
    roleResult['structuredOut'] = ''

  # let recovery manager know the current state
  if status == self.COMPLETED_STATUS:
    if self.controller.recovery_manager.enabled() and command.has_key('roleCommand') \
        and self.controller.recovery_manager.configured_for_recovery(command['role']):
      if command['roleCommand'] == self.ROLE_COMMAND_START:
        self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.LIVE_STATUS)
        # A successful START also means the applied config is fresh.
        self.controller.recovery_manager.update_config_staleness(command['role'], False)
        logger.info("After EXECUTION_COMMAND (START), with taskId=" + str(command['taskId']) +
                    ", current state of " + command['role'] + " to " +
                    self.controller.recovery_manager.get_current_status(command['role']) )
      elif command['roleCommand'] == self.ROLE_COMMAND_STOP or command['roleCommand'] == self.ROLE_COMMAND_INSTALL:
        self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.DEAD_STATUS)
        logger.info("After EXECUTION_COMMAND (STOP/INSTALL), with taskId=" + str(command['taskId']) +
                    ", current state of " + command['role'] + " to " +
                    self.controller.recovery_manager.get_current_status(command['role']) )
      elif command['roleCommand'] == self.ROLE_COMMAND_CUSTOM_COMMAND:
        # Only a custom RESTART implies the component is now live.
        if command['hostLevelParams'].has_key('custom_command') and \
            command['hostLevelParams']['custom_command'] == self.CUSTOM_COMMAND_RESTART:
          self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.LIVE_STATUS)
          self.controller.recovery_manager.update_config_staleness(command['role'], False)
          logger.info("After EXECUTION_COMMAND (RESTART), current state of " + command['role'] + " to " +
                      self.controller.recovery_manager.get_current_status(command['role']) )
      pass

    # let ambari know that configuration tags were applied
    configHandler = ActualConfigHandler(self.config, self.configTags)
    # Server-requested refresh of specific component config tags; answered
    # with a forced RESTART so the server re-evaluates stale configs.
    if command.has_key('forceRefreshConfigTags') and len(command['forceRefreshConfigTags']) > 0 :
      forceRefreshConfigTags = command['forceRefreshConfigTags']
      logger.info("Got refresh additional component tags command")

      for configTag in forceRefreshConfigTags :
        configHandler.update_component_tag(command['role'], configTag, command['configurationTags'][configTag])

      roleResult['customCommand'] = self.CUSTOM_COMMAND_RESTART # force restart for component to evict stale_config on server side
      command['configurationTags'] = configHandler.read_actual_component(command['role'])

    if command.has_key('configurationTags'):
      configHandler.write_actual(command['configurationTags'])
      roleResult['configurationTags'] = command['configurationTags']

    # Persist per-component tags on START or on a custom RESTART.
    if 'roleCommand' in command and \
        (command['roleCommand'] == self.ROLE_COMMAND_START or
         (command['roleCommand'] == self.ROLE_COMMAND_CUSTOM_COMMAND and
          'custom_command' in command['hostLevelParams'] and
          command['hostLevelParams']['custom_command'] == self.CUSTOM_COMMAND_RESTART)):
      configHandler.write_actual_component(command['role'], command['configurationTags'])
      if 'clientsToUpdateConfigs' in command['hostLevelParams'] and command['hostLevelParams']['clientsToUpdateConfigs']:
        configHandler.write_client_components(command['serviceName'], command['configurationTags'],
                                              command['hostLevelParams']['clientsToUpdateConfigs'])
      roleResult['configurationTags'] = configHandler.read_actual_component(
        command['role'])

  self.commandStatuses.put_command_status(command, roleResult)
def execute_command(self, command):
  """
  Executes commands of type EXECUTION_COMMAND.

  Publishes an IN_PROGRESS report, launches the command via the custom
  service orchestrator, and (unless it is a background command, which keeps
  running asynchronously) publishes the final COMPLETED/FAILED report with
  output, structured output, and the configuration tags that were applied.
  """
  cluster = command['clusterName']
  cmd_id = command['commandId']
  runs_in_background = command['commandType'] == self.BACKGROUND_EXECUTION_COMMAND
  logger.info(
      "Executing command with id = {commandId} for role = {role} of "
      "cluster {cluster}.".format(commandId=str(cmd_id),
                                  role=command['role'], cluster=cluster))

  task_id = command['taskId']
  # Advertise an IN_PROGRESS report first. The tmp file paths carry the
  # prefix the agent advertises to the server (defined in agent-config.ini).
  progress_report = self.commandStatuses.generate_report_template(command)
  progress_report.update({
    'tmpout': self.tmpdir + os.sep + 'output-' + str(task_id) + '.txt',
    'tmperr': self.tmpdir + os.sep + 'errors-' + str(task_id) + '.txt',
    'structuredOut': self.tmpdir + os.sep + 'structured-out-' + str(task_id) + '.json',
    'status': self.IN_PROGRESS_STATUS
  })
  self.commandStatuses.put_command_status(command, progress_report)

  # Launch the command itself.
  result = self.customServiceOrchestrator.runCommand(
      command, progress_report['tmpout'], progress_report['tmperr'])

  # A background command keeps running; no final report is published here.
  if runs_in_background:
    return

  if result['exitcode'] == 0:
    status = self.COMPLETED_STATUS
  else:
    status = self.FAILED_STATUS

  report = self.commandStatuses.generate_report_template(command)
  report.update({
    'stdout': result['stdout'],
    'stderr': result['stderr'],
    'exitCode': result['exitcode'],
    'status': status,
  })
  # The server treats empty output as missing; send the literal 'None'.
  if report['stdout'] == '':
    report['stdout'] = 'None'
  if report['stderr'] == '':
    report['stderr'] = 'None'

  # Echo the custom command name back so the server can match it up.
  if command['hostLevelParams'].has_key('custom_command'):
    report['customCommand'] = command['hostLevelParams']['custom_command']

  if 'structuredOut' in result:
    report['structuredOut'] = str(json.dumps(result['structuredOut']))
  else:
    report['structuredOut'] = ''

  # On success, record which configuration tags were actually applied.
  if status == self.COMPLETED_STATUS:
    config_handler = ActualConfigHandler(self.config, self.configTags)

    # Server-requested refresh of specific component tags; answered with a
    # forced RESTART so the server re-evaluates stale configs.
    if command.has_key('forceRefreshConfigTags') and len(command['forceRefreshConfigTags']) > 0:
      logger.info("Got refresh additional component tags command")
      for tag in command['forceRefreshConfigTags']:
        config_handler.update_component_tag(command['role'], tag,
                                            command['configurationTags'][tag])
      report['customCommand'] = self.CUSTOM_COMMAND_RESTART  # force restart for component to evict stale_config on server side
      command['configurationTags'] = config_handler.read_actual_component(command['role'])

    if command.has_key('configurationTags'):
      config_handler.write_actual(command['configurationTags'])
      report['configurationTags'] = command['configurationTags']

    component = {'serviceName': command['serviceName'],
                 'componentName': command['role']}
    role_command = command['roleCommand'] if command.has_key('roleCommand') else None
    is_start = role_command == self.ROLE_COMMAND_START
    is_client_install = (role_command == self.ROLE_COMMAND_INSTALL and
                         component in LiveStatus.CLIENT_COMPONENTS)
    is_restart = (role_command == self.ROLE_COMMAND_CUSTOM_COMMAND and
                  command['hostLevelParams'].has_key('custom_command') and
                  command['hostLevelParams']['custom_command'] == self.CUSTOM_COMMAND_RESTART)
    # Persist per-component tags on START, client INSTALL, or custom RESTART.
    if is_start or is_client_install or is_restart:
      config_handler.write_actual_component(command['role'],
                                            command['configurationTags'])
      if command['hostLevelParams'].has_key('clientsToUpdateConfigs') and \
          command['hostLevelParams']['clientsToUpdateConfigs']:
        config_handler.write_client_components(
            command['serviceName'], command['configurationTags'],
            command['hostLevelParams']['clientsToUpdateConfigs'])
      report['configurationTags'] = config_handler.read_actual_component(command['role'])

  self.commandStatuses.put_command_status(command, report)
def execute_command(self, command):
  """
  Executes commands of type EXECUTION_COMMAND.

  Runs the command through the custom service orchestrator, reporting an
  IN_PROGRESS status up front and a final COMPLETED/FAILED status with the
  captured output and applied configuration tags once the run finishes.
  """
  cluster_name = command['clusterName']
  logger.info(
      "Executing command with id = {commandId} for role = {role} of "
      "cluster {cluster}.".format(commandId=str(command['commandId']),
                                  role=command['role'],
                                  cluster=cluster_name))
  logger.debug(pprint.pformat(command))

  task_id = command['taskId']
  # Advertise the IN_PROGRESS report along with the tmp file locations the
  # server can tail for live output.
  progress = self.commandStatuses.generate_report_template(command)
  progress.update({
    'tmpout': self.tmpdir + os.sep + 'output-' + str(task_id) + '.txt',
    'tmperr': self.tmpdir + os.sep + 'errors-' + str(task_id) + '.txt',
    'structuredOut': self.tmpdir + os.sep + 'structured-out-' + str(task_id) + '.json',
    'status': self.IN_PROGRESS_STATUS
  })
  self.commandStatuses.put_command_status(command, progress)

  # Run the command.
  outcome = self.customServiceOrchestrator.runCommand(
      command, progress['tmpout'], progress['tmperr'])

  # Translate the exit code into a final status.
  status = self.FAILED_STATUS if outcome['exitcode'] != 0 else self.COMPLETED_STATUS

  final_report = self.commandStatuses.generate_report_template(command)
  final_report.update({
    'stdout': outcome['stdout'],
    'stderr': outcome['stderr'],
    'exitCode': outcome['exitcode'],
    'status': status,
  })
  # The server treats empty output as missing; send the literal 'None'.
  for stream in ('stdout', 'stderr'):
    if final_report[stream] == '':
      final_report[stream] = 'None'

  # Echo the custom command name back so the server can match it up.
  if command['hostLevelParams'].has_key('custom_command'):
    final_report['customCommand'] = command['hostLevelParams']['custom_command']

  final_report['structuredOut'] = (str(json.dumps(outcome['structuredOut']))
                                   if 'structuredOut' in outcome else '')

  # On success, record which configuration tags were actually applied.
  if status == self.COMPLETED_STATUS:
    config_handler = ActualConfigHandler(self.config, self.configTags)
    if command.has_key('configurationTags'):
      config_handler.write_actual(command['configurationTags'])
      final_report['configurationTags'] = command['configurationTags']

    component = {'serviceName': command['serviceName'],
                 'componentName': command['role']}
    role_command = command['roleCommand'] if command.has_key('roleCommand') else None
    # Persist per-component tags on START, on INSTALL of a client component,
    # or on a custom RESTART command.
    should_write_component_tags = (
        role_command == self.ROLE_COMMAND_START or
        (role_command == self.ROLE_COMMAND_INSTALL and
         component in LiveStatus.CLIENT_COMPONENTS) or
        (role_command == self.ROLE_COMMAND_CUSTOM_COMMAND and
         command['hostLevelParams'].has_key('custom_command') and
         command['hostLevelParams']['custom_command'] == self.CUSTOM_COMMAND_RESTART))
    if should_write_component_tags:
      config_handler.write_actual_component(command['role'],
                                            command['configurationTags'])
      config_handler.write_client_components(command['serviceName'],
                                             command['configurationTags'])
      final_report['configurationTags'] = config_handler.read_actual_component(command['role'])

  self.commandStatuses.put_command_status(command, final_report)
def execute_command(self, command):
  '''
  Executes commands of type EXECUTION_COMMAND.

  Runs the command through the custom service orchestrator with an optional
  retry loop bounded by commandParams['max_duration_for_retries'] seconds
  (enabled via commandParams['command_retry_enabled'] == "true"). Publishes
  an IN_PROGRESS report before running and a final COMPLETED/FAILED report
  afterwards. Background commands return right after launch with no final
  report; auto-execution commands use auto_* tmp files and never retry.
  A run killed by SIGTERM/SIGKILL is treated as canceled and not retried.
  On completion the recovery manager and actual-config bookkeeping are
  updated to reflect the component's new state.
  '''
  clusterName = command['clusterName']
  commandId = command['commandId']
  isCommandBackground = command['commandType'] == self.BACKGROUND_EXECUTION_COMMAND
  isAutoExecuteCommand = command['commandType'] == self.AUTO_EXECUTION_COMMAND
  message = "Executing command with id = {commandId}, taskId = {taskId} for role = {role} of " \
            "cluster {cluster}.".format(
            commandId = str(commandId), taskId = str(command['taskId']),
            role=command['role'], cluster=clusterName)
  logger.info(message)

  taskId = command['taskId']
  # Preparing 'IN_PROGRESS' report
  in_progress_status = self.commandStatuses.generate_report_template(command)
  # The path of the files that contain the output log and error log use a prefix that the agent advertises to the
  # server. The prefix is defined in agent-config.ini
  if not isAutoExecuteCommand:
    in_progress_status.update({
      'tmpout': self.tmpdir + os.sep + 'output-' + str(taskId) + '.txt',
      'tmperr': self.tmpdir + os.sep + 'errors-' + str(taskId) + '.txt',
      'structuredOut' : self.tmpdir + os.sep + 'structured-out-' + str(taskId) + '.json',
      'status': self.IN_PROGRESS_STATUS
    })
  else:
    # Auto-execution commands use a distinct 'auto_' prefix so they do not
    # clobber the output files of regular commands with the same task id.
    in_progress_status.update({
      'tmpout': self.tmpdir + os.sep + 'auto_output-' + str(taskId) + '.txt',
      'tmperr': self.tmpdir + os.sep + 'auto_errors-' + str(taskId) + '.txt',
      'structuredOut' : self.tmpdir + os.sep + 'auto_structured-out-' + str(taskId) + '.json',
      'status': self.IN_PROGRESS_STATUS
    })
  self.commandStatuses.put_command_status(command, in_progress_status)

  # Retry bookkeeping. retryDuration is a remaining time budget in seconds.
  numAttempts = 0
  retryDuration = 0  # even with 0 allow one attempt
  retryAble = False
  delay = 1
  log_command_output = True
  # Per-command opt-out of mirroring command output into the agent log.
  if 'commandParams' in command and 'log_output' in command['commandParams'] and "false" == command['commandParams']['log_output']:
    log_command_output = False

  if 'commandParams' in command:
    if 'max_duration_for_retries' in command['commandParams']:
      retryDuration = int(command['commandParams']['max_duration_for_retries'])
    if 'command_retry_enabled' in command['commandParams']:
      retryAble = command['commandParams']['command_retry_enabled'] == "true"
  if isAutoExecuteCommand:
    # Auto-execution commands are never retried.
    retryAble = False

  logger.info("Command execution metadata - taskId = {taskId}, retry enabled = {retryAble}, max retry duration (sec) = {retryDuration}, log_output = {log_command_output}".
              format(taskId=taskId, retryAble=retryAble, retryDuration=retryDuration, log_command_output=log_command_output))
  # Loop condition is >= 0, so even a zero budget yields one attempt.
  while retryDuration >= 0:
    numAttempts += 1
    start = 0
    if retryAble:
      start = int(time.time())
    # running command; the first attempt overwrites the output files,
    # subsequent attempts run in retry mode.
    commandresult = self.customServiceOrchestrator.runCommand(command,
      in_progress_status['tmpout'], in_progress_status['tmperr'],
      override_output_files=numAttempts == 1, retry=numAttempts > 1)
    end = 1
    if retryAble:
      end = int(time.time())
    # Charge the elapsed execution time against the retry budget
    # (when not retryAble this subtracts 1, which only ends the loop).
    retryDuration -= (end - start)

    # dumping results
    if isCommandBackground:
      # Background command keeps running asynchronously; no final report is
      # published from here. (NOTE(review): the 'cid' kwarg below is not
      # referenced by the format string — harmless but redundant.)
      logger.info("Command is background command, quit retrying. Exit code: {exitCode}, retryAble: {retryAble}, retryDuration (sec): {retryDuration}, last delay (sec): {delay}"
                  .format(cid=taskId, exitCode=commandresult['exitcode'], retryAble=retryAble, retryDuration=retryDuration, delay=delay))
      return
    else:
      if commandresult['exitcode'] == 0:
        status = self.COMPLETED_STATUS
      else:
        status = self.FAILED_STATUS

        # A negative exit code equal to -SIGTERM/-SIGKILL means the process
        # was killed (command canceled); do not retry a canceled command.
        if (commandresult['exitcode'] == -signal.SIGTERM) or (commandresult['exitcode'] == -signal.SIGKILL):
          logger.info('Command {cid} was canceled!'.format(cid=taskId))
          break

    if status != self.COMPLETED_STATUS and retryAble and retryDuration > 0:
      delay = self.get_retry_delay(delay)
      if delay > retryDuration:
        delay = retryDuration
      retryDuration -= delay  # allow one last attempt
      commandresult['stderr'] += "\n\nCommand failed. Retrying command execution ...\n\n"
      logger.info("Retrying command id {cid} after a wait of {delay}".format(cid=taskId, delay=delay))
      time.sleep(delay)
      continue
    else:
      logger.info("Quit retrying for command id {cid}. Status: {status}, retryAble: {retryAble}, retryDuration (sec): {retryDuration}, last delay (sec): {delay}"
                  .format(cid=taskId, status=status, retryAble=retryAble, retryDuration=retryDuration, delay=delay))
      break

  # final result to stdout
  commandresult['stdout'] += '\n\nCommand completed successfully!\n' if status == self.COMPLETED_STATUS else '\n\nCommand failed after ' + str(numAttempts) + ' tries\n'
  logger.info('Command {cid} completed successfully!'.format(cid=taskId) if status == self.COMPLETED_STATUS else 'Command {cid} failed after {attempts} tries'.format(cid=taskId, attempts=numAttempts))

  # Build the final report from the last attempt's result.
  roleResult = self.commandStatuses.generate_report_template(command)
  roleResult.update({
    'stdout': commandresult['stdout'],
    'stderr': commandresult['stderr'],
    'exitCode': commandresult['exitcode'],
    'status': status,
  })

  # Optionally mirror command output into the agent log; gated both by the
  # agent config ([logging] log_command_executes) and the per-command flag.
  if self.config.has_option("logging","log_command_executes") \
      and int(self.config.get("logging", "log_command_executes")) == 1 \
      and log_command_output:

    if roleResult['stdout'] != '':
      logger.info("Begin command output log for command with id = " + str(command['taskId']) + ", role = " + command['role'] + ", roleCommand = " + command['roleCommand'])
      self.log_command_output(roleResult['stdout'], str(command['taskId']))
      logger.info("End command output log for command with id = " + str(command['taskId']) + ", role = " + command['role'] + ", roleCommand = " + command['roleCommand'])

    if roleResult['stderr'] != '':
      logger.info("Begin command stderr log for command with id = " + str(command['taskId']) + ", role = " + command['role'] + ", roleCommand = " + command['roleCommand'])
      self.log_command_output(roleResult['stderr'], str(command['taskId']))
      logger.info("End command stderr log for command with id = " + str(command['taskId']) + ", role = " + command['role'] + ", roleCommand = " + command['roleCommand'])

  # The server treats empty output as missing; report the literal 'None'.
  if roleResult['stdout'] == '':
    roleResult['stdout'] = 'None'
  if roleResult['stderr'] == '':
    roleResult['stderr'] = 'None'

  # let ambari know name of custom command
  if command['hostLevelParams'].has_key('custom_command'):
    roleResult['customCommand'] = command['hostLevelParams']['custom_command']

  if 'structuredOut' in commandresult:
    roleResult['structuredOut'] = str(json.dumps(commandresult['structuredOut']))
  else:
    roleResult['structuredOut'] = ''

  # let recovery manager know the current state
  if status == self.COMPLETED_STATUS:
    if self.controller.recovery_manager.enabled() and command.has_key('roleCommand') \
        and self.controller.recovery_manager.configured_for_recovery(command['role']):
      if command['roleCommand'] == self.ROLE_COMMAND_START:
        self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.LIVE_STATUS)
        # A successful START also means the applied config is fresh.
        self.controller.recovery_manager.update_config_staleness(command['role'], False)
        logger.info("After EXECUTION_COMMAND (START), with taskId=" + str(command['taskId']) +
                    ", current state of " + command['role'] + " to " +
                    self.controller.recovery_manager.get_current_status(command['role']) )
      elif command['roleCommand'] == self.ROLE_COMMAND_STOP or command['roleCommand'] == self.ROLE_COMMAND_INSTALL:
        self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.DEAD_STATUS)
        logger.info("After EXECUTION_COMMAND (STOP/INSTALL), with taskId=" + str(command['taskId']) +
                    ", current state of " + command['role'] + " to " +
                    self.controller.recovery_manager.get_current_status(command['role']) )
      elif command['roleCommand'] == self.ROLE_COMMAND_CUSTOM_COMMAND:
        # Only a custom RESTART implies the component is now live.
        if command['hostLevelParams'].has_key('custom_command') and \
            command['hostLevelParams']['custom_command'] == self.CUSTOM_COMMAND_RESTART:
          self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.LIVE_STATUS)
          self.controller.recovery_manager.update_config_staleness(command['role'], False)
          logger.info("After EXECUTION_COMMAND (RESTART), current state of " + command['role'] + " to " +
                      self.controller.recovery_manager.get_current_status(command['role']) )
      pass

    # let ambari know that configuration tags were applied
    configHandler = ActualConfigHandler(self.config, self.configTags)
    if command.has_key('configurationTags'):
      configHandler.write_actual(command['configurationTags'])
      roleResult['configurationTags'] = command['configurationTags']
    component = {'serviceName':command['serviceName'],'componentName':command['role']}
    # Persist per-component tags on START, on INSTALL of a client component,
    # or on a custom RESTART command.
    if 'roleCommand' in command and \
        (command['roleCommand'] == self.ROLE_COMMAND_START or
         (command['roleCommand'] == self.ROLE_COMMAND_INSTALL and
          component in LiveStatus.CLIENT_COMPONENTS) or
         (command['roleCommand'] == self.ROLE_COMMAND_CUSTOM_COMMAND and
          'custom_command' in command['hostLevelParams'] and
          command['hostLevelParams']['custom_command'] == self.CUSTOM_COMMAND_RESTART)):
      configHandler.write_actual_component(command['role'],
                                           command['configurationTags'])
      if 'clientsToUpdateConfigs' in command['hostLevelParams'] and command['hostLevelParams']['clientsToUpdateConfigs']:
        configHandler.write_client_components(command['serviceName'],
                                              command['configurationTags'],
                                              command['hostLevelParams']['clientsToUpdateConfigs'])
      roleResult['configurationTags'] = configHandler.read_actual_component(
        command['role'])
  elif status == self.FAILED_STATUS:
    # A failed INSTALL is recorded so the recovery manager can re-attempt it.
    if self.controller.recovery_manager.enabled() and command.has_key('roleCommand') \
        and self.controller.recovery_manager.configured_for_recovery(command['role']):
      if command['roleCommand'] == self.ROLE_COMMAND_INSTALL:
        self.controller.recovery_manager.update_current_status(command['role'],
          self.controller.recovery_manager.INSTALL_FAILED)
        logger.info("After EXECUTION_COMMAND (INSTALL), with taskId=" + str(command['taskId']) +
                    ", current state of " + command['role'] + " to " +
                    self.controller.recovery_manager.get_current_status(command['role']))

  self.commandStatuses.put_command_status(command, roleResult)