def ResetCooldownTimestamps(self):
    workerNames = RepositoryUtils.GetSlaveNames(True)

    for workerName in workerNames:
        if self.verLog:
            self.LogInfo("[{0}] = a worker".format(workerName))

        workerInfo = RepositoryUtils.GetSlaveInfo(workerName, True)
        workerState = workerInfo.SlaveState

        if self.verLog:
            self.LogInfo("[{0}] {1} = worker state".format(
                workerName, workerState))

        if workerState not in ("Idle", "Stalled", "Offline"):
            deleteTimestampKey = self.GetConfigEntryWithDefault(
                "SherpaDeleteTimestampKey", "Sherpa_DeleteTimestamp")
            workerSettings = RepositoryUtils.GetSlaveSettings(
                workerName, True)
            existingTimestamp = workerSettings.GetSlaveExtraInfoKeyValue(
                deleteTimestampKey)

            if existingTimestamp:
                if self.stdLog:
                    self.LogInfo(
                        "[{0}] Worker is {1}: removing Sherpa delete timestamp as extra info key/value pair: {2} (key)"
                        .format(workerName, workerState,
                                deleteTimestampKey))

                self.UnearmarkForDeletion(workerSettings)
def inject_openpype_environment(deadlinePlugin):
    job = deadlinePlugin.GetJob()
    job = RepositoryUtils.GetJob(job.JobId, True)  # invalidates cache

    print("inject_openpype_environment start")
    try:
        exe_list = job.GetJobExtraInfoKeyValue("openpype_executables")
        openpype_app = FileUtils.SearchFileList(exe_list)
        if openpype_app == "":
            raise RuntimeError(
                "OpenPype executable was not found " +
                "in the semicolon separated list \"" + exe_list + "\". " +
                "The path to the render executable can be configured " +
                "from the Plugin Configuration in the Deadline Monitor.")

        # tempfile.TemporaryFile cannot be used because of locking
        export_url = os.path.join(tempfile.gettempdir(),
                                  time.strftime('%Y%m%d%H%M%S'),
                                  'env.json')  # add HHMMSS + delete later
        print("export_url {}".format(export_url))

        args = [openpype_app, 'extractenvironments', export_url]

        add_args = {}
        add_args['project'] = \
            job.GetJobEnvironmentKeyValue('AVALON_PROJECT')
        add_args['asset'] = job.GetJobEnvironmentKeyValue('AVALON_ASSET')
        add_args['task'] = job.GetJobEnvironmentKeyValue('AVALON_TASK')
        add_args['app'] = job.GetJobEnvironmentKeyValue('AVALON_APP_NAME')

        if all(add_args.values()):
            for key, value in add_args.items():
                args.append("--{}".format(key))
                args.append(value)
        else:
            msg = "Required env vars: AVALON_PROJECT, AVALON_ASSET, " + \
                  "AVALON_TASK, AVALON_APP_NAME"
            raise RuntimeError(msg)

        print("args::{}".format(args))

        exit_code = subprocess.call(args, shell=True)
        if exit_code != 0:
            raise RuntimeError("Publishing failed, check worker's log")

        with open(export_url) as fp:
            contents = json.load(fp)
            for key, value in contents.items():
                deadlinePlugin.SetEnvironmentVariable(key, value)

        os.remove(export_url)

        print("inject_openpype_environment end")
    except Exception:
        import traceback

        print(traceback.format_exc())
        print("inject_openpype_environment failed")
        RepositoryUtils.FailJob(job)
        raise
def map_files_and_replace_slashes(inputFile, outputFile):
    if SystemUtils.IsRunningOnWindows():
        RepositoryUtils.CheckPathMappingInFileAndReplaceSeparator(
            inputFile, outputFile, "/", "\\")
    else:
        RepositoryUtils.CheckPathMappingInFileAndReplaceSeparator(
            inputFile, outputFile, "\\", "/")
        # Copy the source file's permissions onto the mapped copy.
        os.chmod(outputFile, os.stat(inputFile).st_mode)
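# Illustrative call (hypothetical paths; this only runs inside a
# Deadline plugin process where SystemUtils/RepositoryUtils exist):
# remap a submitted scene file into a worker-local copy with native
# separators, preserving the source file's permissions on POSIX.
#
#   map_files_and_replace_slashes("/jobs/data/scene.vrscene",
#                                 "/tmp/scene_mapped.vrscene")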
def RemoveDeletedWorkers(self):
    """
    Remove (Offline/Stalled) workers so they do not appear in the
    Monitor Worker List Panel, to avoid filling up the list with
    workers that will never reconnect.
    """
    if self.GetBooleanConfigEntryWithDefault("RemoveDeletedWorkers",
                                             False) is True:
        if self.stdLog:
            self.LogInfo("Remove deleted worker(s)")

        workerNames = RepositoryUtils.GetSlaveNames(True)

        for workerName in workerNames:
            workerSettings = RepositoryUtils.GetSlaveSettings(
                workerName, True)
            identifierKey = self.GetConfigEntryWithDefault(
                "SherpaIdentifierKey", "Sherpa_ID")
            resourceID = workerSettings.GetSlaveExtraInfoKeyValue(
                identifierKey)

            if self.verLog:
                self.LogInfo("[{0}] Worker's resource ID: {1}".format(
                    workerName, resourceID))

            if resourceID:
                deletedKey = self.GetConfigEntryWithDefault(
                    "SherpaDeletedKey", "Sherpa_Deleted")
                deleted = workerSettings.GetSlaveExtraInfoKeyValue(
                    deletedKey)

                if deleted == "True":
                    if self.stdLog:
                        self.LogInfo(
                            "[{0}] Deleted worker".format(workerName))

                    # If the resource is still around, it will check in
                    # again after deletion, so perform a quick check to
                    # see if the resource has already been destroyed.
                    marking = GetResourceMarking(self.sherpaClient,
                                                 resourceID)

                    if marking == "destroyed":
                        if self.stdLog:
                            self.LogInfo(
                                "[{0}] Deleting worker as resource is {1}"
                                .format(workerName, marking))
                        RepositoryUtils.DeleteSlave(workerName)
                    else:
                        if self.verLog:
                            self.LogInfo(
                                "[{0}] Postpone deletion of worker as resource is {1}"
                                .format(workerName, marking))
def get_frame_per_file_render_arguments(self):
    """
    Generates the render arguments for when FramePerFile mode is set to
    True. This means that each output frame has an associated scene
    file with the same frame number. This is mostly used in Octane 1,
    before .orbx was a thing.

    :return: a list of commandline arguments, and the scene file
    """
    render_args = ['-e', '-q']

    sceneFile = self.GetPluginInfoEntry("SceneFile")
    sceneFile = PathUtils.ToPlatformIndependentPath(
        RepositoryUtils.CheckPathMapping(sceneFile))
    outputFile = self.CreateOutputFile()

    paddingSize = 0
    if not self.GetBooleanPluginInfoEntryWithDefault("SingleFile", True):
        currPadding = FrameUtils.GetFrameStringFromFilename(sceneFile)
        paddingSize = len(currPadding)

        if paddingSize > 0:
            newPadding = StringUtils.ToZeroPaddedString(
                self.GetStartFrame(), paddingSize, False)
            sceneFile = FrameUtils.SubstituteFrameNumber(
                sceneFile, newPadding)

    # Build the new output file name.
    if outputFile:
        outputFile = PathUtils.ToPlatformIndependentPath(
            RepositoryUtils.CheckPathMapping(outputFile))

        # Add padding to output file if necessary.
        if paddingSize > 0:
            outputFile = FrameUtils.SubstituteFrameNumber(
                outputFile, newPadding)
            outputPath = os.path.dirname(outputFile)
            outputFileName, outputExtension = os.path.splitext(outputFile)
            outputFile = os.path.join(
                outputPath, outputFileName + newPadding + outputExtension)

        render_args.extend(['-o', outputFile])

    sample = self.GetIntegerPluginInfoEntryWithDefault(
        "OverrideSampling", 0)
    if sample > 0:
        render_args.extend(['-s', str(sample)])

    render_target = self.GetPluginInfoEntryWithDefault(
        "RenderTargetOCS", "")
    if render_target:
        render_args.extend(['-t', render_target])

    return render_args, sceneFile
def OnJobFinished(self, job):
    # Skip job if filtered or no filter
    jobNameFilter = self.GetConfigEntryWithDefault('JobNameFilter', '')
    if not jobNameFilter or not re.match(jobNameFilter, job.JobName):
        return
    pluginNameFilter = self.GetConfigEntryWithDefault(
        'PluginNameFilter', '')
    if not pluginNameFilter or not re.match(pluginNameFilter,
                                            job.JobPlugin):
        return

    inputFileName = self.GetConfigEntry('InputFile')
    outputFileName = self.GetConfigEntry('OutputFile')

    # Format tokens
    delimiter = self.GetConfigEntryWithDefault(
        'Delimiter', '').strip().replace(' ', '')
    if len(delimiter) in [1, 2]:
        inputFileName = formatToken(job,
                                    getTokens(inputFileName, delimiter),
                                    inputFileName)
        outputFileName = formatToken(job,
                                     getTokens(outputFileName, delimiter),
                                     outputFileName)
    else:
        self.LogWarning(
            'Token Delimiter "%s" should be one or two characters long' %
            delimiter)
        return

    # Path mapping
    inputFileName = RepositoryUtils.CheckPathMapping(inputFileName, True)
    outputFileName = RepositoryUtils.CheckPathMapping(outputFileName, True)

    if not os.path.isdir(os.path.dirname(inputFileName)):
        self.LogWarning('No such directory %s' %
                        os.path.dirname(inputFileName))
        return

    # sequenceToWildcard is sketched below.
    if not glob.glob(sequenceToWildcard(inputFileName)):
        self.LogWarning('No file/sequence %s' % inputFileName)
        return

    createFFmpegJob(
        job,
        inputFileName=inputFileName,
        outputFileName=outputFileName,
        outputArgs=self.GetConfigEntryWithDefault('OutputArgs', ''),
        inputArgs=self.GetConfigEntryWithDefault('InputArgs', ''),
        priority=self.GetConfigEntryWithDefault('Priority', '50'))
    self.LogInfo(
        'Submitted ffmpeg job with output: {}'.format(outputFileName))
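# sequenceToWildcard is used above but defined elsewhere in this
# repository. A minimal runnable sketch of the assumed contract,
# replacing a frame-padding token with a glob wildcard (the regexes
# here are an assumption, not the original implementation):
import re


def sequence_to_wildcard_sketch(path):
    # "shot010.%04d.exr" -> "shot010.*.exr"
    path = re.sub(r"%0?\d*d", "*", path)
    # "shot010.####.exr" -> "shot010.*.exr"
    return re.sub(r"#+", "*", path)


assert sequence_to_wildcard_sketch("shot010.%04d.exr") == "shot010.*.exr"
assert sequence_to_wildcard_sketch("shot010.####.exr") == "shot010.*.exr"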
def initialize_settings(self):
    """Loads concurrent task information from the Plugin."""
    config = RepositoryUtils.GetPluginConfig(self.name)
    self.concurrent_tasks = config.GetBooleanConfigEntryWithDefault(
        "ConcurrentTasks", True)

    if PLUGIN_LIMITS_SUPPORTED:
        self.limits = RepositoryUtils.GetPluginLimitGroups(self.name)
    else:
        # We don't have scripting API support for this (please update
        # the client); replicate previous behavior by pretending the
        # plugin has no assigned limits.
        self.limits = []
def GetTaskCollection():
    connStr = RepositoryUtils.GetDatabaseConnectionString()
    urls = connStr.strip('()').split(',')
    url = urls[0]
    client = MongoClient('mongodb://' + url)
    db = client.GetServer().GetDatabase('celery_deadline')
    return db.GetCollection('job_tasks')
def DetermineWorkerCountForJob(self, job, concurrentTasks):
    """Given a job and the number of concurrentTasks, determine the
    number of workers that could work on it."""
    queued_tasks = self.GetQueuedTasksForPreJobTaskMode(job)

    # Integer division in Python 2 returns an int rounded down; to
    # avoid this, we import division from __future__. ceil() returns a
    # float in Python 2, and the Target of the SFR needs to be an int
    # or long, so we convert explicitly to an int.
    num_workers = int(ceil(queued_tasks / concurrentTasks))

    # Exit if num_workers is 0 or negative.
    if num_workers <= 0:
        return 0

    for limit in self.LimitedLimitsForJob(job):
        num_workers = min(
            num_workers,
            self.limit_groups[limit].MaxSlavesForLimit(concurrentTasks))

        # Exit if num_workers is 0 or negative.
        if num_workers <= 0:
            return 0

    # Apply the job's machine limit to the count of eligible tasks.
    machineLimit = RepositoryUtils.GetMachineLimit(job.JobId, True)
    if machineLimit and machineLimit.LimitGroupLimit != 0:
        num_workers = min(
            num_workers,
            machineLimit.LimitGroupLimit - machineLimit.LimitInUse)
        if num_workers <= 0:
            return 0

    return num_workers
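# A tiny runnable illustration of the ceil division above (pure
# Python, no Deadline APIs; the numbers are hypothetical). With true
# division, 10 queued tasks at 4 concurrent tasks per worker need
# ceil(10 / 4) = 3 workers:
from math import ceil

assert int(ceil(10 / float(4))) == 3
# Plain integer division would under-provision: 10 // 4 == 2.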
def RenderArgument(self):
    arguments = str(self.GetPluginInfoEntryWithDefault("Arguments", ""))
    arguments = RepositoryUtils.CheckPathMapping(arguments)

    arguments = re.sub(r"<(?i)STARTFRAME>", str(self.GetStartFrame()),
                       arguments)
    arguments = re.sub(r"<(?i)ENDFRAME>", str(self.GetEndFrame()),
                       arguments)
    arguments = re.sub(r"<(?i)QUOTE>", "\"", arguments)

    # ReplacePaddedFrame is sketched below.
    arguments = self.ReplacePaddedFrame(arguments,
                                        "<(?i)STARTFRAME%([0-9]+)>",
                                        self.GetStartFrame())
    arguments = self.ReplacePaddedFrame(arguments,
                                        "<(?i)ENDFRAME%([0-9]+)>",
                                        self.GetEndFrame())

    count = 0
    for filename in self.GetAuxiliaryFilenames():
        localAuxFile = Path.Combine(self.GetJobsDataDirectory(), filename)
        arguments = re.sub(r"<(?i)AUXFILE" + str(count) + r">",
                           localAuxFile.replace("\\", "/"), arguments)
        count += 1

    return arguments
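# ReplacePaddedFrame is called above but not shown here. A plausible
# runnable sketch of such a helper (an assumption, not the plugin's
# actual implementation): expand "<STARTFRAME%4>" to the frame number
# zero-padded to 4 digits. Note the sketch moves (?i) to the front of
# the pattern, as Python 3.11+ rejects mid-pattern inline flags.
import re


def replace_padded_frame_sketch(arguments, pattern, frame):
    match = re.search(pattern, arguments)
    while match is not None:
        padding = int(match.group(1))
        arguments = (arguments[:match.start()]
                     + str(frame).zfill(padding)
                     + arguments[match.end():])
        match = re.search(pattern, arguments)
    return arguments


assert replace_padded_frame_sketch(
    "-f <STARTFRAME%4>", r"(?i)<STARTFRAME%([0-9]+)>", 7) == "-f 0007"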
def add_slave_to_limit_group(slave, group_name, exclude):
    lg = RepositoryUtils.GetLimitGroup(group_name, True)
    if lg is None:
        print('The limit group {} was not found'.format(group_name))
        return

    if exclude:
        if slave not in lg.LimitGroupExcludedSlaves:
            newlist = System.Collections.Generic.List[System.String](
                lg.LimitGroupExcludedSlaves)
            newlist.Add(slave)
            lg.SetLimitGroupExcludedSlaves(newlist.ToArray())
    else:
        if slave not in lg.LimitGroupListedSlaves:
            newlist = System.Collections.Generic.List[System.String](
                lg.LimitGroupListedSlaves)
            newlist.Add(slave)
            lg.SetLimitGroupListedSlaves(newlist.ToArray())

    RepositoryUtils.SaveLimitGroup(lg)
def RenderArgument(self):
    """
    Builds up the commandline render arguments as a list, which then
    gets transformed into a string.

    :return: a string of commandline arguments
    """
    scene_file = self.GetPluginInfoEntry("SceneFile")
    scene_file = PathUtils.ToPlatformIndependentPath(
        RepositoryUtils.CheckPathMapping(scene_file))

    render_args = ['--no-gui']

    frame_per_file_mode = self.GetBooleanPluginInfoEntryWithDefault(
        "FramePerFileMode", False)
    if frame_per_file_mode:
        # Octane 1 workflow that's still supported in 2 and onward.
        temp_render_args, scene_file = \
            self.get_frame_per_file_render_arguments()
        render_args.extend(temp_render_args)
    else:
        render_args.extend(self.get_script_render_arguments())

    additional_args = self.GetPluginInfoEntryWithDefault(
        "AdditionalArgs", "").strip()
    if additional_args:
        render_args.append(additional_args)

    render_args.extend(self.get_gpu_render_arguments())
    render_args.append(scene_file)

    # quote_cmdline_args is sketched below.
    return self.quote_cmdline_args(render_args)
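# quote_cmdline_args is referenced above but not defined in this file.
# A minimal sketch of the assumed behavior (not the plugin's actual
# implementation): join the argument list into one command line,
# quoting any entry that contains spaces.
def quote_cmdline_args_sketch(args):
    quoted = []
    for arg in args:
        if " " in arg and not arg.startswith('"'):
            arg = '"{}"'.format(arg)
        quoted.append(arg)
    return " ".join(quoted)


assert quote_cmdline_args_sketch(
    ['--no-gui', '-o', 'C:/out dir/frame.png']
) == '--no-gui -o "C:/out dir/frame.png"'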
def GetIncompleteFrames(job):
    # Second argument is "invalidate"; not sure if it's necessary.
    tasks = RepositoryUtils.GetJobTasks(job, False).TaskCollectionTasks
    incomplete = []
    for task in tasks:
        if task.TaskStatus != 'Done':
            incomplete.extend(list(task.TaskFrameList))
    return incomplete
def SetupLimitSettings(self):
    """Creates a dictionary for all Limits containing settings
    information."""
    limitGroups = RepositoryUtils.GetLimitGroups(True)
    self.limit_groups = {
        limitGroup.Name: limit_settings(limitGroup)
        for limitGroup in limitGroups
    }
def pre_render_tasks(self):
    """Load config file and do remapping."""
    self.LogInfo("OpenPype Tile Assembler starting...")
    scene_filename = self.GetDataFilename()

    temp_scene_directory = self.CreateTempDirectory(
        "thread" + str(self.GetThreadNumber()))
    temp_scene_filename = Path.GetFileName(scene_filename)
    self.config_file = Path.Combine(temp_scene_directory,
                                    temp_scene_filename)

    if SystemUtils.IsRunningOnWindows():
        RepositoryUtils.CheckPathMappingInFileAndReplaceSeparator(
            scene_filename, self.config_file, "/", "\\")
    else:
        RepositoryUtils.CheckPathMappingInFileAndReplaceSeparator(
            scene_filename, self.config_file, "\\", "/")

    # Copy the source file's permissions onto the remapped copy
    # (stat'ing the copy itself would make this a no-op).
    os.chmod(self.config_file, os.stat(scene_filename).st_mode)
def __main__(*args):
    """An entry script for multiple post task scripts (chained)

    This script will look up script names from the job property's
    environment key `AVALON_POST_TASK_SCRIPTS`. The script must be in
    `reveries.scripts.post_task` and must have a function `__main__`
    defined.

    The process will NOT continue if any error is raised in a script.
    (chained)

    (NOTE) Post task scripts will not run if the task has an error.

    Args:
        (DeadlineRepository.Plugins): Plugin object
        (str): Script type name, e.g. "post task"

    """
    deadline_plugin = args[0]
    job = deadline_plugin.GetJob()
    task = deadline_plugin.GetCurrentTask()

    script_names = job.GetJobEnvironmentKeyValue("AVALON_POST_TASK_SCRIPTS")
    if not script_names:
        log.warning("No Avalon post task script found.")
        return

    # Collect and exec plugins (scripts)
    #
    script_dir = os.path.dirname(__file__)
    modules = {
        module.__name__: module
        for module in avalon.lib.modules_from_path(script_dir)
    }

    for name in script_names.split(";"):
        script = modules.get(name)
        if script:
            log.info("Run Avalon post task script: %s" % name)
            try:
                script.__main__(*args)
            except Exception as error:
                pyblish.lib.extract_traceback(error, script.__file__)
                message = "{p.__name__} Error: {e} -- {e.traceback}"
                log.error(message.format(p=script, e=error))
                # Fail the task if an error is raised, and stop running
                # the rest of the scripts.
                RepositoryUtils.FailTasks(job, [task])
                break
def render_argument(self):
    """Generate command line arguments for render executable.

    Returns:
        (str): arguments to add to render executable.

    """
    # Read tile config file. This file is in a format compatible with
    # Draft Tile Assembler data (see the sample after this function).
    data = {}
    with open(self.config_file, "rU") as f:
        for text in f:
            # Parse the key-value pair, removing white-space around
            # the entries.
            info = [x.strip() for x in text.split("=", 1)]
            if len(info) > 1:
                try:
                    data[str(info[0])] = info[1]
                except Exception as e:
                    # Should never be called.
                    self.FailRender(
                        "Cannot parse config file: {}".format(e))

    # Get output file. We support only EXRs now.
    output_file = data["ImageFileName"]
    output_file = RepositoryUtils.CheckPathMapping(output_file)
    output_file = self.process_path(output_file)
    """
    _, ext = os.path.splitext(output_file)
    if "exr" not in ext:
        self.FailRender(
            "[{}] Only EXR format is supported for now.".format(ext))
    """
    tile_info = []
    for tile in range(int(data["TileCount"])):
        tile_info.append({
            "filepath": data["Tile{}".format(tile)],
            "pos_x": int(data["Tile{}X".format(tile)]),
            "pos_y": int(data["Tile{}Y".format(tile)]),
            "height": int(data["Tile{}Height".format(tile)]),
            "width": int(data["Tile{}Width".format(tile)])
        })

    # FFmpeg doesn't support tile coordinates at the moment.
    # arguments = self.tile_completer_ffmpeg_args(
    #     int(data["ImageWidth"]), int(data["ImageHeight"]),
    #     tile_info, output_file)

    arguments = self.tile_oiio_args(
        int(data["ImageWidth"]), int(data["ImageHeight"]),
        tile_info, output_file)
    self.LogInfo(
        "Using arguments: {}".format(" ".join(arguments)))
    self.tiles = tile_info
    return " ".join(arguments)
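# For reference, the tile config file parsed above is a flat key=value
# text file. A hypothetical sample matching the keys the parser reads
# (ImageFileName, ImageWidth/ImageHeight, TileCount, and per-tile
# Tile{N}, Tile{N}X/Y/Width/Height entries):
#
#   ImageFileName=/renders/shot010_beauty.exr
#   ImageWidth=1920
#   ImageHeight=1080
#   TileCount=2
#   Tile0=/renders/tiles/shot010_tile0.exr
#   Tile0X=0
#   Tile0Y=0
#   Tile0Width=960
#   Tile0Height=1080
#   Tile1=/renders/tiles/shot010_tile1.exr
#   Tile1X=960
#   Tile1Y=0
#   Tile1Width=960
#   Tile1Height=1080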
def MapAndCleanPath(self, path):
    path = RepositoryUtils.CheckPathMapping(path)
    if SystemUtils.IsRunningOnWindows():
        path = path.replace("/", "\\")
        # Restore a UNC prefix that path mapping may have collapsed.
        if path.startswith("\\") and not path.startswith("\\\\"):
            path = "\\" + path

    extension = os.path.splitext(path)[1]
    if extension == ".project":
        tempSceneDirectory = self.CreateTempDirectory(
            "thread" + str(self.GetThreadNumber()))
        tempSceneFileName = os.path.basename(path)
        tempSceneFilename = os.path.join(tempSceneDirectory,
                                         tempSceneFileName)
        RepositoryUtils.CheckPathMappingInFileAndReplace(
            path, tempSceneFilename, "", "")
        path = tempSceneFilename

    return path.replace("\\", "/")
def OnJobSubmitted(self, job):
    valid = self.GetIntegerConfigEntryWithDefault("ValidPriority", 80)
    penalty = self.GetIntegerConfigEntryWithDefault("Penalty", 70)

    if job.JobPriority > valid:
        job.JobPriority = penalty
        RepositoryUtils.SaveJob(job)
        print(
            "Job submitted with priority over %d. Fall back to %d, Bam !"
            % (valid, penalty))
def process(job_ids, priority):
    jobIds = parse_job_ids(job_ids)
    if not jobIds:
        scriptDialog.ShowMessageBox("Empty Job List!", "Error")
        return

    jobs = list()
    for jobId in jobIds:
        job = RepositoryUtils.GetJob(jobId, True)
        if job is None:
            scriptDialog.ShowMessageBox("Job not found: %s" % jobId,
                                        "Error")
            return
        jobs.append(job)

    for job in jobs:
        job.JobPriority = priority
        RepositoryUtils.SaveJob(job)

    return True
def get_script_render_arguments(self):
    """
    Generates the render arguments for the Octane lua script workflow.
    Octane 2 and later.

    :return: a list of commandline arguments
    """
    output_folder = self.GetPluginInfoEntryWithDefault("OutputFolder", "")
    output_folder = PathUtils.ToPlatformIndependentPath(
        RepositoryUtils.CheckPathMapping(output_folder))

    lua_script = os.path.join(self.GetPluginDirectory(),
                              "DeadlineOctane.lua")

    filename_template = self.GetPluginInfoEntryWithDefault(
        "FilenameTemplate", "")
    file_format = self.GetPluginInfoEntryWithDefault("FileFormat", "png8")

    render_target_ocs = self.GetPluginInfoEntryWithDefault(
        "RenderTargetOCS", "")
    render_target_orbx = self.GetPluginInfoEntryWithDefault(
        "RenderTargetORBX", "")

    render_target = ""
    if render_target_ocs:
        render_target = render_target_ocs
    elif render_target_orbx:
        render_target = render_target_orbx

    render_args = [
        '-q',
        '--script', lua_script,
        '-a', filename_template,
        '-a', output_folder,
        '-a', file_format,
        '-a', str(self.GetStartFrame()),
        '-a', str(self.GetEndFrame()),
        '-a', render_target,
        '--stop-after-script',
    ]

    sample = self.GetIntegerPluginInfoEntryWithDefault(
        "OverrideSampling", 0)
    if sample > 0:
        render_args.extend(['-s', str(sample)])

    if render_target:
        render_args.extend(['-t', render_target])

    return render_args
def PostRenderTasks(self):
    slaveInfo = RepositoryUtils.GetSlaveInfo(self.GetSlaveName(), True)

    # Clean up the temp file if we did path mapping on the vrscene file.
    if self.GetBooleanConfigEntryWithDefault(
            "EnableVrscenePathMapping",
            True) or slaveInfo.IsAWSPortalInstance:
        tempDir = os.path.dirname(self.tempSceneFilename)
        for fileName in os.listdir(tempDir):
            os.remove(os.path.join(tempDir, fileName))

    self.LogInfo("Finished V-Ray Task")
def pype(deadlinePlugin):
    """Remaps `PYPE_METADATA_FILE` and `PYPE_PYTHON_EXE` environment vars.

    `PYPE_METADATA_FILE` is used on the farm to point to rendered data.
    This path originates on the platform from which the job was
    published. To be able to publish on a different platform, this path
    needs to be remapped.

    `PYPE_PYTHON_EXE` can be used to specify a custom location of the
    python interpreter to use for Pype. This is also remapped if
    present, even though it probably doesn't make much sense.

    Arguments:
        deadlinePlugin: Deadline job plugin passed by Deadline

    """
    job = deadlinePlugin.GetJob()
    # PYPE should be here, not OPENPYPE - backward compatibility!!
    pype_metadata = job.GetJobEnvironmentKeyValue("PYPE_METADATA_FILE")
    pype_python = job.GetJobEnvironmentKeyValue("PYPE_PYTHON_EXE")

    # Test if it is a Pype publish job.
    if pype_metadata:
        pype_metadata = RepositoryUtils.CheckPathMapping(pype_metadata)
        if platform.system().lower() == "linux":
            pype_metadata = pype_metadata.replace("\\", "/")

        print("- remapping PYPE_METADATA_FILE: {}".format(pype_metadata))
        job.SetJobEnvironmentKeyValue("PYPE_METADATA_FILE", pype_metadata)
        deadlinePlugin.SetProcessEnvironmentVariable("PYPE_METADATA_FILE",
                                                     pype_metadata)

    if pype_python:
        pype_python = RepositoryUtils.CheckPathMapping(pype_python)
        if platform.system().lower() == "linux":
            pype_python = pype_python.replace("\\", "/")

        print("- remapping PYPE_PYTHON_EXE: {}".format(pype_python))
        job.SetJobEnvironmentKeyValue("PYPE_PYTHON_EXE", pype_python)
        deadlinePlugin.SetProcessEnvironmentVariable("PYPE_PYTHON_EXE",
                                                     pype_python)

    deadlinePlugin.ModifyCommandLineCallback += pype_command_line
def __main__():
    loglist = []
    for job in (tuple(RepositoryUtils.GetJobs(True)) +
                tuple(RepositoryUtils.GetDeletedJobs())):
        stats = JobUtils.CalculateJobStatistics(
            job, RepositoryUtils.GetJobTasks(job, True))
        loglist.append('\t'.join((
            job.JobId,
            job.JobUserName,
            datetime(
                job.JobCompletedDateTime.Year,
                job.JobCompletedDateTime.Month,
                job.JobCompletedDateTime.Day,
                job.JobCompletedDateTime.Hour,
                job.JobCompletedDateTime.Minute,
                job.JobCompletedDateTime.Second,
                # .NET ticks are 100 ns; extract the microseconds.
                job.JobCompletedDateTime.Ticks % 10**7 // 10,
            ).isoformat(),
            str(stats.TotalTaskRenderTime.Ticks),
        )) + '\n')

    with open('log_api.csv', 'w') as output:
        output.writelines(loglist)

    return None
def JobsToCheck(self):
    """Returns the active jobs to evaluate."""
    jobs = RepositoryUtils.GetJobsInState("Active")

    if self.verLog:
        self.LogInfo("{0} job(s) in active state".format(len(jobs)))

    for job in sorted(jobs, key=lambda x: x.JobPriority, reverse=True):
        job_has_machine_whitelist = \
            job.JobWhitelistFlag and len(job.JobListedSlaves) > 0

        if not job_has_machine_whitelist and not job.IsCorrupted():
            yield job
def process(frames):
    # parse_frames is sketched below.
    frame_nums = parse_frames(frames)
    if not frame_nums:
        scriptDialog.ShowMessageBox("Empty Frame List!", "Error")
        return

    jobIds = MonitorUtils.GetSelectedJobIds()
    for jobId in jobIds:
        job = RepositoryUtils.GetJob(jobId, True)
        tasks = RepositoryUtils.GetJobTasks(job, True)

        resume = list()
        for task in tasks:
            if task.TaskStatus != "Suspended":
                continue
            task_frames = set(task.TaskFrameList)
            if frame_nums.intersection(task_frames):
                resume.append(task)

        RepositoryUtils.ResumeTasks(job, resume)

    return True
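# parse_frames is called above but defined elsewhere. A runnable
# sketch of the assumed contract (an assumption, not the original):
# turn a frame-list string like "1-3,7" into a set of ints, so it can
# be intersected with each task's frame list.
def parse_frames_sketch(frames):
    frame_nums = set()
    for part in frames.replace(" ", "").split(","):
        if not part:
            continue
        if "-" in part:
            start, end = part.split("-", 1)
            frame_nums.update(range(int(start), int(end) + 1))
        else:
            frame_nums.add(int(part))
    return frame_nums


assert parse_frames_sketch("1-3, 7") == {1, 2, 3, 7}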
def MarkAsDeleted(self, workerSettings):
    key = self.GetConfigEntryWithDefault("SherpaDeletedKey",
                                         "Sherpa_Deleted")
    value = workerSettings.GetSlaveExtraInfoKeyValue(key)

    if not value:
        if self.verLog:
            self.LogInfo(
                "Saving deleted as extra info key/value pair: {0} (key) + {1} (value)"
                .format(key, "True"))

        # Avoid shadowing the "dict" builtin.
        extra_info = workerSettings.SlaveExtraInfoDictionary
        extra_info.Add(key, "True")
        workerSettings.SlaveExtraInfoDictionary = extra_info
        RepositoryUtils.SaveSlaveSettings(workerSettings)
def UnearmarkForDeletion(self, workerSettings):
    key = self.GetConfigEntryWithDefault("SherpaDeleteTimestampKey",
                                         "Sherpa_DeleteTimestamp")
    value = workerSettings.GetSlaveExtraInfoKeyValue(key)

    if value is not None:
        if self.verLog:
            self.LogInfo(
                "Removing delete timestamp extra info key/value pair: {0} (key) + {1} (value)"
                .format(key, value))

        # Avoid shadowing the "dict" builtin.
        extra_info = workerSettings.SlaveExtraInfoDictionary
        extra_info.Remove(key)
        workerSettings.SlaveExtraInfoDictionary = extra_info
        RepositoryUtils.SaveSlaveSettings(workerSettings)
def _remove_files_from_directive(directiveLine, additionalFilesToMap):
    directiveLine = directiveLine.strip()
    directiveInfo = directiveLine.split(None, 1)

    # Directive is of the form: #include "PATH"
    if len(directiveInfo) == 2:
        includePath = directiveInfo[1]
        includePath = includePath.strip().strip("\"'")
        includePath = replaceSlashesByOS(includePath)
        includePath = RepositoryUtils.CheckPathMapping(includePath)
        additionalFilesToMap.append(includePath)

        directiveInfo[1] = u'"%s"' % os.path.basename(includePath)
        directiveLine = u" ".join(directiveInfo)

    return directiveLine + u"\r\n"
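# Illustration of the rewrite above (paths are hypothetical):
#
#   in:  #include "/mnt/assets/shaders/skin.vrscene"
#   out: #include "skin.vrscene"
#
# The absolute include path is path-mapped and appended to
# additionalFilesToMap, so the referenced file can later be copied
# next to the scene file that now includes it by basename only.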
def handleStartStop(self, operation, workerNames):
    if len(workerNames) < 1:
        return

    self.InitializeSherpaClient()

    for workerName in workerNames:
        workerSettings = RepositoryUtils.GetSlaveSettings(workerName, True)
        identifierKey = self.GetConfigEntryWithDefault(
            "SherpaIdentifierKey", "Sherpa_ID")
        resourceID = workerSettings.GetSlaveExtraInfoKeyValue(
            identifierKey)

        if self.verLog:
            self.LogInfo("[{0}] Worker's resource ID: {1}".format(
                workerName, resourceID))

        if resourceID:
            if ResourceHasOperation(self.sherpaClient, resourceID,
                                    operation):
                if self.stdLog:
                    self.LogInfo("[{0}] {1} resource ({2})".format(
                        workerName, operation.capitalize(), resourceID))

                if ResourceHasEnabledOperation(self.sherpaClient,
                                               resourceID, operation):
                    if operation == OPERATION_START:
                        StartResources(self.sherpaClient, [resourceID])
                    else:
                        StopResources(self.sherpaClient, [resourceID])
                else:
                    if self.verLog:
                        self.LogInfo(
                            "[{0}] Resource ({1}) does not have enabled operation ({2})"
                            .format(workerName, resourceID, operation))
            else:
                if self.verLog:
                    self.LogInfo(
                        "[{0}] Resource ({1}) does not have operation ({2})"
                        .format(workerName, resourceID, operation))
        else:
            if self.stdLog:
                self.LogInfo(
                    "[{0}] Resource ID not found".format(workerName))