def create_run_for_input_file(
    self, input_file, options, property_file, required_files_pattern, append_file_tags
):
    """Create a Run from a direct definition of the main input file (without task definition)"""
    source_dir = os.path.dirname(input_file)
    # The main input file comes first; files matched by <append> tags follow.
    all_inputs = [input_file]
    for tag in append_file_tags:
        all_inputs += self.expand_filename_pattern(
            tag.text, source_dir, sourcefile=input_file
        )

    run = Run(
        input_file,
        util.get_files(all_inputs),  # expand directories to get their sub-files
        options,
        self,
        property_file,
        required_files_pattern,
    )

    if not run.propertyfile:
        return run

    run.properties = [result.Property.create(run.propertyfile, allow_unknown=False)]
    prop = run.properties[0]

    known_verdicts = result.expected_results_of_file(input_file)
    if prop.name in known_verdicts:
        run.expected_results[prop.filename] = known_verdicts[prop.name]
    # We do not check here if there is an expected result for the given propertyfile
    # like we do in create_run_from_task_definition, to keep backwards compatibility.
    return run
def create_run_for_input_file(
    self,
    input_file,
    options,
    local_propertytag,
    required_files_pattern,
    append_file_tags,
):
    """Create a Run from a direct definition of the main input file (without task definition)"""
    source_dir = os.path.dirname(input_file)
    # The main input file comes first; files matched by <append> tags follow.
    all_inputs = [input_file]
    for tag in append_file_tags:
        all_inputs += self.expand_filename_pattern(
            tag.text, source_dir, sourcefile=input_file
        )

    run = Run(
        input_file,
        util.get_files(all_inputs),  # expand directories to get their sub-files
        options,
        self,
        local_propertytag,
        required_files_pattern,
    )

    if not run.propertyfile:
        return run

    run.properties = [result.Property.create(run.propertyfile, allow_unknown=False)]
    prop = run.properties[0]

    known_verdicts = result.expected_results_of_file(input_file)
    if prop.name in known_verdicts:
        run.expected_results[prop.filename] = known_verdicts[prop.name]
    # We do not check here if there is an expected result for the given propertyfile
    # like we do in create_run_from_task_definition, to keep backwards compatibility.

    if run.propertytag.get("expectedverdict"):
        # Expected-verdict filtering is unsupported without a task-definition
        # file; warn about this only once per process.
        global _WARNED_ABOUT_UNSUPPORTED_EXPECTED_RESULT_FILTER
        if not _WARNED_ABOUT_UNSUPPORTED_EXPECTED_RESULT_FILTER:
            _WARNED_ABOUT_UNSUPPORTED_EXPECTED_RESULT_FILTER = True
            logging.warning(
                "Ignoring filter based on expected verdict "
                "for tasks without task-definition file. "
                "Expected verdicts for such tasks will be removed in BenchExec 3.0 "
                "(cf. https://github.com/sosy-lab/benchexec/issues/439)."
            )

    return run
def cmdline(self, executable, options, tasks, propertyfile, rlimits):
    """Compose the command line: the tool executable, the user-supplied
    options, and every file ending in ".java" among the (expanded) task files.
    propertyfile and rlimits are accepted but not used here."""
    java_sources = []
    for task_file in util.get_files(tasks):
        if task_file.endswith(".java"):
            java_sources.append(task_file)
    return [executable] + options + java_sources
def __init__(self, sourcefiles, fileOptions, runSet, propertyfile=None, required_files_patterns=[]): assert sourcefiles self.identifier = sourcefiles[0] # used for name of logfile, substitution, result-category self.sourcefiles = util.get_files(sourcefiles) # expand directories to get their sub-files self.runSet = runSet self.specific_options = fileOptions # options that are specific for this run self.log_file = runSet.log_folder + os.path.basename(self.identifier) + ".log" self.result_files_folder = os.path.join(runSet.result_files_folder, os.path.basename(self.identifier)) self.required_files = set() rel_sourcefile = os.path.relpath(self.identifier, runSet.benchmark.base_dir) for pattern in required_files_patterns: this_required_files = runSet.expand_filename_pattern(pattern, runSet.benchmark.base_dir, rel_sourcefile) if not this_required_files: logging.warning( "Pattern %s in requiredfiles tag did not match any file for task %s.", pattern, self.identifier ) self.required_files.update(this_required_files) # lets reduce memory-consumption: if 2 lists are equal, do not use the second one self.options = ( runSet.options + fileOptions if fileOptions else runSet.options ) # all options to be used when executing this run substitutedOptions = substitute_vars(self.options, runSet, self.identifier) if substitutedOptions != self.options: self.options = substitutedOptions # for less memory again self.propertyfile = propertyfile or runSet.propertyfile def log_property_file_once(msg): if not self.propertyfile in _logged_missing_property_files: _logged_missing_property_files.add(self.propertyfile) logging.warning(msg) # replace run-specific stuff in the propertyfile and add it to the set of required files if self.propertyfile is None: log_property_file_once("No propertyfile specified. Score computation will ignore the results.") else: # we check two cases: direct filename or user-defined substitution, one of them must be a 'file' # TODO: do we need the second case? 
it is equal to previous used option "-spec ${sourcefile_path}/ALL.prp" expandedPropertyFiles = util.expand_filename_pattern(self.propertyfile, self.runSet.benchmark.base_dir) substitutedPropertyfiles = substitute_vars([self.propertyfile], runSet, self.identifier) assert len(substitutedPropertyfiles) == 1 if expandedPropertyFiles: if len(expandedPropertyFiles) > 1: log_property_file_once( "Pattern {0} for sourcefile {1} in propertyfile tag matches more than one file. Only {2} will be used.".format( self.propertyfile, self.identifier, expandedPropertyFiles[0] ) ) self.propertyfile = expandedPropertyFiles[0] elif substitutedPropertyfiles and os.path.isfile(substitutedPropertyfiles[0]): self.propertyfile = substitutedPropertyfiles[0] else: log_property_file_once( "Pattern {0} for sourcefile {1} in propertyfile tag did not match any file. It will be ignored.".format( self.propertyfile, self.identifier ) ) self.propertyfile = None if self.propertyfile: self.required_files.add(self.propertyfile) self.properties = result.properties_of_file(self.propertyfile) else: self.properties = [] self.required_files = list(self.required_files) # Copy columns for having own objects in run # (we need this for storing the results in them). self.columns = [Column(c.text, c.title, c.number_of_digits) for c in self.runSet.benchmark.columns] # here we store the optional result values, e.g. memory usage, energy, host name # keys need to be strings, if first character is "@" the value is marked as hidden (e.g., debug info) self.values = collections.OrderedDict() # dummy values, for output in case of interrupt self.status = "" self.cputime = None self.walltime = None self.category = result.CATEGORY_UNKNOWN
def _submit(
    self,
    run,
    limits,
    cpu_model,
    required_files,
    result_files_patterns,
    meta_information,
    priority,
    user_pwd,
    revision,
    counter=0,
):
    """Submit one run to the web service and return a future for its result.

    Builds a multipart request describing the run (file hashes, limits,
    options, patterns), POSTs it to "runs/", and — if the server answers
    412 because some file contents are unknown — uploads the files and
    retries the submission exactly once (tracked via `counter`).

    @param counter: internal retry counter; 0 on the first attempt.
        A second 412 after uploading raises WebClientError.
    @raise WebClientError: on unusable options, persistently missing files,
        or a malformed run-id response.
    """
    params = []
    opened_files = []  # open file handles are passed to the request library

    # Program and required files are first referenced only by SHA-256 hash;
    # the server answers 412 below if it does not know some content yet.
    for programPath in run.sourcefiles:
        norm_path = self._normalize_path_for_cloud(programPath)
        params.append(
            ("programTextHash", (norm_path, self._get_sha256_hash(programPath)))
        )

    for required_file in get_files(required_files):
        norm_path = self._normalize_path_for_cloud(required_file)
        params.append(
            ("requiredFileHash", (norm_path, self._get_sha256_hash(required_file)))
        )

    params.append(("revision", revision or self._revision))

    if run.propertyfile:
        # _add_file_to_params returns the opened handle; keep it so we can
        # close it after the request is sent.
        property_file = self._add_file_to_params(
            params, "propertyText", run.propertyfile
        )
        opened_files.append(property_file)

    # Resource limits are optional; only forward the ones that are set.
    if MEMLIMIT in limits:
        params.append(("memoryLimitation", str(limits[MEMLIMIT])))
    if TIMELIMIT in limits:
        params.append(("timeLimitation", str(limits[TIMELIMIT])))
    if SOFTTIMELIMIT in limits:
        params.append(("softTimeLimitation", str(limits[SOFTTIMELIMIT])))
    if CORELIMIT in limits:
        params.append(("coreLimitation", str(limits[CORELIMIT])))
    if cpu_model:
        params.append(("cpuModel", cpu_model))

    # An explicit empty pattern tells the server "no result files wanted".
    if result_files_patterns:
        for pattern in result_files_patterns:
            params.append(("resultFilesPattern", pattern))
    else:
        params.append(("resultFilesPattern", ""))

    if priority:
        params.append(("priority", priority))

    (invalidOption, files) = self._handle_options(run, params, limits)
    opened_files.extend(files)
    if invalidOption:
        raise WebClientError(
            'Command {0} contains option "{1}" that is not usable with the webclient. '.format(
                run.options, invalidOption
            )
        )

    params.append(("groupId", str(self._group_id)))
    if meta_information:
        params.append(("metaInformation", meta_information))

    # prepare request
    headers = {"Accept": "text/plain"}
    path = "runs/"
    # 412 is an expected status here, not an error (see below).
    (response, statusCode) = self._request(
        "POST",
        path,
        files=params,
        headers=headers,
        expectedStatusCodes=[200, 412],
        user_pwd=user_pwd,
    )

    for opened_file in opened_files:
        opened_file.close()

    # program files or required files given as hash value are not known by the cloud system
    if statusCode == 412:
        # Only one upload-and-retry cycle is allowed; a second 412 means the
        # server still lacks the files even after we uploaded them.
        if counter >= 1:
            raise WebClientError(
                "Files still missing on server for run {0} even after uploading them:\n{1}".format(
                    run.identifier, response
                )
            )
        headers = {
            "Content-Type": "application/octet-stream",
            "Content-Encoding": "deflate",
        }
        filePath = "files/"

        # upload all used program files
        for programPath in run.sourcefiles:
            with open(programPath, "rb") as programFile:
                # level 9 = maximum zlib compression, matching the
                # "deflate" Content-Encoding declared above
                compressedProgramText = zlib.compress(programFile.read(), 9)
                self._request(
                    "POST",
                    filePath,
                    data=compressedProgramText,
                    headers=headers,
                    expectedStatusCodes=[200, 204],
                    user_pwd=user_pwd,
                )

        # upload all required files
        for required_file_path in required_files:
            with open(required_file_path, "rb") as required_file:
                compressed_required_file = zlib.compress(required_file.read(), 9)
                self._request(
                    "POST",
                    filePath,
                    data=compressed_required_file,
                    headers=headers,
                    expectedStatusCodes=[200, 204],
                    user_pwd=user_pwd,
                )

        # retry submission of run
        return self._submit(
            run,
            limits,
            cpu_model,
            required_files,
            result_files_patterns,
            meta_information,
            priority,
            user_pwd,
            revision,
            counter + 1,
        )

    else:
        # Success path: the response body is the run id as UTF-8 text.
        try:
            run_id = response.decode("UTF-8")
        except UnicodeDecodeError as e:
            raise WebClientError(
                "Malformed response from server while submitting run {0}:\n{1}".format(
                    run.identifier, response
                )
            ) from e
        if not VALID_RUN_ID.match(run_id):
            raise WebClientError(
                "Malformed response from server while submitting run {0}:\n{1}".format(
                    run.identifier, run_id
                )
            )
        logging.debug("Submitted run with id %s", run_id)
        return self._create_and_add_run_future(run_id)
def __init__(self, sourcefiles, fileOptions, runSet, propertyfile=None, required_files_patterns=[]): assert sourcefiles self.identifier = sourcefiles[ 0] # used for name of logfile, substitution, result-category self.sourcefiles = util.get_files( sourcefiles) # expand directories to get their sub-files self.runSet = runSet self.specific_options = fileOptions # options that are specific for this run self.log_file = runSet.log_folder + os.path.basename( self.identifier) + ".log" self.result_files_folder = os.path.join( runSet.result_files_folder, os.path.basename(self.identifier)) self.required_files = set() rel_sourcefile = os.path.relpath(self.identifier, runSet.benchmark.base_dir) for pattern in required_files_patterns: this_required_files = runSet.expand_filename_pattern( pattern, runSet.benchmark.base_dir, rel_sourcefile) if not this_required_files: logging.warning( 'Pattern %s in requiredfiles tag did not match any file for task %s.', pattern, self.identifier) self.required_files.update(this_required_files) # lets reduce memory-consumption: if 2 lists are equal, do not use the second one self.options = runSet.options + fileOptions if fileOptions else runSet.options # all options to be used when executing this run substitutedOptions = substitute_vars(self.options, runSet, self.identifier) if substitutedOptions != self.options: self.options = substitutedOptions # for less memory again self.propertyfile = propertyfile or runSet.propertyfile def log_property_file_once(msg): if not self.propertyfile in _logged_missing_property_files: _logged_missing_property_files.add(self.propertyfile) logging.warning(msg) # replace run-specific stuff in the propertyfile and add it to the set of required files if self.propertyfile is None: log_property_file_once( 'No propertyfile specified. Score computation will ignore the results.' ) else: # we check two cases: direct filename or user-defined substitution, one of them must be a 'file' # TODO: do we need the second case? 
it is equal to previous used option "-spec ${sourcefile_path}/ALL.prp" expandedPropertyFiles = util.expand_filename_pattern( self.propertyfile, self.runSet.benchmark.base_dir) substitutedPropertyfiles = substitute_vars([self.propertyfile], runSet, self.identifier) assert len(substitutedPropertyfiles) == 1 if expandedPropertyFiles: if len(expandedPropertyFiles) > 1: log_property_file_once( 'Pattern {0} for sourcefile {1} in propertyfile tag matches more than one file. Only {2} will be used.' .format(self.propertyfile, self.identifier, expandedPropertyFiles[0])) self.propertyfile = expandedPropertyFiles[0] elif substitutedPropertyfiles and os.path.isfile( substitutedPropertyfiles[0]): self.propertyfile = substitutedPropertyfiles[0] else: log_property_file_once( 'Pattern {0} for sourcefile {1} in propertyfile tag did not match any file. It will be ignored.' .format(self.propertyfile, self.identifier)) self.propertyfile = None if self.propertyfile: self.required_files.add(self.propertyfile) self.properties = result.properties_of_file(self.propertyfile) else: self.properties = [] self.required_files = list(self.required_files) # Copy columns for having own objects in run # (we need this for storing the results in them). self.columns = [ Column(c.text, c.title, c.number_of_digits) for c in self.runSet.benchmark.columns ] # here we store the optional result values, e.g. memory usage, energy, host name # keys need to be strings, if first character is "@" the value is marked as hidden (e.g., debug info) self.values = collections.OrderedDict() # dummy values, for output in case of interrupt self.status = "" self.cputime = None self.walltime = None self.category = result.CATEGORY_UNKNOWN