def main():
    pp = Preprocessor()
    print 'processing custom data, computing bows...'
    tdpath = 'dataset/test/sms-data'
    pp.process_custom_data(tdpath)
    
    fm = FeatureModel()
    print 'converting custom data to fvs...'
    fm.compute_custom_fv_matrix('custom')
    
    tdpath = 'bin_data/custom_fv.npy'
    cpath = 'bin_data/mnb-classifier.npy'
    data = np.load('bin_data/custom-data.npy').item()
    
    tester = Tester(tdpath,cpath)
    print 'predicting labels for custom data...'
    results = tester.predict_labels_for_custom_data(data)
    
    with open('output/results.txt','w') as textfile:
        for msg in results:
            line = '%s -> %s\n' % (msg,results[msg])
            textfile.write(line)
    
    print 'Results written to results.txt'
Example no. 2
    def __init__(self, configFile, name, timeout, semaphore, debug = False):
        """ WowTester constructor.

        WowTester constructor: configFile is the path to crab.cfg, name is an identification name for the test, and timeout is the
        time after which the test is stopped.
        """
        Tester.__init__(self, configFile, name+"-wow", timeout, semaphore, debug)
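The docstring above spells out the constructor arguments. A minimal, hypothetical instantiation might look like the sketch below; the config path, test name, timeout, and semaphore size are placeholders rather than values taken from the project:

import threading

semaphore = threading.Semaphore(4)        # limit on concurrently running tests (assumption)
wow_test = WowTester('crab.cfg',          # path to the CRAB configuration file
                     'grid-submit',       # base name; the class appends '-wow' internally
                     timeout=3600,        # stop the test after one hour
                     semaphore=semaphore,
                     debug=True)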
Example no. 3
 def __init__(self, name, params):
   Tester.__init__(self, name, params)
   if os.environ.has_key("MOOSE_MPI_COMMAND"):
     self.mpi_command = os.environ['MOOSE_MPI_COMMAND']
     self.force_mpi = True
   else:
     self.mpi_command = 'mpiexec -host localhost'
     self.force_mpi = False
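The attributes set above are only stored here; a hedged sketch (not the project's code) of how a harness might later fold them into the launch command, with executable and nprocs as illustrative arguments:

def build_command(tester, executable, nprocs=1):
    # Prefix with the MPI launcher when it was forced via MOOSE_MPI_COMMAND
    # or when more than one process is requested.
    if tester.force_mpi or nprocs > 1:
        return '%s -n %d %s' % (tester.mpi_command, nprocs, executable)
    return executable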
Example no. 4
    def __init__(self, name, params):
        Tester.__init__(self, name, params)
        self.tags.append('speedtests')
        self.timeout = max(3600, float(params['max_time']))
        self.check_only = False

        self.params = params
        self.benchmark = None
        self.db = os.environ.get('MOOSE_SPEED_DB', 'speedtests.sqlite')
Example no. 5
  def __init__(self, name, params):
    Tester.__init__(self, name, params)
    if os.environ.has_key("MOOSE_MPI_COMMAND"):
      self.mpi_command = os.environ['MOOSE_MPI_COMMAND']
      self.force_mpi = True
    else:
      self.mpi_command = 'mpiexec -host localhost'
      self.force_mpi = False

    # Handle the special allow_deprecated_until parameter
    if params.isValid('allow_deprecated_until') and params['allow_deprecated_until'] > time.localtime():
      self.specs['cli_args'].append('--allow-deprecated')
Example no. 6
    def __init__(self, name, params):
        Tester.__init__(self, name, params)
        if os.environ.has_key("MOOSE_MPI_COMMAND"):
            self.mpi_command = os.environ['MOOSE_MPI_COMMAND']
            self.force_mpi = True
        else:
            self.mpi_command = 'mpiexec'
            self.force_mpi = False

        # Make sure that either input or command is supplied
        if not (params.isValid('input') or params.isValid('command')):
            raise Exception('Either "input" or "command" must be supplied for a RunApp test')
Example no. 7
File: main.py Project: yarko90/GIT
def main_preparation(conn):
    Create_tables.create_tables(conn)
    Add = Adder(conn)
    number_of_buildings = 5
    Test = Tester(conn, number_of_buildings)
    test_sets_list = Test.test_sets_list

    for t_set in test_sets_list:
        t_set_DB = t_set.split("|")
        Add.add_detector(t_set_DB[0], t_set_DB[1], t_set_DB[2], t_set_DB[3], t_set_DB[4])
    Test.all_dectors_dict = Test.get_all_detectors()
    test_detector_dict = Test.all_dectors_dict
    return test_detector_dict, Test
Example no. 8
def doTest():
	# to load this data I need a certificate,
	#therefore at the moment no validation
	client = 'all'
	logging.debug(request)
	f = request.files['key']
	
	string = f.read()
	logging.debug(string)
	
	keyReference = request.form['keyreference']
	logging.debug(keyReference)
	
	engine = getEngine()
	testSet = getTestSet(engine, client)
	if testSet is None:
		logging.info('client ' + client + ' does not exist or has no test set assigned. ')
		abort(400)
	
	try:
		keyReference = int(keyReference)
	except ValueError:
		#Http code 400: Bad request
		logging.info('keyReference: ' + keyReference + ' is not an integer.')
		abort(400)
	
	if not hasParser(string):
		logging.info('No valid input. ')
		logging.info(string)
		abort(400)
	
	key = parse(string)
	if key is None:
		logging.info('No valid input. ')
		logging.info(string)
		abort(400)	

	keyFormat, keyType = getParam(string)
	if keyFormat is None or keyType is None:
		logging.info('keyFormat: %s and keyType: %s are not valid' % (keyFormat, keyType))
		abort(400)
	
	tester = Tester(testSet, engine, shared)
	id=tester.testSync(keyType, keyFormat, key, keyReference, client)
	threading.Thread(target=tester.testAsync, args=[True]).start()
	#asyncPool.apply_async(testAsync, tester)
	#asyncPool.apply_async(tester.testAsync, [True])
	#apply(tester.testAsync, [True])

	res = Result(engine, id,client).getJSON()
	return res;
Example no. 9
def main():
    started = datetime.now()
    
    tdpath = 'bin_data/testing_fv.npy'
    cpath = 'bin_data/mnb-classifier.npy'
    
    tester = Tester(tdpath,cpath)
    tester.test_classifier()
    
    finished = datetime.now()
    
    print 'Started at: ',started
    print 'Finished at: ',finished
    print 'Time taken: ',(finished-started)
Example no. 10
    def validParams():
        params = Tester.validParams()
        params.addParam('input',              "The input file to use for this test.")
        params.addParam('test_name',          "The name of the test - populated automatically")
        params.addParam('input_switch', '-i', "The default switch used for indicating an input to the executable")
        params.addParam('errors',             ['ERROR', 'command not found', 'erminate called after throwing an instance of'], "The error messages to detect a failed run")
        params.addParam('expect_out',         "A regular expression that must occur in the output in order for the test to be considered passing.")
        params.addParam('match_literal', False, "Treat expect_out as a string not a regular expression.")
        params.addParam('absent_out',         "A regular expression that must be *absent* from the output for the test to pass.")
        params.addParam('should_crash', False, "Indicates that the test is expected to crash or otherwise terminate early")
        params.addParam('executable_pattern', "A test that only runs if the executable name matches the given pattern")
        params.addParam('delete_output_before_running',  True, "Delete pre-existing output files before running test. Only set to False if you know what you're doing!")
        params.addParam('delete_output_folders', True, "Delete output folders before running")

        # RunApp can also run arbitrary commands. If the "command" parameter is supplied
        # it'll be used in lieu of building up the command automatically
        params.addParam('command',            "The command line to execute for this test.")

        # Parallel/Thread testing
        params.addParam('max_parallel', 1000, "Maximum number of MPI processes this test can be run with      (Default: 1000)")
        params.addParam('min_parallel',    1, "Minimum number of MPI processes that this test can be run with (Default: 1)")
        params.addParam('max_threads',    16, "Max number of threads (Default: 16)")
        params.addParam('min_threads',     1, "Min number of threads (Default: 1)")
        params.addParam('allow_warnings',   False, "If the test harness is run with --error, warnings become errors; setting this to true will disable that and run the test without --error")
        params.addParam('redirect_output',  False, "Redirect stdout to files. Necessary when expecting an error when using parallel options")

        params.addParamWithType('allow_deprecated_until', type(time.localtime()), "A test that only runs if current date is less than specified date")

        # Valgrind
        params.addParam('valgrind', 'NORMAL', "Set to (NONE, NORMAL, HEAVY) to determine in which configurations valgrind will run.")

        return params
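A hedged sketch of how the parameter set returned above might be consumed; RunApp is the assumed name of the subclass defining this validParams(), the concrete values are placeholders, and dict-style assignment on the parameter object is an assumption:

params = RunApp.validParams()
params['input'] = 'simple_diffusion.i'    # hypothetical input file
params['expect_out'] = 'Solve Converged'  # regular expression expected in the output
params['max_parallel'] = 4                # cap the MPI process count for this test
tester = RunApp('simple_diffusion_test', params)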
Example no. 11
    def validParams():
        params = Tester.validParams()
        params.addParam('input',              "The input file to use for this test.")
        params.addParam('test_name',          "The name of the test - populated automatically")
        params.addParam('input_switch', '-i', "The default switch used for indicating an input to the executable")
        params.addParam('errors',             ['ERROR', 'command not found', 'erminate called after throwing an instance of'], "The error messages to detect a failed run")
        params.addParam('expect_out',         "A regular expression that must occur in the output in order for the test to be considered passing.")
        params.addParam('match_literal', False, "Treat expect_out as a string not a regular expression.")
        params.addParam('absent_out',         "A regular expression that must be *absent* from the output for the test to pass.")
        params.addParam('should_crash', False, "Indicates that the test is expected to crash or otherwise terminate early")
        params.addParam('executable_pattern', "A test that only runs if the executable name matches the given pattern")
        params.addParam('delete_output_before_running',  True, "Delete pre-existing output files before running test. Only set to False if you know what you're doing!")
        params.addParam('delete_output_folders', True, "Delete output folders before running")

        # RunApp can also run arbitrary commands. If the "command" parameter is supplied
        # it'll be used in lieu of building up the command automatically
        params.addParam('command',            "The command line to execute for this test.")

        # Parallel/Thread testing
        params.addParam('max_parallel', 1000, "Maximum number of MPI processes this test can be run with      (Default: 1000)")
        params.addParam('min_parallel',    1, "Minimum number of MPI processes that this test can be run with (Default: 1)")
        params.addParam('max_threads',    16, "Max number of threads (Default: 16)")
        params.addParam('min_threads',     1, "Min number of threads (Default: 1)")
        params.addParam('redirect_output',  False, "Redirect stdout to files. Necessary when expecting an error when using parallel options")

        params.addParam('allow_warnings',   True, "Whether or not warnings are allowed.  If this is False then a warning will be treated as an error.  Can be globally overridden by setting 'allow_warnings = False' in the testroot file.");
        params.addParam('allow_unused',   True, "Whether or not unused parameters are allowed in the input file.  Can be globally overridden by setting 'allow_unused = False' in the testroot file.");
        params.addParam('allow_override', True, "Whether or not overriding a parameter/block in the input file generates an error.  Can be globally overridden by setting 'allow_override = False' in the testroot file.");
        params.addParam('allow_deprecated', True, "Whether or not deprecated warnings are allowed.  Setting to False will cause deprecation warnings to be treated as test failures.  We do NOT recommend you globally set this permanently to False!  Deprecations are a part of the normal development flow and _SHOULD_ be allowed!")

        # Valgrind
        params.addParam('valgrind', 'NORMAL', "Set to (NONE, NORMAL, HEAVY) to determine in which configurations valgrind will run.")

        return params
Example no. 12
    def __init__(self, name, params):
        Tester.__init__(self, name, params)
        if os.environ.has_key("MOOSE_MPI_COMMAND"):
            self.mpi_command = os.environ['MOOSE_MPI_COMMAND']
            self.force_mpi = True
        else:
            self.mpi_command = 'mpiexec'
            self.force_mpi = False

        # Handle the special allow_deprecated_until parameter
        if params.isValid('allow_deprecated_until') and params['allow_deprecated_until'] > time.localtime():
            self.specs['cli_args'].append('--allow-deprecated')

        # Make sure that either input or command is supplied
        if not (params.isValid('input') or params.isValid('command')):
            raise Exception('Either "input" or "command" must be supplied for a RunApp test')
Example no. 13
  def getValidParams():
    params = Tester.getValidParams()
    params.addRequiredParam('input',      "The input file to use for this test.")
    params.addParam('test_name',          "The name of the test - populated automatically")
    params.addParam('cli_args',       [], "Additional arguments to be passed to the test.")
    params.addParam('input_switch', '-i', "The default switch used for indicating an input to the executable")
    params.addParam('errors',             ['ERROR', 'command not found', 'erminate called after throwing an instance of'], "The error messages to detect a failed run")
    params.addParam('expect_out',         "A regular expression that must occur in the output in order for the test to be considered passing.")
    params.addParam('should_crash', False, "Indicates that the test is expected to crash or otherwise terminate early")

    params.addParam('walltime',           "The max time as pbs understands it")
    params.addParam('job_name',           "The test name as pbs understands it")
    params.addParam('no_copy',            "The tests file as pbs understands it")

    # Parallel/Thread testing
    params.addParam('max_parallel', 1000, "Maximum number of MPI processes this test can be run with      (Default: 1000)")
    params.addParam('min_parallel',    1, "Minimum number of MPI processes that this test can be run with (Default: 1)")
    params.addParam('max_threads',    16, "Max number of threads (Default: 16)")
    params.addParam('min_threads',     1, "Min number of threads (Default: 1)")
    params.addParam('scale_refine',    0, "The number of refinements to do when scaling")

    # Valgrind
    params.addParam('valgrind', 'NORMAL', "Set to (NONE, NORMAL, HEAVY) to determine in which configurations valgrind will run.")

    params.addParam('post_command',       "Command to be run after the MOOSE job is run")

    return params
Example no. 14
  def validParams():
    params = Tester.validParams()
    params.addRequiredParam('input',      "The input file to use for this test.")
    params.addParam('test_name',          "The name of the test - populated automatically")
    params.addParam('skip_test_harness_cli_args', False, "Skip adding global TestHarness CLI Args for this test")
    params.addParam('input_switch', '-i', "The default switch used for indicating an input to the executable")
    params.addParam('errors',             ['ERROR', 'command not found', 'erminate called after throwing an instance of'], "The error messages to detect a failed run")
    params.addParam('expect_out',         "A regular expression that must occur in the output in order for the test to be considered passing.")
    params.addParam('match_literal', False, "Treat expect_out as a string not a regular expression.")
    params.addParam('absent_out',         "A regular expression that must be *absent* from the output for the test to pass.")
    params.addParam('should_crash', False, "Indicates that the test is expected to crash or otherwise terminate early")
    params.addParam('executable_pattern', "A test that only runs if the executable name matches the given pattern")

    params.addParam('walltime',           "The max time as pbs understands it")
    params.addParam('job_name',           "The test name as pbs understands it")
    params.addParam('no_copy',            "The tests file as pbs understands it")

    # Parallel/Thread testing
    params.addParam('max_parallel', 1000, "Maximum number of MPI processes this test can be run with      (Default: 1000)")
    params.addParam('min_parallel',    1, "Minimum number of MPI processes that this test can be run with (Default: 1)")
    params.addParam('max_threads',    16, "Max number of threads (Default: 16)")
    params.addParam('min_threads',     1, "Min number of threads (Default: 1)")
    params.addParam('allow_warnings',   False, "If the test harness is run with --error, warnings become errors; setting this to true will disable that and run the test without --error")

    # Valgrind
    params.addParam('valgrind', 'NORMAL', "Set to (NONE, NORMAL, HEAVY) to determine in which configurations valgrind will run.")

    params.addParam('post_command',       "Command to be run after the MOOSE job is run")

    return params
Example no. 15
  def validParams():
    params = Tester.validParams()
    params.addRequiredParam('input',  "The input file to use for this test.")
    params.addParam('test_name',      "The name of the test - populated automatically")
    params.addParam('expect_out',     "A regular expression that must occur in the output in order for the test to be considered passing.")
    params.addParam('resize_mesh', False, "Resize the input mesh")
    params.addParam('mesh_size',   1, "Resize the input mesh")

    return params
Example no. 16
 def validParams():
     params = Tester.validParams()
     params.addParam('input',              'The input file to use for this test.')
     params.addParam('test_name',          'The name of the test - populated automatically')
     params.addParam('cumulative_dur', 60, 'cumulative time (secs) to run each benchmark')
     params.addParam('min_runs', 40,       'minimum number of runs for each benchmark')
     params.addParam('max_runs', 400,      'maximum number of runs for each benchmark')
     params.addParam('perflog', False,     'true to enable perflog and store its output')
     return params
Example no. 17
 def __init__(self, name, params):
   Tester.__init__(self, name, params)
   self.all_files = []
   self.__make_differ('output', ExistsDiff.Exists)
   self.__make_differ('csv', OrderedCSVDiffer.OrderedCSV)
   self.__make_differ('UnorderedCsv', UnorderedCSVDiffer.UnorderedCSV)
   self.__make_differ('xml', XMLDiff.XML, {"unordered":False})
   self.__make_differ('UnorderedXml', XMLDiff.XML, {"unordered":True})
   self.__make_differ('text', TextDiff.Text)
   self.__make_differ('image', RAVENImageDiff.ImageDiffer)
   self.required_executable = self.specs['required_executable']
   self.required_libraries = self.specs['required_libraries'].split(' ')\
     if len(self.specs['required_libraries']) > 0 else []
   self.minimum_libraries = self.specs['minimum_library_versions'].split(' ')\
     if len(self.specs['minimum_library_versions']) > 0 else []
   self.required_executable = self.required_executable.replace("%METHOD%",
                                                               os.environ.get("METHOD", "opt"))
   self.specs['scale_refine'] = False
   self.driver = os.path.join(RAVENDIR, 'Driver.py')
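The constructor above registers one differ per supported output type through __make_differ. A purely hypothetical sketch of what such a registration helper could do (this is not the project's implementation): skip the differ when the corresponding parameter is empty, otherwise remember an instance configured for the listed files.

def _make_differ(self, param_name, differ_class, extra=None):
    if self.specs[param_name].strip() == '':
        return                                  # nothing listed under this parameter
    differ_params = dict(extra or {})
    differ_params['output'] = self.specs[param_name]
    self._differs.append(differ_class(param_name, differ_params))   # hypothetical storage list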
Example no. 18
    def __init__(self, name, params):
        Tester.__init__(self, name, params)
        if os.environ.has_key("MOOSE_MPI_COMMAND"):
            self.mpi_command = os.environ['MOOSE_MPI_COMMAND']
            self.force_mpi = True
        else:
            self.mpi_command = 'mpiexec'
            self.force_mpi = False

        # Handle the special allow_deprecated_until parameter
        if params.isValid('allow_deprecated_until') and params[
                'allow_deprecated_until'] > time.localtime():
            self.specs['cli_args'].append('--allow-deprecated')

        # Make sure that either input or command is supplied
        if not (params.isValid('input') or params.isValid('command')):
            raise Exception(
                'Either "input" or "command" must be supplied for a RunApp test'
            )
Example no. 19
  def validParams():
    params = Tester.validParams()
    params.addRequiredParam('input',  "The input file to use for this test.")
    params.addParam('test_name',      "The name of the test - populated automatically")
    params.addParam('expect_out',     "A regular expression that must occur in the output in order for the test to be considered passing.")
    params.addParam('resize_mesh', False, "Resize the input mesh")
    params.addParam('off_diagonal', True, "Also test the off-diagonal Jacobian entries")
    params.addParam('mesh_size',   1, "Resize the input mesh")

    return params
Example no. 20
File: main.py Project: pxm321/raven
def process_result(index, _input_data, output_data):
  """
    This is a callback function that Processes the result of a test.
    @ In, index, int, Index into functions list.
    @ In, _input_data, ignored, the input data passed to the function
    @ In, output_data, Tester.TestResult the output data passed to the function
    @ Out, None
  """
  group = output_data.group
  process_test_name = test_name_list[index]
  if group == Tester.group_success:
    results["pass"] += 1
    for postreq in function_postreq.get(process_test_name, []):
      if postreq in name_to_id:
        job_id = name_to_id[postreq]
        print("Enabling", postreq, job_id)
        run_pool.enable_job(job_id)
    okaycolor = Colors.okay
  elif group == Tester.group_skip:
    results["skipped"] += 1
    print(output_data.message)
    okaycolor = Colors.skip
  else:
    results["fail"] += 1
    failed_list.append(Tester.get_group_name(group)+" "+process_test_name)
    print("Output of '"+process_test_name+"':")
    print(output_data.output)
    print(output_data.message)
    okaycolor = Colors.fail
  number_done = sum(results.values())
  print(' '.join(["({done}/{togo})",
                  "{statcolor}{status:7s}{normcolor}"
                  "({timecolor}{time}{normcolor})"
                  "{namecolor}{test}{normcolor}"])
        .format(done=number_done,
                togo=len(function_list),
                statcolor=okaycolor,
                normcolor=Colors.norm,
                namecolor=Colors.name,
                timecolor=Colors.time,
                status=Tester.get_group_name(group),
                time=sec_format(output_data.runtime),
                test=process_test_name))
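The docstring above fixes the callback's contract; a hedged sketch of how a scheduler might drive it (the loop and names here are illustrative, not the project's scheduler):

def run_serially(functions, callback):
    for index, run_test in enumerate(functions):
        output_data = run_test()             # assumed to return a Tester.TestResult
        callback(index, None, output_data)   # _input_data is ignored by process_result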
Example no. 21
    def getValidParams():
        params = Tester.getValidParams()
        params.addRequiredParam('input',
                                "The input file to use for this test.")
        params.addParam('test_name',
                        "The name of the test - populated automatically")
        params.addParam('cli_args', [],
                        "Additional arguments to be passed to the test.")
        params.addParam(
            'input_switch', '-i',
            "The default switch used for indicating an input to the executable"
        )
        params.addParam('errors', [
            'ERROR', 'command not found',
            'erminate called after throwing an instance of'
        ], "The error messages to detect a failed run")
        params.addParam(
            'expect_out',
            "A regular expression that must occur in the output in order for the test to be considered passing."
        )
        params.addParam(
            'should_crash', False,
            "Indicates that the test is expected to crash or otherwise terminate early"
        )

        params.addParam('walltime', "The max time as pbs understands it")
        params.addParam('job_name', "The test name as pbs understands it")
        params.addParam('no_copy', "The tests file as pbs understands it")

        # Parallel/Thread testing
        params.addParam(
            'max_parallel', 1000,
            "Maximum number of MPI processes this test can be run with      (Default: 1000)"
        )
        params.addParam(
            'min_parallel', 1,
            "Minimum number of MPI processes that this test can be run with (Default: 1)"
        )
        params.addParam('max_threads', 16,
                        "Max number of threads (Default: 16)")
        params.addParam('min_threads', 1, "Min number of threads (Default: 1)")
        params.addParam('scale_refine', 0,
                        "The number of refinements to do when scaling")

        # Valgrind
        params.addParam(
            'valgrind', 'NORMAL',
            "Set to (NONE, NORMAL, HEAVY) to determine in which configurations valgrind will run."
        )

        params.addParam('post_command',
                        "Command to be run after the MOOSE job is run")

        return params
Example no. 22
 def get_valid_params():
     """
     Return a list of valid parameters and their descriptions for this type
     of test.
     @ In, None
     @ Out, params, _ValidParameters, the parameters for this class.
     """
     params = Tester.get_valid_params()
     params.add_required_param('executable', "The executable to use")
     params.add_param('parameters', '', "arguments to the executable")
     return params
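A hedged sketch (illustrative only, not part of the tester shown) of how a runner could turn the two parameters above into the command line it executes:

def command_for(specs):
    command = specs['executable']
    if specs['parameters'].strip():
        command += ' ' + specs['parameters']
    return command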
Example no. 23
 def validParams():
     params = Tester.validParams()
     params.addRequiredParam('input',
                             "The input file to use for this test.")
     params.addParam('output', '',
                     "List of output files that the input should create.")
     params.addParam('csv', '', "List of csv files to check")
     params.addParam('UnorderedCsv', '',
                     "List of unordered csv files to check")
     params.addParam('xml', '', "List of xml files to check")
     params.addParam('UnorderedXml', '',
                     "List of unordered xml files to check")
     params.addParam('xmlopts', '', "Options for xml checking")
     params.addParam('text', '', "List of generic text files to check")
     params.addParam(
         'comment', '-20021986',
         "Character or string denoting comments, all text to the right of the symbol will be ignored in the diff of text files"
     )
     params.addParam('image', '', "List of image files to check")
     params.addParam('rel_err', '',
                     'Relative Error for csv files or floats in xml ones')
     params.addParam('required_executable', '',
                     'Skip test if this executable is not found')
     params.addParam('required_libraries', '',
                     'Skip test if any of these libraries are not found')
     params.addParam(
         'minimum_library_versions', '',
         'Skip test if the library listed is below the supplied version (e.g. minimum_library_versions = \"name1 version1 name2 version2\")'
     )
     params.addParam('skip_if_env', '',
                     'Skip test if this environmental variable is defined')
     params.addParam(
         'test_interface_only', 'False',
         'Test the interface only (without running the driven code)')
     params.addParam(
         'check_absolute_value', 'False',
         'if true, the values are compared in absolute value (abs(trueValue)-abs(testValue))'
     )
     params.addParam(
         'zero_threshold', sys.float_info.min * 4.0,
         'it represents the value below which a float is considered zero (XML comparison only)'
     )
     params.addParam(
         'remove_whitespace', 'False',
         'Removes whitespace before comparing xml node text if True')
     params.addParam(
         'expected_fail', 'False',
         'if true, then the test should fail, and if it passes, it fails.')
     params.addParam('remove_unicode_identifier', 'False',
                     'if true, then remove u in front of a single quote')
     params.addParam(
         'interactive', 'False',
         'if true, then RAVEN will be run with interactivity enabled.')
     return params
Example no. 24
def main():
    logging.basicConfig(filename="result/log.txt",
                        filemode='w',
                        format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                        datefmt='%H:%M:%S',
                        level=logging.DEBUG)
    logging.getLogger().setLevel(logging.INFO)

    parser = argparse.ArgumentParser()

    parser.add_argument('-n_clusters', type=int, default=5)
    parser.add_argument('-n_points', type=int, default=100)

    opt = parser.parse_args()

    tester = Tester(n_gaussian_clusters=opt.n_clusters)

    # Generate data from n 2d multivariate gaussian parameters
    data, labels = tester.generate_2d_gaussian_points(
        how_many_per_each_gaussian=opt.n_points)
    logging.info(" Generated {} data points from {} different 2 dimensional "
                 "multivariate gaussian distributions. ({} data points for "
                 "each cluster.)".format(opt.n_clusters * opt.n_points,
                                         opt.n_clusters, opt.n_points))

    # Raw Data
    utils.draw(data, labels, without_label_color=True, means=None,
               title="Data", save="result/raw.png", show=False)
    utils.draw(data, labels, without_label_color=False, means=tester.means,
               title="Gaussian", save="result/gaussian.png", show=False)

    # KMeans Prediction
    kmeans = KMeans(n_cluster=opt.n_clusters)
    prediction_labels, prediction_centers = kmeans.fit(data)
    utils.draw(data, prediction_labels, without_label_color=False,
               means=prediction_centers, title="KMeans",
               save="result/kmeans.png", show=False)

    # Concatenate results
    png_list = ["result/raw.png", "result/gaussian.png", "result/kmeans.png"]
    utils.concatenate_pngs(png_list, "result/final.png")
Example no. 25
 def get_valid_params():
   """
     Return a list of valid parameters and their descriptions for this type
     of test.
     @ In, None
     @ Out, params, _ValidParameters, return the parameters.
   """
   params = Tester.get_valid_params()
   params.add_required_param('input', "The python file to use for this test.")
   params.add_param('python_command', '', 'The command to use to run python')
   params.add_param('requires_swig2', False, "Requires swig2 for test")
   return params
Example no. 26
def main():

    print "Select Network Type"
    print "1. MLP"
    print "2. Radial Basis"
    nn_type = raw_input()
    nn = NeuralNetwork()

    if nn_type == "1":
        num_inputs = int(raw_input("Enter number of inputs"))
        num_hidden = int(raw_input("Enter number of hidden layers"))
        nn = MLPNN(num_inputs, num_hidden)

    elif nn_type == "2":
        num_inputs = int(raw_input("Enter number of inputs"))
        num_centers = int(raw_input("Enter number of radial basis functions"))
        nn = RBNN(num_inputs, num_centers)

    trainer = Trainer(nn)
    tester = Tester(nn)

    if nn_type == "1":
        trainer.trainMLP(str(num_inputs))
        tester.test(str(num_inputs))
    elif nn_type == "2":
        trainer.trainRB(str(num_inputs))
        tester.testRB(str(num_inputs))
Example no. 27
def main():
    tester = Tester()

    line = SimpleLine(1.5,10)
    train_set = generateRandomPoints(1000)
    valid_set = generateRandomPoints(1000)
    results_valid_set = []
    for (x,y) in valid_set:
        results_valid_set.append(line.isUpperLine(x,y)>0.5)

    train_set_sizes = range(1,1000,10) #[10,50,100,250,500,750,1000]
    learning_rates = [0.1,0.5,1.5]

    for lr in learning_rates:
        precisions = []
        for train_set_size in train_set_sizes:
            perceptron = SigmoidNeuron([2,2],2,lr)
            for index in range(train_set_size):
                (x,y) = train_set[index]
                perceptron.trainLonely([x,y],line.isUpperLine(x,y))
            precisions.append(tester.test(perceptron,valid_set,results_valid_set,ifhalf))
        tester.plot(train_set_sizes,precisions,"Precision vs. number of training samples, lr %.1f" % (lr))
Example no. 28
 def __init__(self, name, params):
   """
     Initializer for the class. Takes a String name and a dictionary params
     @ In, name, string, name of the test.
     @ In, params, dictionary, parameters for the class
     @ Out, None.
   """
   Tester.__init__(self, name, params)
   self.specs['scale_refine'] = False
   self.required_executable = self.specs['required_executable']
   self.required_executable = self.required_executable.replace("%METHOD%",
                                                               os.environ.get("METHOD", "opt"))
   self.required_libraries = self.specs['required_libraries'].split(' ') \
     if len(self.specs['required_libraries']) > 0 else []
   self.required_executable_check_flags = self.specs['required_executable_check_flags'].split(' ')
   self.minimum_libraries = self.specs['minimum_library_versions'].split(' ')\
     if len(self.specs['minimum_library_versions']) > 0 else []
   if self.specs['output'].strip() != '':
     self.check_files = [os.path.join(self.specs['test_dir'], filename)
                         for filename in self.specs['output'].split(" ")]
   else:
     self.check_files = []
Example no. 29
    def runTest(self, trainingFilename, startIndex, endIndex):

        a = Atomizer('learn')
        e = FeaturesExtractor()

        p = InputDataProcessor(a, e, (0.2, 0.8))
        r = InputDataReader(p)
        (X, y) = r.read_features(trainingFilename)

        n = MLPClassifier(solver='lbfgs',
                          alpha=1e-5,
                          hidden_layer_sizes=(5, ),
                          random_state=1)

        n.fit(X, y)

        a = Atomizer('test')
        e = FeaturesExtractor()

        t = Tester(a, e, n, 0.99)

        for i in range(startIndex, endIndex):
            testFilename = "suspicious-document{:05d}".format(i)
            test_file = r.get_file("dataSets/part{}/{}".format(
                1, testFilename))
            b = t.is_plagiarised(test_file)
            if b == False:
                continue
            print('system answer: ' + str(b[0]))

            print('ground truth: ' + str(not not test_file['metadata']))
            with open("wyniki.csv", 'a') as csv_file:
                wr = csv.writer(csv_file)
                row = [
                    trainingFilename, testFilename,
                    str(b[0]),
                    str(not not test_file['metadata'])
                ]
                wr.writerow(row)
Example no. 30
 def validParams():
     params = Tester.validParams()
     params.addParam('input', 'The input file to use for this test.')
     params.addParam('test_name',
                     'The name of the test - populated automatically')
     params.addParam('cumulative_dur', 60,
                     'cumulative time (secs) to run each benchmark')
     params.addParam('min_runs', 40,
                     'minimum number of runs for each benchmark')
     params.addParam('max_runs', 400,
                     'maximum number of runs for each benchmark')
     params.addParam('perflog', False,
                     'true to enable perflog and store its output')
     return params
Example no. 31
 def test(self):
     if 'FZF_DEFAULT_OPTS' in self.yml['test'][0]:
         app_env['FZF_DEFAULT_OPTS'] = self.yml['test'].pop(
             0)['FZF_DEFAULT_OPTS']
     else:
         app_env['FZF_DEFAULT_OPTS'] = ''
     tester = Tester(self.yml['test'])
     result = self.tasks[0].execute(tester=tester)
     while not self._is_job_end(result):
         new_task = Task.clone(self.tasks[-1])
         new_task.update(self.task_switch[result.key], result)
         self.tasks.append(new_task)
         result = self.tasks[-1].execute(tester=tester)
     self.tasks[-1].output(result, tester=tester)
Example no. 32
 def validParams():
     """ Return a list of valid parameters and their descriptions for this type
     of test.
     """
     params = Tester.validParams()
     params.addRequiredParam('input',
                             "The python file to use for this test.")
     if os.environ.get("CHECK_PYTHON3", "0") == "1":
         params.addParam('python_command', 'python3',
                         'The command to use to run python')
     else:
         params.addParam('python_command', 'python',
                         'The command to use to run python')
     params.addParam('requires_swig2', False, "Requires swig2 for test")
     return params
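A hedged illustration of the environment switch above; the subclass name RavenPython and dict-style reads from the returned parameter object are assumptions, not from the source:

import os

os.environ['CHECK_PYTHON3'] = '1'
params = RavenPython.validParams()
# 'python_command' now defaults to 'python3' instead of 'python'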
Example no. 33
 def get_valid_params():
   """
      This method defines the valid parameters for the tester.
      The expected error message should be unique...
     @ In, None
     @ Out, params, _ValidParameters, return the parameters.
   """
   params = Tester.get_valid_params()
   params.add_required_param('input', "The input file to use for this test.")
   params.add_required_param('expect_err',
                             "All or part of the expected error message (unique keyword)")
   params.add_param('required_executable', '', 'Skip test if this executable is not found')
   params.add_param('required_libraries', '', 'Skip test if any of these libraries are not found')
   params.add_param('skip_if_env', '', 'Skip test if this environmental variable is defined')
   params.add_param('test_interface_only', 'False',
                     'Test the interface only (without running the driven code)')
   return params
Example no. 34
 def get_valid_params():
   """
     Returns the parameters that can be used for this class.
     @ In, None
     @ Out, params, _ValidParameters, return the parameters.
   """
   params = Tester.get_valid_params()
   params.add_required_param('input', "The input file to use for this test.")
   params.add_param('output', '', "List of output files that the input should create.")
   params.add_param('csv', '', "List of csv files to check")
   params.add_param('UnorderedCsv', '', "List of unordered csv files to check")
   params.add_param('xml', '', "List of xml files to check")
   params.add_param('UnorderedXml', '', "List of unordered xml files to check")
   params.add_param('xmlopts', '', "Options for xml checking")
   params.add_param('text', '', "List of generic text files to check")
   params.add_param('comment', '-20021986', "Character or string denoting "+
                    "comments, all text to the right of the symbol will be "+
                    "ignored in the diff of text files")
   params.add_param('image', '', "List of image files to check")
   params.add_param('rel_err', '', 'Relative Error for csv files or floats in xml ones')
   params.add_param('required_executable', '', 'Skip test if this executable is not found')
   params.add_param('required_libraries', '', 'Skip test if any of these libraries are not found')
   params.add_param('minimum_library_versions', '',
                    'Skip test if the library listed is below the supplied'+
                    ' version (e.g. minimum_library_versions = \"name1 version1 name2 version2\")')
   params.add_param('skip_if_env', '', 'Skip test if this environmental variable is defined')
   params.add_param('skip_if_OS', '', 'Skip test if the operating system defined')
   params.add_param('test_interface_only', False,
                     'Test the interface only (without running the driven code)')
   params.add_param('check_absolute_value', False,
                    'if true the values are compared to the tolerance '+
                     'directly, instead of relatively.')
   params.add_param('zero_threshold', sys.float_info.min*4.0,
                     'it represents the value below which a float is '+
                    'considered zero (XML comparison only)')
   params.add_param('remove_whitespace', False,
                    'Removes whitespace before comparing xml node text if True')
   params.add_param('remove_unicode_identifier', False,
                     'if true, then remove u in front of a single quote')
   params.add_param('interactive', False,
                    'if true, then RAVEN will be run with interactivity enabled.')
   params.add_param('python3_only', False, 'if true, then only use with Python3')
   params.add_param('ignore_sign', False, 'if true, then only compare the absolute values')
   return params
Example no. 35
 def get_valid_params():
   """
     Returns the valid parameters.
     @ In, None
     @ Out, params, _ValidParameters, return the parameters.
   """
   params = Tester.get_valid_params()
   params.add_required_param('input', "The python file to use for this test.")
   params.add_param('output', '', "List of output files that this input should create.")
   params.add_param('python_command', '', 'The command to use to run python')
   params.add_param('requires_swig2', False, "Requires swig2 for test")
   params.add_param('required_executable', '', 'Skip test if this executable is not found')
   params.add_param('required_libraries', '', 'Skip test if any of these libraries are not found')
   params.add_param('required_executable_check_flags', '', 'Flags to add to '+
                    'the required executable to make sure it runs without '+
                     'failing when testing its existence on the machine')
   params.add_param('minimum_library_versions', '', 'Skip test if the library'+
                    ' listed is below the supplied version (e.g. '+
                    'minimum_library_versions = \"name1 version1 name2 version2\")')
   params.add_param('python3_only', False, 'if true, then only use with Python3')
   return params
Example no. 36
def main():
    transport = TSocket.TSocket("localhost", 9090)
    # transport = TSocket.TSocket("192.168.0.206", 9090)
    transport = TTransport.TBufferedTransport(transport)

    # protocol = TBinaryProtocol.TBinaryProtocol(transport)
    protocol = TCompactProtocol.TCompactProtocol(transport)

    client = Tester.Client(protocol)
    transport.open()

    # warmup
    client.processSmall(gen_small(2))
    client.processMedium(gen_medium(2, 2))
    client.processBig(gen_big(2, 2, 2, 2, 2, 2))

    random.seed(1337)
    results = {}

    for send_func, gen_func, desc in [
        (client.processSmall, gen_small, "small"),
        (client.processMedium, gen_medium, "medium"),
        (client.processBig, gen_big, "big")
    ]:
        for size in [5, 100, 300]:
            print(desc, size, end=' ')
            r = do_test(send_func, gen_func, size)
            results[f"{desc}-{size}"] = r
            print(round(statistics.mean(r) / 1000000.0, 4), "ms")

    OUTDIR = '../../output/thrift/localhost_binary/'
    # OUTDIR = '../../output/thrift/localhost_compact/'
    # OUTDIR = '../../output/thrift/lan_binary/'
    # OUTDIR = '../../output/thrift/lan_compact/'
    os.makedirs(OUTDIR, exist_ok=True)

    for k, v in results.items():
        with open(f"{OUTDIR}{k}.csv", "wt") as f:
            f.write(','.join(map(str, v)))
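The benchmark loop above relies on a do_test helper that is not shown; a hedged sketch of what it might look like, with the repeat count and the nanosecond timing approach as assumptions:

import time

def do_test(send_func, gen_func, size, repeats=100):
    # Build one payload whose sequence lengths all equal size, then time
    # repeated round-trips; the result is a list of per-call latencies in ns.
    payload = gen_func(*([size] * gen_func.__code__.co_argcount))
    timings = []
    for _ in range(repeats):
        start = time.perf_counter_ns()
        send_func(payload)
        timings.append(time.perf_counter_ns() - start)
    return timings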
Example no. 37
    def __init__(self,
                 what_to_grade: WhatToGrade = None,
                 who_to_grade: WhoToGrade = None,
                 getter: Getter = None,
                 tester: Tester = None,
                 recorder: Recorder = None):
        if not what_to_grade:
            what_to_grade = WhatToGrade()
        if not who_to_grade:
            who_to_grade = WhoToGrade()
        if not getter:
            getter = Getter(what_to_grade, who_to_grade)
        if not tester:
            tester = Tester(what_to_grade, who_to_grade)
        if not recorder:
            recorder = Recorder(what_to_grade, who_to_grade)

        self.what_to_grade = what_to_grade
        self.who_to_grade = who_to_grade
        self.getter = getter
        self.tester = tester
        self.recorder = recorder
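This constructor is plain dependency injection: every collaborator can be supplied by the caller or built from defaults. A hedged usage sketch (Grader is the assumed name of the class above, and the stub is purely illustrative):

# Production: let the constructor build the real collaborators.
grader = Grader()

# Test: inject a stub recorder so grading results are captured, not persisted.
class StubRecorder:
    def __init__(self):
        self.records = []

grader_under_test = Grader(recorder=StubRecorder())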
Example no. 38
def gen_big(iSeq1_len, sSeq1_len, iSeq2_len, sSeq2_len, dSeq1_len, dSeq2_len):
    return Tester.BigData(mediumData=gen_medium(iSeq1_len, sSeq1_len),
                          n5=random.randint(1, 1000),
                          n6=random.randint(1, 1000),
                          n7=random.randint(1, 1000),
                          n8=random.randint(1, 1000),
                          n9=random.randint(1, 1000),
                          n10=random.randint(1, 1000),
                          s3=random_string(10),
                          s4=random_string(10),
                          s5=random_string(10),
                          s6=random_string(10),
                          s7=random_string(10),
                          s8=random_string(10),
                          d3=random.uniform(1, 100.0),
                          d4=random.uniform(1, 100.0),
                          d5=random.uniform(1, 100.0),
                          b3=random_bool(),
                          b4=random_bool(),
                          b5=random_bool(),
                          iSeq2=random_i_list(iSeq2_len),
                          sSeq2=random_s_list(sSeq2_len),
                          dSeq1=random_d_list(dSeq1_len),
                          dSeq2=random_d_list(dSeq2_len))
Example no. 39
    def __init__(self,
                 n=20,
                 chromLen=64,
                 generationStrategy=None,
                 selectionStragegy=None,
                 crossoverStrategy=None,
                 mutationStrategy=None):

        self.numOfChrom = n
        self.chromLen = chromLen
        self.mutationChance = 7  # for now
        self.population = []

        # self.mapa = mapa
        self.t = Tester()
        self.heur = self.t.treasureCount
        self.area = self.t.x + self.t.y

        self.generator = GenerationStrategy.Heuristic
        self.selection = SelectionStrategy.DoubleTrournamentSelection
        self.cross = CrossoverStrategy.Take2Random
        self.mutation = MutationStrategy.RandomResetting

        self.generate()
Example no. 40
 def get_valid_params():
     params = Tester.get_valid_params()
     params.add_required_param('input',
                               "The input file to use for this test.")
     params.add_required_param('workingDir', "The working directory")
     #params.add_param('executable','',"Name of compiled executable to run.")
     params.add_param('output', '',
                      "List of output files that the input should create.")
     params.add_param('text', '', "List of text files to check.")
     params.add_param('dymola_mats', '', "List of text files to check.")
     params.add_param('numeric_text', '',
                      "List of text files with relative numbers to check.")
     params.add_param(
         'comment', 'asdfghjkl',
         "Characters after which entries should be ignored in text checks.")
     params.add_param('rel_err', '',
                      'Relative Error for csv files or floats in xml ones')
     params.add_param('required_executable', '',
                      'Skip test if this executable is not found')
     params.add_param(
         'zero_threshold', sys.float_info.min * 4.0,
         'it represents the value below which a float is considered zero (XML comparison only)'
     )
     return params
Example no. 41
 def __init__(self, name, params):
     Tester.__init__(self, name, params)
Example no. 42
def main():
    tester = Tester(fitnessFunction, alphabeth, genSize, populationSize,
                    genSize)
    tester.test()
    print "original vector"
    print real
Example no. 43
                stack.append(token)
            elif token == ")":
                while stack and stack[-1] != "(":
                    queue.append(stack.pop())
                if stack and stack[-1] == "(":
                    stack.pop()
                else:
                    raise RuntimeError("PAREN MISMATCH")
        while stack and len(stack) > 0:
            if stack[-1] == "(":
                raise RuntimeError("PAREN MISMATCH")
            else:
                queue.append(stack.pop())
        return " ".join(queue)

test = Tester()

interpreter = Interpreter()

# Basic arithmetic
test.assert_equals(interpreter.input("1 + 1"), 2, "1 + 1")
test.assert_equals(interpreter.input("2 - 1"), 1, "2 - 1")
test.assert_equals(interpreter.input("2 * 3"), 6, "2 * 3")
test.assert_equals(interpreter.input("8 / 4"), 2, "8 / 4")
test.assert_equals(interpreter.input("7 % 4"), 3, "7 % 4")

# Variables
test.assert_equals(interpreter.input("x = 1"), 1, "x = 1")
test.assert_equals(interpreter.input("x"), 1, "x")
test.assert_equals(interpreter.input("x + 3"), 4, "x + 3")
test.expect_error("input: 'y'", lambda: interpreter.input("y"))
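The assertions above assume a small test helper; a hedged, minimal stand-in (the real Tester provided by the kata framework is richer than this):

class Tester:
    def assert_equals(self, actual, expected, msg=''):
        status = 'PASS' if actual == expected else 'FAIL'
        print('%s %s: got %r, expected %r' % (status, msg, actual, expected))

    def expect_error(self, msg, func):
        try:
            func()
            print('FAIL %s: no error raised' % msg)
        except Exception:
            print('PASS %s' % msg)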
Example no. 44
        raise Exception(
            'add continue_train flag to load pretrain embedding...')
    trainer.model.save_embeddings()
    exit(0)
if params.train_flag:
    print('[begin training]')
    trainer.run()

# '''
#     test
# '''
if params.test_flag:
    print('[begin testing]')

    model_load_name = trainer.save_best_name
    if torch.cuda.is_available():
        model.load_state_dict(torch.load(model_load_name))
    else:
        model.load_state_dict(
            torch.load(model_load_name, map_location=torch.device('cpu')))
    # ttype = ['test', '1-1', '1-N', 'N-1', 'N-N']
    ttype = ['test']
    for tt in ttype:
        # try:
        test_data_loader = get_data_loader(params, tt)
        ent_tot, rel_tot = dataset_param(params.data)
        tester = Tester(params, ent_tot, rel_tot, model, test_data_loader)
        tester.test_run(mode='test')
        # except Exception as e:
        #     print('no test data {}...'.format(tt))
Example no. 45
    def validParams():
        params = Tester.validParams()
        params.addParam('input', "The input file to use for this test.")
        params.addParam('test_name',
                        "The name of the test - populated automatically")
        params.addParam(
            'input_switch', '-i',
            "The default switch used for indicating an input to the executable"
        )
        params.addParam('errors', [
            'ERROR', 'command not found',
            'terminate called after throwing an instance of'
        ], "The error messages to detect a failed run")
        params.addParam(
            'expect_out',
            "A regular expression or literal string that must occur in the output in order for the test to be considered passing (see match_literal)."
        )
        params.addParam(
            'match_literal', False,
            "Treat expect_out as a string not a regular expression.")
        params.addParam(
            'absent_out',
            "A regular expression that must be *absent* from the output for the test to pass."
        )
        params.addParam(
            'should_crash', False,
            "Indicates that the test is expected to crash or otherwise terminate early"
        )
        params.addParam(
            'executable_pattern',
            "A test that only runs if the executable name matches the given pattern"
        )
        params.addParam(
            'delete_output_before_running', True,
            "Delete pre-existing output files before running test. Only set to False if you know what you're doing!"
        )
        params.addParam('delete_output_folders', True,
                        "Delete output folders before running")

        # RunApp can also run arbitrary commands. If the "command" parameter is supplied
        # it'll be used in lieu of building up the command automatically
        params.addParam('command',
                        "The command line to execute for this test.")

        # Parallel/Thread testing
        params.addParam(
            'max_parallel', 1000,
            "Maximum number of MPI processes this test can be run with      (Default: 1000)"
        )
        params.addParam(
            'min_parallel', 1,
            "Minimum number of MPI processes that this test can be run with (Default: 1)"
        )
        params.addParam('max_threads', 16,
                        "Max number of threads (Default: 16)")
        params.addParam('min_threads', 1, "Min number of threads (Default: 1)")
        params.addParam(
            'redirect_output', False,
            "Redirect stdout to files. Necessary when expecting an error when using parallel options"
        )

        params.addParam(
            'allow_warnings', True,
            "Whether or not warnings are allowed.  If this is False then a warning will be treated as an error.  Can be globally overridden by setting 'allow_warnings = False' in the testroot file."
        )
        params.addParam(
            'allow_unused', True,
            "Whether or not unused parameters are allowed in the input file.  Can be globally overridden by setting 'allow_unused = False' in the testroot file."
        )
        params.addParam(
            'allow_override', True,
            "Whether or not overriding a parameter/block in the input file generates an error.  Can be globally overridden by setting 'allow_override = False' in the testroot file."
        )
        params.addParam(
            'allow_deprecated', True,
            "Whether or not deprecated warnings are allowed.  Setting to False will cause deprecation warnings to be treated as test failures.  We do NOT recommend you globally set this permanently to False!  Deprecations are a part of the normal development flow and _SHOULD_ be allowed!"
        )
        params.addParam(
            'no_error_deprecated', False,
            "Don't pass --error-deprecated on the command line even when running the TestHarness with --error-deprecated"
        )

        # Valgrind
        params.addParam(
            'valgrind', 'NORMAL',
            "Set to (NONE, NORMAL, HEAVY) to determine in which configurations valgrind will run."
        )

        return params
Example no. 46
    base_testers, base_differs = get_testers_and_differs(this_dir)
    if not args.testers_dirs:
        testers_dirs = [
            os.path.join(up_one_dir, "scripts", "TestHarness", "testers")
        ]
    else:
        testers_dirs = args.testers_dirs.split(',')
    testers = {}
    differs = {}
    for testers_dir in testers_dirs:
        new_testers, new_differs = get_testers_and_differs(testers_dir)
        testers.update(new_testers)
        differs.update(new_differs)
    testers.update(base_testers)
    differs.update(base_differs)
    Tester.add_non_default_run_type("heavy")
    if args.add_non_default_run_types is not None:
        non_default_run_types = args.add_non_default_run_types.split(",")
        for ndrt in non_default_run_types:
            Tester.add_non_default_run_type(ndrt)

    if args.list_testers:
        print("Testers:")
        for tester_name, tester in testers.items():
            print("Tester:", tester_name)
            print(tester.get_valid_params())
            print()
        print("Differs:")
        for differ_name, differ in differs.items():
            print("Differ:", differ_name)
            print(differ.get_valid_params())
Example no. 47
		resetDB()
		initAllTests(engine, shared)
		exit()

	if options.result:
		result = Result(engine, options.result, client)
		print result.getAll()
		exit()
	
	if options.file:
		cores = min(config.getint('keycheck', 'NumberCores'), len(filenames))	
		pool = ExitPool(cores, initWorker, [testSet, client, options.makePerm, options.verbose, shared], exitWorker)
		result=pool.map(test, filenames, chunksize=1)	
		pool.close()
		pool.join()
		
	
	if options.globalTest:
		tester = Tester(testSet, engine, shared)
		result=[doTest(tester, None, None, None, None, client, options.makePerm, options.verbose, engine)]
		tester.release()
		
	for s in result:
		print s
			



	

Example no. 48
 def validParams():
     params = Tester.validParams()
     params.addRequiredParam('command',      "The command line to execute for this test.")
     params.addParam('test_name',          "The name of the test - populated automatically")
     return params
Example no. 49
 def __init__(self, name, params):
   Tester.__init__(self, name, params)
   self.command = params['command']
Example no. 50
 def validParams():
   params = Tester.validParams()
   params.addRequiredParam('command',      "The command line to execute for this test.")
   params.addParam('test_name',          "The name of the test - populated automatically")
   return params
Example no. 51
    def validParams():
        params = Tester.validParams()
        params.addRequiredParam('input',
                                "The input file to use for this test.")
        params.addParam('test_name',
                        "The name of the test - populated automatically")
        params.addParam(
            'skip_test_harness_cli_args', False,
            "Skip adding global TestHarness CLI Args for this test")
        params.addParam(
            'input_switch', '-i',
            "The default switch used for indicating an input to the executable"
        )
        params.addParam('errors', [
            'ERROR', 'command not found',
            'erminate called after throwing an instance of'
        ], "The error messages to detect a failed run")
        params.addParam(
            'expect_out',
            "A regular expression that must occur in the output in order for the test to be considered passing."
        )
        params.addParam(
            'match_literal', False,
            "Treat expect_out as a string not a regular expression.")
        params.addParam(
            'should_crash', False,
            "Indicates that the test is expected to crash or otherwise terminate early"
        )
        params.addParam(
            'executable_pattern',
            "A test that only runs if the executable name matches the given pattern"
        )

        params.addParam('walltime', "The max time as pbs understands it")
        params.addParam('job_name', "The test name as pbs understands it")
        params.addParam('no_copy', "The tests file as pbs understands it")

        # Parallel/Thread testing
        params.addParam(
            'max_parallel', 1000,
            "Maximum number of MPI processes this test can be run with      (Default: 1000)"
        )
        params.addParam(
            'min_parallel', 1,
            "Minimum number of MPI processes that this test can be run with (Default: 1)"
        )
        params.addParam('max_threads', 16,
                        "Max number of threads (Default: 16)")
        params.addParam('min_threads', 1, "Min number of threads (Default: 1)")
        params.addParam(
            'allow_warnings', False,
            "If the test harness is run with --error, warnings become errors; setting this to true will disable that and run the test without --error"
        )

        # Valgrind
        params.addParam(
            'valgrind', 'NORMAL',
            "Set to (NONE, NORMAL, HEAVY) to determine in which configurations valgrind will run."
        )

        params.addParam('post_command',
                        "Command to be run after the MOOSE job is run")

        return params
Example no. 52
 def __init__(self, configFile, name, timeout, semaphore, debug = False):
     Tester.__init__(self, configFile, name+"-linear", timeout, semaphore, debug)