def test_data_files(self):
    """Check that registered data files can be emptied again."""
    reducer = Reducer()
    reducer.append_data_file("AsciiExample.txt")
    # Exactly one file should now be registered.
    self.assertEqual(len(reducer._data_files), 1)
    # Clearing must leave the list empty.
    reducer.clear_data_files()
    self.assertEqual(len(reducer._data_files), 0)
def test_reduction_step(self):
    """Test that passing a ReductionStep object works."""
    reducer = Reducer()
    reducer.append_step(TestReductionStep())
    # Every appended step should execute and yield the sentinel value 5.
    for step in reducer._reduction_steps:
        self.assertEqual(step.execute(reducer, "test2"), 5)
def test_parameter_variation(self):
    """Similar to the previous test, but the algorithm is given by name (string)."""
    reducer = Reducer()
    reducer.append_step("LoadAscii", "AsciiExample.txt", None)
    for step in reducer._reduction_steps:
        step.execute(reducer, "test2")
    # The output workspace must exist after the steps have run.
    self.assertNotEqual(mtd["test2"], None)
    mtd.deleteWorkspace("test2")
def test_pars_variation(self):
    """Variations for parameter specification (keyword arguments)."""
    reducer = Reducer()
    # An algorithm whose mandatory property is NOT InputWorkspace/OutputWorkspace,
    # supplied here via keyword arguments.
    reducer.append_step(LoadAscii, Filename="AsciiExample.txt", OutputWorkspace=None)
    # An algorithm that does take InputWorkspace and OutputWorkspace.
    reducer.append_step(ConvertToHistogram, None, None)
    for step in reducer._reduction_steps:
        step.execute(reducer, "test2")
    # The workspace should have been created by the chain above.
    self.assertNotEqual(mtd["test2"], None)
    mtd.deleteWorkspace("test2")
def test_append_step(self):
    """Test that a Mantid algorithm function can be added to a Reducer object."""
    reducer = Reducer()
    # An algorithm whose mandatory property is NOT InputWorkspace/OutputWorkspace.
    reducer.append_step(LoadAscii, "AsciiExample.txt", None)
    # An algorithm that does take InputWorkspace and OutputWorkspace.
    reducer.append_step(ConvertToHistogram, None, None)
    for step in reducer._reduction_steps:
        step.execute(reducer, "test2")
    # The workspace should have been created by the chain above.
    self.assertNotEqual(mtd["test2"], None)
    mtd.deleteWorkspace("test2")
def __init__(self, working_dir, src, buggy, oracle, tests, golden, asserts, lines, build, configure, config):
    """Set up the repair pipeline inside *working_dir*.

    Builds the tester/localizer/reducer/synthesizer components, then creates
    three working copies of *src* ("validation", "frontend", "backend") that
    share one compilation database, plus an optional "golden" copy when a
    reference version is supplied.

    NOTE(review): the ordering below matters — the validation copy must be
    configured first so its exported compilation DB can be imported by the
    other copies. Do not reorder.
    """
    self.working_dir = working_dir
    self.config = config
    # Independent copies so repair and validation can prune their suites separately.
    self.repair_test_suite = tests[:]
    self.validation_test_suite = tests[:]
    # Scratch area for extracted suspicious expressions.
    extracted = join(working_dir, 'extracted')
    os.mkdir(extracted)
    angelic_forest_file = join(working_dir, 'last-angelic-forest.json')
    tester = Tester(config, oracle, abspath(working_dir))
    self.run_test = tester
    self.get_suspicious_groups = Localizer(config, lines)
    self.reduce = Reducer(config)
    # Two interchangeable synthesis back-ends selected by configuration.
    if self.config['use_semfix_syn']:
        self.synthesize_fix = Semfix_Synthesizer(working_dir, config, extracted, angelic_forest_file)
        self.infer_spec = Semfix_Inferrer(working_dir, config, tester)
    else:
        self.synthesize_fix = Synthesizer(config, extracted, angelic_forest_file)
        self.infer_spec = Inferrer(config, tester, Load(working_dir))
    self.instrument_for_localization = RepairableTransformer(config)
    self.instrument_for_inference = SuspiciousTransformer(config, extracted)
    self.apply_patch = FixInjector(config)
    # Validation copy: configured first; its compilation DB is shared below.
    validation_dir = join(working_dir, "validation")
    shutil.copytree(src, validation_dir, symlinks=True)
    self.validation_src = Validation(config, validation_dir, buggy, build, configure)
    self.validation_src.configure()
    compilation_db = self.validation_src.export_compilation_db()
    self.validation_src.import_compilation_db(compilation_db)
    self.validation_src.initialize()
    # Frontend copy: reuses the validation compilation DB (no reconfigure).
    frontend_dir = join(working_dir, "frontend")
    shutil.copytree(src, frontend_dir, symlinks=True)
    self.frontend_src = Frontend(config, frontend_dir, buggy, build, configure)
    self.frontend_src.import_compilation_db(compilation_db)
    self.frontend_src.initialize()
    # Backend copy: same treatment as the frontend copy.
    backend_dir = join(working_dir, "backend")
    shutil.copytree(src, backend_dir, symlinks=True)
    self.backend_src = Backend(config, backend_dir, buggy, build, configure)
    self.backend_src.import_compilation_db(compilation_db)
    self.backend_src.initialize()
    # Optional golden (reference) copy for oracle comparison.
    if golden is not None:
        golden_dir = join(working_dir, "golden")
        shutil.copytree(golden, golden_dir, symlinks=True)
        self.golden_src = Frontend(config, golden_dir, buggy, build, configure)
        self.golden_src.import_compilation_db(compilation_db)
        self.golden_src.initialize()
    else:
        self.golden_src = None
    self.dump = Dump(working_dir, asserts)
    self.trace = Trace(working_dir)
def test_output_wksp(self):
    """Similar to the previous test, but an explicit output workspace is given."""
    reducer = Reducer()
    # An algorithm whose mandatory property is NOT InputWorkspace/OutputWorkspace.
    reducer.append_step(LoadAscii, "AsciiExample.txt", None)
    # An algorithm that does take InputWorkspace and OutputWorkspace.
    reducer.append_step(ConvertToHistogram, None, None)
    first_step, second_step = reducer._reduction_steps[0], reducer._reduction_steps[1]
    first_step.execute(reducer, "test2")
    # The second step writes to a separately named output workspace.
    second_step.execute(reducer, "test2", "test3")
    # Both workspaces must now exist.
    self.assertNotEqual(mtd["test2"], None)
    self.assertNotEqual(mtd["test3"], None)
    mtd.deleteWorkspace("test2")
    mtd.deleteWorkspace("test3")
import subprocess
import tempfile

# --- Configuration ----------------------------------------------------------
BOARD_SIZE = int(input("Enter the size of the Sudoku Board: "))  # size of the Sudoku board
K_VALUE = int(input("Enter the number of random values to generate: "))  # number of random values generated
PATH_NAME = 'output'  # name of the file to store generated board

# Generate a random Sudoku board using the Generation file.
generator = Generator(PATH_NAME, BOARD_SIZE, K_VALUE)
generator.generate()

# Pass this file name and reduce to CNF using the Reduction file.
reducer = Reducer(PATH_NAME, BOARD_SIZE)
reducer.create_cnf_file()

# Create a Solver to test if the CNF file is Satisfiable or Unsatisfiable.
# Note that you need minisat on your computer to run this.
# Our temporary output file; minisat writes its verdict here.
# (Left open on purpose: the result is read back from it below.)
output_file = tempfile.NamedTemporaryFile()

# Invoke minisat with an argument list instead of a shell string:
# shell=True was unnecessary, and stdout=subprocess.PIPE with
# subprocess.call() is a misuse — the pipe is never read and can deadlock
# if the solver produces enough output. DEVNULL discards it safely.
subprocess.call(
    ["minisat", PATH_NAME + ".cnf", output_file.name],
    stdout=subprocess.DEVNULL,
)

# Print result
def test_bad_alg_name(self):
    """Executing a step built from a non-existent algorithm name must raise RuntimeError."""
    reducer = Reducer()
    reducer.append_step("NotAnAlgorithm")
    # BUG FIX: the original passed (r, "test") as ONE tuple argument, so
    # execute() was invoked with a single positional tuple and failed with
    # TypeError instead of exercising the RuntimeError path. assertRaises
    # takes the callable's arguments unpacked, after the callable itself.
    self.assertRaises(RuntimeError,
                      reducer._reduction_steps[0].execute,
                      reducer, "test")