def make_preprocessor_should_fail(environment, filename):
    """Create a test whose preprocess step must produce the reference errors."""
    setup_preprocess_environment(environment, filename)
    test = Test(environment, filename)
    step = test.add_step("preprocess", step_preprocess)
    step.add_check(create_check_errors_reference(environment))
    return test
def make_c_test(environment, filename):
    """Create a full C test: compile, optionally verify generated assembly,
    then (unless running under memcheck) execute the binary and compare its
    output against the reference."""
    setup_c_environment(environment, filename)
    environment.executable = "%s/%s.exe" % (environment.builddir,
                                            environment.filename)
    ensure_dir(os.path.dirname(environment.executable))

    test = Test(environment, filename)
    compilation = test.add_step("compile", step_compile_c)
    for check in (check_cparser_problems, check_no_errors,
                  check_firm_problems, check_retcode_zero,
                  check_memcheck_output):
        compilation.add_check(check)

    asmchecks = parse_embedded_commands(environment, environment.filename)
    if asmchecks:
        # embedded commands request checks on the generated assembly
        environment.asmfile = "%s/%s.s" % (environment.builddir,
                                           environment.filename)
        ensure_dir(os.path.dirname(environment.asmfile))
        asm = test.add_step("asm", step_compile_c_asm)
        asm.add_checks(asmchecks)

    if environment.memcheck:
        # no execute necessary
        return test

    execution = test.add_step("execute", step_execute)
    execution.add_check(check_retcode_zero)
    execution.add_check(create_check_reference_output(environment))
    return test
def make_cmd_should_work(environment, filename):
    """Create a test whose command file must run and exit with status zero."""
    read_cmds(environment, filename)
    test = Test(environment, filename)
    step = test.add_step("cmd_should_work", step_runcommand)
    step.add_check(check_retcode_zero)
    return test
def make_lit_test(environment, filename):
    """Create an llvm-lit style test from the RUN: lines at the top of *filename*.

    Each RUN: line becomes one test step whose command must exit with code 0.

    Fixes: the source file handle was never closed (now a ``with`` block),
    the exception message had a typo ("Coulnd't"), and the manual step
    counter is replaced by ``enumerate``.

    Raises:
        Exception: when the file does not start with any RUN: lines.
    """
    runlines = []
    with open(filename, "r") as source:
        for line in source:
            match = runmatch.search(line)
            if not match:
                break  # RUN: lines must form a contiguous block at the top
            cmd = match.group(1)
            # Fill in placeholders like %(cc)s
            cmd = cmd.format(cc=environment.cc, input=filename)
            # hack: point helper tools at their in-tree copies.
            # NOTE(review): plain substring replace also rewrites "not"
            # inside longer words/paths -- consider a word-boundary regex.
            cmd = cmd.replace("FileCheck", "scripts/FileCheck")
            cmd = cmd.replace("not", "scripts/not")
            # TODO: handle further replacements
            runlines.append(cmd)
    if not runlines:
        raise Exception("Couldn't find any RUN: lines at beginning of %s"
                        % filename)
    environment.filename = filename
    test = Test(environment, filename)
    for i, cmd in enumerate(runlines):
        runstep = test.add_step("run%d" % i, create_run_step(cmd))
        runstep.add_check(check_retcode_zero)
    return test
def make_cmd_should_warn(environment, filename):
    """Create a test whose command must succeed, report no errors, and emit
    exactly the reference warnings."""
    read_cmds(environment, filename)
    test = Test(environment, filename)
    step = test.add_step("cmd_should_warn", step_runcommand)
    for check in (check_retcode_zero,
                  check_no_errors,
                  create_check_warnings_reference(environment)):
        step.add_check(check)
    return test
def make_preprocessor_should_warn(environment, filename):
    """Create a preprocess-only test that must succeed while producing the
    reference warnings."""
    setup_preprocess_environment(environment, filename)
    test = Test(environment, filename)
    step = test.add_step("preprocess", step_preprocess)
    for check in (check_no_errors,
                  check_retcode_zero,
                  create_check_warnings_reference(environment)):
        step.add_check(check)
    return test
def make_c_should_fail(environment, filename, cflags=""):
    """Create a syntax-only compile test that must produce the reference errors.

    Fix: the compile step was registered twice -- ``add_step`` already
    appends the step to the test (every other factory in this file relies on
    that), so the extra ``test.steps.append(compile)`` made the same step
    run twice.
    """
    setup_c_environment(environment, filename)
    environment.cflags += cflags
    parse_embedded_commands_no_check(environment)
    test = Test(environment, filename)
    compile = test.add_step("compile", step_compile_c_syntax_only)
    compile.add_check(create_check_errors_reference(environment))
    return test
def make_fluffy_should_fail(environment, filename):
    """Create a fluffy test whose compilation must report the expected errors."""
    environment.filename = filename
    environment.executable = "%s/%s.exe" % (environment.builddir,
                                            environment.filename)
    ensure_dir(os.path.dirname(environment.executable))
    test = Test(environment, filename)
    step = test.add_step("compile", step_compile_fluffy)
    step.add_check(check_missing_errors)
    return test
def make_c_should_warn(environment, filename, cflags=" -Wall -W"):
    """Create a syntax-only compile test that must succeed, report no errors,
    and emit exactly the reference warnings."""
    setup_c_environment(environment, filename)
    environment.cflags += cflags
    parse_embedded_commands_no_check(environment)
    test = Test(environment, filename)
    step = test.add_step("compile", step_compile_c_syntax_only)
    for check in (check_retcode_zero,
                  check_no_errors,
                  create_check_warnings_reference(environment)):
        step.add_check(check)
    return test
def run():
    """Wire up the application layers, run the self-tests, and enter the
    main menu loop."""
    repo = Repository()
    ctrl = Controller(repo)
    ui = UI(ctrl)
    tester = Test()
    tester.testAll()
    ui.mainMenu()
def make_fluffy_test(environment, filename):
    """Create a fluffy test: compile the source, then run the produced binary
    and compare its output against the reference."""
    environment.filename = filename
    environment.executable = "%s/%s.exe" % (environment.builddir,
                                            environment.filename)
    ensure_dir(os.path.dirname(environment.executable))

    test = Test(environment, filename)
    compilation = test.add_step("compile", step_compile_fluffy)
    for check in (check_no_errors, check_firm_problems, check_retcode_zero):
        compilation.add_check(check)

    execution = test.add_step("execute", step_execute)
    execution.add_check(check_retcode_zero)
    execution.add_check(create_check_reference_output(environment))
    return test
def save(self, published = False):
    """Persist the entered questions as a Test linked to the lesson whose id
    was typed into the ID entry, then close the window.

    NOTE(review): *published* is accepted but never used in this body --
    confirm whether it should mark the test/lesson as published.
    """
    try:
        store = shelve.open('lesson/store')
    except Exception:
        print("no lessons to link")
        return
    # collect (id, name) of every published lesson currently on the shelf
    lessons = [(lesson._id, lesson.name) for lesson in store.values() if lesson.published]
    store.close()
    self._id2 = self._id.get()  # lesson id from the Entry widget
    self.test = Test(self._id2)
    # each question is a (description, answers) pair
    for d, a in self.questions:
        self.test.add(d, a)
    newlist = [seq[0] for seq in lessons]
    if self._id2 in newlist:
        self.element = str(newlist.index(self._id2))
    else:
        print("no lesson to link")
        return
    # reopen the shelf to fetch the lesson object itself
    store = shelve.open('lesson/store')
    self.lesson = store[self._id2]
    store.close()
    self.lesson.test = True
    self.test._id = self._id2
    self.test.store()
    self.lesson.store()
    self.root.destroy()
def make_perftest(environment, filename, size, check_perf):
    """Create a performance test: compile the program, execute it with *size*
    as command-line argument (when non-zero), and validate the run with the
    supplied *check_perf* check."""
    setup_c_environment(environment, filename)
    environment.executable = "%s/%s.exe" % (environment.builddir,
                                            environment.filename)
    ensure_dir(os.path.dirname(environment.executable))
    if size != 0:
        # pass the problem size to the benchmark binary
        environment.executionargs = " %s" % size

    test = Test(environment, filename)
    compilation = test.add_step("compile", step_compile_c)
    for check in (check_cparser_problems, check_no_errors,
                  check_firm_problems, check_retcode_zero):
        compilation.add_check(check)

    execution = test.add_step("execute", step_execute)
    execution.add_check(check_retcode_zero)
    execution.add_check(check_perf)
    return test
# confusion matrix confusion_matrix = ConfusionMatrix(directories) # neighbor best_hmm = 1 for directory in directories: # original kalman + resampled + parsing dataset = CsvDatasetExtended(base_dir + 'parsed/' + directory + '/', type=str) # apply transforms for sequence in dataset.readDataset(): # get log_probabilities obtained from sequence points = [ item for sublist in (sequence.getPoints(columns=[0])) for item in sublist ] probabilities = Test.compare(points, models) # verify if row_label is contained in the firsts x elements (default, x=1) keys = [] for key, value in sorted(probabilities.items(), key=lambda kv: kv[1], reverse=True)[:best_hmm]: if isinstance(key, tuple): keys = keys + list(itertools.chain(key)) else: keys.append(key) index_label = directory if directory in keys else keys[0] if directory not in keys: print('\n') print(points) print(probabilities)
def __init__(self):
    # Delegate to the Test base-class initializer; no extra state is set here.
    # NOTE(review): assumes this class derives from Test (header not visible);
    # super().__init__() would be the idiomatic call -- confirm the MRO.
    Test.__init__(self)
from test.test import Test

test = Test()
# run the performance test
test.test()
# draw the result plot
test.paint()
class TestView:
    """Tkinter view for composing a Test (questions + answers) and linking it
    to a lesson stored on the shelve database."""

    def __init__(self,root):
        self.root = root
        self._id = ''           # replaced by a tk.Entry in build()
        self.description = ''   # replaced by widgets in detail()
        self.questions = []     # list of (description, [answers]) tuples
        self.students = []
        self.answer = [0, 0, 0, 0]  # replaced by four tk.Entry widgets in detail()
        self.QI = 0             # question index: number of questions added so far
        self.build()

    def save(self, published = False):
        """Build a Test from the entered questions and attach it to the lesson
        whose id was typed in the Lesson ID entry, then close the window.

        NOTE(review): *published* is accepted but never used in this body --
        confirm whether it should mark the test/lesson as published.
        """
        try:
            store = shelve.open('lesson/store')
        except Exception:
            print("no lessons to link")
            return
        # collect (id, name) of every published lesson on the shelf
        lessons = [(lesson._id, lesson.name) for lesson in store.values() if lesson.published]
        store.close()
        self._id2 = self._id.get()  # lesson id from the Entry widget
        self.test = Test(self._id2)
        for d, a in self.questions:
            self.test.add(d, a)
        newlist = [seq[0] for seq in lessons]
        if self._id2 in newlist:
            self.element = str(newlist.index(self._id2))
        else:
            print("no lesson to link")
            return
        # reopen the shelf to fetch the lesson object itself
        store = shelve.open('lesson/store')
        self.lesson = store[self._id2]
        store.close()
        self.lesson.test = True
        self.test._id = self._id2
        self.test.store()
        self.lesson.store()
        self.root.destroy()

    def add(self, Test_description):
        """ this method adds a question to the test. """
        self.answer2 = [0,0,0,0]
        self.detail2 = 'not writen'
        # read the current form contents
        self.detail2 = self.description.get()
        self.answer2[0] = self.answer[0].get()
        self.answer2[1] = self.answer[1].get()
        self.answer2[2] = self.answer[2].get()
        self.answer2[3] = self.answer[3].get()
        self.questions.append((self.detail2, self.answer2))
        self.QI += 1
        # rebuild a fresh question form, then discard the old frame
        self.detail()
        Test_description.destroy()

    def detail(self):
        """Build the question-entry frame: description, answers, and buttons."""
        Test_description = tk.Frame(self.root)
        #Question description
        descBox = tk.Label(Test_description, text='question ' + str(self.QI + 1) + ':')
        descBox.grid(sticky=tk.W)
        self.description = tk.Text(Test_description, height=2,width=50)
        # NOTE(review): the Text widget above is immediately shadowed by this
        # Entry and never gridded -- confirm the Text line is dead code.
        self.description = tk.Entry(Test_description)
        self.description.grid(sticky=tk.W)
        #answer
        answerBox = tk.Label(Test_description, text='Correct Answer')
        answerBox.grid(sticky=tk.W)
        self.answer[0] = tk.Entry(Test_description)
        self.answer[0].grid(sticky=tk.W)
        altBox = tk.Label(Test_description, text='alternative answers')
        altBox.grid(sticky=tk.W)
        self.answer[1] = tk.Entry(Test_description)
        self.answer[1].grid(sticky=tk.W)
        self.answer[2] = tk.Entry(Test_description)
        self.answer[2].grid(sticky=tk.W)
        self.answer[3] = tk.Entry(Test_description)
        self.answer[3].grid(sticky=tk.W)
        tk.Button(Test_description, text='Publish & Close',
                  command=lambda: self.save(published=True)).grid(padx=40, sticky=tk.W)
        tk.Button(Test_description, text='Save & Close',
                  command=lambda: self.save()).grid(padx=40, sticky=tk.W)
        tk.Button(Test_description, text='add question',
                  command=lambda: self.add( Test_description)).grid(padx=40, sticky=tk.W)
        tk.Button(Test_description, text='Close',
                  command=self.root.destroy).grid(padx=40, sticky=tk.W)
        tk.Label(Test_description).grid()
        Test_description.grid(padx=40, pady=0, sticky=tk.W)

    def build(self):
        """Build the static part of the view: the title and the Lesson ID entry,
        then show the first question form."""
        title = tk.Label(self.root, text='Create Test', font = ('Arial', 26))
        title.grid(padx=40, pady=40, sticky=tk.W)
        Test_description = tk.Frame(self.root)
        #related lesson id
        tk.Label(Test_description, text='Lesson ID:').grid(sticky=tk.W)
        self._id = tk.Entry(Test_description)
        self._id.grid(sticky=tk.W)
        Test_description.grid(padx=40, pady=0, sticky=tk.W)
        self.detail()
transform3 = CenteringTransform() transform5 = ResampleInSpaceTransform(samples=80) # Apply transforms dataset.addTransform(transform1) dataset.addTransform(transform2) dataset.addTransform(transform3) dataset.addTransform(transform5) for file in dataset.applyTransforms(): sequences = file[0] buffer = [] res = [] for item in sequences: buffer.append(item) res.append(Test.compare(buffer, models)) print(res) ################### Advanced Version ################### class CsvDatasetRealTime(CsvDataset): """ Class for firing frame like real time: Once it reads the sequence, it fires the frame one by one. """ def __init__(self, dir, maxlen=20, num_samples=20): """ :param dir: dataset's path :param maxlen: indicates the dimension of the circular buffer (its size should be big enough to contain all the frame of a gesture) """
import multiprocessing from multiprocessing import Process from types import TracebackType from typing import * from test.config_editor_test import ConfigEditorTest from test.fail_test import FailTest from test.hapi_sources_test import HapiSourcesTest from test.molecule_info_test import MoleculeInfoTest from test.test import Test from test.throw_test import ThrowTest tests: List[Test] = [ Test(), FailTest(), ThrowTest(), HapiSourcesTest(), MoleculeInfoTest(), ConfigEditorTest() ] def run_tests(): result_fmt = '{:36s} {:36s}' name_fmt = '{:36s} ' print('{}{}'.format(name_fmt, name_fmt).format('Test Name', 'Test Result')) q = multiprocessing.Queue() for test in tests: print(name_fmt.format(test.name())) p = Process(target=test.run, args=(q, ))