def main(args):
    """Dispatch to the sub-tool named by ``args.tool``.

    Runs the matching tool and exits with status 0; for an unknown tool
    name, prints an error and exits with status 1.
    """
    # Dispatch table replaces the original repeated if/exit chain.
    tools = {
        'test': test,
        'validate': validate,
        'convert': convert,
    }
    tool = tools.get(args.tool)
    if tool is not None:
        tool(args)
        exit(0)
    print('No such tool \'{}\''.format(args.tool))
    exit(1)
def test_TemplateData_with_extra_translations_dont_validate(self):
    """A template whose translations name an arg missing from `args` must be rejected."""
    config = {
        "TemplateData": {
            "example": {
                "args": [],
                "translations": {
                    "arg1": "someValue",
                },
            },
        },
        "RunList": [],
    }
    with self.assertRaises(Exception):
        validate(config)
def test_RunList_with_extra_annotations_fail_to_validate(self):
    """Annotations referencing keys outside the declared args must fail validation."""
    config = {
        "TemplateData": {
            "example": {
                "args": [
                    "arg1",
                ],
                "annotations": {
                    "arg1": "someValue",
                    "extraAnnotation": "someValue",
                },
            },
        },
        "RunList": [],
    }
    with self.assertRaises(Exception):
        validate(config)
def test_RunList_with_extra_args_fail_to_validate(self):
    """A run supplying args its template never declared must fail validation."""
    config = {
        "TemplateData": {
            "example": {
                "args": [],
                "translations": {
                    "arg1": "someValue",
                },
            },
        },
        "RunList": [{
            "template_type": "example",
            "args": {
                "a": "b",
                "arg1": 5,
            },
        }],
    }
    with self.assertRaises(Exception):
        validate(config)
def test_runs_have_default_tags(self):
    """A run that supplies no tag receives a generated (truthy) one."""
    result = validate({
        "TemplateData": {
            "HBIR": self.sample_hbir_template,
        },
        "RunList": [{
            "template_type": "HBIR",
            "args": self.sample_args,
        }],
    })
    self.assertTrue(result["RunList"][0]["tag"])
def test_there_should_be_TemplateData_if_there_are_RunList(self):
    """A run referencing a template type absent from TemplateData must raise."""
    config = {
        "TemplateData": {},
        "RunList": [{
            "template_type": "NONE",
            "args": {
                "a": "b"
            },
        }],
    }
    with self.assertRaises(Exception):
        self.assertFalse(validate(config))
def evaluate_predictive(self, test_x, test_y):
    """Evaluate the predictive model (without comebacks) on a held-out set.

    Both ``test_x`` and ``test_y`` are numpy arrays. Returns the metrics
    dictionary produced by ``validate`` with the training time added.
    """
    print("=== Evaluating predictive model...")
    # Preprocessing is intentionally disabled here:
    # test_x = self._preprocessor.preprocess(test_x)
    predictions = self._model.predict(test_x)
    results = validate(test_y, predictions)
    results['train_time'] = self.train_time
    return results
def test_RunList_with_ommitted_with_no_defaults_fail_to_validate(self):
    """Omitting a declared arg that has no default value must fail validation."""
    config = {
        "TemplateData": {
            "example": {
                "args": [
                    "arg1",
                    "noDefaults",
                ],
                "translations": {
                    "arg1": "someValue",
                },
                "prop_options": {
                    "arg1": "defaultvalue",
                },
            },
        },
        "RunList": [{
            "template_type": "example",
            "args": {
                "arg1": 5,
            },
        }],
    }
    with self.assertRaises(Exception):
        validate(config)
def test_runs_have_custom_tags(self):
    """A tag supplied on a run is preserved verbatim through validation."""
    expected_tag = random_run_id()
    result = validate({
        "TemplateData": {
            "HBIR": self.sample_hbir_template,
        },
        "RunList": [{
            "template_type": "HBIR",
            "args": self.sample_args,
            "tag": expected_tag,
        }],
    })
    self.assertEqual(result["RunList"][0]["tag"], expected_tag)
def evaluate(self, test_x, test_y):
    """Evaluate the productive model (with comebacks) on a held-out set.

    Both ``test_x`` and ``test_y`` are numpy arrays. Returns the metrics
    dictionary from ``validate`` augmented with training time, mean
    inference time, and the train/test set sizes.
    """
    self.test_size = test_x.shape[0]
    print('{:=^80}'.format(' VALIDATE MODEL '))
    print("Evaluating productive model...")
    predictions = self._infer(test_x)
    # Score the test-set predictions.
    metrics = validate(test_y, predictions)
    # Attach training and inference statistics.
    metrics['train_time'] = self.train_time
    metrics['mean_inference_time'] = self.inference_time / self.test_size
    metrics['train_size'] = self.train_size
    metrics['test_size'] = self.test_size
    print('{:=^80}'.format(''))
    return metrics
def do_validate(arguments):
    """ Validate a configuration based on the schema provided. """
    # NOTE(review): indentation reconstructed from a collapsed line — the
    # fallback try appears to live inside the except clause; confirm against
    # the original file.
    with open(arguments['<config>'], 'r') as f:
        args = json.loads(f.read())
    try:
        # Blackbox-style config: a non-None result means it was handled
        # (implicitly returns None in that case).
        if validate.validate_blackbox(args) is not None:
            return
    except Exception as e:
        print(e)
        print("attempting to validate SPECtate configuration...")
        try:
            # Fall back to SPECtate-style validation; True when it returns None.
            return validate.validate(args) is None
        except Exception as e:
            # Caller receives the exception object itself (not raised).
            return e
    return True
def test_RunList_with_times_validates(self):
    """`times` defaults to 1, and an explicitly supplied value is kept."""
    result = validate({
        "TemplateData": {
            "HBIR": self.sample_hbir_template,
        },
        "RunList": [
            {
                "template_type": "HBIR",
                "args": self.sample_args,
            },
            {
                "template_type": "HBIR",
                "args": self.sample_args,
                "times": 2,
            },
        ]
    })
    for run in result["RunList"]:
        self.assertEqual(self.sample_args, run["args"])
    self.assertEqual(result["RunList"][0]["times"], 1)
    self.assertEqual(result["RunList"][1]["times"], 2)
def test_data_set_two_true_scenario():
    """data_set_two validates when column "c" is declared as a string column."""
    assert validate(data_set_two, ["c"])
def test_data_set_two_false_scenario_two():
    """data_set_two must not validate when no string columns are declared."""
    assert not validate(data_set_two, [])
def test_data_set_three_true_scenario_two():
    """data_set_three validates when column "d" is declared as a string column."""
    assert validate(data_set_three, ["d"])
def test_invalid_config_does_not_validate(self):
    """The known-bad fixture must raise during load/validation and never pass."""
    with self.assertRaises(Exception):
        with open('tests/fixtures/sample_config_invalid.json') as f:
            parsed = json.loads(f.read())
            self.assertFalse(validate(parsed))
def test_data_set_one_true_scenario():
    """data_set_one validates when columns "a", "b", and "c" are string columns."""
    assert validate(data_set_one, ["a", "b", "c"])
def test_example_config_does_validate(self):
    """Every bundled example configuration file must validate successfully."""
    for path in self.examples:
        with open(path) as f:
            parsed = json.loads(f.read())
            self.assertTrue(validate(parsed))
sys.path.append(PROJECT_DIR)

# `re` is used below but was not visibly imported; importing here is
# idempotent even if it is already imported earlier in the file.
import re

from src import bracket as src_bracket
from src import compute as src_compute
from src import validate as src_validate

INPUT_EXPRESSION = '请输入要计算的表达式,退出程序请输q:'
RESULT = '计算结果:{0}'

# Run the interactive loop only when executed directly (``python index.py``);
# importing this module must not start it.
if __name__ == "__main__":
    expression = ''
    while expression != 'q':
        expression = input(INPUT_EXPRESSION)
        if expression == 'q':
            exit()
        else:
            # Only process syntactically valid expressions.
            if src_validate.validate(expression):
                # Strip all brackets, then evaluate the flat expression.
                expression = src_bracket.remove_bracket(expression)
                result = src_compute.compute(expression)
                # BUG FIX: the original unanchored pattern '\.0' also mangled
                # values such as '1.05' -> '15'. Anchor it so only a trailing
                # '.0' is stripped, printing whole numbers as integers.
                result = re.sub(r'\.0$', '', result)
                print(RESULT.format(result))
def test_data_set_three_false_scenario():
    """data_set_three must not validate when only column "a" is declared."""
    assert not validate(data_set_three, ["a"])