def score_fn(search_idx, uid, batch_data, config_info):
    # Look up the candidate architecture's configuration by its uid
    config = config_info.loc[uid, :]
    # Build the candidate NASBench-201 network (nasbench_network and str2structure are
    # helper functions defined elsewhere in this example)
    nasbench201_model = nasbench_network((32, 32, 3),
                                         str2structure(config["architecture"]),
                                         config["C"],
                                         config["N"],
                                         10)
    # Collect the output tensor of every ReLU layer in the candidate network
    feature_list = [layer.output for layer in nasbench201_model.layers if "re_lu" in layer.name]
    model = fe.build(model_fn=lambda: Model(nasbench201_model.input, feature_list), optimizer_fn=None)
    # Only a single forward pass through the network is required
    relu_result = fe.backend.feed_forward(model, batch_data["x"], training=False)
    # Accumulate the binary-activation similarity matrix across all ReLU layers
    matrix = np.zeros((relu_result[0].shape[0], relu_result[0].shape[0]))
    for sample in relu_result:
        sample = to_number(sample)
        sample = sample.reshape((sample.shape[0], -1))
        x = (sample > 0.).astype(float)
        x_t = np.transpose(x)
        mat = x @ x_t
        mat2 = (1. - x) @ (1. - x_t)
        matrix = matrix + mat + mat2
    # The architecture's score is the log-determinant of the accumulated matrix
    _, score = np.linalg.slogdet(matrix)
    return score
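# Hedged usage sketch (not from the original source): rank a few candidate architectures by
# their training-free score using a single batch. `pipeline`, `config_info` (a pandas table
# indexed by uid with "architecture", "C", and "N" columns), and `candidate_uids` are assumed
# to exist as in the surrounding NASBench-201 example and are not defined here.
batch_data = pipeline.get_results()  # one batch of images under the "x" key
scores = {
    uid: score_fn(search_idx=0, uid=uid, batch_data=batch_data, config_info=config_info)
    for uid in candidate_uids
}
best_uid = max(scores, key=scores.get)  # the highest log-determinant score wins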
def on_batch_end(self, data):
    y_true, y_pred = to_number(data[self.true_key]), to_number(data[self.pred_key])
    assert y_pred.size == y_true.size
    self.y_pred.extend(y_pred.ravel())
    self.y_true.extend(y_true.ravel())
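# Hedged companion sketch (an assumption, not the original trace's code): buffers accumulated
# in on_batch_end are typically reduced once per epoch. The metric below (plain accuracy via
# numpy, assuming label-valued predictions) and the output key are illustrative only; the real
# trace may compute something else.
def on_epoch_end(self, data):
    y_true, y_pred = np.array(self.y_true), np.array(self.y_pred)
    data.write_with_log(self.outputs[0], float(np.mean(y_true == y_pred)))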
def test_instance_case_tf(self):
    test_title = "test"
    test_description = "each return needs to be above 0"
    test_description2 = "each return needs to be above -10"
    save_path = tempfile.mkdtemp()
    exp_name = "exp"
    model = fe.build(model_fn=one_layer_tf_model, optimizer_fn="adam")
    network = fe.Network(ops=[ModelOp(model=model, inputs="x", outputs="y")])
    test_cases = [
        TestCase(description=test_description, criteria=lambda y: to_number(y) > 10, aggregate=False, fail_threshold=1),
        TestCase(description=test_description2, criteria=lambda y: to_number(y) > -10, aggregate=False)
    ]
    traces = TestReport(test_cases=test_cases, test_title=test_title, save_path=save_path, data_id="id")
    estimator = fe.Estimator(pipeline=self.pipeline, network=network, epochs=1, traces=traces)
    with patch('fastestimator.trace.io.test_report.json.dump') as fake:
        estimator.test(exp_name)
        json_summary = fake.call_args[0][0]

    with self.subTest("title"):
        self.assertEqual(json_summary["title"], test_title)
    with self.subTest("timestamp"):
        self.assertIn("timestamp", json_summary)
    with self.subTest("execution_time(s)"):
        self.assertIn("execution_time(s)", json_summary)
    with self.subTest("test_type 1"):
        self.assertEqual(json_summary["tests"][0]["test_type"], "per-instance")
    with self.subTest("test_type 2"):
        self.assertEqual(json_summary["tests"][1]["test_type"], "per-instance")
    with self.subTest("description 1"):
        self.assertEqual(json_summary["tests"][0]["description"], test_description)
    with self.subTest("description 2"):
        self.assertEqual(json_summary["tests"][1]["description"], test_description2)
    with self.subTest("passed 1"):
        self.assertEqual(json_summary["tests"][0]["passed"], False)
    with self.subTest("passed 2"):
        self.assertEqual(json_summary["tests"][1]["passed"], True)
    with self.subTest("fail_threshold 1"):
        self.assertEqual(json_summary["tests"][0]["fail_threshold"], 1)
    with self.subTest("fail_threshold 2"):
        self.assertEqual(json_summary["tests"][1]["fail_threshold"], 0)  # its default value should be zero
    with self.subTest("fail_number 1"):
        self.assertEqual(json_summary["tests"][0]["fail_number"], 2)
    with self.subTest("fail_number 2"):
        self.assertEqual(json_summary["tests"][1]["fail_number"], 0)
    with self.subTest("fail_id 1"):
        self.assertEqual(json_summary["tests"][0]["fail_id"], [0, 1])
    with self.subTest("fail_id 2"):
        self.assertEqual(json_summary["tests"][1]["fail_id"], [])
    with self.subTest("check pdf report"):
        report_path = os.path.join(save_path, exp_name + "_TestReport.pdf")
        self.assertTrue(os.path.exists(report_path))
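# Hedged contrast example (an assumption, not part of this test): with aggregate=True (the
# default), a TestCase is evaluated once on an epoch-level value rather than per instance,
# and the report records it as an aggregate test instead of "per-instance". The "accuracy"
# key below is a hypothetical trace output, not something this test defines.
aggregate_case = TestCase(description="accuracy needs to be above 0.9",
                          criteria=lambda accuracy: accuracy > 0.9)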
def on_batch_end(self, data):
    self.buffer.append(to_number(data[self.inputs[0]]))
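# Hedged sketch of how such an accumulating trace could be laid out end to end, assuming
# `from fastestimator.trace import Trace`, `numpy as np`, and `to_number` are available.
# The class name, output key, and the epoch-level mean reduction are assumptions, not the
# original trace's behavior.
class BufferMean(Trace):
    def __init__(self, inputs, outputs="buffer_mean", mode="eval"):
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)
        self.buffer = []

    def on_epoch_begin(self, data):
        self.buffer = []  # reset the per-epoch buffer

    def on_batch_end(self, data):
        self.buffer.append(to_number(data[self.inputs[0]]))

    def on_epoch_end(self, data):
        # Reduce everything collected during the epoch to a single logged value
        values = np.concatenate([np.atleast_1d(b) for b in self.buffer])
        data.write_with_log(self.outputs[0], float(np.mean(values)))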