def test_multi_output(artifact_dir):
    records_train = gen_records(NUM_SAMPLES_TRAIN)
    records_validation = gen_records(NUM_SAMPLES_VALIDATION)
    records_score = gen_records(NUM_SAMPLES_SCORE)

    loc = os.path.abspath(os.path.dirname(__file__))
    cfg = io_utils.load_json("config_multi_output.json", loc)

    bm = BarrageModel(artifact_dir)
    bm.train(cfg, records_train, records_validation)
    scores = bm.predict(records_score)

    classification = [np.argmax(score["classification"]) for score in scores]
    regression_1 = [score["regression"][0] for score in scores]
    regression_2 = [score["regression"][1] for score in scores]
    df_scores = pd.DataFrame(
        {
            "classification": classification,
            "regression_1": regression_1,
            "regression_2": regression_2,
        }
    )
    # Convert the scored records to a DataFrame so the target columns can be
    # indexed below (mirrors test_simple_output; without this, column lookups
    # on a list of record dicts would fail).
    records_score = pd.DataFrame(records_score)

    assert (df_scores["classification"] == records_score["y_cls"]).mean() > 0.5
    assert abs((df_scores["regression_1"] - records_score["y_reg_1"]).mean()) < 0.5
    assert abs((df_scores["regression_2"] - records_score["y_reg_2"]).mean()) < 0.5
def test_simple_output(artifact_dir):
    records_train = gen_records(NUM_SAMPLES_TRAIN)
    records_validation = gen_records(NUM_SAMPLES_VALIDATION)
    records_score = gen_records(NUM_SAMPLES_SCORE)

    loc = os.path.abspath(os.path.dirname(__file__))
    cfg = io_utils.load_json("config_single_output.json", loc)

    bm = BarrageModel(artifact_dir)
    bm.train(cfg, records_train, records_validation)
    scores = bm.predict(records_score)

    df_scores = pd.DataFrame(scores)
    records_score = pd.DataFrame(records_score)

    assert (df_scores["softmax"] == records_score["label"]).mean() >= 0.90
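
# The tests above assume a module-level `gen_records` helper and the
# NUM_SAMPLES_* constants, neither of which appears in this section. Below is
# a minimal sketch of what they could look like; the input features, value
# ranges, and noise scales are hypothetical, chosen only to produce the keys
# the assertions read ("label"/"y_cls" for classification, "y_reg_1"/"y_reg_2"
# for regression) with a learnable signal.
NUM_SAMPLES_TRAIN = 1024
NUM_SAMPLES_VALIDATION = 256
NUM_SAMPLES_SCORE = 256


def gen_records(num_samples):
    """Generate a list of record dicts with synthetic inputs and targets."""
    records = []
    for _ in range(num_samples):
        x = np.random.normal(size=2)
        label = int(x.sum() > 0)
        records.append(
            {
                "x1": x[0],
                "x2": x[1],
                "label": label,
                "y_cls": label,
                "y_reg_1": x[0] + np.random.normal(scale=0.1),
                "y_reg_2": x[1] - np.random.normal(scale=0.1),
            }
        )
    return records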