# Shared imports assumed by the fragments below (each fragment originally
# lives in its own module, so the exact import lists may differ):
import logging
import os
import shutil
from functools import partial
from os import path

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

import emmental
from emmental import Meta, init
from emmental.data import EmmentalDataLoader, EmmentalDataset
from emmental.learner import EmmentalLearner
from emmental.model import EmmentalModel
from emmental.scorer import Scorer
from emmental.task import Action, EmmentalTask


def _init_meta(self, _seed, exp_dir, meta_config, learner_config, logging_config):
    emmental.init(path.join(exp_dir, "_emmental_logs"))
    Meta.update_config(
        config={
            "meta_config": {**meta_config, "seed": _seed},
            "learner_config": learner_config,
            "logging_config": logging_config,
        }
    )
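# The helper above reads like a method on a Sacred-style experiment class (the
# underscore-prefixed `_seed` matches Sacred's convention for injected config
# values). A hedged usage sketch of the same initialization performed inline;
# `exp_dir` and all config values here are hypothetical examples:
def _example_init_meta_inline(exp_dir="exp/run_0"):
    """Hedged usage sketch (assumption, not from the original source)."""
    emmental.init(path.join(exp_dir, "_emmental_logs"))
    Meta.update_config(
        config={
            "meta_config": {"verbose": True, "seed": 0},
            "learner_config": {"n_epochs": 10},
            "logging_config": {"evaluation_freq": 1},
        }
    )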
def test_config_check_in_meta(caplog):
    """Unit test of config check in meta."""
    caplog.set_level(logging.INFO)

    dirpath = "temp_test_meta_log_folder"

    Meta.reset()
    init(dirpath)

    # A whole-number float evaluation_freq is coerced to int.
    config = {
        "logging_config": {
            "evaluation_freq": 5.0,
        },
    }
    Meta.update_config(config)
    assert type(Meta.config["logging_config"]["evaluation_freq"]) == int
    assert Meta.config["logging_config"]["evaluation_freq"] == 5

    # A fractional batch frequency is rounded up: 2.3 batches -> 3 batches.
    config = {
        "logging_config": {
            "counter_unit": "batch",
            "evaluation_freq": 2.3,
        },
    }
    Meta.update_config(config)
    assert type(Meta.config["logging_config"]["evaluation_freq"]) == int
    assert Meta.config["logging_config"]["evaluation_freq"] == 3

    # Likewise for samples: 0.2 samples -> 1 sample.
    config = {
        "logging_config": {
            "counter_unit": "sample",
            "evaluation_freq": 0.2,
        },
    }
    Meta.update_config(config)
    assert type(Meta.config["logging_config"]["evaluation_freq"]) == int
    assert Meta.config["logging_config"]["evaluation_freq"] == 1

    # With counter_unit="epoch", the config check resets write_loss_per_step
    # back to False.
    config = {
        "logging_config": {
            "counter_unit": "epoch",
            "evaluation_freq": 1,
            "writer_config": {"write_loss_per_step": True},
        },
    }
    Meta.update_config(config)
    assert (
        Meta.config["logging_config"]["writer_config"]["write_loss_per_step"]
        is False
    )

    # Remove the temp folder
    shutil.rmtree(dirpath)
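# The batch/sample cases above are consistent with rounding a fractional
# evaluation_freq up to a whole unit count, and with coercing whole-number
# floats to int. A minimal sketch of that rule (an illustration inferred from
# the assertions, not emmental's actual implementation, and covering only the
# batch/sample cases shown):
import math


def _normalize_evaluation_freq(freq: float) -> int:
    """Coerce a batch/sample evaluation frequency to a whole unit count."""
    return max(1, math.ceil(freq))


assert _normalize_evaluation_freq(5.0) == 5  # whole float -> int
assert _normalize_evaluation_freq(2.3) == 3  # 2.3 batches -> 3 batches
assert _normalize_evaluation_freq(0.2) == 1  # 0.2 samples -> 1 sample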
def test_meta(caplog):
    """Unit test of meta."""
    caplog.set_level(logging.INFO)

    dirpath = "temp_test_meta_log_folder"

    Meta.reset()
    init(dirpath)

    # Check the log folder is created correctly
    assert os.path.isdir(dirpath) is True
    assert Meta.log_path.startswith(dirpath) is True

    # Check the config is created
    assert isinstance(Meta.config, dict) is True
    assert Meta.config["meta_config"] == {
        "seed": None,
        "verbose": True,
        "log_path": "logs",
        "use_exact_log_path": False,
    }

    Meta.update_config(path="tests/shared", filename="emmental-test-config.yaml")
    assert Meta.config["meta_config"] == {
        "seed": 1,
        "verbose": False,
        "log_path": "tests",
        "use_exact_log_path": False,
    }

    # Test unable to find config file
    Meta.reset()
    init(dirpath)
    Meta.update_config(path=os.path.dirname(__file__))
    assert Meta.config["meta_config"] == {
        "seed": None,
        "verbose": True,
        "log_path": "logs",
        "use_exact_log_path": False,
    }

    # Remove the temp folder
    shutil.rmtree(dirpath)
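# For reference, a hedged reconstruction of the YAML consumed above, inferred
# from the assertions (the real tests/shared/emmental-test-config.yaml may
# contain additional sections):
EXPECTED_TEST_CONFIG_YAML = """\
meta_config:
  seed: 1
  verbose: false
  log_path: tests
"""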
def test_model(caplog):
    """Unit test of model."""
    caplog.set_level(logging.INFO)

    dirpath = "temp_test_model"

    Meta.reset()
    init(dirpath)

    def ce_loss(module_name, immediate_output_dict, Y, active):
        return F.cross_entropy(
            immediate_output_dict[module_name][0][active], (Y.view(-1))[active]
        )

    def output(module_name, immediate_output_dict):
        return F.softmax(immediate_output_dict[module_name][0], dim=1)

    task1 = EmmentalTask(
        name="task_1",
        module_pool=nn.ModuleDict(
            {"m1": nn.Linear(10, 10, bias=False), "m2": nn.Linear(10, 2, bias=False)}
        ),
        task_flow=[
            {"name": "m1", "module": "m1", "inputs": [("_input_", "data")]},
            {"name": "m2", "module": "m2", "inputs": [("m1", 0)]},
        ],
        loss_func=partial(ce_loss, "m2"),
        output_func=partial(output, "m2"),
        scorer=Scorer(metrics=["accuracy"]),
    )

    new_task1 = EmmentalTask(
        name="task_1",
        module_pool=nn.ModuleDict(
            {"m1": nn.Linear(10, 5, bias=False), "m2": nn.Linear(5, 2, bias=False)}
        ),
        task_flow=[
            {"name": "m1", "module": "m1", "inputs": [("_input_", "data")]},
            {"name": "m2", "module": "m2", "inputs": [("m1", 0)]},
        ],
        loss_func=partial(ce_loss, "m2"),
        output_func=partial(output, "m2"),
        scorer=Scorer(metrics=["accuracy"]),
    )

    task2 = EmmentalTask(
        name="task_2",
        module_pool=nn.ModuleDict(
            {"m1": nn.Linear(10, 5, bias=False), "m2": nn.Linear(5, 2, bias=False)}
        ),
        task_flow=[
            {"name": "m1", "module": "m1", "inputs": [("_input_", "data")]},
            {"name": "m2", "module": "m2", "inputs": [("m1", 0)]},
        ],
        loss_func=partial(ce_loss, "m2"),
        output_func=partial(output, "m2"),
        scorer=Scorer(metrics=["accuracy"]),
    )

    config = {"model_config": {"dataparallel": False}}
    Meta.update_config(config)

    model = EmmentalModel(name="test", tasks=task1)

    assert repr(model) == "EmmentalModel(name=test)"
    assert model.name == "test"
    assert model.task_names == set(["task_1"])
    assert model.module_pool["m1"].weight.data.size() == (10, 10)
    assert model.module_pool["m2"].weight.data.size() == (2, 10)

    # update_task replaces the modules of an existing task with the same name ...
    model.update_task(new_task1)
    assert model.module_pool["m1"].weight.data.size() == (5, 10)
    assert model.module_pool["m2"].weight.data.size() == (2, 5)

    # ... but is a no-op for a task the model does not contain.
    model.update_task(task2)
    assert model.task_names == set(["task_1"])

    model.add_task(task2)
    assert model.task_names == set(["task_1", "task_2"])

    model.remove_task("task_1")
    assert model.task_names == set(["task_2"])

    # Removing an already-removed task is also a no-op.
    model.remove_task("task_1")
    assert model.task_names == set(["task_2"])

    model.save(f"{dirpath}/saved_model.pth")
    model.load(f"{dirpath}/saved_model.pth")

    # Test add_tasks
    model = EmmentalModel(name="test")
    model.add_tasks([task1, task2])
    assert model.task_names == set(["task_1", "task_2"])

    shutil.rmtree(dirpath)
def test_e2e(caplog):
    """Run an end-to-end test."""
    caplog.set_level(logging.INFO)

    dirpath = "temp_test_e2e"

    Meta.reset()
    emmental.init(dirpath)

    # Generate synthetic data
    N = 50

    X = np.random.random((N, 2)) * 2 - 1
    Y1 = (X[:, 0] > X[:, 1] + 0.25).astype(int) + 1
    Y2 = (-X[:, 0] > X[:, 1] + 0.25).astype(int) + 1

    # Create dataset and dataloader
    splits = [0.8, 0.1, 0.1]

    X_train, X_dev, X_test = [], [], []
    Y1_train, Y1_dev, Y1_test = [], [], []
    Y2_train, Y2_dev, Y2_test = [], [], []

    for i in range(N):
        if i <= N * splits[0]:
            X_train.append(torch.Tensor(X[i]))
            Y1_train.append(Y1[i])
            Y2_train.append(Y2[i])
        elif i < N * (splits[0] + splits[1]):
            X_dev.append(torch.Tensor(X[i]))
            Y1_dev.append(Y1[i])
            Y2_dev.append(Y2[i])
        else:
            X_test.append(torch.Tensor(X[i]))
            Y1_test.append(Y1[i])
            Y2_test.append(Y2[i])

    Y1_train = torch.from_numpy(np.array(Y1_train))
    Y1_dev = torch.from_numpy(np.array(Y1_dev))
    Y1_test = torch.from_numpy(np.array(Y1_test))

    Y2_train = torch.from_numpy(np.array(Y2_train))
    Y2_dev = torch.from_numpy(np.array(Y2_dev))
    Y2_test = torch.from_numpy(np.array(Y2_test))

    train_dataset1 = EmmentalDataset(
        name="synthetic", X_dict={"data": X_train}, Y_dict={"label1": Y1_train}
    )
    train_dataset2 = EmmentalDataset(
        name="synthetic", X_dict={"data": X_train}, Y_dict={"label2": Y2_train}
    )
    dev_dataset1 = EmmentalDataset(
        name="synthetic", X_dict={"data": X_dev}, Y_dict={"label1": Y1_dev}
    )
    dev_dataset2 = EmmentalDataset(
        name="synthetic", X_dict={"data": X_dev}, Y_dict={"label2": Y2_dev}
    )
    test_dataset1 = EmmentalDataset(
        name="synthetic", X_dict={"data": X_test}, Y_dict={"label1": Y1_test}
    )
    test_dataset2 = EmmentalDataset(
        name="synthetic", X_dict={"data": X_test}, Y_dict={"label2": Y2_test}
    )

    task_to_label_dict = {"task1": "label1"}

    train_dataloader1 = EmmentalDataLoader(
        task_to_label_dict=task_to_label_dict,
        dataset=train_dataset1,
        split="train",
        batch_size=10,
    )
    dev_dataloader1 = EmmentalDataLoader(
        task_to_label_dict=task_to_label_dict,
        dataset=dev_dataset1,
        split="valid",
        batch_size=10,
    )
    test_dataloader1 = EmmentalDataLoader(
        task_to_label_dict=task_to_label_dict,
        dataset=test_dataset1,
        split="test",
        batch_size=10,
    )

    task_to_label_dict = {"task2": "label2"}

    train_dataloader2 = EmmentalDataLoader(
        task_to_label_dict=task_to_label_dict,
        dataset=train_dataset2,
        split="train",
        batch_size=10,
    )
    dev_dataloader2 = EmmentalDataLoader(
        task_to_label_dict=task_to_label_dict,
        dataset=dev_dataset2,
        split="valid",
        batch_size=10,
    )
    test_dataloader2 = EmmentalDataLoader(
        task_to_label_dict=task_to_label_dict,
        dataset=test_dataset2,
        split="test",
        batch_size=10,
    )

    # Create task
    def ce_loss(task_name, immediate_output_dict, Y, active):
        module_name = f"{task_name}_pred_head"
        return F.cross_entropy(
            immediate_output_dict[module_name][0][active], (Y.view(-1) - 1)[active]
        )

    def output(task_name, immediate_output_dict):
        module_name = f"{task_name}_pred_head"
        return F.softmax(immediate_output_dict[module_name][0], dim=1)

    task_name = "task1"

    task1 = EmmentalTask(
        name=task_name,
        module_pool=nn.ModuleDict(
            {
                "input_module": nn.Linear(2, 8),
                f"{task_name}_pred_head": nn.Linear(8, 2),
            }
        ),
        task_flow=[
            {
                "name": "input",
                "module": "input_module",
                "inputs": [("_input_", "data")],
            },
            {
                "name": f"{task_name}_pred_head",
                "module": f"{task_name}_pred_head",
                "inputs": [("input", 0)],
            },
        ],
        loss_func=partial(ce_loss, task_name),
        output_func=partial(output, task_name),
        scorer=Scorer(metrics=["accuracy", "roc_auc"]),
    )

    task_name = "task2"

    task2 = EmmentalTask(
        name=task_name,
        module_pool=nn.ModuleDict(
            {
                "input_module": nn.Linear(2, 8),
                f"{task_name}_pred_head": nn.Linear(8, 2),
            }
        ),
        task_flow=[
            {
                "name": "input",
                "module": "input_module",
                "inputs": [("_input_", "data")],
            },
            {
                "name": f"{task_name}_pred_head",
                "module": f"{task_name}_pred_head",
                "inputs": [("input", 0)],
            },
        ],
        loss_func=partial(ce_loss, task_name),
        output_func=partial(output, task_name),
        scorer=Scorer(metrics=["accuracy", "roc_auc"]),
    )

    # Build model
    mtl_model = EmmentalModel(name="all", tasks=[task1, task2])

    # Create learner
    emmental_learner = EmmentalLearner()

    # Update learning config
    Meta.update_config(
        config={"learner_config": {"n_epochs": 10, "optimizer_config": {"lr": 0.01}}}
    )

    # Learning
    emmental_learner.learn(
        mtl_model,
        [train_dataloader1, train_dataloader2, dev_dataloader1, dev_dataloader2],
    )

    test1_score = mtl_model.score(test_dataloader1)
    test2_score = mtl_model.score(test_dataloader2)

    assert test1_score["task1/synthetic/test/accuracy"] >= 0.5
    assert test1_score["task1/synthetic/test/roc_auc"] >= 0.6
    assert test2_score["task2/synthetic/test/accuracy"] >= 0.5
    assert test2_score["task2/synthetic/test/roc_auc"] >= 0.6

    shutil.rmtree(dirpath)
def test_e2e_skip_trained_epoch(caplog):
    """Run an end-to-end test that resumes training and skips learned epochs."""
    caplog.set_level(logging.INFO)

    dirpath = "temp_test_e2e_skip_trained"
    use_exact_log_path = True
    Meta.reset()
    init(dirpath, use_exact_log_path=use_exact_log_path)

    # Generate synthetic data
    N = 500

    X = np.random.random((N, 2)) * 2 - 1
    Y = (X[:, 0] > X[:, 1] + 0.25).astype(int)

    X = [torch.Tensor(X[i]) for i in range(N)]

    # Create dataset and dataloader
    X_train, X_dev, X_test = (
        X[: int(0.8 * N)],
        X[int(0.8 * N) : int(0.9 * N)],
        X[int(0.9 * N) :],
    )
    Y_train, Y_dev, Y_test = (
        torch.tensor(Y[: int(0.8 * N)]),
        torch.tensor(Y[int(0.8 * N) : int(0.9 * N)]),
        torch.tensor(Y[int(0.9 * N) :]),
    )

    train_dataset = EmmentalDataset(
        name="synthetic", X_dict={"data": X_train}, Y_dict={"label1": Y_train}
    )
    dev_dataset = EmmentalDataset(
        name="synthetic", X_dict={"data": X_dev}, Y_dict={"label1": Y_dev}
    )
    test_dataset = EmmentalDataset(
        name="synthetic", X_dict={"data": X_test}, Y_dict={"label1": Y_test}
    )

    task_to_label_dict = {"task1": "label1"}

    train_dataloader = EmmentalDataLoader(
        task_to_label_dict=task_to_label_dict,
        dataset=train_dataset,
        split="train",
        batch_size=10,
    )
    dev_dataloader = EmmentalDataLoader(
        task_to_label_dict=task_to_label_dict,
        dataset=dev_dataset,
        split="valid",
        batch_size=10,
    )
    test_dataloader = EmmentalDataLoader(
        task_to_label_dict=task_to_label_dict,
        dataset=test_dataset,
        split="test",
        batch_size=10,
    )

    # Create task
    def ce_loss(task_name, immediate_output_dict, Y):
        module_name = f"{task_name}_pred_head"
        return F.cross_entropy(immediate_output_dict[module_name], Y)

    def output(task_name, immediate_output_dict):
        module_name = f"{task_name}_pred_head"
        return F.softmax(immediate_output_dict[module_name], dim=1)

    task_metrics = {"task1": ["accuracy"]}

    class IdentityModule(nn.Module):
        def __init__(self):
            """Initialize IdentityModule."""
            super().__init__()

        def forward(self, input):
            return {"out": input}

    tasks = [
        EmmentalTask(
            name=task_name,
            module_pool=nn.ModuleDict(
                {
                    "input_module0": IdentityModule(),
                    "input_module1": nn.Linear(2, 8),
                    f"{task_name}_pred_head": nn.Linear(8, 2),
                }
            ),
            task_flow=[
                Action(
                    name="input", module="input_module0", inputs=[("_input_", "data")]
                ),
                Action(
                    name="input1", module="input_module1", inputs=[("input", "out")]
                ),
                Action(
                    name=f"{task_name}_pred_head",
                    module=f"{task_name}_pred_head",
                    inputs=[("input1", 0)],
                ),
            ],
            module_device={"input_module0": -1},  # keep this module on CPU
            loss_func=partial(ce_loss, task_name),
            output_func=partial(output, task_name),
            action_outputs=None,
            scorer=Scorer(metrics=task_metrics[task_name]),
            require_prob_for_eval=False,
            require_pred_for_eval=True,
        )
        for task_name in ["task1"]
    ]

    # Build model
    model = EmmentalModel(name="all", tasks=tasks)

    # Create learner
    emmental_learner = EmmentalLearner()

    config = {
        "meta_config": {"seed": 0, "verbose": True},
        "learner_config": {
            "n_epochs": 1,
            "epochs_learned": 0,
            "steps_learned": 0,
            "skip_learned_data": False,
            "online_eval": True,
            "optimizer_config": {"lr": 0.01, "grad_clip": 100},
        },
        "logging_config": {
            "counter_unit": "batch",
            "evaluation_freq": 5,
            "writer_config": {
                "writer": "json",
                "write_loss_per_step": True,
                "verbose": True,
            },
            "checkpointing": True,
            "checkpointer_config": {
                "checkpoint_path": None,
                "checkpoint_freq": 1,
                "checkpoint_metric": {"model/all/train/loss": "min"},
                "checkpoint_task_metrics": None,
                "checkpoint_runway": 1,
                "checkpoint_all": False,
                "clear_intermediate_checkpoints": True,
                "clear_all_checkpoints": False,
            },
        },
    }
    Meta.update_config(config)

    # Learning
    emmental_learner.learn(
        model,
        [train_dataloader, dev_dataloader],
    )

    test_score = model.score(test_dataloader)
    assert test_score["task1/synthetic/test/loss"] > 0.3

    Meta.reset()
    init(dirpath, use_exact_log_path=use_exact_log_path)

    config = {
        "meta_config": {"seed": 0, "verbose": False},
        "learner_config": {
            "n_epochs": 5,
            "epochs_learned": 1,
            "steps_learned": 0,
            "skip_learned_data": True,
            "online_eval": False,
            "optimizer_config": {"lr": 0.01, "grad_clip": 100},
            "optimizer_path": (
                f"{dirpath}/best_model_model_all_train_loss.optimizer.pth"
            ),
            "scheduler_path": (
                f"{dirpath}/best_model_model_all_train_loss.scheduler.pth"
            ),
        },
        "model_config": {
            "model_path": f"{dirpath}/best_model_model_all_train_loss.model.pth"
        },
        "logging_config": {
            "counter_unit": "batch",
            "evaluation_freq": 5,
            "writer_config": {
                "writer": "json",
                "write_loss_per_step": True,
                "verbose": True,
            },
            "checkpointing": True,
            "checkpointer_config": {
                "checkpoint_path": None,
                "checkpoint_freq": 1,
                "checkpoint_metric": {"model/all/train/loss": "min"},
                "checkpoint_task_metrics": None,
                "checkpoint_runway": 1,
                "checkpoint_all": False,
                "clear_intermediate_checkpoints": True,
                "clear_all_checkpoints": False,
            },
        },
    }
    Meta.update_config(config)

    if Meta.config["model_config"]["model_path"]:
        model.load(Meta.config["model_config"]["model_path"])

    # Learning
    emmental_learner.learn(
        model,
        [train_dataloader, dev_dataloader],
    )

    test_score = model.score(test_dataloader)
    assert test_score["task1/synthetic/test/loss"] <= 0.4

    shutil.rmtree(dirpath)
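# In brief, the resume recipe exercised by this test (a summary of the config
# keys used above, not additional API surface):
#   1. Train once with checkpointing enabled and a fixed log path
#      (use_exact_log_path=True), producing the
#      best_model_model_all_train_loss.{model,optimizer,scheduler}.pth files.
#   2. Re-init with the same log path and point model_config.model_path plus
#      learner_config.optimizer_path / scheduler_path at those files.
#   3. Set learner_config.epochs_learned / steps_learned to the progress
#      already made, enable skip_learned_data, raise n_epochs, reload the
#      model, and call learn() again to continue where training stopped.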
    return args


if __name__ == "__main__":
    # Parsing command line arguments
    args = parse_args()

    # Configuring run data
    Meta.update_config(
        config={
            "meta_config": {"seed": 1701, "device": 0},
            "learner_config": {
                "n_epochs": 20,
                "valid_split": "val",
                "optimizer_config": {"optimizer": "sgd", "lr": 0.001, "l2": 0.000},
                "lr_scheduler_config": {
                    "warmup_steps": None,
                    "warmup_unit": "batch",
                    "lr_scheduler": "linear",
                    "min_lr": 1e-6,
                },
            },
            "logging_config": {"evaluation_freq": 4000, "checkpointing": False},
        }
    )

    # Getting paths to data
    DATA_NAME = args.data_name
    CXRDATA_PATH = args.cxrdata_path
    CXRIMAGE_PATH = args.cxrimage_path

    # Providing model settings
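# The body of the parser that produces `args` is truncated above. A hedged
# sketch of a parse_args consistent with the attributes used in the fragment
# (flag names and help text are hypothetical):
import argparse


def parse_args_sketch() -> argparse.Namespace:
    """Hypothetical reconstruction of the elided argument parser."""
    parser = argparse.ArgumentParser(description="CXR multi-task training run")
    parser.add_argument("--data_name", type=str, required=True)
    parser.add_argument("--cxrdata_path", type=str, required=True)
    parser.add_argument("--cxrimage_path", type=str, required=True)
    return parser.parse_args()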
def test_e2e(caplog):
    """Run an end-to-end test."""
    caplog.set_level(logging.INFO)

    dirpath = "temp_test_e2e"
    use_exact_log_path = False
    Meta.reset()
    init(dirpath, use_exact_log_path=use_exact_log_path)

    config = {
        "meta_config": {"seed": 0},
        "learner_config": {
            "n_epochs": 3,
            "online_eval": True,
            "optimizer_config": {"lr": 0.01, "grad_clip": 100},
        },
        "data_config": {"max_data_len": 10},
        "logging_config": {
            "counter_unit": "epoch",
            "evaluation_freq": 0.2,
            "writer_config": {"writer": "tensorboard", "verbose": True},
            "checkpointing": True,
            "checkpointer_config": {
                "checkpoint_path": None,
                "checkpoint_freq": 1,
                "checkpoint_metric": {"model/all/train/loss": "min"},
                "checkpoint_task_metrics": None,
                "checkpoint_runway": 1,
                "checkpoint_all": False,
                "clear_intermediate_checkpoints": True,
                "clear_all_checkpoints": True,
            },
        },
    }
    Meta.update_config(config)

    def grouped_parameters(model):
        no_decay = ["bias", "LayerNorm.weight"]
        return [
            {
                "params": [
                    p
                    for n, p in model.named_parameters()
                    if not any(nd in n for nd in no_decay)
                ],
                "weight_decay": 0.0,
            },
            {
                "params": [
                    p
                    for n, p in model.named_parameters()
                    if any(nd in n for nd in no_decay)
                ],
                "weight_decay": 0.0,
            },
        ]

    # Generate synthetic data
    N = 500

    X = np.random.random((N, 2)) * 2 - 1
    Y1 = (X[:, 0] > X[:, 1] + 0.25).astype(int)
    Y2 = (X[:, 0] > X[:, 1] + 0.2).astype(int)

    X = [torch.Tensor(X[i]) for i in range(N)]

    # Create dataset and dataloader
    X_train, X_dev, X_test = (
        X[: int(0.8 * N)],
        X[int(0.8 * N) : int(0.9 * N)],
        X[int(0.9 * N) :],
    )
    Y1_train, Y1_dev, Y1_test = (
        torch.tensor(Y1[: int(0.8 * N)]),
        torch.tensor(Y1[int(0.8 * N) : int(0.9 * N)]),
        torch.tensor(Y1[int(0.9 * N) :]),
    )
    Y2_train, Y2_dev, Y2_test = (
        torch.tensor(Y2[: int(0.8 * N)]),
        torch.tensor(Y2[int(0.8 * N) : int(0.9 * N)]),
        torch.tensor(Y2[int(0.9 * N) :]),
    )

    train_dataset1 = EmmentalDataset(
        name="synthetic", X_dict={"data": X_train}, Y_dict={"label1": Y1_train}
    )
    train_dataset2 = EmmentalDataset(
        name="synthetic", X_dict={"data": X_train}, Y_dict={"label2": Y2_train}
    )
    dev_dataset1 = EmmentalDataset(
        name="synthetic", X_dict={"data": X_dev}, Y_dict={"label1": Y1_dev}
    )
    dev_dataset2 = EmmentalDataset(
        name="synthetic", X_dict={"data": X_dev}, Y_dict={"label2": Y2_dev}
    )
    test_dataset1 = EmmentalDataset(
        name="synthetic", X_dict={"data": X_test}, Y_dict={"label1": Y1_test}
    )
    test_dataset2 = EmmentalDataset(
        name="synthetic", X_dict={"data": X_test}, Y_dict={"label2": Y2_test}
    )
    # A label-free dataset to exercise prediction without gold labels.
    test_dataset3 = EmmentalDataset(name="synthetic", X_dict={"data": X_test})

    task_to_label_dict = {"task1": "label1"}

    train_dataloader1 = EmmentalDataLoader(
        task_to_label_dict=task_to_label_dict,
        dataset=train_dataset1,
        split="train",
        batch_size=10,
        num_workers=2,
    )
    dev_dataloader1 = EmmentalDataLoader(
        task_to_label_dict=task_to_label_dict,
        dataset=dev_dataset1,
        split="valid",
        batch_size=10,
        num_workers=2,
    )
    test_dataloader1 = EmmentalDataLoader(
        task_to_label_dict=task_to_label_dict,
        dataset=test_dataset1,
        split="test",
        batch_size=10,
        num_workers=2,
    )

    task_to_label_dict = {"task2": "label2"}

    train_dataloader2 = EmmentalDataLoader(
        task_to_label_dict=task_to_label_dict,
        dataset=train_dataset2,
        split="train",
        batch_size=10,
    )
    dev_dataloader2 = EmmentalDataLoader(
        task_to_label_dict=task_to_label_dict,
        dataset=dev_dataset2,
        split="valid",
        batch_size=10,
    )
    test_dataloader2 = EmmentalDataLoader(
        task_to_label_dict=task_to_label_dict,
        dataset=test_dataset2,
        split="test",
        batch_size=10,
    )
    test_dataloader3 = EmmentalDataLoader(
        task_to_label_dict={"task2": None},
        dataset=test_dataset3,
        split="test",
        batch_size=10,
    )

    # Create task
    def ce_loss(task_name, immediate_output_dict, Y):
        module_name = f"{task_name}_pred_head"
        return F.cross_entropy(immediate_output_dict[module_name], Y)

    def output(task_name, immediate_output_dict):
        module_name = f"{task_name}_pred_head"
        return F.softmax(immediate_output_dict[module_name], dim=1)

    task_metrics = {"task1": ["accuracy"], "task2": ["accuracy", "roc_auc"]}

    class IdentityModule(nn.Module):
        def __init__(self):
            """Initialize IdentityModule."""
            super().__init__()

        def forward(self, input):
            return input, input

    tasks = [
        EmmentalTask(
            name=task_name,
            module_pool=nn.ModuleDict(
                {
                    "input_module0": IdentityModule(),
                    "input_module1": nn.Linear(2, 8),
                    f"{task_name}_pred_head": nn.Linear(8, 2),
                }
            ),
            task_flow=[
                Action(
                    name="input", module="input_module0", inputs=[("_input_", "data")]
                ),
                Action(name="input1", module="input_module1", inputs=[("input", 0)]),
                Action(
                    name=f"{task_name}_pred_head",
                    module=f"{task_name}_pred_head",
                    inputs="input1",
                ),
            ],
            module_device={"input_module0": -1},
            loss_func=partial(ce_loss, task_name),
            output_func=partial(output, task_name),
            action_outputs=[
                (f"{task_name}_pred_head", 0),
                ("_input_", "data"),
                (f"{task_name}_pred_head", 0),
                f"{task_name}_pred_head",
            ]
            if task_name == "task2"
            else None,
            scorer=Scorer(metrics=task_metrics[task_name]),
            require_prob_for_eval=True if task_name in ["task2"] else False,
            require_pred_for_eval=True if task_name in ["task1"] else False,
        )
        for task_name in ["task1", "task2"]
    ]

    # Build model
    mtl_model = EmmentalModel(name="all", tasks=tasks)

    Meta.config["learner_config"]["optimizer_config"][
        "parameters"
    ] = grouped_parameters

    # Create learner
    emmental_learner = EmmentalLearner()

    # Learning
    emmental_learner.learn(
        mtl_model,
        [train_dataloader1, train_dataloader2, dev_dataloader1, dev_dataloader2],
    )

    test1_score = mtl_model.score(test_dataloader1)
    test2_score = mtl_model.score(test_dataloader2)

    assert test1_score["task1/synthetic/test/accuracy"] >= 0.7
    assert (
        test1_score["model/all/test/macro_average"]
        == test1_score["task1/synthetic/test/accuracy"]
    )
    assert test2_score["task2/synthetic/test/accuracy"] >= 0.7
    assert test2_score["task2/synthetic/test/roc_auc"] >= 0.7

    test2_pred = mtl_model.predict(test_dataloader2, return_action_outputs=True)
    test3_pred = mtl_model.predict(
        test_dataloader3,
        return_action_outputs=True,
        return_loss=False,
    )

    assert test2_pred["uids"] == test3_pred["uids"]
    assert False not in [
        np.array_equal(
            test2_pred["probs"]["task2"][idx], test3_pred["probs"]["task2"][idx]
        )
        for idx in range(len(test3_pred["probs"]["task2"]))
    ]
    assert "outputs" in test2_pred
    assert "outputs" in test3_pred
    assert False not in [
        np.array_equal(
            test2_pred["outputs"]["task2"]["task2_pred_head_0"][idx],
            test3_pred["outputs"]["task2"]["task2_pred_head_0"][idx],
        )
        for idx in range(len(test2_pred["outputs"]["task2"]["task2_pred_head_0"]))
    ]
    assert False not in [
        np.array_equal(
            test2_pred["outputs"]["task2"]["_input__data"][idx],
            test3_pred["outputs"]["task2"]["_input__data"][idx],
        )
        for idx in range(len(test2_pred["outputs"]["task2"]["_input__data"]))
    ]
    assert len(test3_pred["outputs"]["task2"]["task2_pred_head"]) == 50
    assert len(test2_pred["outputs"]["task2"]["task2_pred_head"]) == 50

    test4_pred = mtl_model.predict(test_dataloader2, return_action_outputs=False)
    assert "outputs" not in test4_pred

    shutil.rmtree(dirpath)