import torch

from src.abstract_translator import AbstractTranslator
from src.member.abducibles import abducibles, exclusive
from src.member.evaluate import Evaluator
from src.member.local_params import number_of_arguments
from src.member.manager import MemberManager
from src.networks.mnist_nets import COMP_NET
from src.params import useGPU
from src.run import scenario_test

network = COMP_NET()
if useGPU:
    # move the network to the GPU when requested, falling back to the CPU
    # if CUDA is not available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    network.to(device)

translator = AbstractTranslator(abducibles, exclusive)
dataManager = MemberManager()
outputClasses = [10] * number_of_arguments  # ten digit classes per argument position
evaluator = Evaluator()

scenario = 'member/' + str(number_of_arguments)

# model_name = 'model_samples_3000_iter_400_epoch_1.mdl'
model_name = 'model_samples_3000_iter_9000_epoch_3.mdl'

if __name__ == '__main__':
    scenario_test(network, outputClasses, translator, dataManager, scenario,
                  model_name, evaluator)
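The fragment below (from a separate evaluation script) picks up midway through a sweep over saved checkpoints. As a point of reference, here is a minimal sketch, not taken from the repository, of the preamble such a sweep would need: collecting the checkpoints into iter_models keyed by iteration number. The models_root and results_root locations, the *.mdl glob pattern, and the train_dataset name are assumptions; only the E1/E2 test-set names come from the fragment's own comment.

import re
from pathlib import Path

models_root = "models"        # assumed checkpoint directory, not from the repo
results_root = "results"      # assumed output directory, not from the repo
train_dataset = "train"       # hypothetical training-set name
test_datasets = ["E1", "E2"]  # the two test datasets named in the fragment

# index every saved checkpoint by the iteration number in its filename,
# e.g. 'model_samples_3000_iter_9000_epoch_3.mdl' -> 9000
iter_models = dict()
for iter_model in Path(models_root).glob("*.mdl"):
    n_iter = int(re.search(r"iter_(\d+)", iter_model.name).group(1))
    iter_models[n_iter] = iter_model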
re.search(pattern=r"iter_(\d*)", string=iter_model.name).groups()[0]) iter_models[n_iter] = iter_model # evaluate with each of the two test datasets E1 and E2 for test_dataset in test_datasets: accuracies = list() # evaluate model at each iteration (the models are sorted according to their iteration number) for n_iter in sorted(iter_models.keys()): model_name = iter_models[n_iter].name accuracy = scenario_test(network, outputClasses, translator, dataManager, scenario_name, model_name, evaluator, test_dataset=f"{test_dataset}.csv", train_dataset=train_dataset) accuracies.append([n_iter, accuracy]) results_path = Path(results_root) / scenario_name results_path.mkdir(exist_ok=True) # write results to files with open( results_path / f"train_{train_dataset}_eval_{test_dataset}_results.csv", "w") as f: pd.DataFrame(data=accuracies,