def test_log(mock_now):
    """Logging a message appends a (timestamp, message) tuple to Logger.messages."""
    fake_timestamp = 123
    message = 'A test message'
    mock_now.now.return_value = fake_timestamp

    logger = Logger()
    logger.log(message)

    assert logger.messages == [(fake_timestamp, message)]
# --- Task B setup and training (EWC continual-learning experiment) ---
# NOTE(review): this chunk references names defined elsewhere in the file
# (`np`, `torch`, `model`, `CheckPoint`, `EarlyStopping`, `Logger`,
# `train_ewc`, `optimizer`, `base_loss_fn`, `params`, `earlystop_params`,
# `prev_opt_thetas`, `trainB_loader`) — confirm against the full script.

# Build and persist a fixed, reproducible permutation of 28 indices for
# task B (presumably one axis of 28x28 images — TODO confirm against the
# data pipeline). Seeding before shuffling makes the permutation repeatable.
ind_permute = np.arange(0, 28)
np.random.seed(0)
np.random.shuffle(ind_permute)
np.save("permuteB.npy", ind_permute)

# Load the previous FIM (Fisher information saved after task A) and move
# each tensor onto the model's device so it can enter the EWC penalty term.
fishers_cpu = torch.load("fisherA.pth")
fishers = []
for fisher in fishers_cpu:
    fishers.append(fisher.to(model.device))

# Create callbacks: checkpointing, early stopping, and metric logging.
checkpoint = CheckPoint(model, "modelB.ckpt")
earlystop = EarlyStopping(**earlystop_params)
list_metrics = ["loss_trainB", "loss_testB", "acc_testA", "acc_testB"]
logger = Logger(list_metrics=list_metrics, logger_file="log-metrics.npy")

# Train and evaluate on task B with the EWC-regularised loss.
# NOTE(review): the loop body appears truncated in this view — `flg_stop`,
# `checkpoint`, and `earlystop` are initialised but never used below;
# evaluation/early-stopping logic presumably follows in the full file.
flg_stop = False
for epoch in range(1, params["n_epochs"] + 1):
    print("\n[EPOCH %d]" % (epoch))
    loss_trainB = train_ewc(model, trainB_loader, optimizer, base_loss_fn,
                            params["lamda"], fishers, prev_opt_thetas, epoch,
                            description="Train on task B")
    print()
def test_init():
    """A freshly constructed Logger starts with an empty message list."""
    logger = Logger()
    assert logger.messages == []