Example #1
    else:
        step_func(optim_c, netC, netC_T, i, tloss)

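    # Log the combined, labeled, and unlabeled loss terms under the "training_pre" category.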
    logger.add("training_pre", "loss", tloss.item(), i + 1)
    logger.add("training_pre", "l_loss", l_loss.item(), i + 1)
    logger.add("training_pre", "u_loss", u_loss.item(), i + 1)
    if (i + 1) % image_interval == 0:
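        # Before evaluating, run unlabeled batches through the teacher in train mode
        # so that its running normalization statistics are refreshed.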
        netC.train()
        netC_T.train()
        for _ in range(int(FLAGS.n_labels / FLAGS.batch_size)):
            data_u, _ = next(itr)
            _ = netC_T(data_u.to(device))
        netC.eval()
        netC_T.eval()
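        # Evaluate both the student (netC) and the teacher (netC_T) without tracking gradients.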
        with torch.no_grad():
            total_t, correct_t, loss_t = evaluation.test_classifier(netC)
            total_tt, correct_tt, loss_tt = evaluation.test_classifier(netC_T)
        netC.train()
        netC_T.train()

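        # Record test metrics for student and teacher, then emit a progress message.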
        logger.add("testing", "loss", loss_t.item(), i + 1)
        logger.add("testing", "accuracy", 100 * (correct_t / total_t), i + 1)
        logger.add("testing", "loss_t", loss_tt.item(), i + 1)
        logger.add("testing", "accuracy_t", 100 * (correct_tt / total_tt),
                   i + 1)
        str_msg = logger_prefix.format(i + 1, max_iter,
                                       100 * ((i + 1) / max_iter))
        logger.log_info(str_msg, text_logger.info, ["testing"])

    if (i + 1) % print_interval == 0:
        prefix = logger_prefix.format(i + 1, max_iter,
                                      100 * ((i + 1) / max_iter))
Example #2
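    # Compute total / labeled / unlabeled losses and take an optimization step;
    # step_MT presumably also updates the teacher netC_T (e.g. via an EMA of netC).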
    tloss, l_loss, u_loss = loss_func(netC, netC_T, i, data, label, data_u)
    loss_classifier.step_MT(optim_c, netC, netC_T, i, tloss)

    logger.add("training", "loss", tloss.item(), i + 1)
    logger.add("training", "l_loss", l_loss.item(), i + 1)
    logger.add("training", "u_loss", u_loss.item(), i + 1)

    if (i + 1) % print_interval == 0:
        prefix = logger_prefix.format(i + 1, max_iter,
                                      100 * ((i + 1) / max_iter))
        cats = ["training"]
        logger.log_info(prefix, text_logger.info, cats=cats)

    if (i + 1) % test_interval == 0:
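        # Evaluate the teacher (netC_T) first, then the student (netC), and log both.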
        netC.eval()
        total_t, correct_t, loss_t = evaluation.test_classifier(netC_T)
        logger.add("testing", "loss_t", loss_t.item(), i + 1)
        logger.add("testing", "accuracy_t", 100 * (correct_t / total_t), i + 1)

        total_t, correct_t, loss_t = evaluation.test_classifier(netC)
        logger.add("testing", "loss", loss_t.item(), i + 1)
        logger.add("testing", "accuracy", 100 * (correct_t / total_t), i + 1)

        prefix = logger_prefix.format(i + 1, max_iter,
                                      100 * ((i + 1) / max_iter))
        cats = ["testing"]
        logger.log_info(prefix, text_logger.info, cats=cats)
        netC.train()

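    # Periodically persist the accumulated logger statistics to a pickle file.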
    if (i + 1) % FLAGS.save_every == 0:
        logger.save_stats("{:08d}.pkl".format(i))
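Both snippets are the body of a single training iteration; the surrounding loop, data iterators, and setup code are not shown. The sketch below illustrates an assumed surrounding context. Everything in it (inf_generator, run_training, and the train_step callable standing in for either example body) is inferred from how the snippets use i, max_iter, data, label, data_u, itr, and device, and is not the project's actual code.

def inf_generator(loader):
    # Cycle over a DataLoader indefinitely so the loop below can run for max_iter steps.
    while True:
        for batch in loader:
            yield batch


def run_training(train_step, labeled_loader, unlabeled_loader, device, max_iter):
    labeled_itr = inf_generator(labeled_loader)
    itr = inf_generator(unlabeled_loader)  # unlabeled iterator, as used by both examples
    for i in range(max_iter):
        data, label = next(labeled_itr)    # labeled batch
        data_u, _ = next(itr)              # unlabeled batch; its labels are discarded
        data, label = data.to(device), label.to(device)
        # The per-iteration body shown in Example #1 or #2 runs here, with access to
        # i, data, label, data_u plus the networks, optimizer, logger, and FLAGS.
        train_step(i, data, label, data_u)

In the examples, that body also references module-level objects such as netC, netC_T, optim_c, logger, evaluation, text_logger, and FLAGS, which the real project constructs before the loop starts.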