Example #1
def train():
    # TensorBoard logger for the training run
    logger = Logger(T_B_GRAPH_PATH, is_train=True)

    total_step = len(train_loader)
    for epoch in range(NUM_EPOCHS):
        correct = 0
        total = 0
        losses = []
        main_accs = []
        for i, (images, labels) in enumerate(train_loader):
            images = images.to(device)
            labels = labels.to(device)

            # Forward pass
            outputs = net(images)
            loss = criterion(outputs, labels)

            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # calculate running accuracy over the samples seen so far this epoch
            _, pred_y = torch.max(outputs.data, 1)
            correct += (pred_y == labels).sum().item()
            total += labels.size(0)
            main_acc = (correct / total) * 100

            losses.append(loss.item())
            main_accs.append(main_acc)

            if i % BATCH_SIZE == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.8f}'.format(
                    epoch + 1, NUM_EPOCHS, i + 1, total_step, loss.item()))

        # write log
        logger.scalar_summary('loss', torch.mean(torch.FloatTensor(losses)),
                              epoch + 1)
        logger.scalar_summary('Main_accuracy',
                              torch.mean(torch.FloatTensor(main_accs)),
                              epoch + 1)

        if is_write_sub_log:
            real_acc = logger.accuracy(net, TRAIN_DATASET_PATH, epoch, "Real",
                                       device)
            clay_acc = logger.accuracy(net, TRAIN_DATASET_PATH, epoch, "Clay",
                                       device)
            gltn_acc = logger.accuracy(net, TRAIN_DATASET_PATH, epoch, "Gltn",
                                       device)

            logger.scalar_summary('Real_accuracy', real_acc, epoch + 1)
            logger.scalar_summary('Clay_accuracy', clay_acc, epoch + 1)
            logger.scalar_summary('Gltn_accuracy', gltn_acc, epoch + 1)

        if epoch % MODEL_SAVE_RATE == 0:
            if not os.path.exists(MODEL_SAVE_PATH):
                os.makedirs(MODEL_SAVE_PATH)
            # the filename uses NUM_EPOCHS rather than the current epoch, so each
            # periodic save overwrites the same checkpoint that eval() later loads
            model_save_path = MODEL_SAVE_PATH + "/{}_epoch_{}.pth".format(
                args.model, NUM_EPOCHS)
            torch.save(net.state_dict(), model_save_path)
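
The Logger class used in both examples is not shown. As a rough sketch of what its TensorBoard side might look like (inferred from the call sites above, not the original implementation; the accuracy, FAR, and FRR helpers are omitted), scalar_summary could simply wrap torch.utils.tensorboard.SummaryWriter.add_scalar:

# --- assumed sketch, not the original Logger implementation ---
import os
from torch.utils.tensorboard import SummaryWriter

class Logger:
    def __init__(self, log_dir, is_train=True):
        # keep train and eval runs in separate sub-directories of the graph path
        sub_dir = 'train' if is_train else 'eval'
        self.writer = SummaryWriter(log_dir=os.path.join(log_dir, sub_dir))

    def scalar_summary(self, tag, value, step):
        # one scalar point per epoch (e.g. mean loss or accuracy)
        self.writer.add_scalar(tag, value, step)
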
Example #2
def eval():
    # TensorBoard logger for the evaluation run
    logger = Logger(T_B_GRAPH_PATH, is_train=False)

    eval_model_path = MODEL_SAVE_PATH + '/{}_epoch_{}.pth'.format(
        args.model, NUM_EPOCHS)
    print(eval_model_path)
    if os.path.exists(eval_model_path):
        net.load_state_dict(torch.load(eval_model_path))
    else:
        print('checkpoint not found, evaluating with current weights')
    # eval mode (batchnorm uses moving mean/variance instead of mini-batch statistics)
    net.eval()

    # per-material and overall accuracy on the train and test data
    for dataset_path in (TRAIN_DATASET_PATH, TEST_DATASET_PATH):
        for material in ("Real", "Clay", "Gltn", ""):
            logger.accuracy(net,
                            dataset_path,
                            NUM_EPOCHS,
                            material,
                            device,
                            MODEL_LOG_PATH,
                            BATCH_SIZE,
                            do_logwrite=True)

    # FAR and FRR
    logger.FAR(net,
               TEST_DATASET_PATH,
               device,
               MODEL_LOG_PATH,
               BATCH_SIZE,
               do_logwrite=True)
    logger.FRR(net,
               TEST_DATASET_PATH,
               device,
               MODEL_LOG_PATH,
               BATCH_SIZE,
               do_logwrite=True)

    # Test the model
    with torch.no_grad():
        correct = 0
        total = 0
        true_positive = 0
        predict_true = 0
        real_true = 0

        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = net(images)
            _, pred_y = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (pred_y == labels).sum().item()

            # binary-metric bookkeeping; assumes labels are 0/1 with 1 as the positive class
            true_positive += (pred_y * labels).sum().item()
            predict_true += pred_y.sum().item()
            real_true += labels.sum().item()

        accuracy = 100 * correct / total
        precision = (true_positive / (1e-9 + predict_true))
        recall = (true_positive / (1e-9 + real_true))
        f1score = (2 * precision * recall) / (1e-9 + precision + recall)

        print('Test Accuracy of the model on the test images: {} %'.format(
            accuracy))
        print('prec: {:.4f} recall: {:.4f} f1: {:.4f}'.format(
            precision, recall, f1score))

        with open('./logs/dataset__log.txt', 'a') as f:
            f.write('prec: {:.4f} recall: {:.4f} f1: {:.4f}\n'.format(
                precision, recall, f1score))