Example #1
import os

import torch

# FashionMnistHandler, CNNModel, and Solver are project-local modules assumed importable here.

def eval_models(models_paths: list, path_to_data: str):
    if len(models_paths) == 0:
        # return an empty list so the return type matches the non-empty case
        return []

    # build the test loader
    ds = FashionMnistHandler(path_to_data, False)
    ds.download()
    ds.load()
    # the noise parameters are irrelevant here because the test loader must stay noise-free
    _, _, test_loader = ds.get_noisy_loaders(0, '1', 0.2, 128, 128, 128)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    test_acc = []
    for model_file in models_paths:
        # load the checkpoint and derive a readable model name from the file path
        checkpoint = torch.load(model_file, map_location=device)
        model_name = os.path.basename(model_file)

        # rebuild the model and restore its weights from the checkpoint
        model = CNNModel()
        model.load_state_dict(checkpoint['model_state_dict'])
        model.to(device)
        loss_fn = torch.nn.CrossEntropyLoss()

        # evaluating
        _, acc = Solver.eval(model,
                             device,
                             loss_fn=loss_fn,
                             data_loader=test_loader)
        test_acc.append(acc)
        print(f"Model {model_name} has {acc:.4f} acc in test dataset")

    return test_acc
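
A minimal usage sketch for eval_models; the checkpoints/ directory, the CNN_*.pt glob pattern, and the data/ path are illustrative assumptions, not part of the original code:

import glob

# Hypothetical paths: gather every saved checkpoint and evaluate it on the test set.
checkpoint_files = sorted(glob.glob("checkpoints/CNN_*.pt"))
accuracies = eval_models(checkpoint_files, path_to_data="data/")
if accuracies:
    print(f"Mean test accuracy over {len(accuracies)} models: "
          f"{sum(accuracies) / len(accuracies):.4f}")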
Example #2
            os.makedirs(batch_summaries_dir)

        for tp_noise in ['1', '2', '3']:
            print("NOISE: " + tp_noise)
            for loss in [torch.nn.CrossEntropyLoss(), DMILoss(num_classes=2)]:
                loss_name = loss.__class__.__name__
                print(f"Loss: {loss_name}\n")
                for noise_value in noise_values:
                    # run one experiment per (loss, noise type, noise level) combination

                    name = f'CNN_{loss_name}_{tp_noise}_{noise_value}'

                    print(f"Training {name} with noise of type {tp_noise} and probability {noise_value}...")

                    # data preparation
                    dataset = FashionMnistHandler(data_dir, False)
                    dataset.load()
                    train_loader, val_loader, test_loader = dataset.get_noisy_loaders(p_noise=noise_value,
                                                                                      type_noise=tp_noise,
                                                                                      val_size=1 / 6,
                                                                                      train_batch_size=batch_size,
                                                                                      val_batch_size=128,
                                                                                      test_batch_size=128)

                    # model, optimizer, summary
                    model = CNNModel()
                    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
                    summ = Summary(name, type_noise=tp_noise, noise_rate=noise_value)

                    solver = Solver(name, PROJECT_DIR, batch_model_dir, batch_summaries_dir, model,
                                    optimizer, loss, summ, train_loader, val_loader, test_loader)
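
The fragment above stops right after the Solver is constructed; presumably a training call follows. A hedged continuation sketch, where the train method name and the n_epochs argument are assumptions and may not match the real Solver API:

                    # Hypothetical continuation: the actual training entry point is not shown in this snippet.
                    solver.train(n_epochs)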
Example #3
    noise_values = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    lr = 1e-4

    noise_model_dir = MODEL_DIR
    if not os.path.exists(noise_model_dir):
        os.makedirs(noise_model_dir)

    noise_summaries_dir = SUMMARIES_DIR
    if not os.path.exists(noise_summaries_dir):
        os.makedirs(noise_summaries_dir)

    for n_noise in [5, 10, 15, 20]:

        print(f"N_NOISE_ADD: {n_noise}")
        # data preparation
        dataset = FashionMnistHandler(data_dir, False, n_noise=n_noise)
        dataset.clean_processed()
        dataset.download()
        dataset.load()
        # the dataset already contains noise, so no extra noise is injected (p_noise=0)
        train_loader, val_loader, test_loader = dataset.get_noisy_loaders(
            p_noise=0,
            type_noise=tp_noise,
            val_size=1 / 6,
            train_batch_size=128,
            val_batch_size=128,
            test_batch_size=128)

        print(f"Training adding {n_noise} new examples per clean example...")

        for loss in [torch.nn.CrossEntropyLoss(), DMILoss(num_classes=2)]: