def main():
    args = parse_args()
    num_classes = 10 if args.dataset == 'CIFAR10' else 100

    have_cuda = torch.cuda.is_available()
    def cast(x):
        return x.cuda() if have_cuda else x

    checkpoint = torch.load(args.checkpoint)

    weights_unpacked = {}
    for k, w in checkpoint.items():
        if w.dtype == torch.uint8:
            # weights are packed with np.packbits function
            scale = np.sqrt(2 / (w.shape[1] * w.shape[2] * w.shape[3] * 8))
            signed = np.unpackbits(w.numpy(), axis=1).astype(np.int8) * 2 - 1
            weights_unpacked[k[7:]] = torch.from_numpy(signed).float() * scale
        else:
            weights_unpacked[k[7:]] = w

    model = WRN_McDonnell(args.depth, args.width, num_classes)
    model.load_state_dict(weights_unpacked)
    model = cast(model)
    model.eval()

    class_acc = ClassErrorMeter(accuracy=True)

    for inputs, targets in tqdm(DataLoader(create_dataset(args, train=False), 256)):
        with torch.no_grad():
            class_acc.add(model(cast(inputs)).cpu(), targets)

    print(class_acc.value())
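
The uint8 tensors in this checkpoint hold sign bits produced by np.packbits; the loop above reverses that packing. A minimal, self-contained sketch of the round trip (hypothetical weight shape, not code from the repository):

import numpy as np
import torch

w_float = torch.randn(64, 16, 3, 3)                    # stand-in conv weight
bits = (w_float.sign().numpy() > 0).astype(np.uint8)   # 1 where the weight is positive
packed = np.packbits(bits, axis=1)                     # uint8, shape (64, 2, 3, 3)

# unpack back to {-1, +1} and rescale with the He-init factor used above
unpacked = np.unpackbits(packed, axis=1).astype(np.int8) * 2 - 1
scale = np.sqrt(2 / (packed.shape[1] * packed.shape[2] * packed.shape[3] * 8))
w_binary = torch.from_numpy(unpacked).float() * scale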
Example #2
    def _create_dataset(self, station_path: str) -> Dataset:
        station_settings = _new_settings()

        if self.train:
            station_settings.train.path = station_path
        else:
            station_settings.test.path = station_path

        dataset = create_dataset(
            station_settings,
            Model.transformations['train' if self.train else 'test'],
            train=self.train)
        dataset.station_path = station_path

        if self.shuffle:
            dataset.shuffle()

        return dataset
Example #3
        # - metaepoch-14090-total-95.8-class0-97.4-class1-94.2.pt

        model_name = 'metaepoch-4260-total-94.33-class0-95.72-class1-92.04.pt'
        checkpoint_path = pj(
            config.VISUALIZE_PATH,
            f'runs/meta-learning/train/99%/checkpoints/{model_name}')

        test_test_samples = 10000
        writer = SummaryWriter(checkpoint_path.replace('train', 'test'))

        test_settings = _new_settings()
        test_settings.train.path = test_settings.test.path

        print("Creating Train and Test Datasets")
        dataset_train = create_dataset(test_settings,
                                       Model.transformations['train'],
                                       train=True)
        dataset_test = create_dataset(test_settings,
                                      Model.transformations['test'],
                                      train=False)

        dataset_final = copy.deepcopy(dataset_train)
        del dataset_final.file_paths[test_test_samples:]
        del dataset_train.file_paths[:test_test_samples]

        assert verify_dataset_integrity(dataset_train, dataset_test)
        assert verify_dataset_integrity(dataset_train, dataset_final)

        # Set up test loaders (using default batch size of 128)
        train_loader = create_loader(dataset_train, train=True)
        eval_loader = create_loader(dataset_test, train=False)
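
dataset_final above is carved out of dataset_train by trimming the shared file_paths list in place; a standalone sketch of that deepcopy/del pattern with made-up file names:

import copy

file_paths = [f"sample_{i}.png" for i in range(20)]   # stand-in for dataset.file_paths
n_holdout = 5

holdout_paths = copy.deepcopy(file_paths)
del holdout_paths[n_holdout:]      # keep only the first n_holdout entries
del file_paths[:n_holdout]         # drop those same entries from the training list

assert set(holdout_paths).isdisjoint(file_paths)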
Example #4
def main():
    args = parse_args()
    num_classes = 10 if args.dataset == 'CIFAR10' else 100

    have_cuda = torch.cuda.is_available()

    def cast(x):
        return x.cuda() if have_cuda else x

    model = WRN_McDonnell(args.depth, args.width, num_classes)

    # For loading matconvnet checkpoint
    if '.mat' in args.checkpoint:
        from scipy.io import loadmat

        checkpoint = loadmat(args.checkpoint)

        # get model parameter names and move conv_last to the end of the list
        named_params = list(model.state_dict().items())
        if 'conv_last' in named_params[1][0]:
            named_params.append(named_params.pop(1))

        # initialize layer counters
        conv_cnt = bn_mean_cnt = bn_var_cnt = 0
        weights_unpacked = {}
        for name, param in named_params:
            if 'conv' in name:
                scale = checkpoint['LayerWeights'][conv_cnt, 0]
                signed = checkpoint['BinaryWeights'][0, conv_cnt].astype(
                    np.int8) * 2 - 1
                # swap conv filter axes
                signed = np.transpose(signed, (3, 2, 0, 1))
                weights_unpacked[name] = torch.from_numpy(
                    signed).float() * scale
                conv_cnt += 1
            elif 'mean' in name:
                weights_unpacked[name] = torch.from_numpy(
                    checkpoint['Moments'][bn_mean_cnt + 1, 0][:, 0])
                bn_mean_cnt += 1
            elif 'var' in name:
                weights_unpacked[name] = torch.from_numpy(
                    checkpoint['Moments'][bn_var_cnt + 1, 0][:, 1]**2)
                bn_var_cnt += 1

        # initialize bn_last weight and bias
        weights_unpacked['bn_last.weight'] = torch.ones_like(
            model.bn_last.weight)
        weights_unpacked['bn_last.bias'] = torch.zeros_like(model.bn_last.bias)

        # initialize input bn parameters
        bn_inp_mean = cast(torch.from_numpy(checkpoint['Moments'][0, 0][:, 0]))
        bn_inp_var = cast(
            torch.from_numpy(checkpoint['Moments'][0, 0][:, 1]**2))
        bn_inp_scale = cast(torch.from_numpy(checkpoint['BNG'][0, 0]))
        bn_inp_offset = cast(torch.from_numpy(checkpoint['BNB'][0, 0]))

        # Create dataloader
        data_loader = DataLoader(create_mat_dataset(args, train=False), 256)
    else:
        checkpoint = torch.load(args.checkpoint)

        weights_unpacked = {}
        for k, w in checkpoint.items():
            if w.dtype == torch.uint8:
                # weights are packed with np.packbits function
                scale = np.sqrt(2 / (w.shape[1] * w.shape[2] * w.shape[3] * 8))
                signed = np.unpackbits(w.numpy(), axis=1).astype(np.int8) * 2 - 1
                weights_unpacked[
                    k[7:]] = torch.from_numpy(signed).float() * scale
            else:
                weights_unpacked[k[7:]] = w

        # Create dataloader
        data_loader = DataLoader(create_dataset(args, train=False), 256)

    model.load_state_dict(weights_unpacked)
    model = cast(model)
    model.eval()

    correct = 0
    total = 0
    for inputs, targets in data_loader:
        with torch.no_grad():

            inputs, targets = cast(inputs), cast(targets)

            # apply input bn
            if '.mat' in args.checkpoint:
                inputs = F.relu(
                    F.batch_norm(inputs, bn_inp_mean, bn_inp_var, bn_inp_scale,
                                 bn_inp_offset))

            outputs = model(inputs)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

    print(100. * correct / total)
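
In the .mat branch the input batch norm is applied functionally with fixed running statistics; a small illustrative sketch (toy shapes, not repository code) showing that F.batch_norm with training=False reduces to a per-channel affine rescaling:

import torch
import torch.nn.functional as F

x = torch.randn(8, 3, 32, 32)
mean, var = torch.zeros(3), torch.ones(3) * 2.0
gamma, beta = torch.full((3,), 0.5), torch.full((3,), 0.1)

out = F.batch_norm(x, mean, var, gamma, beta, training=False, eps=1e-5)
manual = (x - mean[None, :, None, None]) / torch.sqrt(var[None, :, None, None] + 1e-5)
manual = manual * gamma[None, :, None, None] + beta[None, :, None, None]
assert torch.allclose(out, manual, atol=1e-4)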
Example #5
def create_dataset(window_size, file_name):
    df_crypto, df_twitter, df_reddit = load_social_datasets()
    dataset = create_dataset(df_crypto, df_twitter, df_reddit, window_size,
                             file_name)
    return dataset
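
# Rough sketch (toy data, not the project's loaders): how a window_size
# parameter typically slices a merged time series into (window, next value)
# training pairs.
import numpy as np

series = np.arange(10, dtype=float)    # stand-in for a merged price/sentiment signal
window_size = 3
X = np.stack([series[i:i + window_size] for i in range(len(series) - window_size)])
y = series[window_size:]
print(X.shape, y.shape)                # (7, 3) (7,)
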
def new_cv(skip_existing=True):
    import copy

    configuration = dv.read_config(file)
    configuration['train']['path'] = folder
    configuration['test']['path'] = folder

    dirs = list(map(os.path.basename, map(get_name, dv.get_paths(folder))))

    initialize_csv(csv_path)
    if skip_existing:
        df = pd.read_csv(csv_path)
        names = list(map(get_name, df['Name'].tolist()))
        print(f"Skipping {len(names)} directories already in CSV")
        dirs = list(set(dirs).difference(set(names)))

    for i, dir in enumerate(dirs):
        print("\n", "-" * 20)
        print(f"Progress: {i}/{len(dirs)}")
        print("-" * 20, "\n")

        net, optimizer, criterion = main.create_model()

        ignore_test = copy.copy(dirs)
        ignore_test.remove(dir)
        configuration['train']['ignore'] = [dir]
        configuration['test']['ignore'] = ignore_test

        dataset_train = main.create_dataset(configuration,
                                            model.transformations['train'],
                                            train=True)
        dataset_test = main.create_dataset(configuration,
                                           model.transformations['test'],
                                           train=False)

        # Hold out 10% of the training data as a final test set of unseen samples
        dataset_final = copy.deepcopy(dataset_train)
        ntest_samples = int(len(dataset_train) * .1)
        del dataset_final.file_paths[ntest_samples:]
        del dataset_train.file_paths[:ntest_samples]

        assert verify_dataset_integrity(dataset_train, dataset_test)
        assert verify_dataset_integrity(dataset_train, dataset_final)

        weigh_classes = dict(enumerate(configuration['weigh_classes']))
        train_loader = create_loader(dataset_train,
                                     train=True,
                                     weigh_classes=weigh_classes)
        test_loader = create_loader(dataset_test, train=False)
        final_test_loader = create_loader(dataset_final, train=False)

        try:
            # Reload environment variables and main file with new configuration
            print("Evaluating Net on " + dir)
            evaluator, best_epoch = train_best_model(train_loader,
                                                     test_loader,
                                                     net,
                                                     optimizer,
                                                     criterion,
                                                     net,
                                                     epochs=1,
                                                     writer=None,
                                                     write=False,
                                                     yield_every=50_000)

            evaluator = evaluate(net, final_test_loader, copy_net=False)
            print('\n', evaluator, '\n')
            write_to_csv(
                dir,
                evaluator.class_details(0).amount_correct,
                evaluator.class_details(0).amount_total,
                evaluator.class_details(1).amount_correct,
                evaluator.class_details(1).amount_total,
                str(evaluator.total_percent_correct()),
                evaluator.iteration,
            )

        except Exception as e:
            print("Failed to run neural net: ", e)
            raise
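
The ignore lists in new_cv implement leave-one-directory-out cross-validation: each pass trains on every station directory except one and tests only on the held-out one. A toy sketch of that bookkeeping (directory names are invented):

dirs = ['station_a', 'station_b', 'station_c']

for held_out in dirs:
    train_ignore = [held_out]                           # hide the held-out dir from training
    test_ignore = [d for d in dirs if d != held_out]    # hide every other dir from testing
    print(held_out, train_ignore, test_ignore)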