from torchvision.datasets import MNIST, CIFAR10
from torchvision.transforms import PILToTensor


def create_dsdl(download_loc='./', dataset='MNIST'):
    if dataset == 'MNIST':

        ds_train = MNIST(root=download_loc, 
                         train=True, 
                         download=True,
                         transform=PILToTensor())

        ds_test = MNIST(root=download_loc, 
                        train=False, 
                        download=True,
                        transform=PILToTensor())
    
    elif dataset == 'CIFAR10':
    
        ds_train = CIFAR10(root=download_loc, 
                           train=True, 
                           download=True,
                           transform=PILToTensor())

        ds_test = CIFAR10(root=download_loc, 
                          train=False, 
                          download=True,
                          transform=PILToTensor())

    else:
        raise ValueError(f"Unsupported dataset: {dataset}")

    return ds_train, ds_test
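
For reference, a minimal usage sketch, assuming the imports above:

# Fetch both splits of CIFAR10 and inspect the first sample.
ds_train, ds_test = create_dsdl(download_loc='./data', dataset='CIFAR10')
print(len(ds_train), len(ds_test))  # 50000 10000
img, label = ds_train[0]
print(img.shape, img.dtype)  # torch.Size([3, 32, 32]) torch.uint8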
Example #2
import inspect

import torch
from torchvision import datasets as legacy_datasets
from torchvision.transforms import PILToTensor


def _legacy_special_options_map(benchmark):
    # Collect every constructor parameter available on the legacy dataset
    # class, walking the MRO up to (but not including) VisionDataset.
    available_parameters = set()

    for cls in benchmark.legacy_cls.__mro__:
        if cls is legacy_datasets.VisionDataset:
            break

        available_parameters.update(
            inspect.signature(cls.__init__).parameters)

    # Keep only the special keyword arguments this dataset actually accepts.
    available_special_kwargs = benchmark._SPECIAL_KWARGS.intersection(
        available_parameters)

    special_options = {}

    if "download" in available_special_kwargs:
        special_options["download"] = True

    if "transform" in available_special_kwargs:
        special_options["transform"] = PILToTensor()
        if "target_transform" in available_special_kwargs:
            special_options["target_transform"] = torch.tensor
    elif "transforms" in available_special_kwargs:
        # Datasets that take a joint transform get both elements converted.
        special_options["transforms"] = JointTransform(
            PILToTensor(), PILToTensor())

    return special_options
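
JointTransform is not defined in this snippet. A minimal sketch of what such a helper could look like (a hypothetical class, assumed to apply one transform to the image and another to the target, matching how VisionDataset invokes transforms(img, target)):

# Hypothetical helper assumed by the snippet above: applies a separate
# transform to each element of an (image, target) pair.
class JointTransform:
    def __init__(self, image_transform, target_transform):
        self.image_transform = image_transform
        self.target_transform = target_transform

    def __call__(self, image, target):
        return self.image_transform(image), self.target_transform(target)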
Example #3
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import PILToTensor


def create_dsdl(download_loc='./', batch_size=32):
    ds_train = MNIST(root=download_loc,
                     train=True,
                     download=True,
                     transform=PILToTensor())
    ds_test = MNIST(root=download_loc,
                    train=False,
                    download=True,
                    transform=PILToTensor())

    # Wrap both splits in batched DataLoaders (shuffle left at its default).
    dl_train = DataLoader(ds_train, batch_size=batch_size)
    dl_test = DataLoader(ds_test, batch_size=batch_size)

    return dl_train, dl_test
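
A quick usage sketch, assuming the definition above:

# Build the loaders and pull one batch.
dl_train, dl_test = create_dsdl(download_loc='./data', batch_size=64)
X, y = next(iter(dl_train))
print(X.shape, y.shape)  # torch.Size([64, 1, 28, 28]) torch.Size([64])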
Example #4
    def __init__(self, batch_size):
        self.batch_size = batch_size
        self.mnist = FashionMNIST('.', download=True, transform=PILToTensor())
        self.classes = self.mnist.classes
        self.num_classes = len(self.mnist.class_to_idx)
        # For each class, precompute the indices of every sample with that label.
        self.target_by_classes = [
            [idx for idx in range(len(self.mnist)) if self.mnist.targets[idx] == i]
            for i in range(self.num_classes)
        ]
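
One way this index table might be used (a hypothetical sketch, assuming an instance dm of the surrounding class) is to draw a random sample of a given class:

import random

# Pick a random FashionMNIST sample of class 3 ("Dress") using the
# precomputed per-class index lists.
idx = random.choice(dm.target_by_classes[3])
img, label = dm.mnist[idx]
assert label == 3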
Example #5
import json
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import PILToTensor


def train_model(numepochs, arch, nn_config):
    # Net and evaluate_model are assumed to be defined elsewhere in the module.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Get the training and test sets.
    cifar_dataset = datasets.CIFAR10
    ds_train = cifar_dataset(root="./trainingdata",
                             train=True,
                             download=True,
                             transform=PILToTensor())
    ds_test = cifar_dataset(root="./trainingdata",
                            train=False,
                            download=True,
                            transform=PILToTensor())

    learning_rate = 1e-2

    arch_dict = json.loads(arch)
    nn_config_dict = json.loads(nn_config)

    dl_train = DataLoader(dataset=ds_train, batch_size=32)
    dl_test = DataLoader(dataset=ds_test, batch_size=32)

    model = Net(arch_dict, nn_config_dict)
    model = model.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learning_rate)  # Adam optimizer
    model.train()

    print_freq = 2
    for n in range(numepochs):
        if n % print_freq == 0:
            # Reset the running training statistics for this reporting window.
            total_loss = 0.0
            total_n = 0
            total_correct = 0

        for X, y in dl_train:
            # PILToTensor yields uint8 in [0, 255]; roughly center and scale.
            X = (X - 128.) / 255.

            X = X.flatten(start_dim=1, end_dim=-1)
            X = X.to(device)
            y = y.to(device)

            pred = model(X)
            loss = criterion(pred, y)

            if n % print_freq == 0:
                # Accumulate plain Python numbers so the autograd graph
                # is not kept alive across batches.
                total_loss += loss.item()
                total_n += X.shape[0]
                total_correct += (pred.argmax(dim=1) == y).sum().item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # After the first epoch, report after every epoch.
        print_freq = 1
        if n % print_freq == 0:
            val_acc = evaluate_model(model, dl_test, device=device)
            print(f"epoch {n+1}:")
            print(f"train loss={total_loss / len(dl_train):.4f}, "
                  f"train acc={total_correct / total_n:.4f}")
            print(f"Validation-Accuracy={val_acc}")
            print("")

    return model
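
evaluate_model is not shown in this example. A minimal sketch of a compatible implementation (an assumption, mirroring the call site above and the same preprocessing used during training) could be:

def evaluate_model(model, dataloader, device='cpu'):
    # Compute classification accuracy over a dataloader, without gradients.
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X = (X - 128.) / 255.        # same scaling as in training
            X = X.flatten(start_dim=1)   # same flattening as in training
            X, y = X.to(device), y.to(device)
            pred = model(X)
            correct += (pred.argmax(dim=1) == y).sum().item()
            total += y.shape[0]
    model.train()
    return correct / total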
Example #6
from torchvision.datasets.utils import extract_archive


def coco_prepare_legacy_root(benchmark, root):
    # Extract the image and annotation archives so the legacy COCO
    # dataset can read them from disk.
    images, annotations = benchmark.new_raw_dataset.resources(
        benchmark.new_config)
    extract_archive(str(root / images.file_name))
    extract_archive(str(root / annotations.file_name))


DATASET_BENCHMARKS = [
    DatasetBenchmark(
        "caltech101",
        legacy_config_map=caltech101_legacy_config_map,
        prepare_legacy_root=base_folder(),
        legacy_special_options_map=lambda config: dict(
            download=True,
            transform=PILToTensor(),
            target_transform=JointTransform(torch.tensor, torch.tensor),
        ),
    ),
    DatasetBenchmark(
        "caltech256",
        legacy_config_map=no_split,
        prepare_legacy_root=base_folder(),
    ),
    DatasetBenchmark(
        "celeba",
        prepare_legacy_root=base_folder(),
        legacy_config_map=lambda benchmark: dict(
            split="valid" if benchmark.new_config.split == "val" else benchmark
            .new_config.split,
            # The new dataset always returns all annotations