Example #1
0
                                             train=False,
                                             download=True)
#transform=transform_test)
# NOTE: if accuracy is low, the likely culprit is transform_train — the bot
# package also performs its own transformations on the data.
total_train_steps_per_epoch = len(train_dataset) // args.batch_size

# Organize data: tensorize each split's raw arrays under a common schema.
dataset_dict = {}
for split, ds in (('train', train_dataset), ('valid', valid_dataset)):
    dataset_dict[split] = {
        'data': torch.tensor(ds.data),
        'targets': torch.tensor(ds.targets),
    }

# Move every tensor in the nested dict onto the target device.
dataset_dict = bot.map_nested(bot.to(device), dataset_dict)

print('=====> Data moved to GPU')

# Preprocess on the GPU: training data is padded by 4 pixels (exact padding
# mode defined by bot.pad), then both splits are transposed, normalised, and
# cast to half precision.
train_pipeline = [
    bot.partial(bot.pad, border=4),
    bot.transpose,
    bot.normalise,
    bot.to(torch.float16),
]
valid_pipeline = [bot.transpose, bot.normalise, bot.to(torch.float16)]
train_set = bot.preprocess(dataset_dict['train'], train_pipeline)
valid_set = bot.preprocess(dataset_dict['valid'], valid_pipeline)

if args.use_subset:
    # use only subset of the data (10%)
Example #2
0
# No cross-validation split: train on everything. valid_dataset exists only to
# satisfy the model interface — it is the *same* object as train_dataset, so
# "validation" accuracy is measured on the training data.
valid_dataset = train_dataset

# NOTE: if accuracy is low, the likely culprit is transform_train — the bot
# package also performs its own transformations on the data.
total_train_steps_per_epoch = len(train_dataset) // args.batch_size

# Organize data: tensorize each split's raw arrays under a common schema.
dataset_dict = {}
for split, ds in (('train', train_dataset), ('valid', valid_dataset)):
    dataset_dict[split] = {
        'data': torch.tensor(ds.data),
        'targets': torch.tensor(ds.targets),
    }

# Move every tensor in the nested dict onto the target device.
dataset_dict = bot.map_nested(bot.to(device), dataset_dict)

print('=====> Data moved to GPU')

# Per-channel statistics of the training images, used for normalization.
mean = tuple(np.mean(train_dataset.data, axis=(0, 1, 2)))
std = tuple(np.std(train_dataset.data, axis=(0, 1, 2)))
mean, std = [
    torch.tensor(s, device=device, dtype=torch.float16) for s in (mean, std)
]


def normalize(data, mean=mean, std=std):
    """Standardize `data` with the dataset statistics captured as defaults."""
    return (data - mean) / std


# Preprocess on the GPU.
# NOTE(review): train_set is normalised with bot.normalise while valid_set
# uses the dataset-statistics `normalize` above — since valid_dataset is the
# training data, the two splits are normalized differently; confirm intended.
train_set = bot.preprocess(dataset_dict['train'], [
    bot.partial(bot.pad, border=4),
    bot.transpose,
    bot.normalise,
    bot.to(torch.float16),
])
valid_set = bot.preprocess(dataset_dict['valid'],
                           [bot.transpose, normalize, bot.to(torch.float16)])

if args.use_subset:
    # use only subset of the data (10%)
Example #3
0
# Hard-coded to the first CUDA device.
# NOTE(review): presumably overrides any earlier device-selection logic.
device = 'cuda:0'

best_acc = 0  # best test accuracy seen so far
start_epoch = 0  # start from epoch 0 or from the last checkpoint epoch
total_epochs = args.epochs

# Organize data: both the 'train' and 'valid' slots point at the test set.
dataset_dict = {}
for split, ds in (('train', test_dataset), ('valid', test_dataset)):
    dataset_dict[split] = {
        'data': torch.tensor(ds.data),
        'targets': torch.tensor(ds.targets),
    }

# Move every tensor in the nested dict onto the target device.
dataset_dict = bot.map_nested(bot.to(device), dataset_dict)

print('=====> Data moved to GPU')

# Per-channel statistics of the test images, used for normalization.
mean = tuple(np.mean(test_dataset.data, axis=(0, 1, 2)))
std = tuple(np.std(test_dataset.data, axis=(0, 1, 2)))
mean, std = [
    torch.tensor(s, device=device, dtype=torch.float16) for s in (mean, std)
]


def normalize(data, mean=mean, std=std):
    """Standardize `data` with the test-set statistics captured as defaults."""
    return (data - mean) / std


# Preprocess on the GPU — only the validation pipeline is needed here.
valid_set = bot.preprocess(
    dataset_dict['valid'],
    [bot.transpose, normalize, bot.to(torch.float16)],
)
Example #4
0
        device = 'cuda:' + str(device_ids[0])
    else:
        device = 'cuda'
else:
    device = 'cpu'
# Hard-coded to the first CUDA device.
# NOTE(review): this overrides whatever the device-selection logic above chose.
device = 'cuda:0'

best_acc = 0  # best test accuracy seen so far
start_epoch = 0  # start from epoch 0 or from the last checkpoint epoch
total_epochs = args.epochs

# Organize data: both the 'train' and 'valid' slots point at the test set.
dataset_dict = {}
for split, ds in (('train', test_dataset), ('valid', test_dataset)):
    dataset_dict[split] = {
        'data': torch.tensor(ds.data),
        'targets': torch.tensor(ds.targets),
    }

# Move every tensor in the nested dict onto the target device.
dataset_dict = bot.map_nested(bot.to(device), dataset_dict)

print('=====> Data moved to GPU')

# Per-channel statistics of the test images, used for normalization.
mean = tuple(np.mean(test_dataset.data, axis=(0, 1, 2)))
std = tuple(np.std(test_dataset.data, axis=(0, 1, 2)))
mean, std = [
    torch.tensor(s, device=device, dtype=torch.float16) for s in (mean, std)
]


def normalize(data, mean=mean, std=std):
    """Standardize `data` with the test-set statistics captured as defaults."""
    return (data - mean) / std


# Preprocess on the GPU — only the validation pipeline is needed here.
valid_set = bot.preprocess(
    dataset_dict['valid'],
    [bot.transpose, normalize, bot.to(torch.float16)],
)

if args.use_subset:
    # use only subset of the data (10%)
    valid_set['data'],valid_set['targets'] = bot.get_subset(valid_set, 0.1)