# --- Example #1 (score: 0) ---
    training_labels)
# Shape the training tensors for the CNN: images become NCHW
# (batch, 1 channel, 28x28); labels gain a trailing unit dimension.
training_data = training_data.view(-1, 1, 28, 28)
training_labels = training_labels.unsqueeze(1)
training_set = TensorDataset(training_data, training_labels)
training_loader = DataLoader(training_set, batch_size=args.batch_size)

# Mirror the training pipeline for the validation split: numpy arrays
# to tensors, NCHW images, labels with a trailing unit dimension.
validation_data = th.from_numpy(validation_data).view(-1, 1, 28, 28)
validation_labels = th.from_numpy(validation_labels).unsqueeze(1)
validation_set = TensorDataset(validation_data, validation_labels)
validation_loader = DataLoader(validation_set, batch_size=args.batch_size)

# Loss, network and optimiser. L1 loss is applied directly to the
# (possibly noised) label tensors; Adam runs at the default 1e-3 rate.
criterion = nn.L1Loss()
model = CNN()
if cuda:
    model.cuda()  # moves parameters to the GPU in place
optimizer = Adam(model.parameters(), lr=0.001)

# Training loop: for each batch, corrupt a fraction of the labels with
# random class noise before fitting (a label-noise robustness setup).
# NOTE(review): the loop body is truncated in this chunk -- the
# forward/backward pass presumably follows; confirm against full source.
for epoch in range(args.n_epochs):
    for iteration, batch in enumerate(training_loader):
        data, labels = batch
        if cuda:
            data, labels = data.cuda(), labels.cuda()

        # Work on a copy so the clean labels stay intact.
        noisy_labels = th.zeros(labels.size())
        noisy_labels.copy_(labels)
        # Number of labels to corrupt this batch; args.noise is
        # presumably a fraction in [0, 1] -- confirm at the parser.
        n_noises = int(args.noise * labels.size()[0])
        # Random offsets drawn from {1..9}; presumably applied to a
        # subset of noisy_labels further down (not visible here).
        noise = th.from_numpy(np.random.choice(np.arange(1, 10), n_noises))
        if cuda:
            noise = noise.cuda()
# --- Example #2 (score: 0) ---
from torch.utils.data import DataLoader, TensorDataset

from network import CNN

parser = ArgumentParser()
parser.add_argument('--path', type=str)
args = parser.parse_args()

# The pickle holds a (training, validation, test) triple of array pairs.
partitions = ('training', 'validation', 'test')
# Close the archive deterministically instead of leaking the handle
# (the original passed gzip.open(...) straight into pickle.load and
# never closed it).
with gzip.open('mnist.pkl.gz') as archive:
    data = pickle.load(archive)
# NOTE(review): under Python 3 the classic Python-2 MNIST pickle needs
# pickle.load(archive, encoding='latin1') -- confirm the runtime before
# changing, as it would alter behavior on Python 2.
data = dict(zip(partitions, data))

# Restore the trained network from the checkpoint and move it to the GPU.
cnn = CNN()
cnn.load_state_dict(th.load(args.path))
cnn.cuda()

# Build one DataLoader per partition, converting the pickled numpy
# arrays to tensors on the way in.
batch_size = 1024
data_loaders = {}
for split, arrays in data.items():
    tensors = tuple(th.from_numpy(a) for a in arrays)
    data_loaders[split] = DataLoader(TensorDataset(*tensors), batch_size)


def n_matches(p, labels):
    # Count predictions in `p` that agree with `labels`. `p` is reduced
    # over dim 1, so it carries class scores on that axis.
    # NOTE(review): the function is truncated in this chunk -- the
    # `return` is not visible; presumably `return n` follows.
    _, p = th.max(p, 1)  # argmax over the class dimension
    p = th.squeeze(p)  # drop the singleton dim left by th.max
    indicator = p == labels  # element-wise correctness mask
    n = th.sum(indicator.double())
    # NOTE(review): `.data[0]` is the pre-0.4 scalar-extraction idiom;
    # modern torch uses n.item() -- confirm the torch version in use.
    n = n.data[0]