import os
import pickle

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

from resNet import Model
from functions import filter_size, train, test
from rescale import RandomResizedCrop, RandomRescale

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

nb_epochs = 100
learning_rate = 0.00001
batch_size = 128
batch_log = 70

train_transf = transforms.Compose([
    RandomRescale(size=28, scales=(0.3, 1), sampling="uniform"),
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, ))
])
valid_transf = transforms.Compose([
    RandomRescale(size=28, scales=(0.3, 1), sampling="uniform"),
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, ))
])
test_transf = transforms.Compose([
    RandomRescale(size=28, scales=(0.3, 1), sampling="uniform"),
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, ))
])
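
# `RandomRescale` comes from the local `rescale` module, whose source is not
# shown in this excerpt. Below is a minimal sketch of the assumed behavior,
# for illustration only (a hypothetical implementation -- the real module may
# differ): draw a factor from `scales`, resample the image, and center it on
# a fixed size x size canvas.
import random

from PIL import Image
import torchvision.transforms.functional as TF


class RandomRescaleSketch:
    def __init__(self, size, scales, sampling="uniform"):
        self.size = size
        self.scales = scales
        self.sampling = sampling  # only "uniform" sampling is sketched here

    def __call__(self, img):
        factor = random.uniform(*self.scales)
        new_side = max(1, int(round(self.size * factor)))
        img = TF.resize(img, new_side)  # resample by the drawn factor
        canvas = Image.new(img.mode, (self.size, self.size))  # black canvas
        canvas.paste(img, ((self.size - img.size[0]) // 2,
                           (self.size - img.size[1]) // 2))
        return canvas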

# repeats, size, ratio, nratio, srange and the open file handle `log` are
# defined earlier in the source file (not part of this excerpt)
parameters = {
    "batch size": batch_size,
    "repetitions": repeats,
    "size": size,
    "ratio": ratio,
    "nb channels": nratio,
    "overlap": srange
}
pickle.dump(parameters, log)

root = './mnistdata'
if not os.path.exists(root):
    os.mkdir(root)

train_transf = transforms.Compose([
    transforms.Resize(40),
    RandomRescale(size=40, scales=(1.0, 0.24), sampling="normal"),
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, ))
])

train_set = datasets.MNIST(root=root,
                           train=True,
                           transform=train_transf,
                           download=True)
train_loader = DataLoader(dataset=train_set,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=1,
                          pin_memory=True)

criterion = nn.CrossEntropyLoss()
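
# A minimal sketch of the epoch loop this setup drives. Assumptions, not
# shown in the excerpt itself: `Model` can be constructed without arguments,
# `train` mirrors the `test(model, loader, criterion, epoch, batch_log,
# device)` signature used elsewhere on this page and returns (loss, acc),
# and Adam is the optimizer.
model = Model().to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
for epoch in range(nb_epochs):
    train_l, train_a = train(model, train_loader, optimizer, criterion,
                             epoch, batch_log, device)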
Example #3
scales = [(1.0, 1.0), (0.9, 1.1), (0.8, 1.2), (0.6, 1.4), (0.5, 1.5),
          (0.4, 1.6), (0.3, 1.7)]

pickle.dump(scales, log)

criterion = nn.CrossEntropyLoss()

avg_test_losses = []
avg_test_accs = []
std_test_losses = []
std_test_accs = []
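
# For each training scale range, the loop below rebuilds the data pipeline
# and retrains; the four lists above are meant to collect the mean/std of
# test loss and accuracy across the repeated runs.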

for scale in scales:
    uniform = transforms.Compose([
        transforms.Resize(40),
        RandomRescale(size=40, scales=scale, sampling="uniform"),
        transforms.ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, ))
    ])

    root = './mnistdata'
    if not os.path.exists(root):
        os.mkdir(root)

    train_set = datasets.MNIST(root=root,
                               train=True,
                               transform=uniform,
                               download=True)
    train_loader = DataLoader(dataset=train_set,
                              batch_size=batch_size,
                              shuffle=True)
Example #4
pickle.dump(repeats, log)

s_avg_t_losses = []
s_std_t_losses = []
s_avg_t_accs = []
s_std_t_accs = []

s_avg_v_losses = []
s_std_v_losses = []
s_avg_v_accs = []
s_std_v_accs = []

for s in scales:
    test_transf = transforms.Compose([
        RandomRescale(size=32, sampling="uniform", scales=(s, s)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
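    # `idx` is assumed to be a shuffled index list over the CIFAR-10 training
    # set, built like the MNIST split in a later example on this page
    # (np.random.shuffle over range(len(train_set))); its construction is
    # not part of this excerpt.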
    valid_set = Subset(
        datasets.CIFAR10(root='./cifardata',
                         train=True,
                         transform=test_transf,
                         download=True), idx[:10000])
    valid_loader = DataLoader(valid_set, batch_size=batch_size, shuffle=True)

    m_t_losses = []
    m_t_accs = []
    m_v_losses = []
    m_v_accs = []
Example #5
        #pickle.dump(name, model_log)
        pickle.dump(model, model_log)
        pickle.dump({
            "train_loss": train_loss,
            "train_acc": train_acc
        }, model_log)
        model_log.close()

    m_test_acc = []
    for ii in range(repeats):
        # lists of test acc for each scale with model m, trial ii
        test_acc = []
        for s in scales:
            test_transf = transforms.Compose([
                transforms.Resize(64),
                RandomRescale(size=64, scales=(s, s),
                              sampling="uniform"),  # apply scaling uniformly
                transforms.ToTensor(),
                # note: MNIST normalization stats used on CIFAR-10 images
                transforms.Normalize((0.1307, ), (0.3081, ))
            ])
            test_set = datasets.CIFAR10(root=root,
                                        train=False,
                                        transform=test_transf,
                                        download=True)
            test_loader = DataLoader(dataset=test_set,
                                     batch_size=batch_size,
                                     shuffle=False,
                                     num_workers=1,
                                     pin_memory=True)

            test_l, test_a = test(model, test_loader, criterion, epoch,
                                  batch_log, device)
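            # continuation restored from the fuller copy of this loop near
            # the end of this page:
            test_acc.append(test_a)
        m_test_acc.append(test_acc)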
Example #6
for ii in range(repeats):
    for m in range(len(models)):

        model = pickle.load(
            open("./trained_models/trained_model_{}_{}.pickle".format(m, ii),
                 "rb"))
        model.to(device)

        # lists of last test loss and acc for each scale with model m, trial ii
        s_test_loss = []
        s_test_acc = []
        for s in scales:
            test_transf = transforms.Compose([
                transforms.Resize(40),
                RandomRescale(size=40, scales=(s, s), sampling="uniform"),
                transforms.ToTensor(),
                transforms.Normalize((0.1307, ), (0.3081, ))
            ])
            test_set = datasets.CIFAR10(root=root,
                                        train=False,
                                        transform=test_transf,
                                        download=True)
            test_loader = DataLoader(dataset=test_set,
                                     batch_size=batch_size,
                                     shuffle=False,
                                     num_workers=1,
                                     pin_memory=True)

            test_l, test_a = test(model, test_loader, criterion, 200,
                                  batch_log, device)
Example #7
import os
import pickle

import numpy as np
import torch
from torch.utils.data import DataLoader, Subset
from torchvision import datasets, transforms

from mnistCNNs import SiCNN, SiCNN2, SiCNN3, miniSiAll
from functions import train, test
from rescale import RandomRescale

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

nb_epochs = 200
learning_rate = 0.0001
batch_size = 256
batch_log = 70

train_transf = transforms.Compose([
    RandomRescale(size=28, scales=(0.3, 1)),
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, ))
])
valid_transf = transforms.Compose([
    RandomRescale(size=28, scales=(0.3, 1)),
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, ))
])
test_transf = transforms.Compose([
    RandomRescale(size=28, scales=(0.3, 1)),
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, ))
])

root = './mnistdata'
if not os.path.exists(root):
    os.mkdir(root)

train_set = datasets.MNIST(root=root, train=True, transform=train_transf, download=True)
valid_set = datasets.MNIST(root=root, train=True, transform=valid_transf, download=True)

idx = list(range(len(train_set)))
np.random.seed(11)
np.random.shuffle(idx)
train_idx = idx[20000:]
valid_idx = idx[:20000]  # validation set of size 20,000
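
# A minimal sketch of how this split could be consumed (an assumption: the
# original file builds its loaders roughly this way, mirroring the `Subset`
# usage in the CIFAR-10 example above; the excerpt is truncated before
# that point).
train_loader = DataLoader(Subset(train_set, train_idx),
                          batch_size=batch_size, shuffle=True)
valid_loader = DataLoader(Subset(valid_set, valid_idx),
                          batch_size=batch_size, shuffle=True)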
Example #8
import os
import pickle

import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

from rescale import RandomRescale

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

nb_epochs = 100
learning_rate = 0.00001
batch_size = 128
batch_log = 70

transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.1307, ), (0.3081, ))])
rescale = transforms.Compose([
    RandomRescale(size=28, scales=(0.3, 1)),
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, ))
])
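# Note the contrast between the two pipelines: `transform` leaves the digits
# at their native scale, while `rescale` first resamples each digit by a
# random factor drawn from (0.3, 1).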

root = './mnistdata'
if not os.path.exists(root):
    os.mkdir(root)
train_set = datasets.MNIST(root=root,
                           train=True,
                           transform=transform,
                           download=True)
train_loader = DataLoader(dataset=train_set,
                          batch_size=batch_size,
                          shuffle=True)

            # tail of a per-model training loop (its header is not part of
            # this excerpt)
            train_loss.append(train_l)
            train_acc.append(train_a)
        model_log = open("trained_model_{}_bis.pickle".format(m), "wb")
        #pickle.dump(name, model_log)
        pickle.dump(model, model_log)
        pickle.dump({"train_loss": train_loss, "train_acc": train_acc}, model_log)
        model_log.close()


    m_test_acc = []
    for ii in range(repeats):
        # lists of test acc for each scale with model m, trial ii
        test_acc = []
        for s in scales:
            test_transf = transforms.Compose([
                transforms.Resize(64),
                RandomRescale(size=64, scales=(s, s),
                              sampling="uniform"),  # apply scaling uniformly
                transforms.ToTensor(),
                transforms.Normalize((0.1307, ), (0.3081, ))
            ])
            test_set = datasets.CIFAR10(root=root,
                                        train=False,
                                        transform=test_transf,
                                        download=True)
            test_loader = DataLoader(dataset=test_set,
                                     batch_size=batch_size,
                                     shuffle=False,
                                     num_workers=1,
                                     pin_memory=True)

            test_l, test_a = test(model, test_loader, criterion, epoch, batch_log, device)
            test_acc.append(test_a)
        m_test_acc.append(test_acc)
    avg_test_acc = np.mean(np.array(m_test_acc), axis=0)
    std_test_acc = np.std(np.array(m_test_acc), axis=0)

    test_accs_dict[name] = {"avg": avg_test_acc, "std": std_test_acc}

pickle.dump(test_accs_dict, log)

log.close()