Example #1
0
 def __init__(self, config):
     """Build seed, device, CIFAR-100 loaders, model, loss and optimizer.

     Args:
         config: experiment configuration carrying random_seed,
             batch_size dict, nr_block, widen_factor, init_lr,
             momentum and weight_decay.
     """
     self.config = config
     # Fixed seed for reproducible runs.
     torch.manual_seed(config.random_seed)
     # Prefer the first GPU when one is present, otherwise fall back to CPU.
     self.device = torch.device(
         'cuda:0' if torch.cuda.is_available() else 'cpu')

     # One loader per split; only the training split is shuffled.
     def _loader(split, shuffle):
         return DataLoader(cifar100(config, split),
                           config.batch_size[split],
                           shuffle,
                           num_workers=2)

     self.train_loader = _loader('train', True)
     self.val_loader = _loader('val', False)
     self.test_loader = _loader('test', False)

     # Wide ResNet classifier with cross-entropy objective and SGD+momentum.
     self.net = WideResnet(config.nr_block, config.widen_factor)
     self.net.to(self.device)
     self.criterion = torch.nn.CrossEntropyLoss()
     self.optimizer = torch.optim.SGD(self.net.parameters(),
                                      config.init_lr,
                                      config.momentum,
                                      weight_decay=config.weight_decay)
Example #2
0
def set_model():
    """Create the wide resnet-28 model (train mode, on GPU) and its losses.

    Returns:
        (model, criteria_x, criteria_u): the network, the supervised
        cross-entropy loss, and the unsupervised MSE consistency loss,
        all moved to the GPU.
    """
    model = WideResnet(n_classes, k=wresnet_k, n=wresnet_n) # wide resnet-28
    model.train()
    model.cuda()
    # Qualify with nn. for consistency with nn.MSELoss below; this avoids
    # depending on a separate bare `CrossEntropyLoss` import elsewhere.
    criteria_x = nn.CrossEntropyLoss().cuda()
    criteria_u = nn.MSELoss().cuda()
    return model, criteria_x, criteria_u
Example #3
0
def set_model():
    """Build the wresnet-28-2 model and the labeled/unlabeled criteria.

    Returns:
        (model, criteria_x, criteria_u): network in train mode on GPU,
        plus one cross-entropy loss for each data stream.
    """
    net = WideResnet(args.n_classes,
                     k=args.wresnet_k,
                     n=args.wresnet_n,
                     feature_dim=args.feature_dim)
    net.train()
    net.cuda()
    # Both the labeled (x) and unlabeled (u) streams use cross-entropy.
    crit_labeled = nn.CrossEntropyLoss().cuda()
    crit_unlabeled = nn.CrossEntropyLoss().cuda()
    return net, crit_labeled, crit_unlabeled
Example #4
0
# Split the dataset indices into train/validation subsets (10% validation).
# print(dataset)
train_idx, val_idx = split_indices(len(dataset), 0.1)

print("length of train idx: " + str(len(train_idx)))
train_sampler = SubsetRandomSampler(train_idx)
val_sampler = SubsetRandomSampler(val_idx)

# Device-aware loaders: batches are moved to the default device on iteration.
train_dl = DeviceDataloader(
    DataLoader(dataset, BATCH_SIZE, sampler=train_sampler, pin_memory=True),
    get_default_device())
val_dl = DeviceDataloader(
    DataLoader(dataset, BATCH_SIZE, sampler=val_sampler, pin_memory=True),
    get_default_device())

model = WideResnet()
to_device(model, get_default_device())

optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# optimizer = torch.optim.Adadelta(model.parameters(), lr=LEARNING_RATE, rho=0.9, eps=1e-6, weight_decay=0)
avg_validation_losses = []
avg_validation_accuracy = []

print("length of training dataloader: " + str(len(train_dl)))
start_time = time.time()
# NOTE(review): the block below had lost its opening ''' (only the closing
# quotes survived), which made this file a syntax error; it also references
# undefined names (y, z, x, math, plt). Restored as an inert string literal,
# matching the surviving closing ''', until it is rewritten properly.
'''
for i in range(EPOCH):
    print("EPOCH: " + str(i))
    count = 0
    for data_batch, target_batch in train_dl:
        if count % 10 == 0:
            print("batch: " + str(count))
    y.append(pow(i, 2))
    z.append(math.sin((1/2) * i))

print(y)

print(x)
plt.plot(x, y, label="validation loss")
plt.xlabel("epoches")
plt.ylabel("loss")
plt.show()'''

# Round-trip the model weights through disk to verify save/load works.
model = WideResnet()

torch.save(model.state_dict(), "./test.pt")

model2 = WideResnet()
model2.load_state_dict(torch.load("./test.pt"))

print("loaded!")
'''avg_validation_losses = [12.0, 23.0, 34.0]
avg_validation_accuracy = [0.1, 0.2, 0.3]

file_loss = open("./loss.txt", "w+")
file_loss.writelines([str(ele) + "\n" for ele in avg_validation_losses])
file_accr = open("./accuracy.txt", "w+")
file_accr.writelines([str(ele) + "\n" for ele in avg_validation_accuracy])'''
Example #6
0
import torch
from config import Config
from model import WideResnet
import hiddenlayer as hl

# Trace the network on a dummy CIFAR-sized input (1x3x32x32) in eval mode
# and render the resulting computation graph to a PNG file.
net = WideResnet(Config())
net.eval()
graph = hl.build_graph(net, torch.zeros([1, 3, 32, 32]))
graph.save(path='WideResnet_28_10', format='png')
Example #7
0
class Trainer(object):
    """Train and evaluate a WideResnet classifier on CIFAR-100.

    Builds the data loaders, model, loss and SGD optimizer from ``config``;
    ``train`` runs the full loop, checkpoints the best validation model and
    returns a metrics history.
    """

    def __init__(self, config):
        self.config = config
        # Fixed seed for reproducible runs.
        torch.manual_seed(config.random_seed)
        # Prefer the first GPU when available, otherwise the CPU.
        self.device = torch.device(
            'cuda:0' if torch.cuda.is_available() else 'cpu')
        # One loader per split; only the training split is shuffled.
        train_data = cifar100(config, 'train')
        self.train_loader = DataLoader(train_data,
                                       config.batch_size['train'],
                                       True,
                                       num_workers=2)
        val_data = cifar100(config, 'val')
        self.val_loader = DataLoader(val_data,
                                     config.batch_size['val'],
                                     False,
                                     num_workers=2)
        test_data = cifar100(config, 'test')
        self.test_loader = DataLoader(test_data,
                                      config.batch_size['test'],
                                      False,
                                      num_workers=2)
        # Wide ResNet classifier, cross-entropy objective, SGD with momentum.
        self.net = WideResnet(config.nr_block, config.widen_factor)
        self.net.to(self.device)
        self.criterion = torch.nn.CrossEntropyLoss()
        lr, momentum, weight_decay = config.init_lr, config.momentum, config.weight_decay
        parameters = self.net.parameters()
        self.optimizer = torch.optim.SGD(parameters,
                                         lr,
                                         momentum,
                                         weight_decay=weight_decay)

    def train(self):
        """Run the full training loop.

        Trains for ``config.nr_epoch`` epochs, decaying the learning rate at
        the epochs listed in ``config.lr_boundaries``, checkpointing the model
        with the best validation accuracy, and finally reporting test metrics
        with that best checkpoint reloaded.

        Returns:
            dict with per-epoch 'train_loss_list', 'train_acc_list',
            'val_loss_list', 'val_acc_list' plus scalar 'test_loss' and
            'test_acc'.
        """
        opt_val_acc = 0
        train_loss_list, train_acc_list, val_loss_list, val_acc_list = [], [], [], []
        for epoch in range(1, self.config.nr_epoch + 1):
            if epoch in self.config.lr_boundaries:
                self.adjust_lr(self.config.lr_decay_rate)
            train_loss, train_acc = 0, 0
            for i, data in enumerate(self.train_loader, 1):
                imgs, labels = data[0].to(self.device), data[1].to(self.device)
                logits = self.net(imgs)
                loss = self.criterion(logits, labels)
                # update nn's parameters
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                # Incremental running mean over the epoch:
                # mean_i = (i-1)/i * mean_{i-1} + value_i / i.
                train_loss = (i - 1) / i * train_loss + loss.item() / i
                _, labels_pre = logits.max(dim=1)
                acc = (labels_pre == labels).float().mean()
                train_acc = (i - 1) / i * train_acc + acc.item() / i
            train_loss_list.append(train_loss)
            train_acc_list.append(train_acc)
            print('_______training_______')
            print('epoch: ', epoch, 'loss: ', '%.3f' % train_loss, 'acc: ',
                  '%.3f' % train_acc)
            val_loss, val_acc = self.evaluate('val')
            # Checkpoint whenever validation accuracy ties or improves.
            if val_acc >= opt_val_acc:
                opt_val_acc = val_acc
                torch.save(self.net.state_dict(),
                           self.config.model_dir + '/opt')
            val_loss_list.append(val_loss)
            val_acc_list.append(val_acc)
            print('_______validation_______')
            print('epoch: ', epoch, 'loss: ', '%.3f' % val_loss, 'acc: ',
                  '%.3f' % val_acc)
        print('Training finish. The optimal validation accuracy is: ',
              '%.3f' % opt_val_acc)
        # Reload the best validation checkpoint before the final test pass.
        self.net.load_state_dict(torch.load(self.config.model_dir + '/opt'))
        test_loss, test_acc = self.evaluate('test')
        print('test_loss: ', '%.3f' % test_loss, 'test_acc: ',
              '%.3f' % test_acc)
        history = {
            'train_loss_list': train_loss_list,
            'train_acc_list': train_acc_list,
            'val_loss_list': val_loss_list,
            'val_acc_list': val_acc_list,
            'test_loss': test_loss,
            'test_acc': test_acc
        }
        return history

    def evaluate(self, mode):
        """Return (mean loss, mean accuracy) over the 'val' or 'test' split.

        Temporarily switches the network to eval mode and restores train
        mode before returning.
        """
        assert mode in ['val', 'test']
        if mode == 'val':
            loader = self.val_loader
        else:
            loader = self.test_loader
        self.net.eval()
        ret_loss, ret_acc = 0, 0
        # Gradients are never needed here; no_grad avoids building autograd
        # graphs during inference (saves memory/time, outputs unchanged).
        with torch.no_grad():
            for i, data in enumerate(loader, 1):
                imgs, labels = data[0].to(self.device), data[1].to(self.device)
                logits = self.net(imgs)
                loss = self.criterion(logits, labels)
                ret_loss = (i - 1) / i * ret_loss + loss.item() / i
                _, labels_pre = logits.max(dim=1)
                acc = (labels_pre == labels).float().mean()
                ret_acc = (i - 1) / i * ret_acc + acc.item() / i
        self.net.train()
        return ret_loss, ret_acc

    def adjust_lr(self, rate):
        """Multiply every param group's learning rate by ``rate``."""
        for param_group in self.optimizer.param_groups:
            param_group['lr'] *= rate