import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
# MyDataset, resnet18 and adjust_learning_rate are project-local helpers


def trainandsave(args):

    loss_log = open("loss_log.txt", 'w')
    trainData = MyDataset('train.txt', train=True)
    testData = MyDataset('val.txt', train=False)
    train_loader = DataLoader(trainData,
                              batch_size=args.batch,
                              num_workers=4,
                              shuffle=True)
    test_loader = DataLoader(testData,
                             batch_size=32,
                             num_workers=4,
                             shuffle=False,
                             drop_last=True)
    net = resnet18(pretrained=True, num_classes=5, drop_out=args.drop_out)
    if args.gpu:
        net = net.cuda()
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.wd)
    criterion = nn.CrossEntropyLoss()
    for epoch in range(args.epochs):
        net.train()
        if epoch in args.lr_decay_steps:
            adjust_learning_rate(optimizer, 0.1)
        running_loss = 0.0
        for batch, data in enumerate(train_loader):
            # get the inputs
            inputs, labels = data
            labels = labels.squeeze(1)

            if args.gpu:
                inputs = inputs.cuda()
                labels = labels.cuda()

            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            loss_log.write(str(loss.item()) + ' ')
            if batch % 10 == 9:
                print('[%d, %5d] loss: %.3f' %
                      (epoch, batch, running_loss / 10))
                running_loss = 0.0
        # TEST
        net.eval()
        total = 0
        correct = 0
        with torch.no_grad():
            for batch, data in enumerate(test_loader):
                inputs, labels = data
                labels = labels.squeeze(1)

                if args.gpu:
                    inputs = inputs.cuda()
                    labels = labels.cuda()

                # forward pass only; count correct top-1 predictions
                outputs = net(inputs)
                preds = torch.argmax(outputs, dim=1)
                correct += (preds == labels).sum().item()
                total += labels.size(0)
        torch.save(net, 'models/net_' + str(epoch) + '.pkl')
        print "total", total, "acc ", float(correct) / total
    loss_log.close()
    print('Finished Training')
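
The training loop above calls adjust_learning_rate, which is not defined in this example. A minimal sketch, assuming the helper simply scales every parameter group's learning rate in place by the given factor:

def adjust_learning_rate(optimizer, factor):
    # Assumed helper (not from the original): multiply each param group's lr by factor
    for param_group in optimizer.param_groups:
        param_group['lr'] *= factor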
Example #2
import torch
import network
from torch.autograd import Variable
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision
import pickle
import matplotlib.pyplot as plt

model = network.resnet18()
model.load_state_dict(torch.load(r'./cifar_net_224.pth'))
model.eval()


class myNet(nn.Module):
    def __init__(self, pretrained_model, layers):
        super(myNet, self).__init__()
        self.layers = layers
        # conv1 is a single module, so wrap it directly rather than unpacking it
        self.visu_conv1 = nn.Sequential(pretrained_model.conv1)

    def forward(self, x):
        out1 = self.visu_conv1(x)
        return out1


def get_features(pretrained_model, x):
    # stem and residual stages of the pretrained network
    net1 = pretrained_model.conv1
    bn1 = pretrained_model.bn1
    relu = pretrained_model.relu
    maxpool = pretrained_model.maxpool
    conv2_x = pretrained_model.conv2_x
    conv3_x = pretrained_model.conv3_x
    # thread x through the layers and return the intermediate feature maps
    out = maxpool(relu(bn1(net1(x))))
    feat2 = conv2_x(out)
    feat3 = conv3_x(feat2)
    return feat2, feat3
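
The myNet wrapper above exposes only the conv1 activations, and the matplotlib import suggests they are meant to be visualized. A minimal usage sketch with an illustrative random input (the layers argument and the input shape are assumptions, not from the original):

extractor = myNet(model, layers=1)
extractor.eval()

with torch.no_grad():
    x = torch.randn(1, 3, 224, 224)  # illustrative input; use a real, normalized image in practice
    feature_maps = extractor(x)      # shape (1, C, H, W)

# plot the first 16 channels as grayscale images
for i in range(min(16, feature_maps.size(1))):
    plt.subplot(4, 4, i + 1)
    plt.imshow(feature_maps[0, i].numpy(), cmap='gray')
    plt.axis('off')
plt.show()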
Example #3
import torch
from utils import *
import network
import train_config as config

stem = network.resnet18(halving=config.stem['halving'],
                        num_classes=config.stem['num_classes'],
                        feature_layer_dim=config.stem['feature_layer_dim'],
                        use_batchnorm=config.stem['use_batchnorm'],
                        dream=None,
                        preactivation=config.stem['preactivation'],
                        use_avgpool=config.stem['use_avgpool'])

optimizer = torch.optim.SGD(stem.parameters(),
                            lr=1.0,
                            weight_decay=config.loss['weight_l2_reg'],
                            momentum=config.train['momentum'],
                            nesterov=config.train['nesterov'])

resume_optimizer(optimizer, 'save/stitching_train/try_17')
print(optimizer.param_groups[0]['lr'])
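
resume_optimizer comes from the project's utils module and is not shown here. A hypothetical sketch, assuming the checkpoint directory holds a saved optimizer state dict (the file name optimizer.pth is made up for illustration):

import os

def resume_optimizer(optimizer, save_dir):
    # Hypothetical: 'optimizer.pth' is an assumed file name, not from the original code
    state = torch.load(os.path.join(save_dir, 'optimizer.pth'), map_location='cpu')
    optimizer.load_state_dict(state)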