Example #1
# Imports needed to run this snippet on its own; myTransforms, SCdataset,
# myModelVgg, NetPrediction and EvalMetrics are project-local modules assumed
# to be importable from the surrounding repository.
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader


def main(args):
    print(args.__dict__)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    normMean = [0.6270, 0.5013, 0.7519]
    normStd = [0.1627, 0.1682, 0.0977]
    preprocess = myTransforms.Compose([
        myTransforms.Resize((50, 50)),
        myTransforms.ToTensor(),  # operates on the original (unaugmented) PIL image
        myTransforms.Normalize(normMean, normStd)
    ])

    testset = SCdataset(args.testpath, preprocess)
    testloader = DataLoader(testset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.nWorker)

    net = getattr(myModelVgg, args.net)(in_channels=3, num_classes=args.nCls)
    if len(args.gpu) > 1:
        net = torch.nn.DataParallel(net).cuda()
    else:
        net = net.cuda()

    if args.restore:
        net.load_state_dict(torch.load(args.restore))  # load the fine-tuned weights

    real, score, prediction, namelist = NetPrediction(testloader, net)
    result = EvalMetrics(real, prediction)
    for key in result:
        print(key, ': ', result[key])

    np.savez(args.savename,
             key_real=real,
             key_score=score,
             key_pred=prediction,
             key_namelist=namelist)
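
For reference, here is a minimal sketch of how this evaluation entry point could be driven from the command line. The argument names are inferred from the attribute accesses in main() above; the parser and its defaults are illustrative assumptions, not the project's actual CLI.

# Hypothetical CLI wrapper for the evaluation main() above (defaults are illustrative).
import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='Evaluate a trained classifier')
    parser.add_argument('--gpu', default='0', help='value assigned to CUDA_VISIBLE_DEVICES')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--testpath', required=True, help='path to the test set')
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--nWorker', type=int, default=4)
    parser.add_argument('--net', required=True, help='model name looked up via getattr(myModelVgg, ...)')
    parser.add_argument('--nCls', type=int, default=2)
    parser.add_argument('--restore', default='', help='checkpoint to load; leave empty to skip')
    parser.add_argument('--savename', default='test_result.npz')
    return parser.parse_args()

if __name__ == '__main__':
    main(parse_args())
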
Example #2
from torch.utils.data import DataLoader
import torch
from torchvision import transforms, models
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np

from genderDataset import GenderDataset
import myTransforms

csv = 'data/genders.csv'
data_dir = 'data/faces'

thetransforms = []
thetransforms.append(myTransforms.Resize((224, 224)))
thetransforms.append(myTransforms.ToTorchTensor())
#thetransforms.append(transforms.Normalize(mean=[0.485, 0.456, 0.406],
#                                          std=[0.229, 0.224, 0.225]))
thetransforms = transforms.Compose(thetransforms)

dataset = GenderDataset(csv, data_dir, transform=thetransforms)

# Quick sanity check on a single sample (kept commented out):
'''
img, g = dataset[10]
print(img)
print(img.shape)
img = transforms.ToPILImage()(img)
img.show()
print(g)
'''
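
Example #2 breaks off at this point. As a complement, here is a minimal sketch of how the dataset above might be consumed in batches, assuming GenderDataset yields (image_tensor, label) pairs as the commented-out inspection suggests; batch size and worker count are arbitrary.

# Usage sketch (assumption: GenderDataset returns (image_tensor, label) pairs).
loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=2)

images, genders = next(iter(loader))
print(images.shape)   # expected torch.Size([32, 3, 224, 224]) after Resize((224, 224))
print(genders[:8])    # first few labels in the batch

# Display one image from the batch, mirroring the single-sample check above.
plt.imshow(images[0].permute(1, 2, 0).numpy())
plt.title('label: {}'.format(genders[0]))
plt.show()
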
Example #3
# Imports needed to run this snippet on its own; myTransforms, SCdataset,
# myModelVgg, AdjustLR, NetPrediction and EvalMetrics are project-local helpers
# assumed to be importable from the surrounding repository.
import os
import random
import time

import numpy as np
import setproctitle
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader


def main(args):
    print(args.__dict__)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    normMean = [0.6270, 0.5013, 0.7519]
    normStd = [0.1627, 0.1682, 0.0977]
    ####################### transform definition, dataset reader and loader
    preprocess = myTransforms.Compose([
        myTransforms.Resize((50, 50)),
        myTransforms.RandomChoice([myTransforms.RandomHorizontalFlip(p=1),
                                   myTransforms.RandomVerticalFlip(p=1),
                                   myTransforms.AutoRandomRotation()]),  # randomly pick one of the transforms listed above
        # myTransforms.RandomAffine(degrees=[-180, 180], translate=[0., 1.], scale=[0., 2.], shear=[-180, 180, -180, 180]),
        myTransforms.ColorJitter(brightness=(0.8, 1.2), contrast=(0.8, 1.2)),
        myTransforms.RandomChoice([myTransforms.ColorJitter(saturation=(0.8, 1.2), hue=0.2),
                                   myTransforms.HEDJitter(theta=0.02)]),
        myTransforms.RandomElastic(alpha=2, sigma=0.06),
        myTransforms.ToTensor(),  # convert the augmented PIL image to a tensor
        myTransforms.Normalize(normMean, normStd)
    ])
    valpreprocess = myTransforms.Compose([myTransforms.Resize((50, 50)),
                                          myTransforms.ToTensor(),
                                          myTransforms.Normalize(normMean, normStd)])

    print('####################Loading dataset...')
    trainset = SCdataset(args.trainpath, preprocess)
    trainloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.nWorker)
    valset = SCdataset(args.validpath, valpreprocess)
    valloader = DataLoader(valset, batch_size=args.batch_size, shuffle=False, num_workers=args.nWorker)

    net = getattr(myModelVgg, args.net)(in_channels=3, num_classes=args.nCls)
    if len(args.gpu) > 1:
        net = torch.nn.DataParallel(net).cuda()
    else:
        net = net.cuda()

    if args.restore:
        net.load_state_dict(torch.load(args.restore))  # load the fine-tuned weights
        print('####################Loading model...', args.restore)
    # else:
    #     net_state_dict = net.state_dict()  # get the current network's state dict
    #     # pretrained_dict = torch.load('/home/cyyan/.cache/torch/checkpoints/resnet34-333f7ec4.pth')  # load a pretrained model
    #     pretrained_dict = torch.load('/home/cyyan/.cache/torch/checkpoints/alexnet-owt-4df8aa71.pth')  # load a pretrained model
    #     pretrained_dict_new = {k: v for k, v in pretrained_dict.items()
    #                            if k in net_state_dict and net_state_dict[k].size() == v.size()}  # keep only keys with matching names and shapes
    #     net_state_dict.update(pretrained_dict_new)  # merge the pretrained values into the current dict
    #     net.load_state_dict(net_state_dict)  # load the merged weights
    #     print('####################Loading pretrained model from torch cache checkpoints...')

    print('####################Loading criterion and optimizer...')
    weights = None if args.weights is None else torch.tensor(args.weights).cuda()
    criterion = getattr(nn, args.loss)(weight=weights).cuda()
    optimizer = optim.SGD(net.parameters(), lr=args.initLR, momentum=args.momentum, weight_decay=args.decay)

    print('####################Start training...')
    for epoch in range(args.epoches):
        start = time.time()
        net.train()
    
        AdjustLR(optimizer, epoch, args.epoches, args.initLR, power=0.9)
        print('Current LR:', optimizer.param_groups[0]['lr'])

        losses = 0.0
        for i, (img, label, _) in enumerate(trainloader):
            img = img.cuda()
            label = label.cuda().long()

            output = net(img)
            loss = criterion(output, label)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            losses += loss.item()

            print('Iteration {:3d} loss {:.6f}'.format(i + 1, loss.item()))
            setproctitle.setproctitle("Iteration:{}/{}".format(i + 1, len(trainset) // args.batch_size))
        print('Epoch{:3d}--Time(s){:.2f}--Avgloss{:.4f}'.format(epoch, time.time() - start, losses / (i + 1)))

        torch.save(net.state_dict(), args.savename + str(epoch) + '.pkl')
        print('Model has been saved!')

        real, _, prediction, _ = NetPrediction(valloader, net)
        result = EvalMetrics(real, prediction)
        for key in result:
            print(key, ': ', result[key])
    print('####################Finished Training!')
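
AdjustLR is referenced in the training loop above but not shown in these excerpts. Given the call AdjustLR(optimizer, epoch, args.epoches, args.initLR, power=0.9), one plausible reading is polynomial ("poly") learning-rate decay; the sketch below is an assumption about its behaviour, not the project's actual helper.

# Assumed implementation of AdjustLR as polynomial ("poly") decay of the learning rate.
def AdjustLR(optimizer, epoch, max_epoch, init_lr, power=0.9):
    lr = init_lr * (1 - epoch / max_epoch) ** power  # decays from init_lr towards 0
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr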