Example #1
def main():
    opt = opts().parse()
    now = datetime.datetime.now()
    logger = Logger(opt.saveDir, now.isoformat())
    model, optimizer = getModel(opt)
    criterion = torch.nn.MSELoss().cuda()

    # if opt.GPU > -1:
    #     print('Using GPU {}'.format(opt.GPU))
    #     model = model.cuda(opt.GPU)
    #     criterion = criterion.cuda(opt.GPU)
    # dev = opt.device
    model = model.cuda()

    val_loader = torch.utils.data.DataLoader(
            MPII(opt, 'val'), 
            batch_size = 1, 
            shuffle = False,
            num_workers = int(ref.nThreads)
    )

    if opt.test:
        log_dict_train, preds = val(0, opt, val_loader, model, criterion)
        sio.savemat(os.path.join(opt.saveDir, 'preds.mat'), mdict = {'preds': preds})
        return
    # Pretrain pyramidnet once; first define the training data loader for gen
    train_loader = torch.utils.data.DataLoader(
            MPII(opt, 'train'), 
            batch_size = opt.trainBatch, 
            shuffle = (opt.DEBUG == 0),
            num_workers = int(ref.nThreads)
    )
    # Call the train method
    for epoch in range(1, opt.nEpochs + 1):
        log_dict_train, _ = train(epoch, opt, train_loader, model, criterion, optimizer)
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if epoch % opt.valIntervals == 0:
            log_dict_val, preds = val(epoch, opt, val_loader, model, criterion)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            #saveModel(model, optimizer, os.path.join(opt.saveDir, 'model_{}.checkpoint'.format(epoch)))
            torch.save(model, os.path.join(opt.saveDir, 'model_{}.pth'.format(epoch)))
            sio.savemat(os.path.join(opt.saveDir, 'preds_{}.mat'.format(epoch)), mdict = {'preds': preds})
        logger.write('\n')
        if epoch % opt.dropLR == 0:
            lr = opt.LR * (0.1 ** (epoch // opt.dropLR))
            print('Drop LR to {}'.format(lr))
            adjust_learning_rate(optimizer, lr)
    logger.close()
    torch.save(model.cpu(), os.path.join(opt.saveDir, 'model_cpu.pth'))
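The helper adjust_learning_rate is called above but not shown in the snippet. A minimal sketch, assuming it simply overwrites the learning rate of every parameter group:

def adjust_learning_rate(optimizer, lr):
    # Overwrite the learning rate of every parameter group in place.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr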
Example #2
def __init__(self, opt, split):
    self.ratio3D = opt.ratio3D
    self.split = split
    self.dataset3D = H36M(opt, split)
    self.dataset2D = MPII(opt, split, returnMeta = True)
    self.nImages2D = len(self.dataset2D)
    self.nImages3D = min(len(self.dataset3D), int(self.nImages2D * self.ratio3D))

    print('#Images2D {}, #Images3D {}'.format(self.nImages2D, self.nImages3D))
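Only the constructor is shown. A plausible completion (an assumption, not from the source) would route the first nImages2D indices to the 2D dataset and the remainder to the 3D one:

def __len__(self):
    return self.nImages2D + self.nImages3D

def __getitem__(self, index):
    # Indices below nImages2D sample MPII (2D); the rest sample H36M (3D).
    if index < self.nImages2D:
        return self.dataset2D[index]
    return self.dataset3D[index - self.nImages2D]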
Example #3

discriminator_model_conf = nn.DataParallel(discriminator_model_conf)
discriminator_model_pose = nn.DataParallel(discriminator_model_pose)

# Datasets
if args.dataset == 'lsp':
    lsp_train_dataset = LSP(args)
    args.mode = 'val'
    lsp_val_dataset = LSP(args)
# medical
elif args.dataset == 'medical':
    lsp_train_dataset = HANDXRAY(args)
    args.mode = 'val'
    lsp_val_dataset = HANDXRAY(args)
# MPII
elif args.dataset == 'mpii':
    lsp_train_dataset = MPII('train')
    lsp_val_dataset = MPII('valid')  ## MPII('val') was present originally

# Dataset and the DataLoader
train_loader = torch.utils.data.DataLoader(lsp_train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True)
val_save_loader = torch.utils.data.DataLoader(lsp_val_dataset,
                                              batch_size=args.val_batch_size)
val_eval_loader = torch.utils.data.DataLoader(lsp_val_dataset,
                                              batch_size=args.val_batch_size,
                                              shuffle=True)
#train_eval = torch.utils.data.DataLoader(lsp_train_dataset, batch_size=args.val_batch_size, shuffle=True)

pck = metrics.PCK(metrics.Options(256, 8))
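metrics.PCK here is project code. As a hedged sketch of what a PCK (Percentage of Correct Keypoints) metric computes, namely the fraction of predictions within a normalized distance of the ground truth, assuming (N, K, 2) keypoint arrays:

import numpy as np

def pck(pred, gt, threshold=0.5, norm=1.0):
    # A keypoint is correct if its distance to the ground truth, divided by a
    # reference length `norm` (e.g. head size), falls below `threshold`.
    dists = np.linalg.norm(pred - gt, axis=-1) / norm
    return float((dists < threshold).mean())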
Example #4
def main():
    opt = opts().parse()
    now = datetime.datetime.now()
    logger = Logger(opt.saveDir + '/logs_{}'.format(now.isoformat()))

    if opt.loadModel != 'none':
        model = torch.load(opt.loadModel).cuda()
    else:
        model = HourglassNet3D(opt.nStack, opt.nModules, opt.nFeats,
                               opt.nRegModules)

    criterion = torch.nn.MSELoss().cuda()
    optimizer = torch.optim.RMSprop(model.parameters(),
                                    opt.LR,
                                    alpha=ref.alpha,
                                    eps=ref.epsilon,
                                    weight_decay=ref.weightDecay,
                                    momentum=ref.momentum)

    if opt.ratio3D < ref.eps:
        val_loader = torch.utils.data.DataLoader(MPII(opt,
                                                      'val',
                                                      returnMeta=True),
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=int(ref.nThreads))
    else:
        val_loader = torch.utils.data.DataLoader(H36M(opt, 'val'),
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=int(ref.nThreads))

    if opt.test:
        val(0, opt, val_loader, model, criterion)
        return

    train_loader = torch.utils.data.DataLoader(
        Fusion(opt, 'train'),
        batch_size=opt.trainBatch,
        shuffle=(opt.DEBUG == 0),
        num_workers=int(ref.nThreads))

    for epoch in range(1, opt.nEpochs + 1):
        loss_train, acc_train, mpjpe_train, loss3d_train = train(
            epoch, opt, train_loader, model, criterion, optimizer)
        logger.scalar_summary('loss_train', loss_train, epoch)
        logger.scalar_summary('acc_train', acc_train, epoch)
        logger.scalar_summary('mpjpe_train', mpjpe_train, epoch)
        logger.scalar_summary('loss3d_train', loss3d_train, epoch)
        if epoch % opt.valIntervals == 0:
            loss_val, acc_val, mpjpe_val, loss3d_val = val(
                epoch, opt, val_loader, model, criterion)
            logger.scalar_summary('loss_val', loss_val, epoch)
            logger.scalar_summary('acc_val', acc_val, epoch)
            logger.scalar_summary('mpjpe_val', mpjpe_val, epoch)
            logger.scalar_summary('loss3d_val', loss3d_val, epoch)
            torch.save(model,
                       os.path.join(opt.saveDir, 'model_{}.pth'.format(epoch)))
            logger.write(
                '{:8f} {:8f} {:8f} {:8f} {:8f} {:8f} {:8f} {:8f} \n'.format(
                    loss_train, acc_train, mpjpe_train, loss3d_train, loss_val,
                    acc_val, mpjpe_val, loss3d_val))
        else:
            logger.write('{:8f} {:8f} {:8f} {:8f} \n'.format(
                loss_train, acc_train, mpjpe_train, loss3d_train))
        adjust_learning_rate(optimizer, epoch, opt.dropLR, opt.LR)
    logger.close()
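This variant of adjust_learning_rate receives the epoch itself. A minimal step-decay sketch (assumed, not from the source) that matches the dropLR arithmetic used in Example #1:

def adjust_learning_rate(optimizer, epoch, dropLR, LR):
    # Step decay: divide the base learning rate by 10 every dropLR epochs.
    lr = LR * (0.1 ** (epoch // dropLR))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr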
Example #5

                                    config['discriminator']['num_channels'],
                                    config['dataset']['num_joints'],
                                    config['discriminator']['num_residuals'])

# Use dataparallel
generator_model = nn.DataParallel(generator_model)
discriminator_model = nn.DataParallel(discriminator_model)

# Datasets
if args.dataset == 'lsp':
    lsp_train_dataset = LSP(args)
    args.mode = 'val'
    lsp_val_dataset = LSP(args)
# MPII
elif args.dataset == 'mpii':
    lsp_train_dataset = MPII('train')
    lsp_val_dataset = MPII('val')

# Dataset and the DataLoader
train_loader = torch.utils.data.DataLoader(lsp_train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True)
val_save_loader = torch.utils.data.DataLoader(lsp_val_dataset,
                                              batch_size=args.val_batch_size)
val_eval_loader = torch.utils.data.DataLoader(lsp_val_dataset,
                                              batch_size=args.val_batch_size,
                                              shuffle=True)

pck = metrics.PCK(metrics.Options(256, config['generator']['num_stacks']))

Example #6
    # Load
    model_data = torch.load(args.modelName)
    generator_model = model_data['generator_model']

    # Use dataparallel
    generator_model = nn.DataParallel(generator_model)
    discriminator_model = nn.DataParallel(discriminator_model)

    generator_model = generator_model.module
    discriminator_model = discriminator_model.module

    # Dataset and the Dataloader
    lsp_train_dataset = LSP(args)
    args.mode = 'val'
    lsp_val_dataset = MPII('val')  #LSP(args)
    train_loader = torch.utils.data.DataLoader(lsp_train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True)
    val_save_loader = torch.utils.data.DataLoader(lsp_val_dataset,
                                                  batch_size=args.batch_size)
    val_eval_loader = torch.utils.data.DataLoader(lsp_val_dataset,
                                                  batch_size=args.batch_size,
                                                  shuffle=True)

    # Loading on GPU, if available
    if args.use_gpu:
        generator_model = generator_model.to(fast_device)
        discriminator_model = discriminator_model.to(fast_device)

    # Cross entropy loss
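The wrap-then-unwrap step above (nn.DataParallel followed by .module) recovers the bare model from a checkpoint that stored the DataParallel wrapper. A hedged sketch of the usual save/load pattern, with MyNet as a hypothetical stand-in for the generator:

import torch
import torch.nn as nn

class MyNet(nn.Module):  # hypothetical stand-in for the generator above
    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(8, 8)

    def forward(self, x):
        return self.layer(x)

model = MyNet()
# Saving the underlying module keeps the checkpoint free of the wrapper:
torch.save(model.state_dict(), 'checkpoint.pth')
model.load_state_dict(torch.load('checkpoint.pth'))
model = nn.DataParallel(model)  # re-wrap for multi-GPU; model.module recovers the bare net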
Example #7
import numpy as np
import torch
import torch.utils.data as datatorch
import skimage
import matplotlib.pyplot as plt
import cv2
import json

from utils import imgutils
from datasets.mpii import MPII

import os

if __name__ == '__main__':
    # ========== Test dataset ==========
    ds_torch = MPII(use_scale=True, use_flip=True, use_rand_color=True)
    data_loader = datatorch.DataLoader(ds_torch, batch_size=1, shuffle=False)
    for step, (img, heatmaps, pts) in enumerate(data_loader):
        print('img', img.shape)
        print('heatmaps', heatmaps.shape)
        print('pts', pts.shape)

        img = np.transpose(img.squeeze().detach().numpy(), (1, 2, 0))
        # img = np.fliplr(img)
        heatmaps = np.transpose(heatmaps.squeeze().detach().numpy(), (1, 2, 0))
        pts = pts.squeeze().detach().numpy()
        # print('pts', pts)
        print('===========================================================')
        # imgutils2.show_heatmaps(img, heatmaps)
        img = skimage.transform.resize(img, (64, 64))
        imgutils.show_stack_joints(img,
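The call to imgutils.show_stack_joints is cut off in the source and the helper is project code. A minimal stand-in (an assumption) that overlays joints on the image with matplotlib, taking pts as a (K, 2) array of (x, y) pixel coordinates:

import matplotlib.pyplot as plt

def show_joints(img, pts):
    # img: (H, W, 3) image array; pts: (K, 2) joint coordinates in pixels.
    plt.imshow(img)
    plt.scatter(pts[:, 0], pts[:, 1], s=15, c='red')
    plt.show()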
Example #8
def main():
  opt = opts().parse()
  now = datetime.datetime.now()
  logger = Logger(opt.saveDir, now.isoformat())
  model = MSSH().cuda()
  optimizer = torch.optim.RMSprop(model.parameters(), opt.LR, 
                                    alpha = ref.alpha, 
                                    eps = ref.epsilon, 
                                    weight_decay = ref.weightDecay, 
                                    momentum = ref.momentum)
  criterion = torch.nn.MSELoss().cuda()

  # if opt.GPU > -1:
  #   print('Using GPU', opt.GPU)
  #   model = model.cuda(opt.GPU)
  #   criterion = criterion.cuda(opt.GPU)



  val_loader = torch.utils.data.DataLoader(
      MPII(opt, 'val'), 
      batch_size = 1, 
      shuffle = False,
      num_workers = int(ref.nThreads)
  )

  if opt.test:
    log_dict_train, preds = val(0, opt, val_loader, model, criterion)
    sio.savemat(os.path.join(opt.saveDir, 'preds.mat'), mdict = {'preds': preds})
    return

  train_loader = torch.utils.data.DataLoader(
      MPII(opt, 'train'), 
      batch_size = opt.trainBatch, 
      shuffle = (opt.DEBUG == 0),
      num_workers = int(ref.nThreads)
  )

  for epoch in range(1):
    model.train()
    Loss, Acc = AverageMeter(), AverageMeter()
    preds = []
  
    nIters = len(train_loader)
    bar = Bar('{}'.format(opt.expID), max=nIters)
    for i, (input, target, meta) in enumerate(train_loader):
      # torch.autograd.Variable is deprecated; tensors track gradients directly.
      input_var = input.float().cuda()
      target_var = target.float().cuda()
      #print( input_var)
      output = model(input_var)
    
      loss = criterion(output, target_var)
      Loss.update(loss.item(), input.size(0))  # .item() replaces the old loss.data[0]
      Acc.update(Accuracy((output.data).cpu().numpy(), (target_var.data).cpu().numpy()))
      optimizer.zero_grad()
      loss.backward()
      optimizer.step()

      Bar.suffix = '{split} Epoch: [{0}][{1}/{2}]| Total: {total:} | ETA: {eta:} | Loss {loss.avg:.6f} | Acc {Acc.avg:.6f} ({Acc.val:.6f})'.format(epoch, i, nIters, total=bar.elapsed_td, eta=bar.eta_td, loss=Loss, Acc=Acc, split = "train")
      bar.next()

    bar.finish()
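AverageMeter is not defined in the snippet; the standard running-average pattern, consistent with the .val/.avg fields and update(value, n) calls used above:

class AverageMeter:
    # Tracks the latest value and a running average over update() calls.
    def __init__(self):
        self.val, self.sum, self.count, self.avg = 0, 0, 0, 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count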
Example #9
import math

import torch
import torch.utils.data as datatorch

from utils import imgutils
from models.hourglass import hg as hg_torch
from models.hourglass2 import hgnet_torch as hgnet_torch
from losses.jointsmseloss import JointsMSELoss
from datasets.mpii import MPII

import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# ================================== Construct dataset ==================================
num_bcsz = 4

ds_torch = MPII(use_scale=True, use_flip=True, use_rand_color=True)
data_loader = datatorch.DataLoader(ds_torch, batch_size=num_bcsz, shuffle=True)

# ================================== Construct model ==================================
device = torch.device('cuda:0')
# device = torch.device('cpu')
learning_rate = 1e-3

net_hg_torch = hg_torch(num_stacks=2, num_blocks=1, num_classes=16).to(device)
# net_hg_torch = hgnet_torch(num_stacks=2, num_blocks=1, num_classes=16, num_features=64).to(device)

optimizer = torch.optim.RMSprop(net_hg_torch.parameters(), lr=learning_rate)
criteon = JointsMSELoss(use_target_weight=True).to(device)

# ================================== Train ==================================
num_epoch = 50
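JointsMSELoss is imported from project code. A hedged sketch of the common pose-estimation formulation, where the per-joint MSE between heatmaps is optionally scaled by a (B, K, 1) visibility weight:

import torch
import torch.nn as nn

class JointsMSELoss(nn.Module):
    def __init__(self, use_target_weight=False):
        super().__init__()
        self.criterion = nn.MSELoss()
        self.use_target_weight = use_target_weight

    def forward(self, output, target, target_weight=None):
        # output/target: (B, K, H, W) heatmaps; target_weight: (B, K, 1).
        b, k = output.shape[0], output.shape[1]
        pred = output.reshape(b, k, -1)
        gt = target.reshape(b, k, -1)
        if self.use_target_weight and target_weight is not None:
            pred = pred * target_weight
            gt = gt * target_weight
        return self.criterion(pred, gt)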
Example #10
def main():
    opt = opts().parse()
    now = datetime.datetime.now()
    logger = Logger(opt.saveDir, now.isoformat())
    model, optimizer = getModel(opt)
    criterion = torch.nn.MSELoss()

    if opt.GPU > -1:
        print('Using GPU', opt.GPU)
        model = model.cuda(opt.GPU)
        criterion = criterion.cuda(opt.GPU)

    val_loader = torch.utils.data.DataLoader(MPII(opt, 'val'),
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=int(ref.nThreads))

    if 'pred_net' in opt.arch:
        train_net = train_prednet
    elif 'cond_net' in opt.arch:
        train_net = train_condnet
    elif 'hg' in opt.arch:
        train_net = train_hg
    else:
        raise Exception('Model name not known')

    if opt.test:
        log_dict_train, preds = train_net.val(0, opt, val_loader, model,
                                              criterion)
        sio.savemat(os.path.join(opt.saveDir, 'preds.mat'),
                    mdict={'preds': preds})
        return

    train_loader = torch.utils.data.DataLoader(
        MPII(opt, 'train'),
        batch_size=opt.trainBatch,
        shuffle=(opt.DEBUG == 0),
        num_workers=int(ref.nThreads))

    for epoch in range(1, opt.nEpochs + 1):
        log_dict_train, _ = train_net.train(epoch, opt, train_loader, model,
                                            criterion, optimizer)
        for k, v in list(log_dict_train.items()):
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if epoch % opt.valIntervals == 0:
            log_dict_val, preds = train_net.val(epoch, opt, val_loader, model,
                                                criterion)
            for k, v in list(log_dict_val.items()):
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            #saveModel(model, optimizer, os.path.join(opt.saveDir, 'model_{}.checkpoint'.format(epoch)))
            torch.save(model,
                       os.path.join(opt.saveDir, 'model_{}.pth'.format(epoch)))
            sio.savemat(os.path.join(opt.saveDir,
                                     'preds_{}.mat'.format(epoch)),
                        mdict={'preds': preds})
        logger.write('\n')
        if epoch % opt.dropLR == 0:
            lr = opt.LR * (0.1**(epoch // opt.dropLR))
            print('Drop LR to', lr)
            adjust_learning_rate(optimizer, lr)
    logger.close()
    torch.save(model.cpu(), os.path.join(opt.saveDir, 'model_cpu.pth'))
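getModel(opt) is project code that returns both the network and its optimizer. A hypothetical sketch consistent with the other examples here (HourglassNet3D and the ref hyperparameters are assumptions carried over from Example #4; the real helper presumably dispatches on opt.arch):

import torch

def getModel(opt):
    # HourglassNet3D and ref come from the surrounding project, as in Example #4.
    model = HourglassNet3D(opt.nStack, opt.nModules, opt.nFeats, opt.nRegModules)
    optimizer = torch.optim.RMSprop(model.parameters(), opt.LR,
                                    alpha=ref.alpha, eps=ref.epsilon,
                                    weight_decay=ref.weightDecay,
                                    momentum=ref.momentum)
    return model, optimizer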