def load_model(arch, stacks, blocks, num_classes, mobile, checkpoint_resume):
    """Build a stacked-hourglass model and optionally load checkpoint weights.

    Args:
        arch: key into ``models.__dict__`` naming the model constructor.
        stacks: number of hourglass stacks.
        blocks: number of residual blocks per stack.
        num_classes: number of output channels (joint heatmaps).
        mobile: flag forwarded to the model constructor.
        checkpoint_resume: path to a checkpoint file; silently skipped when
            the path is not an existing file.

    Returns:
        The model switched to eval mode, with checkpoint weights loaded
        when a checkpoint file was found.
    """
    # create model
    model = models.__dict__[arch](num_stacks=stacks,
                                  num_blocks=blocks,
                                  num_classes=num_classes,
                                  mobile=mobile)

    # optionally resume from a checkpoint
    if isfile(checkpoint_resume):
        print("=> loading checkpoint '{}'".format(checkpoint_resume))
        # map_location keeps all tensors on CPU regardless of the device
        # they were saved from.
        checkpoint = torch.load(checkpoint_resume,
                                map_location=lambda storage, loc: storage)
        # Rebuild the state dict without the `module.` prefix that
        # DataParallel prepends.  Only strip the prefix when it is actually
        # present -- blindly slicing k[7:] corrupts keys saved from a
        # non-DataParallel model.
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in checkpoint['state_dict'].items():
            name = k[7:] if k.startswith('module.') else k
            new_state_dict[name] = v
        # load params
        model.load_state_dict(new_state_dict)
        print("=> loaded checkpoint '{}' (epoch {})".format(
            checkpoint_resume, checkpoint['epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(checkpoint_resume))

    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))
    model.eval()
    return model
# Ejemplo n.º 2
def main(args):
    """Evaluate a dual-branch pose model restored from a checkpoint."""
    global best_acc
    global idx1
    global idx2

    # idx is the index of joints used to compute accuracy for dataset2
    idx1 = range(1, 19)
    idx2 = range(1, 19)  # horse

    # build the network
    njoints = datasets.__dict__[args.dataset].njoints
    print("==> creating model '{}'".format(args.arch))
    net = models.__dict__[args.arch](num_classes=njoints,
                                     resnet_layers=args.resnet_layers,
                                     pretrained=None,
                                     dual_branch=True)
    model = torch.nn.DataParallel(net).to(device)

    # loss used by validate()
    criterion = losses.JointsMSELoss().to(device)

    # a checkpoint is mandatory for evaluation
    if not args.resume:
        raise Exception('please provide a checkpoint')
    if isfile(args.resume):
        print("=> loading checkpoint '{}'".format(args.resume))
        ckpt = torch.load(args.resume)
        args.start_epoch = ckpt['epoch']
        best_acc = ckpt['best_acc']
        # EMA weights are evaluated, not the raw training weights
        model.load_state_dict(ckpt['state_dict_ema'])

        print("=> loaded checkpoint '{}' (epoch {})".format(
            args.resume, ckpt['epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(args.resume))

    eval_set = datasets.__dict__[args.dataset](is_train=False,
                                               is_aug=False,
                                               **vars(args))
    eval_loader = torch.utils.data.DataLoader(eval_set,
                                              batch_size=args.test_batch,
                                              shuffle=False,
                                              num_workers=args.workers,
                                              pin_memory=True)

    _, acc, predictions = validate(eval_loader, model, criterion, njoints,
                                   args, args.flip, args.test_batch)
    return
# Ejemplo n.º 3
def main(args):
    """Build a stacked-hourglass model, load its weights, and export to ONNX."""
    # create checkpoint dir
    if not isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # build the network and switch it to inference mode
    print("==> creating model '{}', stacks={}, blocks={}".format(
        args.arch, args.stacks, args.blocks))
    model = models.__dict__[args.arch](num_stacks=args.stacks,
                                       num_blocks=args.blocks,
                                       num_classes=args.num_classes,
                                       mobile=args.mobile)
    model.eval()

    # optionally resume from a checkpoint
    title = 'mpii-' + args.arch
    if args.checkpoint:
        if isfile(args.checkpoint):
            print("=> loading checkpoint '{}'".format(args.checkpoint))
            ckpt = torch.load(args.checkpoint)
            args.start_epoch = ckpt['epoch']

            # rebuild the state dict without the `module.` prefix that
            # DataParallel prepends to every parameter name
            from collections import OrderedDict
            stripped = OrderedDict(
                (key[7:], value) for key, value in ckpt['state_dict'].items())
            model.load_state_dict(stripped)

            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.checkpoint, ckpt['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.checkpoint))
    else:
        logger = Logger(join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(
            ['Epoch', 'LR', 'Train Loss', 'Val Loss', 'Train Acc', 'Val Acc'])

    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # trace with a dummy image of the configured input resolution
    dummy_input = torch.randn(1, 3, args.in_res, args.in_res)
    torch.onnx.export(model, dummy_input, args.out_onnx)
# Ejemplo n.º 4
def main(args):
    """Train (or evaluate) a pose-estimation model on a single dataset.

    Builds the model named by ``args.arch``, optionally resumes from
    ``args.resume``, then either runs evaluation only (``args.evaluate``)
    or trains for ``args.epochs`` epochs, logging metrics and saving
    checkpoints under ``args.checkpoint``.
    """
    global best_acc
    global idx

    # idx is the index of joints used to compute accuracy
    if args.dataset in ['mpii', 'lsp']:
        idx = [1, 2, 3, 4, 5, 6, 11, 12, 15, 16]
    elif args.dataset == 'coco':
        idx = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
    else:
        print("Unknown dataset: {}".format(args.dataset))
        assert False

    # create checkpoint dir
    if not isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # create model
    njoints = datasets.__dict__[args.dataset].njoints

    print("==> creating model '{}', stacks={}, blocks={}".format(
        args.arch, args.stacks, args.blocks))
    model = models.__dict__[args.arch](num_stacks=args.stacks,
                                       num_blocks=args.blocks,
                                       num_classes=njoints,
                                       resnet_layers=args.resnet_layers)

    model = torch.nn.DataParallel(model).to(device)

    # define loss function (criterion) and optimizer
    criterion = losses.JointsMSELoss().to(device)

    if args.solver == 'rms':
        optimizer = torch.optim.RMSprop(model.parameters(),
                                        lr=args.lr,
                                        momentum=args.momentum,
                                        weight_decay=args.weight_decay)
    elif args.solver == 'adam':
        optimizer = torch.optim.Adam(
            model.parameters(),
            lr=args.lr,
        )
    else:
        print('Unknown solver: {}'.format(args.solver))
        assert False

    # optionally resume from a checkpoint
    title = args.dataset + ' ' + args.arch
    if args.resume:
        if isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc = checkpoint['best_acc']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            logger = Logger(join(args.checkpoint, 'log.txt'),
                            title=title,
                            resume=True)
        else:
            # NOTE(review): on this path `logger` is never bound, so
            # logger.append() in the training loop below raises NameError --
            # confirm this fail-fast is intended.
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        logger = Logger(join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(
            ['Epoch', 'LR', 'Train Loss', 'Val Loss', 'Train Acc', 'Val Acc'])

    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # create data loader
    train_dataset = datasets.__dict__[args.dataset](is_train=True,
                                                    **vars(args))
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.train_batch,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_dataset = datasets.__dict__[args.dataset](is_train=False, **vars(args))
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.test_batch,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # evaluation only
    if args.evaluate:
        print('\nEvaluation only')
        loss, acc, predictions = validate(val_loader, model, criterion,
                                          njoints, args.debug, args.flip)
        save_pred(predictions, checkpoint=args.checkpoint)
        return

    # train and eval
    lr = args.lr
    for epoch in range(args.start_epoch, args.epochs):
        lr = adjust_learning_rate(optimizer, epoch, lr, args.schedule,
                                  args.gamma)
        print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr))

        # decay sigma (shrinks the target heatmap spread each epoch)
        if args.sigma_decay > 0:
            train_loader.dataset.sigma *= args.sigma_decay
            val_loader.dataset.sigma *= args.sigma_decay

        # train for one epoch
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, args.debug, args.flip)

        # evaluate on validation set
        valid_loss, valid_acc, predictions = validate(val_loader, model,
                                                      criterion, njoints,
                                                      args.debug, args.flip)

        # append logger file
        logger.append(
            [epoch + 1, lr, train_loss, valid_loss, train_acc, valid_acc])

        # remember best acc and save checkpoint
        is_best = valid_acc > best_acc
        best_acc = max(valid_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            predictions,
            is_best,
            checkpoint=args.checkpoint,
            snapshot=args.snapshot)

    logger.close()
    logger.plot(['Train Acc', 'Val Acc'])
    savefig(os.path.join(args.checkpoint, 'log.eps'))
# Ejemplo n.º 5
def main(args):
    """Train a stacked-hourglass network on MPII, or evaluate only.

    Builds the model named by ``args.arch``, optionally resumes from
    ``args.resume``, and either runs a single validation pass
    (``args.evaluate``) or trains for ``args.epochs`` epochs while logging
    metrics and saving checkpoints under ``args.checkpoint``.
    """
    global best_acc

    # create checkpoint dir
    if not isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # create model
    print("==> creating model '{}', stacks={}, blocks={}".format(
        args.arch, args.stacks, args.blocks))
    model = models.__dict__[args.arch](num_stacks=args.stacks,
                                       num_blocks=args.blocks,
                                       num_classes=args.num_classes)

    model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer.
    # `size_average=True` is deprecated; reduction='mean' is its exact
    # modern equivalent.
    criterion = torch.nn.MSELoss(reduction='mean').cuda()

    optimizer = torch.optim.RMSprop(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    title = 'mpii-' + args.arch
    if args.resume:
        if isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc = checkpoint['best_acc']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
            logger = Logger(join(args.checkpoint, 'log.txt'), title=title,
                            resume=True)
        else:
            # NOTE(review): `logger` stays unbound on this path, so the
            # training loop below fails with NameError -- confirm this
            # fail-fast is intended.
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        logger = Logger(join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(
            ['Epoch', 'LR', 'Train Loss', 'Val Loss', 'Train Acc', 'Val Acc'])

    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # Data loading code
    train_loader = torch.utils.data.DataLoader(
        datasets.Mpii('data/mpii/mpii_annotations.json', 'data/mpii/images',
                      sigma=args.sigma, label_type=args.label_type),
        batch_size=args.train_batch, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        datasets.Mpii('data/mpii/mpii_annotations.json', 'data/mpii/images',
                      sigma=args.sigma, label_type=args.label_type,
                      train=False),
        batch_size=args.test_batch, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        print('\nEvaluation only')
        loss, acc, predictions = validate(val_loader, model, criterion,
                                          args.num_classes, args.debug,
                                          args.flip)
        save_pred(predictions, checkpoint=args.checkpoint)
        return

    lr = args.lr
    for epoch in range(args.start_epoch, args.epochs):
        # (removed leftover debug `sleep(2)` that slowed every epoch)
        lr = adjust_learning_rate(optimizer, epoch, lr, args.schedule,
                                  args.gamma)
        print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr))

        # decay sigma (shrinks the target heatmap spread each epoch)
        if args.sigma_decay > 0:
            train_loader.dataset.sigma *= args.sigma_decay
            val_loader.dataset.sigma *= args.sigma_decay

        # train for one epoch
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, args.debug, args.flip)

        # evaluate on validation set
        valid_loss, valid_acc, predictions = validate(val_loader, model,
                                                      criterion,
                                                      args.num_classes,
                                                      args.debug, args.flip)

        # append logger file
        logger.append(
            [epoch + 1, lr, train_loss, valid_loss, train_acc, valid_acc])

        # remember best acc and save checkpoint
        is_best = valid_acc > best_acc
        best_acc = max(valid_acc, best_acc)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_acc': best_acc,
            'optimizer': optimizer.state_dict(),
        }, predictions, is_best, checkpoint=args.checkpoint)

    logger.close()
    logger.plot(['Train Acc', 'Val Acc'])
    savefig(os.path.join(args.checkpoint, 'log.eps'))
# Ejemplo n.º 6
def main(args):
    """Train or evaluate an arm-keypoint model over one or more datasets.

    Per-dataset options (``training_set_percentage``, ``meta_dir``,
    ``anno_type``, ``ratio``) are broadcast from length 1 to the number of
    datasets.  In evaluation mode, predictions (and optionally heatmaps and
    3D reconstructions) are written under ``args.save_result_dir``.
    """
    num_datasets = len(args.data_dir)  #number of datasets
    # Broadcast single-valued per-dataset options to all datasets.
    for item in [
            args.training_set_percentage, args.meta_dir, args.anno_type,
            args.ratio
    ]:
        if len(item) == 1:
            for i in range(num_datasets - 1):
                item.append(item[0])
        assert len(item) == num_datasets

    # scales used for multi-scale testing
    scales = [0.7, 0.85, 1, 1.3, 1.6]

    if args.meta_dir == '':
        args.meta_dir = args.data_dir  #if not specified, assume meta info is stored in data dir.

    # create checkpoint dir
    if not isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    #create the log file not exist
    file = open(join(args.checkpoint, 'log.txt'), 'w+')
    file.close()

    if args.evaluate:  #creatng path for evaluation
        if not isdir(args.save_result_dir):
            mkdir_p(args.save_result_dir)

        folders_to_create = ['preds', 'visualization']
        if args.save_heatmap:
            folders_to_create.append('heatmaps')
        for folder_name in folders_to_create:
            if not os.path.isdir(
                    os.path.join(args.save_result_dir, folder_name)):
                print('creating path: ' +
                      os.path.join(args.save_result_dir, folder_name))
                os.mkdir(os.path.join(args.save_result_dir, folder_name))

    # all joint indices are used for accuracy
    idx = range(args.num_classes)
    global best_acc

    # camera name used by the Arm dataset loader
    cams = ['FusionCameraActor3_2']

    # create model
    print("==> creating model '{}', stacks={}, blocks={}".format(
        args.arch, args.stacks, args.blocks))
    model = models.__dict__[args.arch](num_stacks=args.stacks,
                                       num_blocks=args.blocks,
                                       num_classes=args.num_classes)

    model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    # NOTE(review): `size_average=True` is deprecated in current PyTorch;
    # the equivalent is reduction='mean'.
    criterion = torch.nn.MSELoss(size_average=True).cuda()

    optimizer = torch.optim.RMSprop(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    title = 'arm-' + args.arch
    if args.resume:
        if isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc = checkpoint['best_acc']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            logger = Logger(join(args.checkpoint, 'log.txt'),
                            title=title,
                            resume=True)
        else:
            # NOTE(review): `logger` is never bound on this path, so the
            # training loop below raises NameError -- confirm intended.
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        logger = Logger(join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(
            ['Epoch', 'LR', 'Train Loss', 'Val Loss', 'Train Acc', 'Val Acc'])

    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    train_set_list = []
    val_set_list = []

    # one train/val dataset pair per data directory
    for i in range(num_datasets):
        train_set_list.append(
            datasets.Arm(
                args.data_dir[i],
                args.meta_dir[i],
                args.random_bg_dir,
                cams[0],
                args.anno_type[i],
                train=True,
                training_set_percentage=args.training_set_percentage[i],
                replace_bg=args.replace_bg))

        val_set_list.append(
            datasets.Arm(
                args.data_dir[i],
                args.meta_dir[i],
                args.random_bg_dir,
                cams[0],
                args.anno_type[i],
                train=False,
                training_set_percentage=args.training_set_percentage[i],
                scales=scales,
                multi_scale=args.multi_scale,
                ignore_invis_pts=args.ignore_invis_pts))

    # Data loading code
    if not args.evaluate:
        train_loader = torch.utils.data.DataLoader(datasets.Concat(
            datasets=train_set_list, ratio=args.ratio),
                                                   batch_size=args.train_batch,
                                                   shuffle=True,
                                                   num_workers=args.workers,
                                                   pin_memory=True)

        print("No. minibatches in training set:{}".format(len(train_loader)))

    if args.multi_scale:  #multi scale testing
        # each image contributes len(scales) samples, so scale the batch
        args.test_batch = args.test_batch * len(scales)

    val_loader = torch.utils.data.DataLoader(datasets.Concat(
        datasets=val_set_list, ratio=None),
                                             batch_size=args.test_batch,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    print("No. minibatches in validation set:{}".format(len(val_loader)))

    if args.evaluate:
        print('\nEvaluation only')
        # if not args.compute_3d:
        loss, acc = validate(val_loader, model, criterion, args.num_classes,
                             idx, args.save_result_dir, args.meta_dir,
                             args.anno_type, args.flip, args.evaluate, scales,
                             args.multi_scale, args.save_heatmap)

        if args.compute_3d:

            preds = []
            gts = []
            # lift 2D predictions to 3D joint angles
            hit, d3_pred, file_name_list = d2tod3(
                data_dir=args.save_result_dir,
                meta_dir=args.meta_dir[0],
                cam_type=args.camera_type,
                pred_from_heatmap=False,
                em_test=False)

            # validate the 3d reconstruction accuracy

            # NOTE(review): the d2tod3() results above are immediately
            # overwritten by the JSON file written previously -- confirm
            # this is intended.
            with open(os.path.join(args.save_result_dir, 'd3_pred.json'),
                      'r') as f:
                obj = json.load(f)
                hit, d3_pred, file_name_list = obj['hit'], obj['d3_pred'], obj[
                    'file_name_list']

            for file_name in file_name_list:
                preds.append(d3_pred[file_name]['preds'])  #predicted x
                with open(os.path.join(args.data_dir[0], 'angles', file_name),
                          'r') as f:
                    gts.append(json.load(f))

            print('average error in angle: [base, elbow, ankle, wrist]:{}'.
                  format(d3_acc(preds, gts)))

        return

    lr = args.lr
    for epoch in range(args.start_epoch, args.epochs):
        lr = adjust_learning_rate(optimizer, epoch, lr, args.schedule,
                                  args.gamma)
        print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr))

        # decay sigma
        if args.sigma_decay > 0:
            train_loader.dataset.sigma *= args.sigma_decay
            val_loader.dataset.sigma *= args.sigma_decay

        # train for one epoch
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, idx, args.flip)

        # evaluate on validation set
        valid_loss, valid_acc = validate(val_loader, model, criterion,
                                         args.num_classes, idx,
                                         args.save_result_dir, args.meta_dir,
                                         args.anno_type, args.flip,
                                         args.evaluate)

        #If concatenated dataset is used, re-random after each epoch
        train_loader.dataset.reset(), val_loader.dataset.reset()

        # append logger file
        logger.append(
            [epoch + 1, lr, train_loss, valid_loss, train_acc, valid_acc])

        # remember best acc and save checkpoint
        is_best = valid_acc > best_acc
        best_acc = max(valid_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()
    logger.plot(['Train Acc', 'Val Acc'])
    savefig(os.path.join(args.checkpoint, 'log.eps'))
def main(args):
    """Set up cross-domain animal pose training data and evaluate a checkpoint.

    Trains on ``args.dataset1`` and validates on ``args.dataset2`` (e.g.
    synthetic-to-real animal transfer).  A checkpoint is mandatory; with
    ``args.evaluate`` set, a single validation pass is run.
    """
    global best_acc
    global idx1
    global idx2

    # idx is the index of joints used to compute accuracy for dataset2
    if args.dataset1 == 'real_animal' or args.dataset1 == 'real_animal_sp' or args.dataset1 == 'synthetic_animal' or args.dataset1 == 'synthetic_animal_sp':
        idx1 = range(1, 19)
    else:
        print("Unknown dataset: {}".format(args.dataset1))
        assert False

    if args.dataset2 == 'real_animal' or args.dataset2 == 'real_animal_sp':
        if args.animal == 'horse':
            idx2 = range(1, 19)  # horse
        elif args.animal == 'tiger':
            # tiger joint ordering differs from the horse layout
            idx2 = [
                1, 2, 3, 4, 5, 6, 7, 8, 15, 16, 17, 18, 13, 14, 9, 10, 11, 12
            ]  # tiger
    else:
        print("Unknown dataset: {}".format(args.dataset2))
        assert False

    # create checkpoint dir
    if not isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # create model
    njoints = datasets.__dict__[args.dataset1].njoints
    print(njoints)
    print("==> creating model '{}', stacks={}, blocks={}".format(
        args.arch, args.stacks, args.blocks))
    model = models.__dict__[args.arch](num_stacks=args.stacks,
                                       num_blocks=args.blocks,
                                       num_classes=njoints,
                                       resnet_layers=args.resnet_layers)

    model = torch.nn.DataParallel(model).to(device)

    # define loss function (criterion) and optimizer
    criterion = losses.JointsMSELoss().to(device)

    if args.solver == 'rms':
        optimizer = torch.optim.RMSprop(model.parameters(),
                                        lr=args.lr,
                                        momentum=args.momentum,
                                        weight_decay=args.weight_decay)
    elif args.solver == 'adam':
        optimizer = torch.optim.Adam(
            model.parameters(),
            lr=args.lr,
        )
    else:
        print('Unknown solver: {}'.format(args.solver))
        assert False

    # optionally resume from a checkpoint
    title = args.dataset1 + ' ' + args.arch
    if args.resume:
        if isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            #model.load_state_dict(checkpoint)
            args.start_epoch = checkpoint['epoch']
            best_acc = checkpoint['best_acc']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        raise Exception('please provide a checkpoint')

    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # create data loader: train on dataset1, validate on dataset2
    train_dataset = datasets.__dict__[args.dataset1](is_train=True,
                                                     **vars(args))
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.train_batch,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_dataset = datasets.__dict__[args.dataset2](is_train=False,
                                                   **vars(args))
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.test_batch,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # evaluation only
    if args.evaluate:
        print('\nEvaluation only')
        _, acc, predictions = validate(val_loader, model, criterion, njoints,
                                       args, args.flip, args.test_batch)
        #save_pred(predictions, checkpoint=args.checkpoint)
        return
# Ejemplo n.º 8
def main(args):
    """Train or evaluate an MPII pose model using the UniLoss criterion.

    Builds the model for ``args.index_classes`` joints, optionally resumes
    from ``args.resume`` (resetting the learning rate to ``args.lr``), then
    either runs evaluation only or trains for ``args.epochs`` epochs with
    an accuracy-plateau-based schedule extension.
    """
    global best_acc
    # create checkpoint dir
    if not isdir(args.checkpoint):
        mkdir_p(args.checkpoint)
    _logger = log.get_logger(__name__, args)
    _logger.info(print_args(args))

    # create model
    print("==> creating model '{}', stacks={}, blocks={}".format(
        args.arch, args.stacks, args.blocks))
    model = models.__dict__[args.arch](num_stacks=args.stacks,
                                       num_blocks=args.blocks,
                                       num_classes=len(args.index_classes))

    model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = models.loss.UniLoss(a_points=args.a_points)
    optimizer = torch.optim.RMSprop(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    title = 'mpii-' + args.arch
    if args.resume:
        if isfile(args.resume):
            _logger.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc = checkpoint['best_acc']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            # override the restored optimizer LR with the CLI value
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.lr
                print(param_group['lr'])
            _logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            # resume=False: the log file is restarted, not appended to
            logger = Logger(join(args.checkpoint, 'log.txt'),
                            title=title,
                            resume=False)
            logger.set_names([
                'Epoch', 'LR', 'Train Loss', 'Val Loss', 'Train Acc', 'Val Acc'
            ])
        else:
            # NOTE(review): `logger` is never bound on this path, so the
            # training loop below raises NameError -- confirm intended.
            _logger.info("=> no checkpoint found at '{}'".format(args.resume))
    else:
        logger = Logger(join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(
            ['Epoch', 'LR', 'Train Loss', 'Val Loss', 'Train Acc', 'Val Acc'])

    cudnn.benchmark = True
    _logger.info('    Total params: %.2fM' %
                 (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # Data loading code
    train_loader = torch.utils.data.DataLoader(
        datasets.Mpii('data/mpii/mpii_annotations.json',
                      'data/mpii/images',
                      sigma=args.sigma,
                      label_type=args.label_type,
                      _idx=args.index_classes,
                      direct=True,
                      n_points=args.n_points),
        batch_size=args.train_batch,
        shuffle=True,
        collate_fn=datasets.mpii.mycollate,
        num_workers=args.workers,
        pin_memory=False)

    val_loader = torch.utils.data.DataLoader(
        datasets.Mpii('data/mpii/mpii_annotations.json',
                      'data/mpii/images',
                      sigma=args.sigma,
                      label_type=args.label_type,
                      _idx=args.index_classes,
                      train=False,
                      direct=True),
        batch_size=args.test_batch,
        shuffle=False,
        collate_fn=datasets.mpii.mycollate,
        num_workers=args.workers,
        pin_memory=False)

    if args.evaluate:
        _logger.warning('\nEvaluation only')
        loss, acc, predictions = validate(val_loader,
                                          model,
                                          criterion,
                                          len(args.index_classes),
                                          False,
                                          args.flip,
                                          _logger,
                                          evaluate_only=True)
        save_pred(predictions, checkpoint=args.checkpoint)
        return

    # multi-thread
    # NOTE(review): inqueues/outqueues are passed to train() empty and never
    # populated here -- presumably worker queues are created inside train();
    # confirm against its definition.
    inqueues = []
    outqueues = []
    valid_accs = []
    lr = args.lr
    for epoch in range(args.start_epoch, args.epochs):
        lr = adjust_learning_rate(optimizer, epoch, lr, args.schedule,
                                  args.gamma)
        _logger.warning('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr))

        # decay sigma
        if args.sigma_decay > 0:
            train_loader.dataset.sigma *= args.sigma_decay
            val_loader.dataset.sigma *= args.sigma_decay
        # train for one epoch
        train_loss, train_acc = train(inqueues, outqueues, train_loader, model,
                                      criterion, optimizer, args.debug,
                                      args.flip, args.clip, _logger)
        # evaluate on validation set
        with torch.no_grad():
            valid_loss, valid_acc, predictions = validate(
                val_loader, model, criterion, len(args.index_classes),
                args.debug, args.flip, _logger)
        # append logger file
        logger.append(
            [epoch + 1, lr, train_loss, valid_loss, train_acc, valid_acc])
        valid_accs.append(valid_acc)
        # adaptive schedule: when validation accuracy plateaus (last 4-epoch
        # mean not >1% better than the previous 4), schedule an LR decay
        if args.schedule[0] == -1:
            if len(valid_accs) > 8:
                if sum(valid_accs[-4:]) / 4 * 0.99 < sum(
                        valid_accs[-8:-4]) / 4:
                    args.schedule.append(epoch + 1)
                    valid_accs = []
        # remember best acc and save checkpoint
        is_best = valid_acc > best_acc
        best_acc = max(valid_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            predictions,
            is_best,
            checkpoint=args.checkpoint,
            snapshot=1)

    logger.close()
    logger.plot(['Train Acc', 'Val Acc'])
    savefig(os.path.join(args.checkpoint, 'log.eps'))
# Ejemplo n.º 9
def main(args):
    """Evaluation entry point: build a pose model, optionally resume weights,
    and run validation on a single unit-test image path (``args.unit_path``).

    Relies on module-level globals defined elsewhere in this file:
    ``best_acc``, ``idx`` (joint indices used by the accuracy metric) and
    ``customMpiiObject`` (the validation dataset object).
    """
    global best_acc
    global idx
    global customMpiiObject

    # idx is the index of joints used to compute accuracy
    if args.dataset in ['mpii', 'lsp']:
        idx = [1, 2, 3, 4, 5, 6, 11, 12, 15, 16]
    elif args.dataset == 'coco':
        idx = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
    else:
        print("Unknown dataset: {}".format(args.dataset))
        assert False

    # create checkpoint dir
    if not isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # create model
    # NOTE(review): joint count is taken from CustomMpii regardless of
    # args.dataset above — presumably intentional for this unit-test script;
    # confirm against the dataset actually evaluated.
    njoints = CustomMpii.njoints

    print("==> creating model '{}', stacks={}, blocks={}".format(
        args.arch, args.stacks, args.blocks))
    model = models.__dict__[args.arch](num_stacks=args.stacks,
                                       num_blocks=args.blocks,
                                       num_classes=njoints,
                                       resnet_layers=args.resnet_layers)

    # wrap in DataParallel; resumed checkpoints are expected to carry the
    # matching 'module.'-prefixed keys
    model = torch.nn.DataParallel(model).to(device)

    # define loss function (criterion) and optimizer
    criterion = losses.JointsMSELoss().to(device)

    if args.solver == 'rms':
        optimizer = torch.optim.RMSprop(model.parameters(),
                                        lr=args.lr,
                                        momentum=args.momentum,
                                        weight_decay=args.weight_decay)
    elif args.solver == 'adam':
        optimizer = torch.optim.Adam(
            model.parameters(),
            lr=args.lr,
        )
    else:
        print('Unknown solver: {}'.format(args.solver))
        assert False

    # optionally resume from a checkpoint
    title = args.dataset + ' ' + args.arch
    if args.resume:
        if isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            # map_location='cpu' so CPU-only machines can load GPU checkpoints
            checkpoint = torch.load(args.resume, map_location='cpu')
            args.start_epoch = checkpoint['epoch']
            best_acc = checkpoint['best_acc']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            logger = Logger(join(args.checkpoint, 'log.txt'),
                            title=title,
                            resume=True)
        else:
            # NOTE(review): `logger` is left undefined on this path; harmless
            # here because this function never uses it, but fragile.
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        logger = Logger(join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(
            ['Epoch', 'LR', 'Train Loss', 'Val Loss', 'Train Acc', 'Val Acc'])

    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    print("Loading validation dataset...")
    # stored in a global — presumably read by validate()/paint; verify.
    customMpiiObject = CustomMpii(is_train=False, **vars(args))

    # NOTE(review): this validate() variant takes a path (args.unit_path)
    # rather than a DataLoader — confirm against its definition.
    loss, acc, predictions = validate(args.unit_path, model, criterion,
                                      njoints, args.debug, args.flip)
    print(predictions)

    if args.debug:
        paint.main(args.unit_path, predictions.squeeze().tolist())

    return
def main():
    """CC-SSL training: adapt a keypoint model from a synthetic source
    dataset to a real target dataset using confidence-ranked pseudo-labels.

    Every 10 epochs, pseudo-labels for the target training set are
    regenerated with ``prediction_check``, thresholded by a confidence
    ranking, and saved under ``<checkpoint>/ssl_labels``; training then
    continues jointly on source labels and target pseudo-labels.

    Relies on module-level names defined elsewhere in this file: ``args``,
    ``criterion``, ``njoints``, ``idx``, ``global_animal``, ``device``,
    ``models``, ``datasets``, ``opt`` and helpers (``CreateModel``,
    ``prediction_check``, ``validate``, ``accuracy``).
    """
    _t = {'iter time': Timer()}

    # create output directories; exist_ok so a pre-existing checkpoint dir
    # still gets the logs/ssl_labels subdirectories (previously they were
    # only created when the checkpoint dir itself was missing)
    os.makedirs(args.checkpoint, exist_ok=True)
    os.makedirs(os.path.join(args.checkpoint, 'logs'), exist_ok=True)
    os.makedirs(os.path.join(args.checkpoint, 'ssl_labels'), exist_ok=True)
    opt.print_options(args)

    # load datasets
    # load synthetic (source) datasets
    source_train_dataset = datasets.__dict__[args.source](is_train=True,
                                                          **vars(args))
    source_train_loader = torch.utils.data.DataLoader(
        source_train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True)

    source_test_dataset = datasets.__dict__[args.source](is_train=False,
                                                         **vars(args))
    source_test_loader = torch.utils.data.DataLoader(
        source_test_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True)
    # load real training set images with pseudo-labels
    target_train_dataset = datasets.__dict__[args.target_ssl](is_train=True,
                                                              is_aug=False,
                                                              **vars(args))
    target_train_loader = torch.utils.data.DataLoader(
        target_train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True)
    # load original real test set with ground truth labels
    target_test_dataset = datasets.__dict__[args.target](is_train=False,
                                                         is_aug=False,
                                                         **vars(args))
    target_test_loader = torch.utils.data.DataLoader(
        target_test_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True)

    # create model and optimizer
    model, optimizer = CreateModel(args, models, datasets)

    cudnn.enabled = True
    cudnn.benchmark = True
    model.train()
    model = torch.nn.DataParallel(model).to(device)

    # fix: default best_acc so the snapshot block at the bottom does not
    # raise UnboundLocalError when no checkpoint is resumed
    best_acc = 0
    if isfile(args.resume):
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            args.resume, checkpoint['epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(args.resume))

    _t['iter time'].tic()
    trg_val_acc_best = 0

    # CC-SSL training
    for epoch in range(args.num_epochs):
        if epoch == 0:
            # baseline accuracy on the target test set before any training
            trg_val_loss, trg_val_acc = validate(target_test_loader, model,
                                                 criterion, args.flip,
                                                 args.batch_size, njoints)

        # regenerate ssl (pseudo) labels every 10 epochs
        if epoch % 10 == 0:
            print("==> generating ssl labels")

            # un-shuffled, un-augmented loader so labels align with indices
            target_train_dataset = datasets.__dict__[args.target](
                is_train=True, is_aug=False, **vars(args))
            target_train_loader = torch.utils.data.DataLoader(
                target_train_dataset,
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=args.workers,
                pin_memory=True)

            model.eval()

            # generate labels on target training set
            ssl_kpts = {}
            acces1 = AverageMeter()
            previous_img = None
            previous_kpts = None
            for _, (trg_img, trg_lbl,
                    trg_meta) in enumerate(target_train_loader):
                trg_img = trg_img.to(device)
                trg_lbl = trg_lbl.to(device, non_blocking=True)
                # generate labels for each image, using the previous image's
                # keypoints for temporal consistency checking
                for i in range(trg_img.size(0)):
                    score_map, generated_kpts = prediction_check(
                        previous_img, previous_kpts, trg_img[i], model,
                        target_train_dataset)
                    ssl_kpts[int(trg_meta['index'][i].cpu().numpy().astype(
                        np.int32))] = generated_kpts
                    if global_animal == 'tiger':
                        # reorder tiger joints to the canonical layout
                        trg_lbl[i] = trg_lbl[
                            i,
                            np.array([
                                1, 2, 3, 4, 5, 6, 7, 8, 15, 16, 17, 18, 13, 14,
                                9, 10, 11, 12
                            ]) - 1, :, :]

                    acc1, _ = accuracy(score_map,
                                       trg_lbl[i].cpu().unsqueeze(0), idx)
                    acces1.update(acc1[0], 1)
                    previous_img = trg_img[i]
                    previous_kpts = generated_kpts
            print('Acc on target training set (pseudo-labels):', acces1.avg)

            # modify confidence score based on ranking
            sorted_confidence = np.zeros(1)
            for k in ssl_kpts:
                sorted_confidence = np.concatenate(
                    (sorted_confidence, ssl_kpts[k][:, 2].reshape(-1)), axis=0)
            sorted_confidence = np.sort(sorted_confidence)
            np.save(
                os.path.join(args.checkpoint, 'ssl_labels',
                             'ssl_labels_train_confidence.npy'),
                sorted_confidence)
            # keep the top (1-p) fraction of keypoints; p shrinks by 0.02 per
            # generation round (offset by 10) and is floored at 0.2
            p = (1.0 - 0.02 * (epoch + 10))
            if p < 0.2:
                p = 0.2
            ccl_thresh = sorted_confidence[int(p * sorted_confidence.shape[0])]
            print("=====> ccl_thresh: ", ccl_thresh)
            # binarize confidences against the threshold
            for k in ssl_kpts:
                ssl_kpts[k][:, 2] = (ssl_kpts[k][:, 2] > ccl_thresh).astype(
                    np.float32)

            # fix: use os.path.join -- the original concatenated
            # args.checkpoint + 'ssl_labels/...' without a separator, writing
            # outside the ssl_labels directory created above
            np.save(
                os.path.join(args.checkpoint, 'ssl_labels',
                             'ssl_labels_train.npy'), ssl_kpts)

            # generate labels on target test set for diagnosis
            ssl_kpts = {}
            acces1 = AverageMeter()
            previous_img = None
            previous_kpts = None
            for jj, (trg_img, trg_lbl,
                     trg_meta) in enumerate(target_test_loader):
                trg_img = trg_img.to(device)
                trg_lbl = trg_lbl.to(device, non_blocking=True)
                # generate labels for each image
                for i in range(trg_img.size(0)):
                    score_map, generated_kpts = prediction_check(
                        previous_img, previous_kpts, trg_img[i], model,
                        target_test_dataset)
                    ssl_kpts[int(trg_meta['index'][i].cpu().numpy().astype(
                        np.int32))] = generated_kpts
                    if global_animal == 'tiger':
                        trg_lbl[i] = trg_lbl[
                            i,
                            np.array([
                                1, 2, 3, 4, 5, 6, 7, 8, 15, 16, 17, 18, 13, 14,
                                9, 10, 11, 12
                            ]) - 1, :, :]

                    acc1, _ = accuracy(score_map,
                                       trg_lbl[i].cpu().unsqueeze(0), idx)
                    acces1.update(acc1[0], 1)
                    previous_img = trg_img[i]
                    previous_kpts = generated_kpts
            print('Acc on target testing set (pseudo-labels):', acces1.avg)
            # fix: same missing-separator bug as above
            np.save(
                os.path.join(args.checkpoint, 'ssl_labels',
                             'ssl_labels_valid' + str(epoch) + '.npy'),
                ssl_kpts)

            # reload real training set images with the fresh pseudo-labels
            # (augmented + shuffled this time, for training)
            target_train_dataset = datasets.__dict__[args.target_ssl](
                is_train=True, is_aug=True, **vars(args))
            target_train_loader = torch.utils.data.DataLoader(
                target_train_dataset,
                batch_size=args.batch_size,
                shuffle=True,
                num_workers=args.workers,
                pin_memory=True)
            print("======> start training")

        loss_output_src = AverageMeter()
        loss_output_trg = AverageMeter()

        joint_loader = zip(source_train_loader, target_train_loader)
        model.train()

        # training with source images and target images jointly
        for i, ((src_img, src_lbl, src_meta),
                (trg_img, trg_lbl, trg_meta)) in enumerate(joint_loader):

            optimizer.zero_grad()

            # calculate loss on source dataset
            src_img, src_lbl, src_weight = src_img.to(device), src_lbl.to(
                device, non_blocking=True), src_meta['target_weight'].to(
                    device, non_blocking=True)
            src_kpt_score = model(src_img)
            if type(src_kpt_score) == list:  # multiple output
                loss_kpt_src = 0
                for o in src_kpt_score:
                    loss_kpt_src += criterion(o, src_lbl, src_weight, len(idx))
                src_kpt_score = src_kpt_score[-1]
            else:  # single output
                loss_kpt_src = criterion(src_kpt_score, src_lbl, src_weight,
                                         len(idx))
            loss_output_src.update(loss_kpt_src.data.item(), src_img.size(0))
            loss_kpt_src.backward()

            # calculate loss on target dataset (pseudo-labels)
            trg_img, trg_lbl, trg_weight = trg_img.to(device), trg_lbl.to(
                device, non_blocking=True), trg_meta['target_weight'].to(
                    device, non_blocking=True)
            trg_kpt_score = model(trg_img)
            if type(trg_kpt_score) == list:  # multiple output
                loss_kpt_trg = 0
                for o in trg_kpt_score:
                    loss_kpt_trg += criterion(o, trg_lbl, trg_weight, len(idx))
                trg_kpt_score = trg_kpt_score[-1]
            else:  # single output
                loss_kpt_trg = criterion(trg_kpt_score, trg_lbl, trg_weight,
                                         len(idx))
            # down-weight the pseudo-label loss
            loss_kpt_trg *= args.gamma_
            loss_output_trg.update(loss_kpt_trg.data.item(), src_img.size(0))
            loss_kpt_trg.backward()

            # update
            optimizer.step()

            # print logs
            if (i + 1) % args.print_freq == 0:
                _t['iter time'].toc(average=False)
                print('[epoch %d][it %d][src kpt loss %.6f][trg kpt loss %.6f][lr %.6f][%.2fs]' % \
                    (epoch+1, i + 1, loss_output_src.avg, loss_output_trg.avg, optimizer.param_groups[0]['lr'], _t['iter time'].diff))
                _t['iter time'].tic()

        print('\nEvaluation')
        src_val_loss, src_val_acc = validate(source_test_loader, model,
                                             criterion, args.flip,
                                             args.batch_size, njoints)
        trg_val_loss, trg_val_acc = validate(target_test_loader, model,
                                             criterion, args.flip,
                                             args.batch_size, njoints)

        # save best model (by target-set validation accuracy)
        if trg_val_acc > trg_val_acc_best:
            trg_val_acc_best = trg_val_acc
            print('\ntaking snapshot ...')
            state = {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            }
            torch.save(
                state,
                os.path.join(args.checkpoint,
                             '%s' % (args.source) + '.pth.tar'))
# Ejemplo n.º 11
# (scraper artifact — kept as a comment so the file stays valid Python)
def main(args):
    """Fine-tune a 16-class pose model for the 'homes' 70-keypoint setup.

    The model is created with 16 output classes (to match pretrained
    weights), optionally resumed, and then its final score layer is replaced
    with a 70-channel conv before training. Best model is selected by lowest
    validation epoch loss (``vel``), not accuracy.

    Relies on module-level globals ``best_acc`` and ``idx``.
    """
    global best_acc
    global idx

    # idx is the index of joints used to compute accuracy
    if args.dataset in ['mpii', 'lsp']:
        idx = [1, 2, 3, 4, 5, 6, 11, 12, 15, 16]
    elif args.dataset == 'coco':
        idx = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
    elif args.dataset == 'homes':
        idx = list(range(1, 71))
    else:
        print("Unknown dataset: {}".format(args.dataset))
        assert False

    # create checkpoint dir
    if not isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # create model
    njoints = datasets.__dict__[args.dataset].njoints

    print("==> creating model '{}', stacks={}, blocks={}".format(
        args.arch, args.stacks, args.blocks))
    model = models.__dict__[args.arch](
        num_stacks=args.stacks,
        num_blocks=args.blocks,
        num_classes=16,  # created as 16 to match pretrained weights; the
                         # score layer is swapped to 70 channels below
        resnet_layers=args.resnet_layers)

    model = torch.nn.DataParallel(model).to(device)

    # plain MSE over heatmaps (JointsMSELoss / BCEWithLogitsLoss were tried
    # previously and abandoned)
    criterion = torch.nn.MSELoss().to(device)
    print('==> loss function: %s' % criterion.__str__())

    if args.solver == 'rms':
        optimizer = torch.optim.RMSprop(model.parameters(),
                                        lr=args.lr,
                                        momentum=args.momentum,
                                        weight_decay=args.weight_decay)
    elif args.solver == 'adam':
        optimizer = torch.optim.Adam(
            model.parameters(),
            lr=args.lr,
        )
    else:
        print('Unknown solver: {}'.format(args.solver))
        assert False

    # optionally resume from a checkpoint
    title = args.dataset + ' ' + args.arch
    if args.resume:
        if isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc = checkpoint['best_acc']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            logger = Logger(join(args.checkpoint, 'log.txt'),
                            title=title,
                            resume=True)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        logger = Logger(join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(
            ['Epoch', 'LR', 'Train Loss', 'Val Loss', 'Train Acc', 'Val Acc'])

    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # fine-tuning: replace the final score layer so the network outputs 70
    # channels instead of 16.
    # NOTE(review): re-wrapping the children in nn.Sequential assumes the
    # model's submodules compose sequentially -- confirm for this arch.
    model_net = list(model.children())
    model_net[-1].score[0] = nn.Conv2d(256,
                                       70,
                                       kernel_size=(1, 1),
                                       stride=(1, 1)).to(device)
    model = nn.Sequential(*model_net)

    # create data loader
    train_dataset = datasets.__dict__[args.dataset](**vars(args))
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.train_batch,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_dataset = datasets.__dict__[args.dataset](is_train=False,
                                                  is_valid=True,
                                                  **vars(args))
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.test_batch,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # evaluation only
    if args.evaluate:
        print('\nEvaluation only')
        # fix: validate() returns 4 values (see the training loop below);
        # the original unpacked 3 here and crashed in --evaluate mode
        loss, acc, predictions, _vel = validate(val_loader, model, criterion,
                                                njoints, args.debug,
                                                args.flip)
        save_pred(predictions, checkpoint=args.checkpoint)
        return

    # train and eval
    df_loss = pd.DataFrame()
    train_epo_loss, val_epo_loss = [], []
    lr = args.lr
    # fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is equivalent here
    best_vel = float('inf')
    for epoch in range(args.start_epoch, args.epochs + args.start_epoch):
        lr = adjust_learning_rate(optimizer, epoch, lr, args.schedule,
                                  args.gamma)
        print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr))

        # decay sigma
        if args.sigma_decay > 0:
            train_loader.dataset.sigma *= args.sigma_decay
            val_loader.dataset.sigma *= args.sigma_decay

        # train for one epoch
        train_loss, train_acc, tel = train(train_loader, model, criterion,
                                           optimizer, args.debug, args.flip)

        # evaluate on validation set
        valid_loss, valid_acc, predictions, vel = validate(
            val_loader, model, criterion, njoints, args.debug, args.flip)
        # save epoch loss to csv
        train_epo_loss += [tel]
        val_epo_loss += [vel]
        df_loss.assign(train=train_epo_loss,
                       val=val_epo_loss).to_csv('./loss.csv')

        # append logger file
        logger.append([
            epoch + 1, lr, train_loss, valid_loss,
            train_acc.item(),
            valid_acc.item()
        ])

        # best model = lowest validation epoch loss (not highest accuracy)
        is_best = vel < best_vel
        best_vel = min(best_vel, vel)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            predictions,
            is_best,
            checkpoint=args.checkpoint,
            snapshot=args.snapshot)

    logger.close()
def main(args):
    """Load a training checkpoint and save a 'clean' copy of its weights.

    The clean copy is the checkpoint's state dict with any DataParallel
    ``module.`` prefix stripped from the keys, so the weights load into a
    bare (non-DataParallel) model. Written to
    ``<checkpoint>/clean_model.pth.tar``.
    """
    global best_acc
    global idx

    # idx is the index of joints used to compute accuracy
    if args.dataset in ['mpii', 'lsp']:
        idx = [1, 2, 3, 4, 5, 6, 11, 12, 15, 16]
    elif args.dataset == 'mscoco':
        idx = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
    else:
        print("Unknown dataset: {}".format(args.dataset))
        assert False

    # create checkpoint dir
    if not isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # create model
    njoints = datasets.__dict__[args.dataset].njoints

    print("==> creating model '{}', stacks={}, blocks={}".format(
        args.arch, args.stacks, args.blocks))
    model = models.__dict__[args.arch](num_stacks=args.stacks,
                                       num_blocks=args.blocks,
                                       num_classes=njoints)

    model = torch.nn.DataParallel(model).to(device)

    # define loss function (criterion) and optimizer
    criterion = torch.nn.MSELoss(reduction='mean').to(device)

    optimizer = torch.optim.RMSprop(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    title = args.dataset + ' ' + args.arch
    if args.resume:
        if isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc = checkpoint['best_acc']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            logger = Logger(join(args.checkpoint, 'log.txt'),
                            title=title,
                            resume=True)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        logger = Logger(join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(
            ['Epoch', 'LR', 'Train Loss', 'Val Loss', 'Train Acc', 'Val Acc'])

    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # save a cleaned version of the weights: strip the DataParallel
    # 'module.' prefix (7 characters) from every key when present.
    # NOTE(review): this step requires --resume to have pointed at a valid
    # checkpoint file; otherwise `checkpoint` is undefined here.
    from collections import OrderedDict
    state_dict = checkpoint['state_dict']
    if any(key.startswith('module') for key in state_dict):
        clean_model = OrderedDict((k[7:], v) for k, v in state_dict.items())
    else:
        # fix: previously an empty OrderedDict was saved when no key carried
        # the 'module.' prefix; fall back to the state dict unchanged
        clean_model = state_dict
    clean_model_path = join(args.checkpoint, 'clean_model.pth.tar')
    torch.save(clean_model, clean_model_path)
    print('clean model saved: {}'.format(clean_model_path))
# Ejemplo n.º 13
# (scraper artifact — kept as a comment so the file stays valid Python)
def myvalidate(model, criterion, num_classes, debug=False, flip=True):
    """Run the model on a hard-coded folder of test images and return the
    predicted keypoint coordinates.

    Args:
        model: trained network, called as ``model(input)``.
        criterion: unused; kept for signature compatibility with validate().
        num_classes: number of keypoints per image.
        debug: if True, plot and save heatmap overlays per image.
        flip: unused; flip augmentation is disabled in this variant.

    Returns:
        Tensor of shape (img_num, num_classes, 2) with predicted (x, y)
        coordinates per image.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()  # never updated here; progress bar shows 0
    acces = AverageMeter()   # never updated here; progress bar shows 0

    # hard-coded test configuration
    img_folder = '/data3/wzwu/dataset/my'
    img_num = 1
    r = 0  # rotation angle for the crop
    center1 = torch.Tensor([1281, 2169])    # crop center for preprocessing
    center2 = torch.Tensor([[1281, 2169]])  # batched center for final_preds
    scale = torch.Tensor([10.0])
    inp_res = 256
    meanstd_file = './data/mpii/mean.pth.tar'
    if isfile(meanstd_file):
        meanstd = torch.load(meanstd_file)
        mean = meanstd['mean']
        std = meanstd['std']
    else:
        # fix: previously fell through and crashed later with a NameError
        # on `mean`/`std`; fail early with a clear message instead
        raise FileNotFoundError(
            'mean/std file not found: {}'.format(meanstd_file))

    # load and preprocess every test image
    # (fix: removed leftover set_trace() debugger breakpoints that blocked
    # execution in this function)
    input_list = []
    for i in range(img_num):
        img_name = str(i) + '.jpg'
        img_path = os.path.join(img_folder, img_name)
        print('img_path')
        print(img_path)
        img = load_image(img_path)
        inp = crop(img, center1, scale, [inp_res, inp_res], rot=r)
        inp = color_normalize(inp, mean, std)
        input_list.append(inp)

    # predictions
    predictions = torch.Tensor(img_num, num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=img_num)
    with torch.no_grad():
        for i, input in enumerate(input_list):
            # measure data loading time
            s0, s1, s2 = input.size()
            input = input.view(1, s0, s1, s2)  # add batch dimension
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)

            # compute output; for multi-stage models keep the last stack's
            # heatmaps (flip augmentation intentionally disabled here)
            output = model(input)
            score_map = output[-1].cpu() if type(output) == list else output.cpu()

            # generate coordinate predictions from the 64x64 heatmaps
            preds = final_preds(score_map, center2, scale, [64, 64])
            print('preds')
            print(preds)
            print('predictions')
            print(predictions)
            for n in range(score_map.size(0)):
                predictions[i, :, :] = preds[n, :, :]

            if debug:
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()
                plt.savefig('/data3/wzwu/test/' + str(i) + '.png')

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix  = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                        batch=i + 1,
                        size=img_num,
                        data=data_time.val,
                        bt=batch_time.avg,
                        total=bar.elapsed_td,
                        eta=bar.eta_td,
                        loss=losses.avg,
                        acc=acces.avg
                        )
            bar.next()

        bar.finish()
    return predictions