def infer(valid_queue, model, criterion, verbose=True):
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    model.eval()

    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            # Variable is deprecated and torch.no_grad() above already
            # disables autograd; `async` is a reserved word in Python 3.7+
            if not args.disable_cuda:
                input = input.cuda()
                target = target.cuda(non_blocking=True)

            logits = model(input, 0)
            loss = criterion(logits, target)

            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)

            if verbose and step % args.report_freq == 0:
                print('valid %03d %.3e %.3f %.3f' %
                      (step, objs.avg, top1.avg, top5.avg))
            # if step > 10: break

    return top1.avg, objs.avg
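
# All of the snippets in this collection lean on a utils.AverageMeter with the
# conventional val/sum/count/avg interface. A minimal sketch of such a meter
# (an assumption; the exact utils module these projects use is not shown):
class AverageMeter:
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0   # last value recorded
        self.sum = 0.0   # weighted sum of all values
        self.count = 0   # total weight (usually the number of samples)
        self.avg = 0.0   # running average

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count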
Example #2
    def train_network(self, params, tbx_writer, checkpointer, train_loader, valLoader, test_loader, model, criterion, optimiser, inferer):  
    #{{{
        print('Epoch,\tLR,\tTrain_Loss,\tTrain_Top1,\tTrain_Top5,\tTest_Loss,\tTest_Top1,\tTest_Top5,\tVal_Loss,\tVal_Top1,\tVal_Top5')

        # iterate over the epochs
        for epoch in tqdm(range(params.start_epoch, params.epochs), desc='training', leave=False) : 
            params.curr_epoch = epoch
            state = self.update_lr(params, optimiser)

            # setup train loss, top1 and top5 averaging units
            losses = utils.AverageMeter()
            top1 = utils.AverageMeter()
            top5 = utils.AverageMeter()
            
            # iterate batches in an epoch
            self.batch_iter(model, criterion, optimiser, train_loader, params, losses, top1, top5)
            # record train loss
            params.train_loss = losses.avg        
            params.train_top1 = top1.avg        
            params.train_top5 = top5.avg        
    
            # get test loss
            params.test_loss, params.test_top1, params.test_top5 = inferer.test_network(params, test_loader, model, criterion, optimiser, verbose=False)
            params.val_loss, params.val_top1, params.val_top5 = inferer.test_network(params, valLoader, model, criterion, optimiser, verbose=False)
            
            checkpointer.save_checkpoint(model.state_dict(), optimiser.state_dict(), params)
            
            tqdm.write(
                "{},\t{},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f}"
                .format(epoch, params.lr, params.train_loss, params.train_top1,
                        params.train_top5, params.test_loss, params.test_top1,
                        params.test_top5, params.val_loss, params.val_top1,
                        params.val_top5))
Example #3
    def test_network(self, params, test_loader, model, criterion, optimiser, verbose=True):
        model.eval()

        losses = utils.AverageMeter()
        top1 = utils.AverageMeter()
        top5 = utils.AverageMeter()

        for batch_idx, (inputs, targets) in tqdm(enumerate(test_loader), total=len(test_loader), desc='inference', leave=False):
            # move inputs and targets to GPU
            with torch.no_grad():
                device = 'cuda:' + str(params.gpuList[0])
                if params.use_cuda:
                    inputs, targets = inputs.cuda(device, non_blocking=True), targets.cuda(device, non_blocking=True)

                # perform inference
                outputs = model(inputs)
                loss = criterion(outputs, targets)

            prec1, prec5 = utils.accuracy(outputs.data, targets.data)

            losses.update(loss.item())
            top1.update(prec1.item())
            top5.update(prec5.item())

        if verbose:
            tqdm.write('Loss: {}, Top1: {}, Top5: {}'.format(losses.avg, top1.avg, top5.avg))

        return (losses.avg, top1.avg, top5.avg)
    def _get_average_meters(self):
        error_metric = utils.AverageMeter()
        obj = utils.AverageMeter()
        main_obj = utils.AverageMeter()
        kl = utils.AverageMeter()
        ece = utils.AverageMeter()
        return error_metric, obj, main_obj, kl, ece
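
# Most snippets also call utils.accuracy(logits, target, ...). A common sketch
# of that helper, returning precision@k in percent; the exact utils module is
# not shown, so treat this as an assumption. The default topk=(1, 5) matches
# the snippets that unpack two values without passing topk explicitly:
def accuracy(output, target, topk=(1, 5)):
    """Compute precision@k for each k in topk, as a percentage."""
    maxk = max(topk)
    batch_size = target.size(0)

    # indices of the top-k predictions per sample, transposed to (maxk, batch)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res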
Example #5
    def static_finetune_l1_weights(self, params, pruner, checkpointer,
                                   train_loader, test_loader, valLoader, model,
                                   criterion, optimiser, inferer):
        #{{{
        print(
            'Epoch,\tLR,\tTrain_Loss,\tTrain_Top1,\tTrain_Top5,\tTest_Loss,\tTest_Top1,\tTest_Top5,\tVal_Loss,\tVal_Top1,\tVal_Top5'
        )

        for epoch in tqdm(range(params.start_epoch, params.finetuneBudget),
                          desc='training',
                          leave=False):
            params.curr_epoch = epoch
            state = self.update_lr(params, optimiser)

            # perform pruning
            if params.pruneFilters and epoch == params.pruneAfter:
                checkpointer.save_model_only(model.state_dict(),
                                             params.printOnly, 'pre_pruning')
                tqdm.write('Pruning Network')
                channelsPruned, model, optimiser = pruner.prune_model(model)
                totalPrunedPerc, _, _ = pruner.prune_rate(model)
                tqdm.write(
                    'Pruned Percentage = {:.2f}%'.format(totalPrunedPerc))
                summary = pruner.log_pruned_channels(checkpointer.root, params,
                                                     totalPrunedPerc,
                                                     channelsPruned)

            losses = utils.AverageMeter()
            top1 = utils.AverageMeter()
            top5 = utils.AverageMeter()

            self.batch_iter(model, criterion, optimiser, train_loader, params,
                            losses, top1, top5)

            params.train_loss = losses.avg
            params.train_top1 = top1.avg
            params.train_top5 = top5.avg

            # get test loss
            params.test_loss, params.test_top1, params.test_top5 = inferer.test_network(
                params,
                test_loader,
                model,
                criterion,
                optimiser,
                verbose=False)
            params.val_loss, params.val_top1, params.val_top5 = inferer.test_network(
                params, valLoader, model, criterion, optimiser, verbose=False)

            checkpointer.save_checkpoint(model.state_dict(),
                                         optimiser.state_dict(), params)

            tqdm.write(
                "{},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f}"
                .format(epoch, params.lr, params.train_loss, params.train_top1,
                        params.train_top5, params.test_loss, params.test_top1,
                        params.test_top5, params.val_loss, params.val_top1,
                        params.val_top5))
Example #6
    def train_batch(self, arch, errors_dict):
        args = self.args
        if self.steps % len(self.train_queue) == 0:
            self.scheduler.step()
            self.objs = utils.AverageMeter()
            self.top1 = utils.AverageMeter()
            self.top5 = utils.AverageMeter()
        lr = self.scheduler.get_lr()[0]

        weights = self.get_weights_from_arch(arch)
        self.set_model_weights(weights)

        step = self.steps % len(self.train_queue)
        input, target = next(self.train_iter)

        self.model.train()
        n = input.size(0)

        input = Variable(input, requires_grad=False).cuda()
        target = Variable(target, requires_grad=False).cuda(non_blocking=True)

        # get a random minibatch from the search queue with replacement
        self.optimizer.zero_grad()
        logits = self.model(input, discrete=True)
        loss = self.criterion(logits, target)

        loss.backward()
        nn.utils.clip_grad_norm_(self.model.parameters(), args.grad_clip)
        self.optimizer.step()

        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        self.objs.update(loss.item(), n)
        self.top1.update(prec1.item(), n)
        self.top5.update(prec5.item(), n)

        if step % args.report_freq == 0:
            logger.info('train %03d %e %f %f', step, self.objs.avg,
                        self.top1.avg, self.top5.avg)

        self.steps += 1
        if self.steps % len(self.train_queue) == 0:
            self.epochs += 1
            self.train_iter = iter(self.train_queue)
            valid_err, valid_obj = self.evaluate(arch)
            logger.info('epoch %d  |  train_acc %f  |  valid_acc %f' %
                        (self.epochs, self.top1.avg, 1 - valid_err))
            self.save()
            errors_dict['train_acc'].append(self.top1.avg)
            errors_dict['train_loss'].append(self.objs.avg)
            errors_dict['valid_acc'].append(1 - valid_err)
            errors_dict['valid_loss'].append(valid_obj)
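
# train_batch pulls one minibatch per call via next(self.train_iter) and
# re-creates the iterator at every epoch boundary (Example #15 below instead
# catches the exception on exhaustion). A small generator makes that loop-back
# implicit; a sketch, not part of the original class:
def cycle(loader):
    """Yield batches forever, restarting the loader whenever it is exhausted."""
    while True:
        for batch in loader:
            yield batch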
Example #7
    def train_network(self, params, tbx_writer, checkpointer, train_loader,
                      test_loader, valLoader, model, criterion, optimiser,
                      inferer, policy, scaler):
        print(
            'Epoch,\tLR,\tTrain_Loss,\tTrain_Top1,\tTrain_Top5,\tTest_Loss,\tTest_Top1,\tTest_Top5,\tVal_Loss,\tVal_Top1,\tVal_Top5,\tDataType,\tBitWidth'
        )

        for epoch in tqdm(range(params.start_epoch, params.epochs),
                          desc='training',
                          leave=False):
            params.curr_epoch = epoch
            state = self.update_lr(params, optimiser)

            losses = utils.AverageMeter()
            top1 = utils.AverageMeter()
            top5 = utils.AverageMeter()

            # iterate over the batches in the epoch
            self.batch_iter(model, criterion, optimiser, train_loader, params,
                            losses, top1, top5)

            params.train_loss = losses.avg
            params.train_top1 = top1.avg
            params.train_top5 = top5.avg

            # get val and test loss
            params.test_loss, params.test_top1, params.test_top5 = inferer.test_network(
                params, test_loader, model, criterion, optimiser)
            params.val_loss, params.val_top1, params.val_top5 = inferer.test_network(
                params, valLoader, model, criterion, optimiser)

            if params.runMuppet:
                policy.update(model)
                if policy.check_violation(epoch, tqdm, checkpointer):
                    policy.change_precision(scaler, model, optimiser)
                    tqdm.write(
                        "GD violation detected, precision changed to {}".
                        format(params.bitWidth))
                if policy.check_stopping_condition(optimiser):
                    tqdm.write("Ending training")
                    return

            checkpointer.save_checkpoint(model.state_dict(),
                                         optimiser.state_dict(), params)

            tqdm.write(
                "{},\t{},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{},\t\t{}"
                .format(epoch, params.lr, params.train_loss, params.train_top1,
                        params.train_top5, params.test_loss, params.test_top1,
                        params.test_top5, params.val_loss, params.val_top1,
                        params.val_top5, params.dataType, params.bitWidth))
Example #8
def train(train_loader,
          model,
          criterion,
          optimizer,
          lr_init=None,
          lr_now=None,
          glob_step=None,
          lr_decay=None,
          gamma=None,
          max_norm=True):

    losses = utils.AverageMeter()
    model.train()

    for i, (inps, tars) in enumerate(tqdm(train_loader)):
        glob_step += 1
        if glob_step % lr_decay == 0 or glob_step == 1:
            lr_now = utils.lr_decay(optimizer, glob_step, lr_init, lr_decay,
                                    gamma)

        #make prediction with model
        inputs = Variable(inps.cuda())
        targets = Variable(tars.cuda(non_blocking=True))
        outputs = model(inputs)

        # calculate loss
        optimizer.zero_grad()
        loss = criterion(outputs, targets)
        losses.update(loss.item(), inputs.size(0))
        loss.backward()
        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()

    return glob_step, lr_now, losses.avg
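
# utils.lr_decay is called above as lr_decay(optimizer, glob_step, lr_init,
# lr_decay, gamma). A plausible sketch, assuming the exponential step decay
# used in 3d-pose-baseline-style code:
def lr_decay(optimizer, step, lr_init, decay_step, gamma):
    """Decay every param group's learning rate by gamma ** (step / decay_step)."""
    lr = lr_init * gamma ** (step / decay_step)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr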
Example #9
    def __call__(self, glob_step, epoch):
        losses = utils.AverageMeter()

        self.model.train()
        
        start = time.time()
        batch_time = 0

        for i, (inps, tars) in enumerate(self.train_loader):
            glob_step += 1
            if glob_step % self.cfg.get_lr_decay() == 0 or glob_step == 1:
                self.lr_now = self._lr_decay( 
                        glob_step, 
                        self.cfg.get_learning_rate(), 
                        self.cfg.get_lr_decay(), 
                        self.cfg.get_lr_gamma())
            inputs = Variable(inps.cuda())
            targets = Variable(tars.cuda(non_blocking=True))

            outputs = self.model(inputs)

            # calculate loss
            self.optimizer.zero_grad()
            loss = self.loss_fuction(outputs, targets)
            losses.update(loss.item(), inputs.size(0))
            loss.backward()
            #losses.update(loss.data[0], inputs.size(0))

            if True:
                '''
                Max norm constraints. 
                Another form of regularization is to enforce an absolute upper bound 
                on the magnitude of the weight vector for every neuron and 
                use projected gradient descent to enforce the constraint. 
                In practice, this corresponds to performing the parameter update as normal,
                and then enforcing the constraint by clamping the weight vector w⃗  
                of every neuron to satisfy ∥w⃗ ∥2<c. 
                Typical values of c are on orders of 3 or 4. 
                Some people report improvements when using this form of regularization. 
                One of its appealing properties is that network cannot “explode” even 
                when the learning rates are set too high because the updates are always bounded.
                '''
                nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1)
            self.optimizer.step()
            
            # update summary
            if (i + 1) % 100 == 0:
                batch_time = time.time() - start
                start = time.time()
            print('Epoch: {epoch}/{epochs} | ({batch}/{size}) | batch: {batchtime:.4}ms | loss: {loss:.4f}' \
            .format(epoch=epoch+1,
                    epochs=self.cfg.get_epochs(),
                    batch=i + 1,
                    size=len(self.train_loader),
                    batchtime=batch_time * 10.0,
                    loss=losses.avg))
            
        return glob_step, self.lr_now, losses.avg, self.optimizer
Example #10
def test(test_loader, model, criterion, stat):
    losses = utils.AverageMeter()

    model.eval()

    all_dist, all_output, all_target, all_input = [], [], [], []

    for i, (inps, tars) in enumerate(tqdm(test_loader)):
        inputs = Variable(inps.cuda())
        targets = Variable(tars.cuda(non_blocking=True))

        # make prediction with model
        outputs = model(inputs)

        # outputs = outputs.view(outputs.size(0), -1)
        # targets = targets.view(targets.size(0), -1)

        # calculate loss
        loss = criterion(outputs, targets)
        losses.update(loss.item(), inputs.size(0))

        # undo normalisation to calculate accuracy in real units
        dim = 3
        # dimensions = stat['targets_3d']
        tar = utils.unNormalizeData(
            targets.data.cpu().numpy(), stat["mean"], stat["std"]
        )
        out = utils.unNormalizeData(
            outputs.data.cpu().numpy(), stat["mean"], stat["std"]
        )

        abserr = np.abs(out - tar) 

        n_pts = 39 // dim
        distance = np.zeros((abserr.shape[0], n_pts))
        for k in range(n_pts):
            distance[:, k] = np.sum(abserr[:, dim * k : dim * (k + 1)], axis=1)

        # group and stack
        all_dist.append(distance)
        all_output.append(outputs.data.cpu().numpy())
        all_target.append(targets.data.cpu().numpy())
        all_input.append(inputs.data.cpu().numpy())

    all_dist, all_output, all_target, all_input = (
        np.vstack(all_dist),
        np.vstack(all_output),
        np.vstack(all_target),
        np.vstack(all_input),
    )

    # mean errors
    all_dist[all_dist == 0] = np.nan
    joint_err = np.nanmean(all_dist, axis=0)
    ttl_err = np.nanmean(joint_err)

    print(">>> error: {} <<<".format(ttl_err))
    return losses.avg, ttl_err, joint_err, all_dist, all_output, all_target, all_input
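
# utils.unNormalizeData above undoes the z-score normalisation applied to the
# data. A sketch covering both call styles in these examples (the optional
# dim_use argument, used in later snippets, first scatters the used columns
# back into the full dimensionality); the signature is an assumption:
import numpy as np

def unNormalizeData(normalized, mean, std, dim_use=None):
    """Invert x -> (x - mean) / std, optionally expanding to the full dims."""
    if dim_use is None:
        return normalized * std + mean
    full = np.zeros((normalized.shape[0], mean.shape[0]), dtype=np.float32)
    full[:, dim_use] = normalized  # unused dimensions stay zero
    return full * std + mean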
Example #11
    def finetune_entropy(self, params, pruner, checkpointer, train_loader,
                         test_loader, valLoader, model, criterion, optimiser,
                         inferer):
        #{{{
        print(
            'Epoch,\tLR,\tTrain_Loss,\tTrain_Top1,\tTrain_Top5,\tTest_Loss,\tTest_Top1,\tTest_Top5,\tVal_Loss,\tVal_Top1,\tVal_Top5'
        )

        for epoch in tqdm(range(params.start_epoch, params.finetuneBudget),
                          desc='training',
                          leave=False):
            params.curr_epoch = epoch
            state = self.update_lr(params, optimiser)

            losses = utils.AverageMeter()
            top1 = utils.AverageMeter()
            top5 = utils.AverageMeter()

            self.batch_iter(model, criterion, optimiser, train_loader, params,
                            losses, top1, top5)

            params.train_loss = losses.avg
            params.train_top1 = top1.avg
            params.train_top5 = top5.avg

            # get test loss
            params.test_loss, params.test_top1, params.test_top5 = inferer.test_network(
                params,
                test_loader,
                model,
                criterion,
                optimiser,
                verbose=False)
            params.val_loss, params.val_top1, params.val_top5 = inferer.test_network(
                params, valLoader, model, criterion, optimiser, verbose=False)

            checkpointer.save_checkpoint(model.state_dict(),
                                         optimiser.state_dict(), params)

            tqdm.write(
                "{},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f}"
                .format(epoch, params.lr, params.train_loss, params.train_top1,
                        params.train_top5, params.test_loss, params.test_top1,
                        params.test_top5, params.val_loss, params.val_top1,
                        params.val_top5))
Example #12
def test(test_loader, model, criterion, stat, predict=False):
    losses = utils.AverageMeter()
    model.eval()

    all_dist, all_output, all_target, all_input, all_bool = [], [], [], [], []

    for i, (inps, tars, good_keypts, keys) in enumerate(tqdm(test_loader)):
        inputs = Variable(inps.cuda())
        targets = Variable(tars.cuda(non_blocking=True))

        #make prediction with model
        outputs = model(inputs)
        all_output.append(outputs.data.cpu().numpy())
        all_input.append(inputs.data.cpu().numpy())

        if not predict:
            # calculate loss
            loss = criterion(outputs, targets)
            losses.update(loss.item(), inputs.size(0))

            outputs[~good_keypts] = 0
            targets[~good_keypts] = 0

            # undo normalisation to calculate accuracy in real units
            dim = 1
            dimensions = stat['targets_1d']
            out = stats.unNormalize(outputs.data.cpu().numpy(),
                                    stat['mean'][dimensions],
                                    stat['std'][dimensions])
            tar = stats.unNormalize(targets.data.cpu().numpy(),
                                    stat['mean'][dimensions],
                                    stat['std'][dimensions])

            #compute error
            distance = stats.abs_error(tar, out, dim)

            all_dist.append(distance)
            all_target.append(targets.data.cpu().numpy())
            all_bool.append(good_keypts)

    all_input = np.vstack(all_input)
    all_output = np.vstack(all_output)

    if predict:
        return None, None, None, None, all_output, None, all_input, None, None

    all_target = np.vstack(all_target)
    all_dist = np.vstack(all_dist)
    all_bool = np.vstack(all_bool)

    #mean errors
    all_dist[all_dist == 0] = np.nan
    joint_err = np.nanmean(all_dist, axis=0)
    ttl_err = np.nanmean(joint_err)

    print(">>> error: {} <<<".format(ttl_err))
    return losses.avg, ttl_err, joint_err, all_dist, all_output, all_target, all_input, all_bool
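
# stats.abs_error above packages the per-keypoint error that Example #10
# computes inline. A sketch consistent with both call sites (assumed helper):
import numpy as np

def abs_error(tar, out, dim):
    """Sum of absolute errors per keypoint, for keypoints of `dim` coordinates."""
    abserr = np.abs(out - tar)
    n_pts = abserr.shape[1] // dim
    distance = np.zeros((abserr.shape[0], n_pts))
    for k in range(n_pts):
        distance[:, k] = np.sum(abserr[:, dim * k:dim * (k + 1)], axis=1)
    return distance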
Example #13
    def validating(self):
        losses = utils.AverageMeter()

        self.model.eval()

        all_dist = []

        for i, (inps, tars) in enumerate(self.data_loader):
            inputs = Variable(inps.cuda())
            tars = Variable(tars.cuda(non_blocking=True))

            outputs = self.model(inputs)
            # calculate loss
            outputs_coord = outputs
            loss = self.loss_fuction(outputs_coord, tars)
            losses.update(loss.item(), inputs.size(0))
            # calculate accuracy
            targets_unnorm = self.unNormalizeData(tars.data.cpu().numpy(),
                                                  self.stat_3d['mean'],
                                                  self.stat_3d['std'],
                                                  self.stat_3d['dim_use'])
            outputs_unnorm = self.unNormalizeData(outputs.data.cpu().numpy(),
                                                  self.stat_3d['mean'],
                                                  self.stat_3d['std'],
                                                  self.stat_3d['dim_use'])

            # remove dim ignored
            dim_use = np.hstack((np.arange(3), self.stat_3d['dim_use']))

            outputs_use = outputs_unnorm[:, dim_use]
            targets_use = targets_unnorm[:, dim_use]

            #use procrustes analysis at testing
            if True:
                for ba in range(inps.size(0)):
                    gt = targets_use[ba].reshape(-1, 3)
                    out = outputs_use[ba].reshape(-1, 3)
                    _, Z, T, b, c = get_transformation(gt, out, True)
                    out = (b * out.dot(T)) + c
                    outputs_use[ba, :] = out.reshape(1, 51)

            sqerr = (outputs_use - targets_use)**2

            distance = np.zeros((sqerr.shape[0], 17))
            dist_idx = 0
            for k in np.arange(0, 17 * 3, 3):
                distance[:,
                         dist_idx] = np.sqrt(np.sum(sqerr[:, k:k + 3], axis=1))
                dist_idx += 1
            all_dist.append(distance)

        all_dist = np.vstack(all_dist)
        #joint_err = np.mean(all_dist, axis=0)
        ttl_err = np.mean(all_dist)
        print(">>> error: {} <<<".format(ttl_err))
        return losses.avg, ttl_err
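
# get_transformation above aligns the prediction to the ground truth with a
# similarity (Procrustes) transform before measuring error. A sketch of the
# standard algorithm, matching the usage out_aligned = b * out.dot(T) + c;
# the exact helper is not shown, so treat this as an assumption:
import numpy as np

def get_transformation(X, Y, compute_scale=True):
    """Find (d, Z, T, b, c) such that Z = b * Y.dot(T) + c best matches X."""
    muX, muY = X.mean(0), Y.mean(0)
    X0, Y0 = X - muX, Y - muY
    normX = np.sqrt((X0 ** 2).sum())
    normY = np.sqrt((Y0 ** 2).sum())
    X0, Y0 = X0 / normX, Y0 / normY

    # optimal rotation from the SVD of the correlation matrix
    U, s, Vt = np.linalg.svd(X0.T.dot(Y0))
    T = Vt.T.dot(U.T)
    if np.linalg.det(T) < 0:  # avoid reflections
        Vt[-1, :] *= -1
        s[-1] *= -1
        T = Vt.T.dot(U.T)

    traceTA = s.sum()
    b = traceTA * normX / normY if compute_scale else 1.0
    c = muX - b * muY.dot(T)
    Z = b * Y.dot(T) + c
    d = 1 - traceTA ** 2 if compute_scale else np.sum((X0 - Y0.dot(T)) ** 2)
    return d, Z, T, b, c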
def train(
    train_loader,
    model,
    criterion,
    optimizer,
    lr_init=None,
    lr_now=None,
    glob_step=None,
    lr_decay=None,
    gamma=None,
    max_norm=True,
):
    losses = utils.AverageMeter()

    model.train()

    start = time.time()
    batch_time = 0
    bar = Bar(">>>", fill=">", max=len(train_loader))

    for i, (inps, tars) in enumerate(train_loader):
        glob_step += 1
        if glob_step % lr_decay == 0 or glob_step == 1:
            lr_now = utils.lr_decay(optimizer, glob_step, lr_init, lr_decay,
                                    gamma)
        inputs = Variable(inps.cuda())
        targets = Variable(tars.cuda())

        outputs = model(inputs)

        # calculate loss
        optimizer.zero_grad()
        loss = criterion(outputs, targets)
        losses.update(loss.item(), inputs.size(0))
        loss.backward()
        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()

        # update summary
        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()

        bar.suffix = "({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.4f}".format(
            batch=i + 1,
            size=len(train_loader),
            batchtime=batch_time * 10.0,
            ttl=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
        )
        bar.next()

    bar.finish()
    return glob_step, lr_now, losses.avg
Example #15
    def evaluate(self, arch, split=None):
        # Return error since we want to minimize obj val
        logger.info(arch)
        objs = utils.AverageMeter()
        top1 = utils.AverageMeter()
        top5 = utils.AverageMeter()

        weights = self.get_weights_from_arch(arch)
        self.set_model_weights(weights)

        self.model.eval()

        if split is None:
            n_batches = 10
        elif self.args.debug:
            n_batches = 1
        else:
            n_batches = len(self.valid_queue)

        for step in range(n_batches):
            try:
                input, target = next(self.valid_iter)
            except Exception as e:
                logger.info('looping back over valid set')
                self.valid_iter = iter(self.valid_queue)
                input, target = next(self.valid_iter)
            # Variable(volatile=True) and async are gone in modern PyTorch:
            # disable autograd explicitly and copy with non_blocking instead
            input = input.cuda()
            target = target.cuda(non_blocking=True)

            with torch.no_grad():
                logits = self.model(input, discrete=True)
                loss = self.criterion(logits, target)

            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)

            if step % self.args.report_freq == 0:
                logger.info('valid %03d %e %f %f', step, objs.avg, top1.avg,
                            top5.avg)

        return 1 - top1.avg, objs.avg
def test(test_loader, model, criterion, joint_num, procrustes=False):
    losses = utils.AverageMeter()

    model.eval()
   
    all_dist = []
    start = time.time()
    batch_time = 0
    bar = Bar('>>>', fill='>', max=len(test_loader))

    for i, data in enumerate(test_loader):
        joint2d, truth = data['joint2d'], data['truth']

        inputs = Variable(joint2d.cuda().type(torch.cuda.FloatTensor))
        targets = Variable(truth.cuda().type(torch.cuda.FloatTensor))

        outputs = model(inputs)

        outputs = torch.reshape(outputs, (-1, joint_num * 3))
        targets = torch.reshape(targets, (-1, joint_num * 3))

        # calculate loss
        loss = criterion(outputs, targets)
        losses.update(loss.item(), inputs.size(0))

        sqerr = (outputs - targets) ** 2
        distance = np.zeros((sqerr.shape[0], joint_num + 1))
        dist_idx = 0
        for k in np.arange(0, (joint_num + 1) * 3, 3):
            distance[:, dist_idx] = torch.sqrt(torch.sum(sqerr[:, k:k + 3], axis=1)).to('cpu').detach().numpy()
            dist_idx += 1
        all_dist.append(distance)
            
        # update summary
        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()

        bar.suffix = '({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.6f}' \
            .format(batch=i + 1,
                    size=len(test_loader),
                    batchtime=batch_time * 10.0,
                    ttl=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg)
        bar.next()
        
    all_dist = np.vstack(all_dist)
#    joint_err = np.mean(all_dist, axis=0)
    ttl_err = np.mean(all_dist)
    bar.finish()
    print(">>> error: {} <<<".format(ttl_err))
    
    return targets, losses.avg, ttl_err
def train(train_loader, model, criterion, optimizer, joint_num,
          lr_init=None, lr_now=None, glob_step=None, lr_decay=None, gamma=None,
          max_norm=True):
    losses = utils.AverageMeter()

    model.train()
 
    start = time.time()
    batch_time = 0
    bar = Bar('>>>', fill='>', max=len(train_loader))
    
    for i, data in enumerate(train_loader):
        # Turn down Learning Rate
        glob_step += 1
        if glob_step % lr_decay == 0 or glob_step == 1:
            lr_now = utils.lr_decay(optimizer, glob_step, lr_init, lr_decay, gamma)
        
        joint2d, truth = data['joint2d'], data['truth']
        inputs = Variable(joint2d.cuda().type(torch.cuda.FloatTensor))
        targets = Variable(truth.cuda().type(torch.cuda.FloatTensor))

        outputs = model(inputs)
        outputs = torch.reshape(outputs, (-1, joint_num * 3))
        targets = torch.reshape(targets, (-1, joint_num * 3))

        # calculate loss
        optimizer.zero_grad()
        loss = criterion(outputs, targets)

        losses.update(loss.item(), inputs.size(0))
        loss.backward()
        
        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()

        # update summary
        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()

        bar.suffix = '({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.4f}' \
            .format(batch=i + 1,
                    size=len(train_loader),
                    batchtime=batch_time * 10.0,
                    ttl=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg)
        bar.next()

    bar.finish()
    return glob_step, lr_now, losses.avg
    def test_network(self,
                     params,
                     test_loader,
                     model,
                     criterion,
                     optimiser,
                     verbose=True):
        model.eval()

        losses = utils.AverageMeter()
        top1 = utils.AverageMeter()
        top5 = utils.AverageMeter()

        with torch.no_grad():
            with tqdm(total=len(test_loader), desc='Inference',
                      leave=verbose) as t:
                for batch_idx, (inputs, targets) in enumerate(test_loader):
                    device = 'cuda:' + str(params.gpuList[0])
                    inputs, targets = inputs.cuda(device, non_blocking=True),\
                            targets.cuda(device, non_blocking=True)

                    outputs = model(inputs)
                    loss = criterion(outputs, targets)

                    prec1, prec5 = utils.accuracy(outputs.data, targets.data)
                    losses.update(loss.item())
                    top1.update(prec1.item())
                    top5.update(prec5.item())

                    t.set_postfix({
                        'loss': losses.avg,
                        'top1': top1.avg,
                        'top5': top5.avg
                    })
                    t.update(1)

        return (losses.avg, top1.avg, top5.avg)
Example #19
def train(train_loader,
          model,
          criterion,
          optimizer,
          stat_2d,
          stat_3d,
          lr_init=None,
          lr_now=None,
          glob_step=None,
          lr_decay=None,
          gamma=None,
          max_norm=True):

    losses = utils.AverageMeter()

    model.train()

    # for i, (inps, tars) in enumerate(train_loader): # inps = (64, 32)
    pbar = tqdm(train_loader)
    for i, (inps, tars) in enumerate(pbar):  # inps = (64, 32)
        glob_step += 1
        if glob_step % lr_decay == 0 or glob_step == 1:
            lr_now = utils.lr_decay(optimizer, glob_step, lr_init, lr_decay,
                                    gamma)

        ### Input unnormalization
        inputs_unnorm = data_process.unNormalizeData(
            inps.data.cpu().numpy(), stat_2d['mean'], stat_2d['std'],
            stat_2d['dim_use'])  # 64, 64
        dim_2d_use = stat_2d['dim_use']
        inputs_use = inputs_unnorm[:, dim_2d_use]  # (64, 32)
        ### Input distance normalization
        inputs_dist_norm, _ = data_process.input_norm(
            inputs_use)  # (64, 32) , array
        input_dist = torch.tensor(inputs_dist_norm, dtype=torch.float32)

        ### Targets unnormalization
        targets_unnorm = data_process.unNormalizeData(
            tars.data.cpu().numpy(), stat_3d['mean'], stat_3d['std'],
            stat_3d['dim_use'])  # (64, 96)
        dim_3d_use = np.array([
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18, 19, 20, 21, 22, 23, 24,
            25, 26, 36, 37, 38, 39, 40, 41, 45, 46, 47, 51, 52, 53, 54, 55, 56,
            57, 58, 59, 75, 76, 77, 78, 79, 80, 81, 82, 83
        ])
        targets_use = targets_unnorm[:, dim_3d_use]  # (64, 48)

        ### Targets distance normalization
        targets_dist_norm, _ = data_process.output_norm(targets_use)
        targets_dist = torch.tensor(targets_dist_norm, dtype=torch.float32)

        inputs = Variable(input_dist.cuda())
        targets = Variable(targets_dist.cuda(non_blocking=True))

        outputs = model(inputs)

        # calculate loss
        optimizer.zero_grad()
        loss = criterion(outputs, targets)
        losses.update(loss.item(), inputs.size(0))
        loss.backward()

        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()

        # tqdm.set_postfix(loss='{:05.6f}'.format(losses.avg))
        pbar.set_postfix(tr_loss='{:05.6f}'.format(losses.avg))

    return glob_step, lr_now, losses.avg
Example #20
def job(tuning, params_path, devices, resume, save_interval):
    global params
    if tuning:
        with open(params_path, 'r') as f:
            params = json.load(f)
        mode_str = 'tuning'
        setting = '_'.join(f'{tp}-{params[tp]}'
                           for tp in params['tuning_params'])
    else:
        mode_str = 'train'
        setting = ''

    # change the seed whenever the parameters change (hoping for a seed-averaging effect)
    seed = sum(ord(_) for _ in str(params.values()))
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False

    exp_path = ROOT + f'experiments/{params["ex_name"]}/'
    os.environ['CUDA_VISIBLE_DEVICES'] = devices

    logger, writer = utils.get_logger(
        log_dir=exp_path + f'{mode_str}/log/{setting}',
        tensorboard_dir=exp_path + f'{mode_str}/tf_board/{setting}')

    if params['augmentation'] == 'soft':
        params['scale_limit'] = 0.2
        params['brightness_limit'] = 0.1
    elif params['augmentation'] == 'middle':
        params['scale_limit'] = 0.3
        params['shear_limit'] = 4
        params['brightness_limit'] = 0.1
        params['contrast_limit'] = 0.1
    else:
        raise ValueError

    train_transform, eval_transform = data_utils.build_transforms(
        scale_limit=params['scale_limit'],
        shear_limit=params['shear_limit'],
        brightness_limit=params['brightness_limit'],
        contrast_limit=params['contrast_limit'],
    )

    data_loaders = data_utils.make_train_loaders(
        params=params,
        data_root=ROOT + 'input/' + params['data'],
        train_transform=train_transform,
        eval_transform=eval_transform,
        scale='S',
        test_size=0,
        class_topk=params['class_topk'],
        num_workers=8)

    model = models.LandmarkNet(
        n_classes=params['class_topk'],
        model_name=params['model_name'],
        pooling=params['pooling'],
        loss_module=params['loss'],
        s=params['s'],
        margin=params['margin'],
        theta_zero=params['theta_zero'],
        use_fc=params['use_fc'],
        fc_dim=params['fc_dim'],
    ).cuda()
    optimizer = utils.get_optim(params, model)
    criterion = nn.CrossEntropyLoss()
    scheduler = optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        T_max=params['epochs'] * len(data_loaders['train']),
        eta_min=3e-6)
    start_epoch = 0

    if len(devices.split(',')) > 1:
        model = nn.DataParallel(model)

    for epoch in range(start_epoch, params['epochs']):

        logger.info(
            f'Epoch {epoch}/{params["epochs"]} | lr: {optimizer.param_groups[0]["lr"]}'
        )

        # ============================== train ============================== #
        model.train(True)

        losses = utils.AverageMeter()
        prec1 = utils.AverageMeter()

        for i, (_, x, y) in tqdm(enumerate(data_loaders['train']),
                                 total=len(data_loaders['train']),
                                 miniters=None,
                                 ncols=55):
            x = x.to('cuda')
            y = y.to('cuda')

            outputs = model(x, y)
            loss = criterion(outputs, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()

            acc = metrics.accuracy(outputs, y)
            losses.update(loss.item(), x.size(0))
            prec1.update(acc, x.size(0))

            if i % 100 == 99:
                logger.info(
                    f'{epoch+i/len(data_loaders["train"]):.2f}epoch | {setting} acc: {prec1.avg}'
                )

        train_loss = losses.avg
        train_acc = prec1.avg

        writer.add_scalars('Loss', {'train': train_loss}, epoch)
        writer.add_scalars('Acc', {'train': train_acc}, epoch)
        writer.add_scalar('LR', optimizer.param_groups[0]['lr'], epoch)

        if (epoch + 1) == params['epochs'] or (epoch + 1) % save_interval == 0:
            output_file_name = exp_path + f'ep{epoch}_' + setting + '.pth'
            utils.save_checkpoint(path=output_file_name,
                                  model=model,
                                  epoch=epoch,
                                  optimizer=optimizer,
                                  params=params)

    model = model.module
    datasets = ('roxford5k', 'rparis6k')
    results = eval_datasets(model,
                            datasets=datasets,
                            ms=False,
                            tta_gem_p=1.0,
                            logger=logger)

    if tuning:
        tuning_result = {}
        for d in datasets:
            for key in ['mapE', 'mapM', 'mapH']:
                mapE, mapM, mapH, mpE, mpM, mpH, kappas = results[d]
                tuning_result[d + '-' + key] = [eval(key)]
        utils.write_tuning_result(params, tuning_result,
                                  exp_path + 'tuning/results.csv')
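
# The eval(key) above works because mapE/mapM/mapH are local names from the
# unpacking, but the same table can be built without eval. An eval-free sketch
# of that loop (collect_tuning_result is a hypothetical name):
def collect_tuning_result(results, datasets, keys=('mapE', 'mapM', 'mapH')):
    """Build the {dataset-metric: [value]} dict without eval()."""
    tuning_result = {}
    for d in datasets:
        named = dict(zip(('mapE', 'mapM', 'mapH', 'mpE', 'mpM', 'mpH', 'kappas'),
                         results[d]))
        for key in keys:
            tuning_result[d + '-' + key] = [named[key]]
    return tuning_result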
def evaluate(model, validation_generator, criterion, epoch, writer, log_file, print_every=25):
    model.eval()
    losses = utils.AverageMeter()
    accuracies = utils.AverageMeter()
    num_iter_per_epoch = len(validation_generator)

    y_true = []
    y_pred = []

    for iter, batch in tqdm(enumerate(validation_generator), total=num_iter_per_epoch):
        features, labels = batch
        if torch.cuda.is_available():
            features = features.cuda()
            labels = labels.cuda()
        with torch.no_grad():
            predictions = model(features)
        loss = criterion(predictions, labels)

        y_true += labels.cpu().numpy().tolist()
        y_pred += torch.max(predictions, 1)[1].cpu().numpy().tolist()

        validation_metrics = utils.get_evaluation(labels.cpu().numpy(),
                                                  predictions.cpu().detach().numpy(),
                                                  list_metrics=["accuracy", "f1"])
        accuracy = validation_metrics['accuracy']
        f1 = validation_metrics['f1']

        losses.update(loss.item(), features.size(0))
        accuracies.update(validation_metrics["accuracy"], features.size(0))

        writer.add_scalar('Test/Loss',
                          loss.item(),
                          epoch * num_iter_per_epoch + iter)

        writer.add_scalar('Test/Accuracy',
                          accuracy,
                          epoch * num_iter_per_epoch + iter)

        writer.add_scalar('Test/f1',
                          f1,
                          epoch * num_iter_per_epoch + iter)

        if (iter % print_every == 0) and (iter > 0):
            print("[Validation - Epoch: {}] , Iteration: {}/{} , Loss: {}, Accuracy: {}".format(
                epoch + 1,
                iter,
                num_iter_per_epoch,
                losses.avg,
                accuracies.avg
            ))

    f1_test = f1_score(y_true, y_pred, average='weighted')

    writer.add_scalar('Test/loss/epoch', losses.avg, epoch + iter)
    writer.add_scalar('Test/acc/epoch', accuracies.avg, epoch + iter)
    writer.add_scalar('Test/f1/epoch', f1_test, epoch + iter)

    report = classification_report(y_true, y_pred)
    print(report)

    with open(log_file, 'a') as f:
        f.write(f'Validation on Epoch {epoch} \n')
        f.write(f'Average loss: {losses.avg} \n')
        f.write(f'Average accuracy: {accuracies.avg} \n')
        f.write(f'F1 score {f1_test} \n\n')
        f.write(report)
        f.write('=' * 50)
        f.write('\n')

    return losses.avg, accuracies.avg, f1_test
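
# utils.get_evaluation is called with raw labels, prediction scores, and a
# list of metric names. A sketch built on scikit-learn (assumed; the real
# helper is not shown):
import numpy as np
from sklearn import metrics

def get_evaluation(y_true, y_prob, list_metrics):
    """Return the requested metrics for argmax predictions over y_prob."""
    y_pred = np.argmax(y_prob, axis=-1)
    output = {}
    if 'accuracy' in list_metrics:
        output['accuracy'] = metrics.accuracy_score(y_true, y_pred)
    if 'f1' in list_metrics:
        output['f1'] = metrics.f1_score(y_true, y_pred, average='weighted')
    return output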
Example #22
    for _ in range(start_epoch * len(data_loaders['train'])):
        scheduler.step()


    if len(devices.split(',')) > 1:
        model = nn.DataParallel(model)

    for epoch in range(start_epoch, end_epoch):
    #while epoch <= end_epoch:
        logger.info(f'Epoch {epoch}/{end_epoch}')

        # ============================== train ============================== #
        model.train(True)

        losses = utils.AverageMeter()
        prec1 = utils.AverageMeter()

        for i, (_, x, y) in tqdm(enumerate(data_loaders['train']),
                                 total=len(data_loaders['train']),
                                 miniters=None, ncols=55):
            
            if num_GPU > 0:
                x = x.to('cuda')
                y = y.to('cuda')

            outputs = model(x, y)
            loss = criterion(outputs, y)

            optimizer.zero_grad()
            loss.backward()
def train(model, training_generator, optimizer, criterion, epoch, writer, log_file, scheduler, class_names, args, print_every=25):
    model.train()
    losses = utils.AverageMeter()
    accuracies = utils.AverageMeter()
    num_iter_per_epoch = len(training_generator)

    progress_bar = tqdm(enumerate(training_generator),
                        total=num_iter_per_epoch)

    y_true = []
    y_pred = []

    for iter, batch in progress_bar:
        features, labels = batch
        if torch.cuda.is_available():
            features = features.cuda()
            labels = labels.cuda()

        optimizer.zero_grad()
        predictions = model(features)

        y_true += labels.cpu().numpy().tolist()
        y_pred += torch.max(predictions, 1)[1].cpu().numpy().tolist()

        loss = criterion(predictions, labels)

        loss.backward()
        if args.scheduler == 'clr':
            scheduler.step()

        optimizer.step()
        training_metrics = utils.get_evaluation(labels.cpu().numpy(),
                                                predictions.cpu().detach().numpy(),
                                                list_metrics=["accuracy", "f1"])

        losses.update(loss.item(), features.size(0))
        accuracies.update(training_metrics["accuracy"], features.size(0))

        f1 = training_metrics['f1']

        writer.add_scalar('Train/Loss',
                          loss.item(),
                          epoch * num_iter_per_epoch + iter)

        writer.add_scalar('Train/Accuracy',
                          training_metrics['accuracy'],
                          epoch * num_iter_per_epoch + iter)

        writer.add_scalar('Train/f1',
                          f1,
                          epoch * num_iter_per_epoch + iter)

        lr = optimizer.state_dict()["param_groups"][0]["lr"]

        if (iter % print_every == 0) and (iter > 0):
            print("[Training - Epoch: {}], LR: {} , Iteration: {}/{} , Loss: {}, Accuracy: {}".format(
                epoch + 1,
                lr,
                iter,
                num_iter_per_epoch,
                losses.avg,
                accuracies.avg
            ))

            if bool(args.log_f1):
                intermediate_report = classification_report(
                    y_true, y_pred, output_dict=True)

                f1_by_class = 'F1 Scores by class: '
                for class_name in class_names:
                    f1_by_class += f"{class_name} : {np.round(intermediate_report[class_name]['f1-score'], 4)} |"

                print(f1_by_class)

    f1_train = f1_score(y_true, y_pred, average='weighted')

    writer.add_scalar('Train/loss/epoch', losses.avg, epoch + iter)
    writer.add_scalar('Train/acc/epoch', accuracies.avg, epoch + iter)
    writer.add_scalar('Train/f1/epoch', f1_train, epoch + iter)

    report = classification_report(y_true, y_pred)
    print(report)

    with open(log_file, 'a') as f:
        f.write(f'Training on Epoch {epoch} \n')
        f.write(f'Average loss: {losses.avg} \n')
        f.write(f'Average accuracy: {accuracies.avg} \n')
        f.write(f'F1 score: {f1_train} \n\n')
        f.write(report)
        f.write('*' * 25)
        f.write('\n')

    return losses.avg, accuracies.avg, f1_train
def train(train_loader,
          model,
          criterion,
          optimizer,
          num_kpts=15,
          num_classes=200,
          lr_init=None,
          lr_now=None,
          glob_step=None,
          lr_decay=None,
          gamma=None,
          max_norm=True):
    losses = utils.AverageMeter()

    model.train()

    errs, accs = [], []
    start = time.time()
    batch_time = 0
    bar = Bar('>>>', fill='>', max=len(train_loader))

    for i, sample in enumerate(train_loader):
        glob_step += 1
        if glob_step % lr_decay == 0 or glob_step == 1:
            lr_now = utils.lr_decay(optimizer, glob_step, lr_init, lr_decay,
                                    gamma)

        inputs = sample['X'].cuda()
        # NOTE: PyTorch issue with dim0=1.
        if inputs.shape[0] == 1:
            continue
        targets = sample['Y'].reshape(-1).cuda()

        outputs = model(inputs)

        # calculate loss
        optimizer.zero_grad()
        loss = criterion(outputs, targets)
        losses.update(loss.item(), inputs.size(0))
        loss.backward()
        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()

        # Set outputs to [0, 1].
        softmax = nn.Softmax(dim=1)  # explicit dim; implicit Softmax dim is deprecated
        outputs = softmax(outputs)

        outputs = outputs.data.cpu().numpy()
        targets = one_hot(targets.data.cpu().numpy(), num_classes)

        errs.append(np.mean(np.abs(outputs - targets)))
        accs.append(
            metrics.accuracy_score(np.argmax(targets, axis=1),
                                   np.argmax(outputs, axis=1)))

        # update summary
        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()

        bar.suffix = '({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.6f}' \
            .format(batch=i + 1,
                    size=len(train_loader),
                    batchtime=batch_time * 10.0,
                    ttl=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg)
        bar.next()
    bar.finish()

    err = np.mean(np.array(errs, dtype=np.float32))
    acc = np.mean(np.array(accs, dtype=np.float32))
    print(">>> train error: {} <<<".format(err))
    print(">>> train accuracy: {} <<<".format(acc))
    return glob_step, lr_now, losses.avg, err, acc
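
# one_hot above converts integer class labels into the one-hot matrix that
# the error computation compares against. A minimal sketch (assumed helper):
import numpy as np

def one_hot(labels, num_classes):
    """Return a (len(labels), num_classes) one-hot float matrix."""
    out = np.zeros((labels.size, num_classes), dtype=np.float32)
    out[np.arange(labels.size), labels] = 1.0
    return out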
def test(test_loader,
         model,
         criterion,
         num_kpts=15,
         num_classes=2,
         batch_size=64,
         inference=False,
         log=True):
    losses = utils.AverageMeter()

    model.eval()

    errs, accs = [], []
    all_outputs, all_targets = [], []
    start = time.time()
    batch_time = 0
    if log:
        bar = Bar('>>>', fill='>', max=len(test_loader))

    for i, sample in enumerate(test_loader):
        inputs = sample['X'].cuda()
        # NOTE: PyTorch issue with dim0=1.
        if inputs.shape[0] == 1:
            continue
        targets = sample['Y'].reshape(-1).cuda()
        outputs = model(inputs)

        # calculate loss
        loss = criterion(outputs, targets)
        losses.update(loss.item(), inputs.size(0))

        # Set outputs to [0, 1].
        softmax = nn.Softmax(dim=1)  # explicit dim; implicit Softmax dim is deprecated
        outputs = softmax(outputs)

        outputs = outputs.data.cpu().numpy()
        targets = targets.data.cpu().numpy()

        all_outputs.append(outputs)
        all_targets.append(targets)

        #        errs.append(np.mean(np.abs(outputs - targets)))
        #        accs.append(accuracy_score(
        #            np.argmax(targets, axis=1),
        #            np.argmax(outputs, axis=1))
        #        )

        # update summary
        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()

        if log:
            bar.suffix = '({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.6f}' \
                .format(batch=i + 1,
                        size=len(test_loader),
                        batchtime=batch_time * 10.0,
                        ttl=bar.elapsed_td,
                        eta=bar.eta_td,
                        loss=losses.avg)
            bar.next()


#    err = np.mean(np.array(errs))
#    acc = np.mean(np.array(accs))

    all_outputs = np.concatenate(all_outputs)
    all_targets = np.concatenate(all_targets)

    pred_values = np.amax(all_outputs, axis=1)
    pred_labels = np.argmax(all_outputs, axis=1)

    err = np.mean(np.abs(pred_values - all_targets))
    acc = np.mean(metrics.accuracy_score(all_targets, pred_labels))
    auc = calc_auc(all_targets, pred_values)
    prec = metrics.average_precision_score(all_targets, pred_values)

    if log:
        bar.finish()
        print('>>> test error: {} <<<'.format(err))
        print('>>> test accuracy: {} <<<'.format(acc))

    return losses.avg, err, acc, auc, prec
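
# calc_auc above receives binary targets and the winning-class scores. A
# sketch via scikit-learn's ROC utilities (assumed; the original helper is
# not shown):
from sklearn import metrics

def calc_auc(y_true, y_score):
    """Area under the ROC curve for binary labels and real-valued scores."""
    fpr, tpr, _ = metrics.roc_curve(y_true, y_score)
    return metrics.auc(fpr, tpr)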
def job(tuning, params_path, devices, resume, save_interval):
    global params
    if tuning:
        with open(params_path, 'r') as f:
            params = json.load(f)
        mode_str = 'tuning'
        setting = '_'.join(f'{tp}-{params[tp]}'
                           for tp in params['tuning_params'])
    else:
        mode_str = 'train'
        setting = ''

    exp_path = ROOT + f'experiments/{params["ex_name"]}/'
    os.environ['CUDA_VISIBLE_DEVICES'] = devices

    if resume is None:
        # kept consistent between C-AIR and ABCI
        params['base_ckpt_path'] = f'experiments/v1only/ep4_augmentation-soft_epochs-5_loss-{params["loss"]}.pth'
        params['clean_path'] = ROOT + f'input/clean/train19_cleaned_verifythresh{params["verifythresh"]}_freqthresh{params["freqthresh"]}.csv'
    else:
        params = utils.load_checkpoint(path=resume, params=True)['params']

    logger, writer = utils.get_logger(
        log_dir=exp_path + f'{mode_str}/log/{setting}',
        tensorboard_dir=exp_path + f'{mode_str}/tf_board/{setting}')

    if params['augmentation'] == 'soft':
        params['scale_limit'] = 0.2
        params['brightness_limit'] = 0.1
    elif params['augmentation'] == 'middle':
        params['scale_limit'] = 0.3
        params['shear_limit'] = 4
        params['brightness_limit'] = 0.1
        params['contrast_limit'] = 0.1
    else:
        raise ValueError

    train_transform, eval_transform = data_utils.build_transforms(
        scale_limit=params['scale_limit'],
        shear_limit=params['shear_limit'],
        brightness_limit=params['brightness_limit'],
        contrast_limit=params['contrast_limit'],
    )

    data_loaders = data_utils.make_train_loaders(
        params=params,
        data_root=ROOT + 'input/' + params['data'],
        train_transform=train_transform,
        eval_transform=eval_transform,
        scale='SS2',
        test_size=0,
        class_topk=params['class_topk'],
        num_workers=8)

    model = models.LandmarkNet(
        n_classes=params['class_topk'],
        model_name=params['model_name'],
        pooling=params['pooling'],
        loss_module=params['loss'],
        s=params['s'],
        margin=params['margin'],
        theta_zero=params['theta_zero'],
        use_fc=params['use_fc'],
        fc_dim=params['fc_dim'],
    ).cuda()

    criterion = nn.CrossEntropyLoss()
    optimizer = utils.get_optim(params, model)

    if resume is None:
        sdict = torch.load(ROOT + params['base_ckpt_path'])['state_dict']
        if params['loss'] == 'adacos':
            del sdict['final.W']  # remove fully-connected layer
        elif params['loss'] == 'softmax':
            del sdict['final.weight'], sdict['final.bias']  # remove fully-connected layer
        else:
            del sdict['final.weight']  # remove fully-connected layer
        model.load_state_dict(sdict, strict=False)

        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=params['epochs'] * len(data_loaders['train']),
            eta_min=3e-6)
        start_epoch, end_epoch = (0,
                                  params['epochs'] - params['scaleup_epochs'])
    else:
        ckpt = utils.load_checkpoint(path=resume,
                                     model=model,
                                     optimizer=optimizer,
                                     epoch=True)
        model, optimizer = ckpt['model'], ckpt['optimizer']
        start_epoch = ckpt['epoch'] + 1
        end_epoch = params['epochs']

        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=params['epochs'] * len(data_loaders['train']),
            eta_min=3e-6,
            last_epoch=start_epoch * len(data_loaders['train']))

        setting += 'scaleup_' + resume.split('/')[-1].replace('.pth', '')

        data_loaders = data_utils.make_verified_train_loaders(
            params=params,
            data_root=ROOT + 'input/' + params['data'],
            train_transform=train_transform,
            eval_transform=eval_transform,
            scale='M2',
            test_size=0,
            num_workers=8)
        batch_norm.freeze_bn(model)

    if len(devices.split(',')) > 1:
        model = nn.DataParallel(model)

    for epoch in range(start_epoch, end_epoch):
        logger.info(f'Epoch {epoch}/{end_epoch}')

        # ============================== train ============================== #
        model.train(True)

        losses = utils.AverageMeter()
        prec1 = utils.AverageMeter()

        for i, (_, x, y) in tqdm(enumerate(data_loaders['train']),
                                 total=len(data_loaders['train']),
                                 miniters=None,
                                 ncols=55):
            x = x.to('cuda')
            y = y.to('cuda')

            outputs = model(x, y)
            loss = criterion(outputs, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()

            acc = metrics.accuracy(outputs, y)
            losses.update(loss.item(), x.size(0))
            prec1.update(acc, x.size(0))

            if i % 100 == 99:
                logger.info(
                    f'{epoch+i/len(data_loaders["train"]):.2f}epoch | {setting} acc: {prec1.avg}'
                )

        train_loss = losses.avg
        train_acc = prec1.avg

        writer.add_scalars('Loss', {'train': train_loss}, epoch)
        writer.add_scalars('Acc', {'train': train_acc}, epoch)
        writer.add_scalar('LR', optimizer.param_groups[0]['lr'], epoch)

        if (epoch + 1) == end_epoch or (epoch + 1) % save_interval == 0:
            output_file_name = exp_path + f'ep{epoch}_' + setting + '.pth'
            utils.save_checkpoint(path=output_file_name,
                                  model=model,
                                  epoch=epoch,
                                  optimizer=optimizer,
                                  params=params)

    if isinstance(model, nn.DataParallel):
        model = model.module  # unwrap DataParallel before evaluation
    datasets = ('oxford5k', 'paris6k', 'roxford5k', 'rparis6k')
    results = eval_datasets(model,
                            datasets=datasets,
                            ms=True,
                            tta_gem_p=1.0,
                            logger=logger)

    if tuning:
        tuning_result = {}
        for d in datasets:
            if d in ('oxford5k', 'paris6k'):
                tuning_result[d] = results[d]
            else:
                mapE, mapM, mapH, mpE, mpM, mpH, kappas = results[d]
                for key, val in zip(('mapE', 'mapM', 'mapH'),
                                    (mapE, mapM, mapH)):
                    tuning_result[d + '-' + key] = [val]
        utils.write_tuning_result(params, tuning_result,
                                  exp_path + 'tuning/results.csv')
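# Note on the scale-up branch above: batch_norm.freeze_bn is called but not
# defined in this snippet. Below is a minimal sketch of what such a helper
# typically does (the name and behaviour are assumptions, not the author's
# exact implementation). Because `model.train(True)` at the top of each epoch
# flips BatchNorm layers back to training mode, a helper like this would have
# to be re-applied after every call to `model.train`.
import torch.nn as nn

def freeze_bn(model: nn.Module) -> None:
    """Sketch: keep BatchNorm running statistics and affine params fixed."""
    for m in model.modules():
        if isinstance(m, nn.modules.batchnorm._BatchNorm):
            m.eval()  # stop updating running mean/var
            if m.affine:
                m.weight.requires_grad = False  # freeze scale
                m.bias.requires_grad = False    # freeze shift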
Example #27
    def perform_search(self, app):  
    #{{{
        def check_stopping():
        #{{{
            # stop once the binary search over the pruning percentage
            # stops moving
            return prevPp == currPp
        #}}}
        
        # get unpruned test accuracy 
        loss, top1, top5 = app.run_inference()
        targetAcc = top1
        print('Epoch,\tLR,\tTrain_Loss,\tTrain_Top1,\tTrain_Top5,\tTest_Loss,\tTest_Top1,\tTest_Top5,\tVal_Loss,\tVal_Top1,\tVal_Top5')
        
        # perform finetuning once
        for epoch in tqdm(range(app.params.start_epoch, app.params.pruneAfter), desc='finetuning', leave=False) : 
        #{{{
            app.params.curr_epoch = epoch
            state = self.update_lr(app.params, app.optimiser)
            
            losses = utils.AverageMeter()
            top1 = utils.AverageMeter()
            top5 = utils.AverageMeter()

            self.batch_iter(app.model, app.criterion, app.optimiser, app.train_loader, app.params, losses, top1, top5)

            app.params.train_loss = losses.avg        
            app.params.train_top1 = top1.avg        
            app.params.train_top5 = top5.avg        
            
            # get test loss
            app.params.test_loss, app.params.test_top1, app.params.test_top5 = app.inferer.test_network(app.params, app.test_loader, app.model, app.criterion, app.optimiser, verbose=False)
            app.params.val_loss, app.params.val_top1, app.params.val_top5 = app.inferer.test_network(app.params, app.valLoader, app.model, app.criterion, app.optimiser, verbose=False)

            app.checkpointer.save_checkpoint(app.model.state_dict(), app.optimiser.state_dict(), app.params)
            
            tqdm.write("{},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f}".format(epoch, app.params.lr, app.params.train_loss, app.params.train_top1, app.params.train_top5, app.params.test_loss, app.params.test_top1, app.params.test_top5, app.params.val_loss, app.params.val_top1, app.params.val_top5))
        #}}}

        # store model to revert to (note: this keeps a reference, not a copy;
        # use copy.deepcopy(app.model) if the pruner mutates the model in place)
        finetunedModel = app.model
        app.checkpointer.save_model_only(app.model.state_dict(), app.params.printOnly, 'pre_pruning')
        app.params.curr_epoch += 1
            
        # initialise search
        app.params.start_epoch = app.params.pruneAfter
        initPp = 50
        prevPp = 0
        currPp = initPp
        uB = 95
        lB = 5 
        bestPp = 0
        state = 0
        bestTestAcc = targetAcc

        while not check_stopping():
            # prune model 
            app.params.pruningPerc = currPp
            app.model = finetunedModel 
            app.setup_pruners()
            tqdm.write('Pruning Network')
            channelsPruned, prunedModel, app.optimiser = app.pruner.prune_model(finetunedModel)
            totalPrunedPerc, _, _ = app.pruner.prune_rate(prunedModel)
            tqdm.write('Pruned Percentage = {:.2f}%'.format(totalPrunedPerc))
            summary = app.pruner.log_pruned_channels(app.checkpointer.root, app.params, totalPrunedPerc, channelsPruned)
            
            # perform retraining (seed the accuracy lists so they are
            # non-empty even if the finetune budget is zero)
            testAccs = [1]
            validAccs = [1]
            for epoch in tqdm(range(app.params.curr_epoch, app.params.curr_epoch + app.params.finetuneBudget), desc='training', leave=False) : 
            #{{{
                app.params.curr_epoch = epoch
                state = self.update_lr(app.params, app.optimiser)

                losses = utils.AverageMeter()
                top1 = utils.AverageMeter()
                top5 = utils.AverageMeter()

                self.batch_iter(prunedModel, app.criterion, app.optimiser, app.train_loader, app.params, losses, top1, top5)

                app.params.train_loss = losses.avg        
                app.params.train_top1 = top1.avg        
                app.params.train_top5 = top5.avg        

                # get test loss
                app.params.test_loss, app.params.test_top1, app.params.test_top5 = app.inferer.test_network(app.params, app.test_loader, prunedModel, app.criterion, app.optimiser, verbose=False)
                app.params.val_loss, app.params.val_top1, app.params.val_top5 = app.inferer.test_network(app.params, app.valLoader, prunedModel, app.criterion, app.optimiser, verbose=False)

                testAccs.append(app.params.test_top1)
                validAccs.append(app.params.val_top1)

                app.checkpointer.save_checkpoint(prunedModel.state_dict(), app.optimiser.state_dict(), app.params)
                
                tqdm.write("{},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f},\t{:10.5f}".format(epoch, app.params.lr, app.params.train_loss, app.params.train_top1, app.params.train_top5, app.params.test_loss, app.params.test_top1, app.params.test_top5, app.params.val_loss, app.params.val_top1, app.params.val_top5))
            #}}}

            # test accuracy at the epoch with the best validation accuracy;
            # compare integer parts to tolerate sub-1% fluctuations
            highestTestAcc = testAccs[validAccs.index(max(validAccs))]
            if int(highestTestAcc) < int(targetAcc): 
                state = -1
            else:
                state = 1
            
            # prune less
            if state == -1: 
                tmp = (lB + currPp) / 2.
                uB = currPp 

            # try to prune more, but return previous model if state goes to -1
            elif state == 1:
                tmp = (uB + currPp) / 2.
                lB = currPp 
                bestPp = currPp 
                bestTestAcc = highestTestAcc

            prevPp = currPp
            currPp = 5 * math.ceil(tmp / 5)  # round up to the next multiple of 5
        
        return bestPp, bestTestAcc
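# The loop in perform_search is a bisection over the pruning percentage:
# prune more when accuracy holds, prune less when it drops, and stop when the
# candidate percentage stops changing. A minimal standalone sketch of the same
# update rule follows; meets_target stands in for prune + retrain + compare
# against the unpruned accuracy, and all names here are illustrative.
import math

def bisect_prune_level(meets_target, lo=5, hi=95, start=50, step=5):
    """Find the largest pruning percentage (rounded up to a multiple of
    `step`) whose retrained accuracy still meets the target."""
    best, prev, curr = 0, None, start
    while curr != prev:
        if meets_target(curr):
            lo, best = curr, curr            # accuracy held: try pruning more
            mid = (hi + curr) / 2.0
        else:
            hi = curr                        # accuracy dropped: prune less
            mid = (lo + curr) / 2.0
        prev, curr = curr, step * math.ceil(mid / step)
    return best

# e.g. bisect_prune_level(lambda pp: pp <= 62) returns 60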
Example #28
def test(test_loader, model, criterion, stat_2d, stat_3d, procrustes=False):

    losses = utils.AverageMeter()

    model.eval()

    all_dist = []

    pbar = tqdm(test_loader)
    for i, (inps, tars) in enumerate(pbar):

        ### input unnorm
        data_coord = data_process.unNormalizeData(inps.data.cpu().numpy(),
                                                  stat_2d['mean'],
                                                  stat_2d['std'],
                                                  stat_2d['dim_use'])  # 64, 64
        dim_2d_use = stat_2d['dim_use']
        data_use = data_coord[:, dim_2d_use]  # (64, 32)

        ### input dist norm
        data_dist_norm, data_dist_set = data_process.input_norm(
            data_use)  # (64, 32) , array
        data_dist = torch.tensor(data_dist_norm, dtype=torch.float32)

        # target unnorm
        label_coord = data_process.unNormalizeData(
            tars.data.cpu().numpy(), stat_3d['mean'], stat_3d['std'],
            stat_3d['dim_use'])  # (64, 96)
        dim_3d_use = np.array([
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18, 19, 20, 21, 22, 23, 24,
            25, 26, 36, 37, 38, 39, 40, 41, 45, 46, 47, 51, 52, 53, 54, 55, 56,
            57, 58, 59, 75, 76, 77, 78, 79, 80, 81, 82, 83
        ])

        label_use = label_coord[:, dim_3d_use]  # (64, 48)
        # target dist norm
        label_dist_norm, label_dist_set = data_process.output_norm(label_use)
        label_dist = torch.tensor(label_dist_norm, dtype=torch.float32)

        # Variable and .cuda(async=True) are deprecated; `async` is a reserved
        # keyword from Python 3.7 onwards, so use non_blocking instead
        inputs = data_dist.cuda()
        targets = label_dist.cuda(non_blocking=True)

        outputs = model(inputs)

        # calculate loss
        pred_coord = outputs
        loss = criterion(pred_coord, targets)  # 64 losses average

        losses.update(loss.item(), inputs.size(0))

        tars = targets
        pred = outputs

        # broadcast the per-sample distance normalisers over all 48 coordinates
        targets_dist = np.reshape(label_dist_set, (-1, 1))
        targets_dist_set = np.repeat(targets_dist, 48, axis=1)

        # constant offset [0, 0, 10] tiled over the 16 joints of each sample
        c = np.reshape(np.asarray([0, 0, 10]), (1, -1))
        c = np.repeat(c, 16, axis=0)
        c = np.reshape(c, (1, -1))
        c = np.repeat(c, inputs.size(0), axis=0)

        #### undist -> unnorm
        outputs_undist = (pred.data.cpu().numpy() * targets_dist_set) - c
        targets_undist = (tars.data.cpu().numpy() * targets_dist_set) - c

        outputs_use = outputs_undist
        targets_use = targets_undist  # (64, 48)

        if procrustes:
            for ba in range(inps.size(0)):
                gt = targets_use[ba].reshape(-1, 3)
                out = outputs_use[ba].reshape(-1, 3)  # (17,3)
                _, Z, T, b, c = get_transformation(gt, out, True)
                out = (b * out.dot(T)) + c
                outputs_use[ba, :] = out.reshape(1, 48)

        sqerr = (outputs_use - targets_use)**2

        # per-joint Euclidean error: 16 joints, 3 coordinates each
        distance = np.zeros((sqerr.shape[0], 16))

        dist_idx = 0
        for k in np.arange(0, 16 * 3, 3):
            distance[:, dist_idx] = np.sqrt(np.sum(sqerr[:, k:k + 3], axis=1))
            dist_idx += 1
        all_dist.append(distance)

        pbar.set_postfix(tt_loss='{:05.6f}'.format(losses.avg))

    all_dist = np.vstack(all_dist)
    joint_err = np.mean(all_dist, axis=0)
    ttl_err = np.mean(all_dist)
    print(">>> error: {} <<<".format(ttl_err))
    return losses.avg, ttl_err
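# The explicit per-joint loop above can be collapsed into one vectorised
# operation; a sketch, assuming outputs_use / targets_use are (batch, 48)
# arrays of 16 stacked xyz joints (function name is illustrative):
import numpy as np

def per_joint_error(pred, gt, n_joints=16):
    diff = (pred - gt).reshape(-1, n_joints, 3)  # (batch, 16, 3)
    return np.linalg.norm(diff, axis=2)          # (batch, 16) distances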
Example #29
def test(test_loader,
         model,
         criterion,
         stat_3d,
         device,
         procrustes=False,
         pck_thresholds=[50, 100, 150, 200, 250],
         noise_fun=lambda x: x,
         refine_dic=None,
         refine_coeff_fun=None,
         refine_extra_kwargs={},
         cache_prefix=None,
         visualize=False):
    model.eval()

    all_outputs = []
    all_targets = []
    losses = utils.AverageMeter()
    for i, (inps, tars) in enumerate(test_loader):
        inps_noise = noise_fun(inps)
        inputs = inps_noise.to(device)  # Variable wrapper is deprecated; plain tensors suffice
        targets = tars.to(device)

        outputs = model(inputs)

        # calculate loss
        outputs_coord = outputs
        loss = criterion(outputs_coord, targets)
        losses.update(loss.item(), inputs.size(0))

        tars = targets

        # calculate accuracy
        targets_unnorm = data_process.unNormalizeData(tars.data.cpu().numpy(),
                                                      stat_3d['mean'],
                                                      stat_3d['std'],
                                                      stat_3d['dim_use'])
        outputs_unnorm = data_process.unNormalizeData(
            outputs.data.cpu().numpy(), stat_3d['mean'], stat_3d['std'],
            stat_3d['dim_use'])

        # remove dim ignored
        dim_use = np.hstack((np.arange(3), stat_3d['dim_use']))

        outputs_use = outputs_unnorm[:, dim_use]
        targets_use = targets_unnorm[:, dim_use]

        all_outputs.append(outputs_use)
        all_targets.append(targets_use)

    accu_frames = np.cumsum(test_loader.dataset.frames)
    all_outputs = np.split(np.concatenate(all_outputs, axis=0),
                           accu_frames)[:-1]
    all_targets = np.split(np.concatenate(all_targets, axis=0),
                           accu_frames)[:-1]

    start = time.time()
    seq_time = 0
    bar = Bar('>>>', fill='>', max=len(all_outputs))

    all_dist, all_pck = [], []
    for i, (outputs_use,
            targets_use) in enumerate(zip(all_outputs, all_targets)):
        if refine_dic is not None:
            origin = outputs_use
            outputs_use, _ = ru.refine(outputs_use, refine_dic,
                                       refine_coeff_fun, **refine_extra_kwargs)

            if visualize:
                visual = [
                    ru.convert_to_pose_16(seq.reshape([-1, 17, 3]))
                    for seq in [outputs_use, origin, targets_use]
                ]
                ru.plot_pose_seq(visual, plot_axis=True, r=1000)

        if procrustes:
            for frame in range(outputs_use.shape[0]):
                gt = targets_use[frame].reshape(-1, 3)
                out = outputs_use[frame].reshape(-1, 3)
                _, Z, T, b, c = get_transformation(gt,
                                                   out,
                                                   True,
                                                   reflection=False)
                out = (b * out.dot(T)) + c
                outputs_use[frame, :] = out.reshape(1, 51)

        for pred, gt in zip(outputs_use, targets_use):
            pred, gt = pred.reshape([-1, 3]), gt.reshape([-1, 3])
            all_dist.append(mpjpe_fun(pred, gt))
            all_pck.append(pck_fun(pred, gt, thresholds=pck_thresholds))

        # update summary
        seq_time = time.time() - start
        start = time.time()

        bar.suffix = '({seq}/{size}) | seq: {seqtime:.4}s | Total: {ttl} | ETA: {eta:} | mpjpe: {loss:.6f}' \
            .format(seq=i + 1,
                    size=len(all_outputs),
                    seqtime=seq_time,
                    ttl=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=np.mean(all_dist))
        bar.next()

    all_dist = np.vstack(all_dist)
    all_pck = np.array(all_pck)
    mpjpe = np.mean(all_dist)
    if cache_prefix:
        with open('cache/{}_.pkl'.format(cache_prefix), 'wb') as f:
            pickle.dump({'mpjpe': all_dist, 'pck': all_pck}, f)
    pck = np.mean(all_pck, axis=0)
    bar.finish()
    print(">>> error: {:4f}, pck: {} <<<".format(
        mpjpe, ' '.join(['{:4f}'.format(val) for val in pck])))
    return losses.avg, mpjpe, pck
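# mpjpe_fun and pck_fun are called above but not defined in this snippet.
# Minimal sketches consistent with how they are used (pred and gt are
# (n_joints, 3) arrays; thresholds are distances in millimetres):
import numpy as np

def mpjpe_fun(pred, gt):
    """Mean per-joint position error of one frame."""
    return np.mean(np.linalg.norm(pred - gt, axis=1))

def pck_fun(pred, gt, thresholds=(50, 100, 150, 200, 250)):
    """Fraction of joints within each distance threshold."""
    dist = np.linalg.norm(pred - gt, axis=1)
    return [float(np.mean(dist <= t)) for t in thresholds]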
def test(test_loader, model, criterion, stat_3d, procrustes=False):
    losses = utils.AverageMeter()

    model.eval()

    all_dist = []
    start = time.time()
    batch_time = 0
    bar = Bar(">>>", fill=">", max=len(test_loader))

    for i, (inps, tars) in enumerate(test_loader):
        inputs = inps.cuda()  # Variable wrapper is deprecated; plain tensors suffice
        targets = tars.cuda()

        outputs = model(inputs)

        # calculate loss
        outputs_coord = outputs
        loss = criterion(outputs_coord, targets)

        losses.update(loss.item(), inputs.size(0))

        tars = targets

        # calculate accuracy
        targets_unnorm = data_process.unNormalizeData(tars.data.cpu().numpy(),
                                                      stat_3d["mean"],
                                                      stat_3d["std"],
                                                      stat_3d["dim_use"])
        outputs_unnorm = data_process.unNormalizeData(
            outputs.data.cpu().numpy(),
            stat_3d["mean"],
            stat_3d["std"],
            stat_3d["dim_use"],
        )

        # remove dim ignored
        dim_use = np.hstack((np.arange(3), stat_3d["dim_use"]))

        outputs_use = outputs_unnorm[:, dim_use]
        targets_use = targets_unnorm[:, dim_use]

        if procrustes:
            for ba in range(inps.size(0)):
                gt = targets_use[ba].reshape(-1, 3)
                out = outputs_use[ba].reshape(-1, 3)
                _, Z, T, b, c = get_transformation(gt, out, True)
                out = (b * out.dot(T)) + c
                outputs_use[ba, :] = out.reshape(1, 51)

        sqerr = (outputs_use - targets_use)**2

        distance = np.zeros((sqerr.shape[0], 17))
        dist_idx = 0
        for k in np.arange(0, 17 * 3, 3):
            distance[:, dist_idx] = np.sqrt(np.sum(sqerr[:, k:k + 3], axis=1))
            dist_idx += 1
        all_dist.append(distance)

        # update summary
        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()

        bar.suffix = "({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.6f}".format(
            batch=i + 1,
            size=len(test_loader),
            batchtime=batch_time * 10.0,
            ttl=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
        )
        bar.next()

    all_dist = np.vstack(all_dist)
    joint_err = np.mean(all_dist, axis=0)
    ttl_err = np.mean(all_dist)
    bar.finish()
    print(">>> error: {} <<<".format(ttl_err))
    return losses.avg, ttl_err
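# Every example above shares utils.AverageMeter for running statistics. Its
# implementation is not shown here, but the standard pattern (as popularised
# by the PyTorch ImageNet example) looks like this:
class AverageMeter:
    """Tracks the latest value and running average of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count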