Example #1
0
def updatefig(*args):
    """Matplotlib animation callback: grab one camera frame, run the pose
    model on it and redraw the predicted heatmap overlay on the global axes.

    Relies on module-level state: ``vc`` (a cv2.VideoCapture), ``model``
    (the pose network, on GPU) and ``ax`` (the matplotlib axes to draw on).

    Returns:
        The matplotlib axes that were redrawn (unchanged if the frame
        grab failed).
    """
    rval, frame = vc.read()
    if not rval:
        # Grab failed -> frame is None; bail out instead of crashing
        # inside cv2.resize.
        return ax
    frame = cv2.resize(frame, (256, 256))  # BGR
    img = im_to_torch(frame)
    # Per-channel mean/std normalization; constants presumably match the
    # statistics of the model's training set -- TODO confirm.
    inp = color_normalize(img, (0.4404, 0.4440, 0.4327),
                          (0.2458, 0.2410, 0.2468))
    input_var = torch.autograd.Variable(inp.cuda())
    output = model(input_var.unsqueeze(0))
    # Use the last stack's output (stacked-hourglass style models return a
    # list of intermediate predictions).
    score_map = output[-1].data.cpu()
    pred_batch_img = batch_with_heatmap(inp.unsqueeze(0), score_map)
    ax.clear()
    ax.imshow(pred_batch_img)
    return ax
Example #2
0
def train(train_loader, model, criterion, optimizer, debug=False, flip=True):
    """Run one training epoch over ``train_loader``.

    Args:
        train_loader: yields ``(inputs, target, meta)`` batches.
        model: pose network returning a list of per-stack heatmap outputs.
        criterion: loss applied to every stack's output vs ``target``.
        optimizer: optimizer stepped once per batch.
        debug: if True, visualize groundtruth vs prediction heatmaps.
        flip: unused here (kept for signature parity with ``validate``).

    Returns:
        (average loss, average accuracy) over the epoch.

    Note: ``idx`` (the joint-index subset passed to ``accuracy``) is a
    module-level global -- confirm it is defined before calling.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()

    gt_win, pred_win = None, None
    bar = Bar('Processing', max=len(train_loader))
    for i, (inputs, target, meta) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input_var = torch.autograd.Variable(inputs.cuda())
        # `async=True` is a SyntaxError on Python >= 3.7; the kwarg was
        # renamed to `non_blocking` in PyTorch 0.4.
        target_var = torch.autograd.Variable(target.cuda(non_blocking=True))

        # compute output
        output = model(input_var)
        score_map = output[-1].data.cpu()

        # Intermediate supervision: sum the loss over every stack's output.
        loss = criterion(output[0], target_var)
        for j in range(1, len(output)):
            loss += criterion(output[j], target_var)
        acc = accuracy(score_map, target, idx)

        if debug:  # visualize groundtruth and predictions
            gt_batch_img = batch_with_heatmap(inputs, target)
            pred_batch_img = batch_with_heatmap(inputs, score_map)
            if not gt_win or not pred_win:
                ax1 = plt.subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = plt.subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()

        # measure accuracy and record loss
        # (`loss.data[0]` raises on PyTorch >= 0.5; `.item()` is the
        # supported way to read a scalar tensor)
        losses.update(loss.item(), inputs.size(0))
        acces.update(acc[0], inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix  = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                    batch=i + 1,
                    size=len(train_loader),
                    data=data_time.val,
                    bt=batch_time.val,
                    total=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg,
                    acc=acces.avg
                    )
        bar.next()

    bar.finish()
    return losses.avg, acces.avg
Example #3
0
def validate(val_loader, model, criterion, num_classes, debug=False, flip=True, _logger=None):
    """Evaluate ``model`` over ``val_loader`` and collect keypoint predictions.

    Args:
        val_loader: yields ``(inputs, target, meta)`` batches.
        model: pose network returning a list of per-stack heatmap outputs.
        criterion: loss summed over every stack's output.
        num_classes: number of keypoints per sample.
        debug: if True, visualize groundtruth vs prediction heatmaps.
        flip: if True, average in predictions from the horizontally
            flipped input (test-time augmentation).
        _logger: logger used to report per-batch progress.

    Returns:
        (average loss * 100, average accuracy * 100, predictions tensor
        of shape ``[len(dataset), num_classes, 2]``).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # predictions
    predictions = torch.Tensor(len(val_loader.dataset), num_classes, 2)
    autoloss = models.loss.UniLoss(valid=True)
    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    # `volatile=True` Variables were removed in PyTorch 0.4; torch.no_grad()
    # is the supported way to disable autograd during evaluation.
    with torch.no_grad():
        for i, (inputs, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            # `async=True` is a SyntaxError on Python >= 3.7; renamed to
            # `non_blocking` in PyTorch 0.4.
            target = target.cuda(non_blocking=True)

            input_var = torch.autograd.Variable(inputs.cuda())
            target_var = torch.autograd.Variable(target)

            # compute output
            output = model(input_var)
            score_map = output[-1].data.cpu()
            if flip:
                # Test-time augmentation: run the mirrored image and fold
                # the un-mirrored response back into the score map.
                flip_input_var = torch.autograd.Variable(
                        torch.from_numpy(fliplr(inputs.clone().numpy())).float().cuda()
                    )
                flip_output_var = model(flip_input_var)
                flip_output = flip_back(flip_output_var[-1].data.cpu())
                score_map += flip_output

            # Intermediate supervision: sum loss over every stack.
            loss = 0
            for o in output:
                loss += criterion(o, target_var)
            _, acc, _ = autoloss(output[-1], meta)

            # generate predictions in original-image coordinates
            preds = final_preds(score_map, meta['center'], meta['scale'], [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(inputs, target)
                pred_batch_img = batch_with_heatmap(inputs, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses.update(loss.item(), inputs.size(0))
            acces.update(acc.item(), inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix  = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                        batch=i + 1,
                        size=len(val_loader),
                        data=data_time.val,
                        bt=batch_time.avg,
                        total=bar.elapsed_td,
                        loss=losses.avg*100,
                        acc=acces.avg*100
                        )
            _logger.info(bar.suffix)

    bar.finish()
    return losses.avg*100, acces.avg*100, predictions
Example #4
0
def validate(val_loader,
             model,
             criterion,
             num_classes,
             debug=False,
             flip=True):
    """Evaluate ``model`` over ``val_loader`` and collect keypoint predictions.

    Args:
        val_loader: yields ``(input, target, meta, img_path)`` batches.
        model: pose network; may return a list of per-stack outputs or a
            single tensor.
        criterion: weighted loss ``criterion(output, target, target_weight)``.
        num_classes: number of keypoints per sample.
        debug: if True, show heatmaps and dump annotated frames to disk.
        flip: if True, average in predictions from the flipped input.

    Returns:
        (average loss, average accuracy, predictions tensor of shape
        ``[len(dataset), num_classes, 2]``).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # predictions
    predictions = torch.Tensor(len(val_loader.dataset), num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    with torch.no_grad():
        # `inputs` rather than `input` to avoid shadowing the builtin.
        for i, (inputs, target, meta, img_path) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            # Empty joint-index subset: `accuracy` receives no index filter.
            indexes = []

            inputs = inputs.to(device, non_blocking=True)

            # img_path arrives as a wrapped collection; str() yields e.g.
            # "('...path...',)" so the slice strips the wrapper characters.
            path = str(img_path)
            path = path[3:len(path) - 2]
            image = cv2.imread(path)

            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device, non_blocking=True)

            # compute output
            output = model(inputs)
            score_map = output[-1].cpu() if isinstance(output, list) else output.cpu()

            if flip:
                # Test-time augmentation with the mirrored input.
                flip_input = torch.from_numpy(fliplr(
                    inputs.clone().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if isinstance(
                    flip_output, list) else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if isinstance(output, list):  # multiple output (stacked model)
                loss = 0
                for o in output:
                    loss += criterion(o, target, target_weight)
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight)

            # generate predictions in original-image coordinates
            preds, vals = final_preds(score_map, meta['center'], meta['scale'],
                                      [64, 64])

            acc = accuracy(score_map, target.cpu(), indexes)

            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(inputs, target)
                pred_batch_img = batch_with_heatmap(inputs, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()
                # NOTE(review): hard-coded machine-specific dump path --
                # consider making this a parameter.
                cv2.imwrite(
                    '/home/shantam/Documents/Programs/pytorch-pose/example/predictions/pred'
                    + str(i) + '.png', image)

            # measure accuracy and record loss
            losses.update(loss.item(), inputs.size(0))
            acces.update(acc[0], inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=i + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                acc=acces.avg)
            bar.next()

        bar.finish()
    return losses.avg, acces.avg, predictions
Example #5
0
def train(train_loader, model, tmodel, criterion, optimizer, kdloss_alpha, debug=False, flip=True):
    """Run one knowledge-distillation training epoch.

    Labeled samples (``meta['gtmask'] >= 0.1``) contribute both a
    ground-truth loss and a distillation loss against the teacher
    ``tmodel``; unlabeled samples contribute only the distillation loss.

    Args:
        train_loader: yields ``(inputs, target, meta)``; ``meta['gtmask']``
            flags which samples have ground truth.
        model: student network (trained).
        tmodel: teacher network (outputs detached, not trained).
        criterion: per-sample heatmap loss.
        optimizer: optimizer stepped once per batch.
        kdloss_alpha: mixing weight between distillation and GT loss on
            labeled samples.
        debug: if True, visualize GT / student / teacher heatmaps.
        flip: unused here (kept for signature parity).

    Returns:
        (average total loss, average accuracy) over the epoch.

    Note: ``idx`` (joint-index subset for ``accuracy``) is a module-level
    global -- confirm it is defined before calling.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    kdlosses = AverageMeter()
    unkdlosses = AverageMeter()
    tslosses = AverageMeter()
    gtlosses = AverageMeter()
    acces = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()

    # pred_teacher must be initialized alongside the other windows:
    # the original referenced it before assignment on the first debug pass.
    gt_win, pred_win, pred_teacher = None, None, None
    bar = Bar('Processing', max=len(train_loader))
    for i, (inputs, target, meta) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input_var = torch.autograd.Variable(inputs.cuda())
        # `async=True` is a SyntaxError on Python >= 3.7.
        target_var = torch.autograd.Variable(target.cuda(non_blocking=True))

        # compute output
        output = model(input_var)
        score_map = output[-1].data.cpu()

        # compute teacher network output (no gradient through the teacher)
        toutput = tmodel(input_var)
        toutput = toutput[-1].detach()

        gtloss = torch.tensor(0.0).cuda()
        kdloss = torch.tensor(0.0).cuda()
        kdloss_unlabeled = torch.tensor(0.0).cuda()
        unkdloss_alpha = 1.0
        gtmask = meta['gtmask']

        train_batch = score_map.shape[0]

        for j in range(0, len(output)):
            _output = output[j]
            # Use a distinct loop variable: the original reused `i` here,
            # clobbering the batch index used in the progress bar below.
            for b in range(gtmask.shape[0]):
                if gtmask[b] < 0.1:
                    # unlabeled data, gtmask=0.0, kdloss only;
                    # divide by train_batch to keep magnitudes comparable
                    kdloss_unlabeled += criterion(_output[b,:,:,:], toutput[b, :,:,:])/train_batch
                else:
                    # labeled data: kdloss + gtloss
                    gtloss += criterion(_output[b,:,:,:], target_var[b, :,:,:])/train_batch
                    kdloss += criterion(_output[b,:,:,:], toutput[b,:,:,:])/train_batch

        loss_labeled = kdloss_alpha * (kdloss) + (1 - kdloss_alpha)*gtloss
        total_loss   = loss_labeled + unkdloss_alpha * kdloss_unlabeled

        acc = accuracy(score_map, target, idx)

        if debug: # visualize groundtruth, student and teacher predictions
            gt_batch_img = batch_with_heatmap(inputs, target)
            pred_batch_img = batch_with_heatmap(inputs, score_map)
            teacher_batch_img = batch_with_heatmap(inputs, toutput)
            if not gt_win or not pred_win or not pred_teacher:
                ax1 = plt.subplot(131)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = plt.subplot(132)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
                ax3 = plt.subplot(133)
                ax3.title.set_text('teacher')
                pred_teacher = plt.imshow(teacher_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
                pred_teacher.set_data(teacher_batch_img)
            plt.pause(.05)
            plt.draw()

        # measure accuracy and record loss
        gtlosses.update(gtloss.item(), inputs.size(0))
        kdlosses.update(kdloss.item(), inputs.size(0))
        unkdlosses.update(kdloss_unlabeled.item(), inputs.size(0))
        losses.update(total_loss.item(), inputs.size(0))
        acces.update(acc[0], inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix  = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} ' \
                      '| Loss: {loss:.6f} | KdLoss:{kdloss:.6f}| unKdLoss:{unkdloss:.6f}| GtLoss:{gtloss:.6f} | Acc: {acc: .4f}'.format(
                    batch=i + 1,
                    size=len(train_loader),
                    data=data_time.val,
                    bt=batch_time.val,
                    total=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg,
                    kdloss=kdlosses.avg,
                    unkdloss=unkdlosses.avg,
                    gtloss=gtlosses.avg,
                    acc=acces.avg
                    )
        bar.next()

    bar.finish()
    return losses.avg, acces.avg
Example #6
0
File: main.py  Project: syusukee/MyPose
def myvalidate( model, criterion, num_classes, debug=False, flip=True):
    """Run the pose model on a small fixed folder of images and return
    keypoint predictions.

    Images ``0.jpg .. {img_num-1}.jpg`` are loaded from a hard-coded
    folder, cropped around a hard-coded center/scale, normalized with the
    MPII mean/std file and fed through ``model``.

    Args:
        model: pose network; may return a list of per-stack outputs or a
            single tensor.
        criterion: unused (kept for signature parity with ``validate``).
        num_classes: number of keypoints per image.
        debug: if True, plot the prediction heatmap and save it as PNG.
        flip: unused (flip augmentation is disabled below).

    Returns:
        predictions tensor of shape ``[img_num, num_classes, 2]``.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # NOTE(review): machine-specific constants -- consider parameterizing.
    img_folder = '/data3/wzwu/dataset/my'
    img_num = 1
    r = 0
    center1 = torch.Tensor([1281,2169])
    center2 = torch.Tensor([[1281,2169]])
    scale = torch.Tensor([10.0])
    inp_res = 256
    meanstd_file = './data/mpii/mean.pth.tar'
    if isfile(meanstd_file):
        meanstd = torch.load(meanstd_file)
        mean = meanstd['mean']
        std = meanstd['std']
    else:
        # The original silently skipped this and crashed later with a
        # NameError on `mean`; fail fast with a clear message instead.
        raise FileNotFoundError(meanstd_file)

    # Pre-load and normalize every input image.
    input_list = []
    for i in range(img_num):
        img_name = str(i)+'.jpg'
        img_path = os.path.join(img_folder,img_name)
        print('img_path')
        print(img_path)
        img = load_image(img_path)
        inp = crop(img, center1, scale, [inp_res, inp_res], rot=r)
        inp = color_normalize(inp, mean, std)
        input_list.append(inp)

    # predictions
    predictions = torch.Tensor(img_num, num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=img_num)
    with torch.no_grad():
        for i, inp in enumerate(input_list):
            # add the batch dimension expected by the model
            s0, s1, s2 = inp.size()
            inp = inp.view(1, s0, s1, s2)
            data_time.update(time.time() - end)

            inp = inp.to(device, non_blocking=True)

            # compute output
            output = model(inp)
            score_map = output[-1].cpu() if isinstance(output, list) else output.cpu()

            # generate predictions in original-image coordinates
            preds = final_preds(score_map, center2, scale, [64, 64])
            print('preds')
            print(preds)
            # batch size is 1 here, so this just stores preds[0] at slot i
            for n in range(score_map.size(0)):
                predictions[i, :, :] = preds[n, :, :]

            if debug:
                pred_batch_img = batch_with_heatmap(inp, score_map)
                if not gt_win or not pred_win:
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()
                plt.savefig('/data3/wzwu/test/'+str(i)+'.png')

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress (losses/acces meters are never updated here,
            # so their averages report 0)
            bar.suffix  = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                        batch=i + 1,
                        size=img_num,
                        data=data_time.val,
                        bt=batch_time.avg,
                        total=bar.elapsed_td,
                        eta=bar.eta_td,
                        loss=losses.avg,
                        acc=acces.avg
                        )
            bar.next()

        bar.finish()
    return predictions
def validate(val_loader,
             model,
             criterion,
             debug=False,
             flip=True,
             test_batch=6,
             njoints=68):
    """Evaluate a facial-landmark model, tracking inter-ocular-normalized
    error and AUC alongside loss/accuracy.

    Args:
        val_loader: yields ``(input, target, meta)`` batches.
        model: landmark network; may return a list of per-stack outputs or
            a single tensor.
        criterion: weighted loss ``criterion(output, target, target_weight,
            len(idx))``.
        debug: if True, visualize groundtruth vs prediction heatmaps.
        flip: if True, average in predictions from the flipped input.
        test_batch: batch size, used to slot per-batch inter-ocular
            distances into the right columns.
        njoints: number of landmarks per face.

    Returns:
        (average loss, average accuracy, predictions, auc, mean_error).

    Note: ``idx`` (1-based landmark subset) is a module-level global --
    confirm it is defined before calling.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # predictions
    predictions = torch.Tensor(len(val_loader.dataset), njoints, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))

    # one column per sample; -1 marks missing entries (see mean filter below)
    interocular_dists = torch.zeros((njoints, len(val_loader.dataset)))

    with torch.no_grad():
        for i, (inputs, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            inputs = inputs.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device, non_blocking=True)

            # compute output
            output = model(inputs)
            score_map = output[-1].cpu() if isinstance(output, list) else output.cpu()
            if flip:
                # Test-time augmentation with the mirrored input.
                flip_input = torch.from_numpy(fliplr(
                    inputs.clone().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if isinstance(
                    flip_output, list) else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if isinstance(output, list):  # multiple output (stacked model)
                loss = 0
                for o in output:
                    loss += criterion(o, target, target_weight, len(idx))
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight, len(idx))

            acc, batch_interocular_dists = accuracy(score_map, target.cpu(),
                                                    idx)
            interocular_dists[:, i * test_batch:(i + 1) *
                              test_batch] = batch_interocular_dists

            # generate predictions in original-image coordinates
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(inputs, target)
                pred_batch_img = batch_with_heatmap(inputs, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses.update(loss.item(), inputs.size(0))
            acces.update(acc[0], inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.8f} | Acc: {acc: .8f}'.format(
                batch=i + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                acc=acces.avg)
            bar.next()

        bar.finish()
        # `idx` is 1-based; convert to 0-based row indices
        idx_array = np.array(idx) - 1
        interocular_dists_pickup = interocular_dists[idx_array, :]
        mean_error = torch.mean(
            interocular_dists_pickup[interocular_dists_pickup != -1])
        auc = calc_metrics(interocular_dists,
                           idx)  # this is auc of predicted maps and target.
    return losses.avg, acces.avg, predictions, auc, mean_error
Example #8
0
    def validate(self):
        """Evaluate ``self.netG`` over ``self.val_loader`` (MPII samples
        only) and collect keypoint predictions.

        Returns:
            (average loss, average accuracy, predictions tensor of shape
            ``[len(dataset), self.num_classes, 2]``).
        """
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        acces = AverageMeter()

        predictions = torch.Tensor(len(self.val_loader.dataset),
                                   self.num_classes, 2)

        self.netG.eval()

        gt_win, pred_win = None, None
        end = time.time()
        bar = Bar('Eval ', max=len(self.val_loader))
        with torch.no_grad():
            for i, (inputs, target, meta, mpii) in enumerate(self.val_loader):
                # only evaluate MPII samples
                if mpii == False:
                    continue
                data_time.update(time.time() - end)

                inputs = inputs.to(self.device, non_blocking=True)
                target = target.to(self.device, non_blocking=True)
                target_weight = meta['target_weight'].to(self.device,
                                                         non_blocking=True)

                output = self.netG(inputs)
                score_map = output[-1].cpu() if isinstance(output, list) else output.cpu()
                if self.flip:
                    # BUG FIX: the original assigned the *function*
                    # torch.from_numpy to flip_input and fed it to the
                    # model. Build the mirrored input tensor instead
                    # (same pattern as the non-member validate()).
                    flip_input = torch.from_numpy(fliplr(
                        inputs.clone().cpu().numpy())).float().to(self.device)
                    flip_output = self.netG(flip_input)
                    flip_output = flip_output[-1].cpu() if isinstance(
                        flip_output, list) else flip_output.cpu()
                    flip_output = flip_back(flip_output)
                    score_map += flip_output

                if isinstance(output, list):  # stacked model: sum all stacks
                    loss = 0
                    for o in output:
                        loss += self.criterion(o, target, target_weight)
                    output = output[-1]
                else:
                    loss = self.criterion(output, target, target_weight)

                acc = accuracy(score_map, target.cpu(), self.idx)

                # predictions in original-image coordinates
                preds = final_preds(score_map, meta['center'], meta['scale'],
                                    [64, 64])
                for n in range(score_map.size(0)):
                    predictions[meta['index'][n], :, :] = preds[n, :, :]

                if self.debug:
                    gt_batch_img = batch_with_heatmap(inputs, target)
                    pred_batch_img = batch_with_heatmap(inputs, score_map)
                    if not gt_win or not pred_win:
                        plt.subplot(121)
                        gt_win = plt.imshow(gt_batch_img)
                        plt.subplot(122)
                        pred_win = plt.imshow(pred_batch_img)
                    else:
                        gt_win.set_data(gt_batch_img)
                        pred_win.set_data(pred_batch_img)
                    plt.pause(.05)
                    plt.draw()

                # BUG FIX: the original passed the bound method `loss.item`
                # (not its value) into the meter.
                losses.update(loss.item(), inputs.size(0))
                acces.update(acc[0], inputs.size(0))

                batch_time.update(time.time() - end)
                end = time.time()

                bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                    batch=i + 1,
                    size=len(self.val_loader),
                    data=data_time.val,
                    bt=batch_time.avg,
                    total=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg,
                    acc=acces.avg)

                bar.next()

            bar.finish()
        return losses.avg, acces.avg, predictions
Example #9
0
    def train(self, gen_iterations, start_t, epoch, lr):
        """Run one adversarial training epoch: per batch, update every
        per-stack discriminator in ``self.netsD``, then the generator
        ``self.netG``.

        Args:
            gen_iterations: running generator-update counter (logging cadence).
            start_t: epoch start timestamp, for the final timing print.
            epoch: current epoch index.
            lr: learning rate, forwarded to ``save_model`` at snapshots.
        """
        batch_time = AverageMeter()
        data_time = AverageMeter()

        self.netG.train()

        end = time.time()

        gt_win, pred_win = None, None
        bar = Bar('Train', max=len(self.train_loader))
        step = 0
        errD_total = None
        errG_total = None
        for i, (inputs, target, meta, mpii) in enumerate(self.train_loader):
            data_time.update(time.time() - end)

            ######################################################
            # (1) Prepare training data and Compute text embeddings
            ######################################################
            inputs, target = inputs.to(self.device), target.to(self.device,
                                                               non_blocking=True)
            target_weight = meta['target_weight'].to(self.device,
                                                     non_blocking=True)

            #######################################################
            # (2) Generate fake heatmaps
            ######################################################
            output = self.netG(inputs)

            #######################################################
            # (3) Update D network
            ######################################################
            errD_total = 0
            D_logs = ''
            # Use a distinct loop variable: the original reused `i` here,
            # clobbering the batch index used in the progress bar below.
            for si in range(self.num_stacks):
                self.netsD[si].zero_grad()
                errD = discriminator_loss(self.netsD[si], target, target_weight,
                                          output[si], inputs, self.real_labels,
                                          self.fake_labels, mpii)

                # BUG FIX: `.backword()` typo raised AttributeError.
                errD.backward()
                self.optimizersD[si].step()
                errD_total += errD
                # BUG FIX: '%d.2f' printed an int plus a literal ".2f";
                # `.data[0]` also fails on modern PyTorch.
                D_logs += 'errD%d: %.2f ' % (si, errD.item())

            #######################################################
            # (4) Update G network: maximize log(D(G(z)))
            ######################################################
            step += 1
            gen_iterations += 1

            self.netG.zero_grad()
            errG_total, G_logs = \
                generator_loss(self.netsD, self.domainD, output, self.real_labels, inputs, target_weight, mpii)

            if self.debug:
                gt_batch_img = batch_with_heatmap(inputs, target)
                pred_batch_img = batch_with_heatmap(inputs, output)
                if not gt_win or not pred_win:
                    ax1 = plt.subplot(121)
                    ax1.title.set_text('Groundtruth')
                    gt_win = plt.imshow(gt_batch_img)
                    ax2 = plt.subplot(122)
                    ax2.title.set_text('Prediction')
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            errG_total.backward()
            self.optimizerG.step()

            batch_time.update(time.time() - end)
            end = time.time()

            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:}'.format(
                batch=i + 1,
                size=len(self.train_loader),
                data=data_time.val,
                bt=batch_time.val,
                total=bar.elapsed_td,
                eta=bar.eta_td)
            bar.next()

            if gen_iterations % 100 == 0:
                print(D_logs + '\n' + G_logs)

        end_t = time.time()

        # `.data[0]` indexing fails on modern PyTorch; use .item()
        print('''[%d/%d]
                  Loss_D: %.2f Loss_G: %.2f Time: %.2fs''' %
              (epoch, self.epochs, errD_total.item(), errG_total.item(),
               end_t - start_t))

        if epoch % cfg.TRAIN.SNAPSHOT_INTERVAL == 0:
            self.save_model(self.netsD, lr, epoch)

        bar.finish()
Example #10
0
def train(train_loader,
          model,
          criterion,
          optimizer,
          debug=False,
          flip=True,
          train_iters=0):
    """Train ``model`` for a fixed number of iterations, re-cycling the
    loader when it is shorter than ``train_iters``.

    Args:
        train_loader: yields ``(input, target, meta)`` batches.
        model: network under training (already on ``device``).
        criterion: loss taking ``(output, target, target_weight)``.
        optimizer: optimizer stepping ``model``'s parameters.
        debug: when True, save groundtruth/prediction heatmap figures
            under ``debug/``.
        flip: unused here; kept for signature parity with the other loops.
        train_iters: number of batches to process; 0 means one full epoch.

    Returns:
        (average loss, average accuracy) over the processed batches.
    """
    print("Train iters: {}".format(train_iters))
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # switch to train mode
    model.train()
    debug_count = 0
    end = time.time()
    gt_win, pred_win = None, None

    # 0 is a sentinel for "exactly one pass over the loader".
    train_iters = train_iters if train_iters != 0 else len(train_loader)
    bar = Bar('Train', max=train_iters)

    curr_iter = 0
    while curr_iter < train_iters:
        for i, (inputs, target, meta) in enumerate(train_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            inputs = inputs.to(device)
            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device, non_blocking=True)

            # compute output
            output = model(inputs)
            if type(output) == list:  # multiple output (intermediate supervision)
                loss = 0
                for o in output:
                    loss += criterion(o, target, target_weight)
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight)
            # idx is a module-level joint-index subset used by accuracy()
            acc = accuracy(output, target, idx)

            if debug:  # visualize groundtruth and predictions
                gt_batch_img = batch_with_heatmap(inputs, target)
                pred_batch_img = batch_with_heatmap(inputs, output)
                fig = plt.figure()
                ax1 = fig.add_subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = fig.add_subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
                plt.pause(.05)
                plt.draw()
                fig.savefig('debug/debug_{}.png'.format(str(debug_count)),
                            dpi=500)
                # count saved figures only (was incremented on every batch,
                # leaving gaps in the numbering when debug toggles)
                debug_count += 1

            # measure accuracy and record loss
            losses.update(loss.item(), inputs.size(0))
            acces.update(acc[0], inputs.size(0))

            # compute gradient and do SGD step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=i + 1,
                size=train_iters,
                data=data_time.val,
                bt=batch_time.val,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                acc=acces.avg)
            bar.next()

            curr_iter += 1
            # BUGFIX: stop at exactly train_iters. The original compared
            # against train_iters - 1, which broke one batch early and then
            # restarted the DataLoader from scratch just to run the single
            # final iteration.
            if curr_iter >= train_iters:
                break

    bar.finish()
    return losses.avg, acces.avg
예제 #11
0
def train(inqueues,
          outqueues,
          train_loader,
          model,
          criterion,
          optimizer,
          debug=False,
          flip=True,
          clip=1,
          _logger=None):
    """Run one training epoch, optionally farming per-sample loss/gradient
    computation out to worker processes over ``inqueues``/``outqueues``.

    When the queue lists are non-empty, each sample of each stack output is
    sent to a worker, which returns (loss, acc, sum_loss, grad); gradients
    are applied via ``Tensor.backward(grad)`` with gradient-norm clipping.
    Otherwise the loss is computed in-process with ``criterion``.

    Returns:
        (average loss * 100, average accuracy * 100).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()
    sum_losses = AverageMeter()
    # switch to train mode
    model.train()
    criterion.valid = False
    end = time.time()
    gt_win, pred_win = None, None
    bar = Bar('Processing', max=len(train_loader))
    for i, (inputs, target, meta) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if True:
            input_var = torch.autograd.Variable(inputs.cuda())
            # BUGFIX: `.cuda(async=True)` is a SyntaxError on Python >= 3.7
            # (`async` became a keyword); use non_blocking instead.
            target_var = torch.autograd.Variable(target.cuda(non_blocking=True))
            # compute output
            output = model(input_var)
            if len(inqueues) > 0:
                loss = []
                acc = []
                sum_loss = []
                for j in range(len(output)):
                    grad = []
                    for ii in range(output[0].size(0)):
                        data = {}
                        data['output'] = output[j][ii]
                        data['meta'] = {}
                        data['meta']['bi_target'] = meta['bi_target'][ii]
                        data['meta']['pck'] = meta['pck'][ii]
                        data['meta']['points'] = meta['points'][ii]
                        data['meta']['tpts'] = meta['tpts'][ii]
                        inqueues[ii].send(data)
                    for ii in range(output[0].size(0)):
                        _loss, _acc, _sum_loss, _grad = outqueues[ii].recv()
                        loss.append(_loss)
                        grad.append(_grad)
                        if j == len(output) - 1:
                            acc.append(_acc)
                            sum_loss.append(_sum_loss)
                    optimizer.zero_grad()
                    # NOTE(review): gradients are always applied through
                    # output[0] even though they were computed for output[j]
                    # -- looks suspicious; confirm against the worker code.
                    output[0].backward(torch.stack(grad, 0))
                    torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
                    optimizer.step()
                loss = sum(loss) / output[0].size(0)
                acc = sum(acc) / output[0].size(0)
                sum_loss = sum(sum_loss) / output[0].size(0)
            else:
                optimizer.zero_grad()
                loss, acc, sum_loss = criterion(output[0], meta)
                for j in range(1, len(output)):
                    _loss, acc, sum_loss = criterion(output[j], meta)
                    loss += _loss
                loss.backward()
                optimizer.step()

            if debug:  # visualize groundtruth and predictions
                # BUGFIX: `score_map` was never defined in this function
                # (NameError whenever debug=True); derive it from the final
                # stack's heatmaps as the sibling loops do.
                score_map = output[-1].data.cpu()
                gt_batch_img = batch_with_heatmap(inputs, target)
                pred_batch_img = batch_with_heatmap(inputs, score_map)
                if not gt_win or not pred_win:
                    ax1 = plt.subplot(121)
                    ax1.title.set_text('Groundtruth')
                    gt_win = plt.imshow(gt_batch_img)
                    ax2 = plt.subplot(122)
                    ax2.title.set_text('Prediction')
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses.update(loss.data.item(), inputs.size(0))
            acces.update(acc.item(), inputs.size(0))
            sum_losses.update(sum_loss)
        # drop the graph reference before freeing cached GPU memory
        loss = None
        torch.cuda.empty_cache()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | Loss: {loss:.4f} | Sum_Loss: {sum_loss:.4f} | Acc: {acc: .4f}'.format(
            batch=i + 1,
            size=len(train_loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            loss=losses.avg * 100,
            sum_loss=sum_losses.avg * 100,
            acc=acces.avg * 100)
        _logger.info(bar.suffix)
    bar.finish()
    return losses.avg * 100, acces.avg * 100
예제 #12
0
def validate(val_loader,
             model,
             criterion,
             criterion_seg,
             debug=False,
             flip=True,
             test_batch=6,
             njoints=68):
    """Evaluate a joint keypoint + segmentation model.

    Accumulates keypoint loss, segmentation loss, PCK accuracy, and
    mean IoU over the validation set, and decodes per-sample keypoint
    predictions back to original-image coordinates.

    Returns:
        (avg keypoint loss, avg accuracy, predictions tensor of shape
        [len(dataset), njoints, 2], mean IoU * 100).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses_kpt = AverageMeter()
    losses_seg = AverageMeter()
    acces = AverageMeter()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()

    # predictions
    predictions = torch.Tensor(val_loader.dataset.__len__(), njoints, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))

    interocular_dists = torch.zeros((njoints, val_loader.dataset.__len__()))

    with torch.no_grad():
        for i, (input, target, target_seg, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input, target, target_seg = input.to(device), target.to(
                device, non_blocking=True), target_seg.to(device)
            target_weight = meta['target_weight'].to(device, non_blocking=True)

            # compute output: the model returns (keypoint maps, seg maps)
            output_kpt, output_seg = model(input)
            score_map = output_kpt[-1].cpu() if type(
                output_kpt) == list else output_kpt.cpu()

            if flip:
                # BUGFIX: .numpy() requires a CPU tensor; `input` lives on
                # `device`, so move it back before converting.
                flip_input = torch.from_numpy(fliplr(
                    input.clone().cpu().numpy())).float().to(device)
                # BUGFIX: the model returns a (keypoints, segmentation)
                # tuple; the original treated it as a single tensor and
                # crashed on `.cpu()`. Only the keypoint head is flipped.
                flip_output, _ = model(flip_input)
                flip_output = flip_output[-1].cpu() if type(
                    flip_output) == list else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if type(output_kpt) == list:  # multiple output
                loss_kpt = 0
                loss_seg = 0
                for (o, o_seg) in zip(output_kpt, output_seg):
                    loss_kpt += criterion(o, target, target_weight, len(idx))
                    loss_seg += criterion_seg(o_seg, target_seg)
                output = output_kpt[-1]
                output_seg = output_seg[-1]
            else:  # single output
                loss_kpt = criterion(output_kpt, target, target_weight,
                                     len(idx))
                # BUGFIX: use the segmentation criterion here (was
                # `criterion`, mismatching the multi-output branch and the
                # corresponding train loop).
                loss_seg = criterion_seg(output_seg, target_seg)

            acc, batch_interocular_dists = accuracy(score_map, target.cpu(),
                                                    idx)
            _, pred_seg = torch.max(output_seg, 1)

            # generate predictions in original-image coordinates
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses_kpt.update(loss_kpt.item(), input.size(0))
            losses_seg.update(loss_seg.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            inter, union = inter_and_union(
                pred_seg.data.cpu().numpy().astype(np.uint8),
                target_seg.data.cpu().numpy().astype(np.uint8))
            inter_meter.update(inter)
            union_meter.update(union)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # running per-class IoU (epsilon guards empty classes)
            iou = inter_meter.sum / (union_meter.sum + 1e-10)

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss_kpt: {loss_kpt:.8f} | Loss_seg: {loss_seg:.8f} | Acc: {acc: .8f} | IOU: {iou:.2f}'.format(
                batch=i + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss_kpt=losses_kpt.avg,
                loss_seg=losses_seg.avg,
                acc=acces.avg,
                iou=iou.mean() * 100)
            bar.next()

        bar.finish()
        print(iou)
    return losses_kpt.avg, acces.avg, predictions, iou.mean() * 100
예제 #13
0
def train(train_loader,
          model,
          criterion,
          criterion_seg,
          optimizer,
          debug=False,
          flip=True,
          train_batch=6,
          epoch=0,
          njoints=68):
    """Jointly train the keypoint and segmentation heads for one epoch.

    The total loss is ``loss_kpt + (0.01 / (epoch + 1)) * loss_seg``,
    so the segmentation term decays as training progresses.

    Returns:
        (average keypoint loss, average accuracy).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses_kpt = AverageMeter()
    losses_seg = AverageMeter()
    acces = AverageMeter()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()

    gt_win, pred_win = None, None
    bar = Bar('Train', max=len(train_loader))

    for i, (input, target, target_seg, meta) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input, target, target_seg = input.to(device), target.to(
            device, non_blocking=True), target_seg.to(device)
        target_weight = meta['target_weight'].to(device, non_blocking=True)

        # compute output: the model returns (keypoint maps, seg maps)
        output_kpt, output_seg = model(input)
        if type(output_kpt) == list:  # multiple output
            loss_kpt = 0
            loss_seg = 0
            for (o, o_seg) in zip(output_kpt, output_seg):
                loss_kpt += criterion(o, target, target_weight, len(idx))
                loss_seg += criterion_seg(o_seg, target_seg)
            output_kpt = output_kpt[-1]
            output_seg = output_seg[-1]
        else:  # single output
            loss_kpt = criterion(output_kpt, target, target_weight, len(idx))
            loss_seg = criterion_seg(output_seg, target_seg)
        acc, batch_interocular_dists = accuracy(output_kpt, target, idx)
        _, pred_seg = torch.max(output_seg, 1)

        if debug:  # visualize groundtruth and predictions
            gt_batch_img = batch_with_heatmap(input, target)
            # BUGFIX: was `output`, which is undefined in this function
            # (NameError whenever debug=True); show the final keypoint maps.
            pred_batch_img = batch_with_heatmap(input, output_kpt)
            if not gt_win or not pred_win:
                ax1 = plt.subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = plt.subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()

        # segmentation weight decays with the epoch index
        loss = loss_kpt + (0.01 / (epoch + 1)) * loss_seg

        # measure accuracy and record loss
        losses_kpt.update(loss_kpt.item(), input.size(0))
        losses_seg.update(loss_seg.item(), input.size(0))
        acces.update(acc[0], input.size(0))

        inter, union = inter_and_union(
            pred_seg.data.cpu().numpy().astype(np.uint8),
            target_seg.data.cpu().numpy().astype(np.uint8))
        inter_meter.update(inter)
        union_meter.update(union)
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # running per-class IoU (epsilon guards empty classes)
        iou = inter_meter.sum / (union_meter.sum + 1e-10)

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss_kpt: {loss_kpt:.8f} | Loss_seg: {loss_seg:.8f} | Acc: {acc: .4f} | IOU: {iou: .2f}'.format(
            batch=i + 1,
            size=len(train_loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss_kpt=losses_kpt.avg,
            loss_seg=losses_seg.avg,
            acc=acces.avg,
            iou=iou.mean() * 100)
        bar.next()

    bar.finish()
    print(iou)
    return losses_kpt.avg, acces.avg
예제 #14
0
def validate(val_loader,
             model,
             criterion,
             num_classes,
             debug=False,
             flip=True):
    """Evaluate a model that emits hourglass maps plus an embedding head.

    The loss sums ``criterion`` over all hourglass outputs against
    ``target`` and adds the final (embedding) output against ``target2``;
    accuracy is computed on the embedding maps. Predictions keep every
    other joint of the decoded embedding maps (``preds[n, ::2, :]`` --
    presumably the embedding head doubles the joint channels; verify
    against the dataset definition).

    Returns:
        (average loss, average accuracy, predictions tensor).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # predictions
    predictions = torch.Tensor(val_loader.dataset.__len__(), num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    for i, (inputs, target, target2, meta) in enumerate(val_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # MODERNIZED: `.cuda(async=True)` is a SyntaxError on Python >= 3.7
        # and `volatile=True` Variables are a no-op since PyTorch 0.4; use
        # non_blocking transfers under torch.no_grad() instead.
        target = target.cuda(non_blocking=True)
        target2 = target2.cuda(non_blocking=True)
        input_var = inputs.cuda()

        with torch.no_grad():
            # compute output
            output = model(input_var)
            score_map_hg = output[-2].data.cpu()
            score_map_emb = output[-1].data.cpu()

            score_map_emb2 = score_map_emb.cuda(non_blocking=True)

            loss = criterion(output[0], target)
            for j in range(1, (len(output) - 1)):
                loss += criterion(output[j], target)
            loss += criterion(output[-1], target2)
            acc = accuracy(score_map_emb2, target2, idx)

        # generate predictions
        preds = final_preds(score_map_emb, meta['center'], meta['scale'],
                            [64, 64])
        for n in range(score_map_emb.size(0)):
            predictions[meta['index'][n], :, :] = preds[n, ::2, :]

        if debug:
            gt_batch_img = batch_with_heatmap(inputs, target)
            pred_batch_img = batch_with_heatmap(inputs, score_map_emb)
            if not gt_win or not pred_win:
                plt.subplot(121)
                gt_win = plt.imshow(gt_batch_img)
                plt.subplot(122)
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()

        # measure accuracy and record loss (`.data[0]` is deprecated)
        losses.update(loss.item(), inputs.size(0))
        acces.update(acc[0], inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
            batch=i + 1,
            size=len(val_loader),
            data=data_time.val,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            acc=acces.avg)
        bar.next()

    bar.finish()
    return losses.avg, acces.avg, predictions
예제 #15
0
def train(train_loader, model, criterion, optimizer, debug=False, flip=True):
    """Run one training epoch; return (average loss, average accuracy)."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    model.train()  # enable training-mode layers (dropout, batchnorm)

    gt_win, pred_win = None, None
    bar = Bar('Train', max=len(train_loader))
    end = time.time()

    for batch_idx, (inputs, target, meta) in enumerate(train_loader):
        data_time.update(time.time() - end)  # time spent loading the batch

        inputs = inputs.to(device)
        target = target.to(device, non_blocking=True)
        target_weight = meta['target_weight'].to(device, non_blocking=True)

        # forward pass; stacked networks yield one heatmap set per stack
        output = model(inputs)
        if type(output) == list:
            # intermediate supervision: accumulate the loss over all stacks
            loss = sum(criterion(o, target, target_weight) for o in output)
            output = output[-1]  # score only the final stack
        else:
            loss = criterion(output, target, target_weight)
        acc = accuracy(output, target, idx)

        if debug:  # show groundtruth next to the prediction
            gt_batch_img = batch_with_heatmap(inputs, target)
            pred_batch_img = batch_with_heatmap(inputs, output)
            if not gt_win or not pred_win:
                ax1 = plt.subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = plt.subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()

        # record running loss/accuracy weighted by batch size
        losses.update(loss.item(), inputs.size(0))
        acces.update(acc[0], inputs.size(0))

        # backprop + parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
            batch=batch_idx + 1,
            size=len(train_loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            acc=acces.avg)
        bar.next()

    bar.finish()
    return losses.avg, acces.avg
def train(train_loader,
          model,
          criterion,
          optimizer,
          debug=False,
          flip=True,
          train_batch=6,
          epoch=0,
          njoints=68):
    """Train a landmark model for one epoch, tracking interocular-normalized
    errors per joint.

    Args:
        train_loader: yields (input, target, meta) batches.
        model: network under training (already on ``device``).
        criterion: loss taking (output, target, target_weight, n_joints).
        optimizer: optimizer stepping ``model``'s parameters.
        debug: when True, display groundtruth/prediction heatmaps.
        flip: unused in this loop; kept for signature parity.
        train_batch: batch size, used to place each batch's distances into
            ``interocular_dists`` (assumes the loader preserves order and
            only the last batch may be smaller -- TODO confirm drop_last).
        epoch: unused in this loop; kept for signature parity.
        njoints: number of joints per sample.

    Returns:
        (average loss, average accuracy).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()

    gt_win, pred_win = None, None
    bar = Bar('Train', max=len(train_loader))

    # per-joint, per-sample interocular distances collected over the epoch;
    # entries left at 0 (or set to -1 by accuracy()) are filtered below
    interocular_dists = torch.zeros((njoints, train_loader.dataset.__len__()))

    for i, (input, target, meta) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input, target = input.to(device), target.to(device, non_blocking=True)
        target_weight = meta['target_weight'].to(device, non_blocking=True)

        # compute output
        output = model(input)
        if type(output) == list:  # multiple output (intermediate supervision)
            loss = 0
            for o in output:
                loss += criterion(o, target, target_weight, len(idx))
            output = output[-1]
        else:  # single output
            loss = criterion(output, target, target_weight, len(idx))
        # idx is a module-level joint-index subset used by accuracy()
        acc, batch_interocular_dists = accuracy(output, target, idx)
        # slot this batch's distances into the epoch-wide table
        interocular_dists[:, i * train_batch:(i + 1) *
                          train_batch] = batch_interocular_dists

        if debug:  # visualize groundtruth and predictions
            gt_batch_img = batch_with_heatmap(input, target)
            pred_batch_img = batch_with_heatmap(input, output)
            if not gt_win or not pred_win:
                ax1 = plt.subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = plt.subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()

        # measure accuracy and record loss
        losses.update(loss.item(), input.size(0))
        acces.update(acc[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.8f} | Acc: {acc: .8f}'.format(
            batch=i + 1,
            size=len(train_loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            acc=acces.avg)
        bar.next()

    bar.finish()
    # idx is 1-based; convert to 0-based rows of the distance table
    idx_array = np.array(idx) - 1
    interocular_dists_pickup = interocular_dists[idx_array, :]
    # -1 marks invisible/invalid joints and is excluded from the mean
    mean_error = torch.mean(
        interocular_dists_pickup[interocular_dists_pickup != -1])
    auc = calc_metrics(interocular_dists,
                       idx)  # this is auc of predicted maps and target.
    # NOTE(review): mean_error and auc are computed but never returned or
    # printed -- diagnostic leftovers, kept for parity with the original.
    #print("=> Mean Error: {:.8f}, AUC@0.08: {:.8f} based on maps".format(mean_error, auc))
    return losses.avg, acces.avg
예제 #17
0
def validate(val_loader,
             model,
             criterion,
             num_classes,
             debug=False,
             flip=True):
    """Evaluate the model on ``val_loader``.

    Accumulates loss and accuracy over the validation set and decodes a
    per-sample keypoint prediction for every dataset entry.

    Returns:
        (average loss, average accuracy, predictions tensor of shape
        [len(dataset), num_classes, 2]).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # one (x, y) prediction per keypoint for every dataset sample
    predictions = torch.Tensor(val_loader.dataset.__len__(), num_classes, 2)

    model.eval()  # freeze dropout/batchnorm statistics

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    with torch.no_grad():
        for batch_idx, (inputs, target, meta) in enumerate(val_loader):
            data_time.update(time.time() - end)  # batch-loading time

            inputs = inputs.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device, non_blocking=True)

            # forward pass; stacked models return one heatmap set per stack
            output = model(inputs)
            multi = type(output) == list
            score_map = output[-1].cpu() if multi else output.cpu()

            if flip:
                # average with the horizontally-flipped prediction
                flipped_in = torch.from_numpy(
                    fliplr(inputs.clone().numpy())).float().to(device)
                flipped_out = model(flipped_in)
                if type(flipped_out) == list:
                    flipped_out = flipped_out[-1].cpu()
                else:
                    flipped_out = flipped_out.cpu()
                score_map += flip_back(flipped_out)

            if multi:
                # intermediate supervision: sum the loss over all stacks
                loss = sum(criterion(o, target, target_weight) for o in output)
                output = output[-1]
            else:
                loss = criterion(output, target, target_weight)

            acc = accuracy(score_map, target.cpu(), idx)

            # decode heatmaps back to original-image coordinates
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:  # show groundtruth next to the prediction
                gt_batch_img = batch_with_heatmap(inputs, target)
                pred_batch_img = batch_with_heatmap(inputs, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # record running loss/accuracy weighted by batch size
            losses.update(loss.item(), inputs.size(0))
            acces.update(acc[0], inputs.size(0))

            batch_time.update(time.time() - end)
            end = time.time()

            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=batch_idx + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                acc=acces.avg)
            bar.next()

        bar.finish()
    return losses.avg, acces.avg, predictions
예제 #18
0
def validate(val_loader,
             model,
             criterion,
             debug=False,
             flip=True,
             test_batch=6,
             njoints=68):
    """Run one evaluation pass over ``val_loader``.

    The model is expected to return two heads ``(output, output_refine)``
    (coarse and refined heatmaps); each head may be a list of intermediate
    stack outputs or a single tensor. Loss is accumulated over both heads,
    while accuracy is reported on the refined maps only.

    Args:
        val_loader: DataLoader yielding ``(input, target, meta)`` batches,
            where ``meta['target_weight']`` holds per-joint weights.
        model: network producing ``(output, output_refine)``.
        criterion: loss callable taking ``(pred, target, target_weight, n)``.
        debug: if True, display ground-truth vs. predicted heatmaps.
        flip: if True, average predictions with those of the horizontally
            flipped input (test-time augmentation).
        test_batch: unused here; kept for caller compatibility.
        njoints: unused here; kept for caller compatibility.

    Returns:
        Tuple ``(avg_loss, avg_refined_accuracy)`` over the whole loader.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces_re = AverageMeter()
    # switch to evaluate mode (disables dropout, uses running BN stats)
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))

    with torch.no_grad():
        for i, (input, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device, non_blocking=True)

            # compute output; take the last stack when a list is returned
            output, output_refine = model(input)
            score_map = output[-1].cpu() if type(
                output) == list else output.cpu()
            score_map_refine = output_refine[-1].cpu() if type(
                output_refine) == list else output_refine.cpu()
            if flip:
                # Test-time flip augmentation: average with predictions on
                # the mirrored input. BUGFIX: move the tensor to CPU before
                # .numpy() — calling .numpy() on a CUDA tensor raises.
                flip_input = torch.from_numpy(fliplr(
                    input.clone().cpu().numpy())).float().to(device)
                flip_output, flip_output_re = model(flip_input)
                flip_output = flip_output[-1].cpu() if type(
                    flip_output) == list else flip_output.cpu()
                flip_output_re = flip_output_re[-1].cpu() if type(
                    flip_output_re) == list else flip_output_re.cpu()
                flip_output = flip_back(flip_output, 'real_animal')
                flip_output_re = flip_back(flip_output_re, 'real_animal')
                score_map += flip_output
                score_map_refine += flip_output_re

            # NOTE: `idx` is a module-level joint-index list defined
            # elsewhere in this file.
            if type(output) == list:  # multiple (stacked) outputs
                loss = 0
                # BUGFIX: pair coarse/refined stack outputs with zip().
                # The old `for (o, o_re) in (output, output_refine)` tried
                # to unpack each whole list into two names, which raises
                # ValueError for any stack depth other than 2 and never
                # actually paired the two heads.
                for o, o_re in zip(output, output_refine):
                    loss = loss + criterion(
                        o, target, target_weight, len(idx)) + criterion(
                            o_re, target, target_weight, len(idx))
            else:  # single output per head
                loss = criterion(
                    output, target, target_weight, len(idx)) + criterion(
                        output_refine, target, target_weight, len(idx))

            # accuracy is measured on the refined heatmaps only
            acc_re, _ = accuracy(score_map_refine, target.cpu(), idx)

            if debug:
                # visualize groundtruth and predictions side by side
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces_re.update(acc_re[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} ' \
                          '| Loss: {loss:.8f} | Acc_re: {acc_re: .8f}'.format(
                            batch=i + 1,
                            size=len(val_loader),
                            data=data_time.val,
                            bt=batch_time.avg,
                            total=bar.elapsed_td,
                            eta=bar.eta_td,
                            loss=losses.avg,
                            acc_re=acces_re.avg
                            )
            bar.next()

        bar.finish()

    return losses.avg, acces_re.avg