# Example #1 (score: 0)
def train(train_loader, net, optimizer, epoch, visualizer, idx, opt):
    """Run one training epoch of the stacked-hourglass pose network.

    Args:
        train_loader: iterable yielding (img, heatmap, c, s, r, grnd_pts,
            normalizer) batches; c/s/r are center/scale/rotation used for
            original-resolution evaluation.
        net: the pose-estimation network (outputs a list of per-stack heatmaps).
        optimizer: optimizer stepping ``net``'s parameters.
        epoch: current epoch index (for logging only).
        visualizer: logger with a ``print_log`` method.
        idx: joint indices passed to the PCKh evaluation.
        opt: options object; ``opt.print_freq`` controls log frequency.

    Returns:
        (average loss, average PCKh at original image resolution) for the epoch.
    """
    losses = AverageMeter()
    pckhs = AverageMeter()
    pckhs_origin_res = AverageMeter()
    # switch to train mode
    net.train()

    for i, (img, heatmap, c, s, r, grnd_pts,
            normalizer) in enumerate(train_loader):
        # input and groundtruth
        img = img.cuda(non_blocking=True)
        heatmap = heatmap.cuda(non_blocking=True)

        # output and loss: mean squared error summed over all
        # intermediate (stacked) outputs for deep supervision
        output = net(img)
        loss = 0
        for per_out in output:
            tmp_loss = (per_out - heatmap)**2
            loss = loss + tmp_loss.sum() / tmp_loss.numel()

        # gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # log running averages
        losses.update(loss.item())

        # PCKh on the final stack's output, both at heatmap and
        # original image resolution (heatmaps assumed 64x64 -- TODO confirm)
        pckh = Evaluation.accuracy(output[-1].cpu(), heatmap.cpu(), idx)
        pckhs.update(pckh[0])
        pckh_origin_res = Evaluation.accuracy_origin_res(
            output[-1].cpu(), c, s, [64, 64], grnd_pts, normalizer, r)
        pckhs_origin_res.update(pckh_origin_res[0])

        loss_dict = OrderedDict([('loss', losses.avg), ('pckh', pckhs.avg),
                                 ('pckh_origin_res', pckhs_origin_res.avg)])
        if i % opt.print_freq == 0 or i == len(train_loader) - 1:
            visualizer.print_log(epoch, i, len(train_loader), value1=loss_dict)
    return losses.avg, pckhs_origin_res.avg
# Example #2 (score: 0)
def validate(val_loader, net, epoch, visualizer, idx, num_classes):
    """Evaluate the network on the validation set with flip augmentation.

    The legacy ``Variable(..., volatile=True)`` / ``.cuda(async=True)`` /
    ``loss.data[0]`` APIs are removed or a SyntaxError in current
    Python/PyTorch; this uses ``torch.no_grad()``, ``non_blocking=True``
    and ``.item()`` instead.

    Args:
        val_loader: iterable yielding (img, heatmap, center, scale, rot,
            grnd_pts, normalizer, index) batches.
        net: the pose-estimation network (outputs a list of per-stack heatmaps).
        epoch: current epoch index (for logging only).
        visualizer: logger with a ``print_log`` method.
        idx: joint indices passed to the PCKh evaluation.
        num_classes: number of joints (second dim of ``predictions``).

    Returns:
        (average loss, average PCKh at original resolution, predictions)
        where ``predictions`` is a (dataset_len, num_classes, 2) tensor of
        predicted keypoints indexed by each sample's dataset index.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    pckhs = AverageMeter()
    pckhs_origin_res = AverageMeter()
    # predictions at original resolution, filled in per-sample below
    predictions = torch.Tensor(len(val_loader.dataset), num_classes, 2)

    # switch to evaluate mode
    net.eval()

    end = time.time()
    for i, (img, heatmap, center, scale, rot, grnd_pts, normalizer,
            index) in enumerate(val_loader):
        heatmap = heatmap.cuda(non_blocking=True)

        with torch.no_grad():
            # forward pass on the original image; per-stack MSE loss
            output1 = net(img)
            loss = 0
            for per_out in output1:
                tmp_loss = (per_out - heatmap)**2
                loss = loss + tmp_loss.sum() / tmp_loss.numel()

            # forward pass on the horizontally flipped image
            img_flip = img.numpy()[:, :, :, ::-1].copy()
            img_flip = torch.from_numpy(img_flip)
            output2 = net(img_flip)

        # un-flip the flipped prediction and average with the original
        output2 = HumanAug.flip_channels(output2[-1].cpu().data)
        output2 = HumanAug.shuffle_channels_for_horizontal_flipping(output2)
        output = (output1[-1].cpu().data + output2) / 2

        # calculate PCKh at heatmap and original resolution
        pckh = Evaluation.accuracy(output, heatmap.cpu(), idx)
        pckhs.update(pckh[0])
        pckh_origin_res = Evaluation.accuracy_origin_res(
            output, center, scale, [64, 64], grnd_pts, normalizer, rot)
        pckhs_origin_res.update(pckh_origin_res[0])

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # print log
        losses.update(loss.item())
        loss_dict = OrderedDict([('loss', losses.avg), ('pckh', pckhs.avg),
                                 ('pckh_origin_res', pckhs_origin_res.avg)])
        visualizer.print_log(epoch, i, len(val_loader), value1=loss_dict)

        # store per-sample keypoint predictions at original resolution
        preds = Evaluation.final_preds(output, center, scale, [64, 64], rot)
        for n in range(output.size(0)):
            predictions[index[n], :, :] = preds[n, :, :]

    return losses.avg, pckhs_origin_res.avg, predictions
# Example #3 (score: 0)
def train(train_loader, net, optimizer, epoch, visualizer, idx, opt):
    """Run one training epoch of the pose network.

    The legacy ``torch.autograd.Variable`` / ``.cuda(async=True)`` /
    ``loss.data[0]`` APIs are removed or a SyntaxError in current
    Python/PyTorch; this uses plain tensors, ``non_blocking=True`` and
    ``.item()`` instead.

    Args:
        train_loader: iterable yielding (img, heatmap, c, s, r, grnd_pts,
            normalizer) batches.
        net: the pose-estimation network (outputs a list of per-stack heatmaps).
        optimizer: optimizer stepping ``net``'s parameters.
        epoch: current epoch index (for logging only).
        visualizer: logger with a ``print_log`` method.
        idx: joint indices passed to the PCKh evaluation.
        opt: options object; ``opt.print_freq`` controls log frequency.

    Returns:
        (average loss, average PCKh at original image resolution).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    pckhs = AverageMeter()
    pckhs_origin_res = AverageMeter()
    # switch to train mode
    net.train()

    end = time.time()
    for i, (img, heatmap, c, s, r, grnd_pts,
            normalizer) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # input and groundtruth
        heatmap = heatmap.cuda(non_blocking=True)

        # output and loss: MSE summed over all stacked outputs
        output = net(img)
        loss = 0
        for per_out in output:
            tmp_loss = (per_out - heatmap)**2
            loss = loss + tmp_loss.sum() / tmp_loss.numel()

        # gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure optimization time
        batch_time.update(time.time() - end)
        end = time.time()

        # log running averages
        losses.update(loss.item())
        pckh = Evaluation.accuracy(output[-1].data.cpu(), heatmap.cpu(), idx)
        pckhs.update(pckh[0])
        pckh_origin_res = Evaluation.accuracy_origin_res(
            output[-1].data.cpu(), c, s, [64, 64], grnd_pts, normalizer, r)
        pckhs_origin_res.update(pckh_origin_res[0])
        loss_dict = OrderedDict([('loss', losses.avg), ('pckh', pckhs.avg),
                                 ('pckh_origin_res', pckhs_origin_res.avg)])
        if i % opt.print_freq == 0 or i == len(train_loader) - 1:
            visualizer.print_log(epoch, i, len(train_loader), value1=loss_dict)

    return losses.avg, pckhs_origin_res.avg
def train_hg(train_loader, hg, optimizer_hg, agent_sr, epoch, visualizer, opt):
    """Train the hourglass network, alternating regular and agent-chosen
    augmentation every other batch.

    Even batches use the loader's regular augmentation; odd batches ask the
    (frozen) ``agent_sr`` to predict scale/rotation distributions from the
    un-augmented image, sample an augmentation per sample, reload the batch
    with those augmentations, and train on it.

    The original used Python 2 ``print`` statements and the removed
    ``.cuda(async=True)`` / ``Variable`` / ``loss.data[0]`` APIs, all of
    which are SyntaxErrors or removed in current Python/PyTorch; this
    version uses ``print()``, ``non_blocking=True`` and ``.item()``.

    Args:
        train_loader: iterable yielding (img_std, img, heatmap, c, s, r,
            grnd_pts, normalizer, img_index) batches; ``img_std`` is the
            un-augmented image, ``img`` the regularly-augmented one.
        hg: hourglass network; also runnable with ``agent_sr`` to predict
            augmentation distributions (``is_half_hg``/``is_aug`` flags).
        optimizer_hg: optimizer stepping ``hg``'s parameters.
        agent_sr: scale/rotation agent, kept in eval mode (not updated here).
        epoch: current epoch index (for logging only).
        visualizer: logger with a ``print_log`` method.
        opt: options object; ``opt.print_freq`` controls log frequency and
            ``opt`` is forwarded to ``load_batch_data``.

    Returns:
        (average loss over all batches, average PCKh over all batches).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses_hg_regular = AverageMeter()
    losses_hg_sr = AverageMeter()
    losses_hg = AverageMeter()
    pckhs_regular = AverageMeter()
    pckhs_sr = AverageMeter()
    pckhs = AverageMeter()

    # switch mode: train the hourglass, keep the augmentation agent frozen
    hg.train()
    agent_sr.eval()

    end = time.time()
    for i, (img_std, img, heatmap, c, s, r, grnd_pts, normalizer,
            img_index) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        if i % 2 == 0:
            # ---- regular augmentation batch ----
            print('regular augmentation')
            target = heatmap.cuda(non_blocking=True)
            out_reg = hg(img)

            # per-stack MSE loss
            loss_hg_regular = 0
            for per_out in out_reg:
                tmp_loss = (per_out - target)**2
                loss_hg_regular = (loss_hg_regular +
                                   tmp_loss.sum() / tmp_loss.numel())

            optimizer_hg.zero_grad()
            loss_hg_regular.backward()
            optimizer_hg.step()

            losses_hg_regular.update(loss_hg_regular.item())
            losses_hg.update(loss_hg_regular.item())
            pckh = Evaluation.accuracy_origin_res(out_reg[-1].data.cpu(), c, s,
                                                  [64, 64], grnd_pts,
                                                  normalizer, r)
            pckhs_regular.update(pckh[0])
            pckhs.update(pckh[0])
        else:
            # ---- agent-chosen augmentation batch ----
            print('agent augmentation')
            # predict per-sample scale/rotation distributions from the
            # un-augmented image
            pred_scale_distri, pred_rotation_distri = hg(img_std,
                                                         agent_sr,
                                                         is_half_hg=True,
                                                         is_aug=True)
            pred_scale_distri = softmax(pred_scale_distri)
            pred_rotation_distri = softmax(pred_rotation_distri)
            pred_scale_distri_numpy = pred_scale_distri.data.cpu().numpy()
            pred_rotation_distri_numpy = pred_rotation_distri.data.cpu().numpy(
            )

            # sample one scale and one rotation index per sample
            scale_index_list = []
            rotation_index_list = []
            for j in range(0, pred_scale_distri_numpy.shape[0]):
                tmp_scale_index = np.random.choice(
                    len(dataset.scale_means), 1,
                    p=pred_scale_distri_numpy[j])[0]
                tmp_rotation_index = np.random.choice(
                    len(dataset.rotation_means),
                    1,
                    p=pred_rotation_distri_numpy[j])[0]
                scale_index_list.append(tmp_scale_index)
                rotation_index_list.append(tmp_rotation_index)

            # reload the batch with the sampled augmentations
            img, heatmap, c, s, r,\
            grnd_pts, normalizer = load_batch_data(scale_index_list,
                                                   rotation_index_list,
                                                   img_index, opt,
                                                   separate_s_r=False)
            target = heatmap.cuda(non_blocking=True)
            out_reg = hg(img)

            # per-stack MSE loss on the agent-augmented batch
            loss_hg_sr = 0
            for per_out in out_reg:
                tmp_loss = (per_out - target)**2
                loss_hg_sr = loss_hg_sr + tmp_loss.sum() / tmp_loss.numel()

            optimizer_hg.zero_grad()
            loss_hg_sr.backward()
            optimizer_hg.step()

            losses_hg_sr.update(loss_hg_sr.item())
            losses_hg.update(loss_hg_sr.item())
            pckh = Evaluation.accuracy_origin_res(out_reg[-1].data.cpu(), c, s,
                                                  [64, 64], grnd_pts,
                                                  normalizer, r)
            pckhs_sr.update(pckh[0])
            pckhs.update(pckh[0])

        loss_dict = OrderedDict([('loss_hg_regular', losses_hg_regular.avg),
                                 ('loss_hg_sr', losses_hg_sr.avg),
                                 ('loss_hg', losses_hg.avg),
                                 ('pckhs_regular', pckhs_regular.avg),
                                 ('pckhs_sr', pckhs_sr.avg),
                                 ('pckh', pckhs.avg)])
        if i % opt.print_freq == 0 or i == len(train_loader) - 1:
            visualizer.print_log(epoch, i, len(train_loader), value1=loss_dict)

        batch_time.update(time.time() - end)
        end = time.time()

    return losses_hg.avg, pckhs.avg