Example #1
def initLatent(loader, model, Y, nViews, S, AVG = False):
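  # Initialize the latent 3D structure M (one ref.J x 3 skeleton per image).
  # AVG mode: rigidly align every view to the first view via horn87 and average.
  # Otherwise: score each view against a strided subsample of the template set Y
  # with an exponential kernel and keep the prediction of the best-scoring view.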
  model.eval()
  nIters = len(loader)
  N = loader.dataset.nImages 
  M = np.zeros((N, ref.J, 3))
  bar = Bar('==>', max=nIters)
  sum_sigma2 = 0
  cnt_sigma2 = 1
  for i, (input, target, meta) in enumerate(loader):
    output = (model(torch.autograd.Variable(input)).data).cpu().numpy()
    G = output.shape[0] // nViews
    output = output.reshape(G, nViews, ref.J, 3)
    if AVG:
      for g in range(G):
        id = int(meta[g * nViews, 1])
        for j in range(nViews):
          RR, tt = horn87(output[g, j].transpose(), output[g, 0].transpose())
          MM = (np.dot(RR, output[g, j].transpose())).transpose().copy()
          M[id] += MM.copy() / nViews
    else:
      for g in range(G):
        #assert meta[g * nViews, 0] > 1 + ref.eps
        p = np.zeros(nViews)
        sigma2 = 0.1
        for j in range(nViews):
          for kk in range(Y.shape[0] // S):
            k = kk * S
            d = Dis(Y[k], output[g, j])
            sum_sigma2 += d 
            cnt_sigma2 += 1
            p[j] += np.exp(- d / 2 / sigma2)
            
        id = int(meta[g * nViews, 1])
        M[id] = output[g, p.argmax()]
        
        if DEBUG and g == 0:
          print('M[id]', id, M[id], p.argmax())
          debugger = Debugger()
          for j in range(nViews):
            RR, tt = horn87(output[g, j].transpose(), output[g, p.argmax()].transpose())
            MM = (np.dot(RR, output[g, j].transpose())).transpose().copy()
            debugger.addPoint3D(MM, 'b')
            debugger.addImg(input[g * nViews + j].numpy().transpose(1, 2, 0), j)
          debugger.showAllImg()
          debugger.addPoint3D(M[id], 'r')
          debugger.show3D()
        
    
    Bar.suffix = 'Init    : [{0:3}/{1:3}] | Total: {total:} | ETA: {eta:} | Dis: {dis:.6f}'.format(i, nIters, total=bar.elapsed_td, eta=bar.eta_td, dis = sum_sigma2 / cnt_sigma2)
    bar.next()
  bar.finish()
  # print('mean sigma2', sum_sigma2 / cnt_sigma2)
  return M
Example #2
def check_logic():
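  # Sanity check: push H36M validation images through the pre-hourglass layers,
  # one encoder/decoder pass of the first hourglass and the first output head,
  # then visualize predicted vs. ground-truth 2D joints.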
  preproc = nn.ModuleList([model.conv1_, model.bn1, model.relu, model.r1, model.maxpool, model.r4, model.r5 ])
  hg = model.hourglass[0]
  lower_hg = getEncoder(hg) 
  data_loader = torch.utils.data.DataLoader(
      H36M(opts, 'val'),
      batch_size = 1, 
      shuffle = False,
      num_workers = int(ref.nThreads)
  )
  for k, (input, target) in enumerate(data_loader):
      if k > nSamples:
          break
      input_var = torch.autograd.Variable(input).float().cuda()
      for mod in preproc:
          input_var = mod(input_var)
      for mod in lower_hg:
          input_var = mod(input_var)    
      #decode  
      ups = input_var
      upper_hg = nn.ModuleList(getDecoder(hg))
      for mod in upper_hg:
          ups = mod(ups)
      Residual = model.Residual
      for j in range(nModules):
        ups = Residual[j](ups)
      lin_ = model.lin_
      ups = lin_[0](ups)
      tmpOut = model.tmpOut
      ups = tmpOut[0](ups)
      pred = eval.getPreds(ups.data.cpu().numpy()) * 4
      gt = eval.getPreds(target.cpu().numpy()) * 4
      # init = getPreds(input.numpy()[:, 3:])
      debugger = Debugger()
      img = (input[0].numpy()[:3].transpose(1, 2, 0)*256).astype(np.uint8).copy()
      print(img.shape)
      debugger.addImg(img)
      debugger.addPoint2D(pred[0], (255, 0, 0))
      debugger.addPoint2D(gt[0], (0, 0, 255))
      # debugger.addPoint2D(init[0], (0, 255, 0))
      debugger.showAllImg(pause = True)
Example #3
def step(split, epoch, opt, dataLoader, model, criterion, optimizer=None):
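    # One pass over dataLoader: stacked-hourglass forward, per-stack heatmap
    # loss and accuracy on the last stack; backprop in 'train' mode, otherwise
    # average with the flipped input and collect final predictions.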
    if split == 'train':
        model.train()
    else:
        model.eval()
    Loss, Acc = AverageMeter(), AverageMeter()
    preds = []

    nIters = len(dataLoader)
    bar = Bar('{}'.format(opt.expID), max=nIters)

    for i, (input, target, target2, meta) in enumerate(dataLoader):
        input_var = torch.autograd.Variable(input).float().cuda()
        target_var = torch.autograd.Variable(target).float().cuda()
        target_var2 = torch.autograd.Variable(target2).float().cuda()
        #print( input_var)
        output = model(input_var)
        #print(output[-1].size())
        if opt.DEBUG >= 2:
            gt = getPreds(target.cpu().numpy()) * 4
            pred = getPreds((output[opt.nStack - 1].data).cpu().numpy()) * 4
            debugger = Debugger()
            img = (input[0].numpy().transpose(1, 2, 0) * 256).astype(
                np.uint8).copy()
            debugger.addImg(img)
            debugger.addPoint2D(pred[0], (255, 0, 0))
            debugger.addPoint2D(gt[0], (0, 0, 255))
            debugger.showAllImg(pause=True)

        loss = criterion(output[0], target_var)
        for k in range(1, opt.nStack):
            loss += criterion(output[k], target_var)
        Loss.update(loss.data[0], input.size(0))
        Acc.update(
            Accuracy((output[opt.nStack - 1].data).cpu().numpy(),
                     (target_var.data).cpu().numpy()))
        if split == 'train':
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        else:
            input_ = input.cpu().numpy()
            input_[0] = Flip(input_[0]).copy()
            inputFlip_var = torch.autograd.Variable(
                torch.from_numpy(input_).view(1, input_.shape[1], ref.inputRes,
                                              ref.inputRes)).float().cuda(
                                                  opt.GPU)
            outputFlip = model(inputFlip_var)
            outputFlip = ShuffleLR(
                Flip((outputFlip[opt.nStack -
                                 1].data).cpu().numpy()[0])).reshape(
                                     1, ref.nJoints, 64, 64)
            output_ = (
                (output[opt.nStack - 1].data).cpu().numpy() + outputFlip) / 2
            preds.append(
                finalPreds(output_, meta['center'], meta['scale'],
                           meta['rotate'])[0])

        Bar.suffix = '{split} Epoch: [{0}][{1}/{2}]| Total: {total:} | ETA: {eta:} | Loss {loss.avg:.6f} | Acc {Acc.avg:.6f} ({Acc.val:.6f})'.format(
            epoch,
            i,
            nIters,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=Loss,
            Acc=Acc,
            split=split)
        bar.next()

    bar.finish()
    return {'Loss': Loss.avg, 'Acc': Acc.avg}, preds
Example #4
File: demo.py  Project: wecacuee/StarMap
def main():
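    # StarMap demo: crop and normalize the image, run the model, parse the
    # output heatmap into canonical coordinates, image-space predictions and
    # scores, align canonical to predicted points with horn87, and visualize
    # the 2D detections and the 3D point sets.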
    opt = opts().parse()
    model = torch.load(opt.loadModel)
    img = cv2.imread(opt.demo)
    s = max(img.shape[0], img.shape[1]) * 1.0
    c = np.array([img.shape[1] / 2., img.shape[0] / 2.])
    img = Crop(img, c, s, 0, ref.inputRes) / 256.
    input = torch.from_numpy(img.copy()).float()
    input = input.view(1, input.size(0), input.size(1), input.size(2))
    input_var = torch.autograd.Variable(input).float()
    if opt.GPU > -1:
        model = model.cuda(opt.GPU)
        input_var = input_var.cuda(opt.GPU)

    output = model(input_var)
    hm = output[-1].data.cpu().numpy()

    debugger = Debugger()
    img = (input[0].numpy().transpose(1, 2, 0) * 256).astype(np.uint8).copy()
    inp = img.copy()
    star = (cv2.resize(hm[0, 0], (ref.inputRes, ref.inputRes)) * 255)
    star[star > 255] = 255
    star[star < 0] = 0
    star = np.tile(star, (3, 1, 1)).transpose(1, 2, 0)
    trans = 0.8
    star = (trans * star + (1. - trans) * img).astype(np.uint8)

    ps = parseHeatmap(hm[0], thresh=0.1)
    canonical, pred, color, score = [], [], [], []
    for k in range(len(ps[0])):
        x, y, z = ((hm[0, 1:4, ps[0][k], ps[1][k]] + 0.5) *
                   ref.outputRes).astype(np.int32)
        dep = ((hm[0, 4, ps[0][k], ps[1][k]] + 0.5) * ref.outputRes).astype(
            np.int32)
        canonical.append([x, y, z])
        pred.append([ps[1][k], ref.outputRes - dep, ref.outputRes - ps[0][k]])
        score.append(hm[0, 0, ps[0][k], ps[1][k]])
        color.append((1.0 * x / ref.outputRes, 1.0 * y / ref.outputRes,
                      1.0 * z / ref.outputRes))
        cv2.circle(img, (ps[1][k] * 4, ps[0][k] * 4), 4, (255, 255, 255), -1)
        cv2.circle(img, (ps[1][k] * 4, ps[0][k] * 4), 2,
                   (int(z * 4), int(y * 4), int(x * 4)), -1)

    pred = np.array(pred).astype(np.float32)
    canonical = np.array(canonical).astype(np.float32)

    pointS = canonical * 1.0 / ref.outputRes
    pointT = pred * 1.0 / ref.outputRes
    R, t, s = horn87(pointS.transpose(), pointT.transpose(), score)

    rotated_pred = s * np.dot(
        R, canonical.transpose()).transpose() + t * ref.outputRes

    debugger.addImg(inp, 'inp')
    debugger.addImg(star, 'star')
    debugger.addImg(img, 'nms')
    debugger.addPoint3D(canonical / ref.outputRes - 0.5, c=color, marker='^')
    debugger.addPoint3D(pred / ref.outputRes - 0.5, c=color, marker='x')
    debugger.addPoint3D(rotated_pred / ref.outputRes - 0.5,
                        c=color,
                        marker='*')

    debugger.showAllImg(pause=True)
    debugger.show3D()
Example #5
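            # Fragment of an evaluation loop: x, y, z, pred, canonical, gt_view and
            # the per-class accumulators (acc10, acc30, num, err) are defined
            # earlier in the full script.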
            dep = ((hm[0, 4, ps[0][k], ps[1][k]] + 0.5) *
                   ref.outputRes).astype(np.int32)
            color.append((1.0 * x / ref.outputRes, 1.0 * y / ref.outputRes,
                          1.0 * z / ref.outputRes))
            cv2.circle(img, (ps[1][k] * 4, ps[0][k] * 4), 6,
                       (int(x * 4), int(y * 4), int(z * 4)), -1)
        debugger.addImg(img)
        debugger.addImg(star, 'star')
        debugger.addPoint3D(np.array((pred)) / 64. - 0.5, c=color, marker='x')
        rotated = Rotate(canonical, gt_view)
        rotated[:,
                2], rotated[:,
                            1] = -rotated[:, 1].copy(), -rotated[:, 2].copy()
        debugger.addPoint3D(np.array(rotated) / 64. - 0.5, c=color, marker='^')

        debugger.showAllImg(pause=False)
        debugger.show3D()
bar.finish()
accAll10 = 0.
accAll30 = 0.
numAll = 0.
mid = {}
err_all = []
for k, v in ref.ObjectNet3DClassName.items():
    accAll10 += acc10[v]
    accAll30 += acc30[v]
    numAll += num[v]
    if num[v] > 0:
        acc10[v] = 1.0 * acc10[v] / num[v]
        acc30[v] = 1.0 * acc30[v] / num[v]
        mid[v] = np.sort(np.array(err[v]))[len(err[v]) // 2]
Example #6
def step(split, epoch, opt, dataLoader, model, criterion, optimizer=None):
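    # Step for a stacked hourglass that may use a VAE bottleneck: sum per-stack
    # heatmap losses, add a weighted KL term when hgType == 'vae', and print
    # timing-based progress instead of using a progress Bar.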
    if split == 'train':
        model.train()
    else:
        model.eval()
    Loss, Acc = AverageMeter(), AverageMeter()

    nIters = len(dataLoader)
    # bar = Bar('==>', max=nIters)

    start_time = time.time()

    for i, (input, target) in enumerate(dataLoader):
        input_var = torch.autograd.Variable(input).float().cuda()
        target_var = torch.autograd.Variable(
            target.cuda(async=True)).float().cuda()
        if (model.hgType == 'vae'):
            output, latentspace = model(input_var)
        else:
            output = model(input_var)

        if opt.DEBUG >= 2:
            gt = getPreds(target.cpu().numpy()) * 4
            pred = getPreds((output[opt.nStack - 1].data).cpu().numpy()) * 4
            # init = getPreds(input.numpy()[:, 3:])
            debugger = Debugger()
            img = (input[0].numpy()[:3].transpose(1, 2, 0) * 256).astype(
                np.uint8).copy()
            debugger.addImg(img)
            debugger.addPoint2D(pred[0], (255, 0, 0))
            debugger.addPoint2D(gt[0], (0, 0, 255))
            # debugger.addPoint2D(init[0], (0, 255, 0))
            debugger.showAllImg(pause=True)
            #debugger.saveImg('debug/{}.png'.format(i))

        loss = criterion(output[0], target_var)
        for k in range(1, opt.nStack):
            loss += criterion(output[k], target_var)

        if (model.hgType == 'vae'):
            for k in range(0, opt.nStack):
                loss += ref.vaeloss_wt * _compute_kl(latentspace[k])

        Loss.update(loss.data[0], input.size(0))
        Acc.update(
            Accuracy((output[opt.nStack - 1].data).cpu().numpy(),
                     (target_var.data).cpu().numpy()))
        if split == 'train':
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Bar.suffix = '{split} Epoch: [{0}][{1}/{2}]| Total: {total:} | ETA: {eta:} | Loss {loss.avg:.6f} | Acc {Acc.avg:.6f} ({Acc.val:.6f})'.format(epoch, i, nIters, total=bar.elapsed_td, eta=bar.eta_td, loss=Loss, Acc=Acc, split = split)
        # bar.next()
        curr_time = time.time()
        print(
            '{split} Epoch: [{0}][{1}/{2}]| Total: {total:f} | ETA: {eta:f} | Loss {loss.avg:.6f} | Acc {Acc.avg:.6f} ({Acc.val:.6f})'
            .format(epoch,
                    i,
                    nIters,
                    total=curr_time - start_time,
                    eta=(curr_time - start_time) * (nIters - i + 1) / (i + 1),
                    loss=Loss,
                    Acc=Acc,
                    split=split))

    # bar.finish()
    return Loss.avg, Acc.avg
def step(split, epoch, opt, dataLoader, model, criterion, optimizer=None):
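    # Step for the noise-conditioned multi-sample model: draw numNoise latent
    # noise maps, pick the maximum-expected-utility sample and target, train
    # with DiscoLoss, and flip-average predictions during evaluation.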
    if split == 'train':
        model.train()
    else:
        model.eval()
    Loss, Acc = AverageMeter(), AverageMeter()
    preds = []

    nIters = len(dataLoader)
    bar = Bar('{}'.format(opt.expID), max=nIters)

    for i, (input, targets, action, meta) in enumerate(dataLoader):
        input_var = torch.autograd.Variable(input).float().cuda(opt.GPU)
        target_var = []
        for t in range(len(targets)):
            target_var.append(
                torch.autograd.Variable(targets[t]).float().cuda(opt.GPU))
        z = []
        for k in range(opt.numNoise):
            noise = torch.autograd.Variable(
                torch.randn((input_var.shape[0], 1, 64, 64))).cuda(opt.GPU)
            z.append(noise)

        output, samples = model(input_var, z, action)
        pred_sample = maximumExpectedUtility(samples, criterion)
        target = maximumExpectedUtility(target_var, criterion)

        if opt.DEBUG >= 2:
            gt = getPreds(target.cpu().numpy()) * 4
            pred = getPreds((pred_sample.data).cpu().numpy()) * 4
            debugger = Debugger()
            img = (input[0].numpy().transpose(1, 2, 0) * 256).astype(
                np.uint8).copy()
            debugger.addImg(img)
            debugger.addPoint2D(pred[0], (255, 0, 0))
            debugger.addPoint2D(gt[0], (0, 0, 255))
            debugger.showAllImg(pause=True)

        loss = DiscoLoss(output, samples, target_var, criterion)

        Loss.update(loss.item(), input.size(0))
        Acc.update(
            Accuracy((pred_sample.data).cpu().numpy(),
                     (target.data).cpu().numpy()))
        if split == 'train':
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        else:
            input_ = input.cpu().numpy()
            input_[0] = Flip(input_[0]).copy()
            inputFlip_var = torch.autograd.Variable(
                torch.from_numpy(input_).view(1, input_.shape[1], ref.inputRes,
                                              ref.inputRes)).float().cuda(
                                                  opt.GPU)
            _, samplesFlip = model(inputFlip_var, z, action)
            pred_sample_flip = maximumExpectedUtility(samplesFlip, criterion)
            outputFlip = ShuffleLR(
                Flip((pred_sample_flip.data).cpu().numpy()[0])).reshape(
                    1, ref.nJoints, ref.outputRes, ref.outputRes)
            output_ = old_div(((pred_sample.data).cpu().numpy() + outputFlip),
                              2)
            preds.append(
                finalPreds(output_, meta['center'], meta['scale'],
                           meta['rotate'])[0])

        Bar.suffix = '{split} Epoch: [{0}][{1}/{2}]| Total: {total:} | ETA: {eta:} | Loss {loss.avg:.6f} | Acc {Acc.avg:.6f} ({Acc.val:.6f})'.format(
            epoch,
            i,
            nIters,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=Loss,
            Acc=Acc,
            split=split)
        bar.next()

    bar.finish()
    return {'Loss': Loss.avg, 'Acc': Acc.avg}, preds
def step(split, epoch, opt, dataLoader, model, criterion, optimizer=None):
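    # StarMap training/validation step: weight the regression channels of the
    # mask, sum per-stack masked losses, track the star-channel MSE, flip-average
    # at test time, and optionally emit heatmap/embedding debug visualizations.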
    if split == 'train':
        model.train()
    else:
        model.eval()
    preds = []
    Loss, LossStar = AverageMeter(), AverageMeter()

    nIters = len(dataLoader)
    bar = Bar('{}'.format(opt.expID), max=nIters)

    for i, (input, target, mask) in enumerate(dataLoader):
        if mask.size(1) > 1:
            mask[:, 1:, :, :] *= ref.outputRes * (opt.regWeight**0.5)
        if opt.GPU > -1:
            input_var = torch.autograd.Variable(input.cuda(
                opt.GPU, async=True)).float().cuda(opt.GPU)
            target_var = torch.autograd.Variable(
                target.cuda(opt.GPU, async=True)).float().cuda(opt.GPU)
            mask_var = torch.autograd.Variable(mask.cuda(
                opt.GPU, async=True)).float().cuda(opt.GPU)
        else:
            input_var = torch.autograd.Variable(input).float()
            target_var = torch.autograd.Variable(target).float()
            mask_var = torch.autograd.Variable(mask).float()
        output = model(input_var)

        output_pred = output[opt.nStack - 1].data.cpu().numpy().copy()
        for k in range(opt.nStack):
            output[k] = mask_var * output[k]
        target_var = mask_var * target_var

        loss = 0
        for k in range(opt.nStack):
            loss += criterion(output[k], target_var)

        LossStar.update((
            (target.float()[:, 0, :, :] -
             output[opt.nStack - 1].cpu().data.float()[:, 0, :, :])**2).mean())
        Loss.update(loss.data[0], input.size(0))

        if split == 'train':
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        else:
            if opt.test:
                out = {}
                input_ = input.cpu().numpy()
                input_[0] = Flip(input_[0]).copy()
                inputFlip_var = torch.autograd.Variable(
                    torch.from_numpy(input_).view(
                        1, input_.shape[1], ref.inputRes,
                        ref.inputRes)).float().cuda(opt.GPU)
                outputFlip = model(inputFlip_var)
                output_flip = outputFlip[opt.nStack - 1].data.cpu().numpy()
                output_flip[0] = Flip(output_flip[0])
                if not (opt.task == 'star'):
                    output_flip[0, 1, :, :] = -output_flip[0, 1, :, :]
                output_pred = (output_pred + output_flip) / 2.0
                out['map'] = output_pred
                preds.append(out)

        Bar.suffix = '{split:5} Epoch: [{0}][{1}/{2}]| Total: {total:} | ETA: {eta:} | Loss {loss.avg:.6f} | LossStar {lossStar.avg:.6f}'.format(
            epoch,
            i,
            nIters,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=Loss,
            lossStar=LossStar,
            split=split)
        bar.next()

        if opt.DEBUG > 1 or (opt.DEBUG == 1 and i % max(nIters // 200, 1) == 0):
            for j in range(input.size(0)):
                debugger = Debugger()
                img = (input[j].numpy()[:3].transpose(1, 2, 0) * 256).astype(
                    np.uint8).copy()
                img2 = img.copy().astype(np.float32)
                img3 = img.copy().astype(np.float32)
                imgMNS = img.copy()
                out = (cv2.resize(
                    ((output[opt.nStack - 1][j, 0].data).cpu().numpy()).copy(),
                    (ref.inputRes, ref.inputRes)) * 256)
                gtmap = (cv2.resize((target[j, 0].cpu().numpy()).copy(),
                                    (ref.inputRes, ref.inputRes)) * 256)
                out[out < 0] = 0
                out[out > 255] = 255
                img2[:, :, 0] = (img2[:, :, 0] + out)
                img2[img2 > 255] = 255
                img3[:, :, 2] = (img3[:, :, 2] + gtmap)
                img3[img3 > 255] = 255
                gtmap[gtmap > 255] = 255
                idx = i * input.size(0) + j if opt.DEBUG == 1 else 0
                img2, out, gtmap, img3 = img2.astype(np.uint8), out.astype(
                    np.uint8), gtmap.astype(np.uint8), img3.astype(np.uint8)

                if 'emb' in opt.task:
                    gt, pred = [], []
                    ps = parseHeatmap(target[j].numpy())
                    print('ps', ps)
                    for k in range(len(ps[0])):
                        print('target', k, target[j, 1:4, ps[0][k],
                                                  ps[1][k]].numpy())
                        x, y, z = (
                            (target[j, 1:4, ps[0][k], ps[1][k]].numpy() + 0.5)
                            * 255).astype(np.int32)
                        gt.append(target[j, 1:4, ps[0][k], ps[1][k]].numpy())
                        cv2.circle(imgMNS, (ps[1][k] * 4, ps[0][k] * 4), 6,
                                   (int(x), int(y), int(z)), -1)

                    ps = parseHeatmap(output_pred[j])
                    for k in range(len(ps[0])):
                        print('pred', k, output_pred[j, 1:4, ps[0][k],
                                                     ps[1][k]])
                        x, y, z = (
                            (output_pred[j, 1:4, ps[0][k], ps[1][k]] + 0.5) *
                            255).astype(np.int32)
                        pred.append(output_pred[j, 1:4, ps[0][k], ps[1][k]])
                        cv2.circle(imgMNS, (ps[1][k] * 4, ps[0][k] * 4), 4,
                                   (255, 255, 255), -1)
                        cv2.circle(imgMNS, (ps[1][k] * 4, ps[0][k] * 4), 2,
                                   (int(x), int(y), int(z)), -1)
                    debugger.addPoint3D(np.array(gt), c='auto', marker='o')
                    #debugger.addPoint3D(np.array(pred), c = 'auto', marker = 'x')
                debugger.addImg(imgMNS, '{}_mns'.format(idx))
                debugger.addImg(out, '{}_out'.format(idx))
                debugger.addImg(gtmap, '{}_gt'.format(idx))
                debugger.addImg(img, '{}_img'.format(idx))
                debugger.addImg(img2, '{}_img2'.format(idx))
                debugger.addImg(img3, '{}_img3'.format(idx))
                if opt.DEBUG == 1:
                    debugger.saveAllImg(path=opt.debugPath)
                else:
                    debugger.showAllImg(pause=not ('emb' in opt.task))
                if 'emb' in opt.task:
                    debugger.show3D()

    bar.finish()
    return {'Loss': Loss.avg, 'LossStar': LossStar.avg}, preds