Code example #1
def validate(cfg, val_loader, model, metric=None, log=None):
    data_time = AverageMeter()
    batch_time = AverageMeter()

    # switch to evaluate mode
    model.eval()

    num_samples = len(val_loader.dataset)
    all_preds = []

    bar = Bar('Processing', max=len(val_loader))
    with torch.no_grad():
        end = time.time()
        for i, batch in enumerate(val_loader):

            size = batch['weight'].size(0)

            # measure data loading time (once per batch; the original
            # updated the meter twice, double-counting the load time)
            data_time.update(time.time() - end)

            # compute output
            model.set_batch(batch)
            model.forward()
            # debug, print intermediate result
            if cfg.DEBUG:
                debug(model.outputs, batch)

            preds = model.get_preds()
            all_preds.append(preds)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            suffix = '({batch}/{size}) Data:{data:.1f}s Batch:{bt:.1f}s Total:{total:} ETA:{eta:} '.format(
                        batch=i + 1,
                        size=len(val_loader),
                        data=data_time.val,
                        bt=batch_time.avg,
                        total=bar.elapsed_td,
                        eta=bar.eta_td)

            if cfg.IS_VALID:
                metric_ = model.eval_result()
                metric_['loss'] = model.loss.item()
                metric.update(metric_, size)
                for name in metric.names():
                    suffix += '{}: {:.4f} '.format(name, metric[name].avg)

            bar.suffix = suffix
            bar.next()

        if log:
            log.info(bar.suffix)
        bar.finish()

    # `reduce` is functools.reduce and `combine` is a project-level helper
    # that merges per-batch prediction structures (both assumed to be
    # defined elsewhere in the module).
    return reduce(combine, all_preds)

def train(cfg, train_loader, model, metric, log):
    data_time = AverageMeter()
    batch_time = AverageMeter()

    # switch to train mode
    model.train()
    end = time.time()
    bar = Bar('Processing', max=len(train_loader))
    model.collections = []
    for i, batch in enumerate(train_loader):
        size = batch['weight'].size(0)
        # measure data loading time
        data_time.update(time.time() - end)

        model.set_batch(batch)
        model.step()

        # debug, print intermediate result
        if cfg.DEBUG:
            model.debug()

        # calculate the batch result
        model.get_batch_result(type='train')

        # store this batch's result for epoch-level evaluation
        model.collect_batch_result()

        # measure accuracy and record loss
        metric_ = model.eval_batch_result()
        metric.update(metric_, size)

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        suffix = '({batch}/{size}) Data:{data:.1f}s Batch:{bt:.1f}s Total:{total:} ETA:{eta:} '.format(
            batch=i + 1,
            size=len(train_loader),
            data=data_time.val,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td)
        for name in metric_.keys():
            suffix += '{}: {:.4f} '.format(name, metric[name].avg)
        bar.suffix = suffix
        bar.next()

    log.info(bar.suffix)
    bar.finish()

    model.get_epoch_result()
    metric_ = model.eval_epoch_result()
    metric.update(metric_, 1)

    print("".join(["%s : %.4f" % (key, metric_[key]) for key in metric_]))
    return model.epoch_result
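
Every one of these loops leans on an AverageMeter with .val/.avg fields, which the snippets never define. A minimal sketch of the conventional implementation (an assumption; the projects' own versions may differ slightly):

class AverageMeter(object):
    """Track the most recent value and a running (weighted) average."""

    def __init__(self):
        self.val = 0.0    # last value seen
        self.sum = 0.0    # weighted sum of all values
        self.count = 0    # total weight, e.g. number of samples
        self.avg = 0.0    # running average

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
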
Code example #3
def train(
    train_loader,
    model,
    criterion,
    optimizer,
    lr_init=None,
    lr_now=None,
    glob_step=None,
    lr_decay=None,
    gamma=None,
    max_norm=True,
):
    losses = utils.AverageMeter()

    model.train()

    start = time.time()
    batch_time = 0
    bar = Bar(">>>", fill=">", max=len(train_loader))

    for i, (inps, tars) in enumerate(train_loader):
        glob_step += 1
        if glob_step % lr_decay == 0 or glob_step == 1:
            lr_now = utils.lr_decay(optimizer, glob_step, lr_init, lr_decay,
                                    gamma)
        # Variable is a deprecated no-op wrapper in modern PyTorch; plain
        # tensors would do
        inputs = Variable(inps.cuda())
        targets = Variable(tars.cuda())

        outputs = model(inputs)

        # calculate loss
        optimizer.zero_grad()
        loss = criterion(outputs, targets)
        losses.update(loss.item(), inputs.size(0))
        loss.backward()
        if max_norm:
            # `max_norm` acts as an on/off flag here; the clip threshold is
            # hard-coded to 1. clip_grad_norm_ is the non-deprecated spelling.
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()

        # update summary: 100 batches are timed together, so the
        # `batch_time * 10.0` below works out to milliseconds per batch
        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()

        bar.suffix = "({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.4f}".format(
            batch=i + 1,
            size=len(train_loader),
            batchtime=batch_time * 10.0,
            ttl=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
        )
        bar.next()

    bar.finish()
    return glob_step, lr_now, losses.avg
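
utils.lr_decay is called but never shown. In this code lineage it is typically a step-wise exponential schedule applied directly to the optimizer's parameter groups; a sketch under that assumption (the actual helper may differ):

def lr_decay(optimizer, step, lr_init, decay_step, gamma):
    """lr = lr_init * gamma ** (step / decay_step), applied in place."""
    lr = lr_init * gamma ** (step / decay_step)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
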
Code example #4
def test(test_loader, model, criterion, joint_num, procrustes=False):
    losses = utils.AverageMeter()

    model.eval()
   
    all_dist = []
    start = time.time()
    batch_time = 0
    bar = Bar('>>>', fill='>', max=len(test_loader))

    for i, data in enumerate(test_loader):
        joint2d, truth = data['joint2d'], data['truth']

        inputs = Variable(joint2d.cuda().type(torch.cuda.FloatTensor))
        targets = Variable(truth.cuda().type(torch.cuda.FloatTensor))

        outputs = model(inputs)

        # flatten to (N, joint_num * 3) for the loss and the error metric
        outputs = torch.reshape(outputs, (-1, joint_num * 3))
        targets = torch.reshape(targets, (-1, joint_num * 3))

        # calculate loss
        loss = criterion(outputs, targets)
        losses.update(loss.item(), inputs.size(0))
    
        sqerr = (outputs - targets) ** 2
        # the original allocated joint_num + 1 columns and looped one joint
        # too far, leaving a permanently zero column that skewed the mean
        distance = np.zeros((sqerr.shape[0], joint_num))
        dist_idx = 0
        for k in np.arange(0, joint_num * 3, 3):
            distance[:, dist_idx] = torch.sqrt(torch.sum(sqerr[:, k:k + 3], dim=1)).to('cpu').detach().numpy()
            dist_idx += 1
        all_dist.append(distance)
            
        # update summary
        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()

        bar.suffix = '({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.6f}' \
            .format(batch=i + 1,
                    size=len(test_loader),
                    batchtime=batch_time * 10.0,
                    ttl=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg)
        bar.next()
        
    all_dist = np.vstack(all_dist)
    ttl_err = np.mean(all_dist)
    bar.finish()
    print(">>> error: {} <<<".format(ttl_err))
    
    return targets, losses.avg, ttl_err
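
The per-joint distance loop above walks the flat (N, joint_num * 3) error tensor three columns at a time. The same computation, fully vectorized (same tensor shapes assumed):

# reshape the flat error to (N, joint_num, 3), then take per-joint norms
diff = (outputs - targets).reshape(-1, joint_num, 3)
distance = diff.norm(dim=2).cpu().detach().numpy()  # (N, joint_num)
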
Code example #5
def train(train_loader, model, criterion, optimizer, joint_num,
          lr_init=None, lr_now=None, glob_step=None, lr_decay=None, gamma=None,
          max_norm=True):
    losses = utils.AverageMeter()

    model.train()
 
    start = time.time()
    batch_time = 0
    bar = Bar('>>>', fill='>', max=len(train_loader))
    
    for i, data in enumerate(train_loader):
        # step-wise learning-rate decay
        glob_step += 1
        if glob_step % lr_decay == 0 or glob_step == 1:
            lr_now = utils.lr_decay(optimizer, glob_step, lr_init, lr_decay, gamma)
        
        joint2d, truth = data['joint2d'], data['truth']
        inputs = Variable(joint2d.cuda().type(torch.cuda.FloatTensor))
        targets = Variable(truth.cuda().type(torch.cuda.FloatTensor))

        outputs = model(inputs)
        outputs = torch.reshape(outputs, (-1, joint_num * 3))
        targets = torch.reshape(targets, (-1, joint_num * 3))

        # calculate loss
        optimizer.zero_grad()
        loss = criterion(outputs, targets)

        losses.update(loss.item(), inputs.size(0))
        loss.backward()
        
        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()

        # update summary
        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()

        bar.suffix = '({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.4f}' \
            .format(batch=i + 1,
                    size=len(train_loader),
                    batchtime=batch_time * 10.0,
                    ttl=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg)
        bar.next()

    bar.finish()
    return glob_step, lr_now, losses.avg
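
train and test deliberately thread glob_step and lr_now back to the caller so the schedule survives across epochs. A minimal, hypothetical driver (the epoch count and schedule constants are illustrative, not from the source):

glob_step, lr_init = 0, 1e-3
lr_now = lr_init
for epoch in range(25):
    glob_step, lr_now, train_loss = train(
        train_loader, model, criterion, optimizer, joint_num,
        lr_init=lr_init, lr_now=lr_now, glob_step=glob_step,
        lr_decay=100000, gamma=0.96, max_norm=True)
    _, test_loss, err = test(test_loader, model, criterion, joint_num)
    print('epoch %3d | train %.4f | test %.4f | mpjpe %.2f'
          % (epoch, train_loss, test_loss, err))
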
Code example #6
def train(cfg, train_loader, model, metric, log):
    data_time = AverageMeter()
    batch_time = AverageMeter()

    # switch to train mode
    model.train()
    end = time.time()
    bar = Bar('Processing', max=len(train_loader))
    for i, batch in enumerate(train_loader):
        size = batch['weight'].size(0)
        # measure data loading time
        data_time.update(time.time() - end)

        model.set_batch(batch)
        model.step()

        # debug, print intermediate result
        if cfg.DEBUG:
            debug(model.outputs, batch)

        # measure accuracy and record loss
        metric_ = model.eval_result()
        metric_['loss'] = model.loss.item() 
        metric.update(metric_, size)

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        suffix = '({batch}/{size}) Data:{data:.1f}s Batch:{bt:.1f}s Total:{total:} ETA:{eta:} '.format(
                    batch=i + 1,
                    size=len(train_loader),
                    data=data_time.val,
                    bt=batch_time.avg,
                    total=bar.elapsed_td,
                    eta=bar.eta_td)
        for name in metric.names():
            suffix += '{}: {:.4f} '.format(name, metric[name].avg)
        bar.suffix = suffix
        bar.next()
    log.info(bar.suffix)
    bar.finish()

def validate(cfg, val_loader, model, metric=None, log=None):
    data_time = AverageMeter()
    batch_time = AverageMeter()

    # switch to evaluate mode
    model.eval()

    num_samples = len(val_loader.dataset)
    model.collections = []

    bar = Bar('Processing', max=len(val_loader))
    with torch.no_grad():
        end = time.time()
        for i, batch in enumerate(val_loader):

            size = batch['weight'].size(0)

            # measure data loading time (once per batch; the original
            # updated the meter twice, double-counting the load time)
            data_time.update(time.time() - end)

            # compute output
            model.set_batch(batch)
            model.forward()
            # debug, print intermediate result
            if cfg.DEBUG:
                model.debug()

            model.get_batch_result(type='valid')

            # store this batch's result for epoch-level evaluation
            model.collect_batch_result()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            suffix = '({batch}/{size}) Data:{data:.1f}s Batch:{bt:.1f}s Total:{total:} ETA:{eta:} '.format(
                batch=i + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td)

            if cfg.IS_VALID:
                metric_ = model.eval_batch_result()
                metric.update(metric_, size)
                for name in metric_.keys():
                    suffix += '{}: {:.4f} '.format(name, metric[name].avg)

            bar.suffix = suffix
            bar.next()

        if log:
            log.info(bar.suffix)
        bar.finish()

    model.get_epoch_result()
    metric_ = model.eval_epoch_result()
    metric.update(metric_, 1)
    print("".join(["%s : %.4f" % (key, metric_[key]) for key in metric_]))
    return model.epoch_result
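
Both loops assume a metric container exposing update(dict, n), names(), and item access to per-name meters with an .avg field. It is not defined in any snippet; a minimal sketch consistent with that interface (hypothetical):

class MetricCollection(object):
    """A dict of AverageMeters keyed by metric name."""

    def __init__(self):
        self._meters = {}

    def update(self, values, n=1):
        # `values` is a dict such as {'loss': 0.12, 'acc': 0.98}
        for name, value in values.items():
            self._meters.setdefault(name, AverageMeter()).update(value, n)

    def names(self):
        return list(self._meters)

    def __getitem__(self, name):
        return self._meters[name]
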
Code example #8
def validate(val_loader, model, criterion, num_classes, debug=False, flip=True):
	batch_time = AverageMeter()
	data_time = AverageMeter()
	losses = AverageMeter()
	acces = AverageMeter()
	distes = AverageMeter()

	# predictions
	predictions = torch.Tensor(len(val_loader.dataset), num_classes, 2)

	# switch to evaluate mode
	model.eval()

	gt_win, pred_win = None, None
	end = time.time()
	bar = Bar('Processing', max=len(val_loader))
	for i, (inputs, target) in enumerate(val_loader):
		# measure data loading time
		data_time.update(time.time() - end)

		target = target.cuda(non_blocking=True)  # `async` is a reserved word in Python 3
		input_var = inputs.cuda()
		target_var = target

		# compute output; validation needs no gradients
		with torch.no_grad():
			output = model(input_var)
			score_map = output[-1].data.cpu()
			if flip:
				# the removed `volatile=True` flag is subsumed by no_grad()
				flip_input_var = torch.from_numpy(
						fliplr(inputs.clone().numpy())).float().cuda()
				flip_output_var = model(flip_input_var)
				flip_output = flip_back(flip_output_var[-1].data.cpu())
				score_map += flip_output


		loss = 0
		for o in output:
			loss += criterion(o[:, :21, :, :], target_var)
		# `idx` is assumed to be a module-level list of evaluated joint indices
		acc, dist = accuracy(score_map[:, :21, :, :].contiguous(), target.cpu(), idx)

		# generate predictions; the index-mapped copy into `predictions`
		# is disabled, so the raw score maps are used directly
		preds = score_map
		if debug:
			gt_batch_img = batch_with_heatmap(inputs, target)
			pred_batch_img = batch_with_heatmap(inputs, score_map)

			sz = tuple([x * 4 for x in gt_batch_img[:,:,0].shape])
			gt_batch_img = cv2.resize(gt_batch_img,(sz[1],sz[0]),)
			pred_batch_img = cv2.resize(pred_batch_img, (sz[1],sz[0]))

			if not gt_win or not pred_win:
				# plt.imshow(gt_batch_img)
				plt.subplot(121)
				gt_win = plt.imshow(gt_batch_img[:,:,::-1])
				plt.subplot(122)
				pred_win = plt.imshow(pred_batch_img[:,:,::-1])
			else:
				gt_win.set_data(gt_batch_img)
				pred_win.set_data(pred_batch_img)

			plt.savefig("./tmp/" + str(i) + ".png", dpi=1000, bbox_inches='tight')

		# measure accuracy and record loss
		losses.update(loss.item(), inputs.size(0))
		acces.update(acc[0], inputs.size(0))
		distes.update(dist[0], inputs.size(0))

		# measure elapsed time
		batch_time.update(time.time() - end)
		end = time.time()

		# plot progress
		bar.suffix = '({batch}/{size}) Data: {data:.1f}s | Batch: {bt:.1f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f} | Dist {dist:.3f}'.format(
					batch=i + 1,
					size=len(val_loader),
					data=data_time.val,
					bt=batch_time.avg,
					total=bar.elapsed_td,
					eta=bar.eta_td,
					loss=losses.avg,
					acc=acces.avg,
					dist=distes.avg
					)
		bar.next()

	bar.finish()
	return losses.avg, acces.avg, predictions
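
fliplr and flip_back implement test-time flip augmentation: mirror the input, run the network again, mirror the predicted heatmaps back, and swap symmetric joint channels before summing with the unflipped score maps. A sketch of the usual pair (the left/right pairing in matched_parts is dataset-specific and purely illustrative here):

import numpy as np
import torch

def fliplr(x):
    """Mirror a batch of images (N, C, H, W) along the width axis."""
    return x[:, :, :, ::-1].copy()

def flip_back(heatmaps, matched_parts=((0, 5), (1, 4), (2, 3))):
    """Mirror heatmaps back and swap each left/right joint channel pair."""
    flipped = torch.from_numpy(heatmaps.numpy()[:, :, :, ::-1].copy())
    for left, right in matched_parts:
        tmp = flipped[:, left].clone()
        flipped[:, left] = flipped[:, right]
        flipped[:, right] = tmp
    return flipped
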
Code example #9
def train(train_loader, model, criterion, optimizer, debug=False, flip=True):
	batch_time = AverageMeter()
	data_time = AverageMeter()
	losses = AverageMeter()
	acces = AverageMeter()
	distes = AverageMeter()

	# switch to train mode
	model.train()

	end = time.time()

	gt_win, pred_win = None, None
	bar = Bar('Processing', max=len(train_loader))
	for i, (inputs, target) in enumerate(train_loader):
		# measure data loading time
		data_time.update(time.time() - end)

		input_var = inputs.cuda()
		target_var = target.cuda(non_blocking=True)  # `async` is a reserved word in Python 3

		# compute output
		output = model(input_var)
		score_map = output[-1].data.cpu()
		loss = criterion(output[0], target_var)
		for j in range(1, len(output)):
			loss += criterion(output[j], target_var)
		acc, dist = accuracy(score_map, target, idx)
		if debug: # visualize groundtruth and predictions
			gt_batch_img = batch_with_heatmap(inputs, target)
			pred_batch_img = batch_with_heatmap(inputs, score_map)
			if not gt_win or not pred_win:
				ax1 = plt.subplot(121)
				ax1.title.set_text('Groundtruth')
				gt_win = plt.imshow(gt_batch_img[:,:,::-1])
				ax2 = plt.subplot(122)
				ax2.title.set_text('Prediction')
				pred_win = plt.imshow(pred_batch_img[:,:,::-1])
			else:
				gt_win.set_data(gt_batch_img)
				pred_win.set_data(pred_batch_img)
			plt.draw()  # plt.plot() with no arguments was a no-op
			plt.pause(.5)
			

		# measure accuracy and record loss
		losses.update(loss.item(), inputs.size(0))
		acces.update(acc[0], inputs.size(0))
		distes.update(dist[0], inputs.size(0))

		# compute gradient and do SGD step
		optimizer.zero_grad()
		loss.backward()
		optimizer.step()

		# measure elapsed time
		batch_time.update(time.time() - end)
		end = time.time()

		# plot progress
		bar.suffix = '({batch}/{size}) Data: {data:.1f}s | Batch: {bt:.1f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f} | Dist {dist:.3f}'.format(
					batch=i + 1,
					size=len(train_loader),
					data=data_time.val,
					bt=batch_time.avg,
					total=bar.elapsed_td,
					eta=bar.eta_td,
					loss=losses.avg,
					acc=acces.avg,
					dist=distes.avg
					)
		bar.next()

	bar.finish()
	return losses.avg, acces.avg
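
The loss block above applies the criterion to every intermediate output of the stacked network (intermediate supervision), which collapses to a single expression:

# equivalent intermediate-supervision loss over all stacked outputs
loss = sum(criterion(o, target_var) for o in output)
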
Code example #10
def test(test_loader, model, criterion, stat_3d, procrustes=False):
    losses = utils.AverageMeter()

    model.eval()

    all_dist = []
    start = time.time()
    batch_time = 0
    bar = Bar(">>>", fill=">", max=len(test_loader))

    for i, (inps, tars) in enumerate(test_loader):
        inputs = Variable(inps.cuda())
        targets = Variable(tars.cuda())

        outputs = model(inputs)

        # calculate loss
        outputs_coord = outputs
        loss = criterion(outputs_coord, targets)

        losses.update(loss.item(), inputs.size(0))

        tars = targets

        # calculate accuracy
        targets_unnorm = data_process.unNormalizeData(tars.data.cpu().numpy(),
                                                      stat_3d["mean"],
                                                      stat_3d["std"],
                                                      stat_3d["dim_use"])
        outputs_unnorm = data_process.unNormalizeData(
            outputs.data.cpu().numpy(),
            stat_3d["mean"],
            stat_3d["std"],
            stat_3d["dim_use"],
        )

        # remove ignored dimensions
        dim_use = np.hstack((np.arange(3), stat_3d["dim_use"]))

        outputs_use = outputs_unnorm[:, dim_use]
        targets_use = targets_unnorm[:, dim_use]

        if procrustes:
            for ba in range(inps.size(0)):
                gt = targets_use[ba].reshape(-1, 3)
                out = outputs_use[ba].reshape(-1, 3)
                _, Z, T, b, c = get_transformation(gt, out, True)
                out = (b * out.dot(T)) + c
                outputs_use[ba, :] = out.reshape(1, 51)

        sqerr = (outputs_use - targets_use)**2

        distance = np.zeros((sqerr.shape[0], 17))
        dist_idx = 0
        for k in np.arange(0, 17 * 3, 3):
            distance[:, dist_idx] = np.sqrt(np.sum(sqerr[:, k:k + 3], axis=1))
            dist_idx += 1
        all_dist.append(distance)

        # update summary
        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()

        bar.suffix = "({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.6f}".format(
            batch=i + 1,
            size=len(test_loader),
            batchtime=batch_time * 10.0,
            ttl=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
        )
        bar.next()

    all_dist = np.vstack(all_dist)
    joint_err = np.mean(all_dist, axis=0)
    ttl_err = np.mean(all_dist)
    bar.finish()
    print(">>> error: {} <<<".format(ttl_err))
    return losses.avg, ttl_err
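
With procrustes=True each prediction is rigidly aligned to its ground truth (rotation, scale, translation) before the error is measured. get_transformation is not shown; a sketch of the classical similarity-Procrustes solution that matches the call site's (d, Z, T, b, c) unpacking (the projects' helper may differ in detail, e.g. reflection handling):

import numpy as np

def get_transformation(X, Y, compute_optimal_scale=False):
    """Least-squares similarity transform mapping Y onto X.

    Returns (d, Z, T, b, c): residual d, transformed points Z, rotation T,
    scale b, and translation c, such that Z ~= b * Y.dot(T) + c.
    """
    muX, muY = X.mean(0), Y.mean(0)
    X0, Y0 = X - muX, Y - muY
    normX = np.sqrt((X0 ** 2).sum())
    normY = np.sqrt((Y0 ** 2).sum())
    X0, Y0 = X0 / normX, Y0 / normY

    U, s, Vt = np.linalg.svd(X0.T.dot(Y0), full_matrices=False)
    T = Vt.T.dot(U.T)          # optimal rotation (may contain a reflection)
    traceTA = s.sum()

    if compute_optimal_scale:
        b = traceTA * normX / normY
        d = 1 - traceTA ** 2
        Z = normX * traceTA * Y0.dot(T) + muX
    else:
        b = 1.0
        d = 1 + (normY / normX) ** 2 - 2 * traceTA * normY / normX
        Z = normY * Y0.dot(T) + muX

    c = muX - b * muY.dot(T)
    return d, Z, T, b, c
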
Code example #11
def test(test_loader,
         model,
         criterion,
         stat_3d,
         device,
         procrustes=False,
         pck_thresholds=[50, 100, 150, 200, 250],
         noise_fun=lambda x: x,
         refine_dic=None,
         refine_coeff_fun=None,
         refine_extra_kwargs={},
         cache_prefix=None,
         visualize=False):
    model.eval()

    all_outputs = []
    all_targets = []
    losses = utils.AverageMeter()
    for i, (inps, tars) in enumerate(test_loader):
        inps_noise = noise_fun(inps)
        inputs = Variable(inps_noise.to(device))
        targets = Variable(tars.to(device))

        outputs = model(inputs)

        # calculate loss
        outputs_coord = outputs
        loss = criterion(outputs_coord, targets)
        losses.update(loss.item(), inputs.size(0))

        tars = targets

        # calculate accuracy
        targets_unnorm = data_process.unNormalizeData(tars.data.cpu().numpy(),
                                                      stat_3d['mean'],
                                                      stat_3d['std'],
                                                      stat_3d['dim_use'])
        outputs_unnorm = data_process.unNormalizeData(
            outputs.data.cpu().numpy(), stat_3d['mean'], stat_3d['std'],
            stat_3d['dim_use'])

        # remove ignored dimensions
        dim_use = np.hstack((np.arange(3), stat_3d['dim_use']))

        outputs_use = outputs_unnorm[:, dim_use]
        targets_use = targets_unnorm[:, dim_use]

        all_outputs.append(outputs_use)
        all_targets.append(targets_use)

    accu_frames = np.cumsum(test_loader.dataset.frames)
    all_outputs = np.split(np.concatenate(all_outputs, axis=0),
                           accu_frames)[:-1]
    all_targets = np.split(np.concatenate(all_targets, axis=0),
                           accu_frames)[:-1]

    start = time.time()
    seq_time = 0
    bar = Bar('>>>', fill='>', max=len(all_outputs))

    all_dist, all_pck = [], []
    for i, (outputs_use,
            targets_use) in enumerate(zip(all_outputs, all_targets)):
        if refine_dic is not None:
            origin = outputs_use
            outputs_use, _ = ru.refine(outputs_use, refine_dic,
                                       refine_coeff_fun, **refine_extra_kwargs)

            if visualize:
                visual = [
                    ru.convert_to_pose_16(seq.reshape([-1, 17, 3]))
                    for seq in [outputs_use, origin, targets_use]
                ]
                ru.plot_pose_seq(visual, plot_axis=True, r=1000)

        if procrustes:
            for frame in range(outputs_use.shape[0]):
                gt = targets_use[frame].reshape(-1, 3)
                out = outputs_use[frame].reshape(-1, 3)
                _, Z, T, b, c = get_transformation(gt,
                                                   out,
                                                   True,
                                                   reflection=False)
                out = (b * out.dot(T)) + c
                outputs_use[frame, :] = out.reshape(1, 51)

        for pred, gt in zip(outputs_use, targets_use):
            pred, gt = pred.reshape([-1, 3]), gt.reshape([-1, 3])
            all_dist.append(mpjpe_fun(pred, gt))
            all_pck.append(pck_fun(pred, gt, thresholds=pck_thresholds))

        # update summary
        seq_time = time.time() - start
        start = time.time()

        bar.suffix = '({seq}/{size}) | seq: {seqtime:.4}s | Total: {ttl} | ETA: {eta:} | mpjpe: {loss:.6f}' \
            .format(seq=i + 1,
                    size=len(all_outputs),
                    seqtime=seq_time,
                    ttl=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=np.mean(all_dist))
        bar.next()

    all_dist = np.vstack(all_dist)
    all_pck = np.array(all_pck)
    mpjpe = np.mean(all_dist)
    if cache_prefix:
        with open('cache/{}_.pkl'.format(cache_prefix), 'wb') as f:
            pickle.dump({'mpjpe': all_dist, 'pck': all_pck}, f)
    pck = np.mean(all_pck, axis=0)
    bar.finish()
    print(">>> error: {:4f}, pck: {} <<<".format(
        mpjpe, ' '.join(['{:4f}'.format(val) for val in pck])))
    return losses.avg, mpjpe, pck
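
mpjpe_fun and pck_fun are likewise assumed from the surrounding module. Sketches consistent with how they are called above (per-pose (J, 3) arrays, thresholds in millimetres):

import numpy as np

def mpjpe_fun(pred, gt):
    """Mean per-joint position error for one pose of shape (J, 3)."""
    return np.mean(np.linalg.norm(pred - gt, axis=1))

def pck_fun(pred, gt, thresholds=(50, 100, 150, 200, 250)):
    """Fraction of joints within each distance threshold."""
    dist = np.linalg.norm(pred - gt, axis=1)
    return [float(np.mean(dist <= t)) for t in thresholds]
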
Code example #12
File: main.py Project: waiting-gy/3dPoseBaseline
def test(test_loader, model, criterion, stat_3d, procrustes=False):
    losses = utils.AverageMeter()

    model.eval()

    all_dist = []
    start = time.time()
    batch_time = 0

    # progress is reported through tqdm here rather than progress.Bar
    test_loader = tqdm(test_loader, dynamic_ncols=True)

    # the figure and 2D stats are only needed by the disabled
    # visualization code further down
    fig = plt.figure(figsize=(9.6, 5.4))  # 9.6 x 5.4 in = 1920 x 1080 px at 200 dpi
    stat_2d = torch.load(os.path.join(opt.data_dir, 'stat_2d.pth.tar'))
    for i, (inps, tars) in enumerate(test_loader):
        inputs = Variable(inps.cuda())
        targets = Variable(tars.cuda(non_blocking=True))

        # the model returns the 3D prediction plus a reconstruction of its
        # own 2D input; both terms enter the loss
        outputs, outputs_inputs = model(inputs)

        # calculate loss
        loss = criterion(outputs, targets) + criterion(outputs_inputs, inputs)

        losses.update(loss.item(), inputs.size(0))

        tars = targets

        # calculate accuracy
        targets_unnorm = data_process.unNormalizeData(tars.data.cpu().numpy(), stat_3d['mean'], stat_3d['std'], stat_3d['dim_use'])
        outputs_unnorm = data_process.unNormalizeData(outputs.data.cpu().numpy(), stat_3d['mean'], stat_3d['std'], stat_3d['dim_use'])

        # NOTE: a long block of disabled code followed here. It swapped and
        # flipped axes of the unnormalized poses (the original Chinese
        # comment reads: "the plotted pose comes out upside down; this
        # corrects it") and rendered the 2D input, ground truth, and 3D
        # reconstruction with matplotlib / viz.show3Dpose at an estimated
        # FPS.
        # remove ignored dimensions
        dim_use = np.hstack((np.arange(3), stat_3d['dim_use']))

        outputs_use = outputs_unnorm[:, dim_use]
        targets_use = targets_unnorm[:, dim_use]


        if procrustes:
            for ba in range(inps.size(0)):
                gt = targets_use[ba].reshape(-1, 3)
                out = outputs_use[ba].reshape(-1, 3)
                _, Z, T, b, c = get_transformation(gt, out, True)
                out = (b * out.dot(T)) + c
                outputs_use[ba, :] = out.reshape(1, 51)

        sqerr = (outputs_use - targets_use) ** 2

        distance = np.zeros((sqerr.shape[0], 17))
        dist_idx = 0
        for k in np.arange(0, 17 * 3, 3):
            distance[:, dist_idx] = np.sqrt(np.sum(sqerr[:, k:k + 3], axis=1))
            dist_idx += 1
        all_dist.append(distance)

        # update summary: 100 batches are timed together, so the
        # `batch_time * 10.0` below is milliseconds per batch (the original
        # had hard-coded `batch_time = 1` with the timing commented out)
        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()

        test_loader.set_description(
                    '({batch}/{size}) | batch: {batchtime:.4}ms | loss: {loss:.6f}'.format(
                        batch=i + 1,
                        size=len(test_loader),
                        batchtime=batch_time * 10.0,
                        loss=losses.avg)
                    )
    test_loader.close()

    all_dist = np.vstack(all_dist)
    joint_err = np.mean(all_dist, axis=0)
    ttl_err = np.mean(all_dist)
    print(">>> error: {} <<<".format(ttl_err))
    return losses.avg, ttl_err
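
data_process.unNormalizeData, used by examples #10 to #12, inverts the z-score normalization and scatters the used dimensions back into the full 32-joint (96-dim) layout. A sketch of the usual implementation (an assumption; the project's helper may differ):

import numpy as np

def unNormalizeData(normalized, data_mean, data_std, dim_use):
    """Undo z-scoring; dimensions outside dim_use come back as zeros."""
    T = normalized.shape[0]     # number of frames in the batch
    D = data_mean.shape[0]      # full dimensionality, e.g. 96 = 32 * 3
    out = np.zeros((T, D), dtype=np.float32)
    out[:, dim_use] = normalized
    return out * data_std + data_mean
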
Code example #13
File: main.py Project: waiting-gy/3dPoseBaseline
def train(train_loader, model, criterion, optimizer,
          lr_init=None, lr_now=None, glob_step=None, lr_decay=None, gamma=None,
          max_norm=True):
    losses = utils.AverageMeter()

    model.train()

    start = time.time()
    batch_time = 0

    # progress is reported through tqdm here rather than progress.Bar
    train_loader = tqdm(train_loader, dynamic_ncols=True)

    for i, (inps, tars) in enumerate(train_loader):
        glob_step += 1
        if glob_step % lr_decay == 0 or glob_step == 1:
            lr_now = utils.lr_decay(optimizer, glob_step, lr_init, lr_decay, gamma)
        inputs = Variable(inps.cuda())
        targets = Variable(tars.cuda(non_blocking=True))

        # the model returns the 3D prediction plus a reconstruction of its
        # own 2D input
        outputs, outputs_inputs = model(inputs)

        # calculate loss: 3D prediction loss plus 2D reconstruction loss
        optimizer.zero_grad()
        loss = criterion(outputs, targets) + criterion(outputs_inputs, inputs)

        losses.update(loss.item(), inputs.size(0))
        loss.backward()
        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()

        # update summary
        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()

        train_loader.set_description(
                    '({batch}/{size}) | batch: {batchtime:.4}ms | loss: {loss:.6f}'.format(
                        batch=i + 1,
                        size=len(train_loader),
                        batchtime=batch_time * 10.0,
                        loss=losses.avg)
                    )
    train_loader.close()
    return glob_step, lr_now, losses.avg
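
nn.utils.clip_grad_norm_ returns the total gradient norm measured before clipping, which is worth logging when a hard-coded threshold like max_norm=1 is in question. A sketch of how these loops could surface it (the grad_norms meter is hypothetical):

if max_norm:
    total_norm = nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
    grad_norms.update(float(total_norm), inputs.size(0))  # hypothetical AverageMeter
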