Example 1
# Imports shared by the examples below; the later snippets assume the same set.
import os
import random
from time import gmtime, strftime

import torch
import torch.distributed as dist

# Project-local modules: the network definition, loss wrapper, data pipeline,
# visualization helpers, and training hyper-parameters.
import config
import datareader
import loss
import network
import visutil
from TorchSUL import Model as M  # assumed origin of the M.Saver helper


def main_worker(gpu, ngpus_per_node):
    print('Use GPU:', gpu)
    dist.init_process_group(backend='nccl',
                            init_method='tcp://localhost:23456',
                            world_size=ngpus_per_node,
                            rank=gpu)
    print('Group initialized.')

    model_dnet = network.get_net()

    saver = M.Saver(model_dnet)
    saver.restore('./model/')
    # model_dnet.bn_eps(1e-5)
    model = loss.ModelWithLoss(model_dnet)

    torch.cuda.set_device(gpu)
    model.cuda(gpu)
    model.train()
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
    print('Model ready.')

    loader, sampler = datareader.get_train_dataloader(64)
    optim = torch.optim.AdamW(model.parameters(), lr=config.init_lr)

    # Resume training from epoch 75 (weights were restored from ./model/ above).
    for e in range(75, config.max_epoch):
        print('Replica:%d Epoch:%d' % (gpu, e))
        sampler.set_epoch(e)
        for i, (img, hmap) in enumerate(loader):
            optim.zero_grad()
            hmap_loss, outs = model(img, hmap)

            hmap_loss = hmap_loss.mean()

            loss_total = hmap_loss
            loss_total.backward()
            optim.step()
            lr = optim.param_groups[0]['lr']

            if i % 200 == 0 and gpu == 0:
                if not os.path.exists('./outputs/'):
                    os.mkdir('./outputs/')

                visutil.vis_batch(img, outs, './outputs/%d_out.jpg' % i)
                visutil.vis_batch(img, hmap, './outputs/%d_gt.jpg' % i)

            if i % 20 == 0:
                curr_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
                print('%s  Replica:%d  Progress:%d/%d  LsC:%.3e LR:%.1e' %
                      (curr_time, gpu, i, len(loader), hmap_loss, lr))

        if e in config.lr_epoch:
            newlr = lr * 0.1
            for param_group in optim.param_groups:
                param_group['lr'] = newlr

        if e % config.save_interval == 0 and gpu == 0:
            stamp = random.randint(0, 1000000)
            saver.save('./model/%d_%d.pth' % (e, stamp))
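
The worker above runs as one process per GPU, but none of these snippets shows the launcher itself. A minimal sketch of how such a worker is typically started, assuming it is defined in the same file:

import torch
import torch.multiprocessing as mp

if __name__ == '__main__':
    ngpus_per_node = torch.cuda.device_count()
    # Spawn one training process per GPU; each process receives its GPU index
    # as the first argument, matching main_worker's signature.
    mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node,))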
Example 2
# Uses the imports from Example 1, plus the project-local hrnet module.
def main_worker(gpu, ngpus_per_node):
    print('Use GPU:', gpu)
    dist.init_process_group(backend='nccl',
                            init_method='tcp://localhost:23456',
                            world_size=ngpus_per_node,
                            rank=gpu)
    print('Group initialized.')

    # Initialize the network; the dummy forward pass builds the model so the
    # pretrained DEKR weights can be restored into it below.
    model_dnet = hrnet.DEKRRefine(config.num_pts, 128)
    x = torch.zeros(1, 3, 128, 128)
    with torch.no_grad():
        model_dnet(x)
    M.Saver(model_dnet.dekr).restore('./model_dekr/')
    model = loss.ModelWithLoss(model_dnet)
    saver = M.Saver(model)
    saver.restore('./model/')

    torch.cuda.set_device(gpu)
    model.cuda(gpu)
    model.train()
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
    print('Model initialized.')

    # get loader
    loader, sampler = datareader.get_train_dataloader()
    optim = torch.optim.Adam(model.parameters(), lr=config.init_lr)

    for e in range(config.max_epoch):
        print('Replica:%d Epoch:%d' % (gpu, e))
        sampler.set_epoch(e)
        for i, (img, heatmap, mask, offset,
                offset_weight) in enumerate(loader):
            optim.zero_grad()
            hm, off, refined, hm_loss, off_loss, hm_refined_loss = model(
                img, heatmap, mask, offset, offset_weight)
            loss_total = hm_loss + off_loss * 0.03 + hm_refined_loss
            loss_total.backward()
            optim.step()
            lr = optim.param_groups[0]['lr']

            if i % 100 == 0 and gpu == 0:
                if not os.path.exists('./outputs/'):
                    os.mkdir('./outputs/')
                visutil.vis_batch(img, hm, './outputs/%d_out.jpg' % i)
                visutil.vis_batch(img, refined, './outputs/%d_ref.jpg' % i)
                visutil.vis_batch(img, heatmap, './outputs/%d_gt.jpg' % i)

            if i % 20 == 0 and gpu == 0:
                curr_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
                print(
                    '%s  Replica:%d  Progress:%d/%d  LsHM:%.3e  LsRef:%.3e  LsOff:%.3e  LR:%.1e'
                    % (curr_time, gpu, i, len(loader), hm_loss,
                       hm_refined_loss, off_loss, lr))

        if e in config.lr_epoch:
            newlr = lr * 0.1
            for param_group in optim.param_groups:
                param_group['lr'] = newlr

        if e % config.save_interval == 0 and gpu == 0:
            stamp = random.randint(0, 1000000)
            saver.save('./model/%d_%d.pth' % (e, stamp))
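
Examples 1 and 2 both step the learning rate down by hand: when the epoch index appears in config.lr_epoch, every param group is multiplied by 0.1. The same schedule can be expressed with PyTorch's built-in scheduler; a minimal sketch, assuming config.lr_epoch is an increasing list of epoch indices:

from torch.optim.lr_scheduler import MultiStepLR

optim = torch.optim.Adam(model.parameters(), lr=config.init_lr)
# Multiplies the learning rate by 0.1 at every epoch listed in config.lr_epoch.
scheduler = MultiStepLR(optim, milestones=config.lr_epoch, gamma=0.1)

for e in range(config.max_epoch):
    ...  # one epoch of training, as in the loop above
    scheduler.step()  # advance the schedule once per epoch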
Example 3
            buff[i + 1, 2] = d
        buff[:, :2] = p[:, :2]
        results.append(buff)
    return results


# Uses the imports from Example 1, plus numpy (np), cv2, and the project-local
# testutil module.
if __name__ == '__main__':
    model_dnet = network.DensityNet(
        config.density_num_layers, config.density_channels,
        config.density_level, config.gcn_layers, config.gcn_channels,
        config.head_layernum, config.head_chn, config.upsample_layers,
        config.upsample_chn)

    with torch.no_grad():
        # Dummy forward pass to build the model before restoring weights.
        x = np.float32(np.random.random(size=[1, 3, 512, 512]))
        x = torch.from_numpy(x)
        model_dnet(x)
        model = loss.ModelWithLoss(model_dnet)
        M.Saver(model).restore('./model/')
        model.eval()
        model.cuda()

        imgname = '233.jpg'
        img = cv2.imread(imgname)
        pts, scores, roots, rels, hmap, img_processed, idout = testutil.run_pipeline(
            img, model.model)
        pts_results = get_pts(pts, roots, rels)

        print(len(pts_results))
        pts_final = []
        for p, s in zip(pts_results, scores):
            if s > 0.2:
                pts_final.append(p)
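
The script ends after thresholding the detections. To inspect them, the kept keypoints can be drawn back onto the image; a rough sketch, assuming the first two columns of each result hold (x, y) pixel coordinates (as the buff[:, :2] = p[:, :2] line above suggests) and that they are expressed in the frame of img rather than img_processed:

for person in pts_final:
    for x, y in person[:, :2]:
        # Draw a filled green dot at each keypoint.
        cv2.circle(img, (int(round(x)), int(round(y))), 3, (0, 255, 0), -1)
cv2.imwrite('233_vis.jpg', img)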
Example 4
# Uses the imports from Example 1, plus numpy (np).
def main_worker(gpu, ngpus_per_node):
    print('Use GPU:', gpu)
    dist.init_process_group(backend='nccl',
                            init_method='tcp://localhost:23456',
                            world_size=ngpus_per_node,
                            rank=gpu)
    print('Group initialized.')

    model_dnet = network.DensityNet(
        config.density_num_layers, config.density_channels,
        config.density_level, config.gcn_layers, config.gcn_channels,
        config.head_layernum, config.head_chn, config.upsample_layers,
        config.upsample_chn)
    # Dummy forward pass to build the network before restoring weights.
    x = np.float32(np.random.random(size=[1, 3, 512, 512]))
    x = torch.from_numpy(x)
    with torch.no_grad():
        outs, idout, depout = model_dnet(x)
    M.Saver(model_dnet.backbone).restore('./model_imagenet_w32/')
    # model_dnet.bn_eps(1e-5)
    model = loss.ModelWithLoss(model_dnet)
    M.Saver(model.model.backbone).restore('./backbone/')
    M.Saver(model.model.upsample).restore('./upsample/')
    saver = M.Saver(model)
    saver.restore('./model/')

    torch.cuda.set_device(gpu)
    model.cuda(gpu)
    model.train()
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
    print('Model ready.')

    loader, sampler = datareader.get_train_dataloader(28)
    optim = torch.optim.Adam(model.parameters(), lr=config.init_lr)

    for e in range(config.max_epoch):
        print('Replica:%d Epoch:%d' % (gpu, e))
        sampler.set_epoch(e)
        for i, (img, hmap, mask, pts, depth, depth_all,
                is_muco) in enumerate(loader):
            optim.zero_grad()
            (hmap_loss, push_loss, pull_loss, rdepth_loss, depth_loss, outs,
             idout, depout, depallout) = model(img, hmap, mask, pts, depth,
                                               depth_all, is_muco)

            # Reduce the per-sample losses returned by the wrapper to scalars.
            hmap_loss = hmap_loss.mean()
            push_loss = push_loss.mean()
            pull_loss = pull_loss.mean()
            rdepth_loss = rdepth_loss.mean()
            depth_loss = depth_loss.mean()
            # Warm-up loss weighting; the `e < 0` branch is disabled in this run.
            if e < 0:
                loss_total = (hmap_loss + 0.05 * push_loss +
                              0.001 * pull_loss + 0.01 * rdepth_loss +
                              0.01 * depth_loss)
            else:
                loss_total = (hmap_loss + 0.003 * push_loss +
                              0.001 * pull_loss + 0.003 * rdepth_loss +
                              0.003 * depth_loss)
            loss_total.backward()
            optim.step()
            lr = optim.param_groups[0]['lr']

            if i % 100 == 0 and gpu == 0:
                if not os.path.exists('./outputs/'):
                    os.mkdir('./outputs/')
                visutil.vis_batch(img, outs, './outputs/%d_out.jpg' % i)
                visutil.vis_batch(img, hmap, './outputs/%d_gt.jpg' % i)
                visutil.vis_batch(img, idout, './outputs/%d_id.jpg' % i,
                                  minmax=True)
                visutil.vis_batch(img, depout, './outputs/%d_dep.jpg' % i,
                                  minmax=True)
                visutil.vis_batch(img, depallout, './outputs/%d_rel.jpg' % i,
                                  minmax=True)
                print(outs.max(), outs.min(), depout.max(), depout.min(),
                      depth[:, :, 2].max(), depth[:, :, 2].min())

            if i % 20 == 0:
                curr_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
                print('%s  Replica:%d  Progress:%d/%d  Ls:%.3e  LsC:%.3e  '
                      'IDs:%.3e  IDd:%.3e rD:%.3e D:%.3e LR:%.1e' %
                      (curr_time, gpu, i, len(loader), loss_total, hmap_loss,
                       pull_loss, push_loss, rdepth_loss, depth_loss, lr))

        if e in config.lr_epoch:
            newlr = lr * 0.1
            for param_group in optim.param_groups:
                param_group['lr'] = newlr

        if e % config.save_interval == 0 and gpu == 0:
            stamp = random.randint(0, 1000000)
            saver.save('./model/%d_%d.pth' % (e, stamp))
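
The progress line in this example logs each replica's local loss values, so the numbers printed by different GPUs will differ slightly. If a single figure averaged across replicas is preferred, the scalars can be all-reduced before printing; a small sketch using the same torch.distributed module (the global_mean helper is hypothetical):

def global_mean(value, world_size):
    # Average a scalar loss tensor across all DDP replicas (for logging only).
    t = value.detach().clone()
    dist.all_reduce(t, op=dist.ReduceOp.SUM)
    return t / world_size

# e.g. inside the logging branch:
#   print('LsC:%.3e' % global_mean(hmap_loss, ngpus_per_node))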
Example 5
# Uses the imports from Example 1, plus numpy (np).
def main_worker(gpu, ngpus_per_node):
    print('Use GPU:', gpu)
    dist.init_process_group(backend='nccl',
                            init_method='tcp://localhost:23456',
                            world_size=ngpus_per_node,
                            rank=gpu)
    print('Group initialized.')

    model_dnet = network.DensityNet(config.head_layernum, config.head_chn,
                                    config.upsample_layers,
                                    config.upsample_chn)
    # Dummy forward pass; the input stacks the 3 RGB channels with
    # config.num_match_pts matching heatmaps.
    x = np.float32(
        np.random.random(size=[
            1, 3 + config.num_match_pts, config.inp_size, config.inp_size
        ]))
    x = torch.from_numpy(x)
    with torch.no_grad():
        outs = model_dnet(x)
    # Note: this saver wraps only the backbone, so the checkpoints restored
    # from and written to ./model/ cover the backbone weights alone.
    saver = M.Saver(model_dnet.backbone)
    saver.restore('./model_imagenet_w32/',
                  strict=False,
                  exclude=['c1.conv.weight'])
    saver.restore('./model/')
    # model_dnet.bn_eps(1e-5)
    model = loss.ModelWithLoss(model_dnet)

    torch.cuda.set_device(gpu)
    model.cuda(gpu)
    model.train()
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
    print('Model ready.')

    loader, sampler = datareader.get_train_dataloader(22)
    optim = torch.optim.Adam(model.parameters(), lr=config.init_lr)

    for e in range(config.max_epoch):
        print('Replica:%d Epoch:%d' % (gpu, e))
        sampler.set_epoch(e)
        for i, (img, hmap, hmap_match) in enumerate(loader):
            optim.zero_grad()
            hmap_loss, outs = model(img, hmap, hmap_match)

            hmap_loss = hmap_loss.mean()

            hmap_loss.backward()
            optim.step()
            lr = optim.param_groups[0]['lr']

            if i % 100 == 0 and gpu == 0:
                if not os.path.exists('./outputs/'):
                    os.mkdir('./outputs/')

                visutil.vis_batch(img, outs, './outputs/%d_out.jpg' % i)
                visutil.vis_batch(img, hmap, './outputs/%d_gt.jpg' % i)
                visutil.vis_batch(img,
                                  hmap_match,
                                  './outputs/%d_hmm.jpg' % i,
                                  minmax=True)

            if i % 20 == 0:
                curr_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
                print('%s  Replica:%d  Progress:%d/%d  LsC:%.3e LR:%.1e' %
                      (curr_time, gpu, i, len(loader), hmap_loss, lr))

        if e in config.lr_epoch:
            newlr = lr * 0.1
            for param_group in optim.param_groups:
                param_group['lr'] = newlr

        if e % config.save_interval == 0 and gpu == 0:
            stamp = random.randint(0, 1000000)
            saver.save('./model/%d_%d.pth' % (e, stamp))
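
This example restores the ImageNet backbone with strict=False and an exclude list, so that the first convolution (whose input now has 3 + config.num_match_pts channels instead of 3) keeps its fresh initialization while all shape-compatible weights are loaded. With plain PyTorch state dicts the same partial restore looks roughly like the sketch below; the checkpoint path is hypothetical:

state = torch.load('./model_imagenet_w32/checkpoint.pth', map_location='cpu')
own = model_dnet.backbone.state_dict()
# Keep entries whose name and shape match, and skip the excluded first conv.
filtered = {k: v for k, v in state.items()
            if k in own and v.shape == own[k].shape and k != 'c1.conv.weight'}
model_dnet.backbone.load_state_dict(filtered, strict=False)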