Code Example #1
File: retina.py Project: rafale77/RetinaFace
    def __init__(self, modelpath, nms=0.4):
        model = mnet.Detector()
        model = model.eval()
        x = torch.from_numpy(np.ones([1, 3, 640, 640]).astype(np.float32))
        _ = model(x)
        M.Saver(model).restore(modelpath)
        model.cuda()
        self.model = model

        self.nms_threshold = nms

        self.fpn_keys = []
        self._feat_stride_fpn = [32, 16, 8]

        for s in self._feat_stride_fpn:
            self.fpn_keys.append('stride%s' % s)

        self._anchors_fpn = dict(
            zip(self.fpn_keys, generate_anchors_fpn(dense_anchor=False)))
        for k in self._anchors_fpn:
            v = self._anchors_fpn[k].astype(np.float32)
            v = torch.from_numpy(v).cuda()
            self._anchors_fpn[k] = v

        self._num_anchors = dict(
            zip(self.fpn_keys,
                [anchors.shape[0] for anchors in self._anchors_fpn.values()]))
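Note: every snippet on this page follows the same load idiom, because TorchSUL builds layers lazily during the first forward pass. The dummy forward (_ = model(x)) exists so that parameter shapes are created before M.Saver(...).restore(...) maps checkpoint weights onto them. A minimal sketch of the idiom, with the model class and checkpoint directory as placeholders:

# Sketch only: load_model, model_cls and modelpath are placeholders, not part
# of the original project.
import numpy as np
import torch
from TorchSUL import Model as M

def load_model(model_cls, modelpath, input_size=640):
    model = model_cls()
    model.eval()
    # one dummy forward pass so the lazily-built parameters exist
    x = torch.from_numpy(np.ones([1, 3, input_size, input_size], dtype=np.float32))
    with torch.no_grad():
        model(x)
    M.Saver(model).restore(modelpath)  # restore weights from the checkpoint directory
    if torch.cuda.is_available():
        model.cuda()
    return model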
Code Example #2
File: retina.py Project: rafale77/RetinaFace
    def __init__(self, modelpath, nms=0.4, worker=2):
        model = retina_resnet.Detector()
        model = model.eval()
        x = torch.from_numpy(np.ones([1, 3, 640, 640]).astype(np.float32))
        _ = model(x)
        M.Saver(model).restore(modelpath)
        model.cuda()
        if isinstance(config.gpus, list):
            if len(config.gpus) > 1:
                print('Using multiple gpus:', config.gpus)
                model = torch.nn.DataParallel(model, device_ids=config.gpus)
        self.model = model

        self.nms_threshold = nms

        self.fpn_keys = []
        self._feat_stride_fpn = [32, 16, 8]

        for s in self._feat_stride_fpn:
            self.fpn_keys.append('stride%s' % s)

        self._anchors_fpn = dict(
            zip(self.fpn_keys, generate_anchors_fpn(dense_anchor=False)))
        for k in self._anchors_fpn:
            v = self._anchors_fpn[k].astype(np.float32)
            v = torch.from_numpy(v).cuda()
            self._anchors_fpn[k] = v

        self._num_anchors = dict(
            zip(self.fpn_keys,
                [anchors.shape[0] for anchors in self._anchors_fpn.values()]))
        self.worker = worker
Code Example #3
def main_worker(gpu, ngpus_per_node):
    print('Use GPU:', gpu)
    dist.init_process_group(backend='nccl',
                            init_method='tcp://localhost:23456',
                            world_size=ngpus_per_node,
                            rank=gpu)
    print('Group initialized.')

    model_dnet = network.get_net()

    saver = M.Saver(model_dnet)
    saver.restore('./model/')
    # model_dnet.bn_eps(1e-5)
    model = loss.ModelWithLoss(model_dnet)

    torch.cuda.set_device(gpu)
    model.cuda(gpu)
    model.train()
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
    print('Model get.')

    loader, sampler = datareader.get_train_dataloader(64)
    optim = torch.optim.AdamW(model.parameters(), lr=config.init_lr)

    for e in range(75, config.max_epoch):  # note: starts at epoch 75, i.e. this run resumes earlier training
        print('Replica:%d Epoch:%d' % (gpu, e))
        sampler.set_epoch(e)
        for i, (img, hmap) in enumerate(loader):
            # print(img.shape, hmap.shape, hmap_match.shape)
            optim.zero_grad()
            hmap_loss, outs = model(img, hmap)

            hmap_loss = hmap_loss.mean()

            loss_total = hmap_loss
            loss_total.backward()
            optim.step()
            lr = optim.param_groups[0]['lr']

            if i % 200 == 0 and gpu == 0:
                if not os.path.exists('./outputs/'):
                    os.mkdir('./outputs/')

                visutil.vis_batch(img, outs, './outputs/%d_out.jpg' % i)
                visutil.vis_batch(img, hmap, './outputs/%d_gt.jpg' % i)

            if i % 20 == 0:
                curr_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
                print('%s  Replica:%d  Progress:%d/%d  LsC:%.3e LR:%.1e' %
                      (curr_time, gpu, i, len(loader), hmap_loss, lr))

        if e in config.lr_epoch:
            newlr = lr * 0.1
            for param_group in optim.param_groups:
                param_group['lr'] = newlr

        if e % config.save_interval == 0 and gpu == 0:
            stamp = random.randint(0, 1000000)
            saver.save('./model/%d_%d.pth' % (e, stamp))
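main_worker takes the process index as its first argument, which matches what torch.multiprocessing.spawn passes; the launcher is not shown in this excerpt, but a hypothetical one would look like this:

# Hypothetical launcher (assumed, not from the source): spawn calls
# main_worker(i, ngpus_per_node) for i in 0..nprocs-1, and the worker uses i
# as both the local GPU id and the distributed rank.
import torch
import torch.multiprocessing as mp

if __name__ == '__main__':
    ngpus_per_node = torch.cuda.device_count()
    mp.spawn(main_worker, args=(ngpus_per_node,), nprocs=ngpus_per_node)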
Code Example #4
File: retina.py Project: ddddwee1/TorchSUL
	def __init__(self, modelpath, use_gpu, nms=0.4):
		self.use_gpu = use_gpu
		model = mnet.Detector()
		model = model.eval()
		x = torch.from_numpy(np.ones([1,3,640,640]).astype(np.float32))
		_ = model(x)
		M.Saver(model).restore('./model/')
		if self.use_gpu:
			model.cuda()
		self.model = model
		self.generate_anchors()
		self.nms = cpu_nms_wrapper(nms)
Code Example #5
def main_worker(gpu, ngpus_per_node):
	print('Use GPU:', gpu)
	dist.init_process_group(backend='nccl', init_method='tcp://localhost:23456', world_size=ngpus_per_node, rank=gpu)
	print('Group initialized.')

	model_dnet = network.DensityNet(config.density_num_layers, config.density_channels, config.density_level,\
							config.gcn_layers, config.gcn_channels, config.head_layernum, config.head_chn, config.upsample_layers, config.upsample_chn)
	x = np.float32(np.random.random(size=[1,3,512,512]))
	x = torch.from_numpy(x)
	with torch.no_grad():
		outs, idout, depout = model_dnet(x)
	# input()
	M.Saver(model_dnet.backbone).restore('./model_imagenet_w32/')
	# model_dnet.bn_eps(1e-5)
	model = loss.ModelWithLoss(model_dnet)
	M.Saver(model.model.backbone).restore('./backbone/')
	M.Saver(model.model.upsample).restore('./upsample/')
	saver = M.Saver(model)
	saver.restore('./model/')

	torch.cuda.set_device(gpu)
	model.cuda(gpu)
	model.train()
	model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
	print('Model get.')

	loader, sampler = datareader.get_train_dataloader(28)
	optim = torch.optim.Adam(model.parameters(), lr=config.init_lr)

	for e in range(config.max_epoch):
		print('Replica:%d Epoch:%d'%(gpu, e))
		sampler.set_epoch(e)
		for i, (img, hmap, mask, pts, depth, depth_all, is_muco) in enumerate(loader):
			optim.zero_grad()
			hmap_loss, push_loss, pull_loss, rdepth_loss, depth_loss, outs, idout, depout, depallout = model(img, hmap, mask, pts, depth, depth_all, is_muco)
			# print(hmap_loss.shape)

			hmap_loss = hmap_loss.mean()
			push_loss = push_loss.mean()
			pull_loss = pull_loss.mean()
			rdepth_loss = rdepth_loss.mean()
			depth_loss = depth_loss.mean()
			if e<0:  # note: disabled branch (e is never negative), so the else weights below are always used
				loss_total = hmap_loss + 0.05*push_loss + 0.001*pull_loss + 0.01 * rdepth_loss + 0.01 * depth_loss
			else:
				loss_total = hmap_loss + 0.003*push_loss + 0.001*pull_loss + 0.003 * rdepth_loss + 0.003 * depth_loss
			loss_total.backward()
			optim.step()
			lr = optim.param_groups[0]['lr']

			if i%100==0 and gpu==0:
				if not os.path.exists('./outputs/'):
					os.mkdir('./outputs/')
				# outs = torch.sigmoid(outs)
				# outs, idout = model_dnet(img.cuda())
				visutil.vis_batch(img, outs, './outputs/%d_out.jpg'%i)
				visutil.vis_batch(img, hmap, './outputs/%d_gt.jpg'%i)
				#visutil.vis_batch(img, mask, './outputs/%d_mask.jpg'%i)
				visutil.vis_batch(img, idout, './outputs/%d_id.jpg'%i, minmax=True)
				visutil.vis_batch(img, depout, './outputs/%d_dep.jpg'%i, minmax=True)
				visutil.vis_batch(img, depallout, './outputs/%d_rel.jpg'%i, minmax=True)
				print(outs.max(), outs.min(), depout.max(), depout.min(), depth[:,:,2].max(), depth[:,:,2].min())

			if i%20==0:
				curr_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
				print('%s  Replica:%d  Progress:%d/%d  Ls:%.3e  LsC:%.3e  IDs:%.3e  IDd:%.3e rD:%.3e D:%.3e LR:%.1e'%(curr_time, gpu, i, len(loader), loss_total, hmap_loss, pull_loss, push_loss, rdepth_loss, depth_loss, lr))

		if e in config.lr_epoch:
			newlr = lr * 0.1 
			for param_group in optim.param_groups:
				param_group['lr'] = newlr

		if e%config.save_interval==0 and gpu==0:
			stamp = random.randint(0, 1000000)
			saver.save('./model/%d_%d.pth'%(e, stamp))
Code Example #6
    l += get_layer('backbone.c27', 'mobilenet0_%s26')

    l += get_convbn('backbone.rf_c3_lateral', 'rf_c3_lateral', bias=True)
    l += get_convbn('backbone.rf_c2_lateral', 'rf_c2_lateral', bias=True)
    l += get_convbn('backbone.rf_c1_red_conv', 'rf_c1_red_conv', bias=True)
    l += get_convbn('backbone.rf_c2_aggr', 'rf_c2_aggr', bias=True)
    l += get_convbn('backbone.rf_c1_aggr', 'rf_c1_aggr', bias=True)

    l += get_dethead('backbone.det3', 'rf_c3_det')
    l += get_dethead('backbone.det2', 'rf_c2_det')
    l += get_dethead('backbone.det1', 'rf_c1_det')

    l += get_regress('backbone.head32', 'face_rpn_%s_stride32')
    l += get_regress('backbone.head16', 'face_rpn_%s_stride16')
    l += get_regress('backbone.head8', 'face_rpn_%s_stride8')

    a, b = totonoi(l)

    for i, j in zip(a, b):
        print(i, j)
        value = source.res[j].asnumpy()
        print(value.shape)
        print(res[i].shape)
        res[i].data[:] = torch.from_numpy(value)[:]

    y = net(x)
    print(y)
    print(y.shape)

    M.Saver(net).save('./model/mbnet.pth')
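This conversion script (like Example #10 below) ports weights from another framework into torch parameters; the asnumpy() call suggests the source tensors are MXNet NDArrays. The copy in the loop above can be made safer with an explicit shape check; a sketch, where copy_into is a hypothetical helper:

# Hypothetical helper mirroring the res[i].data[:] = ... copy above, with a
# shape check so silent broadcasting cannot corrupt the converted model.
import numpy as np
import torch

def copy_into(param, value):
    value = np.asarray(value, dtype=np.float32)
    assert tuple(param.shape) == value.shape, (tuple(param.shape), value.shape)
    with torch.no_grad():
        param.copy_(torch.from_numpy(value))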
Code Example #7
File: test.py Project: ddddwee1/TorchSUL
    img = img / 255
    img = img - np.float32([0.485, 0.456, 0.406])
    img = img / np.float32([0.229, 0.224, 0.225])

    img = np.transpose(img, [2, 0, 1])
    return img


if __name__ == '__main__':
    model_dnet = network.DinoNet()
    x = np.float32(
        np.random.random(size=[1, 3, config.inp_size, config.inp_size]))
    x = torch.from_numpy(x)
    with torch.no_grad():
        outs = model_dnet(x)
    M.Saver(model_dnet).restore('./model/')
    model_dnet.eval()
    model_dnet.cuda()

    img = cv2.imread('0000.png')
    img = _pre_process(img)
    img = torch.from_numpy(img[None, ...]).cuda()
    with torch.no_grad():
        out, attn = model_dnet(img, True)

    res = visutil.vis_one(img.cpu().numpy()[0], out.cpu().numpy()[0])
    res = np.concatenate(res, axis=1)
    # cv2.imwrite('out.png', res)

    attn_map = vis_attn_map(img.cpu().numpy()[0],
                            attn.cpu().numpy()[0],
Code Example #8
File: hrnet.py Project: ddddwee1/TorchSUL
			self.convs_off.append(M.ConvLayer(1, 2))

	def forward(self, x):
		x = self.backbone(x)
		h, w = x[0].shape[2], x[0].shape[3]
		x = torch.cat([x[0], F.interpolate(x[1], (h,w), mode='bilinear'), \
			F.interpolate(x[2], (h,w), mode='bilinear'), F.interpolate(x[3], (h,w), mode='bilinear')], dim=1)
		# x0_h, x0_w = x[0].size(2), x[0].size(3)
		# x = torch.cat([x[0], \
		# 	F.upsample(x[1], size=(x0_h, x0_w), mode='bilinear'), \
		# 	F.upsample(x[2], size=(x0_h, x0_w), mode='bilinear'), \
		# 	F.upsample(x[3], size=(x0_h, x0_w), mode='bilinear')], 1)
		# return x 

		hmap = self.conv_hmap(self.head_hmap(self.transition_hmap(x)))

		offsets = []
		off = self.transition_off(x)
		for i in range(len(self.convs_off)):
			o = self.reg_blks_off[i*2](off[:,i*15:i*15+15])
			o = self.reg_blks_off[i*2+1](o)
			o = self.convs_off[i](o)
			offsets.append(o)
		offsets = torch.cat(offsets, dim=1)
		return hmap, offsets

if __name__=='__main__':
	net = HRNET(17)
	M.Saver(net).restore('./model_imagenet/')
	M.Saver(net.backbone).save('./model_body_imagenet/w48.pth')
Code Example #9
import glob
import os
import pickle
from collections import defaultdict

import scipy.io as sio
import torch
import util.norm_pose
from TorchSUL import Model as M
from lib.models import networktcn  # assumed import path; networktcn's location is not shown in this excerpt

if __name__ == '__main__':
    seq_len = 243
    nettcn = networktcn.Refine2dNet(17,
                                    seq_len,
                                    input_dimension=2,
                                    output_dimension=1,
                                    output_pts=1)
    x_dumb = torch.zeros(2, 243, 17 * 2)
    nettcn(x_dumb)
    M.Saver(nettcn).restore('./ckpts/model_root/')
    nettcn.cuda()
    nettcn.eval()

    # create result folder
    if not os.path.exists('mupots/pred_dep/'):
        os.makedirs('mupots/pred_dep/')

    results = defaultdict(list)
    gts = defaultdict(list)
    for ptsfile in sorted(glob.glob('mupots/est_p2ds/*.pkl')):
        ptsfile = ptsfile.replace('\\', '/')  # for windows
        print(ptsfile)
        p2d, affpts, affb, occmask = pickle.load(open(ptsfile, 'rb'))
        p2d = torch.from_numpy(p2d).cuda() / 915
Code Example #10
    l += get_convbn('ssh_c3_lateral', 'ssh_c3_lateral', bias=True)
    l += get_dethead('det3', 'ssh_m3_det')
    l += get_regress('head32', 'face_rpn_%s_stride32')

    l += get_convbn('ssh_c2_lateral', 'ssh_c2_lateral', bias=True)
    l += get_convbn('ssh_c2_aggr', 'ssh_c2_aggr', bias=True)
    l += get_dethead('det2', 'ssh_m2_det')
    l += get_regress('head16', 'face_rpn_%s_stride16')

    l += get_convbn('ssh_m1_red_conv', 'ssh_m1_red_conv', bias=True)
    l += get_convbn('ssh_c1_aggr', 'ssh_c1_aggr', bias=True)
    l += get_dethead('det1', 'ssh_m1_det')
    l += get_regress('head8', 'face_rpn_%s_stride8')

    a, b = totonoi(l)
    # print(a,b)
    import source
    for i, j in zip(a, b):
        # print(i,j)
        value = source.res[j].asnumpy()
        # print(value.shape)
        # print(res[i].shape)
        res[i].data[:] = torch.from_numpy(value)[:]

    # net.bn_eps(2e-5)
    y = net(x)
    print(y[0])
    print(y[0].shape)

    M.Saver(net).save('./model_r50/r50_retina.pth')
Code Example #11
                  dep_out_path='./mupots/pred_dep_bu/',
                  gt_dep_path='./mupots/depths/')

    ## step 2: infer the integrated results
    print('Inferring the integrated poses...')
    # create data loader
    data = inteutil.InteDataset(bu_path='./mupots/pred_bu/',
                                bu_dep_path='./mupots/pred_dep_bu/',
                                td_path='./mupots/pred/',
                                td_dep_path='./mupots/pred_dep/')
    # initialize the network
    net = networkinte.IntegrationNet()
    pts_dumb = torch.zeros(2, 102)
    dep_dumb = torch.zeros(2, 2)
    net(pts_dumb, dep_dumb)
    M.Saver(net).restore('./ckpts/model_inte/')
    net.cuda()

    # create paths
    if not os.path.exists('./mupots/pred_inte/'):
        os.makedirs('./mupots/pred_inte/')
    if not os.path.exists('./mupots/pred_dep_inte/'):
        os.makedirs('./mupots/pred_dep_inte/')

    with torch.no_grad():
        all_pts = defaultdict(list)
        for src_pts, src_dep, vid_inst in tqdm(data):
            src_pts = torch.from_numpy(src_pts).cuda()
            src_dep = torch.from_numpy(src_dep).cuda()
            res_pts, res_dep = net(src_pts, src_dep)
            res_pts = res_pts.cpu().numpy()
Code Example #12
File: run.py Project: ddddwee1/TorchSUL
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from TorchSUL import Model as M
import resnet
import resnet2

x = torch.from_numpy(np.zeros([1, 3, 16 * 4, 112], dtype=np.float32))
net = resnet.Res34()
net.eval()
# initialize
net(x)
saver = M.Saver(net)
saver.restore('./newmodel/')
# end: initialize
# record params
net.record()
net.merge_bn()
net(x)
c1param = M.get_record()
# end record params

# record simplified network
net2 = resnet2.Res34()
net2.eval()
M.reset_record()
net2.record()
net2(x)
c2param = M.get_record()
# end: record
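The record/merge_bn pass collects the BN-folded parameters of the original Res34 (c1param) and the parameters of the simplified Res34 (c2param); the excerpt cuts off before the two are matched up. Whatever that mapping looks like (the record format returned by M.get_record() is TorchSUL-specific and not documented here), a cheap sanity check after copying is to compare outputs:

# Sketch of a sanity check one might append after copying c1param into
# c2param: with identical weights, both networks should agree on x.
with torch.no_grad():
    y1 = net(x)
    y2 = net2(x)
print('max abs diff:', (y1 - y2).abs().max().item())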
Code Example #13
bone_matrix = torch.from_numpy(bone_matrix)

seq_len = 243
netgcn = networkgcn.TransNet(256, 17)
nettcn = networktcn.Refine2dNet(17, seq_len)

# initialize the network with dumb input 
x_dumb = torch.zeros(2,17,2)
affb = torch.ones(2,16,16) / 16
affpts = torch.ones(2,17,17) / 17
netgcn(x_dumb, affpts, affb, bone_matrix, bone_matrix_inv)
x_dumb = torch.zeros(2,243, 17*3)
nettcn(x_dumb)

# load networks 
M.Saver(netgcn).restore('./ckpts/model_gcnwild/')
M.Saver(nettcn).restore('./ckpts/model_tcn/')

# push to gpu 
netgcn.cuda()
netgcn.eval()
nettcn.cuda()
nettcn.eval()
bone_matrix = bone_matrix.cuda()
bone_matrix_inv = bone_matrix_inv.cuda()

# create result folder 
if not os.path.exists('mupots/pred/'):
	os.makedirs('mupots/pred/')

# run prediction 
Code Example #14
        buff[:, :2] = p[:, :2]
        results.append(buff)
    return results


if __name__ == '__main__':
    model_dnet = network.DensityNet(config.density_num_layers, config.density_channels, config.density_level,\
          config.gcn_layers, config.gcn_channels, config.head_layernum, config.head_chn, config.upsample_layers, config.upsample_chn)

    with torch.no_grad():
        # initialize model
        x = np.float32(np.random.random(size=[1, 3, 512, 512]))
        x = torch.from_numpy(x)
        model_dnet(x)
        model = loss.ModelWithLoss(model_dnet)
        M.Saver(model).restore('./model/')
        model.eval()
        model.cuda()

        imgname = '233.jpg'
        img = cv2.imread(imgname)
        pts, scores, roots, rels, hmap, img_processed, idout = testutil.run_pipeline(
            img, model.model)
        pts_results = get_pts(pts, roots, rels)

        print(len(pts_results))
        pts_final = []
        for p, s in zip(pts_results, scores):
            if s > 0.2:
                pts_final.append(p)
        # print(pts_final[0][:,0].min(), pts_final[0][:,0].max(), pts_final[0][:,1].min(), pts_final[0][:,1].max(), pts_final[0][:,2].min(), pts_final[0][:,2].max())
Code Example #15
def main_worker(gpu, ngpus_per_node):
    print('Use GPU:', gpu)
    dist.init_process_group(backend='nccl',
                            init_method='tcp://localhost:23456',
                            world_size=ngpus_per_node,
                            rank=gpu)
    print('Group initialized.')

    model_dnet = network.DensityNet(config.head_layernum, config.head_chn,
                                    config.upsample_layers,
                                    config.upsample_chn)
    x = np.float32(
        np.random.random(size=[
            1, 3 + config.num_match_pts, config.inp_size, config.inp_size
        ]))
    x = torch.from_numpy(x)
    with torch.no_grad():
        outs = model_dnet(x)
    # input()
    saver = M.Saver(model_dnet.backbone)
    saver.restore('./model_imagenet_w32/',
                  strict=False,
                  exclude=['c1.conv.weight'])
    saver.restore('./model/')
    # model_dnet.bn_eps(1e-5)
    model = loss.ModelWithLoss(model_dnet)

    torch.cuda.set_device(gpu)
    model.cuda(gpu)
    model.train()
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
    print('Model get.')

    loader, sampler = datareader.get_train_dataloader(22)
    optim = torch.optim.Adam(model.parameters(), lr=config.init_lr)

    for e in range(config.max_epoch):
        print('Replica:%d Epoch:%d' % (gpu, e))
        sampler.set_epoch(e)
        for i, (img, hmap, hmap_match) in enumerate(loader):
            # print(img.shape, hmap.shape, hmap_match.shape)
            optim.zero_grad()
            hmap_loss, outs = model(img, hmap, hmap_match)

            hmap_loss = hmap_loss.mean()

            hmap_loss.backward()
            optim.step()
            lr = optim.param_groups[0]['lr']

            if i % 100 == 0 and gpu == 0:
                if not os.path.exists('./outputs/'):
                    os.mkdir('./outputs/')

                visutil.vis_batch(img, outs, './outputs/%d_out.jpg' % i)
                visutil.vis_batch(img, hmap, './outputs/%d_gt.jpg' % i)
                visutil.vis_batch(img,
                                  hmap_match,
                                  './outputs/%d_hmm.jpg' % i,
                                  minmax=True)

            if i % 20 == 0:
                curr_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
                print('%s  Replica:%d  Progress:%d/%d  LsC:%.3e LR:%.1e' %
                      (curr_time, gpu, i, len(loader), hmap_loss, lr))

        if e in config.lr_epoch:
            newlr = lr * 0.1
            for param_group in optim.param_groups:
                param_group['lr'] = newlr

        if e % config.save_interval == 0 and gpu == 0:
            stamp = random.randint(0, 1000000)
            saver.save('./model/%d_%d.pth' % (e, stamp))
Code Example #16
File: TCN.py Project: ddddwee1/PoseMono
from . import networktcn
import torch
import numpy as np
import torch.nn.functional as F
from TorchSUL import Model as M
import config

seq_len = 129
nettcn = networktcn.NetBundle(17, seq_len)
x_dumb = torch.zeros(2, seq_len, 17 * 2)
nettcn(x_dumb)
M.Saver(nettcn).restore(config.TCN_path)
nettcn.eval()
nettcn.cuda()


def run_points(pts):
    pts = np.float32(pts)[:, :, :2]
    pts = torch.from_numpy(pts).cuda()
    pts = pts.unsqueeze(0).unsqueeze(0)
    pts = F.pad(pts, (0, 0, 0, 0, seq_len // 2, seq_len // 2),
                mode='replicate')
    pts = pts.squeeze()
    print('TCN padded:', pts.shape)
    with torch.no_grad():
        pred = nettcn.evaluate(pts)
    pred = pred.cpu().numpy()
    return pred
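The double unsqueeze before F.pad is needed because replicate padding on a 5D tensor pads the last three dimensions; lifting the (frames, joints, coords) array to 5D lets the frame axis be extended by seq_len // 2 on both sides. A self-contained illustration of just that step:

# Standalone illustration of the replicate-padding step used in run_points.
import torch
import torch.nn.functional as F

seq_len = 129
pts = torch.randn(10, 17, 2)        # 10 frames, 17 joints, (x, y)
padded = F.pad(pts[None, None],     # lift to 5D: (1, 1, frames, joints, 2)
               (0, 0, 0, 0, seq_len // 2, seq_len // 2),
               mode='replicate')
print(padded.squeeze().shape)       # torch.Size([138, 17, 2])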
Code Example #17
def main_worker(gpu, ngpus_per_node):
    print('Use GPU:', gpu)
    dist.init_process_group(backend='nccl',
                            init_method='tcp://localhost:23456',
                            world_size=ngpus_per_node,
                            rank=gpu)
    print('Group initialized.')

    # initialize the network
    model_dnet = hrnet.DEKRRefine(config.num_pts, 128)
    x = torch.zeros(1, 3, 128, 128)
    with torch.no_grad():
        model_dnet(x)
    M.Saver(model_dnet.dekr).restore('./model_dekr/')
    model = loss.ModelWithLoss(model_dnet)
    saver = M.Saver(model)
    saver.restore('./model/')

    torch.cuda.set_device(gpu)
    model.cuda(gpu)
    model.train()
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
    print('Model initialized.')

    # get loader
    loader, sampler = datareader.get_train_dataloader()
    optim = torch.optim.Adam(model.parameters(), lr=config.init_lr)

    for e in range(config.max_epoch):
        print('Replica:%d Epoch:%d' % (gpu, e))
        sampler.set_epoch(e)
        for i, (img, heatmap, mask, offset,
                offset_weight) in enumerate(loader):
            optim.zero_grad()
            hm, off, refined, hm_loss, off_loss, hm_refined_loss = model(
                img, heatmap, mask, offset, offset_weight)
            loss_total = hm_loss + off_loss * 0.03 + hm_refined_loss
            loss_total.backward()
            optim.step()
            lr = optim.param_groups[0]['lr']

            if i % 100 == 0 and gpu == 0:
                visutil.vis_batch(img, hm, './outputs/%d_out.jpg' % i)
                visutil.vis_batch(img, refined, './outputs/%d_ref.jpg' % i)
                visutil.vis_batch(img, heatmap, './outputs/%d_gt.jpg' % i)

            if i % 20 == 0 and gpu == 0:
                curr_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
                print(
                    '%s  Replica:%d  Progress:%d/%d  LsHM:%.3e  LsRef:%.3e  LsOff:%.3e  LR:%.1e'
                    % (curr_time, gpu, i, len(loader), hm_loss,
                       hm_refined_loss, off_loss, lr))

        if e in config.lr_epoch:
            newlr = lr * 0.1
            for param_group in optim.param_groups:
                param_group['lr'] = newlr

        if e % config.save_interval == 0 and gpu == 0:
            stamp = random.randint(0, 1000000)
            saver.save('./model/%d_%d.pth' % (e, stamp))
Code Example #18
File: fracnet.py Project: ddddwee1/TorchSUL
        self.stage2 = Stage(channel_list[2], blocknum_list[1], drop_prob)
        self.stage3 = Stage(channel_list[3], blocknum_list[2], drop_prob)
        self.stage4 = Stage(channel_list[4], blocknum_list[3], drop_prob)

        self.bn1 = M.BatchNorm()
        self.fc1 = M.Dense(512, usebias=False, batch_norm=True)

    def forward(self, x):
        x = self.c1(x)
        x = self.stage1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.bn1(x)
        x = M.flatten(x)
        x = F.dropout(x, 0.4, self.training, False)
        x = self.fc1(x)
        return x


def Frac100():
    return FracNet([64, 32, 64, 128, 256], [1, 1, 7, 3], 0.5)


if __name__ == '__main__':
    net = Frac100()
    x = np.zeros([2, 3, 112, 112]).astype(np.float32)
    x = torch.from_numpy(x)
    y = net(x)
    M.Saver(net).save('./model/abc.pth')
Code Example #19
File: train.py Project: ddddwee1/NERF_pytorch
    rays = pickle.load(open('rays.pkl', 'rb'))
else:
    rays = []
    for i in tqdm(range(poses.shape[0])):
        pose = poses[i, :3, :4]
        rays_o, rays_d = get_rays(H, W, focal, pose)
        rays.append([rays_o, rays_d])
    pickle.dump(rays, open('rays.pkl', 'wb'))

# train loop
embed_fn, embeddirs_fn, net, net_fine = create_nerf()
embed_fn.cuda()
embeddirs_fn.cuda()
net.cuda()
net_fine.cuda()
saver = M.Saver(net)
saver.restore('./model/')
saver_fine = M.Saver(net_fine)
saver_fine.restore('./model_fine/')
optim = torch.optim.Adam([{
    'params': net.parameters()
}, {
    'params': net_fine.parameters()
}], 0.0005)

bar = tqdm(range(config.N_iters))
for i in bar:
    img_idx = np.random.choice(i_train)
    target = imgs[img_idx]
    # pose = poses[img_idx, :3, :4]
    # rays_o, rays_d = get_rays(H, W, focal, pose) # can be moved to pre-computed
Code Example #20
    return pts, scores


# initialize
model = network.DensityNet(config.density_num_layers, config.density_channels, config.density_level,\
      config.gcn_layers, config.gcn_channels, config.head_layernum, config.head_chn, config.upsample_layers, config.upsample_chn)

# coco = COCO('person_keypoints_val2017.json')
# ids = list(coco.imgs.keys())

with torch.no_grad():
    x = np.float32(
        np.random.random(size=[1, 3, config.inp_size, config.inp_size]))
    x = torch.from_numpy(x)
    model(x)
    M.Saver(model).restore('model_coco/')
    model.eval()
    model.cuda()

    imgname = '000000410650.jpg'
    img = cv2.imread(imgname)

    pts, scores = test_img(img, model)

    # results = {}
    # for i in tqdm(ids):
    # 	fname = './val2017/%012d.jpg'%i
    # 	img = cv2.imread(fname)
    # 	pts, scores = test_img(img, model)
    # 	results[i] = [pts, scores]
Code Example #21
File: trainr100.py Project: ddddwee1/TorchSUL
    BackboneRes100 = resnet.Res100()
    classifier = losses.DistributedClassifier(reader.max_label, devices)

    # init
    dumb_x = torch.from_numpy(np.float32(np.zeros([2, 3, 112, 112])))
    dumb_y = torch.from_numpy(np.int64(np.zeros(2)))
    _ = BackboneRes100(dumb_x)
    _ = classifier(_, dumb_y)

    # restore
    if devices is not None:
        BackboneRes100 = nn.DataParallel(BackboneRes100,
                                         device_ids=devices).cuda()
        classifier = classifier.cuda()
    saver = M.Saver(BackboneRes100)
    saver_classifier = M.Saver(classifier)
    saver.restore('./model_r100/')
    saver_classifier.restore('./classifier/')

    # define optim
    optim = torch.optim.SGD([{
        'params': BackboneRes100.parameters()
    }, {
        'params': classifier.parameters()
    }],
                            lr=0.1,
                            momentum=0.9,
                            weight_decay=0.0005)
    classifier.train()
    BackboneRes100.train()
Code Example #22
import os
import cv2
import glob
import config
import pickle
import numpy as np
from . import network
from tqdm import tqdm
from TorchSUL import Model as M
import torch

model_dnet = network.get_network()
M.Saver(model_dnet).restore(config.estimator_path)
model_dnet.eval()
model_dnet.cuda()

model_guided = network.get_network_guided()
M.Saver(model_guided).restore(config.guided_estimator_path)
model_guided.eval()
model_guided.cuda()


def _pre_process(img):
    img = cv2.resize(img, (config.inp_size, config.inp_size))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = np.float32(img)
    img = img / 255
    img = img - np.float32([0.485, 0.456, 0.406])
    img = img / np.float32([0.229, 0.224, 0.225])
    img = np.transpose(img, [2, 0, 1])
    return img
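The constants in _pre_process are the standard ImageNet mean and standard deviation. For reference, the same normalization written with torchvision (an equivalent sketch, not what this project uses):

# Equivalent preprocessing via torchvision; the cv2 BGR->RGB conversion above
# is still required because the mean/std here assume RGB channel order.
from torchvision import transforms

_transform = transforms.Compose([
    transforms.ToTensor(),          # HWC uint8 -> CHW float in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])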
Code Example #23
    pts_init = grouping(vals, indk, tagk)
    if len(pts_init) == 0:
        return [], [], [], [], []
    pts_adjusted, scores = adjust(pts_init, hmap)
    pts_refined = refine(pts_adjusted, hmap, tags)
    pts_final = get_final_preds([pts_refined], center, scale,
                                [hmap.size(3), hmap.size(2)])
    scores = scores.cpu().detach().numpy().tolist()
    return pts_final, scores, hmap, img, tags


if __name__ == '__main__':
    import custom.network
    #%% load model
    model = custom.network.DensityNet(config.density_num_layers, config.density_channels, config.density_level,\
          config.gcn_layers, config.gcn_channels, config.head_layernum, config.head_chn, config.upsample_layers, config.upsample_chn)
    x = np.float32(np.random.random(size=[1, 3, 512, 512]))
    x = torch.from_numpy(x)
    with torch.no_grad():
        model(x)
    model.bn_eps(1e-5)
    saver = M.Saver(model)
    saver.restore('./model/')
    model.eval()
    model.cuda()

    #%% Main func
    pickle.dump(pts_refined, open('ref.pkl', 'wb'))

    print('abc')
Code Example #24
import scipy.io as sio
import numpy as np
import pickle
from lib.models import networkadapt
import torch
import TorchSUL.Model as M

net = networkadapt.AdaptNet()
pts_dumb = torch.zeros(2, 17 * 3)
net(pts_dumb)
M.Saver(net).restore('./ckpts/model_adapt/')
net.cuda()

data_dict = {}


def get_pred(seq_idx, frame_idx):
    global data_dict
    seq_idx = seq_idx + 1
    if not seq_idx in data_dict:
        data = pickle.load(open('mupots/pred_inte/%d.pkl' % seq_idx, 'rb'))
        data_dict[seq_idx] = np.float32(data)
    pts = data_dict[seq_idx][frame_idx]
    # net runs on the GPU, so the numpy slice must become a CUDA tensor first
    pts = torch.from_numpy(pts).cuda()
    with torch.no_grad():
        pts = net(pts).cpu().numpy()
    return pts
Code Example #25
import time

if __name__=='__main__':

	start_time = time.time()

	### Stage 1: Do bottom-up estimation
	model = networkbtmup.HR3DNet(config.head_layernum, config.head_chn, config.upsample_layers, config.upsample_chn)
	hmapGen = hmaputil.HMapGenerator('./mupots/est_p2ds/') # detection is applied on 1024 size

	with torch.no_grad():
		# initialize model
		x = torch.zeros(1,3+17,config.inp_size,config.inp_size)
		model(x)
		M.Saver(model).restore('./ckpts/model_btmup/')
		model.eval()
		model.cuda()

		res_all = {}
		for i in range(20):
			print('Seq:',i)
			imgs = sorted(glob.glob('./MultiPersonTestSet/TS%d/*.jpg'%(i+1)))
			buff = []
			for frame_idx,imgname in enumerate(tqdm(sorted(imgs))):
				imgname = imgname.replace('\\','/')  # for windows users
				img = cv2.imread(imgname)
				pts, scores, roots, rels = btmuputil.run_pipeline(img, model, hmap_generator=hmapGen, vid_idx=i, frame_idx=frame_idx)
				pts_final = btmuputil.get_pts3d(pts, roots, rels)
				buff.append([imgname, pts_final, scores])
			res_all[i+1] = buff
Code Example #26
def main_worker(gpu, world_size):
	BSIZE = 256
	FORMAT = '%(asctime)-15s  Replica:%(name)s  %(message)s'
	logging.basicConfig(format=FORMAT)
	logger = logging.getLogger('%d'%gpu)
	logger.setLevel(10)
	logger.info('Initialize process.')
	torch.cuda.set_device(gpu)
	dist.init_process_group(backend='nccl', init_method='tcp://localhost:23456', world_size=world_size, rank=gpu)
	
	net = resnet.Res100()
	dumb_x = torch.from_numpy(np.float32(np.zeros([2,3,112,112])))
	with torch.no_grad():
		net(dumb_x)
	saver = M.Saver(net)
	saver.restore('./model/')
	net.cuda(gpu)
	net = torch.nn.parallel.DistributedDataParallel(net, device_ids=[gpu])
	net.train()
	logger.info('Model initialization finished.')

	loader, max_label = datareader.get_train_dataloader(BSIZE, distributed=True)
	classifier = losses.SplitClassifier(max_label, world_size, gpu, logger)
	# classifier = losses.TotalClassifier(max_label, gpu, logger)
	dumb_x = torch.from_numpy(np.float32(np.zeros([2,512])))
	dumb_y = torch.from_numpy(np.int64(np.zeros(2)))
	with torch.no_grad():
		classifier(dumb_x, dumb_y)
	classifier.cuda(gpu)
	# classifier = torch.nn.parallel.DistributedDataParallel(classifier, device_ids=[gpu])
	optim = torch.optim.SGD([{'params':net.parameters(), 'weight_decay':0.0005}, {'params':classifier.parameters(), 'weight_decay':0.0}], lr=0.1, momentum=0.9)

	for e in range(16):
		logger.info('Epoch:%d'%e)
		for i, (img, label) in enumerate(loader):
			label = label.cuda(gpu)
			labels = [torch.zeros_like(label) for _ in range(world_size)]
			dist.all_gather(labels, label)
			labels = torch.cat(labels, dim=0)
			# labels = label
			optim.zero_grad()
			feat = net(img)
			feat_list = [torch.zeros_like(feat) for _ in range(world_size)]
			dist.all_gather(feat_list, feat)
			feat_cat = torch.cat(feat_list, dim=0)
			feat_cat = feat_cat.requires_grad_()
			# logger.info('%s  %s'%(feat_cat.shape, labels.shape))
			loss, correct = classifier(feat_cat, labels, m2=0.5)
			# loss, correct = classifier(feat, labels, m2=0.0)
			loss = loss.sum()
			# logger.info(f'{loss}')
			loss.backward()
			
			# logger.info('%s'%feat_cat.grad)
			dist.all_reduce(feat_cat.grad, dist.ReduceOp.SUM)
			grad_feat = feat_cat.grad[BSIZE*gpu : BSIZE*gpu+BSIZE] 
			# logger.info('%s  %s'%(feat.shape, grad_feat.shape))
			feat.backward(gradient=grad_feat)
			# logger.info('%s'%net.module.c1.conv.weight.max())
			optim.step()

			dist.all_reduce(loss)
			dist.all_reduce(correct)
			# logger.info(f'{loss}  {feat_cat.shape}')
			# loss = loss / feat_cat.shape[0]
			# acc = correct / feat_cat.shape[0]
			loss = loss / BSIZE / world_size
			acc = correct / BSIZE / world_size
			lr = optim.param_groups[0]['lr']
			if gpu==0 and i%20==0:
				logger.info('Iter:%d/%d  Loss:%.4f Acc:%.4f LR:%.1e'%(i,len(loader),loss, acc, lr))

		if e%4==0 and e>0:
			newlr = lr * 0.1 
			for param_group in optim.param_groups:
				param_group['lr'] = newlr

		if gpu==0:
			stamp = random.randint(0, 1000000)
			saver.save('./model/%d_%d.pth'%(e, stamp))
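The training loop above works around the fact that dist.all_gather does not propagate gradients: the gathered features are re-marked with requires_grad_(), the classifier loss is backpropagated into that copy, the gradients are summed across replicas with all_reduce, and the local slice is fed back into the backbone graph via feat.backward(gradient=...). A single-process sketch of that routing trick (no process group needed):

# Illustration only: backprop into a detached copy, then route the resulting
# gradient back into the live graph, exactly as the loop above does across
# replicas.
import torch

feat = torch.randn(4, 512, requires_grad=True)     # stands in for net(img)
feat_cat = feat.detach().clone().requires_grad_()  # stands in for the all_gather result
loss = (feat_cat ** 2).sum()                       # stands in for the classifier loss
loss.backward()                                    # fills feat_cat.grad
feat.backward(gradient=feat_cat.grad)              # push the gradient into the graph
print(feat.grad.shape)                             # torch.Size([4, 512])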
Code Example #27
			return imgs 

if __name__=='__main__':
	DIM = 512
	devices = (0,1,2,3,4,5)
	with torch.no_grad():
		mtx_prev = torch.load('./classifier_ft/classifier_40_200.pth')
		print('Previous classifier shape:', mtx_prev.shape)
		feats = []

		net = resnet.Res50(dim=DIM)
		dumb_x = torch.from_numpy(np.float32(np.zeros([2,3,128,128])))
		zeros = torch.zeros(2, 1, 1, 1)
		
		net(dumb_x, zeros)
		saver = M.Saver(net)
		saver.restore('./model_ft/')

		net = torch.nn.DataParallel(net, device_ids=devices).cuda()
		net.eval()
		net.cuda()

		reader = Reader()
		zeros = zeros.cuda()

		while 1:
			imgs = reader.get_next()
			if imgs is None:
				break 
			imgs = torch.from_numpy(imgs)
			feat = net(imgs.cuda(), zeros)
Code Example #28
    source_buffs.append(p)
for p in model.offset_final_layer.named_parameters():
    source_params.append(p)
for p in model.offset_final_layer.named_buffers():
    name = p[0]
    if 'tracked' in name:
        continue
    source_buffs.append(p)

for ps, pt in zip(source_params, target_params):
    # print(pt[0], ps[0])
    pt[1].data[:] = ps[1].data[:]
for ps, pt in zip(source_buffs, target_buffs):
    # print(pt[0], ps[0])
    pt[1].data[:] = ps[1].data[:]

print(len(source_params), len(source_buffs))
print(len(target_params), len(target_buffs))

ylist = model.forward_test(x)
# print(ylist[0].shape, ylist[1].shape, ylist[2].shape, ylist[3].shape)
print(ylist[1], ylist[1].shape)
# print(ylist.shape)

# net.bn_eps(1e-5)
y2 = net_dekr(x)
print(y2[1], y2[1].shape)
print()

M.Saver(net_dekr).save('./model_dekr/model_dekr.pth')