Code example #1
def application(netR, optimizer, data_loader, data_length, epoch):

    # 3.1 switch to evaluation mode
    torch.cuda.synchronize()

    netR.eval()

    out_mse = 0.0
    timer = time.time()
    for i, data in enumerate(tqdm(data_loader, 0)):
        if len(data[0]) == 1:
            continue
        torch.cuda.synchronize()
        # 3.1.1 load the input points (no ground truth is used here)
        points = data
        points = points.cuda()

        inputs_level1, inputs_level1_center = group_points(points, opt)  # opt comes from module scope
        inputs_level1 = Variable(inputs_level1, requires_grad=False)
        inputs_level1_center = Variable(inputs_level1_center,
                                        requires_grad=False)

        # 3.1.2 compute output
        optimizer.zero_grad()
        estimation = netR(inputs_level1, inputs_level1_center)

        torch.cuda.synchronize()

        # 3.1.3 save the network outputs
        outputs = estimation.data

        # note: the same file is rewritten every batch, so only the last batch is kept
        np.save('./offline/results/out%d.npy' % epoch, outputs.cpu())

    # time taken
    torch.cuda.synchronize()
    timer = time.time() - timer
    timer = timer / data_length
    print('==> time to process 1 sample = %f (ms)' % (timer * 1000))

    # print mse (never accumulated in this snippet, so this always reports 0)
    out_mse = out_mse / data_length
    print('mean-square error of 1 sample: %f, #train_data = %d' %
          (out_mse, data_length))

    return out_mse
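Note that `application` reads `opt` from module scope when it calls `group_points(points, opt)`. A minimal sketch of the option object it appears to expect, using field names from the argparse setups in the later examples (the values here are illustrative assumptions, not the project's defaults):

from argparse import Namespace

# Illustrative values only; the real code builds this with argparse.
opt = Namespace(ball_radius=0.16, knn_K=64, sample_num_level1=512,
                sample_num_level2=128, INPUT_FEATURE_NUM=8, SAMPLE_NUM=2048)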
Code example #2
File: match2.py Project: thisisbilly1/adj-innolux
	def match(self, img, template=None, templatename=""):
		if template is not None:
			self.template = template
		elif self.template is None:
			return

		templabel = []
		
		self.templateheight,self.templatewidth,_=self.template.shape
		
		# run template matching on the grayscale images
		gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
		gray_template = cv2.cvtColor(self.template,cv2.COLOR_BGR2GRAY)
		
		res = cv2.matchTemplate(gray, gray_template, cv2.TM_CCOEFF_NORMED)#TM_SQDIFF_NORMED
		min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
		
		# keep every location whose normalized correlation is at least the slider threshold
		min_thresh = self.accuracyslider.slideValue  # e.g. 0.9
		match_locations = np.where(res >= min_thresh)
		
		match_locations = (np.array(match_locations).T).tolist()
		match_locations = group_points(match_locations,10)
		
		# get the defaults from the actionbar
		try:
			labeltext = self.controller.actionbar.classes[self.controller.actionbar.defaultmatch]
			labelcolor = hsv_to_rgb(self.controller.actionbar.colors[self.controller.actionbar.defaultmatch].slideValue / 360, 1, 1)
		except Exception:
			labeltext = "NONE"
			labelcolor = (0, 0, 0)
			
		for m in match_locations:
			l = label(self.world, self.controller, m[1], m[0],
					self.templatewidth, self.templateheight, labeltext,
					(0, 255, 0), accuracy=res[m[0]][m[1]] * 100,
					template=templatename)
			templabel.append(l)

		self.predict_missing_boxes(labeltext, templabel, templatename=templatename)
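The snippet above calls `group_points(match_locations, 10)` to collapse clusters of template-match hits into single detections, but does not include the helper itself. A plausible minimal sketch, assuming the second argument is a pixel distance (this is a guess at the behavior, not the project's actual implementation):

def group_points(locations, dist):
    # keep a hit only if it is more than `dist` pixels away (in y or x)
    # from every hit we have already kept
    kept = []
    for y, x in locations:
        if all(abs(y - ky) > dist or abs(x - kx) > dist for ky, kx in kept):
            kept.append((y, x))
    return kept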
Code example #3
File: automate.py Project: hibikiledo/coc-dock
points = []
for output_file in listdir(TEMP_PATH):
    # open each temp file of colon-separated "x:y" records
    with open('/'.join([TEMP_PATH, output_file]), newline='') as of:
        reader = csv.reader(of, delimiter=':')
        # create a Point per record (cast to int so the %-5d formatting works)
        for rec in reader:
            points.append(Point(int(rec[0]), int(rec[1])))

print("[INFO] List of points before grouping")
for p in points:
    print("     %-5d : %-5d" % (p.x, p.y))

# Perform point grouping
grouped_points = group_points(points)

print("[INFO] List of points after grouping")
for p in grouped_points:
    print("     %-5d : %-5d" % (p.x, p.y))

# Tap according to grouped points
for p in grouped_points:
    print("[INFO] Tapping at", "%-5d : %-5d" % (p.x, p.y))
    adb.tap(p.x, p.y)
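Here `group_points(points)` merges taps that land close together so each cluster is tapped once. The helper is not shown; a hypothetical sketch that averages points within a fixed radius (the `RADIUS` value and the averaging strategy are assumptions, not the project's code):

RADIUS = 20  # assumed merge distance in pixels

def group_points(points):
    groups = []
    for p in points:
        # join the first group whose anchor is within RADIUS, else start one
        for g in groups:
            if abs(g[0].x - p.x) <= RADIUS and abs(g[0].y - p.y) <= RADIUS:
                g.append(p)
                break
        else:
            groups.append([p])
    # collapse each group to its integer centroid
    return [Point(sum(p.x for p in g) // len(g),
                  sum(p.y for p in g) // len(g)) for g in groups]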
Code example #4
torch.cuda.synchronize()

netR.eval()
test_mse = 0.0
test_wld_err = 0.0
timer = time.time()
for i, data in enumerate(tqdm(test_dataloader, 0)):
    torch.cuda.synchronize()
    # 3.1 load inputs and targets
    points, volume_length, gt_pca, gt_xyz = data
    gt_pca = Variable(gt_pca, volatile=True).cuda()
    points, volume_length, gt_xyz = points.cuda(), volume_length.cuda(), gt_xyz.cuda()

    # points: B * 1024 * 6
    # (Variable(..., volatile=True) is the pre-0.4 PyTorch no-grad idiom)
    inputs_level1, inputs_level1_center = group_points(points, opt)
    inputs_level1 = Variable(inputs_level1, volatile=True)
    inputs_level1_center = Variable(inputs_level1_center, volatile=True)

    # 3.2 compute output
    estimation = netR(inputs_level1, inputs_level1_center)
    loss = criterion(estimation, gt_pca) * opt.PCA_SZ
    torch.cuda.synchronize()
    test_mse = test_mse + loss.data[0] * len(points)

    # 3.3 compute error in world cs
    outputs_xyz = test_data.PCA_mean.expand(estimation.data.size(0),
                                            test_data.PCA_mean.size(1))
    outputs_xyz = torch.addmm(outputs_xyz, estimation.data,
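The final call is cut off by the page. The surrounding lines expand the PCA mean per batch element, so the truncated addmm evidently finishes a PCA back-projection: torch.addmm(M, A, B) computes M + A @ B, i.e. the PCA mean plus the predicted coefficients times the basis. A self-contained sketch with toy shapes (the tensor names are illustrative, not the project's):

import torch

mean = torch.zeros(4, 63)    # B x (3 * n_joints)
codes = torch.randn(4, 30)   # B x PCA_SZ, i.e. the network estimate
basis = torch.randn(30, 63)  # PCA_SZ x (3 * n_joints)
xyz = torch.addmm(mean, codes, basis)  # world-coordinate reconstruction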
Code example #5
def main(args=None):
	parser = argparse.ArgumentParser(description = "Training")

	parser.add_argument('--batchSize', type=int, default=48, help='input batch size')
	parser.add_argument('--nepoch', type=int, default=80, help='number of epochs to train for')
	parser.add_argument('--INPUT_FEATURE_NUM', type=int, default = 8,  help='number of input point features')
	parser.add_argument('--temperal_num', type=int, default = 5,  help='number of temporal segments')
	parser.add_argument('--pooling', type=str, default='concatenation', help='how to aggregate temporal split features: vlad | concatenation | bilinear')
	parser.add_argument('--dataset', type=str, default='ntu60', help='which dataset to train on: ntu120 | ntu60')


	parser.add_argument('--weight_decay', type=float, default=0.0008, help='weight decay (SGD only)')
	parser.add_argument('--learning_rate', type=float, default=0.0005, help='learning rate at t=0')
	parser.add_argument('--momentum', type=float, default=0.9, help='momentum (SGD only)')
	parser.add_argument('--workers', type=int, default=0, help='number of data loading workers')

	parser.add_argument('--root_path', type=str, default='/UCLA_point/UCLA_vsize40_feature_2048_ff_rawdi_2',  help='preprocess folder')# 3DV points path
	parser.add_argument('--depth_path', type=str, default='3DV_construction/UCLA_point/UCLA_bboxed_2048',  help='raw_depth_png')# appearance points path
	parser.add_argument('--save_root_dir', type=str, default='results_ucla/UCLA_v40_MultiStream_rawdi2',  help='output folder')
	parser.add_argument('--model', type=str, default = '',  help='model name for training resume')
	parser.add_argument('--optimizer', type=str, default = '',  help='optimizer name for training resume')
	parser.add_argument('--ngpu', type=int, default=1, help='# GPUs')
	parser.add_argument('--main_gpu', type=int, default=0, help='main GPU id') # CUDA_VISIBLE_DEVICES=0 python train.py

	
	parser.add_argument('--learning_rate_decay', type=float, default=1e-7, help='learning rate decay')

	parser.add_argument('--size', type=str, default='full', help='how many samples do we load: small | full')
	parser.add_argument('--SAMPLE_NUM', type=int, default = 2048,  help='number of sample points')

	parser.add_argument('--Num_Class', type=int, default = 10,  help='number of outputs')
	parser.add_argument('--knn_K', type=int, default = 64,  help='K for knn search')
	parser.add_argument('--sample_num_level1', type=int, default = 512,  help='number of first layer groups')
	parser.add_argument('--sample_num_level2', type=int, default = 128,  help='number of second layer groups')
	parser.add_argument('--ball_radius', type=float, default=0.11, help='square of radius for ball query in level 1')  # 0.025 -> 0.05 for depth
	parser.add_argument('--ball_radius2', type=float, default=0.24, help='square of radius for ball query in level 2')  # 0.08 -> 0.01 for depth


	opt = parser.parse_args()
	print(opt)



	torch.cuda.set_device(opt.main_gpu)

	opt.manualSeed = 1
	random.seed(opt.manualSeed)
	torch.manual_seed(opt.manualSeed)

	try:
		os.makedirs(opt.save_root_dir)
	except OSError:
		pass
	logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%Y/%m/%d %H:%M:%S', filename=os.path.join(opt.save_root_dir, 'print.log'), level=logging.INFO)
	logging.info('======================================================')

	os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'

	torch.backends.cudnn.benchmark = True
	#torch.backends.cudnn.deterministic = True
	torch.cuda.empty_cache()

	data_train = NTU_RGBD(root_path = opt.root_path,opt=opt,
		test = False,
		Transform = True
		)
	train_loader = DataLoader(dataset = data_train, batch_size = opt.batchSize, shuffle = True, drop_last = True)
	data_val = NTU_RGBD(root_path = opt.root_path, opt=opt,
		test = True,
		Transform = False
		)
	val_loader = DataLoader(dataset = data_val, batch_size = 24)

	netR = PointNet_Plus(opt)
	# resume from a pretrained checkpoint
	netR.load_state_dict(torch.load('pointnet_para_44.pth'), strict=False)
	netR = torch.nn.DataParallel(netR).cuda()
	print(netR)

	criterion = torch.nn.CrossEntropyLoss().cuda()
	optimizer = torch.optim.Adam(netR.parameters(), lr=opt.learning_rate, betas = (0.5, 0.999), eps=1e-06)
	scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)


	for epoch in range(opt.nepoch):
		scheduler.step(epoch)
		
		# switch to train mode
		torch.cuda.synchronize()
		netR.train()
		acc = 0.0
		loss_sigma = 0.0
		total1 = 0.0
		timer = time.time()

		for i, data in enumerate(tqdm(train_loader, 0)):
			if len(data[0])==1:
				continue
			torch.cuda.synchronize()
			# 1 load inputs and target
			points_xyzc, points_1xyz, points2_xyz, points3_xyz, label, vid_name = data
			points_xyzc, points_1xyz, points2_xyz, points3_xyz, label = points_xyzc.cuda(), points_1xyz.cuda(), points2_xyz.cuda(), points3_xyz.cuda(), label.cuda()
			# points: B*2048*4; target: B*1
			# jitter the ball radius (note: this mutates opt.ball_radius, so the jitter accumulates across iterations)
			opt.ball_radius = opt.ball_radius + random.uniform(-0.02, 0.02)
			xt, yt = group_points_pro(points_xyzc, opt)

			xs1, ys1 = group_points(points_1xyz, opt)
			xs2, ys2 = group_points(points2_xyz, opt)
			xs3, ys3 = group_points(points3_xyz, opt)
			# 2 compute outputs
			prediction = netR(xt, xs1, xs2, xs3, yt, ys1, ys2, ys3)
			loss = criterion(prediction, label)

			optimizer.zero_grad()

			# compute gradient and do SGD step
			loss.backward()
			optimizer.step()
			torch.cuda.synchronize()

			# update training error
			loss_sigma += loss.item()
			_, predicted = torch.max(prediction.data, 1)
			acc += (predicted==label).cpu().sum().numpy()
			total1 += label.size(0)

		
		acc_avg = acc/total1
		loss_avg = loss_sigma/total1
		print('======>>>>> Online epoch: #%d, lr=%f, Acc=%f, avg_loss=%f <<<<<======' % (epoch, scheduler.get_lr()[0], acc_avg, loss_avg))
		# evaluate after every epoch
		torch.cuda.synchronize()
		netR.eval()
		conf_mat = np.zeros([opt.Num_Class, opt.Num_Class])
		acc = 0.0
		loss_sigma = 0.0

		for i, data in enumerate(tqdm(val_loader)):
			torch.cuda.synchronize()
			points_xyzc, points_1xyz, points2_xyz, points3_xyz, label, vid_name = data
			points_xyzc, points_1xyz, points2_xyz, points3_xyz, label = points_xyzc.cuda(), points_1xyz.cuda(), points2_xyz.cuda(), points3_xyz.cuda(), label.cuda()
			# points: B*2048*4; target: B*1
			opt.ball_radius = opt.ball_radius + random.uniform(-0.02, 0.02)
			xt, yt = group_points_pro(points_xyzc, opt)

			xs1, ys1 = group_points(points_1xyz, opt)
			xs2, ys2 = group_points(points2_xyz, opt)
			xs3, ys3 = group_points(points3_xyz, opt)

			# compute outputs and accumulate the confusion matrix
			prediction = netR(xt, xs1, xs2, xs3, yt, ys1, ys2, ys3)
			loss = criterion(prediction, label)
			_, predicted = torch.max(prediction.data, 1)
			loss_sigma += loss.item()

			for j in range(len(label)):
				cate_i = label[j].cpu().numpy()
				pre_i = predicted[j].cpu().numpy()
				conf_mat[cate_i, pre_i] += 1.0

		print('UCLA:{:.2%}  ===Average loss:{:.6%}'.format(conf_mat.trace() / conf_mat.sum(), loss_sigma / (i + 1) / 16))
		logging.info('{} --epoch{} UCLA:{:.2%}  ===Average loss:{:.6%}'.format('Valid', epoch, conf_mat.trace() / conf_mat.sum(), loss_sigma / (i + 1) / 16))

		torch.save(netR.module.state_dict(), '%s/pointnet_para_%d.pth' % (opt.save_root_dir, epoch))
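One caveat in both loops above: the radius jitter is written back into opt.ball_radius, so the perturbations accumulate and the radius random-walks away from its configured value over training. A bounded alternative (a suggested fix, not the project's code):

import random

base_radius = 0.11  # opt.ball_radius as configured

def jittered_radius():
    # jitter around the fixed base instead of mutating it in place
    return base_radius + random.uniform(-0.02, 0.02)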
Code example #6
def main(args=None):
    parser = argparse.ArgumentParser(description="Training")

    parser.add_argument('--batchSize',
                        type=int,
                        default=64,
                        help='input batch size')
    parser.add_argument('--nepoch',
                        type=int,
                        default=50,
                        help='number of epochs to train for')
    parser.add_argument('--INPUT_FEATURE_NUM',
                        type=int,
                        default=8,
                        help='number of input point features')
    parser.add_argument('--temperal_num',
                        type=int,
                        default=3,
                        help='number of temporal segments')
    parser.add_argument(
        '--pooling',
        type=str,
        default='concatenation',
        help=
        'how to aggregate temporal split features: vlad | concatenation | bilinear'
    )
    parser.add_argument(
        '--dataset',
        type=str,
        default='ntu120',
        help='which dataset to train on: ntu120 | ntu60')

    parser.add_argument('--weight_decay',
                        type=float,
                        default=0.0008,
                        help='weight decay (SGD only)')
    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.001,
                        help='learning rate at t=0')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        help='momentum (SGD only)')
    parser.add_argument('--workers',
                        type=int,
                        default=0,
                        help='number of data loading workers')

    parser.add_argument(
        '--root_path',
        type=str,
        default=
        '/data/data3/wangyancheng/pointcloudData/NTU_voxelz40_feature_2048',
        help='preprocess folder')
    parser.add_argument('--depth_path',
                        type=str,
                        default='/data/data3/wangyancheng/ntu120dataset/',
                        help='raw_depth_png')
    parser.add_argument(
        '--save_root_dir',
        type=str,
        default='results_ntu120/NTU60_v40_cv_notransform_MultiStream',
        help='output folder')
    parser.add_argument('--model',
                        type=str,
                        default='',
                        help='model name for training resume')
    parser.add_argument('--optimizer',
                        type=str,
                        default='',
                        help='optimizer name for training resume')

    parser.add_argument('--ngpu', type=int, default=1, help='# GPUs')
    parser.add_argument(
        '--main_gpu', type=int, default=0,
        help='main GPU id')  # CUDA_VISIBLE_DEVICES=0 python train.py

    parser.add_argument('--learning_rate_decay',
                        type=float,
                        default=1e-7,
                        help='learning rate decay')

    parser.add_argument('--size',
                        type=str,
                        default='full',
                        help='how many samples do we load: small | full')
    parser.add_argument('--SAMPLE_NUM',
                        type=int,
                        default=2048,
                        help='number of sample points')

    parser.add_argument('--Num_Class',
                        type=int,
                        default=120,
                        help='number of outputs')
    parser.add_argument('--knn_K',
                        type=int,
                        default=64,
                        help='K for knn search')
    parser.add_argument('--sample_num_level1',
                        type=int,
                        default=512,
                        help='number of first layer groups')
    parser.add_argument('--sample_num_level2',
                        type=int,
                        default=128,
                        help='number of second layer groups')
    parser.add_argument('--ball_radius',
                        type=float,
                        default=0.16,
                        help='square of radius for ball query in level 1'
                        )  # 0.025 -> 0.05 for depth
    parser.add_argument('--ball_radius2',
                        type=float,
                        default=0.25,
                        help='square of radius for ball query in level 2'
                        )  # 0.08 -> 0.01 for depth

    opt = parser.parse_args()
    print(opt)

    torch.cuda.set_device(opt.main_gpu)

    opt.manualSeed = 1
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    try:
        os.makedirs(opt.save_root_dir)
    except OSError:
        pass

    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'

    torch.backends.cudnn.benchmark = True
    #torch.backends.cudnn.deterministic = True
    torch.cuda.empty_cache()

    data_train = NTU_RGBD(root_path=opt.root_path,
                          opt=opt,
                          DATA_CROSS_VIEW=True,
                          full_train=True,
                          validation=False,
                          test=False,
                          Transform=False)
    train_loader = DataLoader(dataset=data_train,
                              batch_size=opt.batchSize,
                              shuffle=True,
                              drop_last=True,
                              num_workers=8)
    data_val = NTU_RGBD(root_path=opt.root_path,
                        opt=opt,
                        DATA_CROSS_VIEW=True,
                        full_train=False,
                        validation=False,
                        test=True,
                        Transform=False)
    val_loader = DataLoader(dataset=data_val, batch_size=24, num_workers=8)

    netR = PointNet_Plus(opt)

    netR = torch.nn.DataParallel(netR).cuda()
    netR.cuda()
    print(netR)

    criterion = torch.nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.Adam(netR.parameters(),
                                 lr=opt.learning_rate,
                                 betas=(0.5, 0.999),
                                 eps=1e-06)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=8,
                                                gamma=0.5)

    for epoch in range(opt.nepoch):
        scheduler.step(epoch)

        # switch to train mode
        torch.cuda.synchronize()
        netR.train()
        acc = 0.0
        loss_sigma = 0.0
        total1 = 0.0
        timer = time.time()

        for i, data in enumerate(tqdm(train_loader, 0)):
            if len(data[0]) == 1:
                continue
            torch.cuda.synchronize()
            # 1 load inputs and target
            ## 3DV points and 3 temporal segment appearance points
            ## points_xyzc: B*2048*8;points_1xyz:B*2048*3  target: B*1
            points_xyzc, points_1xyz, points2_xyz, points3_xyz, label, v_name = data
            points_xyzc, points_1xyz, points2_xyz, points3_xyz, label = \
                points_xyzc.cuda(), points_1xyz.cuda(), points2_xyz.cuda(), points3_xyz.cuda(), label.cuda()

            ## group for 3DV points
            opt.ball_radius = opt.ball_radius + random.uniform(-0.02, 0.02)
            xt, yt = group_points_3DV(points_xyzc, opt)
            ## group for appearance points (3 seg)
            xs1, ys1 = group_points(points_1xyz, opt)
            xs2, ys2 = group_points(points2_xyz, opt)
            xs3, ys3 = group_points(points3_xyz, opt)

            prediction = netR(xt, xs1, xs2, xs3, yt, ys1, ys2, ys3)

            loss = criterion(prediction, label)
            optimizer.zero_grad()

            loss.backward()
            optimizer.step()
            torch.cuda.synchronize()
            # update training error
            loss_sigma += loss.item()
            _, predicted = torch.max(prediction.data, 1)

            acc += (predicted == label).cpu().sum().numpy()
            total1 += label.size(0)

        acc_avg = acc / total1
        loss_avg = loss_sigma / total1
        print(
            '======>>>>> Online epoch: #%d, lr=%f,Acc=%f,avg_loss=%f  <<<<<======'
            % (epoch, scheduler.get_lr()[0], acc_avg, loss_avg))
        #print("Epoch: " + str(epoch) + " Iter: " + str(i) + " Acc: " + ("%.2f" % acc_avg) +" Classification Loss: " + str(loss_avg))

        if epoch > 5:
            # evaluate mode
            torch.cuda.synchronize()
            netR.eval()
            conf_mat = np.zeros([opt.Num_Class, opt.Num_Class])
            conf_mat60 = np.zeros([60, 60])
            acc = 0.0
            loss_sigma = 0.0

            for i, data in enumerate(tqdm(val_loader)):
                torch.cuda.synchronize()

                points_xyzc, points_1xyz, points2_xyz, points3_xyz, label, v_name = data
                points_xyzc, points_1xyz, points2_xyz, points3_xyz, label = \
                    points_xyzc.cuda(), points_1xyz.cuda(), points2_xyz.cuda(), points3_xyz.cuda(), label.cuda()

                opt.ball_radius = opt.ball_radius + random.uniform(-0.02, 0.02)
                xt, yt = group_points_3DV(points_xyzc, opt)

                xs1, ys1 = group_points(points_1xyz, opt)
                xs2, ys2 = group_points(points2_xyz, opt)
                xs3, ys3 = group_points(points3_xyz, opt)

                prediction = netR(xt, xs1, xs2, xs3, yt, ys1, ys2, ys3)

                loss = criterion(prediction, label)
                _, predicted60 = torch.max(prediction.data[:, 0:60], 1)
                _, predicted = torch.max(prediction.data, 1)
                loss_sigma += loss.item()

                for j in range(len(label)):
                    cate_i = label[j].cpu().numpy()
                    pre_i = predicted[j].cpu().numpy()
                    conf_mat[cate_i, pre_i] += 1.0
                    if cate_i < 60:
                        pre_i60 = predicted60[j].cpu().numpy()
                        conf_mat60[cate_i, pre_i60] += 1.0

            print('NTU120:{:.2%} NTU60:{:.2%}===Average loss:{:.6%}'.format(
                conf_mat.trace() / conf_mat.sum(),
                conf_mat60.trace() / conf_mat60.sum(),
                loss_sigma / (i + 1) / 2))

        torch.save(netR.module.state_dict(),
                   '%s/pointnet_para_%d.pth' % (opt.save_root_dir, epoch))
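The printed accuracies come straight from the confusion matrices: correct predictions sit on the diagonal, so accuracy is the trace divided by the total count. A tiny standalone illustration:

import numpy as np

conf_mat = np.array([[5., 1.],
                     [2., 4.]])
accuracy = conf_mat.trace() / conf_mat.sum()  # (5 + 4) / 12 = 0.75
print('{:.2%}'.format(accuracy))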
Code example #7
File: match.py Project: thisisbilly1/adj-innolux
    def update(self):
        try:
            self.height, self.width, channels = self.img.shape
        except Exception:
            # no image loaded yet
            return

        #update the labels selection
        tempselect = False
        for l in self.labels:
            l.update()
            #deselecting
            if self.world.mouse_left_down:
                if (self.x < self.world.mouse_x < self.x + self.width and
                        self.y < self.world.mouse_y < self.y + self.height):
                    t = l.checkselect()
                    if t:
                        tempselect = True
        if self.world.mouse_left_down:
            if (self.x < self.world.mouse_x < self.x + self.width
                    and self.y < self.world.mouse_y < self.y + self.height):
                if not tempselect:
                    self.updatelabelselect(None)

        #start single select
        if self.world.mouse_left_down:
            if self.selectsinglebox:
                if (self.x < self.world.mouse_x < self.x + self.width and
                        self.y < self.world.mouse_y < self.y + self.height):
                    self.selectbox[0] = self.world.mouse_x
                    self.selectbox[1] = self.world.mouse_y
                    self.selecting = True
                    self.selected = False

        #start multi select
        if self.world.mouse_left_down:
            if self.selectmultibox:
                if (self.x < self.world.mouse_x < self.x + self.width and
                        self.y < self.world.mouse_y < self.y + self.height):
                    self.selectbox[0] = self.world.mouse_x
                    self.selectbox[1] = self.world.mouse_y
                    self.selecting = True
                    self.selected = False
                    self.lines = []
                    self.intersections = []
                    #self.labels=[]
                    self.tempautolabels = []

        if self.selecting:
            self.selectbox[2] = clamp(self.world.mouse_x - self.selectbox[0],
                                      self.x - self.selectbox[0],
                                      self.x + self.width - self.selectbox[0])
            self.selectbox[3] = clamp(self.world.mouse_y - self.selectbox[1],
                                      self.y - self.selectbox[1],
                                      self.y + self.height - self.selectbox[1])

            #single
            if self.selectsinglebox:
                if self.world.mouse_left_up:
                    self.selecting = False
                    self.selectmultibox = False
                    if abs(self.selectbox[2]) > 0 and abs(
                            self.selectbox[3]) > 0:
                        self.selected = True
                        #create the label
                        if self.controller.actionbar.selected is not None:
                            labeltext = self.controller.actionbar.classes[
                                self.controller.actionbar.selected]
                            labelcolor = hsv_to_rgb(
                                self.controller.actionbar.colors[
                                    self.controller.actionbar.selected].
                                slideValue / 360, 1, 1)
                        else:
                            labeltext = "None"
                            labelcolor = (0, 0, 0)

                        l = label(self.world, self, self.selectbox[0] - self.x,
                                  self.selectbox[1] - self.y,
                                  self.selectbox[2], self.selectbox[3],
                                  labeltext, labelcolor)
                        self.labels.append(l)
                        self.selectsinglebox = False
                        #select the label that was just created
                        self.updatelabelselect(l)
                        l.selected = True
                    else:
                        self.selected = False
                        return
            #multi
            if self.selectmultibox:
                if self.world.mouse_left_up:
                    self.selecting = False
                    self.selectmultibox = False
                    if abs(self.selectbox[2]) > 0 and abs(
                            self.selectbox[3]) > 0:
                        self.selected = True
                    else:
                        self.selected = False
                        return
                    #create the template
                    if self.selectbox[2] > 0:
                        x1 = self.selectbox[0] - self.x
                        x2 = self.selectbox[0] + self.selectbox[2] - self.x
                    else:
                        x1 = self.selectbox[0] + self.selectbox[2] - self.x
                        x2 = self.selectbox[0] - self.x
                    if self.selectbox[3] > 0:
                        y1 = self.selectbox[1] - self.y
                        y2 = self.selectbox[1] + self.selectbox[3] - self.y
                    else:
                        y1 = self.selectbox[1] + self.selectbox[3] - self.y
                        y2 = self.selectbox[1] - self.y
                    self.template = self.img[y1:y2, x1:x2]

                    # run template matching on the grayscale image
                    gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
                    template = cv2.cvtColor(self.template, cv2.COLOR_BGR2GRAY)
                    res = cv2.matchTemplate(
                        gray, template,
                        cv2.TM_CCOEFF_NORMED)  #TM_SQDIFF_NORMED
                    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

                    # keep locations whose normalized correlation is at least the threshold
                    min_thresh = .9
                    match_locations = np.where(res >= min_thresh)

                    match_locations = (np.array(match_locations).T).tolist()
                    match_locations = group_points(match_locations, 10)

                    #get the defaults from the actionbar
                    try:
                        labeltext = self.controller.actionbar.classes[
                            self.controller.actionbar.defaultmatch]
                        labelcolor = hsv_to_rgb(
                            self.controller.actionbar.colors[
                                self.controller.actionbar.defaultmatch].
                            slideValue / 360, 1, 1)
                    except Exception:
                        labeltext = "NONE"
                        labelcolor = (0, 0, 0)

                    for m in match_locations:
                        l = label(self.world, self, m[1], m[0],
                                  self.selectbox[2], self.selectbox[3],
                                  labeltext, (0, 255, 0))
                        self.tempautolabels.append(l)

                    self.predict_missing_boxes(labeltext)
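update() leans on a clamp helper that is not part of the snippet. A conventional definition that matches how it is called above (an assumption about the project's helper, not its confirmed code):

def clamp(value, lo, hi):
    # constrain value to the closed interval [lo, hi]
    return max(lo, min(value, hi))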
Code example #8
File: test.py Project: XiaoJiNu/3DV-Action
def main(args=None):
	parser = argparse.ArgumentParser(description = "Training")

	parser.add_argument('--batchSize', type=int, default=36, help='input batch size')
	parser.add_argument('--nepoch', type=int, default=80, help='number of epochs to train for')
	parser.add_argument('--INPUT_FEATURE_NUM', type=int, default = 8,  help='number of input point features')
	parser.add_argument('--temperal_num', type=int, default = 3,  help='number of temporal segments')
	parser.add_argument('--pooling', type=str, default='concatenation', help='how to aggregate temporal split features: vlad | concatenation | bilinear')
	parser.add_argument('--dataset', type=str, default='ntu60', help='which dataset to test on: ntu120 | ntu60')


	parser.add_argument('--weight_decay', type=float, default=0.0008, help='weight decay (SGD only)')
	parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate at t=0')
	parser.add_argument('--momentum', type=float, default=0.9, help='momentum (SGD only)')
	parser.add_argument('--workers', type=int, default=0, help='number of data loading workers')

	parser.add_argument('--root_path', type=str, default='/data/data3/wangyancheng/pointcloudData/NTU_voxelz40_feature_2048',  help='preprocess folder')
	parser.add_argument('--depth_path', type=str, default='/data/data3/wangyancheng/ntu120dataset/',  help='raw_depth_png')
	parser.add_argument('--save_root_dir', type=str, default='results_ntu120/NTU60_v40_cv_MultiStream',  help='output folder')
	parser.add_argument('--model', type=str, default = '',  help='model name for training resume')
	parser.add_argument('--optimizer', type=str, default = '',  help='optimizer name for training resume')
	
	parser.add_argument('--ngpu', type=int, default=1, help='# GPUs')
	parser.add_argument('--main_gpu', type=int, default=0, help='main GPU id') # CUDA_VISIBLE_DEVICES=0 python train.py

	parser.add_argument('--learning_rate_decay', type=float, default=1e-7, help='learning rate decay')

	parser.add_argument('--size', type=str, default='full', help='how many samples do we load: small | full')
	parser.add_argument('--SAMPLE_NUM', type=int, default = 2048,  help='number of sample points')

	parser.add_argument('--Num_Class', type=int, default = 120,  help='number of outputs')
	parser.add_argument('--knn_K', type=int, default = 64,  help='K for knn search')
	parser.add_argument('--sample_num_level1', type=int, default = 512,  help='number of first layer groups')
	parser.add_argument('--sample_num_level2', type=int, default = 128,  help='number of second layer groups')
	parser.add_argument('--ball_radius', type=float, default=0.16, help='square of radius for ball query in level 1')  # 0.025 -> 0.05 for depth
	parser.add_argument('--ball_radius2', type=float, default=0.25, help='square of radius for ball query in level 2')# 0.08 -> 0.01 for depth


	opt = parser.parse_args()
	print(opt)
	torch.cuda.set_device(opt.main_gpu)

	opt.manualSeed = 1
	random.seed(opt.manualSeed)
	torch.manual_seed(opt.manualSeed)

	try:
		os.makedirs(opt.save_root_dir)
	except OSError:
		pass
	# configure logging only after the output directory exists
	logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%Y/%m/%d %H:%M:%S', filename=os.path.join(opt.save_root_dir, 'print.log'), level=logging.INFO)

	os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'

	torch.backends.cudnn.benchmark = True
	#torch.backends.cudnn.deterministic = True
	torch.cuda.empty_cache()

	data_val = NTU_RGBD(root_path = opt.root_path, opt=opt,
		DATA_CROSS_VIEW = True,
		full_train = False,
		validation = False,
		test = True,
		Transform = False
		)
	val_loader = DataLoader(dataset = data_val, batch_size = 18,num_workers = 8)

	netR = PointNet_Plus(opt)
	netR.load_state_dict(torch.load("/results_ntu120/NTU60_v40_cv_MultiStream/pointnet_para_45.pth"))
	netR = torch.nn.DataParallel(netR).cuda()
	print(netR)

	
	# evaluate mode
	torch.cuda.synchronize()
	netR.eval()
	conf_mat = np.zeros([opt.Num_Class, opt.Num_Class])
	conf_mat60 = np.zeros([60, 60])
	acc = 0.0
	loss_sigma = 0.0


	for i, data in enumerate(tqdm(val_loader)):
		torch.cuda.synchronize()
		group_time_start = time.time()
		points_xyzc, points_1xyz, points2_xyz, points3_xyz, label, vid_name = data
		points_xyzc, points_1xyz, points2_xyz, points3_xyz, label = points_xyzc.cuda(), points_1xyz.cuda(), points2_xyz.cuda(), points3_xyz.cuda(), label.cuda()
		# points: B*2048*4; target: B*1
		opt.ball_radius = opt.ball_radius + random.uniform(-0.02, 0.02)
		xt, yt = group_points_pro(points_xyzc, opt)
		xs1, ys1 = group_points(points_1xyz, opt)
		xs2, ys2 = group_points(points2_xyz, opt)
		xs3, ys3 = group_points(points3_xyz, opt)
		forward_time_start = time.time()
		prediction = netR(xt, xs1, xs2, xs3, yt, ys1, ys2, ys3)
		forward_time_end = time.time()

		print('forward time:', forward_time_end - forward_time_start)
		_, predicted60 = torch.max(prediction.data[:, 0:60], 1)
		_, predicted = torch.max(prediction.data, 1)
		
		for j in range(len(label)):
			cate_i = label[j].cpu().numpy()
			pre_i = predicted[j].cpu().numpy()
			if pre_i != cate_i:
				logging.info('Video Name:{} -- correct label {} predicted to {}'.format(vid_name[j],cate_i,pre_i))
			conf_mat[cate_i, pre_i] += 1.0
			if cate_i<60:
				pre_i60 = predicted60[j].cpu().numpy()
				conf_mat60[cate_i, pre_i60] += 1.0

	print('NTU120:{:.2%} NTU60:{:.2%}'.format(conf_mat.trace() / conf_mat.sum(), conf_mat60.trace() / conf_mat60.sum()))
	logging.info('Valid set Accuracy: NTU120 {:.2%}, NTU60 {:.2%}'.format(conf_mat.trace() / conf_mat.sum(), conf_mat60.trace() / conf_mat60.sum()))
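A note on the timing here: the forward pass is bracketed with time.time() but not with torch.cuda.synchronize(), so on a GPU it largely measures kernel launch overhead rather than compute. A sketch of the synchronized pattern (requires a CUDA device; the commented call stands in for the model above):

import time
import torch

torch.cuda.synchronize()          # drain pending kernels first
start = time.time()
# prediction = netR(xt, xs1, xs2, xs3, yt, ys1, ys2, ys3)
torch.cuda.synchronize()          # wait for the forward pass to finish
print('forward time: %.4f s' % (time.time() - start))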
Code example #9
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import ztorch_simulation
import utils

plt_color_codes = 'bgrcmykw'

sim200 = ztorch_simulation.Simulation(std=2.0, steps=1, on_the_fly=True)

steps, centres, aff_groups, points, granularity = sim200.run_ekm()
vnf_groups = utils.group_points(points, aff_groups)

plot_ekm_results = True

if plot_ekm_results:
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in newer Matplotlib

    avail_colors = set(plt_color_codes)

    for gid in vnf_groups:
        group = vnf_groups[gid]
        if len(avail_colors) > 0:
            color = avail_colors.pop()
            ax.scatter(*group.T, c=color, alpha=0.3)

    ax.set_xlabel(r'CPU ($\mu$) [%]')
    ax.set_ylabel(r'Memory ($m$) [%]')
    ax.set_zlabel(r'Network ($\eta$) [%]')
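utils.group_points is used here to bucket the simulated points by their affinity-group labels before plotting. A hypothetical sketch consistent with that usage (vnf_groups maps a group id to an N x 3 array); this is an assumption, not the library's actual code:

import numpy as np

def group_points(points, aff_groups):
    # split the point array by its per-point group label
    points = np.asarray(points)
    labels = np.asarray(aff_groups)
    return {gid: points[labels == gid] for gid in np.unique(labels)}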