# Code example #1 (votes: 0)
def main():
	"""Train the single-stream resnet_lstm sign classifier.

	Hyper-parameters come from the module-level ``parser``. Trains with BCE
	loss, validates once per epoch, logs scalars/histograms through
	``Logger``, saves the checkpoint with the lowest validation loss to
	``args.outf``, and stops early after ``args.patiences`` epochs without
	improvement.
	"""
	args = parser.parse_args()
	# Fixed seed + deterministic cuDNN so runs are reproducible.
	torch.manual_seed(666)
	torch.backends.cudnn.deterministic = True
	torch.backends.cudnn.benchmark = False

	# Single-GPU setup (no DataParallel).
	device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
	torch.cuda.set_device(0)

	# ImageNet normalization statistics for the ResNet backbone.
	preprocess = transforms.Compose([
		transforms.ToTensor(),
		transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
	])
	train_data = SignDataset('../dataset/train.mat', '../frame/',
							 args.window_size, preprocess)
	vali_data = SignDataset('../dataset/validation.mat', '../frame/',
							args.window_size, preprocess)
	train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size,
											   shuffle=True, num_workers=args.workers, pin_memory=True)
	vali_loader = torch.utils.data.DataLoader(vali_data, batch_size=args.batch_size,
											  shuffle=False, num_workers=args.workers, pin_memory=True)

	D_xs = resnet_lstm(args.window_size)
	D_xs.apply(weights_init)
	args.cuda = True
	if args.cuda:
		print('lets use', torch.cuda.device_count(), "gpus")
		D_xs = D_xs.cuda()

	lr = args.learning_rate
	D_xs_solver = optim.Adam(D_xs.parameters(), lr=lr)
	BCE = nn.BCELoss().cuda()
	e_shift = 0
	min_val_loss = 99999
	no_improve_epoch = 0
	now = datetime.now()

	# One log directory per (learning-rate, timestamp) run.
	log_path = '../log/lr_{}_time_{}'.format(args.learning_rate, now.strftime("%Y%m%d-%H%M%S"))
	# FIX: was `os.mkdir` inside a bare `except: pass`, which silently
	# swallowed every error; exist_ok=True handles the only expected one.
	os.makedirs(log_path, exist_ok=True)
	logger = Logger(log_path)

	# ---- training ----
	mes_sum = 0
	n_iter = 0
	for epoch in range(args.epochs):
		D_xs.train()
		print('len of train_loader', len(train_loader))
		for i, (data1, v1) in enumerate(train_loader):
			# Skip the final partial batch so the model always sees a full batch.
			if len(data1) < args.batch_size:
				continue
			n_iter += 2
			D_xs.zero_grad()
			x_1 = data1.to(device)

			vv_1 = v1.type(torch.FloatTensor).cuda()
			# Variable() is a no-op wrapper on modern torch; kept for parity.
			vv_1 = Variable(vv_1, requires_grad=False)

			score_1 = D_xs(x_1)
			v_loss = BCE(score_1, vv_1)
			v_loss.backward()
			D_xs_solver.step()
			mes_sum += v_loss.item()
			# BUG FIX: original was `round(list(score_1))`, a TypeError —
			# round() takes a number, not a list. Round each per-sample
			# probability to get hard 0/1 predictions.
			# (assumes score_1 is a 1-D tensor of probabilities — TODO confirm)
			pre = [round(s) for s in score_1.tolist()]
			print(pre)
			if i % 10 == 0:
				print(vv_1)
				print(score_1)
				info = {'loss': v_loss.item()}
				for tag, value in info.items():
					logger.scalar_summary(tag, value, (i + epoch * 1000))
				for tag, value in D_xs.named_parameters():
					tag = tag.replace('.', '/')
					logger.histo_summary(tag, value.data.cpu().numpy(), i)
					logger.histo_summary(tag + '/grad', value.grad.data.cpu().numpy(), i)
				print('epoch:[%2d] [%4d/%4d] loss: %.4f' % (epoch + e_shift, i,
					(len(train_data) / args.batch_size), mes_sum / 10))
				mes_sum = 0

		# ---- validation: average BCE over all batches, no gradients ----
		mse_sum = 0
		D_xs.eval()
		with torch.no_grad():
			for i, (data1, v1) in enumerate(vali_loader):
				x_1 = data1.to(device)
				vv_1 = v1.type(torch.FloatTensor).cuda()
				vv_1 = Variable(vv_1, requires_grad=False)
				score = D_xs(x_1)
				v_loss = BCE(score, vv_1)
				mse_sum = mse_sum + v_loss.item()

			val_loss = mse_sum / float(i + 1)
			print('epoch:[%2d] ,val_mse :%.6f  ' % (epoch + e_shift, val_loss))
			# BUG FIX: original logged only the LAST batch's loss
			# (`v_loss.item()`) under 'vali_loss'; log the epoch average.
			info = {'vali_loss': val_loss}
			for tag, value in info.items():
				logger.scalar_summary(tag, value, epoch)
			# Early stopping on best (lowest) validation loss.
			if val_loss < min_val_loss:
				min_val_loss = val_loss
				no_improve_epoch = 0
				val_loss = round(val_loss, 2)
				torch.save(D_xs.state_dict(), '{}/epoch_{}_val_loss_{}.pth'.format(args.outf, epoch + e_shift, val_loss))
				print("performance improve, saved the new model......")
			else:
				no_improve_epoch += 1
			if no_improve_epoch > args.patiences:
				print('stop training')
				break
# Code example #2 (votes: 0)
# File: new_train2.py — project: runfengxu/ASL
def main():
	"""Train the two-stream (RGB frames + optical flow) fusion model.

	Hyper-parameters come from the module-level ``parser``. Trains with BCE
	loss, tracks loss/accuracy/F1 on both splits through ``Logger``, saves
	the checkpoint with the best validation accuracy to ``args.outf``, and
	stops early after ``args.patiences`` epochs without improvement.
	"""
	os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
	os.environ['CUDA_VISIBLE_DEVICES'] = '1'
	args = parser.parse_args()
	# Fixed seed + deterministic cuDNN so runs are reproducible.
	torch.manual_seed(666)
	torch.backends.cudnn.deterministic = True
	torch.backends.cudnn.benchmark = False

	# ImageNet normalization statistics for the CNN backbone.
	preprocess = transforms.Compose([
		transforms.ToTensor(),
		transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
	])
	train_data = TwoStream_dataset('../dataset/train.mat', '../frame/', '../optical_flow_matrix/',
								   args.window_size, preprocess)
	vali_data = TwoStream_dataset('../dataset/validation.mat', '../frame/', '../optical_flow_matrix/',
								  args.window_size, preprocess)
	train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size,
											   shuffle=True, num_workers=args.workers, pin_memory=True)
	vali_loader = torch.utils.data.DataLoader(vali_data, batch_size=args.batch_size,
											  shuffle=False, num_workers=args.workers, pin_memory=True)

	D_xs = TwoStream_Fusion2()
	D_xs.apply(weights_init)
	args.cuda = True
	if args.cuda:
		print('lets use', torch.cuda.device_count(), "gpus")
		D_xs = torch.nn.DataParallel(D_xs).cuda()

	lr = args.learning_rate
	D_xs_solver = optim.Adam(D_xs.parameters(), lr=lr)
	BCE = nn.BCELoss().cuda()
	e_shift = 0
	min_val_loss = 99999
	max_accuracy = 0
	no_improve_epoch = 0
	now = datetime.now()

	# One log directory per (learning-rate, timestamp) run.
	log_path = '../log/lr_{}_time_{}'.format(args.learning_rate, now.strftime("%Y%m%d-%H%M%S"))
	# FIX: was `os.mkdir` inside a bare `except: pass`, which silently
	# swallowed every error; exist_ok=True handles the only expected one.
	os.makedirs(log_path, exist_ok=True)
	logger = Logger(log_path)

	# ---- training ----
	mes_sum = 0
	n_iter = 0
	for epoch in range(args.epochs):
		D_xs.train()
		print('len of train_loader', len(train_loader))
		train_predictions = []
		train_gt = []
		for i, (data1, v1) in enumerate(train_loader):
			# Skip the final partial batch so the model always sees a full batch.
			if len(data1[0]) < args.batch_size:
				continue
			n_iter += 2
			D_xs.zero_grad()
			# data1 packs the two streams: [0] = RGB frames, [1] = optical flow.
			x_1 = data1[0].cuda()
			x_2 = data1[1].cuda()

			vv_1 = v1.type(torch.FloatTensor).cuda()
			# Variable() is a no-op wrapper on modern torch; kept for parity.
			vv_1 = Variable(vv_1, requires_grad=False)

			score_1 = D_xs(x_1, x_2)
			v_loss = BCE(score_1, vv_1)
			v_loss.backward()
			D_xs_solver.step()
			mes_sum += v_loss.item()

			# Hard 0/1 predictions at the 0.5 threshold.
			# (assumes score_1 is a 1-D tensor of probabilities — TODO confirm)
			for item in score_1.tolist():
				train_predictions.append(1 if item > 0.5 else 0)
			train_gt += vv_1.tolist()

			if i % 3 == 0:
				info = {'loss': v_loss.item()}
				for tag, value in info.items():
					logger.scalar_summary(tag, value, (i + epoch * 1000))
				# BUG FIX: mes_sum is reset every 3 iterations but was divided
				# by 10, so the printed "average" was 10/3 too small.
				print('epoch:[%2d] [%4d/%4d] loss: %.4f' % (epoch + e_shift, i,
					(len(train_data) / args.batch_size), mes_sum / 3))
				mes_sum = 0

		train_epoch_score = sum(p == t for p, t in zip(train_predictions, train_gt))
		# BUG FIX: the denominator was len(train_loader)*batch_size, which
		# overcounts because skipped partial batches contribute no predictions;
		# divide by the number of samples actually scored.
		train_accuracy = train_epoch_score / max(1, len(train_predictions))
		train_f1_score = f1_score(train_gt, train_predictions)
		print("Train accuracy for epoch:[%2d]: %.4f" % (epoch, train_accuracy))
		print("Train F1 score for epoch:[%2d]: %.4f" % (epoch, train_f1_score))
		info2 = {'train_accuracy': train_accuracy, "train_f1_score": train_f1_score}
		for tag, value in info2.items():
			logger.scalar_summary(tag, value, epoch)

		# ---- validation: average BCE over all batches, no gradients ----
		mse_sum = 0
		D_xs.eval()
		vali_predictions = []
		vali_gt = []
		with torch.no_grad():
			for i, (data1, v1) in enumerate(vali_loader):
				x_1 = data1[0].cuda()
				x_2 = data1[1].cuda()
				vv_1 = v1.type(torch.FloatTensor).cuda()
				vv_1 = Variable(vv_1, requires_grad=False)

				score = D_xs(x_1, x_2)
				v_loss = BCE(score, vv_1)
				mse_sum = mse_sum + v_loss.item()

				for item in score.tolist():
					vali_predictions.append(1 if item > 0.5 else 0)
				vali_gt += vv_1.tolist()

			val_loss = mse_sum / float(i + 1)
			print('epoch:[%2d] ,val_mse :%.6f  ' % (epoch + e_shift, val_loss))
			vali_epoch_score = sum(p == t for p, t in zip(vali_predictions, vali_gt))
			# BUG FIX: same denominator issue as training — the last
			# validation batch may be short of batch_size.
			vali_accuracy = vali_epoch_score / max(1, len(vali_predictions))
			vali_f1_score = f1_score(vali_gt, vali_predictions)
			print("validation accuracy for epoch:[%2d]: %.4f" % (epoch, vali_accuracy))
			print("validation F1 score for epoch:[%2d]: %.4f" % (epoch, vali_f1_score))
			# BUG FIX: 'vali_loss' was logged as the raw sum (mse_sum), not the
			# per-batch mean printed above; log the mean for consistency.
			info3 = {'vali_loss': val_loss, 'vali_accuracy': vali_accuracy, 'vali_f1_score': vali_f1_score}
			for tag, value in info3.items():
				logger.scalar_summary(tag, value, epoch)
			# Early stopping on best (highest) validation accuracy.
			if max_accuracy < vali_accuracy:
				max_accuracy = vali_accuracy
				no_improve_epoch = 0
				max_accuracy = round(max_accuracy, 4)
				torch.save(D_xs.state_dict(), '{}/epoch_{}_max_accuracy_{}.pth'.format(args.outf, epoch + e_shift, max_accuracy))
				print("performance improve, saved the new model......")
			else:
				no_improve_epoch += 1
			if no_improve_epoch > args.patiences:
				print('stop training')
				break