Code example #1
def main():
	print(0)
	args = parser.parse_args()
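	# fix the RNG seed and force deterministic cuDNN kernels for reproducible runs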
	torch.manual_seed(666)
	torch.backends.cudnn.deterministic = True
	torch.backends.cudnn.benchmark = False

	# no DataParallel: single-GPU setup
	device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
	print(1)
	torch.cuda.set_device(0)


	print(2)

	train_data = SignDataset('../dataset/train.mat','../frame/',
										args.window_size,
										transforms.Compose([
										transforms.ToTensor(),
										transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
										]))
	vali_data = SignDataset('../dataset/validation.mat','../frame/',
										args.window_size,
										transforms.Compose([
										transforms.ToTensor(),
										transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
										]))
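	# shuffle only the training split; pin_memory=True speeds up host-to-GPU copies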
	train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
	vali_loader = torch.utils.data.DataLoader(vali_data, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
	D_xs = resnet_lstm(args.window_size)
	D_xs.apply(weights_init)
	args.cuda = True
	if args.cuda:
		print("let's use", torch.cuda.device_count(), "GPUs")
		D_xs = D_xs.cuda()

	lr = args.learning_rate
	D_xs_solver = optim.Adam(D_xs.parameters(), lr=lr)
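	# nn.BCELoss expects inputs already squashed to [0, 1] (i.e. sigmoid outputs)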
	BCE = nn.BCELoss().cuda()
	e_shift = 0
	min_val_loss = 99999
	no_improve_epoch = 0
	now = datetime.now()
#log
	log_path = '../log/lr_{}_time_{}'.format(args.learning_rate,now.strftime("%Y%m%d-%H%M%S"))
	os.makedirs(log_path, exist_ok=True)
	#writer = SummaryWriter(log_path)
	logger = Logger(log_path)
#train
	mes_sum = 0
	n_iter = 0
	for epoch in range(args.epochs):
		print(1)

		D_xs.train()
		print('len of train_loader', len(train_loader))
		for i, (data1, v1) in enumerate(train_loader):
			if len(data1) < args.batch_size:
				continue
			n_iter += 2
			D_xs.zero_grad()
			x_1 = data1.to(device)

			# x_2 = data2
			# x_2 = x_2.to(device)
			vv_1 = v1.float().to(device)
			# vv_2 = v2.type(torch.FloatTensor)
			# vv_2= vv_2.cuda()
			# vv_2 = Variable(vv_2,requires_grad =False)
			
			
			
			# score_2 = D_xs(x_2)
			# v_loss=L1Loss(score_2,vv_2)
			# v_loss.backward()
			# D_xs_solver.step()
			# mes_sum+=v_loss.item()
			# print('nega:',v_loss.item())

			score_1 = D_xs(x_1)
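			# score_1 is expected to be a per-sample probability in [0, 1] for the BCE loss below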
			#print(score_1.shape)
			
			v_loss = BCE(score_1, vv_1)
			v_loss.backward()
			D_xs_solver.step()
			mes_sum += v_loss.item()
			pre = [round(s) for s in score_1.tolist()]
			print(pre)
			# print(vv_1)
			# print(score_1)
			# print('loss:',v_loss.item())
			if i % 10 == 0:
				print(vv_1)
				print(score_1)
				#writer.add_scalar('train/loss',mes_sum/10,n_iter)
				info = {'loss': v_loss.item()}

				for tag, value in info.items():
					logger.scalar_summary(tag, value, (i+epoch*1000))
				for tag, value in D_xs.named_parameters():
					tag = tag.replace('.','/')
					logger.histo_summary(tag, value.data.cpu().numpy(), i)
					logger.histo_summary(tag+'/grad', value.grad.data.cpu().numpy(), i)
				print('epoch:[%2d] [%4d/%4d] loss: %.4f' % (epoch + e_shift, i, len(train_data) // args.batch_size, mes_sum / 10))
				mes_sum = 0
				

		mse_sum = 0
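		# validation pass: eval() freezes dropout/batch-norm updates, no_grad() skips autograd bookkeeping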
		D_xs.eval()
		
		with torch.no_grad():
			for i, (data1, v1) in enumerate(vali_loader):
				x_1 = data1.to(device)
				vv_1 = v1.float().to(device)
				
				
				score = D_xs(x_1)
				v_loss = BCE(score, vv_1)
				mse_sum += v_loss.item()
				
				

			val_loss = mse_sum / float(i + 1)
			print('epoch:[%2d] val_loss: %.6f' % (epoch + e_shift, val_loss))
			#writer.add_scalar('Test/Loss',val_loss,n_iter)
			info = {'vali_loss': val_loss}

			for tag, value in info.items():
				logger.scalar_summary(tag, value, epoch)
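			# early stopping: keep the best model, stop after args.patiences epochs without improvement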
			if val_loss < min_val_loss:
				min_val_loss = val_loss
				no_improve_epoch = 0
				val_loss = round(val_loss, 2)
				torch.save(D_xs.state_dict(), '{}/epoch_{}_val_loss_{}.pth'.format(args.outf, epoch + e_shift, val_loss))
				print("performance improved, saved the new model...")
			else:
				no_improve_epoch += 1
			if no_improve_epoch > args.patiences:
				print('stop training')
				break
Code example #2
                    default='model.pth',
                    type=str,
                    help='pretrained model checkpoint')
parser.add_argument('--message',
                    default='message',
                    type=str,
                    help='message tag appended to the save path')
parser.add_argument('--epochs', default=101, type=int, help='train epochs')
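# NOTE: argparse's type=bool treats any non-empty string (even 'False') as True,
# so --train below parses its value explicitly instead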
parser.add_argument('--train', default=True, type=lambda s: str(s).lower() in ('true', '1'), help='train')
args = parser.parse_args()

save_path = args.save_path + f'{args.message}_{time_str}'

if not os.path.exists(save_path):
    os.mkdir(save_path)
logger = Logger(f'{save_path}/log.log')
logger.Print(args)

train_data, val_data, test_data = load_cisia_surf(train_size=args.batch_size,
                                                  test_size=args.test_size)
model = Model(pretrained=False, num_classes=2)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(),
                      lr=0.01,
                      momentum=0.9,
                      weight_decay=5e-4)
scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
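# ExponentialLR multiplies the learning rate by gamma (0.95) on every scheduler.step()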

if use_cuda:
    model = model.cuda()
    criterion = criterion.cuda()
Code example #3
File: new_train2.py Project: runfengxu/ASL
def main():
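	# make GPU numbering match nvidia-smi, then expose only the second physical card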
	os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
	os.environ['CUDA_VISIBLE_DEVICES']='1'
	# print(0)
	args = parser.parse_args()
	torch.manual_seed(666)
	torch.backends.cudnn.deterministic = True
	torch.backends.cudnn.benchmark = False
	
	#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
	#print(1)
	#torch.cuda.set_device(0)


	#print(2)

	train_data = TwoStream_dataset('../dataset/train.mat','../frame/','../optical_flow_matrix/',
										args.window_size,
										transforms.Compose([
										transforms.ToTensor(),
										transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
										]))
	vali_data = TwoStream_dataset('../dataset/validation.mat','../frame/','../optical_flow_matrix/',
										args.window_size,
										transforms.Compose([
										transforms.ToTensor(),
										transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
										]))
	train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
	vali_loader = torch.utils.data.DataLoader(vali_data, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
	D_xs = TwoStream_Fusion2()
	D_xs.apply(weights_init)
	args.cuda = True
	if args.cuda:
		print("let's use", torch.cuda.device_count(), "GPUs")
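		# DataParallel replicates the model on each visible GPU and splits every batch across them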
		D_xs = torch.nn.DataParallel(D_xs).cuda()

	lr = args.learning_rate
	D_xs_solver = optim.Adam(D_xs.parameters(), lr=lr)
	BCE = nn.BCELoss().cuda()
	e_shift = 0
	min_val_loss = 99999
	max_accuracy = 0
	no_improve_epoch = 0
	now = datetime.now()
#log
	log_path = '../log/lr_{}_time_{}'.format(args.learning_rate,now.strftime("%Y%m%d-%H%M%S"))
	os.makedirs(log_path, exist_ok=True)
	#writer = SummaryWriter(log_path)
	logger = Logger(log_path)
#train
	mes_sum = 0
	n_iter = 0
	for epoch in range(args.epochs):
		print(1)

		D_xs.train()
		print('len of train_loader', len(train_loader))
		train_epoch_score = 0
		train_predictions = []
		train_gt = []
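		# data1 presumably pairs the two streams: data1[0] RGB frames, data1[1] optical-flow maps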
		for i, (data1, v1) in enumerate(train_loader):
			if len(data1[0]) < args.batch_size:
				continue
			n_iter += 2
			D_xs.zero_grad()
			x_1 = data1[0].cuda()
			x_2 = data1[1].cuda()

			# x_2 = data2
			# x_2 = x_2.to(device)
			vv_1 = v1.float().cuda()
			# vv_2 = v2.type(torch.FloatTensor)
			# vv_2= vv_2.cuda()
			# vv_2 = Variable(vv_2,requires_grad =False)
			
			
			
			# score_2 = D_xs(x_2)
			# v_loss=L1Loss(score_2,vv_2)
			# v_loss.backward()
			# D_xs_solver.step()
			# mes_sum+=v_loss.item()
			# print('nega:',v_loss.item())
			score_1 = D_xs(x_1, x_2)
			v_loss = BCE(score_1, vv_1)
			v_loss.backward()
			D_xs_solver.step()
			mes_sum += v_loss.item()

			for item in score_1.tolist():
				if item > 0.5:
					train_predictions.append(1)
				else:
					train_predictions.append(0)

			train_gt += vv_1.tolist()

			# pre = round(list(score_1))
			# print(pre)
			# print(vv_1)
			# print(score_1)
			# print('loss:',v_loss.item())
			if i % 3 == 0:
				#print(vv_1)
				#print(score_1)

				#writer.add_scalar('train/loss',mes_sum/10,n_iter)
				info = {'loss': v_loss.item()}

				for tag, value in info.items():
					logger.scalar_summary(tag, value, (i+epoch*1000))
				# for tag, value in D_xs.named_parameters():
				# 	tag = tag.replace('.','/')
				# 	logger.histo_summary(tag, value.data.cpu().numpy(), i)
				# 	logger.histo_summary(tag+'/grad', value.grad.data.cpu().numpy(), i)
				print('epoch:[%2d] [%4d/%4d] loss: %.4f' % (epoch + e_shift, i, len(train_data) // args.batch_size, mes_sum / 3))
				mes_sum = 0
		
		train_epoch_score = sum(p == g for p, g in zip(train_predictions, train_gt))
		train_accuracy = train_epoch_score / (len(train_loader) * args.batch_size)
		train_f1_score = f1_score(train_gt, train_predictions)
		print("Train accuracy for epoch:[%2d]: %.4f" %(epoch, train_accuracy))
		print("Train F1 score for epoch:[%2d]: %.4f" %(epoch, train_f1_score))
		info2 = {'train_accuracy': train_accuracy, 'train_f1_score': train_f1_score}
		for tag, value in info2.items():
			logger.scalar_summary(tag, value, epoch)

		mse_sum = 0
		D_xs.eval()
		
		vali_epoch_score = 0
		vali_predictions = []
		vali_gt = []
		with torch.no_grad():
			for i, (data1, v1) in enumerate(vali_loader):
				x_1 = data1[0].cuda()
				x_2 = data1[1].cuda()
				vv_1 = v1.float().cuda()
				
				
				score = D_xs(x_1, x_2)
				v_loss = BCE(score, vv_1)
				mse_sum += v_loss.item()

				for item in score.tolist():
					if item > 0.5:
						vali_predictions.append(1)
					else:
						vali_predictions.append(0)

				vali_gt += vv_1.tolist()

			val_loss = mse_sum / float(i + 1)
			print('epoch:[%2d] val_loss: %.6f' % (epoch + e_shift, val_loss))
			vali_epoch_score = sum(p == g for p, g in zip(vali_predictions, vali_gt))
			vali_accuracy = vali_epoch_score / (len(vali_loader) * args.batch_size)
			vali_f1_score = f1_score(vali_gt, vali_predictions)
			#writer.add_scalar('Test/Loss',val_loss,n_iter)
			print("validation accuracy for epoch:[%2d]: %.4f" %(epoch, vali_accuracy))
			print("validation F1 score for epoch:[%2d]: %.4f" %(epoch, vali_f1_score))
			info3 = {'vali_loss': val_loss, 'vali_accuracy': vali_accuracy, 'vali_f1_score': vali_f1_score}

			for tag, value in info3.items():
				logger.scalar_summary(tag, value, epoch)
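			# note: under DataParallel, state_dict() keys carry a 'module.' prefix; saving D_xs.module.state_dict() avoids it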
			if max_accuracy < vali_accuracy:
				max_accuracy = round(vali_accuracy, 4)
				no_improve_epoch = 0
				torch.save(D_xs.state_dict(), '{}/epoch_{}_max_accuracy_{}.pth'.format(args.outf, epoch + e_shift, max_accuracy))
				print("performance improved, saved the new model...")
			else:
				no_improve_epoch += 1
			if no_improve_epoch > args.patiences:
				print('stop training')
				break
Code example #4
import math
import os
import json
import time
from loger import Logger
import numpy as np
import copy
import matplotlib.pyplot as plt
from matplotlib.pyplot import MultipleLocator

SRC_NUMBER = 1
DST_NUMBER = 1
UAV_COUNTS = 10
UAV_BATCH = 10

CONFIG_PATH = "./config.json"
logger = Logger('log_' + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
with open(CONFIG_PATH, "r") as f:
    CONFIG = json.load(f)
destination = CONFIG['width'] * CONFIG['height']
turns_of_change = 30
rate_of_change = 0.02
logger.openLogger = False
MOVE_RECORD = dict()


def run():
    global turns_of_change
    global rate_of_change
    global destination

    time_swap = 1
    MAP_RECORD = []
Code example #5
File: autodemo.py Project: qileDai/autoframework
# coding=utf-8
import requests, xlrd, pymysql, time, sys
# import the modules used below
from xlutils import copy
from loger import Logger

logger = Logger(logger='AutoDemo').getloger()


class AutoDemo:
    # the copy function comes from the xlutils module imported above

    def __init__(self):
        # no per-instance state is needed; instantiating AutoDemo() here would recurse forever
        pass

    def readExcel(self, file_path):
        '''
        Read test cases from an Excel workbook.
        :param file_path: an Excel file, or its absolute path
        :return: a list of all test cases on the workbook's first sheet
        '''
        try:
            logger.info('start reading the case file!')
            book = xlrd.open_workbook(file_path)  # open the Excel workbook
        except Exception as e:
            # report when the path does not exist or the Excel file is invalid
            print('path does not exist or the Excel file is invalid', e)
            logger.error(u'path does not exist or the Excel file is invalid')
            return e
        else:
Code example #6
import stradegy
import pathTree
import numpy as np
import UAV
import time
import random
from loger import Logger

logger = Logger()
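
# NOTE: the triple-quoted block below is example UAV setup code, kept disabled as a module-level string literal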

"""
uav_1 = UAV.UAV(1,'./config.json',logger)
global_map = np.random.randn(uav_1.config['width'],uav_1.config['height'])
global_map = (np.multiply(global_map,255.0/(2.58*2)) + 127.5).astype(np.uint8)
uav_1.initialize_global_information(global_map)
uav_1.update_all_level_maps()
uav_1.position[0] = 0
uav_1.position[1] = 0

uav_2 = UAV.UAV(2,'./config.json',logger)
uav_2.initialize_global_information(global_map)
uav_2.update_all_level_maps()
uav_2.position[0] = 0
uav_2.position[1] = 0

uav_3 = UAV.UAV(3,'./config.json',logger)
uav_3.initialize_global_information(global_map)
uav_3.update_all_level_maps()
uav_3.position[0] = 0
uav_3.position[1] = 0
Code example #7
        timeArray = time.localtime(time.time())
        otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
        decoded["lastsynctime"] = otherStyleTime
        decoded["isLastsyncSuccess"] = "True"

        try:
            fss = open("status.list", "w")
        except Exception as arg:
            crlog.error(arg)
            crlog.error("write status.list failed, pls check it!")
            crlog.critical("exit")
            sys.exit(1)
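
        # note: opening with mode "w" already truncated the file, so this truncate() is redundant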
        fss.truncate()
        fss.write(json.dumps(decoded).replace('\'', '\"'))  # replace single quotes with double quotes
        fss.close()

        # the task is finished; go to sleep
        crlog.info("current task is over.")
        sleep()
    return 0


# test execution order list

loadlogdir()  # initialize the log directory
crlog = Logger(logdir)  # instantiate the logging manager
my = Hparser()  # instantiate the parser

pconfig()
isFirstrun()