import os
import random

import numpy as np
import torch

# Project-local modules; the exact package paths are assumptions, since this
# file does not show its own import block. Module-level names used below
# (PLAYGROUND_* paths, DATA_DIR, MODEL_SAVE_DIR, LR, Net, BP_Net, loss_fn)
# are likewise expected to be defined elsewhere in this module.
import config
import extractor
import empty_extractor
import classification_data_extractor


def load_data(type=1):
    empty_extc = empty_extractor.PGEmptyFeatureExtractor()
    empty_data = empty_extc.load_data()
    empty_data = empty_data.tolist()

    # Samples that contain a target, using the default file path configuration.
    data_extractor = extractor.FeatureExtractor()
    car_data_list = data_extractor.load_data()
    input_data = []
    for item in car_data_list:
        input_data.append(item[0])  # intensity information

    random.shuffle(input_data)
    car_data = input_data[0:5490]

    # The remainder is stored for later validation.
    pg_car_data = input_data[5490:]
    np.save(r'D:\home\zeewei\projects\77GRadar\model\cnn\test_data_pg\input_data_classification_pg_car_data.npy',
            pg_car_data)
    np.save(r'D:\home\zeewei\projects\77GRadar\model\cnn\test_data_pg\input_data_classification_pg_empty_data.npy',
            empty_data[300:])

    if type != 1:
        # Mix a 300-sample playground subset with road data for each class.
        random.shuffle(car_data)
        car_data = car_data[0:300]
        road_extractor = classification_data_extractor.ClassificationExtractor()
        road_car_data = road_extractor.load_car_data()
        random.shuffle(road_car_data)
        road_car_data = road_car_data[0:300]
        for item in road_car_data:
            car_data.append(item)

        road_empty_data = road_extractor.load_empty_data()
        print('road_empty_data: ', len(road_empty_data))
        empty_data = empty_data[0:300]
        for item in road_empty_data:
            empty_data.append(item)
    return car_data, empty_data
def load_playground_data():
    if os.path.exists(PLAYGROUND_TRAIN_DATA_INPUT) \
            and os.path.exists(PLAYGROUND_TRAIN_DATA_LABEL) \
            and os.path.exists(PLAYGROUND_TEST_DATA_INPUT) \
            and os.path.exists(PLAYGROUND_TEST_DATA_LABEL):
        train_data_input = np.load(PLAYGROUND_TRAIN_DATA_INPUT)
        train_data_label = np.load(PLAYGROUND_TRAIN_DATA_LABEL)
        test_data_input = np.load(PLAYGROUND_TEST_DATA_INPUT)
        test_data_label = np.load(PLAYGROUND_TEST_DATA_LABEL)
    else:
        train_data_path = os.path.join(config.processed_data_dir,
                                       config.train_data_file_name)
        if os.path.exists(train_data_path):
            data_list = np.load(train_data_path)
        else:
            data_extractor = extractor.FeatureExtractor(
                origin_data_dir=config.origin_train_data_dir,
                processed_data_dir=config.processed_data_dir,
                input_data_file_name=config.train_data_file_name)
            data_list = data_extractor.load_data()

        random.shuffle(data_list)
        # 7:3 train/test split; this frame-wise split is appropriate when the
        # test set is drawn per frame rather than per recording session.
        train_num = int(7 * len(data_list) / 10)
        train_data = data_list[0:train_num]
        test_data = data_list[train_num:]

        train_data_input = []
        train_data_label = []
        for item in train_data:
            train_data_input.append(item[0])
            train_data_label.append(item[1])

        test_data_input = []
        test_data_label = []
        for item in test_data:
            test_data_input.append(item[0])
            test_data_label.append(item[1])

        np.save(PLAYGROUND_TRAIN_DATA_INPUT, train_data_input)
        np.save(PLAYGROUND_TRAIN_DATA_LABEL, train_data_label)
        np.save(PLAYGROUND_TEST_DATA_INPUT, test_data_input)
        np.save(PLAYGROUND_TEST_DATA_LABEL, test_data_label)
    return train_data_input, train_data_label, test_data_input, test_data_label
def load_val_data():
    val_data_path = os.path.join(config.processed_data_dir,
                                 config.val_data_file_name)
    if os.path.exists(val_data_path):
        val_data = np.load(val_data_path)
    else:
        data_extractor = extractor.FeatureExtractor(
            origin_data_dir=config.origin_val_data_dir,
            processed_data_dir=config.processed_data_dir,
            input_data_file_name=config.val_data_file_name)
        val_data = data_extractor.load_data()

    random.shuffle(val_data)
    val_data_input = []
    val_data_label = []
    for item in val_data:
        val_data_input.append(item[0])
        val_data_label.append(item[1])
    return val_data_input, val_data_label
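
# A minimal sketch of how load_val_data() is meant to be consumed, assuming a
# model file name ('bp.pkl') and an MSE criterion; both are illustrative
# choices, not taken from this file.
def validate_model(model_path='bp.pkl'):
    val_input, val_label = load_val_data()
    model = torch.load(model_path).cuda(0)
    criterion = torch.nn.MSELoss()
    with torch.no_grad():
        prediction = model(torch.FloatTensor(val_input).cuda(0))
        val_loss = criterion(prediction, torch.FloatTensor(val_label).cuda(0))
    print('val_loss: ', val_loss.item())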
def load_all_data():
    pg_data_extractor = extractor.FeatureExtractor()  # use the default file path configuration
    data_list = pg_data_extractor.load_data()  # playground data
    data_list = data_list.tolist()
    random.shuffle(data_list)
    data_list = data_list[0:3000]

    # Road data; load_road_data() is defined elsewhere in this module.
    input_data, label_data, test_data, test_label_data = load_road_data()
    input_data = input_data.tolist()
    label_data = label_data.tolist()
    for i in range(len(input_data)):
        data_list.append([input_data[i], label_data[i]])
    for i in range(len(test_data)):
        data_list.append([test_data[i], test_label_data[i]])
    return data_list
def load_pg_data_by_range(start=0, end=64):
    """start: first range bin to keep; end: last range bin to keep."""
    pg_train_data_input_range = os.path.join(
        DATA_DIR, str(start) + '_' + str(end) + '_pg_train_data_input.npy')
    pg_train_data_label_range = os.path.join(
        DATA_DIR, str(start) + '_' + str(end) + '_pg_train_data_label.npy')
    pg_test_data_input_range = os.path.join(
        DATA_DIR, str(start) + '_' + str(end) + '_pg_test_data_input.npy')
    pg_test_data_label_range = os.path.join(
        DATA_DIR, str(start) + '_' + str(end) + '_pg_test_data_label.npy')

    if os.path.exists(pg_train_data_input_range) \
            and os.path.exists(pg_train_data_label_range) \
            and os.path.exists(pg_test_data_input_range) \
            and os.path.exists(pg_test_data_label_range):
        train_data_input = np.load(pg_train_data_input_range)
        train_data_label = np.load(pg_train_data_label_range)
        test_data_input = np.load(pg_test_data_input_range)
        test_data_label = np.load(pg_test_data_label_range)
        return train_data_input, train_data_label, test_data_input, test_data_label

    data_extractor = extractor.FeatureExtractor()  # use the default file path configuration
    data_list = data_extractor.load_data()

    # Keep only the samples whose target lies within [start, end].
    remain_list = []
    for item in data_list:
        max_index = item[1].argmax(axis=0)  # index of the label maximum, i.e. the target's range bin
        if start <= max_index <= end:
            remain_list.append(item)
    data_list = remain_list

    random.shuffle(data_list)
    train_num = int(7 * len(data_list) / 10)  # 7:3 train/test split
    train_data = data_list[0:train_num]
    test_data = data_list[train_num:]

    train_data_input = []
    train_data_label = []
    for item in train_data:
        train_data_input.append(item[0])
        train_data_label.append(item[1])

    test_data_input = []
    test_data_label = []
    for item in test_data:
        test_data_input.append(item[0])
        test_data_label.append(item[1])

    np.save(pg_train_data_input_range, train_data_input)
    np.save(pg_train_data_label_range, train_data_label)
    np.save(pg_test_data_input_range, test_data_input)
    np.save(pg_test_data_label_range, test_data_label)
    return train_data_input, train_data_label, test_data_input, test_data_label
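
# train() below maps every 64-bin frame through decrease_range_resolution(),
# which is not defined in this file. A minimal sketch, assuming the intent of
# its "change the range resolution to 6 m" comment is to merge adjacent bin
# pairs (64 bins -> 32 bins):
def decrease_range_resolution(bins):
    bins = np.asarray(bins)
    # Pairwise max over neighbouring range bins halves the resolution.
    return np.maximum(bins[0::2], bins[1::2]).tolist()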
def train():
    model = BP_Net().cuda(0)
    optimizer = torch.optim.Adam(model.parameters(), lr=5e-5)

    data_extractor = extractor.FeatureExtractor()  # use the default file path configuration
    data_list = data_extractor.load_data()
    # data_list = load_all_data()  # mixed road and playground data
    print('the length of data_list: ', len(data_list))

    random.shuffle(data_list)
    batch_num = int(7 * len(data_list) / 10)  # 7:3 train/test split
    train_data = data_list[0:batch_num]
    test_data = data_list[batch_num:]

    train_data_input = []
    train_data_label = []
    for item in train_data:
        train_data_input.append(item[0])
        train_data_label.append(item[1])

    test_data_input = []
    test_data_label = []
    for item in test_data:
        test_data_input.append(item[0])
        test_data_label.append(item[1])

    # Halve the range resolution to 6 m.
    train_data_input_n = []
    train_data_label_n = []
    for i in range(len(train_data_label)):
        train_data_input_n.append(decrease_range_resolution(train_data_input[i]))
        train_data_label_n.append(decrease_range_resolution(train_data_label[i]))

    test_data_input_n = []
    test_data_label_n = []
    for i in range(len(test_data_label)):
        test_data_input_n.append(decrease_range_resolution(test_data_input[i]))
        test_data_label_n.append(decrease_range_resolution(test_data_label[i]))

    input_data_tensor = torch.FloatTensor(train_data_input_n).cuda(0)
    label_data_tensor = torch.FloatTensor(train_data_label_n).cuda(0)
    test_data_tensor = torch.FloatTensor(test_data_input_n).cuda(0)
    test_label_tensor = torch.FloatTensor(test_data_label_n).cuda(0)

    # Persist the held-out test split; inputs and labels go to separate files.
    np.save(r'D:\home\zeewei\projects\77GRadar\model\bp\test_data\bp3_input.npy', test_data_input_n)
    np.save(r'D:\home\zeewei\projects\77GRadar\model\bp\test_data\bp3_label.npy', test_data_label_n)

    min_loss = 200
    for i in range(300000):
        optimizer.zero_grad()  # clear gradients accumulated by the previous step
        prediction = model(input_data_tensor)
        loss = loss_fn(prediction, label_data_tensor)
        loss_sum = loss.data.cpu().numpy()
        loss.backward()
        optimizer.step()

        test_prediction = model(test_data_tensor)
        test_loss = loss_fn(test_prediction, test_label_tensor)
        test_loss = test_loss.data.cpu().numpy()
        if i % 50 == 0:
            if test_loss < min_loss:
                min_loss = test_loss
                if test_loss < 0.711:
                    torch.save(model, MODEL_SAVE_DIR + 'bp2_' + str(i) + '.pkl')
            print(i, ' train_mean_loss: ', loss_sum, ' test_loss: ', test_loss,
                  ' min_test_loss: ', min_loss)

    print('test_min_loss: ', min_loss)
    torch.save(model, 'BP2_data_with_empty_loss.pkl')
def train_playground():
    model = Net().cuda(0)
    optimizer = torch.optim.Adam(model.parameters(), lr=LR)

    data_extractor = extractor.FeatureExtractor()  # use the default file path configuration
    data_list = data_extractor.load_data()
    # data_list = load_all_data()  # mixed road and playground data

    random.shuffle(data_list)
    batch_num = int(7 * len(data_list) / 10)  # 7:3 train/test split
    train_data = data_list[0:batch_num]
    test_data = data_list[batch_num:]

    train_data_input = []
    train_data_label = []
    for item in train_data:
        train_data_input.append(item[0])
        train_data_label.append(item[1])

    test_data_input = []
    test_data_label = []
    for item in test_data:
        test_data_input.append(item[0])
        test_data_label.append(item[1])

    np.save(r'D:\home\zeewei\projects\77GRadar\model\cnn\test_data_pg\input_data_bp2.npy', test_data_input)
    np.save(r'D:\home\zeewei\projects\77GRadar\model\cnn\test_data_pg\label_data_bp2.npy', test_data_label)

    test_data_tensor = torch.FloatTensor(test_data_input).cuda(0)
    test_label_tensor = torch.FloatTensor(test_data_label).cuda(0)
    train_data_tensor = torch.FloatTensor(train_data_input).cuda(0)
    train_label_tensor = torch.FloatTensor(train_data_label).cuda(0)

    min_loss = 2
    for i in range(3000000):
        optimizer.zero_grad()
        prediction = model(train_data_tensor)
        loss = loss_fn(prediction, train_label_tensor)
        loss_val = loss.data.cpu().numpy()
        loss.backward()
        optimizer.step()

        test_prediction = model(test_data_tensor)
        test_loss = loss_fn(test_prediction, test_label_tensor)
        test_loss = test_loss.data.cpu().numpy()
        if i % 50 == 0:
            if test_loss < min_loss:
                min_loss = test_loss
                if test_loss < 0.6993:
                    torch.save(model, MODEL_SAVE_DIR + 'bp2_pg_' + str(i) + '.pkl')
            print(i, ' train_mean_loss: ', loss_val, ' test_loss: ', test_loss,
                  ' min_loss: ', min_loss)
    print('test_min_loss: ', min_loss)
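
# Hypothetical entry point, assuming this module is also run as a script; the
# original file does not show one.
if __name__ == '__main__':
    train_playground()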