def __init__(self, datasets_path_list, image_size_dims=None, neighborhood_size=32,
             neighborhood_radius=32, grid_radius=4, grid_angle=45,
             train_steps=5, pred_steps=5):
    """Build the dataset from a list of preprocessed dataset directories.

    For each directory the constructor loads observed trajectories
    (``obs.npy``), future trajectories (``pred.npy``) and the raw pixel
    positions (``pixel_pos.csv``), then derives the three model inputs
    (person, group, scene) plus the ground-truth targets, concatenating
    them across datasets.

    Args:
        datasets_path_list: Directories, each containing ``obs.npy``,
            ``pred.npy`` and ``pixel_pos.csv``.
        image_size_dims: ``[width, height]`` of the scene image;
            defaults to ``[720, 576]``. (``None`` sentinel instead of a
            mutable default list, which would be shared across calls.)
        neighborhood_size, neighborhood_radius, grid_radius, grid_angle:
            Parameters forwarded to ``log_group_model_input``.
        train_steps: Number of observed time steps per sample.
        pred_steps: Number of predicted time steps per sample.
    """
    super().__init__()
    # Create the default per call — never share a mutable default argument.
    if image_size_dims is None:
        image_size_dims = [720, 576]
    self.train_steps = train_steps
    self.pred_steps = pred_steps

    person_data = []
    group_data = []
    scene_data = []
    ground_truth = []
    # Original used enumerate() but never consumed the index; iterate directly.
    for dataset_path in datasets_path_list:
        obs = np.load(dataset_path + "/obs.npy")
        print(dataset_path, ": {}".format(len(obs)))
        ground_truth.append(
            model_expected_ouput(np.load(dataset_path + "/pred.npy"),
                                 self.pred_steps))
        raw_data, _ = preprocess(dataset_path + "/pixel_pos.csv")
        person_data.append(person_model_input(obs, self.train_steps))
        group_data.append(
            log_group_model_input(obs, self.train_steps, neighborhood_size,
                                  image_size_dims, neighborhood_radius,
                                  grid_radius, grid_angle,
                                  [1, 1, 1, 1, 1, 1, 1, 1], raw_data))
        scene_data += self.get_sceneData(obs, dataset_path)

    self.person_data = np.concatenate(person_data, axis=0)    # (num_obs, 8, 2)
    self.group_data = np.concatenate(group_data, axis=0)      # (num_obs, 8, -1)
    self.scene_data = scene_data                              # (num_obs, 3, 720, 576)
    self.ground_truth = np.concatenate(ground_truth, axis=0)  # (num_obs, 12, 2)
    # TODO: call utils api to split data into person, group, scene data
    # Scene images are converted to tensors on access.
    self.transformations = transforms.Compose([transforms.ToTensor()])
# Ground-truth future trajectories for datasets 9 and 10.
pred_9 = check_9.pred
pred_10 = check_10.pred
# Trajectory input data for the model.
# NOTE(review): ten copy-pasted variants per input kind — presumably one per
# dataset; a loop/dict would remove the duplication. TODO confirm downstream
# code relies on these exact numbered names.
vehicle_input_1 = vehicle_model_input(obs_veh_1, observed_frame_num)  # vehicles
vehicle_input_2 = vehicle_model_input(obs_veh_2, observed_frame_num)
vehicle_input_3 = vehicle_model_input(obs_veh_3, observed_frame_num)
vehicle_input_4 = vehicle_model_input(obs_veh_4, observed_frame_num)
vehicle_input_5 = vehicle_model_input(obs_veh_5, observed_frame_num)
vehicle_input_6 = vehicle_model_input(obs_veh_6, observed_frame_num)
vehicle_input_7 = vehicle_model_input(obs_veh_7, observed_frame_num)
vehicle_input_8 = vehicle_model_input(obs_veh_8, observed_frame_num)
vehicle_input_9 = vehicle_model_input(obs_veh_9, observed_frame_num)
vehicle_input_10 = vehicle_model_input(obs_veh_10, observed_frame_num)
person_input_1 = person_model_input(obs_1, observed_frame_num)  # pedestrians
person_input_2 = person_model_input(obs_2, observed_frame_num)
person_input_3 = person_model_input(obs_3, observed_frame_num)
person_input_4 = person_model_input(obs_4, observed_frame_num)
person_input_5 = person_model_input(obs_5, observed_frame_num)
person_input_6 = person_model_input(obs_6, observed_frame_num)
person_input_7 = person_model_input(obs_7, observed_frame_num)
person_input_8 = person_model_input(obs_8, observed_frame_num)
person_input_9 = person_model_input(obs_9, observed_frame_num)
person_input_10 = person_model_input(obs_10, observed_frame_num)
# Pedestrian social-influence data (circular neighborhood map).
# NOTE: this call's argument list continues beyond this chunk.
group_circle_1 = circle_group_model_input(obs_1, observed_frame_num,
                                          neighborhood_size, dimensions_1,
                                          neighborhood_radius, grid_radius,
                                          grid_angle, circle_map_weights,
# Fragment of a per-dataset loop body (loop header over index `i` is outside
# this chunk). Builds numbered variable *names* as strings, then assigns via
# locals()[name].
# NOTE(review): writing through locals() is undefined behavior inside a
# function (CPython ignores such writes); this only works reliably at module
# scope, where locals() is globals(). A plain dict keyed by i would be the
# robust fix — TODO confirm scope before relying on these names.
person_input='person_input_'+str(i)
group_circle='group_circle_'+str(i)
gruop_grid_veh2ped='gruop_grid_veh2ped_'+str(i)  # (sic: "gruop" typo kept — name is used elsewhere)
vehicle_expect_output='vehicle_expect_output_'+str(i)
expected_ouput='expected_ouput_'+str(i)
# NOTE(review): absolute machine-specific path — should be configurable.
locals()[data_dir] = r'C:\Users\asus\Desktop\lstm项目\ss-lstm_0529\ss-lstm_0529\datadut\0'+str(i)
# Raw CSV preprocessing for vehicles and pedestrians.
locals()[veh_data], locals()[numveh] = preprocess_vehicle(locals()[data_dir])
locals()[raw_data], locals()[numPeds]= preprocess(locals()[data_dir])
# Split into observed / predicted windows.
locals()[check_veh] = vdp.veh_DataProcesser(locals()[data_dir], observed_frame_num, predicting_frame_num)
locals()[check] = dp.DataProcesser(locals()[data_dir], observed_frame_num, predicting_frame_num)
locals()[obs_veh] = locals()[check_veh].obs
locals()[obs] = locals()[check].obs
locals()[pred_veh]= locals()[check_veh].pred
locals()[pred] = locals()[check].pred
# Model input tensors.
locals()[vehicle_input] = vehicle_model_input(locals()[obs_veh], observed_frame_num)
locals()[person_input] = person_model_input(locals()[obs], observed_frame_num)
locals()[group_circle] = circle_group_model_input(locals()[obs], observed_frame_num, neighborhood_size, dimensions_1, neighborhood_radius, grid_radius, grid_angle, circle_map_weights, locals()[raw_data])
locals()[gruop_grid_veh2ped] = veh2ped_circle_group_model_input(locals()[obs], observed_frame_num, dimensions_1, veh_neighborhood_size, grid_radius, grid_angle, locals()[raw_data],locals()[veh_data]) # circular region; for a rectangular region use veh2ped_grid_model_input instead
locals()[vehicle_expect_output] = vehicle_model_expected_ouput(locals()[pred_veh], predicting_frame_num) # expected (ground-truth) output
locals()[expected_ouput] = model_expected_ouput(locals()[pred], predicting_frame_num)
# Rectangular-region variant: only one instance kept, for reference.
group_grid_1 = group_model_input(obs_1, observed_frame_num, neighborhood_size, dimensions_1, grid_size, raw_data_1)
group_log_3 = log_group_model_input(obs_3, observed_frame_num, neighborhood_size, dimensions_1, neighborhood_radius, grid_radius, grid_angle, circle_map_weights, raw_data_3) # log-map variant, likewise kept for reference
# print(data_dir_1)
# Per-dataset frame directories and filename prefixes (ETH / UCY benchmarks).
frame_dir_2 = './data/ETHuniv/frames/'
frame_dir_3 = './data/UCYuniv/frames/'
frame_dir_4 = './data/UCYzara01/frames/'
frame_dir_5 = './data/UCYzara02/frames/'
data_str_1 = 'ETHhotel-'
data_str_2 = 'ETHuniv-'
data_str_3 = 'UCYuniv-'
data_str_4 = 'zara01-'
data_str_5 = 'zara02-'
# data_dir_1 — load precomputed .npy arrays and derive all model inputs.
raw_data_1, numPeds_1 = preprocess(data_dir_1)
obs_1 = np.load('./data/obs_1.npy')    # observed trajectories
pred_1 = np.load('./data/pred_1.npy')  # ground-truth futures
img_1 = np.load('./data/img_1.npy')    # scene images
person_input_1 = person_model_input(obs_1, observed_frame_num)
expected_ouput_1 = model_expected_ouput(pred_1, predicting_frame_num)
# Three alternative social-pooling encodings: log-map, rectangular grid, circle.
group_log_1 = log_group_model_input(obs_1, observed_frame_num, neighborhood_size, dimensions_1, neighborhood_radius, grid_radius, grid_angle, circle_map_weights, raw_data_1)
group_grid_1 = group_model_input(obs_1, observed_frame_num, neighborhood_size, dimensions_1, grid_size, raw_data_1)
group_circle_1 = circle_group_model_input(obs_1, observed_frame_num, neighborhood_size, dimensions_1, neighborhood_radius, grid_radius, grid_angle, circle_map_weights, raw_data_1)
# data_dir_2 — same pipeline; images come pre-resized.
raw_data_2, numPeds_2 = preprocess(data_dir_2)
obs_2 = np.load('./data/obs_2.npy')
pred_2 = np.load('./data/pred_2.npy')
img_2 = np.load('./data/img_2_resize.npy')
# img_2 = all_image_tensor(frame_dir_2, data_str_2, obs_2, 576, 720)
person_input_2 = person_model_input(obs_2, observed_frame_num)
expected_ouput_2 = model_expected_ouput(pred_2, predicting_frame_num)
# Dataset filename prefixes.
data_str_4 = 'zara01-'
data_str_5 = 'zara02-'
# data_dir_1 — in this version obs/pred come from DataProcesser instead of
# cached .npy files, and images are built on the fly by all_image_tensor.
raw_data_1, numPeds_1 = preprocess(data_dir_1)
print(raw_data_1)  # NOTE(review): debug prints — consider removing/logging.
print(numPeds_1)
check = dp.DataProcesser(data_dir_1, observed_frame_num, predicting_frame_num)
#obs_1 = np.load('./data/obs_1.npy')
#pred_1 = np.load('./data/pred_1.npy')
obs_1 = check.obs    # observed trajectories
pred_1 = check.pred  # ground-truth futures
#img_1 = np.load('./data/img_1.npy')
img_1 = all_image_tensor(data_dir_1, data_str_1, obs_1, img_width_1, img_height_1)
person_input_1 = person_model_input(obs_1, observed_frame_num)
expected_ouput_1 = model_expected_ouput(pred_1, predicting_frame_num)
# Three alternative social-pooling encodings: log-map, rectangular grid, circle.
group_log_1 = log_group_model_input(obs_1, observed_frame_num, neighborhood_size, dimensions_1, neighborhood_radius, grid_radius, grid_angle, circle_map_weights, raw_data_1)
group_grid_1 = group_model_input(obs_1, observed_frame_num, neighborhood_size, dimensions_1, grid_size, raw_data_1)
group_circle_1 = circle_group_model_input(obs_1, observed_frame_num, neighborhood_size, dimensions_1, neighborhood_radius, grid_radius, grid_angle, circle_map_weights, raw_data_1)
# NOTE(review): the triple-quote below opens a string used to comment out the
# dataset-2 section; it is closed beyond this chunk. Prefer per-line '#'
# comments — a bare string expression is evaluated and discarded at runtime.
'''
# data_dir_2
raw_data_2, numPeds_2 = preprocess(data_dir_2)