check_veh_4 = vdp.veh_DataProcesser(data_dir_4, observed_frame_num, predicting_frame_num)
check_veh_5 = vdp.veh_DataProcesser(data_dir_5, observed_frame_num, predicting_frame_num)
check_veh_6 = vdp.veh_DataProcesser(data_dir_6, observed_frame_num, predicting_frame_num)
check_veh_7 = vdp.veh_DataProcesser(data_dir_7, observed_frame_num, predicting_frame_num)
check_veh_8 = vdp.veh_DataProcesser(data_dir_8, observed_frame_num, predicting_frame_num)
check_veh_9 = vdp.veh_DataProcesser(data_dir_9, observed_frame_num, predicting_frame_num)
check_veh_10 = vdp.veh_DataProcesser(data_dir_10, observed_frame_num, predicting_frame_num)

check_1 = dp.DataProcesser(data_dir_1, observed_frame_num, predicting_frame_num)  # pedestrians
check_2 = dp.DataProcesser(data_dir_2, observed_frame_num, predicting_frame_num)
check_3 = dp.DataProcesser(data_dir_3, observed_frame_num, predicting_frame_num)
check_4 = dp.DataProcesser(data_dir_4, observed_frame_num, predicting_frame_num)
check_5 = dp.DataProcesser(data_dir_5, observed_frame_num, predicting_frame_num)
check_6 = dp.DataProcesser(data_dir_6, observed_frame_num, predicting_frame_num)
check_7 = dp.DataProcesser(data_dir_7, observed_frame_num, predicting_frame_num)
check_8 = dp.DataProcesser(data_dir_8, observed_frame_num, predicting_frame_num)
check_9 = dp.DataProcesser(data_dir_9, observed_frame_num, predicting_frame_num)
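# The per-index constructions above are repetitive. Below is a minimal sketch of the same
# step written as a loop over a dict of dataset directories. It assumes vdp.veh_DataProcesser
# and dp.DataProcesser behave exactly as used above; build_processers and the data_dirs
# argument are hypothetical names introduced only for illustration (the function is not called).
def build_processers(data_dirs, observed_frame_num, predicting_frame_num):
    """Hypothetical helper: map dataset index -> vehicle / pedestrian data processers."""
    check_veh = {}
    check_ped = {}
    for i, data_dir in data_dirs.items():
        check_veh[i] = vdp.veh_DataProcesser(data_dir, observed_frame_num, predicting_frame_num)
        check_ped[i] = dp.DataProcesser(data_dir, observed_frame_num, predicting_frame_num)
    return check_veh, check_ped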
check = 'check_' + str(i)
obs_veh = 'obs_veh_' + str(i)
obs = 'obs_' + str(i)
pred_veh = 'pre_veh_' + str(i)
pred = 'pred_' + str(i)
vehicle_input = 'vehicle_input_' + str(i)
person_input = 'person_input_' + str(i)
group_circle = 'group_circle_' + str(i)
gruop_grid_veh2ped = 'gruop_grid_veh2ped_' + str(i)
vehicle_expect_output = 'vehicle_expect_output_' + str(i)
expected_ouput = 'expected_ouput_' + str(i)

locals()[data_dir] = r'C:\Users\asus\Desktop\lstm项目\ss-lstm_0529\ss-lstm_0529\datadut\0' + str(i)
locals()[veh_data], locals()[numveh] = preprocess_vehicle(locals()[data_dir])
locals()[raw_data], locals()[numPeds] = preprocess(locals()[data_dir])
locals()[check_veh] = vdp.veh_DataProcesser(locals()[data_dir], observed_frame_num, predicting_frame_num)
locals()[check] = dp.DataProcesser(locals()[data_dir], observed_frame_num, predicting_frame_num)
locals()[obs_veh] = locals()[check_veh].obs
locals()[obs] = locals()[check].obs
locals()[pred_veh] = locals()[check_veh].pred
locals()[pred] = locals()[check].pred
locals()[vehicle_input] = vehicle_model_input(locals()[obs_veh], observed_frame_num)
locals()[person_input] = person_model_input(locals()[obs], observed_frame_num)
locals()[group_circle] = circle_group_model_input(locals()[obs], observed_frame_num, neighborhood_size, dimensions_1,
                                                  neighborhood_radius, grid_radius, grid_angle, circle_map_weights,
                                                  locals()[raw_data])
# Circular region; switch to veh2ped_grid_model_input for a rectangular region.
locals()[gruop_grid_veh2ped] = veh2ped_circle_group_model_input(locals()[obs], observed_frame_num, dimensions_1,
                                                                veh_neighborhood_size, grid_radius, grid_angle,
                                                                locals()[raw_data], locals()[veh_data])
locals()[vehicle_expect_output] = vehicle_model_expected_ouput(locals()[pred_veh], predicting_frame_num)  # expected output
locals()[expected_ouput] = model_expected_ouput(locals()[pred], predicting_frame_num)
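# Note: writing into locals() is only reliable at module scope (where locals() is globals());
# inside a function those assignments would be silently dropped. Below is a minimal sketch of
# the same per-dataset loop using plain dictionaries instead of dynamically named variables.
# It assumes the preprocessing helpers keep the signatures used above; build_dut_inputs and
# datadut_root are hypothetical names introduced only for illustration (the function is not called).
def build_dut_inputs(datadut_root, indices):
    """Hypothetical helper: collect per-dataset model inputs into dicts keyed by index."""
    out = {name: {} for name in (
        'vehicle_input', 'person_input', 'group_circle', 'gruop_grid_veh2ped',
        'vehicle_expect_output', 'expected_ouput')}
    for i in indices:
        data_dir = datadut_root + '0' + str(i)
        veh_data, _numveh = preprocess_vehicle(data_dir)
        raw_data, _numPeds = preprocess(data_dir)
        check_veh = vdp.veh_DataProcesser(data_dir, observed_frame_num, predicting_frame_num)
        check = dp.DataProcesser(data_dir, observed_frame_num, predicting_frame_num)
        out['vehicle_input'][i] = vehicle_model_input(check_veh.obs, observed_frame_num)
        out['person_input'][i] = person_model_input(check.obs, observed_frame_num)
        out['group_circle'][i] = circle_group_model_input(
            check.obs, observed_frame_num, neighborhood_size, dimensions_1,
            neighborhood_radius, grid_radius, grid_angle, circle_map_weights, raw_data)
        out['gruop_grid_veh2ped'][i] = veh2ped_circle_group_model_input(
            check.obs, observed_frame_num, dimensions_1, veh_neighborhood_size,
            grid_radius, grid_angle, raw_data, veh_data)
        out['vehicle_expect_output'][i] = vehicle_model_expected_ouput(check_veh.pred, predicting_frame_num)
        out['expected_ouput'][i] = model_expected_ouput(check.pred, predicting_frame_num)
    return out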
frame_dir_1 = './data/ETHhotel/frames/'
frame_dir_2 = './data/ETHuniv/frames/'
frame_dir_3 = './data/UCYuniv/frames/'
frame_dir_4 = './data/UCYzara01/frames/'
frame_dir_5 = './data/UCYzara02/frames/'

data_str_1 = 'ETHhotel-'
data_str_2 = 'ETHuniv-'
data_str_3 = 'UCYuniv-'
data_str_4 = 'zara01-'
data_str_5 = 'zara02-'

# data_dir_1
raw_data_1, numPeds_1 = preprocess(data_dir_1)
print(raw_data_1)
print(numPeds_1)

check = dp.DataProcesser(data_dir_1, observed_frame_num, predicting_frame_num)
# obs_1 = np.load('./data/obs_1.npy')
# pred_1 = np.load('./data/pred_1.npy')
obs_1 = check.obs
pred_1 = check.pred

# img_1 = np.load('./data/img_1.npy')
img_1 = all_image_tensor(data_dir_1, data_str_1, obs_1, img_width_1, img_height_1)

person_input_1 = person_model_input(obs_1, observed_frame_num)
expected_ouput_1 = model_expected_ouput(pred_1, predicting_frame_num)
group_log_1 = log_group_model_input(obs_1, observed_frame_num, neighborhood_size, dimensions_1,
                                    neighborhood_radius, grid_radius, grid_angle, circle_map_weights, raw_data_1)
group_grid_1 = group_model_input(obs_1, observed_frame_num, neighborhood_size, dimensions_1, grid_size, raw_data_1)
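# The commented-out np.load calls above suggest obs_1 / pred_1 / img_1 were cached under ./data/.
# Below is a minimal sketch of writing that cache, mirroring those file names. It assumes the
# arrays are NumPy arrays (or array-likes np.save accepts) and that ./data/ already exists;
# cache_dataset_arrays is a hypothetical helper introduced only for illustration (not called here).
import numpy as np  # redundant if numpy is already imported above

def cache_dataset_arrays(index, obs, pred, img=None, out_dir='./data'):
    """Hypothetical helper: save per-dataset arrays so later runs can np.load them instead."""
    np.save('{}/obs_{}.npy'.format(out_dir, index), obs)
    np.save('{}/pred_{}.npy'.format(out_dir, index), pred)
    if img is not None:
        np.save('{}/img_{}.npy'.format(out_dir, index), img)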