# Reshape the target-user clips so each time step is a (fps, 3) block of
# per-frame xyz coordinates: (samples, seconds, fps, 3).
_video_db_tar = _video_db_tar.reshape(
    (_video_db_tar.shape[0], _video_db_tar.shape[1], fps, 3))
_video_db_future_tar = _video_db_future_tar.reshape(
    (_video_db_future_tar.shape[0], _video_db_future_tar.shape[1], fps, 3))

total_num_samples = _video_db_tar.shape[0]
num_testing_sample = int(0.15 * total_num_samples)  # hold out the last 15% of samples as the test set

if cfg.shuffle_data:
    #### shuffle the whole dataset with one fixed permutation so that the
    #### target-user and other-user tensors stay row-aligned
    # index_shuf = get_shuffle_index(total_num_samples)
    index_shuf = pickle.load(
        open('index_shuf' + '_exp' + str(experiment) + '.p', 'rb'))
    print('Shuffle data before training and testing.')
    _video_db_tar = shuffle_data(index_shuf, _video_db_tar)
    _video_db_future_tar = shuffle_data(index_shuf, _video_db_future_tar)
    _video_db_future_input_tar = shuffle_data(index_shuf, _video_db_future_input_tar)
    _video_db_oth = shuffle_data(index_shuf, _video_db_oth)
    _video_db_future_oth = shuffle_data(index_shuf, _video_db_future_oth)
    # _video_db_future_input_oth = shuffle_data(index_shuf, _video_db_future_input_oth)

#### prepare training data
# data_sanity_check(_video_db_tar, _video_db_future_tar, _video_db_future_input_tar)
### target user: add a singleton channel axis -> (samples, seconds, 1, fps, 3)
encoder_input_data = _video_db_tar[:-num_testing_sample, :, :][:, :, np.newaxis, :, :]
# decoder_target_data = _video_db_future_tar[:-num_testing_sample, :, :]
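# ---------------------------------------------------------------------------
# Reference sketch (assumption): get_shuffle_index() and shuffle_data() are
# helpers from the project's utility module and are not shown in this file.
# The fallback definitions below only illustrate the presumed behaviour --
# build one random permutation of the sample axis and apply it identically to
# every tensor -- and take effect only if the real helpers are not already in
# scope.  They assume numpy is imported as np, as in the surrounding code.
# ---------------------------------------------------------------------------
try:
    shuffle_data
except NameError:
    def get_shuffle_index(total_num_samples):
        # random permutation of sample indices (the pickle loaded above is
        # assumed to store one such permutation for reproducible splits)
        return np.random.permutation(total_num_samples)

    def shuffle_data(index_shuf, data):
        # reorder only the first (sample) axis with the fixed permutation
        return data[index_shuf]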
## assign data
## only use one video for now
# encoder_input_data = per_video_db[:, 100:110, :]
# temp = per_video_db[:, 110:120, :].copy()
# temp1 = np.zeros_like(temp)
# temp1[:, 1:, :] = temp[:, :-1, :].copy()
# temp1[:, 0, :] = encoder_input_data[:, -1, :]
_video_db, _video_db_future, _video_db_future_input = get_data(datadb, pick_user=False)
total_num_samples = _video_db.shape[0]

if cfg.shuffle_data:
    # shuffle the whole dataset
    # index_shuf = get_shuffle_index(total_num_samples)
    index_shuf = pickle.load(open('index_shuf.p', 'rb'))
    _video_db = shuffle_data(index_shuf, _video_db)
    _video_db_future = shuffle_data(index_shuf, _video_db_future)
    _video_db_future_input = shuffle_data(index_shuf, _video_db_future_input)

# use the last few samples as test
# num_testing_sample = int(0.15 * total_num_samples)
num_testing_sample = 1

encoder_input_data = _video_db[:-num_testing_sample, :, :]
decoder_target_data = get_gt_target_xyz(_video_db_future)[:-num_testing_sample, :, :]
decoder_input_data = get_gt_target_xyz(_video_db_future_input)[:-num_testing_sample, :, :]

### ==================== Graph def ====================
# Define an input sequence and process it.
encoder_inputs = Input(shape=(None, num_encoder_tokens))
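# ---------------------------------------------------------------------------
# Sketch (assumption) of how the graph definition is likely to continue.  The
# comment above mirrors the standard Keras LSTM encoder-decoder example, so
# the block below fills in that pattern for illustration only: latent_dim,
# batch_size and epochs are placeholder values, num_decoder_tokens is inferred
# from the prepared targets, and the imports may duplicate ones already at the
# top of the original file.
# ---------------------------------------------------------------------------
from keras.layers import LSTM, Dense
from keras.models import Model

latent_dim = 64                                      # assumed hidden size
num_decoder_tokens = decoder_target_data.shape[-1]   # target feature size
batch_size, epochs = 32, 50                          # assumed training settings

# Encoder: run an LSTM over the past trajectory and keep only its final states.
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]

# Decoder: teacher-forced on the shifted ground-truth future sequence,
# initialised with the encoder states, regressing the future targets.
decoder_inputs = Input(shape=(None, num_decoder_tokens))
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
decoder_outputs = Dense(num_decoder_tokens, activation='linear')(decoder_outputs)

model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
          batch_size=batch_size, epochs=epochs, validation_split=0.1)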