def multi_step_2D(model, x_test, y_test, mmn, len_closeness, step):
    """Iteratively roll the model forward over ``step`` horizons on the test set.

    At each horizon the previous predictions are fed back as the most recent
    closeness frames, while the period / trend / meta inputs are advanced by
    one timeslot, and the shortened input set is predicted again.

    Args:
        model: trained Keras-style model; ``model.predict(list_of_arrays)``
            must return an array of shape (timeslots, nb_flow, height, width).
        x_test: list ``[closeness, period, trend, ..., meta]`` of test inputs;
            closeness has shape (timeslots, len_closeness * nb_flow, h, w)
            with the most recent frame pair in channels 0..nb_flow-1
            (implied by the assignment of predictions to channels 0:2 below).
        y_test: ground truth, shape (timeslots, nb_flow, height, width).
        mmn: min-max normaliser, passed through to ``evaluate``.
        len_closeness: number of closeness frames per flow channel.
        step: number of prediction horizons to evaluate.

    Returns:
        dict mapping 0-based horizon index -> score returned by ``evaluate``.
    """
    dict_multi_score = {}
    nb_flow = 2  # inflow / outflow channel pair
    y_pre = []

    y_test = copy(y_test)
    x_test_now = [copy(e) for e in x_test]

    # inference, one horizon per iteration
    for i in range(1, step + 1):
        y_pre_inference = model.predict(x_test_now)
        # [timeslots, flow, height, width] -> [1, timeslots, flow, height, width]
        # so every horizon can be collected in y_pre
        y_pre.append(np.expand_dims(y_pre_inference, axis=0))

        # bring the channel axis first so (in, out) pairs can be shifted
        x_test_remove = x_test_now[0].transpose((1, 0, 2, 3))
        y_pre_remove = y_pre_inference.transpose((1, 0, 2, 3))

        # Shift every (in, out) closeness pair one slot older.
        # BUGFIX: the loop stop must be nb_flow, not len_closeness — with the
        # original stop the oldest pairs are never shifted once
        # len_closeness >= 4, leaving stale frames in the rolled input.
        # (For the common len_closeness <= 3, nb_flow == 2 case both stops
        # produce the same index sequence, so behavior there is unchanged.)
        for j in range(len_closeness * nb_flow, nb_flow, -nb_flow):
            x_test_remove[j - 2:j] = x_test_remove[j - 4:j - 2]
        # newest closeness pair becomes this horizon's prediction
        x_test_remove[0:2] = y_pre_remove
        x_test_remove = x_test_remove.transpose((1, 0, 2, 3))

        # drop the last sample: its target lies beyond the test range
        x_test_remove = x_test_remove[:-1]

        x_test_next = [x_test_remove]           # new closeness component
        x_test_next.append(x_test_now[1][1:])   # new period component
        x_test_next.append(x_test_now[2][1:])   # new trend component
        x_test_next.append(x_test[-1][i:])      # meta features, advanced i slots
        x_test_now = x_test_next

    # horizon k predicts y_test[k:] — one fewer sample per extra horizon
    for k in range(len(y_pre)):
        print(f'Step {k+1}:')
        score = evaluate(y_test[k:], y_pre[k][0], mmn)
        dict_multi_score[k] = score

    return dict_multi_score
(mmn._max - mmn._min) / 2. * m_factor)) score = model.evaluate(X_test, Y_test, batch_size=Y_test.shape[0], verbose=0) print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' % (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2. * m_factor)) print('evaluating using the model that has the best loss on the valid set') model.load_weights(fname_param) # load best weights for current iteration Y_pred = model.predict(X_test) # compute predictions score = evaluate(Y_test, Y_pred, mmn, rmse_factor=1) # evaluate performance # save to csv csv_name = os.path.join('results', 'stresnet_bikeNYC_results.csv') if not os.path.isfile(csv_name): if os.path.isdir('results') is False: os.mkdir('results') with open(csv_name, 'a', encoding="utf-8") as file: file.write('iteration,' 'rsme_in,rsme_out,rsme_tot,' 'mape_in,mape_out,mape_tot,' 'ape_in,ape_out,ape_tot') file.write("\n") file.close() with open(csv_name, 'a', encoding="utf-8") as file: file.write(f'{i},{score[0]},{score[1]},{score[2]},{score[3]},'
def train_model(lr, batch_size, residual_units, save_results=False, i=''):
    """Build, train and evaluate one ST-ResNet-style model for TaxiNYC.

    Relies on module-level data and helpers (X_train/Y_train, X_test/Y_test,
    build_model, evaluate, mmn, nb_epoch, path_result, ...).

    Args:
        lr: learning rate (rounded to 5 decimals).
        batch_size: multiplier; actual batch size is 16 * int(batch_size).
        residual_units: multiplier; actual residual units is 2 * int(...).
        save_results: when True, evaluate best weights and append a CSV row.
        i: iteration tag used in file names and as the RNG seed base;
           assumed to be an int when truthy (i * 18 feeds np/tf seeding) —
           NOTE(review): a non-empty str here would break the seeding calls.

    Returns:
        1.0 - score[1], so the Bayesian optimizer (a maximizer) minimizes
        the validation RMSE.
    """
    # get discrete parameters
    residual_units = int(residual_units) * 2
    batch_size = 16 * int(batch_size)
    # kernel_size = int(kernel_size)
    lr = round(lr, 5)

    # build model
    tf.keras.backend.set_image_data_format('channels_first')
    model = build_model(len_closeness, len_period, len_trend, nb_flow,
                        map_height, map_width, external_dim, residual_units,
                        bn=True, bn2=True, save_model_pic=False, lr=lr)
    # model.summary()
    hyperparams_name = 'TaxiNYC{}.c{}.p{}.t{}.resunits_{}.lr_{}.batchsize_{}'.format(
        i, len_closeness, len_period, len_trend, residual_units, lr, batch_size)
    fname_param = os.path.join('MODEL', '{}.best.h5'.format(hyperparams_name))

    # early_stopping was constructed but never passed to fit (callbacks below
    # are deliberately reduced to the checkpoint only), so it is left disabled:
    # early_stopping = EarlyStopping(monitor='val_rmse', patience=25, mode='min')
    # lr_callback = LearningRateScheduler(lrschedule)
    model_checkpoint = ModelCheckpoint(
        fname_param, monitor='val_rmse', verbose=0, save_best_only=True, mode='min')

    # train model
    print("training model...")
    ts = time.time()
    if (i):
        print(f'Iteration {i}')
        # per-iteration deterministic seeding
        np.random.seed(i * 18)
        tf.random.set_seed(i * 18)
    history = model.fit(X_train, Y_train,
                        epochs=nb_epoch,
                        batch_size=batch_size,
                        validation_data=(X_test, Y_test),
                        # callbacks=[early_stopping, model_checkpoint],
                        # callbacks=[model_checkpoint, lr_callback],
                        callbacks=[model_checkpoint],
                        verbose=2)
    model.save_weights(os.path.join(
        'MODEL', '{}.h5'.format(hyperparams_name)), overwrite=True)
    # BUGFIX: the history pickle was written through an open() handle that
    # was never closed; use a context manager so the file is flushed/closed.
    with open(os.path.join(
            path_result, '{}.history.pkl'.format(hyperparams_name)), 'wb') as f:
        pickle.dump(history.history, f)
    print("\nelapsed time (training): %.3f seconds\n" % (time.time() - ts))

    # evaluate on the checkpointed (best validation) weights
    model.load_weights(fname_param)
    score = model.evaluate(
        X_test, Y_test, batch_size=Y_test.shape[0], verbose=0)
    print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2.))

    if (save_results):
        print('evaluating using the model that has the best loss on the valid set')
        model.load_weights(fname_param)  # load best weights for current iteration
        Y_pred = model.predict(X_test)  # compute predictions
        score = evaluate(Y_test, Y_pred, mmn, rmse_factor=1)  # evaluate performance

        # save to csv (header written once, then one row per iteration)
        csv_name = os.path.join('results', 'star_taxiNYC_results.csv')
        if not os.path.isfile(csv_name):
            os.makedirs('results', exist_ok=True)
            with open(csv_name, 'a', encoding="utf-8") as file:
                file.write('iteration,'
                           'rsme_in,rsme_out,rsme_tot,'
                           'mape_in,mape_out,mape_tot,'
                           'ape_in,ape_out,ape_tot'
                           )
                file.write("\n")
        with open(csv_name, 'a', encoding="utf-8") as file:
            file.write(f'{i},{score[0]},{score[1]},{score[2]},{score[3]},'
                       f'{score[4]},{score[5]},{score[6]},{score[7]},{score[8]}'
                       )
            file.write("\n")

    K.clear_session()

    # bayes opt is a maximization algorithm; to minimize validation_loss,
    # return 1 - score[1]
    bayes_opt_score = 1.0 - score[1]
    return bayes_opt_score
verbose=0) print('Train score: %.6f rmse (norm): %.6f rmse (real): %.6f' % (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2.)) score = model.evaluate(X_test, Y_test, batch_size=Y_test.shape[0], verbose=0) print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' % (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2.)) print('evaluating using the model that has the best loss on the valid set') model.load_weights(fname_param) # load best weights for current iteration Y_pred = model.predict(X_test) # compute predictions score = evaluate(Y_test, Y_pred, mmn) # evaluate performance # save to csv csv_name = os.path.join('results', 'stresnet_taxiBJ_results.csv') if not os.path.isfile(csv_name): if os.path.isdir('results') is False: os.mkdir('results') with open(csv_name, 'a', encoding="utf-8") as file: file.write('iteration,' 'rsme_in,rsme_out,rsme_tot,' 'mape_in,mape_out,mape_tot,' 'ape_in,ape_out,ape_tot') file.write("\n") file.close() with open(csv_name, 'a', encoding="utf-8") as file: file.write(f'{i},{score[0]},{score[1]},{score[2]},{score[3]},'