def solve_Multiscale_PDE(R):
    log_out_path = R['FolderName']        # extract the output path from the dictionary R
    if not os.path.exists(log_out_path):  # create the output directory if it does not already exist
        os.mkdir(log_out_path)
    outfile_name1 = '%s%s.txt' % ('log2', 'train')
    # create and open a writable log file in the output directory
    log_fileout_NN = open(os.path.join(log_out_path, outfile_name1), 'w')
    dictionary_out2file(R, log_fileout_NN, actName2normal=R['act_name2NN1'], actName2scale=R['act_name2NN2'])

    # settings required by the Laplace-type problem
    batchsize_it = R['batch_size2interior']
    batchsize_bd = R['batch_size2boundary']
    bd_penalty_init = R['init_boundary_penalty']   # penalty parameter for the boundary conditions
    lr_decay = R['learning_rate_decay']
    learning_rate = R['learning_rate']
    init_penalty2powU = R['balance2solus']
    hidden2normal = R['hidden2normal']
    hidden2scale = R['hidden2scale']
    wb_regular = R['regular_weight_biases']        # regularization parameter for weights and biases

    # ------- set the problem ---------
    input_dim = R['input_dim']
    out_dim = R['output_dim']
    act_func1 = R['act_name2NN1']
    act_func2 = R['act_name2NN2']

    region_l = 0.0
    region_r = 1.0
    if R['PDE_type'] == 'general_laplace':
        # -laplace u = f
        region_l = 0.0
        region_r = 1.0
        f, u_true, u_left, u_right = laplace_eqs1d.get_laplace_infos(
            input_dim=input_dim, out_dim=out_dim, left_bottom=region_l, right_top=region_r,
            laplace_name=R['equa_name'])
    elif R['PDE_type'] == 'p_laplace':
        # Solve the following equation, in which A_eps(x) oscillates strongly and carries multiple scales:
        #     - d/dx ( A_eps(x) * d u_eps(x)/dx ) = f(x),   x in R^n
        # The domain has the same length in every direction; with a uniform mesh it is a square in 2D.
        p_index = R['order2laplace']
        epsilon = R['epsilon']
        if 2 == p_index:
            region_l = 0.0
            region_r = 1.0
            u_true, f, A_eps, u_left, u_right = pLaplace_eqs1d.get_infos_2laplace(
                in_dim=input_dim, out_dim=out_dim, region_a=region_l, region_b=region_r,
                p=p_index, eps=epsilon)
        elif 3 == p_index:
            region_l = 0.0
            region_r = 1.0
            u_true, f, A_eps, u_left, u_right = pLaplace_eqs1d.get_infos_3laplace(
                in_dim=input_dim, out_dim=out_dim, region_a=region_l, region_b=region_r,
                p=p_index, eps=epsilon)
        elif 5 == p_index:
            region_l = 0.0
            region_r = 1.0
            u_true, f, A_eps, u_left, u_right = pLaplace_eqs1d.get_infos_5laplace(
                in_dim=input_dim, out_dim=out_dim, region_a=region_l, region_b=region_r,
                p=p_index, eps=epsilon)
        elif 8 == p_index:
            region_l = 0.0
            region_r = 1.0
            u_true, f, A_eps, u_left, u_right = pLaplace_eqs1d.get_infos_8laplace(
                in_dim=input_dim, out_dim=out_dim, region_a=region_l, region_b=region_r,
                p=p_index, eps=epsilon)
        else:
            region_l = 0.0
            region_r = 1.0
            u_true, f, A_eps, u_left, u_right = pLaplace_eqs1d.get_infos_pLaplace(
                in_dim=input_dim, out_dim=out_dim, region_a=region_l, region_b=region_r,
                p=p_index, eps=epsilon, eqs_name=R['equa_name'])

    # initialization scheme for the weights and biases
    if R['weight_biases_model'] == 'general_model':
        flag_normal = 'WB_NN2normal'
        flag_scale = 'WB_NN2scale'
        # Weights, Biases = PDE_DNN_base.Initial_DNN2different_hidden(input_dim, out_dim, hidden_layers, flag)
        # Weights, Biases = laplace_DNN1d_base.initialize_NN_xavier(input_dim, out_dim, hidden_layers, flag1)
        # Weights, Biases = laplace_DNN1d_base.initialize_NN_random_normal(input_dim, out_dim, hidden_layers, flag1)
        if R['model2normal'] == 'PDE_DNN_Cos_C_Sin_Base' or R['model2normal'] == 'DNN_adaptCosSin_Base':
            W2NN_Normal, B2NN_Normal = DNN_base.initialize_NN_random_normal2_CS(
                input_dim, out_dim, hidden2normal, flag_normal)
        else:
            W2NN_Normal, B2NN_Normal = DNN_base.initialize_NN_random_normal2(
                input_dim, out_dim, hidden2normal, flag_normal)
        if R['model2scale'] == 'PDE_DNN_Cos_C_Sin_Base' or R['model2scale'] == 'DNN_adaptCosSin_Base':
            W2NN_freqs, B2NN_freqs = DNN_base.initialize_NN_random_normal2_CS(
                input_dim, out_dim, hidden2scale, flag_scale)
        else:
            W2NN_freqs, B2NN_freqs = DNN_base.initialize_NN_random_normal2(
                input_dim, out_dim, hidden2scale, flag_scale)

    global_steps = tf.Variable(0, trainable=False)
    with tf.device('/gpu:%s' % (R['gpuNo'])):
        with tf.variable_scope('vscope', reuse=tf.AUTO_REUSE):
            X_it = tf.placeholder(tf.float32, name='X_it', shape=[None, input_dim])            # shape (*, input_dim)
            X_left_bd = tf.placeholder(tf.float32, name='X_left_bd', shape=[None, input_dim])  # shape (*, input_dim)
            X_right_bd = tf.placeholder(tf.float32, name='X_right_bd', shape=[None, input_dim])
            bd_penalty = tf.placeholder_with_default(input=1e3, shape=[], name='bd_p')
            penalty2powU = tf.placeholder_with_default(input=1.0, shape=[], name='p_powU')
            in_learning_rate = tf.placeholder_with_default(input=1e-5, shape=[], name='lr')
            train_opt = tf.placeholder_with_default(input=True, shape=[], name='train_opt')

            if R['model2normal'] == 'PDE_DNN':
                U_NN_Normal = DNN_base.PDE_DNN(X_it, W2NN_Normal, B2NN_Normal, hidden2normal,
                                               activate_name=act_func1)
                ULeft_NN_Normal = DNN_base.PDE_DNN(X_left_bd, W2NN_Normal, B2NN_Normal, hidden2normal,
                                                   activate_name=act_func1)
                URight_NN_Normal = DNN_base.PDE_DNN(X_right_bd, W2NN_Normal, B2NN_Normal, hidden2normal,
                                                    activate_name=act_func1)
            elif R['model2normal'] == 'PDE_DNN_Cos_C_Sin_Base':
                freq = [1]
                U_NN_Normal = DNN_base.PDE_DNN_Cos_C_Sin_Base(X_it, W2NN_Normal, B2NN_Normal, hidden2normal,
                                                              freq, activate_name=act_func1)
                ULeft_NN_Normal = DNN_base.PDE_DNN_Cos_C_Sin_Base(X_left_bd, W2NN_Normal, B2NN_Normal,
                                                                  hidden2normal, freq, activate_name=act_func1)
                URight_NN_Normal = DNN_base.PDE_DNN_Cos_C_Sin_Base(X_right_bd, W2NN_Normal, B2NN_Normal,
                                                                   hidden2normal, freq, activate_name=act_func1)
            elif R['model2normal'] == 'DNN_adaptCosSin_Base':
                freq = [1]
                U_NN_Normal = DNN_base.DNN_adaptCosSin_Base(X_it, W2NN_Normal, B2NN_Normal, hidden2normal,
                                                            freq, activate_name=act_func1)
                ULeft_NN_Normal = DNN_base.DNN_adaptCosSin_Base(X_left_bd, W2NN_Normal, B2NN_Normal,
                                                                hidden2normal, freq, activate_name=act_func1)
                URight_NN_Normal = DNN_base.DNN_adaptCosSin_Base(X_right_bd, W2NN_Normal, B2NN_Normal,
                                                                 hidden2normal, freq, activate_name=act_func1)

            freqs = R['freqs']
            if R['model2scale'] == 'PDE_DNN_scale':
                U_NN_freqs = DNN_base.PDE_DNN_scale(X_it, W2NN_freqs, B2NN_freqs, hidden2scale, freqs,
                                                    activate_name=act_func2)
                ULeft_NN_freqs = DNN_base.PDE_DNN_scale(X_left_bd, W2NN_freqs, B2NN_freqs, hidden2scale,
                                                        freqs, activate_name=act_func2)
                URight_NN_freqs = DNN_base.PDE_DNN_scale(X_right_bd, W2NN_freqs, B2NN_freqs, hidden2scale,
                                                         freqs, activate_name=act_func2)
            elif R['model2scale'] == 'PDE_DNN_adapt_scale':
                U_NN_freqs = DNN_base.PDE_DNN_adapt_scale(X_it, W2NN_freqs, B2NN_freqs, hidden2scale, freqs,
                                                          activate_name=act_func2)
                ULeft_NN_freqs = DNN_base.PDE_DNN_adapt_scale(X_left_bd, W2NN_freqs, B2NN_freqs, hidden2scale,
                                                              freqs, activate_name=act_func2)
                URight_NN_freqs = DNN_base.PDE_DNN_adapt_scale(X_right_bd, W2NN_freqs, B2NN_freqs, hidden2scale,
                                                               freqs, activate_name=act_func2)
            elif R['model2scale'] == 'PDE_DNN_FourierBase':
                U_NN_freqs = DNN_base.PDE_DNN_FourierBase(X_it, W2NN_freqs, B2NN_freqs, hidden2scale, freqs,
                                                          activate_name=act_func2)
                ULeft_NN_freqs = DNN_base.PDE_DNN_FourierBase(X_left_bd, W2NN_freqs, B2NN_freqs, hidden2scale,
                                                              freqs, activate_name=act_func2)
                URight_NN_freqs = DNN_base.PDE_DNN_FourierBase(X_right_bd, W2NN_freqs, B2NN_freqs, hidden2scale,
                                                               freqs, activate_name=act_func2)
            elif R['model2scale'] == 'PDE_DNN_Cos_C_Sin_Base':
                U_NN_freqs = DNN_base.PDE_DNN_Cos_C_Sin_Base(X_it, W2NN_freqs, B2NN_freqs, hidden2scale,
                                                             freqs, activate_name=act_func2)
                ULeft_NN_freqs = DNN_base.PDE_DNN_Cos_C_Sin_Base(X_left_bd, W2NN_freqs, B2NN_freqs,
                                                                 hidden2scale, freqs, activate_name=act_func2)
                URight_NN_freqs = DNN_base.PDE_DNN_Cos_C_Sin_Base(X_right_bd, W2NN_freqs, B2NN_freqs,
                                                                  hidden2scale, freqs, activate_name=act_func2)
            elif R['model2scale'] == 'DNN_adaptCosSin_Base':
                U_NN_freqs = DNN_base.DNN_adaptCosSin_Base(X_it, W2NN_freqs, B2NN_freqs, hidden2scale,
                                                           freqs, activate_name=act_func2)
                ULeft_NN_freqs = DNN_base.DNN_adaptCosSin_Base(X_left_bd, W2NN_freqs, B2NN_freqs,
                                                               hidden2scale, freqs, activate_name=act_func2)
                URight_NN_freqs = DNN_base.DNN_adaptCosSin_Base(X_right_bd, W2NN_freqs, B2NN_freqs,
                                                                hidden2scale, freqs, activate_name=act_func2)

            U_NN = U_NN_Normal + U_NN_freqs

            # Variational (interior) loss. U_NN has shape (*, 1): each input point yields one value of u.
            dU_NN_Normal = tf.gradients(U_NN_Normal, X_it)[0]  # shape (*, input_dim)
            dU_NN_freqs = tf.gradients(U_NN_freqs, X_it)[0]    # shape (*, input_dim)
            if R['variational_loss'] == 1:
                dU_NN = tf.add(dU_NN_Normal, dU_NN_freqs)
                if R['PDE_type'] == 'general_laplace':
                    laplace_norm2NN = tf.reduce_sum(tf.square(dU_NN), axis=-1)
                    loss_it_NN = (1.0 / 2) * tf.reshape(laplace_norm2NN, shape=[-1, 1]) - \
                                 tf.multiply(tf.reshape(f(X_it), shape=[-1, 1]), U_NN)
                elif R['PDE_type'] == 'p_laplace':
                    # a_eps = A_eps(X_it)  # shape (*, 1)
                    a_eps = 1 / (2 + tf.cos(2 * np.pi * X_it / epsilon))
                    laplace_p_pow2NN = tf.reduce_sum(a_eps * tf.pow(tf.abs(dU_NN), p_index), axis=-1)
                    loss_it_NN = (1.0 / p_index) * tf.reshape(laplace_p_pow2NN, shape=[-1, 1]) - \
                                 tf.multiply(tf.reshape(f(X_it), shape=[-1, 1]), U_NN)

                Loss_it2NN = tf.reduce_mean(loss_it_NN) * (region_r - region_l)

                if R['wavelet'] == 1:
                    # |Uc*Uf|^2 --> 0
                    norm2UdU = tf.square(tf.multiply(U_NN_Normal, U_NN_freqs))
                    UNN_dot_UNN = tf.reduce_mean(norm2UdU, axis=0)
                elif R['wavelet'] == 2:
                    # |a(x)*(grad Uc)*(grad Uf)|^2 --> 0
                    # Note: a_eps is only defined in the p_laplace branch above, so this option
                    # implicitly assumes R['PDE_type'] == 'p_laplace'.
                    dU_dot_dU = tf.multiply(dU_NN_Normal, dU_NN_freqs)
                    sum2dUdU = tf.reshape(tf.reduce_sum(dU_dot_dU, axis=-1), shape=[-1, 1])
                    norm2AdUdU = tf.square(tf.multiply(a_eps, sum2dUdU))
                    # norm2AdUdU = tf.square(sum2dUdU)
                    UNN_dot_UNN = tf.reduce_mean(norm2AdUdU, axis=0)
                else:
                    # same remark as above: this branch also relies on a_eps from the p_laplace case
                    U_dot_U = tf.reduce_mean(tf.square(tf.multiply(U_NN_Normal, U_NN_freqs)), axis=0)
                    dU_dot_dU = tf.multiply(dU_NN_Normal, dU_NN_freqs)
                    sum2dUdU = tf.reshape(tf.reduce_sum(dU_dot_dU, axis=-1), shape=[-1, 1])
                    norm2AdUdU = tf.square(tf.multiply(a_eps, sum2dUdU))
                    UNN_dot_UNN = tf.reduce_mean(norm2AdUdU, axis=0) + U_dot_U
            elif R['variational_loss'] == 2:
                dU_NN = tf.add(dU_NN_Normal, dU_NN_freqs)
                if R['PDE_type'] == 'general_laplace':
                    laplace_norm2NN = tf.reduce_sum(tf.square(dU_NN), axis=-1)
                    loss_it_NN = (1.0 / 2) * tf.reshape(laplace_norm2NN, shape=[-1, 1]) - \
                                 tf.multiply(tf.reshape(f(X_it), shape=[-1, 1]), U_NN)
                elif R['PDE_type'] == 'p_laplace':
                    # a_eps = A_eps(X_it)  # shape (*, 1)
                    a_eps = 1 / (2 + tf.cos(2 * np.pi * X_it / epsilon))
                    laplace_p_pow2NN = tf.reduce_sum(a_eps * tf.pow(tf.abs(dU_NN), p_index), axis=-1)
                    loss_it_NN = (1.0 / p_index) * tf.reshape(laplace_p_pow2NN, shape=[-1, 1]) - \
                                 tf.multiply(tf.reshape(f(X_it), shape=[-1, 1]), U_NN)

                Loss_it2NN = tf.reduce_mean(loss_it_NN) * (region_r - region_l)

                if R['wavelet'] == 1:
                    norm2UdU = tf.square(tf.multiply(U_NN_Normal, U_NN_freqs))
                    UNN_dot_UNN = tf.reduce_mean(norm2UdU, axis=0)
                else:
                    UNN_dot_UNN = tf.constant(0.0)

            Loss2UNN_dot_UNN = penalty2powU * UNN_dot_UNN

            U_left = tf.reshape(u_left(X_left_bd), shape=[-1, 1])
            U_right = tf.reshape(u_right(X_right_bd), shape=[-1, 1])
            loss_bd_Normal = tf.square(ULeft_NN_Normal - U_left) + tf.square(URight_NN_Normal - U_right)
            loss_bd_Freqs = tf.square(ULeft_NN_freqs - U_left) + tf.square(URight_NN_freqs - U_right)
            Loss_bd2NN = tf.reduce_mean(loss_bd_Normal) + tf.reduce_mean(loss_bd_Freqs)

            if R['regular_weight_model'] == 'L1':
                # L1 regularization of the weights and biases
                regular_WB_Normal = DNN_base.regular_weights_biases_L1(W2NN_Normal, B2NN_Normal)
                regular_WB_Scale = DNN_base.regular_weights_biases_L1(W2NN_freqs, B2NN_freqs)
            elif R['regular_weight_model'] == 'L2':
                # L2 regularization of the weights and biases
                regular_WB_Normal = DNN_base.regular_weights_biases_L2(W2NN_Normal, B2NN_Normal)
                regular_WB_Scale = DNN_base.regular_weights_biases_L2(W2NN_freqs, B2NN_freqs)
            else:
                # no regularization of the weight parameters
                regular_WB_Normal = tf.constant(0.0)
                regular_WB_Scale = tf.constant(0.0)

            penalty_Weight_Bias = wb_regular * (regular_WB_Normal + regular_WB_Scale)

            Loss2NN = Loss_it2NN + bd_penalty * Loss_bd2NN + Loss2UNN_dot_UNN + penalty_Weight_Bias

            my_optimizer = tf.train.AdamOptimizer(in_learning_rate)
            if R['variational_loss'] == 1:
                if R['train_group'] == 1:
                    train_op1 = my_optimizer.minimize(Loss_it2NN, global_step=global_steps)
                    train_op2 = my_optimizer.minimize(Loss_bd2NN, global_step=global_steps)
                    train_op3 = my_optimizer.minimize(Loss2UNN_dot_UNN, global_step=global_steps)
                    train_op4 = my_optimizer.minimize(Loss2NN, global_step=global_steps)
                    train_Loss2NN = tf.group(train_op1, train_op2, train_op3, train_op4)
                elif R['train_group'] == 2:
                    train_op1 = my_optimizer.minimize(Loss2NN, global_step=global_steps)
                    train_op2 = my_optimizer.minimize(Loss_bd2NN, global_step=global_steps)
                    train_op3 = my_optimizer.minimize(Loss2UNN_dot_UNN, global_step=global_steps)
                    train_Loss2NN = tf.group(train_op1, train_op2, train_op3)
                else:
                    train_Loss2NN = my_optimizer.minimize(Loss2NN, global_step=global_steps)
            elif R['variational_loss'] == 2:
                if R['train_group'] == 1:
                    train_op1 = my_optimizer.minimize(Loss_it2NN, global_step=global_steps)
                    train_op2 = my_optimizer.minimize(Loss_bd2NN, global_step=global_steps)
                    train_op3 = my_optimizer.minimize(Loss2NN, global_step=global_steps)
                    train_Loss2NN = tf.group(train_op1, train_op2, train_op3)
                elif R['train_group'] == 2:
                    train_op1 = my_optimizer.minimize(Loss2NN, global_step=global_steps)
                    train_op2 = my_optimizer.minimize(Loss_bd2NN, global_step=global_steps)
                    train_Loss2NN = tf.group(train_op1, train_op2)
                else:
                    train_Loss2NN = my_optimizer.minimize(Loss2NN, global_step=global_steps)

            # error between the true solution and the network prediction on the training points
            U_true = u_true(X_it)
            train_mse_NN = tf.reduce_mean(tf.square(U_true - U_NN))
            train_rel_NN = train_mse_NN / tf.reduce_mean(tf.square(U_true))

    t0 = time.time()
    # empty lists; elements are appended during training
    lossIt_all2NN, lossBD_all2NN, loss_all2NN, UDU_NN, train_mse_all2NN, train_rel_all2NN = [], [], [], [], [], []
    test_mse_all2NN, test_rel_all2NN = [], []
    test_epoch = []

    test_batch_size = 1000
    test_x_batch = np.reshape(np.linspace(region_l, region_r, num=test_batch_size), [-1, 1])
    saveData.save_testData_or_solus2mat(test_x_batch, dataName='testx', outPath=R['FolderName'])

    # with allow_soft_placement=True in ConfigProto, the GPU can be used
    config = tf.ConfigProto(allow_soft_placement=True)  # configure the session when it is created
    config.gpu_options.allow_growth = True  # allocate GPU memory on demand instead of reserving it all up front
    config.allow_soft_placement = True      # fall back to an existing device (e.g., CPU) if the requested one is absent
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        tmp_lr = learning_rate

        for i_epoch in range(R['max_epoch'] + 1):
            x_it_batch = DNN_data.rand_it(batchsize_it, input_dim, region_a=region_l, region_b=region_r)
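            # Fresh interior and boundary collocation points are drawn at every epoch; the staged
            # schedules below then anneal the boundary penalty and the solution-coupling penalty.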
            xl_bd_batch, xr_bd_batch = DNN_data.rand_bd_1D(batchsize_bd, input_dim,
                                                           region_a=region_l, region_b=region_r)
            tmp_lr = tmp_lr * (1 - lr_decay)
            if R['activate_penalty2bd_increase'] == 1:
                if i_epoch < int(R['max_epoch'] / 10):
                    temp_penalty_bd = bd_penalty_init
                elif i_epoch < int(R['max_epoch'] / 5):
                    temp_penalty_bd = 10 * bd_penalty_init
                elif i_epoch < int(R['max_epoch'] / 4):
                    temp_penalty_bd = 50 * bd_penalty_init
                elif i_epoch < int(R['max_epoch'] / 2):
                    temp_penalty_bd = 100 * bd_penalty_init
                elif i_epoch < int(3 * R['max_epoch'] / 4):
                    temp_penalty_bd = 200 * bd_penalty_init
                else:
                    temp_penalty_bd = 500 * bd_penalty_init
            elif R['activate_penalty2bd_increase'] == 2:
                if i_epoch < int(R['max_epoch'] / 10):
                    temp_penalty_bd = 5 * bd_penalty_init
                elif i_epoch < int(R['max_epoch'] / 5):
                    temp_penalty_bd = 1 * bd_penalty_init
                elif i_epoch < int(R['max_epoch'] / 4):
                    temp_penalty_bd = 0.5 * bd_penalty_init
                elif i_epoch < int(R['max_epoch'] / 2):
                    temp_penalty_bd = 0.1 * bd_penalty_init
                elif i_epoch < int(3 * R['max_epoch'] / 4):
                    temp_penalty_bd = 0.05 * bd_penalty_init
                else:
                    temp_penalty_bd = 0.02 * bd_penalty_init
            else:
                temp_penalty_bd = bd_penalty_init

            if R['activate_powSolus_increase'] == 1:
                if i_epoch < int(R['max_epoch'] / 10):
                    temp_penalty_powU = init_penalty2powU
                elif i_epoch < int(R['max_epoch'] / 5):
                    temp_penalty_powU = 10 * init_penalty2powU
                elif i_epoch < int(R['max_epoch'] / 4):
                    temp_penalty_powU = 50 * init_penalty2powU
                elif i_epoch < int(R['max_epoch'] / 2):
                    temp_penalty_powU = 100 * init_penalty2powU
                elif i_epoch < int(3 * R['max_epoch'] / 4):
                    temp_penalty_powU = 200 * init_penalty2powU
                else:
                    temp_penalty_powU = 500 * init_penalty2powU
            elif R['activate_powSolus_increase'] == 2:
                if i_epoch < int(R['max_epoch'] / 10):
                    temp_penalty_powU = 5 * init_penalty2powU
                elif i_epoch < int(R['max_epoch'] / 5):
                    temp_penalty_powU = 1 * init_penalty2powU
                elif i_epoch < int(R['max_epoch'] / 4):
                    temp_penalty_powU = 0.5 * init_penalty2powU
                elif i_epoch < int(R['max_epoch'] / 2):
                    temp_penalty_powU = 0.1 * init_penalty2powU
                elif i_epoch < int(3 * R['max_epoch'] / 4):
                    temp_penalty_powU = 0.05 * init_penalty2powU
                else:
                    temp_penalty_powU = 0.02 * init_penalty2powU
            else:
                temp_penalty_powU = init_penalty2powU

            p_WB = 0.0  # placeholder for the weight/bias penalty value reported in the log printout
            _, loss_it_nn, loss_bd_nn, loss_nn, udu_nn, train_mse_nn, train_rel_nn = sess.run(
                [train_Loss2NN, Loss_it2NN, Loss_bd2NN, Loss2NN, UNN_dot_UNN, train_mse_NN, train_rel_NN],
                feed_dict={X_it: x_it_batch, X_left_bd: xl_bd_batch, X_right_bd: xr_bd_batch,
                           in_learning_rate: tmp_lr, bd_penalty: temp_penalty_bd,
                           penalty2powU: temp_penalty_powU})

            lossIt_all2NN.append(loss_it_nn)
            lossBD_all2NN.append(loss_bd_nn)
            loss_all2NN.append(loss_nn)
            UDU_NN.append(udu_nn)
            train_mse_all2NN.append(train_mse_nn)
            train_rel_all2NN.append(train_rel_nn)

            if i_epoch % 1000 == 0:
                run_times = time.time() - t0
                DNN_tools.print_and_log_train_one_epoch(
                    i_epoch, run_times, tmp_lr, temp_penalty_bd, temp_penalty_powU, p_WB,
                    loss_it_nn, loss_bd_nn, loss_nn, udu_nn, train_mse_nn, train_rel_nn,
                    log_out=log_fileout_NN)

                # --------------------------- test network ----------------------------------------------
                test_epoch.append(i_epoch / 1000)
                train_option = False
                u_true2test, utest_nn, u_nn_normal, u_nn_scale = sess.run(
                    [U_true, U_NN, U_NN_Normal, U_NN_freqs],
                    feed_dict={X_it: test_x_batch, train_opt: train_option})
                test_mse2nn = np.mean(np.square(u_true2test - utest_nn))
                test_mse_all2NN.append(test_mse2nn)
                test_rel2nn = test_mse2nn / np.mean(np.square(u_true2test))
                test_rel_all2NN.append(test_rel2nn)
                DNN_tools.print_and_log_test_one_epoch(test_mse2nn, test_rel2nn, log_out=log_fileout_NN)

        # ----------------------- save training results to mat files, then plot them ---------------------------------
        saveData.save_trainLoss2mat_1actFunc(lossIt_all2NN, lossBD_all2NN, loss_all2NN,
                                             actName=act_func1, outPath=R['FolderName'])
        saveData.save_train_MSE_REL2mat(train_mse_all2NN, train_rel_all2NN,
                                        actName=act_func1, outPath=R['FolderName'])
        plotData.plotTrain_loss_1act_func(lossIt_all2NN, lossType='loss_it', seedNo=R['seed'],
                                          outPath=R['FolderName'])
        plotData.plotTrain_loss_1act_func(lossBD_all2NN, lossType='loss_bd', seedNo=R['seed'],
                                          outPath=R['FolderName'], yaxis_scale=True)
        plotData.plotTrain_loss_1act_func(loss_all2NN, lossType='loss', seedNo=R['seed'],
                                          outPath=R['FolderName'])
        plotData.plotTrain_MSE_REL_1act_func(train_mse_all2NN, train_rel_all2NN, actName=act_func2,
                                             seedNo=R['seed'], outPath=R['FolderName'], yaxis_scale=True)

        # ---------------------- save testing results to mat files, then plot them --------------------------------
        saveData.save_testData_or_solus2mat(u_true2test, dataName='Utrue', outPath=R['FolderName'])
        saveData.save_testData_or_solus2mat(utest_nn, dataName=act_func1, outPath=R['FolderName'])
        saveData.save_testData_or_solus2mat(u_nn_normal, dataName='normal', outPath=R['FolderName'])
        saveData.save_testData_or_solus2mat(u_nn_scale, dataName='scale', outPath=R['FolderName'])
        saveData.save_testMSE_REL2mat(test_mse_all2NN, test_rel_all2NN, actName=act_func2,
                                      outPath=R['FolderName'])
        plotData.plotTest_MSE_REL(test_mse_all2NN, test_rel_all2NN, test_epoch, actName=act_func2,
                                  seedNo=R['seed'], outPath=R['FolderName'], yaxis_scale=True)
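

# A minimal sketch of a configuration for solve_Multiscale_PDE. The keys below are exactly the ones
# the function reads from R; every concrete value is an illustrative assumption for a quick smoke
# test, not a setting taken from the original experiments.
def demo_config2Multiscale_PDE():
    R = {}
    R['FolderName'] = 'output2pLaplace'          # output directory (created if absent)
    R['gpuNo'] = 0
    R['seed'] = 20
    R['PDE_type'] = 'p_laplace'
    R['equa_name'] = 'multi_scale'               # assumed equation name; must match pLaplace_eqs1d
    R['order2laplace'] = 2                       # p in the p-Laplacian
    R['epsilon'] = 0.1                           # scale-separation parameter of A_eps
    R['input_dim'] = 1
    R['output_dim'] = 1
    R['batch_size2interior'] = 3000
    R['batch_size2boundary'] = 500
    R['max_epoch'] = 50000
    R['learning_rate'] = 2e-4
    R['learning_rate_decay'] = 5e-5
    R['init_boundary_penalty'] = 100             # initial boundary penalty, annealed by the stage schedule
    R['activate_penalty2bd_increase'] = 1
    R['balance2solus'] = 100                     # initial penalty on the Uc*Uf coupling term
    R['activate_powSolus_increase'] = 0
    R['variational_loss'] = 1
    R['wavelet'] = 1
    R['weight_biases_model'] = 'general_model'
    R['regular_weight_model'] = 'L2'
    R['regular_weight_biases'] = 1e-7
    R['hidden2normal'] = (100, 80, 60, 60, 40)   # assumed widths for the normal-scale sub-network
    R['hidden2scale'] = (200, 150, 100, 100, 80) # assumed widths for the fine-scale sub-network
    R['model2normal'] = 'PDE_DNN'
    R['model2scale'] = 'PDE_DNN_scale'
    R['act_name2NN1'] = 'tanh'
    R['act_name2NN2'] = 's2relu'                 # assumed activation token understood by DNN_base
    R['freqs'] = np.arange(1, 31)                # frequency ladder for the scale sub-network
    return R
    # Usage sketch: R = demo_config2Multiscale_PDE(); solve_Multiscale_PDE(R)
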
def solve_SIR2COVID(R):
    log_out_path = R['FolderName']        # extract the output path from the dictionary R
    if not os.path.exists(log_out_path):  # create the output directory if it does not already exist
        os.mkdir(log_out_path)
    # create and open writable log files in the output directory
    log_fileout = open(os.path.join(log_out_path, 'log_train.txt'), 'w')
    DNN_LogPrint.dictionary_out2file(R, log_fileout)
    log2trainSolus = open(os.path.join(log_out_path, 'train_Solus.txt'), 'w')
    log2testSolus = open(os.path.join(log_out_path, 'test_Solus.txt'), 'w')
    log2testSolus2 = open(os.path.join(log_out_path, 'test_Solus_temp.txt'), 'w')
    log2testParas = open(os.path.join(log_out_path, 'test_Paras.txt'), 'w')

    trainSet_size = R['size2train']
    batchSize_train = R['batch_size2train']
    batchSize_test = R['batch_size2test']
    pt_penalty_init = R['init_penalty2predict_true']  # penalty parameter for the prediction-data misfit
    wb_penalty = R['regular_weight']                  # regularization parameter for the weights
    lr_decay = R['lr_decay']
    learning_rate = R['learning_rate']
    act_func2SIR = R['act2sir']
    act_func2paras = R['act2paras']

    input_dim = R['input_dim']
    out_dim = R['output_dim']

    flag2S = 'WB2S'
    flag2I = 'WB2I'
    flag2R = 'WB2R'
    flag2beta = 'WB2beta'
    flag2gamma = 'WB2gamma'
    hidden_sir = R['hidden2SIR']
    hidden_para = R['hidden2para']

    Weight2S, Bias2S = DNN.init_DNN(size2in=input_dim, size2out=out_dim, hiddens=hidden_sir,
                                    scope=flag2S, opt2Init=R['SIR_opt2init_NN'])
    Weight2I, Bias2I = DNN.init_DNN(size2in=input_dim, size2out=out_dim, hiddens=hidden_sir,
                                    scope=flag2I, opt2Init=R['SIR_opt2init_NN'])
    Weight2R, Bias2R = DNN.init_DNN(size2in=input_dim, size2out=out_dim, hiddens=hidden_sir,
                                    scope=flag2R, opt2Init=R['SIR_opt2init_NN'])
    Weight2beta, Bias2beta = DNN.init_DNN(size2in=input_dim, size2out=out_dim, hiddens=hidden_para,
                                          scope=flag2beta, opt2Init=R['Para_opt2init_NN'])
    Weight2gamma, Bias2gamma = DNN.init_DNN(size2in=input_dim, size2out=out_dim, hiddens=hidden_para,
                                            scope=flag2gamma, opt2Init=R['Para_opt2init_NN'])

    global_steps = tf.Variable(0, trainable=False)
    with tf.device('/gpu:%s' % (R['gpuNo'])):
        with tf.variable_scope('vscope', reuse=tf.AUTO_REUSE):
            T_it = tf.placeholder(tf.float32, name='T_it', shape=[None, input_dim])
            I_observe = tf.placeholder(tf.float32, name='I_observe', shape=[None, input_dim])
            N_observe = tf.placeholder(tf.float32, name='N_observe', shape=[None, input_dim])
            predict_true_penalty = tf.placeholder_with_default(input=1e3, shape=[], name='bd_p')
            in_learning_rate = tf.placeholder_with_default(input=1e-5, shape=[], name='lr')
            train_opt = tf.placeholder_with_default(input=True, shape=[], name='train_opt')

            SNN_temp = DNN.PDE_DNN(x=T_it, hiddenLayer=hidden_sir, Weigths=Weight2S, Biases=Bias2S,
                                   DNNmodel=R['model2sir'], activation=act_func2SIR, freqs=R['freqs'])
            INN_temp = DNN.PDE_DNN(x=T_it, hiddenLayer=hidden_sir, Weigths=Weight2I, Biases=Bias2I,
                                   DNNmodel=R['model2sir'], activation=act_func2SIR, freqs=R['freqs'])
            RNN_temp = DNN.PDE_DNN(x=T_it, hiddenLayer=hidden_sir, Weigths=Weight2R, Biases=Bias2R,
                                   DNNmodel=R['model2sir'], activation=act_func2SIR, freqs=R['freqs'])
            in_beta = DNN.PDE_DNN(x=T_it, hiddenLayer=hidden_para, Weigths=Weight2beta, Biases=Bias2beta,
                                  DNNmodel=R['model2paras'], activation=act_func2paras, freqs=R['freqs'])
            in_gamma = DNN.PDE_DNN(x=T_it, hiddenLayer=hidden_para, Weigths=Weight2gamma, Biases=Bias2gamma,
                                   DNNmodel=R['model2paras'], activation=act_func2paras, freqs=R['freqs'])

            # Remark: beta, gamma, S_NN, I_NN and R_NN should all be positive. beta lies roughly in
            # 1--15 and gamma in (0, 1); with population normalization, S_NN, I_NN and R_NN all lie
            # in [0, 1).
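            # The residual targets below encode the classical SIR system (a restatement of the three
            # derivative expressions used further down, with N = S + I + R assumed conserved):
            #     dS/dt = -beta * S * I
            #     dI/dt =  beta * S * I - gamma * I
            #     dR/dt =  gamma * I
            #     dN/dt =  0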
            if (R['total_population'] == R['scale_population']) and R['scale_population'] != 1:
                beta = in_beta
                gamma = in_gamma
                # SNN = SNN_temp
                # INN = INN_temp
                # RNN = RNN_temp

                # SNN = tf.nn.relu(SNN_temp)
                # INN = tf.nn.relu(INN_temp)
                # RNN = tf.nn.relu(RNN_temp)

                # SNN = tf.abs(SNN_temp)
                # INN = tf.abs(INN_temp)
                # RNN = tf.abs(RNN_temp)

                # SNN = DNN_base.gauss(SNN_temp)
                # INN = tf.square(INN_temp)
                # RNN = tf.square(RNN_temp)

                # SNN = DNN_base.gauss(SNN_temp)
                # INN = tf.square(INN_temp)
                # RNN = tf.nn.sigmoid(RNN_temp)

                # SNN = DNN_base.gauss(SNN_temp)
                # INN = tf.nn.sigmoid(INN_temp)
                # RNN = tf.square(RNN_temp)

                # SNN = tf.sqrt(tf.square(SNN_temp))
                # INN = tf.sqrt(tf.square(INN_temp))
                # RNN = tf.sqrt(tf.square(RNN_temp))

                SNN = tf.nn.sigmoid(SNN_temp)
                INN = tf.nn.sigmoid(INN_temp)
                RNN = tf.nn.sigmoid(RNN_temp)
            else:
                beta = in_beta
                gamma = in_gamma
                # SNN = SNN_temp
                # INN = INN_temp
                # RNN = RNN_temp

                # SNN = tf.nn.relu(SNN_temp)
                # INN = tf.nn.relu(INN_temp)
                # RNN = tf.nn.relu(RNN_temp)

                SNN = tf.nn.sigmoid(SNN_temp)
                INN = tf.nn.sigmoid(INN_temp)
                RNN = tf.nn.sigmoid(RNN_temp)

            N_NN = SNN + INN + RNN

            dSNN2t = tf.gradients(SNN, T_it)[0]
            dINN2t = tf.gradients(INN, T_it)[0]
            dRNN2t = tf.gradients(RNN, T_it)[0]
            dN_NN2t = tf.gradients(N_NN, T_it)[0]

            temp_snn2t = -beta * SNN * INN
            temp_inn2t = beta * SNN * INN - gamma * INN
            temp_rnn2t = gamma * INN

            if str.lower(R['loss_function']) == 'l2_loss' and R['scale_up'] == 0:
                # LossS_Net_obs = tf.reduce_mean(tf.square(SNN - S_observe))
                LossI_Net_obs = tf.reduce_mean(tf.square(INN - I_observe))
                # LossR_Net_obs = tf.reduce_mean(tf.square(RNN - R_observe))
                LossN_Net_obs = tf.reduce_mean(tf.square(N_NN - N_observe))

                Loss2dS = tf.reduce_mean(tf.square(dSNN2t - temp_snn2t))
                Loss2dI = tf.reduce_mean(tf.square(dINN2t - temp_inn2t))
                Loss2dR = tf.reduce_mean(tf.square(dRNN2t - temp_rnn2t))
                Loss2dN = tf.reduce_mean(tf.square(dN_NN2t))
            elif str.lower(R['loss_function']) == 'l2_loss' and R['scale_up'] == 1:
                scale_up = R['scale_factor']
                # LossS_Net_obs = tf.reduce_mean(tf.square(scale_up*SNN - scale_up*S_observe))
                LossI_Net_obs = tf.reduce_mean(tf.square(scale_up * INN - scale_up * I_observe))
                # LossR_Net_obs = tf.reduce_mean(tf.square(scale_up*RNN - scale_up*R_observe))
                LossN_Net_obs = tf.reduce_mean(tf.square(scale_up * N_NN - scale_up * N_observe))

                Loss2dS = tf.reduce_mean(tf.square(scale_up * dSNN2t - scale_up * temp_snn2t))
                Loss2dI = tf.reduce_mean(tf.square(scale_up * dINN2t - scale_up * temp_inn2t))
                Loss2dR = tf.reduce_mean(tf.square(scale_up * dRNN2t - scale_up * temp_rnn2t))
                Loss2dN = tf.reduce_mean(tf.square(scale_up * dN_NN2t))
            elif str.lower(R['loss_function']) == 'lncosh_loss':
                # LossS_Net_obs = tf.reduce_mean(tf.log(tf.cosh(SNN - S_observe)))
                LossI_Net_obs = tf.reduce_mean(tf.log(tf.cosh(INN - I_observe)))
                # LossR_Net_obs = tf.reduce_mean(tf.log(tf.cosh(RNN - R_observe)))
                LossN_Net_obs = tf.reduce_mean(tf.log(tf.cosh(N_NN - N_observe)))

                Loss2dS = tf.reduce_mean(tf.log(tf.cosh(dSNN2t - temp_snn2t)))
                Loss2dI = tf.reduce_mean(tf.log(tf.cosh(dINN2t - temp_inn2t)))
                Loss2dR = tf.reduce_mean(tf.log(tf.cosh(dRNN2t - temp_rnn2t)))
                Loss2dN = tf.reduce_mean(tf.log(tf.cosh(dN_NN2t)))

            if R['regular_weight_model'] == 'L1':
                regular_WB2S = DNN_base.regular_weights_biases_L1(Weight2S, Bias2S)
                regular_WB2I = DNN_base.regular_weights_biases_L1(Weight2I, Bias2I)
                regular_WB2R = DNN_base.regular_weights_biases_L1(Weight2R, Bias2R)
                regular_WB2Beta = DNN_base.regular_weights_biases_L1(Weight2beta, Bias2beta)
                regular_WB2Gamma = DNN_base.regular_weights_biases_L1(Weight2gamma, Bias2gamma)
            elif R['regular_weight_model'] == 'L2':
                regular_WB2S = DNN_base.regular_weights_biases_L2(Weight2S, Bias2S)
                regular_WB2I = DNN_base.regular_weights_biases_L2(Weight2I, Bias2I)
                regular_WB2R = DNN_base.regular_weights_biases_L2(Weight2R, Bias2R)
                regular_WB2Beta = DNN_base.regular_weights_biases_L2(Weight2beta, Bias2beta)
                regular_WB2Gamma = DNN_base.regular_weights_biases_L2(Weight2gamma, Bias2gamma)
            else:
                regular_WB2S = tf.constant(0.0)
                regular_WB2I = tf.constant(0.0)
                regular_WB2R = tf.constant(0.0)
                regular_WB2Beta = tf.constant(0.0)
                regular_WB2Gamma = tf.constant(0.0)

            PWB2S = wb_penalty * regular_WB2S
            PWB2I = wb_penalty * regular_WB2I
            PWB2R = wb_penalty * regular_WB2R
            PWB2Beta = wb_penalty * regular_WB2Beta
            PWB2Gamma = wb_penalty * regular_WB2Gamma

            Loss2S = Loss2dS + PWB2S
            Loss2I = predict_true_penalty * LossI_Net_obs + Loss2dI + PWB2I
            Loss2R = Loss2dR + PWB2R
            Loss2N = predict_true_penalty * LossN_Net_obs + Loss2dN
            Loss = Loss2S + Loss2I + Loss2R + Loss2N + PWB2Beta + PWB2Gamma

            my_optimizer = tf.train.AdamOptimizer(in_learning_rate)
            if R['train_model'] == 'train_group':
                train_Loss2S = my_optimizer.minimize(Loss2S, global_step=global_steps)
                train_Loss2I = my_optimizer.minimize(Loss2I, global_step=global_steps)
                train_Loss2R = my_optimizer.minimize(Loss2R, global_step=global_steps)
                train_Loss2N = my_optimizer.minimize(Loss2N, global_step=global_steps)
                train_Loss = my_optimizer.minimize(Loss, global_step=global_steps)
                train_Losses = tf.group(train_Loss2S, train_Loss2I, train_Loss2R, train_Loss2N, train_Loss)
            elif R['train_model'] == 'train_union_loss':
                train_Losses = my_optimizer.minimize(Loss, global_step=global_steps)

    t0 = time.time()
    loss_s_all, loss_i_all, loss_r_all, loss_n_all, loss_all = [], [], [], [], []
    test_epoch = []
    test_mse2I_all, test_rel2I_all = [], []

    # filename = 'data2csv/Wuhan.csv'
    # filename = 'data2csv/Italia_data.csv'
    filename = 'data2csv/Korea_data.csv'
    date, data = DNN_data.load_csvData(filename)

    assert (trainSet_size + batchSize_test <= len(data))
    train_date, train_data2i, test_date, test_data2i = DNN_data.split_csvData2train_test(
        date, data, size2train=trainSet_size, normalFactor=R['scale_population'])

    if R['scale_population'] == 1:
        nbatch2train = np.ones(batchSize_train, dtype=np.float32) * float(R['total_population'])
    elif (R['total_population'] != R['scale_population']) and R['scale_population'] != 1:
        nbatch2train = np.ones(batchSize_train, dtype=np.float32) * (
            float(R['total_population']) / float(R['scale_population']))
    elif (R['total_population'] == R['scale_population']) and R['scale_population'] != 1:
        nbatch2train = np.ones(batchSize_train, dtype=np.float32)

    # For time-series data, the model should be validated on consecutive time points.
    test_t_batch = DNN_data.sample_testDays_serially(test_date, batchSize_test)
    i_obs_test = DNN_data.sample_testData_serially(test_data2i, batchSize_test, normalFactor=1.0)
    print('The test data about i:\n', str(np.transpose(i_obs_test)))
    print('\n')
    DNN_tools.log_string('The test data about i:\n%s\n' % str(np.transpose(i_obs_test)), log_fileout)

    # with allow_soft_placement=True in ConfigProto, the GPU can be used
    config = tf.ConfigProto(allow_soft_placement=True)  # configure the session when it is created
    config.gpu_options.allow_growth = True  # allocate GPU memory on demand instead of reserving it all up front
    config.allow_soft_placement = True      # fall back to an existing device (e.g., CPU) if the requested one is absent
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        tmp_lr = learning_rate
        for i_epoch in range(R['max_epoch'] + 1):
            t_batch, i_obs = DNN_data.randSample_Normalize_existData(
                train_date, train_data2i, batchsize=batchSize_train, normalFactor=1.0,
                sampling_opt=R['opt2sample'])
            n_obs = nbatch2train.reshape(batchSize_train, 1)
            tmp_lr = tmp_lr * (1 - lr_decay)
            train_option = True
            if R['activate_stage_penalty'] == 1:
                if i_epoch < int(R['max_epoch'] / 10):
                    temp_penalty_pt = pt_penalty_init
                elif i_epoch < int(R['max_epoch'] / 5):
                    temp_penalty_pt = 10 * pt_penalty_init
                elif i_epoch < int(R['max_epoch'] / 4):
                    temp_penalty_pt = 50 * pt_penalty_init
                elif i_epoch < int(R['max_epoch'] / 2):
                    temp_penalty_pt = 100 * pt_penalty_init
                elif i_epoch < int(3 * R['max_epoch'] / 4):
                    temp_penalty_pt = 200 * pt_penalty_init
                else:
                    temp_penalty_pt = 500 * pt_penalty_init
            elif R['activate_stage_penalty'] == 2:
                if i_epoch < int(R['max_epoch'] / 3):
                    temp_penalty_pt = pt_penalty_init
                elif i_epoch < 2 * int(R['max_epoch'] / 3):
                    temp_penalty_pt = 10 * pt_penalty_init
                else:
                    temp_penalty_pt = 50 * pt_penalty_init
            else:
                temp_penalty_pt = pt_penalty_init

            _, loss_s, loss_i, loss_r, loss_n, loss, pwb2s, pwb2i, pwb2r = sess.run(
                [train_Losses, Loss2S, Loss2I, Loss2R, Loss2N, Loss, PWB2S, PWB2I, PWB2R],
                feed_dict={T_it: t_batch, I_observe: i_obs, N_observe: n_obs,
                           in_learning_rate: tmp_lr, train_opt: train_option,
                           predict_true_penalty: temp_penalty_pt})

            loss_s_all.append(loss_s)
            loss_i_all.append(loss_i)
            loss_r_all.append(loss_r)
            loss_n_all.append(loss_n)
            loss_all.append(loss)

            if i_epoch % 1000 == 0:
                # log the training results of S_NN, I_NN, R_NN, beta and gamma during training
                DNN_LogPrint.print_and_log2train(i_epoch, time.time() - t0, tmp_lr, temp_penalty_pt,
                                                 pwb2s, pwb2i, pwb2r, loss_s, loss_i, loss_r, loss_n,
                                                 loss, log_out=log_fileout)

                s_nn2train, i_nn2train, r_nn2train = sess.run(
                    [SNN, INN, RNN], feed_dict={T_it: np.reshape(train_date, [-1, 1])})

                # log the test results of S_NN, I_NN, R_NN, beta and gamma during training
                test_epoch.append(i_epoch / 1000)
                train_option = False
                s_nn2test, i_nn2test, r_nn2test, beta_test, gamma_test = sess.run(
                    [SNN, INN, RNN, beta, gamma],
                    feed_dict={T_it: test_t_batch, train_opt: train_option})
                point_ERR2I = np.square(i_nn2test - i_obs_test)
                test_mse2I = np.mean(point_ERR2I)
                test_mse2I_all.append(test_mse2I)
                test_rel2I = test_mse2I / np.mean(np.square(i_obs_test))
                test_rel2I_all.append(test_rel2I)

                DNN_tools.print_and_log_test_one_epoch(test_mse2I, test_rel2I, log_out=log_fileout)
                DNN_tools.log_string('------------------The epoch----------------------: %s\n' % str(i_epoch),
                                     log2testSolus)
                DNN_tools.log_string('The test result for s:\n%s\n' % str(np.transpose(s_nn2test)), log2testSolus)
                DNN_tools.log_string('The test result for i:\n%s\n' % str(np.transpose(i_nn2test)), log2testSolus)
                DNN_tools.log_string('The test result for r:\n%s\n\n' % str(np.transpose(r_nn2test)), log2testSolus)

                # ----- log the test results of S_NN_temp, I_NN_temp, R_NN_temp, in_beta and in_gamma -----
                s_nn_temp2test, i_nn_temp2test, r_nn_temp2test, in_beta_test, in_gamma_test = sess.run(
                    [SNN_temp, INN_temp, RNN_temp, in_beta, in_gamma],
                    feed_dict={T_it: test_t_batch, train_opt: train_option})

                DNN_tools.log_string('------------------The epoch----------------------: %s\n' % str(i_epoch),
                                     log2testSolus2)
                DNN_tools.log_string('The test result for s_temp:\n%s\n' % str(np.transpose(s_nn_temp2test)),
                                     log2testSolus2)
                DNN_tools.log_string('The test result for i_temp:\n%s\n' % str(np.transpose(i_nn_temp2test)),
                                     log2testSolus2)
                DNN_tools.log_string('The test result for r_temp:\n%s\n\n' % str(np.transpose(r_nn_temp2test)),
                                     log2testSolus2)

                DNN_tools.log_string('------------------The epoch----------------------: %s\n' % str(i_epoch),
                                     log2testParas)
                DNN_tools.log_string('The test result for in_beta:\n%s\n' % str(np.transpose(in_beta_test)),
                                     log2testParas)
                DNN_tools.log_string('The test result for in_gamma:\n%s\n' % str(np.transpose(in_gamma_test)),
                                     log2testParas)

        DNN_tools.log_string('The train result for S:\n%s\n' % str(np.transpose(s_nn2train)), log2trainSolus)
        DNN_tools.log_string('The train result for I:\n%s\n' % str(np.transpose(i_nn2train)), log2trainSolus)
        DNN_tools.log_string('The train result for R:\n%s\n\n' % str(np.transpose(r_nn2train)), log2trainSolus)

        saveData.true_value2convid(train_data2i, name2Array='itrue2train', outPath=R['FolderName'])
        saveData.save_Solu2mat_Covid(s_nn2train, name2solus='s2train', outPath=R['FolderName'])
        saveData.save_Solu2mat_Covid(i_nn2train, name2solus='i2train', outPath=R['FolderName'])
        saveData.save_Solu2mat_Covid(r_nn2train, name2solus='r2train', outPath=R['FolderName'])

        saveData.save_SIR_trainLoss2mat_Covid(loss_s_all, loss_i_all, loss_r_all, loss_n_all,
                                              actName=act_func2SIR, outPath=R['FolderName'])

        plotData.plotTrain_loss_1act_func(loss_s_all, lossType='loss2s', seedNo=R['seed'],
                                          outPath=R['FolderName'], yaxis_scale=True)
        plotData.plotTrain_loss_1act_func(loss_i_all, lossType='loss2i', seedNo=R['seed'],
                                          outPath=R['FolderName'], yaxis_scale=True)
        plotData.plotTrain_loss_1act_func(loss_r_all, lossType='loss2r', seedNo=R['seed'],
                                          outPath=R['FolderName'], yaxis_scale=True)
        plotData.plotTrain_loss_1act_func(loss_n_all, lossType='loss2n', seedNo=R['seed'],
                                          outPath=R['FolderName'], yaxis_scale=True)

        saveData.true_value2convid(i_obs_test, name2Array='i_true2test', outPath=R['FolderName'])
        saveData.save_testMSE_REL2mat(test_mse2I_all, test_rel2I_all, actName='Infected',
                                      outPath=R['FolderName'])
        plotData.plotTest_MSE_REL(test_mse2I_all, test_rel2I_all, test_epoch, actName='Infected',
                                  seedNo=R['seed'], outPath=R['FolderName'], yaxis_scale=True)

        saveData.save_SIR_testSolus2mat_Covid(s_nn2test, i_nn2test, r_nn2test, name2solus1='snn2test',
                                              name2solus2='inn2test', name2solus3='rnn2test',
                                              outPath=R['FolderName'])
        saveData.save_SIR_testParas2mat_Covid(beta_test, gamma_test, name2para1='beta2test',
                                              name2para2='gamma2test', outPath=R['FolderName'])

        plotData.plot_testSolu2convid(i_obs_test, name2solu='i_true', coord_points2test=test_t_batch,
                                      outPath=R['FolderName'])
        plotData.plot_testSolu2convid(s_nn2test, name2solu='s_test', coord_points2test=test_t_batch,
                                      outPath=R['FolderName'])
        plotData.plot_testSolu2convid(i_nn2test, name2solu='i_test', coord_points2test=test_t_batch,
                                      outPath=R['FolderName'])
        plotData.plot_testSolu2convid(r_nn2test, name2solu='r_test', coord_points2test=test_t_batch,
                                      outPath=R['FolderName'])

        plotData.plot_testSolus2convid(i_obs_test, i_nn2test, name2solu1='i_true', name2solu2='i_test',
                                       coord_points2test=test_t_batch, seedNo=R['seed'],
                                       outPath=R['FolderName'])

        plotData.plot_testSolu2convid(beta_test, name2solu='beta_test', coord_points2test=test_t_batch,
                                      outPath=R['FolderName'])
        plotData.plot_testSolu2convid(gamma_test, name2solu='gamma_test', coord_points2test=test_t_batch,
                                      outPath=R['FolderName'])
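

# A minimal sketch of a configuration for solve_SIR2COVID. The keys below are exactly the ones the
# function reads from R; every concrete value is an illustrative assumption for a quick smoke test
# (e.g., the population figure and layer widths), not a setting from the original experiments.
def demo_config2SIR2COVID():
    R = {}
    R['FolderName'] = 'output2SIR'
    R['gpuNo'] = 0
    R['seed'] = 43
    R['input_dim'] = 1                      # the single input is the time t
    R['output_dim'] = 1
    R['size2train'] = 70                    # number of leading days used for training
    R['batch_size2train'] = 20
    R['batch_size2test'] = 10
    R['opt2sample'] = 'random_sample'       # assumed sampling option understood by DNN_data
    R['max_epoch'] = 200000
    R['learning_rate'] = 2e-4
    R['lr_decay'] = 5e-5
    R['init_penalty2predict_true'] = 50     # initial data-misfit penalty, annealed by the stage schedule
    R['activate_stage_penalty'] = 1
    R['total_population'] = 3450000         # illustrative; must be consistent with the CSV data
    R['scale_population'] = 3450000         # normalize S, I, R into [0, 1)
    R['scale_up'] = 0
    R['scale_factor'] = 100
    R['loss_function'] = 'L2_loss'
    R['regular_weight_model'] = 'L2'
    R['regular_weight'] = 1e-6
    R['train_model'] = 'train_union_loss'
    R['hidden2SIR'] = (50, 40, 30, 30, 20)  # assumed widths for the S, I, R sub-networks
    R['hidden2para'] = (30, 20, 20, 10)     # assumed widths for the beta, gamma sub-networks
    R['SIR_opt2init_NN'] = 'random_normal'  # assumed init option understood by DNN.init_DNN
    R['Para_opt2init_NN'] = 'random_normal'
    R['model2sir'] = 'DNN'
    R['model2paras'] = 'DNN'
    R['act2sir'] = 'tanh'
    R['act2paras'] = 'tanh'
    R['freqs'] = np.arange(1, 11)
    return R
    # Usage sketch: R = demo_config2SIR2COVID(); solve_SIR2COVID(R)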