def calculate_simulation(self, mode='displacement'):
    """Run a bar-FEM simulation of the current graph and return its efficiency.

    Args:
        mode: forwarded to barfem; 'displacement' applies the input vectors
            as prescribed displacements, otherwise as forces.

    Returns:
        Efficiency score computed by calc_efficiency from the nodal
        displacement field.
    """
    nodes_pos, edges_indices, edges_thickness, _ = self.extract_node_edge_info()
    node_num = nodes_pos.shape[0]
    # Edge indices are 0-based, so every index must be strictly less than
    # node_num.  The previous check used `>=`, which wrongly accepted an
    # index equal to node_num (one past the last valid node) — exactly the
    # condition the assertion message describes as an error.
    assert node_num > np.max(
        edges_indices), 'edges_indicesに,ノード数以上のindexを示しているものが発生'
    # Drop nodes that are not reachable through any edge before solving.
    input_nodes, output_nodes, frozen_nodes, nodes_pos, edges_indices = \
        remove_node_which_nontouchable_in_edge_indices(
            self.input_nodes, self.output_nodes, self.frozen_nodes,
            nodes_pos, edges_indices)
    displacement = barfem(nodes_pos, edges_indices, edges_thickness,
                          input_nodes, self.input_vectors, frozen_nodes, mode)
    efficiency = calc_efficiency(input_nodes, self.input_vectors,
                                 output_nodes, self.output_vectors,
                                 displacement)
    return efficiency
# --- Script fragment: evaluate volume and efficiency of a stored graph ---
# NOTE(review): nodes_pos, edges_indices and np_save_path are defined earlier
# in the file (outside this fragment).
edges_thickness = np.load(os.path.join(np_save_path, "edges_thickness.npy"))
# NOTE(review): thicknesses are scaled by a magic factor of 3 — confirm intent.
edges_thickness = edges_thickness * 3
volume = calc_volume(nodes_pos, edges_indices, edges_thickness)
print(volume)
# Build the boundary-condition graph (input/output/frozen nodes and the
# condition edges) from the problem definition.
condition_nodes_pos, input_nodes, input_vectors, \
    output_nodes, output_vectors, frozen_nodes, condition_edges_indices, condition_edges_thickness\
    = make_main_node_edge_info(*condition(), condition_edge_thickness=0.05)  # Width set to 1/20 of the length, accounting for the slenderness ratio
# Remap the condition nodes/edges onto the loaded graph.
input_nodes, output_nodes, frozen_nodes, edges_thickness \
    = conprocess_seperate_edge_indice_procedure(input_nodes, output_nodes,
                                                frozen_nodes,
                                                condition_nodes_pos,
                                                condition_edges_indices,
                                                condition_edges_thickness,
                                                nodes_pos, edges_indices,
                                                edges_thickness)
# Solve the bar FEM with prescribed displacements and score the result.
displacement = barfem(nodes_pos, edges_indices, edges_thickness, input_nodes,
                      input_vectors, frozen_nodes, mode='displacement')
efficiency = calc_efficiency(input_nodes, input_vectors, output_nodes,
                             output_vectors, displacement)
print(efficiency)
#render_graph(nodes_pos, edges_indices, edges_thickness, os.path.join("GA/体積率に関する調査", "image.png"), display_number=False)
def actor_critic_mean(max_episodes, test_name):
    """Run the Actor-Critic experiment 5 times and plot the mean curve.

    (Original docstring, translated: "Create the averaged graph over 5
    Actor-Critic experiment runs.")

    Args:
        max_episodes: number of training episodes per run.
        test_name: sub-directory name under confirm/step1/ac_results where
            the curves and the pickled history are written.

    Side effects:
        Creates the log directory, saves a per-run efficiency curve PNG,
        the mean curve PNG, and the full history as history.pkl.
    """
    test_num = 5
    log_dir = "confirm/step1/ac_results/{}".format(test_name)
    # Refuse to overwrite a previous experiment's results.
    assert not os.path.exists(log_dir), "already folder exists"
    os.makedirs(log_dir, exist_ok=True)

    history = {}
    for i in range(test_num):
        run_key = "{}".format(i)
        history[run_key] = {'epoch': [], 'result_efficiency': []}

        # easy_dev() returned frozen_nodes in two slots; the original unpack
        # bound the name twice so only the last (9th) value survived.  Make
        # the discarded 6th value explicit with `_`.
        node_pos, input_nodes, input_vectors, \
            output_nodes, output_vectors, _, \
            edges_indices, edges_thickness, frozen_nodes = easy_dev()

        max_steps = 1
        lr_actor = 1e-4
        lr_critic = 1e-3
        weight_decay = 1e-2
        gamma = 0.99
        device = torch.device('cpu')

        # Fresh networks and optimizers for every independent run.
        actorNet = Edgethick_Actor().to(device)
        criticNet = Edgethick_Critic().to(device)
        optimizer_actor = optim.Adam(actorNet.parameters(), lr=lr_actor)
        optimizer_critic = optim.Adam(criticNet.parameters(), lr=lr_critic,
                                      weight_decay=weight_decay)

        for episode in tqdm(range(max_episodes)):
            observation = np.array([0, 1])
            for step in range(max_steps):
                action = select_action(observation, actorNet, criticNet,
                                       device)
                edges_thickness = action['edge_thickness']
                displacement = barfem(node_pos, edges_indices,
                                      edges_thickness, input_nodes,
                                      input_vectors, frozen_nodes,
                                      mode="force")
            # Reward is the x-displacement of node 1 (3 DOF per node).
            reward = displacement[1 * 3]
            criticNet.rewards.append(reward)
            # Return value (loss) was never used; drop the dead assignment.
            finish_episode(criticNet, actorNet, optimizer_critic,
                           optimizer_actor, gamma)
            history[run_key]['epoch'].append(episode + 1)
            history[run_key]['result_efficiency'].append(reward)
            if episode % 1000 == 0:
                print("episode:{} total reward:{}".format(episode, reward))

        plot_efficiency_history(
            history[run_key],
            os.path.join(log_dir, 'learning_effi_curve{}.png'.format(i)))

    # Stack the 5 runs: shape (test_num, max_episodes).
    mean = np.stack([
        history["{}".format(i)]['result_efficiency'] for i in range(test_num)
    ])
    std = np.std(mean[:, -1])
    print('最終結果の標準偏差:', std)
    mean = np.mean(mean, axis=0)
    meanhistory = {}
    meanhistory['epoch'] = history['0']['epoch']
    meanhistory['result_efficiency'] = mean

    # Persist the full learning history alongside the plots.
    with open(os.path.join(log_dir, 'history.pkl'), 'wb') as f:
        pickle.dump(history, f)
    plot_efficiency_history(
        meanhistory,
        os.path.join(log_dir, 'mean_learning_effi_curve.png'))
def compare_apdl_barfem(nodes_pos, edges_indices, edges_thickness, input_nodes,
                        input_vectors, frozen_nodes, tmax=100000, eps=1.0e-11,
                        mode="force"):
    """Cross-check the C-based barfem solver against ANSYS MAPDL.

    Builds the same 2-D beam model in MAPDL, solves it, and compares the
    resulting nodal displacements/rotations with barfem's output.

    Args:
        nodes_pos: array of (x, y) node coordinates (0-based node ids).
        edges_indices: array of (node_a, node_b) pairs per beam element.
        edges_thickness: beam height h per element.
        input_nodes: node ids where input_vectors are applied.
        input_vectors: per-node (x, y) load or prescribed displacement.
        frozen_nodes: node ids with all DOFs fixed.
        tmax, eps: iteration limit / tolerance passed through to barfem.
        mode: "displacement" applies input_vectors as prescribed
            displacements, anything else applies them as forces.

    Returns:
        True if MAPDL and barfem results agree within np.allclose tolerance.
    """
    # Reference solution from the C-implemented barfem solver.
    displacement = barfem(nodes_pos, edges_indices, edges_thickness,
                          input_nodes, input_vectors, frozen_nodes, mode=mode,
                          tmax=tmax, eps=eps)
    # Set up the APDL (ANSYS) session and enter the preprocessor.
    mapdl = launch_mapdl()
    mapdl.finish()
    mapdl.clear()
    mapdl.prep7()
    # Material properties.
    mapdl.et(1, 3)  # element type 3 — presumably BEAM3 (2-D elastic beam); TODO confirm
    mapdl.mp("ex", 1, 1)  # Young's modulus (unit value, matching barfem)
    mapdl.mp("prxy", 1, 0.3)  # Poisson's ratio
    mapdl.mat(1)
    # Define the nodes (APDL node ids are 1-based, hence the +1).
    for i, node_pos in enumerate(nodes_pos):
        mapdl.n(i + 1, node_pos[0], node_pos[1], 0)
    # Define the beam elements, one real-constant set per element so each
    # edge can carry its own thickness.
    for i, edges_indice in enumerate(edges_indices):
        b = 0.2  # depth (out-of-plane width)
        h = edges_thickness[i]
        # Real constants: A, I, height (y-direction length), SHEARZ.
        mapdl.r(i + 1, b * h, b * (h * h**2) / 12, h, 0)
        mapdl.real(i + 1)
        mapdl.e(edges_indice[0] + 1, edges_indice[1] + 1)
    mapdl.finish()
    mapdl.run('/solu')
    mapdl.antype('static')
    # Analysis conditions.
    # Fix all DOFs of the frozen nodes.
    for i in frozen_nodes:
        mapdl.d(i + 1, "all", 0)
    # Apply the loads: prescribed displacements or nodal forces,
    # depending on mode.
    for i, input_vector in enumerate(input_vectors):
        if mode == "displacement":
            mapdl.d(input_nodes[i] + 1, "UX", input_vector[0])
            mapdl.d(input_nodes[i] + 1, "UY", input_vector[1])
        else:
            mapdl.f(input_nodes[i] + 1, "FX", input_vector[0])
            mapdl.f(input_nodes[i] + 1, "FY", input_vector[1])
    # Solve.
    mapdl.solve()
    mapdl.finish()
    # Extract results and interleave as (ux, uy, rotz) per node to match
    # barfem's flat DOF layout.
    x_disp = mapdl.post_processing.nodal_displacement('X')
    y_disp = mapdl.post_processing.nodal_displacement('Y')
    z_rot = mapdl.post_processing.nodal_rotation('Z')
    ansys_disp = np.stack([x_disp, y_disp, z_rot]).T.flatten()
    #result = mapdl.result
    #nnum, principal_nodal_stress = result.principal_nodal_stress(0)
    # von_mises = principal_nodal_stress[:, -1]  # von-Mises stress is the right most column
    # print(von_mises)
    # print(np.max(von_mises))
    mapdl.exit()  # needed so the port number etc. are released/reset
    # Tiny values differ slightly between the solvers, so compare with
    # allclose rather than exact equality.
    return np.allclose(ansys_disp, displacement)