def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # prepare neural network
    validate_size = 0.1
    num_bands = 100
    hs_indices = range(48)
    model = torch.nn.Sequential(
        torch.nn.LeakyReLU(),
        torch.nn.Linear(len(hs_indices) * num_bands, 1000),
        torch.nn.LeakyReLU(),
        torch.nn.Linear(1000, 250),
        torch.nn.LeakyReLU(),
        torch.nn.Linear(250, 73),
        torch.nn.LeakyReLU(),
    )
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.75)
    criterion = torch.nn.CrossEntropyLoss()

    # prepare data
    def json2inputlabel(data_json):
        # flatten the selected high-symmetry-point columns of the band structure
        # into a single input vector
        data_input_np = np.array(data_json["bands"])[:, hs_indices].flatten().T
        # label is the 0-based arithmetic crystal class index
        data_label_np = np.array([
            arithmeticcrystalclass.arithmeticcrystalclass_number(data_json["number"]) - 1
        ])
        return data_input_np, data_label_np

    dataset = data_loader.AnyDataset([
        f"list/actual/arithmeticcrystalclass_list_{blnum}.txt"
        for blnum in range(1, 74)
    ], json2inputlabel, validate_size)
    validate_loader, train_loader = data_loader.get_validate_train_loader(dataset, 32)

    # train
    function_training.validate_train_loop(
        device, model, optimizer, scheduler, criterion,
        validate_loader, train_loader,
        num_epoch=10, num_epoch_per_validate=5,
        state_dict_path="state_dicts/state_dict_bs2bl")

    # apply
    function_list.create_any_guess_list_files(
        device, model, hs_indices, validate_size, num_group=73,
        in_list_paths=[
            f"list/actual/arithmeticcrystalclass_list_{blnum}.txt"
            for blnum in range(1, 74)
        ],
        out_list_path_format="list/guess/arithmeticcrystalclass_list_{}.txt")

    # notify that the run is done (Windows only)
    import winsound
    winsound.Beep(200, 500)

    # analyse
    function_analysis.print_result(
        group_numbers=range(1, 74),
        guess_list_dir="list/guess/",
        actual_list_dir="list/actual/",
        list_format="arithmeticcrystalclass_list_{}.txt",
        validate_size=0.1)

    def json2label(data_json):
        data_label_np = np.array([
            arithmeticcrystalclass.arithmeticcrystalclass_number(data_json["number"]) - 1
        ])
        return data_label_np

    function_analysis.show_confusion(json2label, [
        f"list/guess/arithmeticcrystalclass_list_{i}.txt" for i in range(1, 74)
    ], show_text=False)
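# Shape sanity check for the bs2bl setup above -- an illustrative sketch only,
# not part of the pipeline. A fabricated (num_bands, num_hs_points) band block
# stands in for data_json["bands"]; it is flattened exactly as in
# json2inputlabel (48 * 100 = 4800 values) and mapped by the same architecture
# to 73 logits, the pairing CrossEntropyLoss expects with 0-based class labels.
# Relies on the module-level torch/np imports.
def _check_bs2bl_shapes():
    num_bands = 100
    hs_indices = range(48)
    fake_bands = np.random.rand(num_bands, len(hs_indices))  # stand-in for data_json["bands"]
    flat = fake_bands[:, hs_indices].flatten()                # same flattening as json2inputlabel
    model = torch.nn.Sequential(
        torch.nn.LeakyReLU(),
        torch.nn.Linear(len(hs_indices) * num_bands, 1000),
        torch.nn.LeakyReLU(),
        torch.nn.Linear(1000, 250),
        torch.nn.LeakyReLU(),
        torch.nn.Linear(250, 73),
        torch.nn.LeakyReLU(),
    )
    logits = model(torch.as_tensor(flat, dtype=torch.float32).unsqueeze(0))
    assert logits.shape == (1, 73)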
def main_one(csnum):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # prepare neural network
    validate_size = 0.1
    num_bands = 100
    hs_indices = list(range(48))
    # hs_indices = [0, 1, 3, 4, 5, 7, 8, 13, 31, 34, 37]  # 11 hs points in Brillouin zone out of 40
    cs_sizes = crystalsystem.crystalsystem_sizes()
    # one class per space group in crystal system csnum, plus one catch-all class
    output_size = (cs_sizes[csnum - 1] - cs_sizes[csnum - 2] + 1) if csnum > 1 else 3
    # earlier architecture kept for reference:
    """
    model = torch.nn.Sequential(
        #torch.nn.LeakyReLU(),
        torch.nn.Linear(len(hs_indices)*num_bands, 64),
        #torch.nn.LeakyReLU(),
        #torch.nn.Linear(64, 32),
        torch.nn.Conv1d(64, [32], 1),
        torch.nn.LeakyReLU(),
        torch.nn.Linear(32, output_size),
        #torch.nn.LeakyReLU(),
        #torch.nn.Softmax(dim=7),
    )
    """
    model = torch.nn.Sequential(
        torch.nn.LeakyReLU(),
        torch.nn.Linear(len(hs_indices) * num_bands, 300),
        torch.nn.LeakyReLU(),
        torch.nn.Linear(300, 100),
        torch.nn.LeakyReLU(),
        torch.nn.Linear(100, output_size),
        torch.nn.LeakyReLU(),
    )
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.75)
    criterion = torch.nn.CrossEntropyLoss()

    # prepare data
    crystal_upper = crystalsystem.spacegroup_index_upper(csnum)
    crystal_lower = crystalsystem.spacegroup_index_lower(csnum)
    crystal_size = crystal_upper - crystal_lower

    def json2inputlabel(data_json):
        data_input_np = np.array(data_json["bands"])[:, hs_indices].flatten().T
        sgnum = data_json["number"]
        if crystal_lower < sgnum - 1 < crystal_upper:
            # space group inside this crystal system: label relative to the system
            data_label_np = np.array([sgnum - 1 - crystal_lower])
        else:
            # anything outside the system's index range goes to the catch-all class
            data_label_np = np.array([crystal_size])
        return data_input_np, data_label_np

    dataset = data_loader.AnyDataset([
        f"list/actual/spacegroup_list_{sgnum}.txt"
        for sgnum in crystalsystem.spacegroup_number_range(csnum)
    ], json2inputlabel, validate_size)
    # Optional: cache the dataset to disk so later runs can skip rebuilding it.
    # with open("data.pickle", "wb+") as f:
    #     pickle.dump(dataset, f)
    # with open("data.pickle", "rb") as f:
    #     dataset = pickle.load(f)
    validate_loader, train_loader = data_loader.get_validate_train_loader(dataset, 32)

    # train
    ech, loss, ech_a, acc = function_training.validate_train_loop(
        device, model, optimizer, scheduler, criterion,
        validate_loader, train_loader,
        num_epoch=5, num_epoch_per_validate=1,
        state_dict_path=f"state_dicts/state_dict_cs2sg_{csnum}")
    plot(ech, loss, ech_a, acc)

    # apply
    function_list.append_any_guess_list_files(
        device, model, hs_indices, validate_size, num_group=230,
        in_list_paths=[
            f"list/actual/spacegroup_list_{sgnum}.txt"
            for sgnum in crystalsystem.spacegroup_number_range(csnum)
        ],
        out_list_path_format="list/guess/spacegroup_list_{}.txt")
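# main_one calls plot(ech, loss, ech_a, acc), a helper that is not defined in
# this section. The sketch below is a hypothetical stand-in (its name and the
# meaning of the arguments are assumptions): it reads ech/loss as per-epoch
# training losses and ech_a/acc as validation-epoch accuracies returned by
# function_training.validate_train_loop, and draws them with matplotlib.
def _plot_history_sketch(ech, loss, ech_a, acc):
    import matplotlib.pyplot as plt

    fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(8, 3))
    ax_loss.plot(ech, loss)
    ax_loss.set_xlabel("epoch")
    ax_loss.set_ylabel("training loss")
    ax_acc.plot(ech_a, acc)
    ax_acc.set_xlabel("epoch")
    ax_acc.set_ylabel("validation accuracy")
    fig.tight_layout()
    plt.show()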