# NOTE(review): extraction collapsed this chunk onto one physical line and
# destroyed its indentation. The first statement and the three methods below
# belong inside a PyTorch-Geometric-style InMemoryDataset subclass whose
# `class` header lies outside this chunk; they are reproduced flat here
# because the enclosing class is not visible -- re-indent under the class
# when merging back.

# Cache the collated data/slices tensors on the dataset instance
# (standard tail of an InMemoryDataset `process`/`__init__`).
self.data, self.slices = self.collate(data_list)

def _download(self):
    # Data is generated locally via gen_syn below; nothing to download.
    return

def _process(self):
    # Collation already happened at construction; nothing to do here.
    return

def __repr__(self):
    return '{}()'.format(self.__class__.__name__)

# ---- experiment configuration (L x L code, presumably toric-code style
# ---- stabilizers given the 2*L*L - 2 check count -- TODO confirm) ----
L = 8
P1 = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06]   # physical error rates, training
P2 = [0.01]                                 # physical error rate, evaluation

# Parity-check matrices (transposed to qubit-major after loading).
H, H_one = error_generate.generate_PCM(2 * L * L - 2, L)  # 64, 30
H, H_one = torch.from_numpy(H).t(), torch.from_numpy(H_one).t()
h_prep = error_generate.H_Prep(H.t())
H_prep = torch.from_numpy(h_prep.get_H_Prep())

BATCH_SIZE = 128
lr = 3e-4        # optimizer learning rate
Nc = 25          # iteration/rollout count? -- can't tell from this chunk
run1 = 8192      # number of training syndromes generated
run2 = 2048      # number of evaluation syndromes generated

# Bipartite edge list (row-node -> column-node, columns offset by the row
# count) replicated once per batch element and moved to the GPU.
# NOTE(review): adj._indices() is private torch API; the public spelling is
# adj.coalesce().indices() -- left as-is to preserve behavior exactly.
adj = H.to_sparse()
edge_info = torch.cat([adj._indices()[0].unsqueeze(0),
                       adj._indices()[1].unsqueeze(0).add(H.size()[0])],
                      dim=0).repeat(1, BATCH_SIZE).cuda()

dataset1 = error_generate.gen_syn(P1, L, H, run1)
dataset2 = error_generate.gen_syn(P2, L, H, run2)
train_dataset = CustomDataset(H, dataset1)
test_dataset = CustomDataset(H, dataset2)
# NOTE(review): extraction collapsed this chunk onto one physical line and
# destroyed its indentation. The first statement and the three methods below
# belong inside a PyTorch-Geometric-style InMemoryDataset subclass whose
# `class` header lies outside this chunk; they are reproduced flat here
# because the enclosing class is not visible -- re-indent under the class
# when merging back.

# Cache the collated data/slices tensors on the dataset instance
# (standard tail of an InMemoryDataset `process`/`__init__`).
self.data, self.slices = self.collate(data_list)

def _download(self):
    # Data is generated locally via gen_syn below; nothing to download.
    return

def _process(self):
    # Collation already happened at construction; nothing to do here.
    return

def __repr__(self):
    return '{}()'.format(self.__class__.__name__)

# ---- experiment configuration (L x L code, presumably toric-code style
# ---- stabilizers given the 2*L*L - 2 check count -- TODO confirm) ----
L = 4
P1 = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1]  # training error rates
P2 = [0.1]                                                        # evaluation error rate

# Parity-check matrix (transposed to qubit-major after loading).
# NOTE(review): in this variant generate_PCM returns a single array,
# unlike the sibling variants that unpack a pair -- verify against
# error_generate's actual signature.
H = torch.from_numpy(error_generate.generate_PCM(2 * L * L - 2, L)).t()  # 64, 30
h_prep = error_generate.H_Prep(H.t())
H_prep = torch.from_numpy(h_prep.get_H_Prep())

BATCH_SIZE = 512
lr = 3e-4        # optimizer learning rate
Nc = 25          # iteration/rollout count? -- can't tell from this chunk
run1 = 40960     # number of training syndromes generated
run2 = 8192      # number of evaluation syndromes generated

dataset1 = error_generate.gen_syn(P1, L, H, run1)
dataset2 = error_generate.gen_syn(P2, L, H, run2)
train_dataset = CustomDataset(H, dataset1)
test_dataset = CustomDataset(H, dataset2)

rows, cols = H.size(0), H.size(1)

# shuffle=False keeps sample order deterministic across epochs.
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=False)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)

# Logical-operator and stabilizer generators derived from the prepared H.
logical, stab = h_prep.get_logical(H_prep)
# NOTE(review): extraction collapsed this chunk onto one physical line and
# destroyed its indentation. The first statement and the three methods below
# belong inside a PyTorch-Geometric-style InMemoryDataset subclass whose
# `class` header lies outside this chunk; they are reproduced flat here
# because the enclosing class is not visible -- re-indent under the class
# when merging back.

# Cache the collated data/slices tensors on the dataset instance
# (standard tail of an InMemoryDataset `process`/`__init__`).
self.data, self.slices = self.collate(data_list)

def _download(self):
    # Data is generated locally via gen_syn below; nothing to download.
    return

def _process(self):
    # Collation already happened at construction; nothing to do here.
    return

def __repr__(self):
    return '{}()'.format(self.__class__.__name__)

# ---- experiment configuration (L x L code, presumably toric-code style
# ---- stabilizers given the 2*L*L - 2 check count -- TODO confirm) ----
L = 6
P1 = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06]   # physical error rates, training
P2 = [0.01]                                 # physical error rate, evaluation

# Parity-check matrices (transposed to qubit-major after loading).
H_init, H_prime = error_generate.generate_PCM(2 * L * L - 2, L)
H = torch.from_numpy(H_init).t()  # 64, 30
H_prime = torch.from_numpy(H_prime).t()
h_prep = error_generate.H_Prep(H.t())
H_prep = torch.from_numpy(h_prep.get_H_Prep())

BATCH_SIZE = 512
lr = 2e-4        # optimizer learning rate
Nc = 25          # iteration/rollout count? -- can't tell from this chunk
run1 = 81920     # number of training syndromes generated
run2 = 2048      # number of evaluation syndromes generated

# Bipartite edge list (row-node -> column-node, columns offset by the row
# count) replicated once per batch element and moved to the GPU.
# NOTE(review): adj._indices() is private torch API; the public spelling is
# adj.coalesce().indices() -- left as-is to preserve behavior exactly.
adj = H.to_sparse()
edge_info = torch.cat([adj._indices()[0].unsqueeze(0),
                       adj._indices()[1].unsqueeze(0).add(H.size()[0])],
                      dim=0).repeat(1, BATCH_SIZE).cuda()

dataset1 = error_generate.gen_syn(P1, L, H, run1)
dataset2 = error_generate.gen_syn(P2, L, H, run2)
train_dataset = CustomDataset(H, dataset1)