class MLP_Regression():
    def __init__(self, label, parameters):
        super().__init__()
        self.writer = SummaryWriter(comment=f"_{label}_training")
        self.label = label
        self.lr = parameters['lr']
        self.hidden_units = parameters['hidden_units']
        self.mode = parameters['mode']
        self.batch_size = parameters['batch_size']
        self.num_batches = parameters['num_batches']
        self.x_shape = parameters['x_shape']
        self.y_shape = parameters['y_shape']
        self.save_model_path = f'{parameters["save_dir"]}/{label}_model.pt'
        self.best_loss = np.inf
        self.init_net(parameters)

    def init_net(self, parameters):
        if not os.path.exists(parameters["save_dir"]):
            os.makedirs(parameters["save_dir"])
        model_params = {
            'input_shape': self.x_shape,
            'classes': self.y_shape,
            'batch_size': self.batch_size,
            'hidden_units': self.hidden_units,
            'mode': self.mode,
        }
        self.net = MLP(model_params).to(DEVICE)
        self.optimiser = torch.optim.Adam(self.net.parameters(), lr=self.lr)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimiser, step_size=5000, gamma=0.5)
        print("MLP Parameters: ")
        print(
            f'batch size: {self.batch_size}, input shape: {model_params["input_shape"]}, '
            f'hidden units: {model_params["hidden_units"]}, output shape: {model_params["classes"]}, lr: {self.lr}'
        )

    def train_step(self, train_data):
        '''Run one epoch of training over train_data.'''
        self.net.train()
        for x, y in train_data:
            x, y = x.to(DEVICE), y.to(DEVICE)
            self.net.zero_grad()
            self.loss_info = torch.nn.functional.mse_loss(self.net(x), y, reduction='sum')
            self.loss_info.backward()
            self.optimiser.step()
        # Record the loss of the final batch as the epoch loss.
        self.epoch_loss = self.loss_info.item()

    def evaluate(self, x_test):
        '''Return predictions for x_test as a numpy array.'''
        self.net.eval()
        with torch.no_grad():
            y_test = self.net(x_test.to(DEVICE)).cpu().numpy()
        return y_test

    def log_progress(self, step):
        write_loss(self.writer, self.loss_info, step)
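# Usage sketch for MLP_Regression (illustrative, not from the original
# source): the dict keys mirror those read in __init__ above, but the
# concrete values, the toy data, and the 'regression' mode string are
# hypothetical placeholders.
toy_x = torch.randn(64, 10)
toy_y = torch.randn(64, 1)
toy_loader = DataLoader(TensorDataset(toy_x, toy_y), batch_size=16)
params = {
    'lr': 1e-3, 'hidden_units': 128, 'mode': 'regression',
    'batch_size': 16, 'num_batches': 4,
    'x_shape': 10, 'y_shape': 1, 'save_dir': './checkpoints',
}
regressor = MLP_Regression('demo', params)
for step in range(5):
    regressor.train_step(toy_loader)  # one epoch over the toy data
    regressor.log_progress(step)      # TensorBoard loss logging
print(regressor.evaluate(toy_x))      # numpy array of predictions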
def load_mlp_class_model(saved_model):
    '''Load model weights from path saved_model.'''
    config = ClassConfig
    model_params = {
        'input_shape': config.x_shape,
        'classes': config.classes,
        'batch_size': config.batch_size,
        'hidden_units': config.hidden_units,
        'mode': config.mode,
        'dropout': False,
    }
    model = MLP(model_params)
    model.load_state_dict(torch.load(saved_model))
    return model.eval()
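# Illustrative inference call (the checkpoint path and input batch are
# hypothetical placeholders); the returned model is already in eval() mode,
# so it can be used for prediction directly.
clf_model = load_mlp_class_model('./saved_models/class_model.pt')
x_batch = torch.randn(8, ClassConfig.x_shape)  # assumes x_shape is the flat feature size
with torch.no_grad():
    preds = clf_model(x_batch).argmax(dim=1)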
nc = int(opt.nc)
imageSize = int(opt.imageSize)
nz = int(opt.nz)
nblk = int(opt.nblk)
model_netG = MLP(input_dim=nc * imageSize * imageSize,
                 output_dim=nc * imageSize * imageSize,
                 dim=nz,
                 n_blk=nblk,
                 norm='none',
                 activ='relu').to(device)
model_netG.load_state_dict(torch.load(opt.netG, map_location=device))
print_and_write_log(test_log_file, 'netG:')
print_and_write_log(test_log_file, str(model_netG))
if opt.eval:
    model_netG.eval()

for i, data in enumerate(tqdm(dataloader), 0):
    img, img_path = data
    img_name = os.path.splitext(os.path.basename(img_path[0]))[0] + '.png'
    if i >= opt.num_test:
        break
    real = img.to(device)
    with torch.no_grad():
        recon = model_netG(real)
    recon_img = tensor2im(recon)
    if opt.show_input:
        real_img = tensor2im(real)
        real_recon_img = np.concatenate([real_img, recon_img], 1)
        real_recon_img_pil = Image.fromarray(real_recon_img)
        real_recon_img_pil.save(os.path.join(opt.resfwi, img_name))
class MLP_Classification():
    def __init__(self, label, parameters):
        super().__init__()
        self.writer = SummaryWriter(comment=f"_{label}_training")
        self.label = label
        self.lr = parameters['lr']
        self.hidden_units = parameters['hidden_units']
        self.mode = parameters['mode']
        self.batch_size = parameters['batch_size']
        self.num_batches = parameters['num_batches']
        self.x_shape = parameters['x_shape']
        self.classes = parameters['classes']
        self.save_model_path = f'{parameters["save_dir"]}/{label}_model.pt'
        self.best_acc = 0.
        self.dropout = parameters['dropout']
        self.init_net(parameters)

    def init_net(self, parameters):
        if not os.path.exists(parameters["save_dir"]):
            os.makedirs(parameters["save_dir"])
        model_params = {
            'input_shape': self.x_shape,
            'classes': self.classes,
            'batch_size': self.batch_size,
            'hidden_units': self.hidden_units,
            'mode': self.mode,
            'dropout': self.dropout,
        }
        if self.dropout:
            self.net = MLP_Dropout(model_params).to(DEVICE)
            print('MLP Dropout Parameters: ')
        else:
            self.net = MLP(model_params).to(DEVICE)
            print('MLP Parameters: ')
        print(
            f'batch size: {self.batch_size}, input shape: {model_params["input_shape"]}, '
            f'hidden units: {model_params["hidden_units"]}, output shape: {model_params["classes"]}, lr: {self.lr}'
        )
        self.optimiser = torch.optim.SGD(self.net.parameters(), lr=self.lr)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimiser, step_size=100, gamma=0.5)

    def train_step(self, train_data):
        '''Run one epoch of training over train_data.'''
        self.net.train()
        for x, y in tqdm(train_data):
            x, y = x.to(DEVICE), y.to(DEVICE)
            self.net.zero_grad()
            self.loss_info = torch.nn.functional.cross_entropy(self.net(x), y, reduction='sum')
            self.loss_info.backward()
            self.optimiser.step()

    def predict(self, X):
        probs = torch.nn.Softmax(dim=1)(self.net(X))
        preds = torch.argmax(probs, dim=1)
        return preds, probs

    def evaluate(self, test_loader):
        self.net.eval()
        print('Evaluating on validation data')
        correct = 0
        total = 0
        with torch.no_grad():
            for X, y in tqdm(test_loader):
                X, y = X.to(DEVICE), y.to(DEVICE)
                preds, _ = self.predict(X)
                # Count the actual batch size so a smaller final batch
                # does not skew the accuracy.
                total += y.size(0)
                correct += (preds == y).sum().item()
        self.acc = correct / total
        print(f'{self.label} validation accuracy: {self.acc}')

    def log_progress(self, step):
        write_loss(self.writer, self.loss_info, step)
        write_acc(self.writer, self.acc, step)
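# Usage sketch for MLP_Classification (illustrative, not from the original
# source): the dict keys mirror those read in __init__ above; the values,
# the toy data, and the 'classification' mode string are hypothetical
# placeholders.
toy_x = torch.randn(64, 20)
toy_y = torch.randint(0, 3, (64,))
toy_loader = DataLoader(TensorDataset(toy_x, toy_y), batch_size=16)
params = {
    'lr': 0.05, 'hidden_units': 64, 'mode': 'classification',
    'batch_size': 16, 'num_batches': 4, 'x_shape': 20, 'classes': 3,
    'save_dir': './checkpoints', 'dropout': False,
}
clf = MLP_Classification('demo', params)
for step in range(5):
    clf.train_step(toy_loader)  # one epoch of training
    clf.evaluate(toy_loader)    # sets clf.acc
    clf.log_progress(step)      # logs loss and accuracy to TensorBoard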
def main(args):
    # Fix random seeds for reproducibility.
    seed = 999
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

    # Load the cross-site models (every site except the target one) and the
    # single-site model trained on the target site's split.
    sites = ['NYU', 'UCLA', 'UM', 'USM']
    models_cross = []
    for site in sites:
        if site != args.site:
            model = MLP(6105, 8, 2).to(device)
            model.load_state_dict(torch.load(os.path.join('./model/cross_overlap', site + '.pth')))
            models_cross.append(model)
    model_single = MLP(6105, 8, 2).to(device)
    model_single.load_state_dict(torch.load(os.path.join('./model/single_overlap', args.site, str(args.split) + '.pth')))

    # Per-site correlation-matrix features and labels.
    data1 = dd.io.load(os.path.join(args.vec_dir, 'NYU_correlation_matrix.h5'))
    data2 = dd.io.load(os.path.join(args.vec_dir, 'UM_correlation_matrix.h5'))
    data3 = dd.io.load(os.path.join(args.vec_dir, 'USM_correlation_matrix.h5'))
    data4 = dd.io.load(os.path.join(args.vec_dir, 'UCLA_correlation_matrix.h5'))
    x1, y1 = torch.from_numpy(data1['data']).float(), torch.from_numpy(data1['label']).long()
    x2, y2 = torch.from_numpy(data2['data']).float(), torch.from_numpy(data2['label']).long()
    x3, y3 = torch.from_numpy(data3['data']).float(), torch.from_numpy(data3['label']).long()
    x4, y4 = torch.from_numpy(data4['data']).float(), torch.from_numpy(data4['label']).long()

    idNYU = dd.io.load('./idx/NYU_sub_overlap.h5')
    idUM = dd.io.load('./idx/UM_sub_overlap.h5')
    idUSM = dd.io.load('./idx/USM_sub_overlap.h5')
    idUCLA = dd.io.load('./idx/UCLA_sub_overlap.h5')

    # 5-fold split: fold args.split is held out for testing and the other
    # four folds form the training set (used here only for normalisation
    # statistics).
    folds = ['0', '1', '2', '3', '4']
    te_fold = folds.pop(args.split)
    tr1 = sum((idNYU[f] for f in folds), [])
    tr2 = sum((idUM[f] for f in folds), [])
    tr3 = sum((idUSM[f] for f in folds), [])
    tr4 = sum((idUCLA[f] for f in folds), [])
    te1, te2, te3, te4 = idNYU[te_fold], idUM[te_fold], idUSM[te_fold], idUCLA[te_fold]

    x1_train, x2_train, x3_train, x4_train = x1[tr1], x2[tr2], x3[tr3], x4[tr4]
    x1_test, y1_test = x1[te1], y1[te1]
    x2_test, y2_test = x2[te2], y2[te2]
    x3_test, y3_test = x3[te3], y3[te3]
    x4_test, y4_test = x4[te4], y4[te4]

    # Standardise each site's test data with that site's training statistics.
    def standardise(x_train, x_test):
        mean = x_train.mean(0, keepdim=True)
        dev = x_train.std(0, keepdim=True)
        return (x_test - mean) / dev

    x1_test = standardise(x1_train, x1_test)
    x2_test = standardise(x2_train, x2_test)
    x3_test = standardise(x3_train, x3_test)
    x4_test = standardise(x4_train, x4_test)

    tests = {
        'NYU': TensorDataset(x1_test, y1_test),
        'UM': TensorDataset(x2_test, y2_test),
        'USM': TensorDataset(x3_test, y3_test),
        'UCLA': TensorDataset(x4_test, y4_test),
    }
    test = tests[args.site]

    te_data = test.tensors[0].to(device)
    targets = test.tensors[1].numpy()
    te_outputs = []

    with torch.no_grad():
        # Cross-site models.
        for model in models_cross:
            model.eval()
            te_outputs.append(torch.exp(model(te_data)))
        # Single-site model.
        model_single.eval()
        te_outputs.append(torch.exp(model_single(te_data)))

    # Ensemble: average the class probabilities, then take the argmax.
    output_mean = torch.stack(te_outputs, dim=0).mean(dim=0)
    preds = output_mean.argmax(dim=1).cpu().numpy()

    if not os.path.exists(args.res_dir):
        os.mkdir(args.res_dir)
    dd.io.save(os.path.join(args.res_dir, args.site + '_' + str(args.split) + '.h5'),
               {'preds': preds, 'targets': targets})
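# The ensembling in main() assumes each MLP outputs log-probabilities, which
# is why torch.exp is applied before averaging. A minimal self-contained
# sketch of the same average-then-argmax scheme on random stand-in outputs:
demo_log_probs = [torch.log_softmax(torch.randn(4, 2), dim=1) for _ in range(3)]  # 3 models, 4 samples, 2 classes
demo_probs = torch.stack([lp.exp() for lp in demo_log_probs], dim=0)  # (n_models, n_samples, n_classes)
demo_preds = demo_probs.mean(dim=0).argmax(dim=1)  # average probabilities, pick the most likely class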
class VanillaAE(nn.Module):
    def __init__(self, opt):
        super(VanillaAE, self).__init__()
        self.opt = opt
        self.device = torch.device("cuda:0" if not opt.no_cuda else "cpu")
        nc = int(opt.nc)
        imageSize = int(opt.imageSize)
        nz = int(opt.nz)
        nblk = int(opt.nblk)

        # generator
        self.netG = MLP(input_dim=nc * imageSize * imageSize,
                        output_dim=nc * imageSize * imageSize,
                        dim=nz,
                        n_blk=nblk,
                        norm='none',
                        activ='relu').to(self.device)
        weights_init(self.netG)
        if opt.netG != '':
            self.netG.load_state_dict(torch.load(opt.netG, map_location=self.device))
        print_and_write_log(opt.train_log_file, 'netG:')
        print_and_write_log(opt.train_log_file, str(self.netG))

        # losses
        self.criterion = nn.MSELoss()
        # define focal frequency loss
        self.criterion_freq = FFL(loss_weight=opt.ffl_w,
                                  alpha=opt.alpha,
                                  patch_factor=opt.patch_factor,
                                  ave_spectrum=opt.ave_spectrum,
                                  log_matrix=opt.log_matrix,
                                  batch_matrix=opt.batch_matrix).to(self.device)

        # misc
        self.to(self.device)

        # optimizer
        self.optimizerG = optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))

    def forward(self):
        pass

    def gen_update(self, data, epoch, matrix=None):
        self.netG.zero_grad()
        real = data.to(self.device)
        if matrix is not None:
            matrix = matrix.to(self.device)
        recon = self.netG(real)
        # apply pixel-level loss
        errG_pix = self.criterion(recon, real) * self.opt.mse_w
        # apply focal frequency loss
        if epoch >= self.opt.freq_start_epoch:
            errG_freq = self.criterion_freq(recon, real, matrix)
        else:
            errG_freq = torch.tensor(0.0).to(self.device)
        errG = errG_pix + errG_freq
        errG.backward()
        self.optimizerG.step()
        return errG_pix, errG_freq

    def sample(self, x):
        x = x.to(self.device)
        self.netG.eval()
        with torch.no_grad():
            recon = self.netG(x)
        self.netG.train()
        return recon

    def save_checkpoints(self, ckpt_dir, epoch):
        torch.save(self.netG.state_dict(), '%s/netG_epoch_%03d.pth' % (ckpt_dir, epoch))
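# Training-loop sketch for VanillaAE (illustrative, not from the original
# source): `opt` is the same options object the class consumes above, and
# `train_loader`, `n_epochs`, and `ckpt_dir` are hypothetical placeholders.
ae = VanillaAE(opt)
for epoch in range(n_epochs):
    for data, _ in train_loader:
        err_pix, err_freq = ae.gen_update(data, epoch)  # matrix defaults to None
    ae.save_checkpoints(ckpt_dir, epoch)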
def file_classify_demo(fobjs: List[FileInfo]):
    # Accumulate keyword frequencies over the training files only.
    words = {}
    for fobj in filter(lambda x: not x.istest, fobjs):
        for kw, freq in zip(fobj.keywords, fobj.kwfreq):
            words[kw] = words.get(kw, 0) + freq

    # Make keyword score vectors for the train and test files.
    all_wordvec = []
    all_wordvec_test = []
    labels_train = []
    labels_test = []
    for fobj in fobjs:
        fobj.set_wordvec(words)
        if not fobj.kwfreq:
            continue
        if not fobj.istest:
            all_wordvec.append(fobj.wordvec)
            labels_train.append(fobj.label)
        else:
            all_wordvec_test.append(fobj.wordvec)
            labels_test.append(fobj.label)

    # PCA turns the keyword vectors into compact fingerprints.
    inputdim = 20
    outputdim = 7
    pca = PCA(n_components=inputdim)
    pca.fit(all_wordvec)
    fprints = pca.transform(all_wordvec)
    fprints_test = pca.transform(all_wordvec_test)
    print('PCA ratio sum:', sum(pca.explained_variance_ratio_))
    print()

    x_train = torch.from_numpy(fprints).float()
    x_test = torch.from_numpy(fprints_test).float()
    y_train = torch.Tensor(labels_train).long()
    train_dataset = TensorDataset(x_train, y_train)
    dloader = DataLoader(train_dataset, batch_size=6, shuffle=True)

    model = MLP(inputdim, outputdim)
    optimizer = optim.Adam(model.parameters(), lr=0.01)
    lossfunc = nn.CrossEntropyLoss()
    epochs = 300 + 1
    for ecnt in range(epochs):
        for inputs, labels in dloader:
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = lossfunc(outputs, labels)
            loss.backward()
            # Clipping the gradient norm is important for stable training here.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
            optimizer.step()
        if ecnt % 20 == 0:
            print('Epoch:', ecnt)
            model.eval()
            with torch.no_grad():
                y_train_step_label = model(x_train).argmax(dim=1).numpy()
                y_test_step_label = model(x_test).argmax(dim=1).numpy()
            tran_accu = (y_train_step_label == np.array(labels_train)).mean()
            test_accu = (y_test_step_label == np.array(labels_test)).mean()
            print('tran_accu', tran_accu)
            print('test_accu', test_accu)
            print()
            model.train()

    # Save the model, then reload it to sanity-check the round trip.
    save_path = r'.\ai\classify-demo.pth'
    torch.save(model.state_dict(), save_path)
    new_model = MLP(inputdim, outputdim)
    new_model.load_state_dict(torch.load(save_path))
    y_train_look = new_model(x_train)
    y_test = new_model(x_test)
    print(y_train_look)
    print(y_test)
    print(y_test.argmax(dim=1))
    print(labels_test)
    print(len(labels_test))
    print(len(labels_train))