def test_train_tranformer_cls():
    """Smoke-train TransformerCls on a synthetic binary-classification stream.

    Builds a small transformer classifier from the hyper-parameters in
    ``opt``, then runs ``n_iter`` Adam steps over batches drawn from
    ``data_gen_binary_classification``, restarting the generator whenever
    it is exhausted. Prints accuracy and loss every 50 iterations.
    """
    from types import SimpleNamespace

    opt = SimpleNamespace(nclasses=2, d_model=32, N=2, d_ff=1024, h=8,
                          dropout=0.1, vocab_size=1000, batch_size=128,
                          n_batches=1000, gpuid=1)
    # BUG FIX: the original referenced bare names (gpuid, nclasses, ...)
    # that only exist as attributes of ``opt``, so every use raised
    # NameError. All config reads now go through ``opt``.
    device = torch.device(
        "cuda:{}".format(opt.gpuid) if opt.gpuid >= 0 else "cpu")
    model = TransformerCls(opt.nclasses, opt.vocab_size, opt.h, opt.d_model,
                           opt.d_ff, dropout=opt.dropout, n_layer=opt.N)
    optimizer = torch.optim.Adam(params=model.parameters(), lr=0.0001)
    criterion = CrossEntropyLoss()
    model.to(device)
    criterion.to(device)

    n_iter = 1000
    gen = data_gen_binary_classification(opt.batch_size, opt.n_batches,
                                         device, opt.vocab_size)
    for i in range(n_iter):
        try:
            batch = next(gen)
        except StopIteration:
            # Generator exhausted — restart it. (Was a bare ``except:``,
            # which would also have hidden real errors raised inside the
            # generator itself.)
            gen = data_gen_binary_classification(opt.batch_size,
                                                 opt.n_batches,
                                                 device, opt.vocab_size)
            batch = next(gen)
        out = model(batch.src, batch.src_mask)
        ty_true = batch.trg
        loss = criterion(out, ty_true)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        # Mean batch accuracy from the argmax over class logits.
        acc = (out.argmax(-1) == ty_true).sum().item() / ty_true.size(0)
        if i > 0 and i % 50 == 1:
            print("iter: {}, acc: {}, loss: {}".format(i, acc, loss.item()))
def main():
    """Train a CNN on the X-ray dataset and evaluate it on a held-out split.

    Builds the augmentation pipeline, splits the training folder 80/20 into
    train-val vs. test and the remainder 80/20 into train vs. val, then runs
    ``train`` for 100 epochs and evaluates the best model with ``test``.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = CNN()
    # NOTE(review): try lr=0.01 as well (translated from original comment).
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    criterion = CrossEntropyLoss()
    model = model.to(device)
    criterion = criterion.to(device)

    transform_train = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(12),
        transforms.ToTensor(),
        # BUG FIX: the std argument was written as ``(0.5)``, which is a
        # plain float, not a 1-tuple — Normalize documents ``std`` as a
        # sequence, one entry per channel. Use ``(0.5,)`` like ``mean``.
        transforms.Normalize((0.5,), (0.5,))
    ])

    # 80/20 split into train-val vs. test, then 80/20 into train vs. val.
    # (The dedicated dataset/val and dataset/test folders were intentionally
    # not used here, per the original code.)
    train_data = XRAYDataset('dataset/train/', transform=transform_train)
    train_val_dataset, test_dataset = train_test_split(
        train_data, test_size=0.2, shuffle=True)
    train_dataset, val_dataset = train_test_split(
        train_val_dataset, test_size=0.2, shuffle=True)

    train_loader = DataLoader(dataset=train_dataset, batch_size=64,
                              shuffle=True)
    val_loader = DataLoader(dataset=val_dataset, batch_size=64, shuffle=True)
    test_loader = DataLoader(dataset=test_dataset, batch_size=64,
                             shuffle=True)

    # ``train`` expects a dict with 'train' and 'val' phase loaders.
    train_val_loader = {'train': train_loader, 'val': val_loader}

    best_model = train(model, train_val_loader, optimizer, criterion,
                       device, 100)
    test(best_model, criterion, test_loader, device)
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=4) # defining the model model = Net() # defining the optimizer optimizer = Adam(model.parameters(), lr=0.0001) # defining the loss function criterion = CrossEntropyLoss() # checking if GPU is available if torch.cuda.is_available(): model = model.to(device) criterion = criterion.to(device) state_dict = torch.load( '/media/mvisionai/Backups/reggie/bindsnet-master/examples/mnist/NC_MCI_r/0.698ad_net.pth' ) model.load_state_dict(state_dict) model.eval() test_acc = [] test_loss = [] labels = [] with torch.no_grad(): for step, (batch, label) in enumerate(test_dataloader): x_test, y_test = Variable(batch), Variable(label) # getting the validation set