def main(args):
    """Evaluate a ResNet-18 classifier on the Fpz-Cz encoding test set.

    Loads weights from './resnet18_encoding.pth', runs one pass over the
    test split, and prints accuracy and mean loss.

    Args:
        args: namespace with at least `data_path`, `batch_size`, `device`.
    """
    DATA_PATH = Path(args.data_path)
    test_data_path = DATA_PATH / "Fpz-Cz_test_encoding"

    test_transforms = transforms.Compose([
        transforms.ToTensor(),
    ])

    test_dataset = Encoding_Dataset(root_path=test_data_path,
                                    transform=test_transforms)
    # Evaluation does not need shuffling; keep iteration deterministic.
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False)

    model = resnet18(num_classes=5).to(args.device)
    model.load_state_dict(torch.load('./resnet18_encoding.pth'))

    criterion = nn.CrossEntropyLoss().to(args.device)

    test_correct, test_loss = test(model, test_loader, criterion,
                                   device=args.device)

    # BUG FIX: normalize by the true sample count. The original divided by
    # len(test_loader) * batch_size, which overcounts whenever the final
    # batch is partial (DataLoader's drop_last defaults to False), deflating
    # the reported metrics. Assumes test() returns per-sample sums — same
    # assumption the original normalization made.
    num_samples = len(test_dataset)
    test_acc = test_correct / num_samples
    test_loss = test_loss / num_samples

    print(f"[TEST ACC : {test_acc}] | [TEST LOSS : {test_loss}]")
def main(args):
    """Evaluate a ResNet-18 classifier on a spectrogram-image test split.

    Restores model weights from the checkpoint at `args.load_path`
    (expects a dict with a 'model_state_dict' entry), evaluates on the
    `{channel}_test` ImageFolder, and prints accuracy and mean loss.

    Args:
        args: namespace with at least `data_path`, `channel`, `batch_size`,
            `device`, `load_path`.
    """
    DATA_PATH = Path(args.data_path)
    image_size = (128, 1024)
    test_data_path = DATA_PATH / f"{args.channel}_test"

    test_transforms = transforms.Compose([
        transforms.Grayscale(),
        transforms.Resize(image_size),
        transforms.ToTensor(),
    ])

    test_dataset = ImageFolder(root=test_data_path, transform=test_transforms)
    # Evaluation does not need shuffling; keep iteration deterministic.
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False)

    # Model is moved to the target device once; the original called
    # .to(args.device) twice (before and after loading the state dict).
    model = resnet18(num_classes=5)
    model.load_state_dict(torch.load(args.load_path)['model_state_dict'])
    model = model.to(args.device)

    criterion = nn.CrossEntropyLoss().to(args.device)

    test_correct, test_loss = test(model, test_loader, criterion,
                                   device=args.device)

    # BUG FIX: normalize by the true sample count. The original divided by
    # len(test_loader) * batch_size, which overcounts whenever the final
    # batch is partial (DataLoader's drop_last defaults to False), deflating
    # the reported metrics. Assumes test() returns per-sample sums — same
    # assumption the original normalization made.
    num_samples = len(test_dataset)
    test_acc = test_correct / num_samples
    test_loss = test_loss / num_samples

    print(f"[TEST ACC : {test_acc}] | [TEST LOSS : {test_loss}]")
def main(args):
    """Train a ResNet-18 classifier on spectrogram-image train/valid splits.

    Uses a class-balanced WeightedRandomSampler for training, validates
    each epoch, and checkpoints (model + optimizer state) to
    `args.save_path` whenever validation accuracy improves.

    Args:
        args: namespace with at least `data_path`, `channel`, `batch_size`,
            `device`, `lr`, `epochs`, `save_path`.
    """
    DATA_PATH = Path(args.data_path)
    image_size = (128, 1024)
    train_data_path = DATA_PATH / f"{args.channel}_train"
    valid_data_path = DATA_PATH / f"{args.channel}_valid"

    train_transforms = transforms.Compose([
        transforms.Grayscale(),
        transforms.Resize(image_size),
        transforms.ToTensor(),
    ])
    valid_transforms = transforms.Compose([
        transforms.Grayscale(),
        transforms.Resize(image_size),
        transforms.ToTensor(),
    ])

    train_dataset = ImageFolder(root=train_data_path, transform=train_transforms)
    valid_dataset = ImageFolder(root=valid_data_path, transform=valid_transforms)

    # Oversample minority classes so each class is drawn roughly equally.
    weights = make_weights_for_balanced_classes(train_dataset.imgs,
                                                len(train_dataset.classes))
    weights = torch.DoubleTensor(weights)
    sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))

    train_loader = torch.utils.data.DataLoader(train_dataset, sampler=sampler,
                                               batch_size=args.batch_size)
    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False)

    model = resnet18(num_classes=5).to(args.device)

    criterion = nn.CrossEntropyLoss().to(args.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    # Decay LR by 10x at epochs 30 and 80.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [30, 80], gamma=0.1)

    # BUG FIX: normalize metrics by the true sample counts. The original
    # divided by len(loader) * batch_size, which overcounts whenever the
    # final batch is partial (drop_last defaults to False), deflating the
    # reported metrics. Assumes train()/valid() return per-sample sums —
    # same assumption the original normalization made.
    num_train = len(train_dataset)
    num_valid = len(valid_dataset)

    best_acc = 0
    for e in range(0, args.epochs):
        train_correct, train_loss = train(model, train_loader, optimizer,
                                          criterion, device=args.device)
        train_acc = train_correct / num_train
        train_loss = train_loss / num_train

        valid_correct, valid_loss = valid(model, valid_loader, criterion,
                                          device=args.device)
        valid_acc = valid_correct / num_valid
        valid_loss = valid_loss / num_valid

        scheduler.step()

        print(f"[EPOCH : {args.epochs} / {e}] || [TRAIN ACC : {train_acc}] || [TRAIN LOSS : {train_loss}]"
              f"|| [VALID ACC : {valid_acc}] || [VALID LOSS : {valid_loss}]")

        # Keep only the best checkpoint by validation accuracy.
        if best_acc < valid_acc:
            torch.save({'epoch': e,
                        'model_state_dict': model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict()},
                       args.save_path)
            best_acc = valid_acc
def main(args):
    """Train a ResNet-18 classifier on the Fpz-Cz encoding train/valid splits.

    Uses a class-balanced WeightedRandomSampler for training, validates
    each epoch, and saves the model state dict to
    './resnet18_encoding.pth' whenever validation accuracy improves.

    Args:
        args: namespace with at least `batch_size`, `device`, `lr`, `epochs`.
    """
    DATA_PATH = Path("store/public_dataset")
    train_data_path = DATA_PATH / "Fpz-Cz_train_encoding"
    valid_data_path = DATA_PATH / "Fpz-Cz_valid_encoding"

    transformer = transforms.Compose([transforms.ToTensor()])

    train_dataset = Encoding_Dataset(root_path=train_data_path,
                                     transform=transformer)
    valid_dataset = Encoding_Dataset(root_path=valid_data_path,
                                     transform=transformer)

    # Oversample minority classes so each class is drawn roughly equally.
    weights = make_weights_for_balanced_classes(train_dataset.data,
                                                len(train_dataset.classes))
    weights = torch.DoubleTensor(weights)
    sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))

    train_loader = torch.utils.data.DataLoader(train_dataset, sampler=sampler,
                                               batch_size=args.batch_size)
    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False)

    model = resnet18(num_classes=5).to(args.device)

    criterion = nn.CrossEntropyLoss().to(args.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    # Decay LR by 10x at epoch 30.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [30], gamma=0.1)

    # BUG FIX: normalize metrics by the true sample counts. The original
    # divided by len(loader) * batch_size, which overcounts whenever the
    # final batch is partial (drop_last defaults to False), deflating the
    # reported metrics. Assumes train()/valid() return per-sample sums —
    # same assumption the original normalization made.
    num_train = len(train_dataset)
    num_valid = len(valid_dataset)

    best_acc = 0
    for e in range(0, args.epochs):
        train_correct, train_loss = train(model, train_loader, optimizer,
                                          criterion, device=args.device)
        train_acc = train_correct / num_train
        train_loss = train_loss / num_train

        valid_correct, valid_loss = valid(model, valid_loader, criterion,
                                          device=args.device)
        valid_acc = valid_correct / num_valid
        valid_loss = valid_loss / num_valid

        scheduler.step()

        print(
            f"[EPOCH : {args.epochs} / {e}] || [TRAIN ACC : {train_acc}] || [TRAIN LOSS : {train_loss}]"
            f"|| [VALID ACC : {valid_acc}] || [VALID LOSS : {valid_loss}]")

        # Keep only the best weights by validation accuracy.
        if best_acc < valid_acc:
            torch.save(model.state_dict(), './resnet18_encoding.pth')
            best_acc = valid_acc