                                          shuffle=True, num_workers=2)
augloader1 = torch.utils.data.DataLoader(augset1, batch_size=256,
                                          shuffle=True, num_workers=2)
augloader2 = torch.utils.data.DataLoader(augset2, batch_size=256,
                                          shuffle=True, num_workers=2)
augloader3 = torch.utils.data.DataLoader(augset3, batch_size=256,
                                          shuffle=True, num_workers=2)

net = get_model()
checkPointDir = './model.pth'
checkpoint = torch.load(checkPointDir)
net.load_state_dict(checkpoint)
net = net.cuda()

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)

print('Start Training')
net.train()
for epoch in range(100):
    running_loss = 0.0
    for i, data in enumerate(trainloader):  # Original training set
        # get the inputs; data is a list of [inputs, labels]
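        # -- Sketch of the remaining loop body (assumed; the original body is
        # -- not shown in this excerpt): a standard forward/backward/step cycle
        # -- with running-loss accumulation, matching the CrossEntropyLoss
        # -- criterion and Adam optimizer set up above.
        inputs, labels = data
        inputs, labels = inputs.cuda(), labels.cuda()

        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()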
# ==========================================================================
# ==========================================================================
import torch

from submission import get_model, eval_transform, team_id, team_name, email_address

trainset = CustomDataset(root='./dataset', split="train", transform=train_transform)
# trainset = addIndexToTrainData(trainset)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=256,
                                          shuffle=True, num_workers=2)

net = get_model().cuda()
net = torch.nn.DataParallel(net)
net = net.cuda()
# trainLabeledImage(net, trainloader)

# unLabeledSet is needed by the index-wrapping call below, so it is left active here.
unLabeledSet = CustomDataset(root='./dataset', split="unlabeled", transform=train_transform)
xxx = addIndexToTrainData(unLabeledSet)
# unLabeledLoader = torch.utils.data.DataLoader(unLabeledSet, batch_size=10, shuffle=True, num_workers=2)
#
# net = get_model()
# checkPointDir = './checkPoint/net_demo.pth'
# # checkpoint = torch.load(args.checkpoint_path)
# checkpoint = torch.load(checkPointDir)
# net.load_state_dict(checkpoint)
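# A minimal sketch (not part of the original script) of how the commented-out
# checkpoint restore above could be revived now that the model is wrapped in
# torch.nn.DataParallel: a state_dict saved from a bare model lacks the
# "module." prefix that DataParallel adds to parameter names, so it should be
# loaded into net.module instead of net. The helper name and the checkpoint
# path are assumptions for illustration only.
def load_checkpoint_into_dataparallel(net, checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    if isinstance(net, torch.nn.DataParallel):
        # Load into the wrapped module so key names match a bare-model checkpoint.
        net.module.load_state_dict(checkpoint)
    else:
        net.load_state_dict(checkpoint)
    return net

# Example usage, mirroring the commented-out lines above:
# net = load_checkpoint_into_dataparallel(net, './checkPoint/net_demo.pth')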