def train(args):
    """Episodic training loop for few-shot classification.

    Builds train/val episode loaders, trains ``FewShotModel`` with a
    prototypical-network objective (squared Euclidean distance between query
    features and per-class mean shot features), and periodically validates
    and checkpoints.

    Args:
        args: parsed CLI namespace; uses nway, kshot, query, dpath,
            restore_ckpt, name.
    """
    # the number of N way, K shot images
    k = args.nway * args.kshot

    # Train data loading
    dataset = Dataset(args.dpath, state='train')
    train_sampler = Train_Sampler(dataset._labels, n_way=args.nway,
                                  k_shot=args.kshot, query=args.query)
    data_loader = DataLoader(dataset=dataset, batch_sampler=train_sampler,
                             num_workers=4, pin_memory=True)

    # Validation data loading
    val_dataset = Dataset(args.dpath, state='val')
    val_sampler = Sampler(val_dataset._labels, n_way=args.nway,
                          k_shot=args.kshot, query=args.query)
    val_data_loader = DataLoader(dataset=val_dataset, batch_sampler=val_sampler,
                                 num_workers=4, pin_memory=True)

    # model setting
    model = FewShotModel()

    # pretrained model load
    if args.restore_ckpt is not None:
        state_dict = torch.load(args.restore_ckpt)
        model.load_state_dict(state_dict)
    model.cuda()
    model.train()

    # Default optimizer setting
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    tl = Averager()  # save average loss
    ta = Averager()  # save average accuracy

    def episode_logits_loss(data_shot, data_query, labels):
        """Prototypical-network forward pass for one episode.

        data_shot is laid out as (nway * kshot, 3, h, w): the kshot images of
        class 0 first, then class 1, etc. — so prototypes are contiguous
        kshot-sized slices averaged over dim 0.
        Returns (logits, loss) where logits are squared Euclidean distances
        [query, nway] (smaller = closer) and loss is cross-entropy over the
        negated distances.
        """
        features_shot = model(data_shot)
        prototypes = torch.zeros(args.nway, features_shot.size(1)).cuda()
        for w in range(args.nway):
            prototypes[w] = features_shot[w * args.kshot:(w + 1) * args.kshot].mean(dim=0)
        features_query = model(data_query)
        logits = square_euclidean_metric(features_query, prototypes)
        # cross_entropy(-d) == mean of -log_softmax(-d)[label]: queries are
        # pulled toward their own prototype, pushed from the others.
        loss = F.cross_entropy(-logits, labels)
        return logits, loss

    def remap_query_labels(label_shot, label_query):
        """Convert raw class ids of the query set into 0..nway-1 indices,
        using the sorted distinct shot labels as the class order."""
        classes = sorted(set(label_shot.tolist()))
        return torch.tensor([classes.index(q) for q in label_query.tolist()]).cuda()

    # training start
    print('train start')
    for i in range(TOTAL):
        for episode in data_loader:
            optimizer.zero_grad()
            data, label = [_.cuda() for _ in episode]  # load an episode

            # split an episode images and labels into shots and query set
            # note! data_shot shape is (nway * kshot, 3, h, w),
            # not (kshot * nway, 3, h, w) — take care when reshaping.
            data_shot, data_query = data[:k], data[k:]
            label_shot, label_query = label[:k], label[k:]
            labels = remap_query_labels(label_shot, label_query)

            logits, loss = episode_logits_loss(data_shot, data_query, labels)

            acc = count_acc(logits, labels)
            tl.add(loss.item())
            ta.add(acc)

            loss.backward()
            optimizer.step()
            logits = None
            loss = None

        if (i + 1) % PRINT_FREQ == 0:
            print('train {}, loss={:.4f} acc={:.4f}'.format(i + 1, tl.item(), ta.item()))
            # initialize loss and accuracy mean
            tl = Averager()
            ta = Averager()

        # validation start
        if (i + 1) % VAL_FREQ == 0:
            print('validation start')
            model.eval()
            with torch.no_grad():
                vl = Averager()  # save average loss
                va = Averager()  # save average accuracy
                for _ in range(VAL_TOTAL):
                    for episode in val_data_loader:
                        data, label = [_.cuda() for _ in episode]
                        data_shot, data_query = data[:k], data[k:]  # load an episode
                        label_shot, label_query = label[:k], label[k:]
                        labels = remap_query_labels(label_shot, label_query)

                        logits, loss = episode_logits_loss(data_shot, data_query, labels)

                        acc = count_acc(logits, labels)
                        vl.add(loss.item())
                        va.add(acc)
                        logits = None
                        loss = None

                    print('val accuracy mean : %.4f' % va.item())
                    print('val loss mean : %.4f' % vl.item())
                    # initialize loss and accuracy mean
                    vl = Averager()
                    va = Averager()
            model.train()

        if (i + 1) % SAVE_FREQ == 0:
            PATH = 'checkpoints/%d_%s.pth' % (i + 1, args.name)
            torch.save(model.state_dict(), PATH)
            print('model saved, iteration : %d' % i)
def val(args):
    """Validation/test with an ensemble of four AlexNet checkpoints.

    Each model produces prototypical-network distance logits for an episode;
    the four logit tensors are averaged and the nearest prototype (argmin of
    distance) is taken as the prediction, which is also written to a CSV.

    Args:
        args: parsed CLI namespace; uses nway, kshot, query, dpath.
    """
    # the number of N way, K shot images
    k = args.nway * args.kshot

    def load_model(ckpt_path):
        """Build an AlexNet, restore weights, wrap in DataParallel, eval mode."""
        m = AlexNet()
        m.cuda()
        m.load_state_dict(torch.load(ckpt_path))
        m = torch.nn.DataParallel(m, device_ids=range(torch.cuda.device_count()))
        m.eval()
        return m

    # Ensemble members (checkpoint names encode their standalone val accuracy).
    models = [load_model(p) for p in (
        'checkpoints/alexnet_63.pth',
        'checkpoints/alexnet_6240.pth',
        'checkpoints/alexnet_6215.pth',
        'checkpoints/alexnet_6085.pth',
    )]

    csv = csv_write(args)

    # Validation data loading
    val_dataset = Dataset(args.dpath, state='val')
    val_sampler = Sampler(val_dataset._labels, n_way=args.nway,
                          k_shot=args.kshot, query=args.query)
    val_data_loader = DataLoader(dataset=val_dataset, batch_sampler=val_sampler,
                                 num_workers=8, pin_memory=True)

    def model_logits(m, data_shot, data_query):
        """Distance logits [query, nway] for one model.

        data_shot is (nway * kshot, 3, h, w) with each class's kshot images
        contiguous, so prototypes are means of contiguous kshot slices.
        """
        feats = m(data_shot)
        prototypes = torch.zeros(args.nway, feats.size(1)).cuda()
        for w in range(args.nway):
            prototypes[w] = feats[w * args.kshot:(w + 1) * args.kshot].mean(dim=0)
        return square_euclidean_metric(m(data_query), prototypes)

    print('test start')
    # validation start
    print('validation start')
    with torch.no_grad():
        va = Averager()  # save average accuracy
        for _ in range(VAL_TOTAL):
            for episode in val_data_loader:
                data, label = [_.cuda() for _ in episode]  # load an episode
                # note! data_shot shape is (nway * kshot, 3, h, w),
                # not (kshot * nway, 3, h, w).
                data_shot, data_query = data[:k], data[k:]
                label_shot, label_query = label[:k], label[k:]

                # convert raw query labels into 0..nway-1 indices, ordered by
                # the sorted distinct shot labels
                classes = sorted(set(label_shot.tolist()))
                labels = torch.tensor(
                    [classes.index(q) for q in label_query.tolist()]).cuda()

                # average the four models' distance maps; nearest prototype wins
                logits = sum(model_logits(m, data_shot, data_query)
                             for m in models) / len(models)
                pred = torch.argmin(logits, dim=1)

                acc = count_acc(logits, labels)
                va.add(acc)
                csv.add(pred)
                logits = None

            print('val accuracy mean : %.4f' % va.item())
            # NOTE(review): the old code also printed a "val loss mean" read
            # from an Averager that never received a value — dropped, since no
            # loss is computed here.
            va = Averager()

    csv.close()
    print('Test finished, check the csv file!')
    exit()
def train(args):
    """Episodic training of an AlexNet feature extractor for few-shot
    classification (prototypical-network objective), with periodic
    validation and checkpointing.

    Args:
        args: parsed CLI namespace; uses nway, kshot, query, dpath,
            restore_ckpt, test_mode, se (start epoch), name.
    """
    # the number of N way, K shot images
    k = args.nway * args.kshot

    # model setting
    model = AlexNet()
    model.cuda()

    # pretrained model load
    if args.restore_ckpt is not None:
        state_dict = torch.load(args.restore_ckpt)
        model.load_state_dict(state_dict)

    model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))

    if args.test_mode == 1:
        Test_phase(model, args, k)
        return

    # Train data loading
    dataset = Dataset(args.dpath, state='train')
    train_sampler = Train_Sampler(dataset._labels, n_way=args.nway,
                                  k_shot=args.kshot, query=args.query)
    data_loader = DataLoader(dataset=dataset, batch_sampler=train_sampler,
                             num_workers=8, pin_memory=True)

    # Validation data loading
    val_dataset = Dataset(args.dpath, state='val')
    val_sampler = Sampler(val_dataset._labels, n_way=args.nway,
                          k_shot=args.kshot, query=args.query)
    val_data_loader = DataLoader(dataset=val_dataset, batch_sampler=val_sampler,
                                 num_workers=8, pin_memory=True)

    optimizer = torch.optim.SGD(model.parameters(), lr=0.001,
                                momentum=0.9, weight_decay=5e-4)

    def episode_forward(episode):
        """Shared per-episode forward pass (used by both train and val).

        Splits the episode into shot/query, remaps raw query labels to
        0..nway-1 (ordered by sorted distinct shot labels), builds class
        prototypes from contiguous kshot slices of the shot features
        (data_shot is (nway * kshot, 3, h, w), NOT (kshot * nway, ...)),
        and scores queries by squared Euclidean distance to each prototype.

        Returns:
            (logits, loss, labels): distance logits [query, nway]
            (smaller = closer), scalar loss, remapped query labels [query].
        """
        data, label = [t.cuda() for t in episode]
        data_shot, data_query = data[:k], data[k:]
        label_shot, label_query = label[:k], label[k:]

        classes = sorted(set(label_shot.tolist()))
        labels = torch.tensor(
            [classes.index(q) for q in label_query.tolist()]).cuda()

        feats = model(data_shot)
        prototypes = torch.zeros(args.nway, feats.size(1)).cuda()
        for w in range(args.nway):
            prototypes[w] = feats[w * args.kshot:(w + 1) * args.kshot].mean(dim=0)

        logits = square_euclidean_metric(model(data_query), prototypes)
        # Equivalent to the old -log_softmax(-d).gather(label).mean(), but
        # without the old reshape that silently assumed kshot == nway.
        loss = F.cross_entropy(-logits, labels)
        return logits, loss, labels

    tl = Averager()  # save average loss
    ta = Averager()  # save average accuracy

    # training start
    print('train start')
    model.train()
    for i in range(args.se + 1, TOTAL):
        for episode in data_loader:
            optimizer.zero_grad()
            logits, loss, labels = episode_forward(episode)

            acc = count_acc(logits, labels)
            tl.add(loss.item())
            ta.add(acc)

            loss.backward()
            optimizer.step()
            logits = None
            loss = None

        if (i + 1) % PRINT_FREQ == 0:
            print('train {}, loss={:.4f} acc={:.4f}'.format(
                i + 1, tl.item(), ta.item()))
            # initialize loss and accuracy mean
            tl = Averager()
            ta = Averager()

        # validation start
        if (i + 1) % VAL_FREQ == 0:
            print('validation start')
            model.eval()
            with torch.no_grad():
                vl = Averager()  # save average loss
                va = Averager()  # save average accuracy
                for _ in range(VAL_TOTAL):
                    for episode in val_data_loader:
                        logits, loss, labels = episode_forward(episode)
                        acc = count_acc(logits, labels)
                        vl.add(loss.item())
                        va.add(acc)
                        logits = None
                        loss = None

                    print('val accuracy mean : %.4f' % va.item())
                    print('val loss mean : %.4f' % vl.item())
                    # initialize loss and accuracy mean
                    vl = Averager()
                    va = Averager()
            model.train()

        if (i + 1) % SAVE_FREQ == 0:
            PATH = 'checkpoints/%d_%s.pth' % (i + 1, args.name)
            # DataParallel wraps the net: save the inner module's weights so
            # checkpoints load into a bare AlexNet.
            torch.save(model.module.state_dict(), PATH)
            print('model saved, iteration : %d' % i)