def validate(self, valLoader, lr=None, mode='val'):
    """Evaluate episodic few-shot accuracy with the SIB head.

    :param valLoader: episode source; must expose ``getEpisode()`` in
        'test' mode and be iterable in 'val' mode
    :param float lr: learning rate passed to ``netSIB``; defaults to the
        optimizer's current learning rate
    :param str mode: 'test' (randomly sampled episodes) or 'val'
        (pre-defined episodes)
    :return: tuple ``(mean, ci95)`` — mean accuracy and the 95%%
        confidence-interval half-width over episodes
    :raises ValueError: if ``mode`` is neither 'test' nor 'val'
    """
    if mode == 'test':
        n_episodes = self.nEpisode
        self.logger.info(f'\n\nTest mode: randomly sample {n_episodes:d} episodes...')
    elif mode == 'val':
        n_episodes = len(valLoader)
        self.logger.info(f'\n\nValidation mode: pre-defined {n_episodes:d} episodes...')
        valLoader = iter(valLoader)
    else:
        raise ValueError('mode is wrong!')

    episode_acc = []
    top1_meter = AverageMeter()

    # Only the feature extractor is frozen; netSIB stays in train mode,
    # since updating bn helps to estimate a better gradient.
    self.netFeat.eval()

    if lr is None:
        lr = self.optimizer.param_groups[0]['lr']

    for episode_idx in range(n_episodes):
        if mode == 'test':
            batch = valLoader.getEpisode()
        else:
            batch = next(valLoader)
        batch = to_device(batch, self.device)

        # Drop the singleton batch axis the loader prepends to each field.
        support_x = batch['SupportTensor'].squeeze(0)
        support_y = batch['SupportLabel'].squeeze(0)
        query_x = batch['QueryTensor'].squeeze(0)
        query_y = batch['QueryLabel'].squeeze(0)

        # The backbone is frozen during validation; gradients are only
        # needed inside netSIB's own synthetic-gradient updates.
        with torch.no_grad():
            support_feat = self.netFeat(support_x).unsqueeze(0)
            query_feat = self.netFeat(query_x).unsqueeze(0)
            support_y = support_y.unsqueeze(0)

        scores = self.netSIB(lr, support_feat, support_y, query_feat)
        scores = scores.view(query_feat.size()[0] * query_feat.size()[1], -1)
        query_y = query_y.view(-1)

        acc = accuracy(scores, query_y, topk=(1, ))
        top1_meter.update(acc[0].item(), scores.size()[0])
        episode_acc.append(acc[0].item())
        progress_bar(episode_idx, n_episodes, f'Top1: {top1_meter.avg:.3f}%')

    mean, ci95 = getCi(episode_acc)
    self.logger.info(f'Final Perf with 95% confidence intervals: {mean:.3f}%, {ci95:.3f}%')
    return mean, ci95
def validate(self, valLoader, lr=None):
    """Test-time evaluation: sample ``self.nEpisode`` random episodes.

    :param valLoader: episode source exposing ``getEpisode()``
    :param float lr: learning rate forwarded to ``netSIB``; defaults to
        the optimizer's current learning rate
    :return: tuple ``(mean, ci95)`` — mean accuracy and the 95%%
        confidence-interval half-width over episodes
    """
    n_episodes = self.nEpisode
    self.logger.info(f'\n\nTest mode: randomly sample {n_episodes:d} episodes...')

    episode_acc = []
    top1_meter = AverageMeter()

    # Freeze the feature extractor for evaluation.
    self.netFeat.eval()

    if lr is None:
        lr = self.optimizer.param_groups[0]['lr']

    for episode_idx in range(n_episodes):
        batch = to_device(valLoader.getEpisode(), self.device)

        # Drop the singleton batch axis the loader prepends to each field.
        support_x = batch['SupportTensor'].squeeze(0)
        support_y = batch['SupportLabel'].squeeze(0)
        query_x = batch['QueryTensor'].squeeze(0)
        query_y = batch['QueryLabel'].squeeze(0)

        # Gradients are only needed inside netSIB, not for the backbone.
        with torch.no_grad():
            support_feat = self.netFeat(support_x).unsqueeze(0)
            query_feat = self.netFeat(query_x).unsqueeze(0)
            support_y = support_y.unsqueeze(0)

        scores = self.netSIB(support_feat, support_y, query_feat, lr)
        scores = scores.view(query_feat.shape[0] * query_feat.shape[1], -1)
        query_y = query_y.view(-1)

        acc = accuracy(scores, query_y, topk=(1, ))
        top1_meter.update(acc[0].item(), scores.shape[0])
        episode_acc.append(acc[0].item())
        progress_bar(episode_idx, n_episodes, f'Top1: {top1_meter.avg:.3f}%')

    mean, ci95 = getCi(episode_acc)
    self.logger.info(f'Final Perf with 95% confidence intervals: {mean:.3f}%, {ci95:.3f}%')
    return mean, ci95
def validate(self, valLoader, lr=None, mode='val'):
    """Run one epoch on the val-set (or randomly sampled test episodes).

    :param valLoader: the dataloader of val-set; must expose
        ``getEpisode()`` in 'test' mode and be iterable in 'val' mode
    :param float lr: learning rate for synthetic GD; defaults to the
        optimizer's current learning rate
    :param str mode: 'val' or 'test'
    :return: tuple ``(mean, ci95)`` — mean accuracy and the 95%%
        confidence-interval half-width over episodes
    :raises ValueError: if ``mode`` is neither 'test' nor 'val'
    """
    if mode == 'test':
        n_episodes = self.nEpisode
        self.logger.info(f'\n\nTest mode: randomly sample {n_episodes:d} episodes...')
    elif mode == 'val':
        n_episodes = len(valLoader)
        self.logger.info(f'\n\nValidation mode: pre-defined {n_episodes:d} episodes...')
        valLoader = iter(valLoader)
    else:
        raise ValueError('mode is wrong!')

    episode_acc = []
    top1_meter = AverageMeter()

    # netSIB is deliberately NOT switched to eval: updating bn helps to
    # estimate a better gradient.
    self.netFeat.eval()

    if lr is None:
        lr = self.optimizer.param_groups[0]['lr']

    for episode_idx in range(n_episodes):
        if mode == 'test':
            batch = valLoader.getEpisode()
        else:
            batch = next(valLoader)
        batch = to_device(batch, self.device)

        # Drop the singleton batch axis the loader prepends to each field.
        support_x = batch['SupportTensor'].squeeze(0)
        support_y = batch['SupportLabel'].squeeze(0)
        query_x = batch['QueryTensor'].squeeze(0)
        query_y = batch['QueryLabel'].squeeze(0)

        with torch.no_grad():
            # Features come from the pretrained backbone here, not from
            # self.netFeat.
            support_feat = self.pretrain.get_features(support_x).unsqueeze(0)
            query_feat = self.pretrain.get_features(query_x).unsqueeze(0)
            support_y = support_y.unsqueeze(0)

        # Score the whole query set in one (transductive) forward pass.
        scores = self.netSIB(support_feat, support_y, query_feat, lr)
        scores = scores.view(query_feat.shape[0] * query_feat.shape[1], -1)
        query_y = query_y.view(-1)

        if self.davg:
            # Per-query hardness weights (log-odds based) used by
            # ``accuracy`` for difficulty-averaged scoring.
            diff_scores = self._evaluate_hardness_logodd(
                self.pretrain, support_feat.squeeze(0),
                query_feat.squeeze(0), support_y.squeeze(0), query_y)
        else:
            diff_scores = None

        acc = accuracy(scores, query_y, topk=(1, ), diff_scores=diff_scores)
        top1_meter.update(acc[0].item(), scores.shape[0])
        episode_acc.append(acc[0].item())
        progress_bar(episode_idx, n_episodes, f'Top1: {top1_meter.avg:.3f}%')

    mean, ci95 = getCi(episode_acc)
    msg = f'Final Perf with 95% confidence intervals: {mean:.3f}%, {ci95:.3f}%'
    self.logger.info(msg)
    self.write_output_message(msg)
    return mean, ci95
def validate(self, valLoader, mode='val'):
    """Evaluate the refine+classifier pipeline over episodes.

    :param valLoader: episode source; must expose ``getEpisode()`` in
        'test' mode and be iterable in 'val' mode
    :param str mode: 'test' (randomly sampled episodes) or 'val'
        (pre-defined episodes)
    :return: tuple ``(mean, ci95)`` — mean accuracy and the 95%%
        confidence-interval half-width over episodes
    :raises ValueError: if ``mode`` is neither 'test' nor 'val'
    """
    if mode == 'test':
        n_episodes = self.nEpisode
        self.logger.info(f'\n\nTest mode: randomly sample {n_episodes:d} episodes...')
    elif mode == 'val':
        n_episodes = len(valLoader)
        self.logger.info(f'\n\nValidation mode: pre-defined {n_episodes:d} episodes...')
        valLoader = iter(valLoader)
    else:
        raise ValueError('mode is wrong!')

    episode_acc = []
    top1_meter = AverageMeter()

    self.netFeat.eval()
    self.netRefine.eval()
    self.netClassifier.eval()

    for episode_idx in range(n_episodes):
        if mode == 'test':
            batch = valLoader.getEpisode()
        else:
            batch = next(valLoader)
        batch = to_device(batch, self.device)

        # Drop the singleton batch axis the loader prepends to each field.
        support_x = batch['SupportTensor'].squeeze(0)
        support_y = batch['SupportLabel'].squeeze(0)
        query_x = batch['QueryTensor'].squeeze(0)
        query_y = batch['QueryLabel'].squeeze(0)

        with torch.no_grad():
            support_feat = self.netFeat(support_x).unsqueeze(0)
            query_feat = self.netFeat(query_x).unsqueeze(0)
            support_y = support_y.unsqueeze(0)

        n_support = support_feat.size()[1]
        n_query = query_feat.size()[1]

        # Residual refinement over the concatenated episode features,
        # then split back into support / query along dim 1.
        feat = torch.cat((support_feat, query_feat), dim=1)
        refined = feat + self.netRefine(feat)
        refined_support = refined.narrow(1, 0, n_support)
        refined_query = refined.narrow(1, n_support, n_query)

        scores = self.netClassifier(refined_support, support_y, refined_query)
        scores = scores.squeeze(0)
        query_y = query_y.view(-1)

        acc = accuracy(scores, query_y, topk=(1, ))
        top1_meter.update(acc[0].item(), scores.size()[0])
        episode_acc.append(acc[0].item())
        progress_bar(episode_idx, n_episodes, f'Top1: {top1_meter.avg:.3f}%')

    mean, ci95 = getCi(episode_acc)
    self.logger.info(f'Final Perf with 95% confidence intervals: {mean:.3f}%, {ci95:.3f}%')

    # Restore train mode for the trainable heads; netFeat is left in
    # eval mode, matching the original behaviour.
    self.netRefine.train()
    self.netClassifier.train()
    return mean, ci95