def __init__(self):
    """Parse command-line arguments, set up run logging, and pick the device.

    Side effects: creates the "training" and "results_all" log writers on the
    run context; selects CUDA whenever it is available, else the CPU.
    """
    self.args = cli.parse_commandline_args()
    self.context = RunContext(logging)
    self.training_log = self.context.create_train_log("training")
    self.results_all_log = self.context.create_results_all_log("results_all")
    # Prefer the GPU if CUDA is available on this machine.
    use_cuda = torch.cuda.is_available()
    self.device = torch.device("cuda" if use_cuda else "cpu")
for param_group in optimizer.param_groups:
    param_group['lr'] = lr


def get_current_consistency_weight(epoch):
    """Return the consistency loss weight for *epoch*.

    Consistency ramp-up from https://arxiv.org/abs/1610.02242: the weight is
    sigmoid-ramped from 0 up to args.consistency over args.consistency_rampup
    epochs.
    """
    return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)


def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of labels; entries equal to NO_LABEL do not
            count toward the denominator.
        topk: iterable of k values to report precision for.

    Returns:
        A list of one-element float tensors, one per k, each the precision@k
        in percent relative to the number of labeled examples in the batch.
    """
    maxk = max(topk)
    # Use .item() so the denominator is always a Python number. The original
    # called .float() on max(tensor, 1e-8), which raises AttributeError when
    # the batch contains no labeled samples (max() then returns the plain
    # float 1e-8, which has no .float() method).
    labeled_minibatch_size = max(target.ne(NO_LABEL).sum().item(), 1e-8)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape() tolerates non-contiguous inputs, unlike view().
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / labeled_minibatch_size))
    return res


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    args = cli.parse_commandline_args()
    main(RunContext(__file__, 0))
for param_group in optimizer.param_groups:
    param_group['lr'] = lr


def get_current_consistency_weight(epoch):
    """Return the consistency weight for *epoch*.

    Sigmoid ramp-up from https://arxiv.org/abs/1610.02242.
    """
    rampup = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * rampup


def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Only examples whose target differs from NO_LABEL count toward the
    denominator; returns one one-element tensor per requested k, in percent.
    """
    maxk = max(topk)
    labeled_minibatch_size = max(target.ne(NO_LABEL).sum(), 1e-8)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    hits = pred.eq(target.view(1, -1).expand_as(pred))
    # The percentage factor is the same for every k, so compute it once.
    scale = 100.0 / labeled_minibatch_size
    return [
        hits[:k].view(-1).float().sum(0, keepdim=True).mul_(scale)
        for k in topk
    ]


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    args = cli.parse_commandline_args()
    main(RunContext(__file__, 0))