def metrics(self, testloader, accuracy=True, auc=False, conf_matrix=False):
    am = meter.AUCMeter()
    cm = meter.ConfusionMeter(2)
    correct = 0
    total = 0
    for data in testloader:
        x, y = data
        y_ = self.model(Variable(x))
        _, predicted = torch.max(y_.data, 1)
        cm.add(y_.data, y)
        am.add(y_.data[:, 1].clone(), y)
        total += y.size(0)
        correct += (predicted == y).sum().item()
    print(correct, total)
    if accuracy:
        print("Accuracy for the model is",
              round(correct / float(total) * 100, 4), correct, "/", total)
    if auc:
        print("Area under ROC curve for the given model is",
              round(am.value()[0], 4))
    if conf_matrix:
        print("Confusion Matrix for the given model is\n", cm.value())
def test(args, test_set, model, device):
    loss_fn = nn.CrossEntropyLoss()
    # MAE_fn = nn.L1Loss()
    mse_meter = meter.AverageValueMeter()  # despite the name, tracks the cross-entropy loss
    # mae_meter = meter.AverageValueMeter()
    acc_meter = meter.ConfusionMeter(10)
    model.eval()
    model.to(device)
    test_loader = DataLoader(dataset=test_set, batch_size=args.batchsize,
                             shuffle=args.shuffle, num_workers=args.workers)
    # model.set_mean_std(test_set.mean, test_set.std)
    with torch.no_grad():
        for idx, (datas, label) in enumerate(test_loader):
            label = label.to(device)
            datas = datas.to(device)
            scores = model(datas).squeeze()
            out_classes = torch.argmax(scores, 1)
            target_digit = torch.argmax(label, 1)
            loss = loss_fn(scores, target_digit)
            # acc_meter.add(mae.detach().item())
            mse_meter.add(loss.detach().item())
            acc_meter.add(out_classes, target_digit)
    acc = 100 * sum(acc_meter.value()[i, i] for i in range(10)) / acc_meter.value().sum()
    return mse_meter.value()[0], acc
def test(args):
    config = getattr(configs, args.model + 'Config')()
    config.word2id = build_word2id(
        [config.train_path, config.validation_path, config.test_path])
    config.embedding_pretrained = t.from_numpy(
        build_word2vec(config.embedding_pretrained_path, config.word2id))
    config.max_seq_len = get_max_len(
        [config.train_path, config.validation_path, config.test_path])
    test_set = MovieReviewDataset(root_path=config.test_path, config=config)
    test_dataloader = DataLoader(test_set, batch_size=config.batch_size,
                                 shuffle=False, num_workers=config.num_workers)
    model = getattr(network, args.model)(config).eval()
    if args.load_model_path:
        model.load(args.load_model_path)
    if args.use_gpu:
        model.cuda()
    y_true = []
    y_pred = []
    test_confusion_matrix = meter.ConfusionMeter(config.num_classes)
    test_confusion_matrix.reset()
    model.eval()
    for _iter, (test_data, test_label) in enumerate(test_dataloader):
        test_data = t.from_numpy(np.array([data.numpy() for data in test_data]))
        test_label = t.max(test_label, 1)[1]
        if args.use_gpu:
            test_data = test_data.cuda()
        test_data, test_label = Variable(test_data), Variable(test_label)
        test_logits, test_output = model(test_data)
        y_true.extend(test_label.numpy().tolist())
        y_pred.extend(test_logits.max(dim=1)[1].detach().tolist())
        test_confusion_matrix.add(test_logits.detach().squeeze(),
                                  test_label.type(t.LongTensor))
    test_cm = test_confusion_matrix.value()
    acc = 100. * test_cm.diagonal().sum() / test_cm.sum()
    FP, FN, TP, TN, FPR, FNR, TPR, P, F1 = cal_metrics(y_true, y_pred)
    print('acc', acc)
    print('FP: {FP}, FN: {FN}, TP: {TP}, TN: {TN}'.format(FP=FP, FN=FN, TP=TP, TN=TN))
    print('FPR: {FPR}, FNR: {FNR}, TPR: {TPR}, P: {P}, F1: {F1}'.format(
        FPR=FPR, FNR=FNR, TPR=TPR, P=P, F1=F1))
    print("test_cm:\n{test_cm}".format(test_cm=str(test_cm)))
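# `cal_metrics` is not defined in this snippet. A minimal sketch of what it
# plausibly computes for the binary case, given the y_true/y_pred lists built
# above (the name, argument order, and return order are assumed from the call
# site, not confirmed by the source):
def cal_metrics(y_true, y_pred):
    TP = sum(1 for t, p in zip(y_true, y_pred) if t == 1 and p == 1)
    TN = sum(1 for t, p in zip(y_true, y_pred) if t == 0 and p == 0)
    FP = sum(1 for t, p in zip(y_true, y_pred) if t == 0 and p == 1)
    FN = sum(1 for t, p in zip(y_true, y_pred) if t == 1 and p == 0)
    FPR = FP / (FP + TN) if (FP + TN) else 0.  # fall-out
    FNR = FN / (FN + TP) if (FN + TP) else 0.  # miss rate
    TPR = TP / (TP + FN) if (TP + FN) else 0.  # recall / sensitivity
    P = TP / (TP + FP) if (TP + FP) else 0.    # precision
    F1 = 2 * P * TPR / (P + TPR) if (P + TPR) else 0.
    return FP, FN, TP, TN, FPR, FNR, TPR, P, F1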
def test(self, test_data, val=False): self.model.eval() confusion_matrix = meter.ConfusionMeter(10) test_dataloader = DataLoader(test_data, 2000, shuffle=True) results = [] for i, (data, label) in enumerate(test_dataloader): if self.opt.use_gpu: data = data.cuda() label = label.cuda() score = self.model(data) out_digit = T.argmax(score, 1) target_digit = T.argmax(label, 1) if not val: bacth_results = [(target_digit.data.cpu().numpy(), out_digit.data.cpu().numpy()) for target_digit, out_digit in zip(target_digit, out_digit)] results += bacth_results confusion_matrix.add(out_digit, target_digit) accuracy = 100*sum([confusion_matrix.value()[i][i] for i in range(10)])/confusion_matrix.value().sum() self.model.train() if val: return confusion_matrix, accuracy else: return results, confusion_matrix, accuracy
def val(model, dataloader): """ 计算模型在验证集上的准确率等信息 验证:注意需要将模型设置与验证模式model.eval,验证完毕后需要将模式改回训练模式model.train :return: """ #吧模型设置为验证模式 model.eval() confusion_matrix = meter.ConfusionMeter(2) for ii, data in enumerate(dataloader): input, label = data val_input = Variable(input, volatile=True) val_label = Variable(label.long(), volatile=True) if opt.use_gpu: val_input = val_input.cuda() val_label = val_label.cuda() score = model(val_input) confusion_matrix.add(score.data.squeeze(), label.long()) #将复制模式改为训练模式 model.train() cm_value = confusion_matrix.value() accuracy = 100 * (cm_value[0][0] + cm_value[1][1]) / cm_value.sum() return confusion_matrix, accuracy
def val(model, dataloader):
    model.eval()
    confusion_matrix = meter.ConfusionMeter(opt.num_class)
    auc_meter = meter.AUCMeter()
    for ii, data in enumerate(dataloader):
        input, label = data
        val_input = Variable(input)
        val_label = Variable(label.long())
        if opt.use_gpu:
            val_input = val_input.cuda()
            val_label = val_label.cuda()
        with t.no_grad():
            score = model(val_input)
        confusion_matrix.add(score.data.squeeze(), label)
        if opt.num_class == 2:
            auc_meter.add(score.data[:, 1], label)
    model.train()
    cm_value = confusion_matrix.value()
    accuracy = 1. * np.trace(cm_value) / cm_value.sum()
    if opt.num_class == 2:
        return confusion_matrix, accuracy, auc_meter.value()[0]
    else:
        return confusion_matrix, accuracy
def _train_one_epoch(self):
    self.model.train()
    # meters (created once per epoch, not per batch, so the epoch
    # statistics accumulate over all batches)
    loss_meter = meter.AverageValueMeter()
    confusion_matrix = meter.ConfusionMeter(256)
    for step, (data, label) in enumerate(self.train_data):
        inputs = Variable(data)
        target = Variable(label)
        if len(self.params.gpus) > 0:
            inputs = inputs.cuda()
            target = target.cuda()
        # forward
        score = self.model(inputs)
        loss = self.criterion(score, target)
        # backward
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # meters update
        loss_meter.add(loss.item())
        # print(score.data, target.data)
        confusion_matrix.add(score.data, target.data)
    train_loss = loss_meter.value()[0]
    cm_value = confusion_matrix.value()
    train_acc = 100. * np.trace(cm_value) / cm_value.sum()
    print("Train_Loss:{},Train_Acc:{}".format(train_loss, train_acc))
    return train_loss, train_acc
def val(val_loader, model, criterion, epoch, use_cuda):
    global best_acc
    losses = AverageMeter()
    val_acc = AverageMeter()
    model.eval()  # set the model to evaluation mode
    # confusion matrix
    confusion_matrix = meter.ConfusionMeter(args.num_classes)
    for _, (inputs, targets) in enumerate(val_loader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)
        # compute output
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        confusion_matrix.add(outputs.data.squeeze(), targets.long())
        acc1 = accuracy(outputs.data, targets.data)
        # accuracy could also be computed from the confusion matrix:
        # cm_value = confusion_matrix.value()
        # acc2 = 0
        # for i in range(args.num_classes):
        #     acc2 += 100. * cm_value[i][i] / cm_value.sum()
        # measure accuracy and record loss
        losses.update(loss.item(), inputs.size(0))
        val_acc.update(acc1.item(), inputs.size(0))
    return losses.avg, val_acc.avg
def val(model, data_loader):
    """
    validation
    :param model:
    :param data_loader:
    :return:
    """
    # set the model in evaluation mode
    model.eval()
    confusion_matrix = meter.ConfusionMeter(k=2)
    for ii, data in enumerate(data_loader):
        input_, label = data
        val_input = Variable(input_)
        val_label = Variable(label.long())
        if opt.use_gpu:
            # .cuda() returns a copy: the results must be assigned back
            val_input = val_input.cuda()
            val_label = val_label.cuda()
        score = model(val_input)
        confusion_matrix.add(score.data.squeeze(), label.long())
    model.train()
    cm_value = confusion_matrix.value()
    accuracy = 100. * (cm_value[0][0] + cm_value[1][1]) / cm_value.sum()
    return confusion_matrix, accuracy
def val(model, dataloader, data_len):
    # set the model to evaluation mode
    criterion = FocalLoss(2)
    model.train(False)
    running_loss = 0
    running_corrects = 0
    confusion_matrix = meter.ConfusionMeter(2)
    for ii, data in enumerate(
            tqdm(dataloader, desc='Val On Anti-spoofing', unit='batch')):
        input, label = data
        with torch.no_grad():
            val_input = Variable(input)
            val_label = Variable(label)
            if opt.use_gpu:
                val_input = val_input.cuda()
                val_label = val_label.cuda()
            score = model(val_input)
            _, preds = torch.max(score, 1)
            loss = criterion(score, val_label)
        confusion_matrix.add(score.data.squeeze(), val_label)
        running_loss += loss.item() * val_input.size(0)
        running_corrects += torch.sum(preds == val_label.data)
    # restore the model to training mode
    model.train(True)
    cm_value = confusion_matrix.value()
    val_loss = running_loss / data_len
    val_accuracy = running_corrects.double() / float(data_len)
    return confusion_matrix, val_loss, val_accuracy
def val(model, val_loader):
    model.eval()
    con_matx = meter.ConfusionMeter(5)
    for ii, (data, label) in enumerate(val_loader):
        data = data.cuda()
        label = label.cuda()
        pre_ = model(data)
        con_matx.add(pre_.detach(), label.detach())
    cm_value = con_matx.value()
    # kappa is computed by the external kappa() helper; see the sketch below
    kappa_ = kappa(cm_value, 5)
    model.train()
    acc = 100. * sum(cm_value[i][i] for i in range(5)) / cm_value.sum()
    return con_matx, acc, kappa_
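# `kappa` is an external helper here. A minimal sketch of Cohen's kappa
# computed from a k x k confusion matrix, which is what the call site above
# appears to expect (signature assumed, not confirmed by the source): with
# observed agreement p_o = trace(cm) / n and chance agreement
# p_e = sum_i(row_i * col_i) / n^2, kappa = (p_o - p_e) / (1 - p_e).
import numpy as np

def kappa(cm, k):
    cm = np.asarray(cm, dtype=np.float64)
    n = cm.sum()
    p_o = np.trace(cm) / n                         # observed agreement
    p_e = (cm.sum(0) * cm.sum(1)).sum() / (n * n)  # agreement expected by chance
    return (p_o - p_e) / (1 - p_e)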
def _val_one_epoch(self, val_data):
    self.model.eval()
    confusion_matrix = meter.ConfusionMeter(self.num_classes)
    logger.info('Val on validation set...')
    for step, (data, label) in enumerate(val_data):
        # val model
        with t.no_grad():
            inputs = Variable(data)
            target = Variable(label.type(t.LongTensor))
            if len(self.params.gpus) > 0:
                inputs = inputs.cuda()
                target = target.cuda()
            score = self.model(inputs)
            # print(score)
            confusion_matrix.add(score.data.squeeze(), label.type(t.LongTensor))
    self.model.train()
    cm_value = confusion_matrix.value()
    print(cm_value)
    cm_value_correct = 0
    for i in range(self.num_classes):
        cm_value_correct += cm_value[i][i]
    accuracy = 100. * cm_value_correct / cm_value.sum()
    print('accuracy is {}'.format(accuracy))
    return confusion_matrix, accuracy
def get_metrics(model, criterion, dataloaders, dataset_sizes, phase='valid'):
    '''
    Loops over the given phase (train or valid) set to determine the accuracy,
    loss and confusion meter of the model.
    '''
    confusion_matrix = meter.ConfusionMeter(2, normalized=True)
    running_loss = 0.0
    running_corrects = 0
    for i, data in enumerate(dataloaders[phase]):
        print(i, end='\r')
        labels = data['label'].type(torch.FloatTensor)
        inputs = data['images'][0]
        # wrap them in Variable
        inputs = Variable(inputs.cuda())
        labels = Variable(labels.cuda())
        # forward
        outputs = model(inputs)
        outputs = torch.mean(outputs)
        loss = criterion(outputs, labels, phase)
        # statistics
        running_loss += loss.item() * inputs.size(0)
        preds = (outputs.data > 0.5).type(torch.cuda.FloatTensor)
        preds = preds.view(-1)
        running_corrects += torch.sum(preds == labels.data)
        confusion_matrix.add(preds, labels.data)
    loss = running_loss / dataset_sizes[phase]
    acc = running_corrects.item() / dataset_sizes[phase]
    print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, loss, acc))
    print('Confusion Meter:\n', confusion_matrix.value())
def _val_one_epoch(self):
    self.model.eval()
    confusion_matrix = meter.ConfusionMeter(6)
    logger.info('Val on validation set...')
    for step, (data, label) in enumerate(self.val_data):
        # val model
        inputs = Variable(data, volatile=True)
        target = Variable(label.type(t.LongTensor), volatile=True)
        if len(self.params.gpus) > 0:
            inputs = inputs.cuda()
            target = target.cuda()
        score = self.model(inputs)
        confusion_matrix.add(score.data.squeeze(), label.type(t.LongTensor))
    self.model.train()
    cm_value = confusion_matrix.value()
    # accuracy = 100 * (sum of the diagonal) / (sum of all entries)
    accuracy = 100. * sum(cm_value[i][i] for i in range(6)) / cm_value.sum()
    print("val accuracy:{}".format(accuracy))
    return confusion_matrix, accuracy
def val(model, dataloader):
    '''
    Compute the model's accuracy and related metrics on the validation set.
    '''
    # switch the model to evaluation mode
    model.eval()
    confusion_matrix = meter.ConfusionMeter(2)
    for ii, data in tqdm(enumerate(dataloader)):
        input, label = data
        val_input = Variable(input, volatile=True)
        val_label = Variable(label.type(t.LongTensor), volatile=True)
        if opt.use_gpu:
            val_input = val_input.cuda()
            val_label = val_label.cuda()
        score = model(val_input)
        confusion_matrix.add(score.data.squeeze(), label.type(t.LongTensor))
    # switch the model back to training mode
    model.train()
    # cm_value: the values of the confusion matrix
    cm_value = confusion_matrix.value()
    # accuracy = 100 * (number of correct predictions) / (total)
    accuracy = 100. * (cm_value[0][0] + cm_value[1][1]) / cm_value.sum()
    return confusion_matrix, accuracy
def val(model, dataloader):
    # Switch the model to evaluation mode; it must be set back to training mode
    # afterwards, since this affects layers such as BatchNorm and Dropout.
    model.eval()
    # initialize the confusion matrix for binary classification
    confusion_matrix = meter.ConfusionMeter(2)
    for ii, data in tqdm(enumerate(dataloader)):
        input, label = data
        val_input = Variable(input, volatile=True)
        if opt.use_gpu:
            val_input = val_input.cuda()
        # compute validation scores
        score = model(val_input)
        # update the confusion matrix
        confusion_matrix.add(score.data.squeeze(), label.type(t.LongTensor))
    # set the model back to training mode
    model.train()
    # read out the confusion matrix
    cm_value = confusion_matrix.value()
    # accuracy of correctly classified samples
    accuracy = 100. * (cm_value[0][0] + cm_value[1][1]) / cm_value.sum()
    return confusion_matrix, accuracy
def val(model, dataloader):
    '''
    Compute the model's accuracy and related metrics on the validation set,
    to assist training.
    '''
    # put the model in evaluation mode
    model.eval()
    confusion_matrix = meter.ConfusionMeter(2)
    for ii, data in enumerate(dataloader):
        input, label = data
        val_input = Variable(input, volatile=True)
        val_label = Variable(label.type(t.LongTensor), volatile=True)
        if opt.use_gpu:
            val_input = val_input.cuda()
            val_label = val_label.cuda()
        score = model(val_input)
        confusion_matrix.add(score.data.squeeze(), label.type(t.LongTensor))
    # put the model back in training mode
    model.train()
    # values of the confusion matrix
    cm_value = confusion_matrix.value()
    # accuracy = 100 * (number of correct predictions) / (total),
    # i.e. dogs predicted as dogs plus cats predicted as cats
    accuracy = 100. * (cm_value[0][0] + cm_value[1][1]) / cm_value.sum()
    return confusion_matrix, accuracy
def __init__(self, opt):
    super(ModelWrapper, self).__init__()
    if opt.model_name == "mobilenetv1":
        from .mobilenet import MobileNet
        self._net = MobileNet(num_classes=opt.num_classes)
    elif opt.model_name == "resnet50":
        from .resnet import resnet50
        self._net = resnet50(num_classes=opt.num_classes)
    self.optimizer = optim.SGD(
        self._net.parameters(),
        lr=opt.lr,
        momentum=opt.momentum,
        weight_decay=opt.weight_decay,
    )
    # self.optimizer = optim.Adam(
    #     self._net.parameters(),
    #     lr=opt.lr,
    #     weight_decay=opt.weight_decay
    # )
    self._criterion = nn.CrossEntropyLoss()
    self.confusion_matrix = meter.ConfusionMeter(opt.num_classes)
def test(self, test_data, val=False):
    self.model.eval()
    confusion_matrix = meter.ConfusionMeter(10)
    test_dataloader = DataLoader(test_data, 2000, shuffle=True)
    result = np.array([])
    for i, (data, label) in enumerate(test_dataloader):
        if self.opt.use_gpu:
            data = data.cuda()
            label = label.cuda()
        score = self.model(data)
        target_digit = T.argmax(label, 1)
        if not val:
            result = np.concatenate((result, target_digit.cpu().numpy()), 0)
        out_digit = T.argmax(score, 1)
        confusion_matrix.add(out_digit, target_digit)
    accuracy = 100 * sum(confusion_matrix.value()[i, i]
                         for i in range(10)) / confusion_matrix.value().sum()
    self.model.train()
    if val:
        return confusion_matrix, accuracy
    else:
        return result, confusion_matrix, accuracy
def train(train_loader, val_loader, model, criterion, optimizer, epoch):
    """
    Train for one epoch.
    :param train_loader:
    :param model:
    :param criterion:
    :param optimizer:
    :param epoch:
    :return:
    """
    batch_time = meter.AverageValueMeter()
    data_time = meter.AverageValueMeter()
    loss_meter = meter.AverageValueMeter()
    confusion_matrix = meter.ConfusionMeter(2)
    meter_log = logger.MeterLogger()
    # switch to train mode
    model.train()
    end = time.time()
    # iterate over the training set
    pbar = tqdm(train_loader)
    ii = 0
    for (input, target) in pbar:
        pbar.set_description('Epoch[{:>2d}/{}] training on batches'.format(
            epoch, args.epochs))
        # measure data loading time
        data_time.add(time.time() - end)
        if args.gpu is not None:
            input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        # compute output
        output = model(input)
        loss = criterion(output, target)
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_meter.add(loss.data.item())
        # measure elapsed time
        batch_time.add(time.time() - end)
        end = time.time()
        if ii % args.print_freq == (args.print_freq - 1):
            vis.plot('loss', loss_meter.value()[0])
        ii += 1
    val_cm, val_accuracy = val(model, val_loader)
    vis.plot('val_accuracy', val_accuracy)
def train(**kwargs):
    opt.parse(kwargs)
    vis = Visualizer(opt.env)
    # step 1: configure the model
    model = getattr(Nets, opt.model)()
    if opt.load_model_path:
        model.load(opt.load_model_path)
    if opt.use_gpu:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        model.to(device)
    # step 2: data
    train_data = imageSentiment(opt.train_path, train=True)   # training set
    val_data = imageSentiment(opt.train_path, train=False)    # validation set
    train_dataloader = DataLoader(train_data, batch_size=opt.batch_size,
                                  shuffle=True, num_workers=opt.num_workers)
    val_dataloader = DataLoader(val_data, batch_size=opt.batch_size,
                                shuffle=False, num_workers=opt.num_workers)
    # step 3: define the loss function and optimizer
    # criterion = nn.CrossEntropyLoss()  # cross-entropy loss; with this loss the
    # network must not apply softmax at the end
    lr = opt.lr
    # optimizer = Optim.Adam(model.parameters(), lr=lr, weight_decay=opt.weight_decay)
    optimizer = Optim.SGD(model.parameters(), lr=0.001, momentum=0.9, nesterov=True)
    # step 4: metrics (average loss and confusion matrix)
    loss_meter = meter.AverageValueMeter()
    confusion_matrix = meter.ConfusionMeter(7)
    previous_loss = 1e100
    # training
    for i in range(opt.max_epoch):
        loss_meter.reset()
        confusion_matrix.reset()
        total_loss = 0.
        for ii, (label, data) in tqdm(enumerate(train_dataloader),
                                      total=len(train_dataloader)):
            if opt.use_gpu:
                label, data = label.to(device), data.to(device)
            optimizer.zero_grad()
            score = model(data)
            # note: with nll_loss or CrossEntropyLoss for multi-class
            # classification the target is the class index, not one-hot
            loss = F.nll_loss(score, label)
            total_loss += loss.item()
            loss.backward()
            optimizer.step()
            # update metrics and visualization
            loss_meter.add(loss.item())
            confusion_matrix.add(score.data, label.data)
            if ii % opt.print_freq == opt.print_freq - 1:
                vis.plot('loss', loss_meter.value()[0])
        vis.plot('mach avgloss', total_loss / len(train_dataloader))
        model.save()
        # compute metrics on the validation set
        val_accuracy = val(model, val_dataloader)
        vis.plot('val_accuracy', val_accuracy)
def __init__(self, model, model_name, train_params, train_data, val_data=None,
             val_data_brand_list=None, train_data_len=0, num_classes=2,
             visualizer_port=8899):
    assert isinstance(train_params, TrainParams)
    self.params = train_params
    self.num_classes = num_classes
    self.model_name = model_name
    self.visualizer_port = visualizer_port
    self.train_data_len = train_data_len
    # Data loaders
    self.train_data = train_data
    self.val_data = val_data
    self.val_data_brand_list = val_data_brand_list
    # criterion, optimizer and learning rate
    self.last_epoch = 0
    self.criterion = self.params.criterion
    self.optimizer = self.params.optimizer
    self.lr_scheduler = self.params.lr_scheduler
    logger.info('Set criterion to {}'.format(type(self.criterion)))
    logger.info('Set optimizer to {}'.format(type(self.optimizer)))
    logger.info('Set lr_scheduler to {}'.format(type(self.lr_scheduler)))
    # load model
    self.model = model
    logger.info('Set output dir to {}'.format(self.params.save_dir))
    if not os.path.isdir(self.params.save_dir):
        os.makedirs(self.params.save_dir)
    ckpt = self.params.ckpt
    if ckpt is not None:
        self._load_ckpt(ckpt)
        logger.info('Load ckpt from {}'.format(ckpt))
    # meters
    self.loss_meter = meter.AverageValueMeter()
    self.confusion_matrix = meter.ConfusionMeter(self.num_classes)
    # set CUDA_VISIBLE_DEVICES
    if len(self.params.gpus) > 0:
        gpus = ','.join([str(x) for x in self.params.gpus])
        os.environ['CUDA_VISIBLE_DEVICES'] = gpus
        self.params.gpus = tuple(range(len(self.params.gpus)))
        logger.info('Set CUDA_VISIBLE_DEVICES to {}...'.format(gpus))
        self.model = nn.DataParallel(self.model, device_ids=self.params.gpus)
        self.model = self.model.cuda()
    self.model.train()
def test(**kwargs):
    config.parse(kwargs)
    # prepare data
    # note: don't pass balance=False here, otherwise the resulting Dataset
    # will include the mixed type
    test_data = Vertebrae_Dataset(config.data_root, config.test_paths, phase='test')
    test_dataloader = DataLoader(test_data, batch_size=config.batch_size,
                                 shuffle=False, num_workers=config.num_workers)
    # test_data = FrameDiff_Dataset(config.data_root, config.test_paths, phase='test', balance=config.data_balance)
    # test_dataloader = DataLoader(test_data, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)
    print('Test Image:', test_data.__len__())
    # prepare model
    # model = ResNet34(num_classes=config.num_classes)
    # model = DenseNet121(num_classes=config.num_classes)
    # model = CheXPre_DenseNet121(num_classes=config.num_classes)
    # model = MultiResDenseNet121(num_classes=config.num_classes)
    # model = Vgg19(num_classes=config.num_classes)
    model = MultiResVgg19(num_classes=config.num_classes)
    if config.load_model_path:
        model.load(config.load_model_path)
        print('Model has been loaded!')
    else:
        print("Don't load model")
    if config.use_gpu:
        model.cuda()
    if config.parallel:
        model = torch.nn.DataParallel(
            model, device_ids=[x for x in range(config.num_of_gpu)])
    model.eval()
    test_cm = meter.ConfusionMeter(config.num_classes)
    softmax = functional.softmax
    # go through the model
    for i, (image, label, image_path) in tqdm(enumerate(test_dataloader)):
        img = Variable(image, volatile=True)
        target = Variable(label)
        if config.use_gpu:
            img = img.cuda()
            target = target.cuda()
        score = model(img)
        test_cm.add(softmax(score, dim=1).data, target.data)
    SE, SP, ACC = calculate_index(test_cm.value())
    print('confusion matrix:')
    print(test_cm.value())
    print('Sensitivity:', SE)
    print('Specificity:', SP)
    print('test accuracy:', ACC)
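# `calculate_index` is an external helper. A minimal sketch for the binary
# case (name and return order assumed from the call site above), with rows as
# targets, columns as predictions, and class 1 taken as positive:
def calculate_index(cm):
    TN, FP = cm[0][0], cm[0][1]
    FN, TP = cm[1][0], cm[1][1]
    SE = TP / float(TP + FN)           # sensitivity / recall
    SP = TN / float(TN + FP)           # specificity
    ACC = (TP + TN) / float(cm.sum())  # overall accuracy
    return SE, SP, ACC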
def val(model, val_loader):
    val_cm = m.ConfusionMeter(2)
    model.eval()
    for batch, label in tqdm(val_loader):
        batch, label = batch.to(opt.device), label.to(opt.device)
        score = model(batch)
        val_cm.add(score.detach(), label.detach())
    val_acc = np.trace(val_cm.value()) / np.sum(val_cm.value())
    return val_cm, float(val_acc)
def run_epoch(stage, state, data_loader):
    """stage = 'train' or 'test' or 'val' or anything"""
    if stage == 'train':
        state.model.train()
    else:
        state.model.eval()
    pbar = tqdm(total=len(data_loader), leave=False)
    _loss = meter.AverageValueMeter()
    _acc = meter.ClassErrorMeter(accuracy=True)
    _conf = meter.ConfusionMeter(k=10, normalized=True)
    for batch_idx, (data, target) in enumerate(data_loader):
        data, target = data.to(state.args.device), target.to(state.args.device)
        if stage == 'train':
            state.optimizer.zero_grad()
        output = state.model(data)
        loss = F.nll_loss(output, target)
        if stage == 'train':
            loss.backward()
            state.optimizer.step()
        state.writer.add_scalar(stage + '/loss-iter', loss.mean(),
                                (batch_idx + state.epoch * len(data_loader)))  # * data.size()[0]
        _loss.add(loss.mean().item())
        _acc.add(output, target)
        _conf.add(output, target)
        if batch_idx % state.args.pbar_interval == 0:
            pbar.desc = '{:6s}'.format(stage)
            # AverageValueMeter.value() returns (mean, std) and
            # ClassErrorMeter.value() returns a list, so index into them
            pbar.postfix = 'Loss {:.4f} Acc {:.4f}%'.format(_loss.value()[0], _acc.value()[0])
            pbar.update(state.args.pbar_interval)
    if stage == 'train':
        state.scheduler.step()
    pbar.close()
    state.epoch_pbar.desc += ' {:6s}: loss {:.4f}, Acc {:.4f}% |'.format(
        stage, _loss.value()[0], _acc.value()[0])
    state.epoch_pbar.update()
    state.writer.add_scalar(stage + '/avg_loss-epoch', _loss.value()[0], state.epoch)
    state.writer.add_scalar(stage + '/avg_acc-epoch', _acc.value()[0], state.epoch)
    state.writer.add_heatmap(stage + '/conf_matrix-epoch', _conf.value(), state.epoch,
                             y_title=data_loader.dataset.classes,
                             x_title=data_loader.dataset.classes)
    result = {'loss': _loss.value()[0], 'acc': _acc.value()[0]}
    return result
def __init__(self, model, params, train_data, val_data=None):
    # Data loaders
    self.train_data = train_data
    self.val_data = val_data
    # criterion, optimizer and learning rate
    self.last_epoch = 0
    self.inital_epoch = 0
    self.criterion = params.criterion
    self.optimizer = params.optimizer
    self.lr_scheduler = params.lr_scheduler
    logger.info('Set criterion to {}'.format(type(self.criterion)))
    logger.info('Set optimizer to {}'.format(type(self.optimizer)))
    logger.info('Set lr_scheduler to {}'.format(type(self.lr_scheduler)))
    # load model
    self.model = model
    logger.info('Set output dir to {}'.format(params.save_dir))
    if not os.path.isdir(params.save_dir):
        os.makedirs(params.save_dir)
    ckpt = params.ckpt
    if ckpt is not None:
        self._load_ckpt(ckpt)
        logger.info('Load ckpt from {}'.format(ckpt))
    # meters
    self.loss_meter_hat = meter.AverageValueMeter()
    self.loss_meter_cloth = meter.AverageValueMeter()
    self.loss_meter_all = meter.AverageValueMeter()
    self.confusion_matrix_hat = meter.ConfusionMeter(2)
    self.confusion_matrix_cloth = meter.ConfusionMeter(2)
    # set CUDA_VISIBLE_DEVICES
    if len(params.gpus) > 0:
        gpus = ','.join([str(x) for x in params.gpus])
        os.environ['CUDA_VISIBLE_DEVICES'] = gpus
        params.gpus = tuple(range(len(params.gpus)))
        logger.info('Set CUDA_VISIBLE_DEVICES to {}...'.format(gpus))
        self.model = nn.DataParallel(self.model, device_ids=params.gpus)
        self.model = self.model.cuda()
    self.model.train()
def test(model, test_dataloader, config, model_num):
    with torch.no_grad():
        model.eval()
        score_list = []
        comatrix_list = []
        i = 0
        data_name = 'test'
        for test_batch in tqdm(test_dataloader):
            class_test = test_batch['labels']
            test_batch['ct_mask'] = test_batch['ct_mask'].cuda()
            test_batch['CTs'] = test_batch['CTs'].cuda()
            test_class_pred, _ = model(test_batch)
            confusion_matrix = meter.ConfusionMeter(config.num_classes)
            confusion_matrix.add(test_class_pred.detach().cpu(), class_test.cpu())
            comatrix_list.append(confusion_matrix.value())
            if i == 0:
                test_scores = test_class_pred
                test_labels = class_test
            else:
                test_scores = torch.cat((test_scores, test_class_pred), dim=0)
                test_labels = torch.cat((test_labels, class_test), dim=0)
            i = i + 1
        model.train()
        cm_all = np.array(comatrix_list).sum(axis=0)
        print('Testing on dataset2 using pre-trained model %d.' % model_num)
        print('Testing confusion matrix: ')
        print(cm_all)
        acc_epoch = (float(cm_all[0, 0]) + float(cm_all[1, 1])) / cm_all.sum()
        spec_epoch = float(cm_all[0, 0]) / (cm_all[0, 0] + cm_all[0, 1])  # TN: cm_all[0, 0], FP: cm_all[0, 1]
        sen_epoch = float(cm_all[1, 1]) / (cm_all[1, 0] + cm_all[1, 1])   # FN: cm_all[1, 0], TP: cm_all[1, 1]
        precision_epoch = float(cm_all[1, 1]) / (cm_all[0, 1] + cm_all[1, 1])
        print('Test accuracy on %s: %.3f' % (data_name, acc_epoch))
        print('Test sensitivity on %s: %.3f' % (data_name, sen_epoch))
        print('Test specificity on %s: %.3f' % (data_name, spec_epoch))
        print('Test precision on %s: %.3f' % (data_name, precision_epoch))
        config.test_scores = test_scores
        config.test_labels = test_labels
        with open('res/%s/test_data2_fold%d_res.pickle'
                  % ('test_res_on_dataset2_' + check_dir, model_num), 'wb') as fp:
            pickle.dump(config, fp)
def train(self, train_data, val_data=None):
    print('Now we begin training')
    train_dataloader = DataLoader(train_data, batch_size=self.opt.batch_size, shuffle=True)
    # val_dataloader = DataLoader(val_data, self.opt.batch_size, shuffle=True)
    vis = Visualizer(env=self.opt.env)
    if self.opt.use_gpu:
        self.model.cuda()
    previous_loss = 1e10
    loss_meter = meter.AverageValueMeter()
    confusion_matrix = meter.ConfusionMeter(10)
    for epoch in range(self.opt.max_epoch):
        loss_meter.reset()
        confusion_matrix.reset()
        for i, (data, label) in enumerate(train_dataloader, 0):
            if self.opt.use_gpu:
                data = data.cuda()
                label = label.cuda()
            self.optimizer.zero_grad()
            score = self.model(data)
            out_classes = T.argmax(score, 1)
            target_digit = T.argmax(label, 1)
            loss = self.criterion(score, label)
            loss.backward()
            self.optimizer.step()
            # update metrics
            loss_meter.add(loss.item())
            confusion_matrix.add(out_classes, target_digit)
            accuracy = 100 * sum(confusion_matrix.value()[i, i]
                                 for i in range(10)) / confusion_matrix.value().sum()
            if i % self.opt.print_freq == self.opt.print_freq - 1:
                print('EPOCH:{0},i:{1},loss:{2:.6f}'.format(epoch, i, loss.item()))
                vis.plot('loss', loss_meter.value()[0])
                vis.plot('test_accuracy', accuracy)
        if val_data:
            val_cm, val_ac = self.test(val_data, val=True)
            vis.plot('Val_accuracy', val_ac)
            vis.img('Val Confusion_matrix', T.Tensor(val_cm.value()))
        # decay the learning rate if the loss no longer decreases
        # (AverageValueMeter.value() returns (mean, std), so compare the mean)
        if loss_meter.value()[0] > previous_loss:
            self.opt.lr = self.opt.lr * self.opt.lr_decay
            print('learning rate:{}'.format(self.opt.lr))
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = self.opt.lr
        previous_loss = loss_meter.value()[0]
def test_for_train(model):
    test_data = Flower(test=True)
    test_dataloader = DataLoader(test_data, batch_size=opt.batch_size,
                                 shuffle=False, num_workers=opt.num_workers)
    model.eval()
    confusion_matrix = meter.ConfusionMeter(17)
    # use argmax: take the max-scoring index as the prediction
    # (sketch of the unfinished loop; opt.use_gpu handling assumed,
    # as in the other snippets)
    correct_sum = 0
    total_cnt = 0
    for data, label in test_dataloader:
        if opt.use_gpu:
            data, label = data.cuda(), label.cuda()
        score = model(data)
        confusion_matrix.add(score.detach().squeeze(), label.long())
        correct_sum += (torch.argmax(score, 1) == label).sum().item()
        total_cnt += label.size(0)
    model.train()
    return confusion_matrix, 100. * correct_sum / total_cnt
def val(model, dataloader):
    model.eval()
    confusion_matrix = meter.ConfusionMeter(2)
    for _, (val_input, label) in enumerate(tqdm(dataloader)):
        val_input = val_input.to(opt.device)
        score = model(val_input)
        confusion_matrix.add(score.detach().squeeze(), label.type(torch.LongTensor))
    model.train()
    cm_value = confusion_matrix.value()
    accuracy = 100. * (cm_value[0][0] + cm_value[1][1]) / cm_value.sum()
    return confusion_matrix, accuracy