Code Example #1
File: train.py  Project: stevenwierda/code_finale
import numpy as np
import torch
import matplotlib.pyplot as plt

# Project-local import (module path assumed, not shown in the original file):
# from utils.metrics import Evaluator

def validation(args, val_loader, network, epoch, loss, optimizer, writer, saver, summary, best_pred, classnames):

    # Define Evaluator
    evaluator = Evaluator(args.num_classes)

    test_loss = 0.0
    evaluator.reset()

    # Put the model in eval() mode and move it to the available device
    # (the original mixed network.cuda() with an undefined `device`)
    network.eval()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    network.to(device)
    num_img_tr = len(val_loader)
    for i, sample in enumerate(val_loader):
        # Get an output from the network
        img, target = sample
        with torch.no_grad():
            img = img.to(device)
            target = target.to(device)
            output = network(img)
        test_loss += loss(output, target).item()  # accumulate as a plain float
        pred = output.data.cpu().numpy()
        target = target.cpu().numpy()
        pred = np.argmax(pred, axis=1)
        # Add batch sample into evaluator
        evaluator.add_batch(target, pred)
        if i % (num_img_tr // 1) == 0:  # true only for the first batch (i == 0)
            global_step = i + num_img_tr * epoch
            summary.visualize_image(writer, img, target, pred, global_step)
    val_loader = None  # drop the loader reference once the loop is done

    # Fast test during the training
    Acc = evaluator.Pixel_Accuracy()
    Acc_class = evaluator.Pixel_Accuracy_Class()
    mIoU = evaluator.Mean_Intersection_over_Union()
    FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()
    F1score = evaluator.F1_Score()
    Recall = evaluator.Recall()
    Precision = evaluator.Precision()
    IoU = evaluator.IoU()
    evaluator.plot_confusion_matrix(evaluator.confusion_matrix,
                                    classes=classnames,
                                    normalize=True)

    # Send the confusion matrix to TensorBoard; plot_confusion_matrix is
    # assumed to have saved the figure to this hardcoded path beforehand
    confusion_matrix = plt.imread('/home/student/aeroscan/tileimp/test.png')[:, :, :3]
    confusion_matrix = np.transpose(confusion_matrix, (2, 0, 1))  # HWC -> CHW
    confusion_matrix = torch.from_numpy(confusion_matrix)
    writer.add_image('confusion matrix', confusion_matrix, global_step)

    writer.add_scalar('val/total_loss_epoch', test_loss, epoch)
    writer.add_scalar('val/mIoU', mIoU, epoch)
    writer.add_scalar('val/Acc', Acc, epoch)
    writer.add_scalar('val/Acc_class', Acc_class, epoch)
    writer.add_scalar('val/fwIoU', FWIoU, epoch)
    writer.add_scalar("val/F1score", F1score, epoch)
    writer.add_scalar("val/Recall", Recall, epoch)
    writer.add_scalar("val/Precision", Precision, epoch)
    writer.add_scalar("val/IoU", Precision, epoch)
    print('Validation:')
    print('[Epoch: %d]' % epoch)
    print("""
            Acc:{}          Acc_class:{}
            mIoU:{}         fwIoU:{}
            Precision:{}    F1score:{}
            Recall:{}       IoU:{}"""
            .format(Acc,Acc_class,mIoU,FWIoU,Precision,F1score,Recall,IoU))
    print('Loss: %.3f' % test_loss)

    new_pred = Precision  # model selection is driven by precision here
    if new_pred > best_pred:
        print("New best pred.txt.")
        is_best = True
        best_pred = new_pred
        saver.save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': network.state_dict(),
                'optimizer': optimizer.state_dict(),
                'best_pred': best_pred,
            }, is_best)
    return best_pred, saver
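
Both examples depend on a project-local Evaluator that accumulates a per-class confusion matrix across batches. Neither file shows it, so here is a minimal sketch of the design this API usually implies (as in pytorch-deeplab-xception, which this code resembles); it covers only a few of the methods called above and is an illustration, not either project's actual code.

import numpy as np

class Evaluator:
    """Confusion-matrix based segmentation metrics (sketch, assumed design)."""

    def __init__(self, num_class):
        self.num_class = num_class
        self.confusion_matrix = np.zeros((num_class, num_class))

    def _generate_matrix(self, gt, pred):
        # Rows index the ground-truth class, columns the predicted class
        mask = (gt >= 0) & (gt < self.num_class)
        label = self.num_class * gt[mask].astype(int) + pred[mask]
        count = np.bincount(label, minlength=self.num_class ** 2)
        return count.reshape(self.num_class, self.num_class)

    def add_batch(self, gt, pred):
        assert gt.shape == pred.shape
        self.confusion_matrix += self._generate_matrix(gt, pred)

    def reset(self):
        self.confusion_matrix = np.zeros((self.num_class, self.num_class))

    def Pixel_Accuracy(self):
        return np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()

    def Mean_Intersection_over_Union(self):
        diag = np.diag(self.confusion_matrix)
        union = (self.confusion_matrix.sum(axis=1)
                 + self.confusion_matrix.sum(axis=0) - diag)
        return np.nanmean(diag / union)

The class-wise Recall, Precision, F1 and IoU methods called in both snippets would be built from the same matrix: row sums give ground-truth pixel totals, column sums give prediction totals.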
Code Example #2
File: train.py  Project: xupine/DFPENet
import os

import numpy as np
import torch
from tqdm import tqdm

# Project-local imports (module paths assumed, following the
# pytorch-deeplab-xception layout this code appears to derive from):
# from mypath import Path
# from dataloaders import make_data_loader
# from modeling.DFPENet import DFPENet
# from modeling.sync_batchnorm.replicate import patch_replication_callback
# from utils.loss import SegmentationLosses
# from utils.calculate_weights import calculate_weigths_labels
# from utils.lr_scheduler import LR_Scheduler
# from utils.saver import Saver
# from utils.summaries import TensorboardSummary
# from utils.metrics import Evaluator

class Trainer(object):
    def __init__(self, args):
        self.args = args

        # Define Saver
        self.saver = Saver(args)
        self.saver.save_experiment_config()
        # Define Tensorboard Summary
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()
        
        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(args, **kwargs)

        # Define network
        model = DFPENet(num_classes=self.nclass,
                        backbone=args.backbone,
                        output_stride=args.out_stride,
                        sync_bn=args.sync_bn,
                        freeze_bn=args.freeze_bn)

        train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr},
                        {'params': model.get_10x_lr_params(), 'lr': args.lr * 10}]

        # Define Optimizer
        optimizer = torch.optim.SGD(train_params, momentum=args.momentum,
                                    weight_decay=args.weight_decay, nesterov=args.nesterov)

        # Define Criterion
        # whether to use class balanced weights
        if args.use_balanced_weights:
            classes_weights_path = os.path.join(Path.db_root_dir(args.dataset), args.dataset+'_classes_weights.npy')
            if os.path.isfile(classes_weights_path):
                weight = np.load(classes_weights_path)
            else:
                weight = calculate_weigths_labels(args.dataset, self.train_loader, self.nclass)
            weight = torch.from_numpy(weight.astype(np.float32))
        else:
            weight = None
        self.criterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode=args.loss_type)
        self.model, self.optimizer = model, optimizer
        
        # Define Evaluator
        self.evaluator = Evaluator(self.nclass)
        # Define lr scheduler
        self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr,
                                            args.epochs, len(self.train_loader))

        # Using cuda
        if args.cuda:
            self.model = self.model.cuda()
            self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)
            patch_replication_callback(self.model)
            

        # Resuming checkpoint
        self.best_pred = 0.0
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'" .format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            # both branches of the original if/else were identical
            self.model.load_state_dict(checkpoint['state_dict'])
            if not args.ft:
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            #self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))

        # Clear start epoch if fine-tuning
        if args.ft:
            args.start_epoch = 0

    def training(self, epoch):
        train_loss = 0.0
        self.model.train()
        tbar = tqdm(self.train_loader)
        num_img_tr = len(self.train_loader)
        for i, sample in enumerate(tbar):
            image, target = sample['image'], sample['label']
            if self.args.cuda:
                image, target = image.cuda(), target.cuda()
            self.scheduler(self.optimizer, i, epoch, self.best_pred)
            self.optimizer.zero_grad()
            output = self.model(image)
            loss = self.criterion(output, target)
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))
            self.writer.add_scalar('train/total_loss_iter', loss.item(), i + num_img_tr * epoch)

            # Visualize inference results for the first batch of each epoch
            if i % (num_img_tr // 1) == 0:
                global_step = i + num_img_tr * epoch
                self.summary.visualize_image(self.writer, self.args.dataset, image, target, output, global_step)

        self.writer.add_scalar('train/total_loss_epoch', train_loss, epoch)
        print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))
        print('Loss: %.3f' % train_loss)

        if self.args.no_val:
            # save checkpoint every epoch
            is_best = False
            self.saver.save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': self.model.state_dict(),
                'optimizer': self.optimizer.state_dict(),
                'best_pred': self.best_pred,
            }, is_best)


    def validation(self, epoch):
        self.model.eval()
        self.evaluator.reset()
        tbar = tqdm(self.val_loader, desc='\r')
        test_loss = 0.0
        for i, sample in enumerate(tbar):
            image, target = sample['image'], sample['label']
            if self.args.cuda:
                image, target = image.cuda(), target.cuda()
            with torch.no_grad():
                output = self.model(image)
            loss = self.criterion(output, target)
            test_loss += loss.item()
            tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1)))
            pred = output.data.cpu().numpy()
            target = target.cpu().numpy()
            pred = np.argmax(pred, axis=1)
            # Add batch sample into evaluator
            self.evaluator.add_batch(target, pred)

        if epoch == 200:  # plot the confusion matrix once, at a fixed epoch
            self.evaluator.plot_confusion_matrix(epoch)

        # Fast test during the training

        Rec = self.evaluator.Pixel_Accuracy_ALLClass()
        Pre = self.evaluator.Pixel_Precision_ALLClass()
        F1 = self.evaluator.F1_ALLClass()
        F1_mean = self.evaluator.F1_MEANClass()
        IoU = self.evaluator.Class_Intersection_over_Union()
        Acc = self.evaluator.Pixel_Accuracy()
        Acc_class = self.evaluator.Pixel_Accuracy_Class()
        mIoU = self.evaluator.Mean_Intersection_over_Union()
        FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union()
        self.writer.add_scalar('val/total_loss_epoch', test_loss, epoch)
        self.writer.add_scalar('val/Rec[1]', Rec[1], epoch)
        self.writer.add_scalar('val/Pre[1]', Pre[1], epoch)
        self.writer.add_scalar('val/F1[0]', F1[0], epoch)
        self.writer.add_scalar('val/F1[1]', F1[1], epoch)
        self.writer.add_scalar('val/F1_mean', F1_mean, epoch)
        self.writer.add_scalar('val/IoU[0]', IoU[0], epoch)
        self.writer.add_scalar('val/IoU[1]', IoU[1], epoch)
        self.writer.add_scalar('val/mIoU', mIoU, epoch)
        self.writer.add_scalar('val/Acc', Acc, epoch)
        self.writer.add_scalar('val/Acc_class', Acc_class, epoch)
        self.writer.add_scalar('val/fwIoU', FWIoU, epoch)
        print('Validation:')
        print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))
        print("F1[0]:{}, F1[1]:{}, F1_mean: {}".format(F1[0], F1[1], F1_mean, ))
        print("IoU[0]:{}, IoU[1]:{}, mIoU: {}".format(IoU[0], IoU[1], mIoU))
        print("Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}".format(Acc, Acc_class, mIoU, FWIoU))
        print("Rec[1]:{}, Pre[1]:{}".format(Rec[1], Pre[1]))
        print('Loss: %.3f' % test_loss)

        filename = "./rec.txt"
        with open(filename,'a', encoding='utf-8') as f:
            f.writelines(str(Rec[1])+'\n')

        filename1 = "./pre.txt"
        with open(filename1,'a', encoding='utf-8') as f1:
            f1.writelines(str(Pre[1])+'\n')

        filename2 = "./miou.txt"
        with open(filename2,'a', encoding='utf-8') as f2:
            f2.writelines(str(IoU[1])+'\n')


        new_pred = IoU[1]  # model selection is driven by the class-1 IoU
        if new_pred > self.best_pred:
            is_best = True
            self.best_pred = new_pred
            self.saver.save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': self.model.state_dict(),
                'optimizer': self.optimizer.state_dict(),
                'best_pred': self.best_pred,
            }, is_best)
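
Neither the argument parser nor the entry point is shown for this Trainer. A hypothetical driver, assuming parse_args() builds the namespace of flags referenced above (start_epoch, epochs, no_val, ...), could look like this:

def main():
    args = parse_args()  # hypothetical argparse helper, not part of the listing
    trainer = Trainer(args)
    for epoch in range(args.start_epoch, args.epochs):
        trainer.training(epoch)
        if not args.no_val:
            trainer.validation(epoch)
    trainer.writer.close()  # flush the TensorBoard summary

if __name__ == '__main__':
    main()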