Example No. 1
    def __init__(self, args):
        self.args = args

        # Define Saver
        self.saver = Saver(args)
        self.saver.save_experiment_config()

        # Define Dataloader
        if args.dataset == 'Cityscapes':
            kwargs = {'num_workers': args.num_workers, 'pin_memory': True}
            self.train_loader, self.val_loader, self.test_loader, self.num_class = make_data_loader(args, **kwargs)

        # Define network
        if args.net == 'resnet101':
            blocks = [2,4,23,3]
            fpn = FPN(blocks, self.num_class, back_bone=args.net)

        # Define Optimizer
        self.lr = self.args.lr
        if args.optimizer == 'adam':
            self.lr = self.lr * 0.1
            # Adam does not accept a momentum argument
            optimizer = torch.optim.Adam(fpn.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        elif args.optimizer == 'sgd':
            optimizer = torch.optim.SGD(fpn.parameters(), lr=args.lr, momentum=0, weight_decay=args.weight_decay)

        # Define Criterion
        if args.dataset == 'Cityscapes':
            weight = None
            self.criterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode='ce')

        self.model = fpn
        self.optimizer = optimizer

        # Define Evaluator
        self.evaluator = Evaluator(self.num_class)

        # Multi-GPU training
        if args.mGPUs:
            self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)

        # Using cuda
        if args.cuda:
            self.model = self.model.cuda()


        # Resuming checkpoint
        self.best_pred = 0.0
        self.lr_stage = [68, 93]
        self.lr_staget_ind = 0 
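
A minimal usage sketch for the constructor above; the class name Trainer and the exact argparse flags are assumptions inferred from the attributes it reads, not something the source confirms.

# Hypothetical driver for the __init__ shown above (assumed class name: Trainer)
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='Cityscapes')
parser.add_argument('--net', default='resnet101')
parser.add_argument('--optimizer', default='sgd')
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--weight_decay', type=float, default=1e-4)
parser.add_argument('--num_workers', type=int, default=4)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--mGPUs', action='store_true')
parser.add_argument('--gpu_ids', nargs='+', type=int, default=[0])
args = parser.parse_args()

trainer = Trainer(args)   # constructs loaders, model, optimizer and criterion as above
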
Example No. 2
def main():
    args = parse_args()
    if args.dataset == 'Cityscapes':
        num_class = 19

    if args.net == 'resnet101':
        blocks = [2, 4, 23, 3]
        model = FPN(blocks, num_class, back_bone=args.net)

    if args.checkname is None:
        args.checkname = 'fpn-' + str(args.net)

    #evaluator = Evaluator(num_class)

    # Trained model path and name
    experiment_dir = args.experiment_dir
    #load_name = os.path.join(experiment_dir, 'checkpoint.pth.tar')
    # Hard-coded absolute path to the trained model checkpoint
    load_name = r'/home/home_data/zjw/SemanticSegmentationUsingFPN_PanopticFeaturePyramidNetworks-master/run/Cityscapes/fpn-resnet101/model_best.pth.tar'

    # Load trained model
    if not os.path.isfile(load_name):
        raise RuntimeError("=> no checkpoint found at '{}'".format(load_name))
    print('====>loading trained model from ' + load_name)
    # map_location lets a GPU-trained checkpoint load on a CPU-only machine
    checkpoint = torch.load(load_name, map_location=None if args.cuda else 'cpu')
    checkepoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['state_dict'])

    # test
    img_path = r'./s1.jpeg'
    # Note: scipy.misc.imread was removed in SciPy >= 1.2; imageio or PIL is the usual replacement
    image = scipy.misc.imread(img_path, mode='RGB')

    image = image[:, :, ::-1]               # RGB -> BGR channel order
    image = np.transpose(image, (2, 0, 1))  # HWC -> CHW
    #image[0] -= means[0]
    #image[1] -= means[1]
    #image[2] -= means[2]
    image = torch.from_numpy(image.copy()).float()
    image = image.unsqueeze(0)
    if args.cuda:
        image, model = image.cuda(), model.cuda()
    with torch.no_grad():
        output = model(image)
    pred = output.data.cpu().numpy()
    pred = np.argmax(pred, axis=1)

    # show result
    pred_rgb = decode_seg_map_sequence(pred, args.dataset, args.plot)
    #results.append(pred_rgb)
    save_image(pred_rgb, r'./testjpg.png')
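
Because scipy.misc.imread was removed in SciPy 1.2+, here is a small sketch of an equivalent loader using Pillow and NumPy; it mirrors the preprocessing above (BGR channel order, CHW layout, batch dimension). The helper name load_image_tensor is illustrative, not part of the original code.

import numpy as np
import torch
from PIL import Image

def load_image_tensor(img_path):
    # Hypothetical drop-in for scipy.misc.imread(img_path, mode='RGB') plus the steps above
    image = np.array(Image.open(img_path).convert('RGB'))
    image = image[:, :, ::-1]               # RGB -> BGR, as in the snippet above
    image = np.transpose(image, (2, 0, 1))  # HWC -> CHW
    image = torch.from_numpy(image.copy()).float()
    return image.unsqueeze(0)               # add batch dimension
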
Example No. 3
    def __init__(self,
                 vocab_size,
                 dataset_configs,
                 hidden_dim=512,
                 embed_dim=300,
                 bidirection=True,
                 graph_node_features=1024):
        super(mainModel, self).__init__()
        dataset_configs = vars(dataset_configs)
        self.first_output_dim = dataset_configs["first_output_dim"]
        self.fpn_feature_dim = dataset_configs["fpn_feature_dim"]
        self.feature_dim = dataset_configs[dataset_configs['feature_type']]['feature_dim']
        self.query_encoder = QueryEncoder(vocab_size, hidden_dim, embed_dim,
                                          dataset_configs["lstm_layers"],
                                          bidirection)

        channels_list = [
            (self.feature_dim + 256, self.first_output_dim, 3, 1),
            (self.first_output_dim, self.first_output_dim * 2, 3, 2),
            ((self.first_output_dim * 2), self.first_output_dim * 4, 3, 2),
        ]
        conv_func = conv_with_kaiming_uniform(use_bn=True, use_relu=True)
        self.backbone_net = Backbone(channels_list, conv_func)
        self.fpn = FPN([256, 512, 1024], 512, conv_func)
        self.fcos = build_fcos(dataset_configs, self.fpn_feature_dim)
        # self.query_fc = nn.Linear(1024, self.feature_dim)
        self.prop_fc = nn.Linear(self.feature_dim, self.feature_dim)
        self.position_transform = nn.Linear(3, 256)

        for t in range(len(channels_list)):
            if t > 0:
                setattr(self, "qInput%d" % t,
                        nn.Linear(1024, channels_list[t - 1][1]))
            else:
                setattr(self, "qInput%d" % t,
                        nn.Linear(1024, self.feature_dim))
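
The loop above registers one query-projection layer per backbone stage under the names qInput0, qInput1, ...; a forward pass would normally retrieve them with getattr. A self-contained sketch of that pattern, with made-up dimensions for illustration:

import torch
import torch.nn as nn

class QueryProjDemo(nn.Module):
    def __init__(self, num_stages=3):
        super().__init__()
        # Same setattr pattern as above: attributes named qInput0, qInput1, ...
        for t in range(num_stages):
            setattr(self, "qInput%d" % t, nn.Linear(1024, 256))

    def forward(self, query_feature, t):
        proj = getattr(self, "qInput%d" % t)  # look the stage-specific layer up by name
        return proj(query_feature)

demo = QueryProjDemo()
out = demo(torch.randn(2, 1024), t=1)  # -> shape (2, 256)
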
Example No. 4
def main():
    args = parse_args()

    if args.dataset == 'CamVid':
        num_class = 32
    elif args.dataset == 'Cityscapes':
        num_class = 19

    if args.net == 'resnet101':
        blocks = [2, 4, 23, 3]
        model = FPN(blocks, num_class, back_bone=args.net)

    if args.checkname is None:
        args.checkname = 'fpn-' + str(args.net)

    evaluator = Evaluator(num_class)

    # Trained model path and name
    experiment_dir = args.experiment_dir
    load_name = os.path.join(experiment_dir, 'checkpoint.pth.tar')

    # Load trained model
    if not os.path.isfile(load_name):
        raise RuntimeError("=> no checkpoint found at '{}'".format(load_name))
    print('====>loading trained model from ' + load_name)
    # map_location lets a GPU-trained checkpoint load on a CPU-only machine
    checkpoint = torch.load(load_name, map_location=None if args.cuda else 'cpu')
    checkepoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['state_dict'])

    # Load image and save in test_imgs
    test_imgs = []
    test_label = []
    if args.dataset == "CamVid":
        root_dir = Path.db_root_dir('CamVid')
        test_file = os.path.join(root_dir, "val.csv")
        test_data = CamVidDataset(csv_file=test_file, phase='val')
        # Named val_loader so it matches the evaluation loop below
        val_loader = torch.utils.data.DataLoader(test_data, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)

    elif args.dataset == "Cityscapes":
        kwargs = {'num_workers': args.num_workers, 'pin_memory': True}
        #_, test_loader, _, _ = make_data_loader(args, **kwargs)
        _, val_loader, test_loader, _ = make_data_loader(args, **kwargs)
    else:
        raise RuntimeError("dataset {} not found.".format(args.dataset))

    # test
    results = []
    for iter, batch in enumerate(val_loader):
        if args.dataset == 'CamVid':
            image, target = batch['X'], batch['l']
        elif args.dataset == 'Cityscapes':
            image, target = batch['image'], batch['label']
        else:
            raise NotImplementedError

        if args.cuda:
            image, target, model = image.cuda(), target.cuda(), model.cuda()
        with torch.no_grad():
            output = model(image)
        pred = output.data.cpu().numpy()
        pred = np.argmax(pred, axis=1)
        target = target.cpu().numpy()
        evaluator.add_batch(target, pred)

        # show result
        pred_rgb = decode_seg_map_sequence(pred, args.dataset, args.plot)
        results.append(pred_rgb)

    Acc = evaluator.Pixel_Accuracy()
    Acc_class = evaluator.Pixel_Accuracy_Class()
    mIoU = evaluator.Mean_Intersection_over_Union()
    FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()

    print('Mean evaluate result on dataset {}'.format(args.dataset))
    print('Acc:{:.3f}\tAcc_class:{:.3f}\nmIoU:{:.3f}\tFWIoU:{:.3f}'.format(Acc, Acc_class, mIoU, FWIoU))
Example No. 5
    def __init__(self, args):
        self.args = args

        # Define Saver
        self.saver = Saver(args)
        self.saver.save_experiment_config()

        # Define Tensorboard Summary
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()

        # Define Dataloader
        if args.dataset == 'CamVid':
            size = 512
            train_file = os.path.join(os.getcwd(), "data", "CamVid", "train.csv")
            val_file = os.path.join(os.getcwd(), "data", "CamVid", "val.csv")
            print('=>loading datasets')
            train_data = CamVidDataset(csv_file=train_file, phase='train')
            self.train_loader = torch.utils.data.DataLoader(train_data,
                                                     batch_size=args.batch_size,
                                                     shuffle=True,
                                                     num_workers=args.num_workers)
            val_data = CamVidDataset(csv_file=val_file, phase='val', flip_rate=0)
            self.val_loader = torch.utils.data.DataLoader(val_data,
                                                     batch_size=args.batch_size,
                                                     shuffle=True,
                                                     num_workers=args.num_workers)
            self.num_class = 32
        elif args.dataset == 'Cityscapes':
            kwargs = {'num_workers': args.num_workers, 'pin_memory': True}
            self.train_loader, self.val_loader, self.test_loader, self.num_class = make_data_loader(args, **kwargs)

        # Define network
        if args.net == 'resnet101':
            blocks = [2,4,23,3]
            fpn = FPN(blocks, self.num_class, back_bone=args.net)

        # Define Optimizer
        self.lr = self.args.lr
        if args.optimizer == 'adam':
            self.lr = self.lr * 0.1
            # Adam does not accept a momentum argument
            optimizer = torch.optim.Adam(fpn.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        elif args.optimizer == 'sgd':
            optimizer = torch.optim.SGD(fpn.parameters(), lr=args.lr, momentum=0, weight_decay=args.weight_decay)

        # Define Criterion
        if args.dataset == 'CamVid':
            self.criterion = nn.CrossEntropyLoss()
        elif args.dataset == 'Cityscapes':
            weight = None
            self.criterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode='ce')

        self.model = fpn
        self.optimizer = optimizer

        # Define Evaluator
        self.evaluator = Evaluator(self.num_class)

        # Multi-GPU training
        if args.mGPUs:
            self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)

        # Using cuda
        if args.cuda:
            self.model = self.model.cuda()

        # Resuming checkpoint
        self.best_pred = 0.0
        if args.resume:
            output_dir = os.path.join(args.save_dir, args.dataset, args.checkname)
            runs = sorted(glob.glob(os.path.join(output_dir, 'experiment_*')))
            run_id = int(runs[-1].split('_')[-1]) - 1 if runs else 0
            experiment_dir = os.path.join(output_dir, 'experiment_{}'.format(str(run_id)))
            load_name = os.path.join(experiment_dir, 'checkpoint.pth.tar')
            if not os.path.isfile(load_name):
                raise RuntimeError("=> no checkpoint found at '{}'".format(load_name))
            checkpoint = torch.load(load_name, map_location=None if args.cuda else 'cpu')
            args.start_epoch = checkpoint['epoch']
            self.model.load_state_dict(checkpoint['state_dict'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.best_pred = checkpoint['best_pred']
            self.lr = checkpoint['optimizer']['param_groups'][0]['lr']
            print("=> loaded checkpoint '{}'(epoch {})".format(load_name, checkpoint['epoch']))

        self.lr_stage = [68, 93]
        self.lr_staget_ind = 0
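
The resume branch above expects a checkpoint dictionary containing epoch, state_dict, optimizer and best_pred; a minimal sketch of the write side that would satisfy it (the actual Saver.save_checkpoint implementation is not shown in these examples, so this is only an assumption about its contents):

state = {
    'epoch': epoch + 1,
    'state_dict': model.state_dict(),
    'optimizer': optimizer.state_dict(),
    'best_pred': best_pred,
}
torch.save(state, os.path.join(experiment_dir, 'checkpoint.pth.tar'))
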
Example No. 6
def init_network(args, n_cls):
    """
    :param args: define hyperparameters
    :param n_cls: number of object classes for initializing network output layers
    :return:
    """
    # Initialize the network here.
    if args.frame == 'faster_rcnn':
        conv_num = str(int(np.log2(cfg.RCNN_COMMON.FEAT_STRIDE[0])))
        Network = fasterRCNN(n_cls,
                             class_agnostic=args.class_agnostic,
                             feat_name=args.net,
                             feat_list=('conv' + conv_num, ),
                             pretrained=True)
    elif args.frame == 'faster_rcnn_vmrn':
        conv_num = str(int(np.log2(cfg.RCNN_COMMON.FEAT_STRIDE[0])))
        Network = fasterRCNN_VMRN(n_cls,
                                  class_agnostic=args.class_agnostic,
                                  feat_name=args.net,
                                  feat_list=('conv' + conv_num, ),
                                  pretrained=True)
    elif args.frame == 'fpn':
        Network = FPN(n_cls,
                      class_agnostic=args.class_agnostic,
                      feat_name=args.net,
                      feat_list=('conv2', 'conv3', 'conv4', 'conv5'),
                      pretrained=True)
    elif args.frame == 'fcgn':
        conv_num = str(int(np.log2(cfg.FCGN.FEAT_STRIDE[0])))
        Network = FCGN(feat_name=args.net,
                       feat_list=('conv' + conv_num, ),
                       pretrained=True)
    elif args.frame == 'mgn':
        conv_num = str(int(np.log2(cfg.RCNN_COMMON.FEAT_STRIDE[0])))
        Network = MGN(n_cls,
                      class_agnostic=args.class_agnostic,
                      feat_name=args.net,
                      feat_list=('conv' + conv_num, ),
                      pretrained=True)
    elif args.frame == 'all_in_one':
        conv_num = str(int(np.log2(cfg.RCNN_COMMON.FEAT_STRIDE[0])))
        Network = All_in_One(n_cls,
                             class_agnostic=args.class_agnostic,
                             feat_name=args.net,
                             feat_list=('conv' + conv_num, ),
                             pretrained=True)
    elif args.frame == 'ssd':
        Network = SSD(n_cls,
                      class_agnostic=args.class_agnostic,
                      feat_name=args.net,
                      feat_list=('conv3', 'conv4'),
                      pretrained=True)
    elif args.frame == 'ssd_vmrn':
        Network = SSD_VMRN(n_cls,
                           class_agnostic=args.class_agnostic,
                           feat_name=args.net,
                           feat_list=('conv3', 'conv4'),
                           pretrained=True)

    elif args.frame == 'efc_det':
        Network = EfficientDet(n_cls,
                               class_agnostic=args.class_agnostic,
                               feat_name=args.net,
                               feat_list=('conv3', 'conv4', 'conv5', 'conv6',
                                          'conv7'),
                               pretrained=True)
    elif args.frame == 'vam':
        if args.net == 'vgg16':
            Network = VAM.vgg16(n_cls, pretrained=True)
        elif args.net == 'res50':
            Network = VAM.resnet(n_cls, layer_num=50, pretrained=True)
        elif args.net == 'res101':
            Network = VAM.resnet(n_cls, layer_num=101, pretrained=True)
        else:
            print("network is not defined")
            pdb.set_trace()
    else:
        print("frame is not defined")
        pdb.set_trace()

    if args.frame in {'ssd_vmrn', 'faster_rcnn_vmrn'} and cfg.TRAIN.VMRN.FIX_OBJDET:
        Network.create_architecture(cfg.TRAIN.VMRN.OBJ_MODEL_PATH)
    elif args.frame in {'mgn', 'all_in_one'} and cfg.MGN.FIX_OBJDET:
        Network.create_architecture(cfg.MGN.OBJ_MODEL_PATH)
    else:
        Network.create_architecture()

    lr = args.lr
    # tr_momentum = cfg.TRAIN.COMMON.MOMENTUM
    # tr_momentum = args.momentum

    args.start_epoch = 1
    if args.resume:
        output_dir = args.save_dir + "/" + args.dataset + "/" + args.net
        load_name = os.path.join(
            output_dir, args.frame + '_{}_{}_{}.pth'.format(
                args.checksession, args.checkepoch, args.checkpoint))
        print("loading checkpoint %s" % (load_name))
        checkpoint = torch.load(load_name)
        args.session = checkpoint['session']
        Network.load_state_dict(checkpoint['model'])
        if 'pooling_mode' in checkpoint.keys():
            cfg.RCNN_COMMON.POOLING_MODE = checkpoint['pooling_mode']
        print("loaded checkpoint %s" % (load_name))
        if args.iter_per_epoch is not None:
            Network.iter_counter = (args.checkepoch -
                                    1) * args.iter_per_epoch + args.checkpoint
        print("start iteration:", Network.iter_counter)

    if args.cuda:
        Network.cuda()

    if len(args.mGPUs) > 0:
        gpus = [int(i) for i in args.mGPUs.split(',')]  # comma-separated GPU ids, e.g. "0,1"
        Network = nn.DataParallel(Network, gpus)

    params = []
    for key, value in dict(Network.named_parameters()).items():
        if value.requires_grad:
            if 'bias' in key:
                params += [{
                    'params': [value],
                    'lr': lr * (cfg.TRAIN.COMMON.DOUBLE_BIAS + 1),
                    'weight_decay': cfg.TRAIN.COMMON.WEIGHT_DECAY if cfg.TRAIN.COMMON.BIAS_DECAY else 0
                }]
            else:
                params += [{
                    'params': [value],
                    'lr': lr,
                    'weight_decay': cfg.TRAIN.COMMON.WEIGHT_DECAY
                }]

    # init optimizer
    if args.optimizer == "adam":
        optimizer = torch.optim.Adam(params)
    elif args.optimizer == "sgd":
        optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.COMMON.MOMENTUM)
    if args.resume:
        optimizer.load_state_dict(checkpoint['optimizer'])

    return Network, optimizer
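
A short usage sketch for init_network; the surrounding training script is not shown, so the call site below is hypothetical and n_cls is assumed to come from the dataset definition.

# Hypothetical call site for init_network
Network, optimizer = init_network(args, n_cls)
Network.train()
optimizer.zero_grad()
# ... forward pass and loss computation would go here ...
# loss.backward()
# optimizer.step()
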
Example No. 7
    def __init__(self, args):
        self.args = args

        # Define Saver
        self.saver = Saver(args)
        self.saver.save_experiment_config()

        # Define Tensorboard Summary
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()

        # Define Dataloader
        if args.dataset == 'CamVid':
            size = 512
            train_file = os.path.join(os.getcwd(), "data", "CamVid", "train.csv")
            val_file = os.path.join(os.getcwd(), "data", "CamVid", "val.csv")
            print('=>loading datasets')
            train_data = CamVidDataset(csv_file=train_file, phase='train')
            self.train_loader = torch.utils.data.DataLoader(train_data,
                                                     batch_size=args.batch_size,
                                                     shuffle=True,
                                                     num_workers=args.num_workers)
            val_data = CamVidDataset(csv_file=val_file, phase='val', flip_rate=0)
            self.val_loader = torch.utils.data.DataLoader(val_data,
                                                     batch_size=args.batch_size,
                                                     shuffle=True,
                                                     num_workers=args.num_workers)
            self.num_class = 32
        elif args.dataset == 'Cityscapes':
            kwargs = {'num_workers': args.num_workers, 'pin_memory': True}
            self.train_loader, self.val_loader, self.test_loader, self.num_class = make_data_loader(args, **kwargs)
        elif args.dataset == 'NYUDv2':
            kwargs = {'num_workers': args.num_workers, 'pin_memory': True}
            self.train_loader, self.val_loader, self.num_class = make_data_loader(args, **kwargs)

        # Define network
        if args.net == 'resnet101':
            blocks = [2,4,23,3]
            fpn = FPN(blocks, self.num_class, back_bone=args.net)

        # Define Optimizer
        self.lr = self.args.lr
        if args.optimizer == 'adam':
            self.lr = self.lr * 0.1
            # Adam does not accept a momentum argument
            optimizer = torch.optim.Adam(fpn.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        elif args.optimizer == 'sgd':
            optimizer = torch.optim.SGD(fpn.parameters(), lr=args.lr, momentum=0, weight_decay=args.weight_decay)

        # Define Criterion
        if args.dataset == 'CamVid':
            self.criterion = nn.CrossEntropyLoss()
        elif args.dataset == 'Cityscapes':
            weight = None
            self.criterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode='ce')
        elif args.dataset == 'NYUDv2':
            weight = None
            self.criterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode='ce')

        self.model = fpn
        self.optimizer = optimizer

        # Define Evaluator
        self.evaluator = Evaluator(self.num_class)

        # Multi-GPU training
        if args.mGPUs:
            self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)

        # Using cuda
        if args.cuda:
            self.model = self.model.cuda()

        # Resuming checkpoint
        self.best_pred = 0.0
        if args.resume:
            output_dir = os.path.join(args.save_dir, args.dataset, args.checkname)
            runs = sorted(glob.glob(os.path.join(output_dir, 'experiment_*')))
            run_id = int(runs[-1].split('_')[-1]) - 1 if runs else 0
            experiment_dir = os.path.join(output_dir, 'experiment_{}'.format(str(run_id)))
            load_name = os.path.join(experiment_dir, 'checkpoint.pth.tar')
            if not os.path.isfile(load_name):
                raise RuntimeError("=> no checkpoint found at '{}'".format(load_name))
            checkpoint = torch.load(load_name, map_location=None if args.cuda else 'cpu')
            args.start_epoch = checkpoint['epoch']
            self.model.load_state_dict(checkpoint['state_dict'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.best_pred = checkpoint['best_pred']
            self.lr = checkpoint['optimizer']['param_groups'][0]['lr']
            print("=> loaded checkpoint '{}'(epoch {})".format(load_name, checkpoint['epoch']))

        self.lr_stage = [68, 93]
        self.lr_staget_ind = 0


    def training(self, epoch):
        train_loss = 0.0
        self.model.train()
        # tbar = tqdm(self.train_loader)
        num_img_tr = len(self.train_loader)
        if self.lr_staget_ind > 1 and epoch % (self.lr_stage[self.lr_staget_ind]) == 0:
            adjust_learning_rate(self.optimizer, self.args.lr_decay_gamma)
            self.lr *= self.args.lr_decay_gamma
            self.lr_staget_ind += 1
        for iteration, batch in enumerate(self.train_loader):
            if self.args.dataset == 'CamVid':
                image, target = batch['X'], batch['l']
            elif self.args.dataset == 'Cityscapes':
                image, target = batch['image'], batch['label']
            elif self.args.dataset == 'NYUDv2':
                image, target = batch['image'], batch['label']
            else:
                raise NotImplementedError
            if self.args.cuda:
                image, target = image.cuda(), target.cuda()
            self.optimizer.zero_grad()
            # torch.autograd.Variable is a no-op wrapper in modern PyTorch; plain tensors work here
            inputs = Variable(image)
            labels = Variable(target)

            outputs = self.model(inputs)
            loss = self.criterion(outputs, labels.long())
            loss_val = loss.item()
            loss.backward()  # loss is a scalar, so no gradient argument is needed
            self.optimizer.step()
            train_loss += loss.item()
            # tbar.set_description('\rTrain loss:%.3f' % (train_loss / (iteration + 1)))

            if iteration % 10 == 0:
                print("Epoch[{}]({}/{}): Loss: {:.4f}, learning rate={}".format(
                    epoch, iteration, len(self.train_loader), loss.item(), self.lr))

            self.writer.add_scalar('train/total_loss_iter', loss.item(), iteration + num_img_tr * epoch)

            #if iteration % (num_img_tr // 10) == 0:
            #    global_step = iteration + num_img_tr * epoch
            #    self.summary.visualize_image(self.writer, self.args.dataset, image, target, outputs, global_step)

        self.writer.add_scalar('train/total_loss_epoch', train_loss, epoch)
        print('[Epoch: %d, numImages: %5d]' % (epoch, iteration * self.args.batch_size + image.data.shape[0]))
        print('Loss: %.3f' % train_loss)

        if self.args.no_val:
            # save checkpoint every epoch
            is_best = False
            self.saver.save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': self.model.module.state_dict(),
                'optimizer': self.optimizer.state_dict(),
                'best_pred': self.best_pred,
                }, is_best)


    def validation(self, epoch):
        self.model.eval()
        self.evaluator.reset()
        tbar = tqdm(self.val_loader, desc='\r')
        test_loss = 0.0
        for iter, batch in enumerate(self.val_loader):
            if self.args.dataset == 'CamVid':
                image, target = batch['X'], batch['l']
            elif self.args.dataset == 'Cityscapes':
                image, target = batch['image'], batch['label']
            elif self.args.dataset == 'NYUDv2':
                image, target = batch['image'], batch['label']
            else:
                raise NotImplementedError
            if self.args.cuda:
                image, target = image.cuda(), target.cuda()
            with torch.no_grad():
                output = self.model(image)
            loss = self.criterion(output, target)
            test_loss += loss.item()
            tbar.set_description('Test loss: %.3f ' % (test_loss / (iter + 1)))
            pred = output.data.cpu().numpy()
            target = target.cpu().numpy()
            pred = np.argmax(pred, axis=1)
            # Add batch sample into evaluator
            self.evaluator.add_batch(target, pred)

        # Fast test during the training
        Acc = self.evaluator.Pixel_Accuracy()
        Acc_class = self.evaluator.Pixel_Accuracy_Class()
        mIoU = self.evaluator.Mean_Intersection_over_Union()
        FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union()
        self.writer.add_scalar('val/total_loss_epoch', test_loss, epoch)
        self.writer.add_scalar('val/mIoU', mIoU, epoch)
        self.writer.add_scalar('val/Acc', Acc, epoch)
        self.writer.add_scalar('val/Acc_class', Acc_class, epoch)
        self.writer.add_scalar('val/FWIoU', FWIoU, epoch)
        print('Validation:')
        print('[Epoch: %d, numImages: %5d]' % (epoch, iter * self.args.batch_size + image.shape[0]))
        print("Acc:{:.5f}, Acc_class:{:.5f}, mIoU:{:.5f}, fwIoU:{:.5f}".format(Acc, Acc_class, mIoU, FWIoU))
        print('Loss: %.3f' % test_loss)

        new_pred = mIoU
        if new_pred > self.best_pred:
            is_best = True
            self.best_pred = new_pred
            self.saver.save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': self.model.state_dict(),
                'optimizer': self.optimizer.state_dict(),
                'best_pred': self.best_pred,
            }, is_best)
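
The training method above relies on adjust_learning_rate, which is not defined in these examples; a minimal sketch under the assumption that it simply scales every parameter group's learning rate, followed by the kind of epoch loop that would drive training() and validation():

def adjust_learning_rate(optimizer, decay_gamma):
    # Assumed behaviour: multiply each param group's lr by decay_gamma
    for param_group in optimizer.param_groups:
        param_group['lr'] *= decay_gamma

# Hypothetical driver for the trainer class above
# trainer = Trainer(args)
# for epoch in range(args.start_epoch, args.epochs):
#     trainer.training(epoch)
#     if not args.no_val:
#         trainer.validation(epoch)
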
Example No. 8
    def __init__(self, classes, num_layers=101, pretrained=False):
        self.model_path = 'data/pretrained_model/resnet101_caffe.pth'
        self.dout_base_model = 256
        self.pretrained = pretrained

        # Initialize the parent FPN with the ResNet Bottleneck block type
        FPN.__init__(self, FPN.Bottleneck, classes)