예제 #1
0
    def __init__(self, list_path, img_size, is_training, is_debug=False):
        """Build the image/label file lists and the transform pipeline.

        Args:
            list_path: text file with one image path per line.
            img_size: target (w, h) passed to ResizeImage.
            is_training: if True, prepend image augmentation to the pipeline.
            is_debug: forwarded to ToTensor (debug mode).
        """
        self.img_files = []
        self.label_files = []
        # Derive each label path from the image path; images without a
        # label file on disk are skipped.  (Removed leftover debug
        # print()s and a bare no-op `input` statement; the file handle
        # is now closed deterministically via `with`.)
        with open(list_path, 'r') as list_file:
            for path in list_file:
                label_path = path.replace('images', 'labels').replace(
                    '.png', '.txt').replace('.jpg', '.txt').strip()
                if os.path.isfile(label_path):
                    self.img_files.append(path)
                    self.label_files.append(label_path)
                else:
                    logging.info("no label found. skip it: {}".format(path))
        logging.info("Total images: {}".format(len(self.img_files)))
        self.img_size = img_size  # (w, h)
        self.max_objects = 50
        self.is_debug = is_debug

        #  transforms and augmentation
        self.transforms = data_transforms.Compose()
        if is_training:
            self.transforms.add(data_transforms.ImageBaseAug())
        # self.transforms.add(data_transforms.KeepAspect())
        self.transforms.add(data_transforms.ResizeImage(self.img_size))
        self.transforms.add(
            data_transforms.ToTensor(self.max_objects, self.is_debug))
예제 #2
0
 def __init__(self, list_path, img_size, is_training, is_debug=False):
     """Build COCO image/label file lists and the transform pipeline.

     Args:
         list_path: text file with one image path (relative) per line;
             each is prefixed with '../data/coco'.
         img_size: target (w, h) passed to ResizeImage.
         is_training: if True, append image augmentation.
         is_debug: forwarded to ToTensor (debug mode).
     """
     self.img_files = []
     self.label_files = []
     # The file handle is now closed deterministically via `with`.
     # Removed the misleading debug print("no image") -- it is the
     # *label* that is missing, and logging.info below already says so.
     with open(list_path, 'r') as list_file:
         for path in list_file:
             path = '../data/coco' + path
             label_path = path.replace('images', 'labels').replace(
                 '.png', '.txt').replace('.jpg', '.txt').strip()
             if os.path.isfile(label_path):
                 self.img_files.append(path)
                 self.label_files.append(label_path)
             else:
                 logging.info("no label found. skip it: {}".format(path))
     logging.info("Total images: {}".format(len(self.img_files)))
     self.img_size = img_size  # (w, h)
     self.max_objects = 200
     self.is_debug = is_debug
     #  transforms and augmentation
     #  the order cannot be switched cause boundingbox is augmented with the augmentation
     self.transforms = data_transforms.Compose()
     self.transforms.add(data_transforms.KeepAspect())
     self.transforms.add(data_transforms.ResizeImage(self.img_size))
     if is_training:
         self.transforms.add(data_transforms.ImageBaseAug())
     self.transforms.add(
         data_transforms.ToTensor(self.max_objects, self.is_debug))
예제 #3
0
def load_data(dataset_path=DEFAULT_DATASET,
              transform=None,
              num_workers=0,
              batch_size=128):
    """Create a shuffling DataLoader over CustomDataset.

    Args:
        dataset_path: root directory of the dataset.
        transform: per-sample transform; when None (the default) a fresh
            data_transforms.ToTensor() is built per call.  The original
            default `transform=data_transforms.ToTensor()` was evaluated
            once at definition time and shared across every call -- the
            mutable-default-argument pitfall.
        num_workers: worker processes for the loader.
        batch_size: samples per batch; the incomplete final batch is
            dropped (drop_last=True).

    Returns:
        torch.utils.data.DataLoader over the dataset.
    """
    if transform is None:
        transform = data_transforms.ToTensor()
    custom_dataset = CustomDataset(dataset_path, transform=transform)
    return DataLoader(custom_dataset,
                      num_workers=num_workers,
                      batch_size=batch_size,
                      shuffle=True,
                      drop_last=True)
예제 #4
0
def load_data():
    """Return (trainloader, valloader) over the LSUN dataset.

    Both splits run the same random-crop / flip / normalize pipeline;
    only the train loader shuffles.
    """
    norm_mean = [0.485, 0.456, 0.406]
    norm_std = [0.229, 0.224, 0.225]

    def build_transform():
        # Identical pipeline for both splits, built fresh per split.
        return transforms.Compose([
            transforms.RandomCrop(IM_SIZE),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=norm_mean, std=norm_std),
        ])

    trainset = dataset.LSUN(DATADIR, 'train', build_transform())
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=BATCH_SIZE,
                                              shuffle=True,
                                              num_workers=2,
                                              pin_memory=True,
                                              drop_last=True)

    print("Train set size: " + str(len(trainset)))

    valset = dataset.LSUN(DATADIR, 'val', build_transform())
    valloader = torch.utils.data.DataLoader(valset,
                                            batch_size=BATCH_SIZE,
                                            shuffle=False,
                                            num_workers=2,
                                            pin_memory=True,
                                            drop_last=True)
    print("Val set size: " + str(len(valset)))
    return trainloader, valloader
예제 #5
0
파일: main.py 프로젝트: Bobholamovic/QGDNet
def test_cnn(args):
    """Run a QGCNN checkpoint over the test split and write results.

    Relies on module-level globals: cfg (paths/config dict), out_dir,
    logger_s / logger_f (console / file loggers) and test().
    """
    batch_size = 1
    num_workers = args.workers
    phase = args.phase

    # Echo the full argument namespace for reproducibility.
    for k, v in args.__dict__.items():
        print(k, ':', v)

    single_model = QGCNN()
    model = torch.nn.DataParallel(single_model).cuda()

    # out_name=True: the loader also yields file names for saving results.
    dataset = DataList(cfg['DATA_DIR'],
                       phase,
                       transforms.Compose([transforms.ToTensor()]),
                       list_dir=cfg['LIST_DIR'],
                       out_name=True)
    test_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              num_workers=num_workers,
                                              pin_memory=False)

    cudnn.benchmark = True

    # Optionally resume from a checkpoint
    start_epoch = 0
    if args.resume:
        if os.path.isfile(args.resume):
            logger_s.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            logger_s.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            logger_f.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            logger_f.warning("=> no checkpoint found at '{}'".format(
                args.resume))

    # Output directory is tagged with the resumed epoch and the phase.
    test_dir = join(out_dir,
                    'test/result_{:03d}_{}'.format(start_epoch, phase))

    test(test_loader, model, output_dir=test_dir, suffix=cfg['TEST_SUFFIX'])
예제 #6
0
    def __init__(self,
                 dataset_path=DEFAULT_DATASET,
                 transform=data_transforms.ToTensor()):
        """Eagerly load every '*/*.jpg' under dataset_path into memory.

        Each sample is (PIL.Image, label_id).  The label is derived from
        the first letter of the image's *file name*: 's' -> "space",
        'n' -> "nothing", any other letter stands for itself, then
        looked up in LABEL_NAMES.
        """
        from PIL import Image
        from glob import glob
        from os import path
        self.data = []

        for f in glob(path.join(dataset_path, '*/*.jpg')):
            i = Image.open(f)
            i.load()  # force decode now so the file handle is released
            # BUG FIX: str(f)[0] was the first character of the whole
            # glob path (i.e. of dataset_path), so every sample got the
            # same label.  Use the file name's first character instead,
            # which matches the 's'/'n' special cases below.
            # TODO(review): confirm labels come from the file name and
            # not the class directory name.
            label = path.basename(f)[0]
            if label == "s":
                label = "space"
            elif label == "n":
                label = "nothing"
            label_id = LABEL_NAMES.index(label)
            self.data.append((i, label_id))
        self.transform = transform
def layout(img_name):
    """Predict and display a 4-class layout map for one image.

    Relies on module-level globals: scriptdir, ckpt_name, IM_PATH,
    IM_SIZE, PreTrainedResNet, UnNormalize and show_tensor.
    """
    model = PreTrainedResNet(False, num_classes=4)
    path = os.path.join(scriptdir, ckpt_name)
    # CPU map_location so inference also works on CUDA-less machines.
    checkpoint = torch.load(path, map_location='cpu')
    model.load_state_dict(checkpoint)
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    # NOTE(review): unorm is built but never used below -- presumably a
    # leftover for visualisation; confirm before removing.
    unorm = UnNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    img = Image.open(IM_PATH + img_name)
    img = img.resize((IM_SIZE, IM_SIZE))
    # NOTE(review): transform(img)[0] keeps only the first element of the
    # result.  With a plain torchvision Compose this would be channel 0 of
    # a (C, H, W) tensor, which the view() below could not reshape back to
    # 3 channels -- so `transforms` here presumably returns a tuple; confirm.
    tensor = transform(img)[0]
    tensor = tensor.view(1, 3, IM_SIZE, IM_SIZE)
    # Model output is a dict; 'out' holds the per-pixel class scores.
    output = model(tensor)['out']
    _, pred = torch.max(output, 1)
    show_tensor(pred)
예제 #8
0
파일: dataset_coco.py 프로젝트: XDong18/dla
 def __getitem__(self, index):
     """Return one (image_tensor, mask_tensor) sample.

     Training: image and mask pass jointly through self.transforms so
     spatial augmentation stays aligned.  Evaluation: the raw image is
     converted to CHW floats in [0, 1] and the mask is returned as-is.
     """
     img, mask = self.pull_item(index)
     if self.is_train:
         mask_PIL = Image.fromarray(mask)
         # img = img.resize((1920, 1216))
         data = [img]
         data.append(mask_PIL)
         # Joint transform: same random parameters for image and mask.
         data = list(self.transforms(*data))
         trans_temp = transforms.ToTensor()
         # NOTE(review): [0] keeps only channel 0 of the image tensor --
         # presumably intentional (single-channel input); confirm.
         tensor_img = trans_temp(data[0])[0]
         array_mask = np.array(data[1])
         return(tuple([tensor_img, torch.from_numpy(array_mask)]))
     else:
         # img = img.resize((1920, 1216))
         img = np.array(img)
         # HWC -> CHW to match the tensor layout models expect.
         img = np.transpose(img, (2,0,1))
         # Scale 0-255 pixel values to [0, 1]; already-scaled input passes through.
         if img.max()>1:
             img = img / 255.
         return(tuple([torch.from_numpy(img), torch.from_numpy(mask)]))
예제 #9
0
def test_house(options, model_dir, data_dir, phase, run_crf, out_dirname):
    """Evaluate the best DRNSeg checkpoint on one house-dataset split.

    Loads 'model_best.pth.tar' from model_dir, runs segment.test over
    the given phase and writes visualisations into data_dir/out_dirname.
    """
    net = segment.DRNSeg(options.arch,
                         len(class_map),
                         pretrained_model=None,
                         pretrained=False)
    checkpoint = torch.load(os.path.join(model_dir, 'model_best.pth.tar'))
    model = torch.nn.DataParallel(net)
    model.load_state_dict(checkpoint['state_dict'])
    print('Model loaded')
    model.cuda()

    # Normalization statistics come from the dataset's info.json.
    with open(os.path.join(data_dir, 'info.json'), 'r') as f:
        info_json = json.load(f)
    pipeline = [
        RescaleToFixedSize(options.input_size),
        data_transforms.ToTensor(),
        data_transforms.Normalize(mean=info_json['mean'],
                                  std=info_json['std']),
    ]
    # pipeline without the rescale step would be:
    # [data_transforms.ToTensor(), normalize]

    house_data = HouseDataList(data_dir,
                               phase,
                               data_transforms.Compose(pipeline),
                               out_name=True)
    test_loader = torch.utils.data.DataLoader(house_data,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=options.num_workers,
                                              pin_memory=True,
                                              drop_last=True)
    torch.backends.cudnn.benchmark = True

    # Make the output directory
    out_dir = os.path.join(data_dir, out_dirname)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    mAP = segment.test(test_loader,
                       model,
                       options.num_classes,
                       save_vis=True,
                       has_gt=True,
                       run_crf=run_crf,
                       output_dir=out_dir)
    logger.info('mAP: {}'.format(mAP))
예제 #10
0
    # Class-id remapping applied to every mask: unwanted ids collapse to
    # background (0), the kept ones are re-indexed into the compact range
    # 1..4.  Keys are original ids, values are new ids; ids not listed
    # (0 and 1) are left unchanged.  Applying this as one simultaneous
    # lookup on the *original* values is equivalent to the previous
    # chain of sequential in-place assignments.
    _MASK_REMAP = {
        2: 0, 3: 0, 5: 0, 6: 0, 8: 0, 10: 0, 11: 0,
        12: 0, 13: 0, 15: 0, 16: 0,
        4: 2, 7: 3, 9: 4, 14: 1, 17: 3,
    }

    def _remap_mask(self, mask):
        """Return a copy of *mask* with _MASK_REMAP applied simultaneously."""
        out = mask.copy()
        for src, dst in self._MASK_REMAP.items():
            out[mask == src] = dst
        return out

    def __getitem__(self, i):
        """Return the i-th (image_tensor, mask_tensor) sample.

        Training: image and remapped mask pass jointly through
        self.transforms.  Evaluation: the raw image is converted to CHW
        floats in [0, 1] and the remapped mask is returned directly.
        """
        idx = self.ids[i]
        mask_file = glob(self.masks_dir + idx + '*')
        img_file = glob(self.imgs_dir + idx + '*')

        assert len(mask_file) == 1, \
            f'Either no mask or multiple masks found for the ID {idx}: {mask_file}'
        assert len(img_file) == 1, \
            f'Either no image or multiple images found for the ID {idx}: {img_file}'
        # Both branches used to repeat the same 16-line remap chain;
        # it now lives in _remap_mask.
        mask = self._remap_mask(np.load(mask_file[0]))
        img = Image.open(img_file[0])
        if self.is_train:
            # Labels are shifted by +100 before the joint PIL transform
            # and shifted back afterwards -- presumably so remapped ids
            # stay distinguishable from values the transform may
            # introduce (e.g. padding); TODO(review): confirm.
            mask_PIL = Image.fromarray(mask + 100)
            data = list(self.transforms(img, mask_PIL))
            # NOTE(review): [0] keeps only channel 0 of the image tensor,
            # matching the original behavior; confirm input is
            # effectively single-channel.
            tensor_img = transforms.ToTensor()(data[0])[0]
            array_mask = np.array(data[1]) - 100
            return (tensor_img, torch.from_numpy(array_mask))
        else:
            img = np.array(img)
            # HWC -> CHW, then scale 0-255 pixels into [0, 1].
            img = np.transpose(img, (2, 0, 1))
            if img.max() > 1:
                img = img / 255.
            return (torch.from_numpy(img), torch.from_numpy(mask))
예제 #11
0
def train_seg(args):
    """Active-learning training loop for DRNSeg segmentation.

    Runs 10 labeling cycles: each cycle selects new images to "label"
    (by predicted loss, discriminative AL, ground-truth loss, or random),
    retrains a fresh model on the labeled subset and records the best
    validation accuracy/mAP.  Relies on module-level helpers: SegList,
    DRNSeg, ActiveLearning, DiscriminativeActiveLearning,
    choose_new_labeled_indices(_using_gt), write_entropies_csv, train,
    validate, accuracy, adjust_learning_rate, save_checkpoint, logger.
    """
    rand_state = np.random.RandomState(1311)
    torch.manual_seed(1311)
    device = 'cuda' if (torch.cuda.is_available()) else 'cpu'

    # We have 2975 images total in the training set, so let's choose 500 for 3 cycles,
    # 1500 images total (~1/2 of total)
    images_per_cycle = 150

    batch_size = args.batch_size
    num_workers = args.workers
    crop_size = args.crop_size

    print(' '.join(sys.argv))

    for k, v in args.__dict__.items():
        print(k, ':', v)

    # Data loading code
    data_dir = args.data_dir
    info = json.load(open(join(data_dir, 'info.json'), 'r'))
    normalize = data_transforms.Normalize(mean=info['mean'], std=info['std'])
    t = []
    if args.random_rotate > 0:
        t.append(data_transforms.RandomRotate(args.random_rotate))
    if args.random_scale > 0:
        t.append(data_transforms.RandomScale(args.random_scale))
    t.extend([
        data_transforms.RandomCrop(crop_size),
        data_transforms.RandomHorizontalFlip(),
        data_transforms.ToTensor(), normalize
    ])
    dataset = SegList(data_dir,
                      'train',
                      data_transforms.Compose(t),
                      list_dir=args.list_dir)
    # Un-augmented view of the same images, used only for sample scoring.
    training_dataset_no_augmentation = SegList(
        data_dir,
        'train',
        data_transforms.Compose([data_transforms.ToTensor(), normalize]),
        list_dir=args.list_dir)

    unlabeled_idx = list(range(len(dataset)))
    labeled_idx = []
    validation_accuracies = list()
    validation_mAPs = list()
    progress = tqdm.tqdm(range(10))
    for cycle in progress:
        # Fresh model each cycle so results measure the data selection,
        # not accumulated training.
        single_model = DRNSeg(args.arch, args.classes, None, pretrained=True)
        if args.pretrained:
            single_model.load_state_dict(torch.load(args.pretrained))

        # Wrap our model in Active Learning Model.
        if args.use_loss_prediction_al:
            single_model = ActiveLearning(single_model,
                                          global_avg_pool_size=6,
                                          fc_width=256)
        elif args.use_discriminative_al:
            single_model = DiscriminativeActiveLearning(single_model)
        optim_parameters = single_model.optim_parameters()

        model = torch.nn.DataParallel(single_model).cuda()

        # Don't apply a 'mean' reduction, we need the whole loss vector.
        criterion = nn.NLLLoss(ignore_index=255, reduction='none')

        criterion.cuda()

        if args.choose_images_with_highest_loss:
            # Choosing images based on the ground truth labels.
            # We want to check if predicting loss with 100% accuracy would result to
            # a good active learning algorithm.
            new_indices, entropies = choose_new_labeled_indices_using_gt(
                model, cycle, rand_state, unlabeled_idx,
                training_dataset_no_augmentation, device, criterion,
                images_per_cycle)
        else:
            new_indices, entropies = choose_new_labeled_indices(
                model,
                training_dataset_no_augmentation,
                cycle,
                rand_state,
                labeled_idx,
                unlabeled_idx,
                device,
                images_per_cycle,
                args.use_loss_prediction_al,
                args.use_discriminative_al,
                input_pickle_file=None)
        labeled_idx.extend(new_indices)
        print("Running on {} labeled images.".format(len(labeled_idx)))
        if args.output_superannotate_csv_file is not None:
            # Write image paths to csv file which can be uploaded to annotate.online.
            write_entropies_csv(training_dataset_no_augmentation, new_indices,
                                entropies, args.output_superannotate_csv_file)

        # Train only on the currently "labeled" subset.
        train_loader = torch.utils.data.DataLoader(data.Subset(
            dataset, labeled_idx),
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   num_workers=num_workers,
                                                   pin_memory=True,
                                                   drop_last=True)
        val_loader = torch.utils.data.DataLoader(SegList(
            data_dir,
            'val',
            data_transforms.Compose([
                data_transforms.RandomCrop(crop_size),
                data_transforms.ToTensor(),
                normalize,
            ]),
            list_dir=args.list_dir),
                                                 batch_size=batch_size,
                                                 shuffle=False,
                                                 num_workers=num_workers,
                                                 pin_memory=True,
                                                 drop_last=True)

        # define loss function (criterion) and optimizer.
        optimizer = torch.optim.SGD(optim_parameters,
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

        cudnn.benchmark = True
        best_prec1 = 0
        best_mAP = 0
        start_epoch = 0

        # optionally resume from a checkpoint
        if args.resume:
            if os.path.isfile(args.resume):
                print("=> loading checkpoint '{}'".format(args.resume))
                checkpoint = torch.load(args.resume)
                start_epoch = checkpoint['epoch']
                best_prec1 = checkpoint['best_prec1']
                model.load_state_dict(checkpoint['state_dict'])
                print("=> loaded checkpoint '{}' (epoch {})".format(
                    args.resume, checkpoint['epoch']))
            else:
                print("=> no checkpoint found at '{}'".format(args.resume))

        if args.evaluate:
            validate(val_loader,
                     model,
                     criterion,
                     eval_score=accuracy,
                     num_classes=args.classes,
                     use_loss_prediction_al=args.use_loss_prediction_al)
            return

        progress_epoch = tqdm.tqdm(range(start_epoch, args.epochs))
        for epoch in progress_epoch:
            lr = adjust_learning_rate(args, optimizer, epoch)
            logger.info('Cycle {0} Epoch: [{1}]\tlr {2:.06f}'.format(
                cycle, epoch, lr))
            # train for one epoch
            train(train_loader,
                  model,
                  criterion,
                  optimizer,
                  epoch,
                  eval_score=accuracy,
                  use_loss_prediction_al=args.use_loss_prediction_al,
                  active_learning_lamda=args.lamda)

            # evaluate on validation set
            prec1, mAP1 = validate(
                val_loader,
                model,
                criterion,
                eval_score=accuracy,
                num_classes=args.classes,
                use_loss_prediction_al=args.use_loss_prediction_al)

            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            best_mAP = max(mAP1, best_mAP)
            checkpoint_path = os.path.join(args.save_path,
                                           'checkpoint_latest.pth.tar')
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                    'best_mAP': best_mAP,
                },
                is_best,
                filename=checkpoint_path)
            if (epoch + 1) % args.save_iter == 0:
                history_path = os.path.join(
                    args.save_path,
                    'checkpoint_{:03d}.pth.tar'.format(epoch + 1))
                shutil.copyfile(checkpoint_path, history_path)
        validation_accuracies.append(best_prec1)
        validation_mAPs.append(best_mAP)
        print("{} accuracies: {} mAPs {}".format(
            "Active Learning" if args.use_loss_prediction_al else "Random",
            str(validation_accuracies), str(validation_mAPs)))
예제 #12
0
    # Load weights on CPU first so this also works on CUDA-less machines.
    state_dict = torch.load(pretrained, map_location='cpu')
    if 'state_dict' in state_dict:
        # Full checkpoint (not a bare state dict), saved from a wrapped
        # model (e.g. DataParallel): unwrap, then strip the leading
        # prefix component from every key.
        state_dict = state_dict['state_dict']
        new_state_dict = {}
        for key in state_dict:
            new_key = '.'.join(key.split('.')[1:])
            new_state_dict[new_key] = state_dict[key]
        state_dict = new_state_dict
    if torch.cuda.is_available():
        model.load_state_dict(state_dict)
    else:
        logger.warning("CUDA not available!\n")
        model.load_state_dict(state_dict)

    # Transformations that need to be performed on input image
    transforms = data_transforms.Compose([data_transforms.ToTensor()])

    # Classify the requested frames of every clip, one image at a time.
    for video_id in sorted(frames_dict):
        video_name = os.path.join(clips_dir, video_id + '.mp4')
        images = get_video_frames(video_name, frames_dict[video_id])

        for i, image in enumerate(images):
            image_name = names_dict[video_id][i]
            logger.info(image_name)
            # Swap image from (H, W, C) to (C, H, W)
            image = np.transpose(image, axes=(2, 0, 1))
            # Make batch size 1
            image = np.expand_dims(image, axis=0)
            # Turn numpy array into tensor
            image = transforms(image)[0]
            test(image, model, classes, image_name)
예제 #13
0
파일: main.py 프로젝트: Bobholamovic/QGDNet
def train_cnn(args):
    """Train the QGCNN quality model, checkpointing the best epoch.

    Relies on module-level globals: cfg (config dict), out_dir,
    logger_s / logger_f (console / file loggers), DataList, ComLoss,
    train, validate, accuracy, adjust_learning_rate, save_checkpoint.
    """
    batch_size = args.batch_size
    num_workers = args.workers
    crop_size = cfg['CROP_SIZE']

    # Echo the full argument namespace for reproducibility.
    for k, v in args.__dict__.items():
        print(k, ':', v)

    single_model = QGCNN()
    model = torch.nn.DataParallel(single_model)

    if cfg['FEATS']:
        # Each cfg['FEATS'] entry is a one-item {name: weight} mapping;
        # unzip them into parallel tuples.
        feat_names, weights = zip(*(tuple(*f.items()) for f in cfg['FEATS']))
    else:
        feat_names, weights = None, None

    criterion = ComLoss(cfg['IQA_MODEL'],
                        weights,
                        feat_names,
                        patch_size=cfg['PATCH_SIZE'],
                        pixel_criterion=cfg['CRITERION'])
    criterion.cuda()

    # Data loading
    data_dir = cfg['DATA_DIR']
    list_dir = cfg['LIST_DIR']
    t = [
        transforms.RandomCrop(crop_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ]
    # Note that the cropsize could have a significant influence,
    # i.e., with a small cropsize the model would get overfitted
    # easily thus hard to train
    train_loader = torch.utils.data.DataLoader(DataList(data_dir,
                                                        'train',
                                                        transforms.Compose(t),
                                                        list_dir=list_dir),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=num_workers,
                                               pin_memory=True,
                                               drop_last=True)
    # The cropsize of the validation set dramatically affects the
    # evaluation accuracy, which means the quality of the whole
    # image might be very different from that of its cropped patches.
    #
    # Try setting batch_size = 1 and no crop (disable RandomCrop)
    # to improve the effect of early stopping.
    val_loader = DataList(data_dir,
                          'val',
                          transforms.Compose([transforms.ToTensor()]),
                          list_dir=list_dir)

    optimizer = torch.optim.Adam(single_model.parameters(),
                                 lr=args.lr,
                                 betas=(0.9, 0.99),
                                 weight_decay=args.weight_decay)

    cudnn.benchmark = True

    weight_dir = join(out_dir, 'weights/')
    if not exists(weight_dir):
        os.mkdir(weight_dir)

    best_prec = 0
    start_epoch = 0

    # Optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            logger_s.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            logger_s.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            logger_f.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            logger_f.warning("=> no checkpoint found at '{}'".format(
                args.resume))

    if args.evaluate:
        validate(val_loader, model.cuda(), criterion, eval_score=accuracy)
        return

    for epoch in range(start_epoch, args.epochs):

        lr = adjust_learning_rate(args, optimizer, epoch)

        # Decay the auxiliary feature-loss weights every 100 epochs.
        if criterion.weights is not None and (epoch + 1) % 100 == 0:
            criterion.weights /= 10.0

        logger_s.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
        # train for one epoch
        train(train_loader,
              model.cuda(),
              criterion,
              optimizer,
              epoch,
              eval_score=accuracy)

        # Evaluate on validation set
        prec = validate(val_loader,
                        model.cuda(),
                        criterion,
                        eval_score=accuracy)

        is_best = prec > best_prec
        best_prec = max(prec, best_prec)
        logger_s.info('current best {:.6f}'.format(best_prec))

        checkpoint_path = join(weight_dir, 'checkpoint_latest.pkl')
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec': best_prec,
            },
            is_best,
            filename=checkpoint_path)

        # Keep a periodic snapshot alongside the rolling latest checkpoint.
        if (epoch + 1) % args.store_interval == 0:
            history_path = join(weight_dir,
                                'checkpoint_{:03d}.pkl'.format(epoch + 1))
            shutil.copyfile(checkpoint_path, history_path)
예제 #14
0
def train_seg(args):
    """Train a DRNSeg model, checkpointing locally and mirroring to S3.

    Relies on module-level helpers: SegList, transforms (custom data
    transforms providing RandomRotate/RandomScale/Scale), train,
    validate, accuracy, adjust_learning_rate, save_checkpoint,
    upload_to_s3 and logger.
    """
    batch_size = args.batch_size
    num_workers = args.workers
    crop_size = args.crop_size

    print(' '.join(sys.argv))

    # Echo the full argument namespace for reproducibility.
    for k, v in args.__dict__.items():
        print(k, ':', v)

    single_model = DRNSeg(args.arch, args.classes, None, pretrained=True)
    if args.pretrained:
        single_model.load_state_dict(torch.load(args.pretrained))
    model = torch.nn.DataParallel(single_model).cuda()
    # FIX: nn.NLLLoss2d is a long-deprecated alias removed from recent
    # PyTorch; nn.NLLLoss handles the 4D-input/2D-target case the same
    # way (and matches the sibling train_seg in this file).
    criterion = nn.NLLLoss(ignore_index=255)

    criterion.cuda()

    # Data loading code
    data_dir = args.data_dir
    # FIX: close info.json deterministically instead of leaking the handle.
    with open(join(data_dir, 'info.json'), 'r') as f:
        info = json.load(f)
    normalize = transforms.Normalize(mean=info['mean'], std=info['std'])
    t = []
    if args.downsample:
        t.append(transforms.Scale(0.5))
    if args.random_rotate > 0:
        t.append(transforms.RandomRotate(args.random_rotate))
    if args.random_scale > 0:
        t.append(transforms.RandomScale(args.random_scale))
    t.extend([
        transforms.RandomCrop(crop_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normalize
    ])
    train_loader = torch.utils.data.DataLoader(SegList(data_dir, 'train',
                                                       transforms.Compose(t)),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=num_workers,
                                               pin_memory=True,
                                               drop_last=True)
    val_loader = torch.utils.data.DataLoader(SegList(
        data_dir, 'val',
        transforms.Compose([
            transforms.RandomCrop(crop_size),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=num_workers,
                                             pin_memory=True,
                                             drop_last=True)

    # define loss function (criterion) and optimizer
    optimizer = torch.optim.SGD(single_model.optim_parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    cudnn.benchmark = True
    best_prec1 = 0
    start_epoch = 0

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    if args.evaluate:
        validate(val_loader, model, criterion, eval_score=accuracy)
        return

    for epoch in range(start_epoch, args.epochs):
        lr = adjust_learning_rate(args, optimizer, epoch)
        logger.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
        # train for one epoch
        train(train_loader,
              model,
              criterion,
              optimizer,
              epoch,
              eval_score=accuracy)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, eval_score=accuracy)

        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        checkpoint_path = 'checkpoint_latest.pth.tar'
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            },
            is_best,
            filename=checkpoint_path,
            prefix=args.arch)
        if (epoch + 1) % 10 == 0:
            history_path = 'checkpoint_{:03d}.pth.tar'.format(epoch + 1)
            shutil.copyfile(checkpoint_path, history_path)
            # save historical data to s3
            upload_to_s3(history_path, prefix=args.arch)
        # save latest checkpoint to s3
        try:
            upload_to_s3(checkpoint_path, prefix=args.arch)
        except Exception:
            # FIX: bare 'except:' also swallowed KeyboardInterrupt and
            # SystemExit; the upload stays best-effort, but only for
            # genuine errors.
            logging.info('failed to upload latest checkpoint to s3')
예제 #15
0
def run_training(args):
    """Run image-classification training for a DLA model.

    Builds the model, data loaders, loss and optimizer from ``args``,
    optionally resumes from ``args.resume``, then trains for
    ``args.epochs`` epochs, checkpointing the latest weights each epoch
    and a numbered history copy every ``args.check_freq`` epochs.
    """
    model = dla.__dict__[args.arch](
        pretrained=args.pretrained, num_classes=args.classes,
        pool_size=args.crop_size // 32)
    model = torch.nn.DataParallel(model)

    best_prec1 = 0

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Resolve dataset statistics (mean/std/eigval/eigvec) by name, falling
    # back to the info.json shipped alongside the data directory.
    data = dataset.get_data(args.data_name)
    if data is None:
        data = dataset.load_dataset_info(args.data, data_name=args.data_name)
    if data is None:
        # BUG FIX: the message previously had '{}' placeholders but passed
        # the values as extra positional args, so they never appeared in
        # the error text; format the message explicitly instead.
        raise ValueError(
            '{} is not pre-defined in dataset.py and info.json '
            'does not exist in {}'.format(args.data_name, args.data))

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = data_transforms.Normalize(mean=data.mean, std=data.std)
    tt = [data_transforms.RandomResizedCrop(
        args.crop_size, min_area_ratio=args.min_area_ratio,
        aspect_ratio=args.aspect_ratio)]
    # PCA lighting + color jitter augmentation, only when the dataset
    # provides eigen statistics and the user asked for random color.
    if data.eigval is not None and data.eigvec is not None \
            and args.random_color:
        lighting = data_transforms.Lighting(0.1, data.eigval, data.eigvec)
        jitter = data_transforms.RandomJitter(0.4, 0.4, 0.4)
        tt.extend([jitter, lighting])
    tt.extend([data_transforms.RandomHorizontalFlip(),
               data_transforms.ToTensor(),
               normalize])

    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(traindir, data_transforms.Compose(tt)),
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(args.scale_size),
            transforms.CenterCrop(args.crop_size),
            transforms.ToTensor(),
            normalize
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()

    # Move the model/criterion to GPU *before* constructing the optimizer,
    # matching the documented construction order (Module.cuda() is
    # in-place, so the optimizer sees the final parameter tensors).
    if args.cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.evaluate:
        validate(args, val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(args, optimizer, epoch)

        # train for one epoch
        train(args, train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(args, val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        checkpoint_path = 'checkpoint_latest.pth.tar'
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best, filename=checkpoint_path)
        if (epoch + 1) % args.check_freq == 0:
            history_path = 'checkpoint_{:03d}.pth.tar'.format(epoch + 1)
            shutil.copyfile(checkpoint_path, history_path)
예제 #16
0
def train_seg(args):
    """Train a DLA-up semantic-segmentation model.

    Builds the model, NLL criterion, train/val loaders and SGD optimizer
    from ``args``, optionally resumes from ``args.resume``, then runs the
    train/validate loop, saving ``checkpoint_latest.pth.tar`` every epoch
    and a numbered history copy every ``args.save_freq`` epochs.
    """
    batch_size = args.batch_size
    num_workers = args.workers
    crop_size = args.crop_size

    # Log the exact command line and full configuration for reproducibility.
    print(' '.join(sys.argv))

    for k, v in args.__dict__.items():
        print(k, ':', v)

    pretrained_base = args.pretrained_base
    single_model = dla_up.__dict__.get(args.arch)(args.classes,
                                                  pretrained_base,
                                                  down_ratio=args.down)
    model = torch.nn.DataParallel(single_model).cuda()
    # edge_weight > 0 up-weights the second class (presumably edge pixels
    # in a binary setup — TODO confirm against the dataset labels).
    if args.edge_weight > 0:
        weight = torch.from_numpy(
            np.array([1, args.edge_weight], dtype=np.float32))
        # NOTE(review): NLLLoss2d is deprecated in newer torch; nn.NLLLoss
        # is the drop-in replacement. 255 marks ignored/void pixels.
        criterion = nn.NLLLoss2d(ignore_index=255, weight=weight)
    else:
        criterion = nn.NLLLoss2d(ignore_index=255)

    criterion.cuda()

    data_dir = args.data_dir
    info = dataset.load_dataset_info(data_dir)
    normalize = transforms.Normalize(mean=info.mean, std=info.std)
    # Training-time augmentation pipeline, assembled conditionally.
    t = []
    if args.random_rotate > 0:
        t.append(transforms.RandomRotate(args.random_rotate))
    if args.random_scale > 0:
        t.append(transforms.RandomScale(args.random_scale))
    t.append(transforms.RandomCrop(crop_size))
    if args.random_color:
        t.append(transforms.RandomJitter(0.4, 0.4, 0.4))
    t.extend(
        [transforms.RandomHorizontalFlip(),
         transforms.ToTensor(), normalize])
    train_loader = torch.utils.data.DataLoader(SegList(
        data_dir, 'train', transforms.Compose(t), binary=(args.classes == 2)),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=num_workers,
                                               pin_memory=True)
    # Validation uses only a random crop (no flip/jitter) plus normalize.
    val_loader = torch.utils.data.DataLoader(
        SegList(
            data_dir,
            'val',
            transforms.Compose([
                transforms.RandomCrop(crop_size),
                # transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]),
            binary=(args.classes == 2)),
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True)
    # Optimize the parameter groups the (unwrapped) model exposes, not the
    # DataParallel wrapper's.
    optimizer = torch.optim.SGD(single_model.optim_parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    cudnn.benchmark = True
    best_prec1 = 0
    start_epoch = 0

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    if args.evaluate:
        validate(val_loader, model, criterion, eval_score=accuracy)
        return

    for epoch in range(start_epoch, args.epochs):
        lr = adjust_learning_rate(args, optimizer, epoch)
        print('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
        # train for one epoch
        train(train_loader,
              model,
              criterion,
              optimizer,
              epoch,
              eval_score=accuracy)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, eval_score=accuracy)

        # Track the best validation prec@1 and checkpoint every epoch.
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        checkpoint_path = 'checkpoint_latest.pth.tar'
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            },
            is_best,
            filename=checkpoint_path)
        if (epoch + 1) % args.save_freq == 0:
            history_path = 'checkpoint_{:03d}.pth.tar'.format(epoch + 1)
            shutil.copyfile(checkpoint_path, history_path)
예제 #17
0
def test_seg(args):
    """Evaluate a DLA-up segmentation model on the ``args.phase`` split.

    Optionally loads weights from ``args.resume``, runs single- or
    multi-scale testing (``args.ms``), writes visualizations to an output
    directory derived from arch/epoch/phase, and prints the resulting mAP.
    """
    batch_size = args.batch_size
    num_workers = args.workers
    phase = args.phase

    # Dump the full configuration for the log.
    for k, v in args.__dict__.items():
        print(k, ':', v)

    single_model = dla_up.__dict__.get(args.arch)(args.classes,
                                                  down_ratio=args.down)

    model = torch.nn.DataParallel(single_model).cuda()

    data_dir = args.data_dir
    info = dataset.load_dataset_info(data_dir)
    normalize = transforms.Normalize(mean=info.mean, std=info.std)
    # Additional scales evaluated when multi-scale testing is enabled.
    # scales = [0.5, 0.75, 1.25, 1.5, 1.75]
    scales = [0.5, 0.75, 1.25, 1.5]
    t = []
    if args.crop_size > 0:
        t.append(transforms.PadToSize(args.crop_size))
    t.extend([transforms.ToTensor(), normalize])
    if args.ms:
        data = SegListMS(data_dir, phase, transforms.Compose(t), scales)
    else:
        data = SegList(data_dir,
                       phase,
                       transforms.Compose(t),
                       out_name=True,
                       out_size=True,
                       binary=args.classes == 2)
    test_loader = torch.utils.data.DataLoader(data,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              num_workers=num_workers,
                                              pin_memory=False)

    cudnn.benchmark = True

    # optionally resume from a checkpoint
    start_epoch = 0
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            # NOTE(review): best_prec1 is loaded but never used in this
            # function.
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # Output directory encodes arch, resumed epoch, phase and options.
    out_dir = '{}_{:03d}_{}'.format(args.arch, start_epoch, phase)
    if len(args.test_suffix) > 0:
        out_dir += '_' + args.test_suffix

    if args.ms:
        out_dir += '_ms'

    if args.ms:
        mAP = test_ms(test_loader,
                      model,
                      args.classes,
                      save_vis=True,
                      has_gt=phase != 'test' or args.with_gt,
                      output_dir=out_dir,
                      scales=scales)
    else:
        mAP = test(test_loader,
                   model,
                   args.classes,
                   save_vis=True,
                   has_gt=phase != 'test' or args.with_gt,
                   output_dir=out_dir)
    print('mAP: ', mAP)
예제 #18
0
            self.label_list = [line.strip() for line in open(label_path, 'r')]
            assert len(self.image_list) == len(self.label_list)


if __name__ == "__main__":
    # Smoke-test the SegDepthList dataloader against a local copy of the
    # cityscape dataset (hard-coded developer path).
    data_dir = "/home/amogh/data/datasets/drn_data/DRN-move/cityscape_dataset/"
    info = json.load(open(join(data_dir, 'info.json'), 'r'))
    normalize = transforms.Normalize(mean=info['mean'], std=info['std'])
    t = []
    # t.append(transforms.RandomRotate(0))
    # t.append(transforms.RandomScale(0))
    t.extend([
        transforms.RandomCrop(896),
        # transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize
    ])
    # Single-worker, batch-size-1 loader so samples come back in a fixed,
    # inspectable order.
    # loader = SegDepthList(data_dir="/home/amogh/data/datasets/drn_data/DRN-move/cityscape_dataset/",
    loader = torch.utils.data.DataLoader(SegDepthList(data_dir,
                                                      'train',
                                                      transforms.Compose(t),
                                                      list_dir=None),
                                         batch_size=1,
                                         shuffle=False,
                                         num_workers=1,
                                         pin_memory=True,
                                         drop_last=True)

    # Figure with 5 axes, presumably one per sample/channel to visualize
    # — TODO confirm against the plotting code that follows.
    import matplotlib.pyplot as plt
    f, ax = plt.subplots(5)
예제 #19
0
    #           # transforms.RandomContrastImg((0.8, 1.2)),
    #           # transforms.RandomGaussianNoiseImg(0.02),
    #           # transforms.ToNumpy(255.0),
    #           transforms.ToTensor(convert_pix_range=False),
    #           normalize])

    t.extend([
        transforms.RandomCrop((768, 320), 4),
        transforms.RandomHorizontalFlip(),
        # transforms.ToNumpy(1/255.0),
        # transforms.RandomGammaImg((0.7,1.5)),
        # transforms.RandomBrightnessImg(0.2),
        # transforms.RandomContrastImg((0.8, 1.2)),
        # transforms.RandomGaussianNoiseImg(0.02),
        # transforms.ToNumpy(255.0),
        transforms.ToTensor(convert_pix_range=False),
        normalize
    ])

    # data_dir = '/home/hzjiang/workspace/Data/CityScapes'
    data_dir = '/home/hzjiang/workspace/Data/KITTI_Semantics'
    train_data = SegList(data_dir,
                         'train',
                         transforms.Compose(t),
                         list_dir=data_dir)

    for i, (image, label) in enumerate(train_data):
        # for c in range(3):
        #     print('--- after norm: ', c, torch.max(image[:,:,c]), torch.min(image[:,:,c]))
        print(image.size(), label.size())
        image = image.numpy().transpose(1, 2, 0)
예제 #20
0
def get_loader(args, split, out_name=False, customized_task_set=None):
    """Return a data loader for the given dataset and split.

    Supports 'taskonomy', 'voc', 'coco' and 'cityscape' (the value of
    ``args.dataset``) with splits 'train', 'val' and 'adv_val' (the
    adversarial-evaluation split always uses batch size 1 for voc/coco
    and cityscape). ``customized_task_set`` overrides ``args.task_set``
    when given. Returns ``None`` for an unrecognized dataset/split.
    """
    dataset = args.dataset
    loader = None

    if customized_task_set is None:
        task_set = args.task_set
    else:
        task_set = customized_task_set

    if dataset == 'taskonomy':
        print('using taskonomy')
        # NOTE(review): the train loader uses shuffle=False — unusual for
        # training; confirm this is intentional.
        if split == 'train':
            loader = torch.utils.data.DataLoader(TaskonomyLoader(
                root=args.data_dir,
                is_training=True,
                threshold=1200,
                task_set=task_set,
                model_whitelist=None,
                model_limit=30,
                output_size=None),
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=args.workers,
                                                 pin_memory=True,
                                                 drop_last=True)

        # NOTE(review): 'val' and 'adv_val' build identical loaders here
        # (same batch size), unlike the other datasets where adv_val uses
        # batch size 1.
        if split == 'val':
            loader = torch.utils.data.DataLoader(
                TaskonomyLoader(root=args.data_dir,
                                is_training=False,
                                threshold=1200,
                                task_set=task_set,
                                model_whitelist=None,
                                model_limit=30,
                                output_size=None),
                batch_size=args.test_batch_size,
                shuffle=False,
                num_workers=args.workers,
                pin_memory=True,
                drop_last=True)

        if split == 'adv_val':
            loader = torch.utils.data.DataLoader(
                TaskonomyLoader(root=args.data_dir,
                                is_training=False,
                                threshold=1200,
                                task_set=task_set,
                                model_whitelist=None,
                                model_limit=30,
                                output_size=None),
                batch_size=args.test_batch_size,
                shuffle=False,
                num_workers=args.workers,
                pin_memory=True,
                drop_last=True)

    elif dataset == 'voc':
        if split == 'train':
            loader = torch.utils.data.DataLoader(VOCSegmentation(
                args=args, base_dir=args.data_dir, split='train'),
                                                 batch_size=args.batch_size,
                                                 shuffle=True,
                                                 num_workers=args.workers,
                                                 pin_memory=True,
                                                 drop_last=True)
        elif split == 'val':
            loader = torch.utils.data.DataLoader(
                VOCSegmentation(args=args,
                                base_dir=args.data_dir,
                                split='val',
                                out_name=out_name),
                batch_size=args.test_batch_size,
                shuffle=False,
                num_workers=args.workers,
                pin_memory=True,
                drop_last=True)
        elif split == 'adv_val':
            loader = torch.utils.data.DataLoader(VOCSegmentation(
                args=args,
                base_dir=args.data_dir,
                split='val',
                out_name=out_name),
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=args.workers,
                                                 pin_memory=True,
                                                 drop_last=True)

    elif dataset == 'coco':
        if split == 'train':
            loader = torch.utils.data.DataLoader(COCOSegmentation(
                args=args, base_dir=args.data_dir, split='train'),
                                                 batch_size=args.batch_size,
                                                 shuffle=True,
                                                 num_workers=args.workers,
                                                 pin_memory=True,
                                                 drop_last=True)
        elif split == 'val':
            loader = torch.utils.data.DataLoader(
                COCOSegmentation(args=args,
                                 base_dir=args.data_dir,
                                 split='val',
                                 out_name=out_name),
                batch_size=args.test_batch_size,
                shuffle=False,
                num_workers=args.workers,
                pin_memory=True,
                drop_last=True)
        elif split == 'adv_val':
            loader = torch.utils.data.DataLoader(COCOSegmentation(
                args=args,
                base_dir=args.data_dir,
                split='val',
                out_name=out_name),
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=args.workers,
                                                 pin_memory=True,
                                                 drop_last=True)

    elif dataset == 'cityscape':
        data_dir = args.data_dir
        info = json.load(open(join(data_dir, 'info.json'), 'r'))
        normalize = transforms.Normalize(mean=info['mean'], std=info['std'])
        # Training-time augmentation pipeline for cityscape.
        t = []
        if args.random_rotate > 0:
            t.append(transforms.RandomRotate(args.random_rotate))
        if args.random_scale > 0:
            t.append(transforms.RandomScale(args.random_scale))
        t.extend([
            transforms.RandomCrop(args.crop_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(), normalize
        ])

        # A present task_set attribute selects the multi-task
        # (seg + depth) loader; otherwise plain segmentation.
        task_set_present = hasattr(args, 'task_set')
        if split == 'train':
            if task_set_present:
                print(
                    "\nCAUTION: THE DATALOADER IS FOR MULTITASK ON CITYSCAPE\n"
                )
                loader = torch.utils.data.DataLoader(
                    SegDepthList(data_dir,
                                 'train',
                                 transforms.Compose(t),
                                 list_dir=args.list_dir),
                    batch_size=args.batch_size,
                    shuffle=True,
                    num_workers=args.workers,
                    pin_memory=True,
                    drop_last=True)
            else:
                loader = torch.utils.data.DataLoader(
                    SegList(data_dir,
                            'train',
                            transforms.Compose(t),
                            list_dir=args.list_dir),
                    batch_size=args.batch_size,
                    shuffle=True,
                    num_workers=args.workers,
                    pin_memory=True,
                    drop_last=True)
        elif split == 'val':
            # BUG FIX: this branch previously tested `args.task_set != []`
            # directly, which raised AttributeError when args had no
            # task_set attribute (the other branches guard with hasattr).
            # Guard first, keeping the empty-task_set behavior unchanged.
            if task_set_present and args.task_set != []:
                print(
                    "\nCAUTION: THE DATALOADER IS FOR MULTITASK ON CITYSCAPE\n"
                )
                loader = torch.utils.data.DataLoader(
                    SegDepthList(data_dir,
                                 'val',
                                 transforms.Compose([
                                     transforms.ToTensor(),
                                     normalize,
                                 ]),
                                 list_dir=args.list_dir,
                                 out_name=out_name),
                    batch_size=args.test_batch_size,
                    shuffle=False,
                    num_workers=args.workers,
                    pin_memory=True,
                    drop_last=True)
            else:
                print("city test eval!")
                loader = torch.utils.data.DataLoader(
                    SegList(data_dir,
                            'val',
                            transforms.Compose([
                                transforms.ToTensor(),
                                normalize,
                            ]),
                            list_dir=args.list_dir,
                            out_name=out_name),
                    batch_size=args.test_batch_size,
                    shuffle=False,
                    num_workers=args.workers,
                    pin_memory=True,
                    drop_last=True)
        elif split == 'adv_val':  # has batch size 1
            if task_set_present:
                print(
                    "\nCAUTION: THE DATALOADER IS FOR MULTITASK ON CITYSCAPE\n"
                )
                loader = torch.utils.data.DataLoader(SegDepthList(
                    data_dir,
                    'val',
                    transforms.Compose([
                        transforms.ToTensor(),
                        normalize,
                    ]),
                    list_dir=args.list_dir,
                    out_name=out_name),
                                                     batch_size=1,
                                                     shuffle=False,
                                                     num_workers=args.workers,
                                                     pin_memory=True,
                                                     drop_last=True)
            else:
                loader = torch.utils.data.DataLoader(SegList(
                    data_dir,
                    'val',
                    transforms.Compose([
                        transforms.ToTensor(),
                        normalize,
                    ]),
                    list_dir=args.list_dir,
                    out_name=out_name),
                                                     batch_size=1,
                                                     shuffle=False,
                                                     num_workers=args.workers,
                                                     pin_memory=True,
                                                     drop_last=True)

    return loader
예제 #21
0
def test_seg(args):
    """Evaluate a DRNSeg segmentation model on the ``args.phase`` split.

    Optionally loads pretrained weights and/or resumes from a checkpoint,
    runs single- or multi-scale testing (``args.ms``), writes
    visualizations to a directory derived from arch/epoch/phase, and
    logs the resulting mAP.
    """
    batch_size = args.batch_size
    num_workers = args.workers
    phase = args.phase

    # Dump the full configuration for the log.
    for k, v in args.__dict__.items():
        print(k, ':', v)

    single_model = DRNSeg(args.arch,
                          args.classes,
                          pretrained_model=None,
                          pretrained=False)
    if args.pretrained:
        single_model.load_state_dict(torch.load(args.pretrained))
    model = torch.nn.DataParallel(single_model).cuda()

    data_dir = args.data_dir
    info = json.load(open(join(data_dir, 'info.json'), 'r'))
    normalize = data_transforms.Normalize(mean=info['mean'], std=info['std'])
    # Additional scales evaluated when multi-scale testing is enabled.
    scales = [0.5, 0.75, 1.25, 1.5, 1.75]
    if args.ms:
        dataset = SegListMS(data_dir,
                            phase,
                            data_transforms.Compose([
                                data_transforms.ToTensor(),
                                normalize,
                            ]),
                            scales,
                            list_dir=args.list_dir)
    else:
        dataset = SegList(data_dir,
                          phase,
                          data_transforms.Compose([
                              data_transforms.ToTensor(),
                              normalize,
                          ]),
                          list_dir=args.list_dir,
                          out_name=True)
    test_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              num_workers=num_workers,
                                              pin_memory=False)

    cudnn.benchmark = True

    # optionally resume from a checkpoint
    start_epoch = 0
    if args.resume:
        if os.path.isfile(args.resume):
            logger.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            # NOTE(review): best_prec1 is loaded but never used in this
            # function.
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            logger.info("=> no checkpoint found at '{}'".format(args.resume))

    # Output directory encodes arch, resumed epoch, phase and options.
    out_dir = '{}_{:03d}_{}'.format(args.arch, start_epoch, phase)
    if len(args.test_suffix) > 0:
        out_dir += '_' + args.test_suffix
    if args.ms:
        out_dir += '_ms'

    if args.ms:
        mAP = test_ms(test_loader,
                      model,
                      args.classes,
                      save_vis=True,
                      has_gt=phase != 'test' or args.with_gt,
                      output_dir=out_dir,
                      scales=scales)
    else:
        mAP = test(test_loader,
                   model,
                   args.classes,
                   save_vis=True,
                   has_gt=phase != 'test' or args.with_gt,
                   output_dir=out_dir)
    logger.info('mAP: %f', mAP)
예제 #22
0
def train_house(options, model_dir=None, resume_path=None):
    """Train a DRNSeg segmentation model on the house dataset.

    Args:
        options: config namespace providing ``arch``, ``num_classes``,
            ``pretrained``, ``data_dir``, ``input_size``, ``batch_size``,
            ``num_workers``, ``learning_rate``, ``momentum``,
            ``weight_decay`` and ``epochs``.
        model_dir: directory where checkpoints and loss/score history are
            written; ``None`` disables all saving.
        resume_path: optional path to a checkpoint to resume training from.
    """
    print('Building model...')
    single_model = segment.DRNSeg(options.arch,
                                  options.num_classes,
                                  None,
                                  pretrained=False)
    if options.pretrained:
        single_model.load_state_dict(torch.load(options.pretrained))
    model = torch.nn.DataParallel(single_model).cuda()
    # NLLLoss2d was deprecated and later removed from PyTorch; NLLLoss
    # accepts the same (N, C, H, W) inputs since 0.4 and behaves identically.
    criterion = torch.nn.NLLLoss(ignore_index=_ignore_idx)
    criterion.cuda()

    # Data loading
    print('Loading data')
    with open(os.path.join(options.data_dir, 'info.json')) as f:
        info_json = json.load(f)
    t_normalize = data_transforms.Normalize(mean=info_json['mean'],
                                            std=info_json['std'])
    t_rescale = RescaleToFixedSize(options.input_size)
    # Random augmentation belongs to training only; validation must be
    # deterministic, so it gets its own pipeline without the random flip.
    transforms = [
        t_rescale,
        RandomHorizontalFlip(),
        IgnoreUnlabelledPixels(),
        data_transforms.ToTensor(), t_normalize
    ]
    val_transforms = [
        t_rescale,
        IgnoreUnlabelledPixels(),
        data_transforms.ToTensor(), t_normalize
    ]
    train_loader = torch.utils.data.DataLoader(HouseDataList(
        options.data_dir, 'train', data_transforms.Compose(transforms)),
                                               batch_size=options.batch_size,
                                               shuffle=True,
                                               num_workers=options.num_workers,
                                               pin_memory=True,
                                               drop_last=True)
    # Validation: no shuffling and no dropped tail batch, so every sample is
    # scored exactly once and the reported metric is reproducible.
    val_loader = torch.utils.data.DataLoader(HouseDataList(
        options.data_dir, 'validation',
        data_transforms.Compose(val_transforms)),
                                             batch_size=options.batch_size,
                                             shuffle=False,
                                             num_workers=options.num_workers,
                                             pin_memory=True,
                                             drop_last=False)

    # Define loss function (critierion) and optimizer
    print('Setting up optimizer')
    optimizer = torch.optim.SGD(single_model.optim_parameters(),
                                options.learning_rate, options.momentum,
                                options.weight_decay)
    torch.backends.cudnn.benchmark = True
    best_prec1 = 0
    start_epoch = 0

    # Per-epoch [loss, score] pairs, dumped to text files at the end.
    train_loss_score = []
    validation_loss_score = []

    if resume_path:
        if os.path.isfile(resume_path):
            print('Loading checkpoint {}'.format(resume_path))
            checkpoint = torch.load(resume_path)
            start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            # NOTE(review): optimizer state is not stored in the checkpoint,
            # so momentum buffers restart from scratch on resume.
            print('Checkpoint {} loaded'.format(resume_path))
        else:
            print('Invalid checkpoint file path.')
    for epoch in range(start_epoch, options.epochs):
        lr = adjust_learning_rate(options, optimizer, epoch)
        logger.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
        train_loss, train_score = segment.train(train_loader,
                                                model,
                                                criterion,
                                                optimizer,
                                                epoch,
                                                eval_score=segment.accuracy)
        val_loss, val_score = segment.validate(val_loader,
                                               model,
                                               criterion,
                                               eval_score=segment.accuracy)

        train_loss_score.append([train_loss, train_score])
        validation_loss_score.append([val_loss, val_score])

        if model_dir:
            is_best = val_score > best_prec1
            best_prec1 = max(val_score, best_prec1)
            checkpoint_path = os.path.join(model_dir,
                                           'checkpoint_latest.pth.tar')
            segment.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': options.arch,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1
                },
                is_best,
                filename=checkpoint_path)
            print('Checkpoint for epoch {} saved.'.format(epoch + 1))
            # Keep a numbered snapshot every 10 epochs in addition to "latest".
            if (epoch + 1) % 10 == 0:
                history_path = os.path.join(
                    model_dir, 'checkpoint_{:03d}.pth.tar'.format(epoch + 1))
                print('Checkpoint file saved to ' + history_path)
                shutil.copyfile(checkpoint_path, history_path)
    if model_dir:
        # Save the training history
        train_loss_score = np.array(train_loss_score)
        validation_loss_score = np.array(validation_loss_score)
        np.savetxt(os.path.join(model_dir, 'validation_loss_score.txt'),
                   validation_loss_score)
        np.savetxt(os.path.join(model_dir, 'train_loss_score.txt'),
                   train_loss_score)