Example #1
    def _download(self):
        _fpath = os.path.join(Path.db_root_dir(), self.FILE)

        if os.path.isfile(_fpath):
            print('Files already downloaded')
            return
        else:
            print('Downloading ' + self.URL + ' to ' + _fpath)

            def _progress(count, block_size, total_size):
                sys.stdout.write('\r>> %s %.1f%%' %
                                 (_fpath, float(count * block_size) /
                                  float(total_size) * 100.0))
                sys.stdout.flush()

            urllib.request.urlretrieve(self.URL, _fpath, _progress)

        # extract file
        cwd = os.getcwd()
        print('\nExtracting tar file')
        tar = tarfile.open(_fpath)
        os.chdir(Path.db_root_dir())
        tar.extractall()
        tar.close()
        os.chdir(cwd)
        print('Done!')
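For comparison, the same download-and-extract pattern can be written as a standalone helper. This is a minimal sketch (the function and its arguments are illustrative, not part of the codebase); passing path= to extractall avoids the os.chdir round-trip used above:

import os
import sys
import tarfile
import urllib.request

def download_and_extract(url, dst_dir, filename):
    fpath = os.path.join(dst_dir, filename)
    if os.path.isfile(fpath):
        print('Files already downloaded')
        return

    def _progress(count, block_size, total_size):
        sys.stdout.write('\r>> %s %.1f%%' %
                         (fpath, float(count * block_size) /
                          float(total_size) * 100.0))
        sys.stdout.flush()

    print('Downloading ' + url + ' to ' + fpath)
    urllib.request.urlretrieve(url, fpath, _progress)

    # Extract in place without changing the working directory
    print('\nExtracting tar file')
    with tarfile.open(fpath) as tar:
        tar.extractall(path=dst_dir)
    print('Done!')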
Example #2
def check_downloaded(p):
    def _progress(count, block_size, total_size):
        sys.stdout.write('\r>> %s %.1f%%' %
                         (_fpath, float(count * block_size) /
                          float(total_size) * 100.0))
        sys.stdout.flush()

    def _create_url(name):
        return 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL//models/astmt/{}.tgz'.format(name)

    _model_urls = {
        'pascal_mnet_edge_semseg_human_parts_normals_sal_'
        'arch-mnetv2_pretr-imagenet_trBatch-16_lr-0.001_epochs-130_trNorm_poly_seenc_sedec_edge_w-0.95_130'
    }

    ans = False
    _check = p['exp_folder_name'] + '_' + p['tasks_name'] + '_' + p['exp_name'] + '_' + str(p['resume_epoch'])
    _fpath = os.path.join(Path.exp_dir(), _check + '.tgz')

    if _check in _model_urls:
        if not os.path.isfile(os.path.join(p['save_dir'], 'models',
                                           'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')):
            urllib.request.urlretrieve(_create_url(_check), _fpath, _progress)

            # extract file
            cwd = os.getcwd()
            print('\nExtracting tar file')
            tar = tarfile.open(_fpath)
            os.chdir(Path.exp_dir())
            tar.extractall()
            tar.close()
            os.chdir(cwd)
            print('Done!')
        ans = True
    return ans
Example #3
def mobilenet_v2(pretrained='scratch', **kwargs):
    model = MobileNetV2(**kwargs)

    if pretrained == 'imagenet':

        print('loading pre-trained imagenet model')
        model_full = mobilenet_v2_imagenet.mobilenet_v2(pretrained=True)
        model.load_pretrained(model_full)
    elif pretrained == 'coco':
        print('loading pre-trained COCO model')
        # Load checkpoint
        checkpoint = torch.load(os.path.join(Path.models_dir(),
                                             'mobilenet_v2_coco_80.pth'),
                                map_location=lambda storage, loc: storage)

        # handle dataparallel
        if 'module.' in list(checkpoint.keys())[0]:
            new_state_dict = OrderedDict()
            for k, v in checkpoint.items():
                name = k.replace('module.', '')  # remove `module.`
                new_state_dict[name] = v
        else:
            new_state_dict = checkpoint

        # Load pre-trained IN model
        model.load_state_dict(new_state_dict)

    elif pretrained == 'scratch':
        print('training from scratch')
    else:
        raise NotImplementedError(
            'select imagenet, coco, or scratch for pre-training')

    return model
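Hypothetical usage, assuming the imports in the snippet (mobilenet_v2_imagenet, Path, torch) resolve:

model = mobilenet_v2(pretrained='imagenet')   # or 'coco' / 'scratch'
model.eval()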
Example #4
    def __init__(self,
                 split,
                 area_range=[],
                 only_pascal_categories=False,
                 mask_per_class=True,
                 db_root=Path.db_root_dir('COCO'),
                 n_samples=-1,
                 transform=None,
                 retname=True,
                 overfit=False):

        self.split = split
        self.root = os.path.join(db_root, 'images', split)
        annFile = os.path.join(db_root, 'annotations',
                               'instances_' + split + '.json')
        self.coco = COCO(annFile)
        self.pascal_cat_name = [
            'person', 'bird', 'cat', 'cow', 'dog', 'horse', 'sheep',
            'airplane', 'bicycle', 'boat', 'bus', 'car', 'motorcycle', 'train',
            'bottle', 'chair', 'dining table', 'potted plant', 'couch', 'tv'
        ]

        self.only_pascal_categories = only_pascal_categories
        if self.only_pascal_categories:
            cat_ids = self.coco.getCatIds(catNms=self.pascal_cat_name)
        else:
            cat_ids = self.coco.getCatIds()

        self.img_ids = list(self.coco.imgs.keys())

        self.ids = self.coco.getAnnIds(imgIds=self.img_ids,
                                       areaRng=area_range,
                                       catIds=cat_ids)
        self.transform = transform
        self.area_range = area_range
        self.cat_ids = cat_ids
        self.mask_per_class = mask_per_class
        self.retname = retname

        if self.mask_per_class:
            self._select_imgs()

        if n_samples > 0:
            if self.mask_per_class:
                self.img_ids = list(self.img_ids)[:n_samples]
            else:
                self.ids = self.ids[:n_samples]
        if overfit:
            n_of = 64
            self.img_ids = list(self.img_ids)[:n_of]

        # Display stats
        if self.mask_per_class:
            print("Number of images: {:d}".format(len(self.img_ids)))
        else:
            print('Number of images: {:d}\nNumber of objects: {:d}'.format(
                len(self.coco.imgs), len(self.ids)))
Example #5
    def __init__(self,
                 root=Path.db_root_dir('MSRA10K'),
                 download=True,
                 split='trainval',
                 transform=None,
                 retname=True,
                 overfit=False):

        if download:
            self._download()

        self.transform = transform

        self.retname = retname

        self.root = root
        self.gt_dir = os.path.join(self.root, 'gt')
        self.image_dir = os.path.join(self.root, 'Imgs')

        _splits_dir = os.path.join(self.root, 'gt_sets')

        self.split = split

        if isinstance(self.split, str):
            self.split = [self.split]

        self.images = []
        self.gts = []
        self.im_ids = []

        for sp in self.split:
            with open(os.path.join(_splits_dir, sp + '.txt'), "r") as f:
                lines = f.readlines()

            for line in lines:
                line = line.strip()

                _image = os.path.join(self.image_dir, line + ".jpg")
                _gt = os.path.join(self.gt_dir, line + ".png")

                assert os.path.isfile(_image)
                assert os.path.isfile(_gt)
                self.im_ids.append(line)
                self.images.append(_image)
                self.gts.append(_gt)

        assert (len(self.images) == len(self.gts) == len(self.im_ids))

        if overfit:
            n_of = 64
            self.images = self.images[:n_of]
            self.gts = self.gts[:n_of]
            self.im_ids = self.im_ids[:n_of]

        # Display stats
        print('Number of images: {:d}'.format(len(self.im_ids)))
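The constructor only builds file lists; the corresponding __getitem__/__len__ are not shown. A minimal sketch of what they might look like (an assumption, following the dict-of-arrays sample convention suggested by the transform/retname parameters):

    def __getitem__(self, index):
        import numpy as np            # assumed available
        from PIL import Image         # assumed available
        sample = {}
        sample['image'] = np.array(Image.open(self.images[index]).convert('RGB'),
                                   dtype=np.float32)
        sample['sal'] = np.array(Image.open(self.gts[index]).convert('L'),
                                 dtype=np.float32) / 255.
        if self.retname:
            sample['meta'] = {'image': str(self.im_ids[index]),
                              'im_size': (sample['image'].shape[0],
                                          sample['image'].shape[1])}
        if self.transform is not None:
            sample = self.transform(sample)
        return sample

    def __len__(self):
        return len(self.images)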
Example #6
def main():
    exp_root_dir = os.path.join(Path.exp_dir(), 'pascal_se')
    edge_dirs = glob.glob(os.path.join(exp_root_dir, 'edge*'))

    p = {}

    for edge_dir in edge_dirs:
        # glob already returns full paths, so no extra join is needed
        p['save_dir_root'] = edge_dir
        # sync_and_evaluate_subfolders(p, 'NYUD')
        gather_results(p, 'PASCALContext')
Example #7
def visualize_network(net, p):
    net.eval()
    x = torch.randn(1, 3, 512, 512)
    x.requires_grad_()

    # pdf visualizer
    y = {}
    for task in p.TASKS.NAMES:
        y[task], _ = net.forward(x, task)
    g = viz.make_dot(y, net.state_dict())
    g.view(directory=Path.save_root_dir())
Example #8
def main():
    from fblib.util.mypath import Path
    database = 'FSV'
    save_dir = os.path.join(Path.exp_dir(), 'fsv_se/albedo')

    # Evaluate all sub-folders
    exp_names = glob.glob(save_dir + '/*')
    exp_names = [x.split('/')[-1] for x in exp_names]
    for exp_name in exp_names:
        if os.path.isdir(os.path.join(save_dir, exp_name, 'Results_' + database, 'albedo')):
            print('Evaluating: {}'.format(exp_name))
            try:
                eval_and_store_albedo(database, save_dir, exp_name)
            except FileNotFoundError:
                print('Results of {} are not ready'.format(exp_name))
Example #9
def Res_Deeplab(n_classes=21, pretrained=None):
    model = MS_Deeplab(Bottleneck, n_classes)
    if pretrained is not None:
        if pretrained == 'voc':
            pth_model = 'MS_DeepLab_resnet_trained_VOC.pth'
        elif pretrained == 'ms_coco':
            pth_model = 'MS_DeepLab_resnet_pretrained_COCO_init.pth'
        else:
            raise ValueError(
                "pretrained must be 'voc' or 'ms_coco', got '{}'".format(pretrained))
        saved_state_dict = torch.load(
            os.path.join(Path.models_dir(), pth_model),
            map_location=lambda storage, loc: storage)
        if n_classes != 21:
            for i in saved_state_dict:
                i_parts = i.split('.')
                if i_parts[1] == 'layer5':
                    saved_state_dict[i] = model.state_dict()[i]
        model.load_state_dict(saved_state_dict)
    return model
Example #10
def sync_evaluated_results(database, save_dir, exp_name, prefix=None):
    if prefix is not None:
        res_exp_name = prefix + '_' + exp_name
    else:
        res_exp_name = exp_name

    split = 'val'

    # Check whether results of experiment exists
    chk_dir = os.path.join(save_dir, exp_name, 'Results_' + database)
    if not os.path.exists(chk_dir):
        print('Experiment {} is not yet ready. Omitting this directory'.format(
            exp_name))
        return

    chk_file = os.path.join(
        save_dir, exp_name, 'Results_' + database,
        database + '_' + split + '_' + exp_name + '_edge.json')

    if os.path.isfile(chk_file):
        with open(chk_file, 'r') as f:
            eval_results = json.load(f)
    else:
        print('Creating json: {}'.format(res_exp_name))
        eval_results = {}
        for measure in {'ods_f', 'ois_f', 'ap'}:
            tmp_fname = os.path.join(
                Path.seism_root_dir(), 'results', 'pr_curves', database,
                database + '_' + split + '_fb_' + res_exp_name + '_' +
                measure + '.txt')
            if not os.path.isfile(tmp_fname):
                print('Result not available')
                continue

            with open(tmp_fname, 'r') as f:
                eval_results[measure] = float(f.read().strip())

        # Create edge json file
        if eval_results:
            print('Saving into .json: {}'.format(chk_file))
            with open(chk_file, 'w') as f:
                json.dump(eval_results, f)

    for measure in eval_results:
        print('{}: {}'.format(measure, eval_results[measure]))
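The read-the-cache-or-create-it logic above is a reusable pattern. A generic sketch (function and argument names are illustrative, not part of the codebase):

import json
import os

def cached_json(path, compute_fn):
    # Return cached results if the JSON file exists; otherwise compute and save
    if os.path.isfile(path):
        with open(path, 'r') as f:
            return json.load(f)
    results = compute_fn()
    if results:
        with open(path, 'w') as f:
            json.dump(results, f)
    return results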
Example #11
def main():
    from fblib.util.mypath import Path
    database = 'PASCALContext'
    save_dir = os.path.join(Path.exp_dir(),
                            'pascal_se/edge_semseg_human_parts_normals_sal')

    # Evaluate all sub-folders
    exp_names = glob.glob(save_dir + '/*')
    exp_names = [x.split('/')[-1] for x in exp_names]
    for exp_name in exp_names:
        if os.path.isdir(
                os.path.join(save_dir, exp_name, 'Results_' + database,
                             'human_parts')):
            print('Evaluating: {}'.format(exp_name))
            try:
                eval_and_store_human_parts(database, save_dir, exp_name)
            except FileNotFoundError:
                print('Results of {} are not ready'.format(exp_name))
Example #12
def resnet26(pretrained=False, remote=True):
    """Constructs a ResNet-26 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet_C5(Bottleneck, [2, 2, 2, 2])

    if pretrained:

        # Define ResNet26 ImageNet
        model_IN = resnet.ResNet(block=Bottleneck,
                                 layers=[2, 2, 2, 2],
                                 num_classes=1000)

        # Load checkpoint
        if remote:
            checkpoint = load_state_dict_from_url(model_urls['resnet26'],
                                                  map_location='cpu',
                                                  progress=True)
        else:
            checkpoint = torch.load(os.path.join(Path.models_dir(),
                                                 'resnet26.pth'),
                                    map_location=lambda storage, loc: storage)
        checkpoint = checkpoint['model_state']

        # Handle DataParallel
        if 'module.' in list(checkpoint.keys())[0]:
            new_state_dict = OrderedDict()
            for k, v in checkpoint.items():
                name = k.replace('module.', '')  # remove `module.`
                new_state_dict[name] = v
        else:
            new_state_dict = checkpoint

        # Load pre-trained IN model
        model_IN.load_state_dict(new_state_dict)

        # Load weights to dense-labelling network
        model.load_pretrained(model_IN)

    return model
Example #13
def se_mobilenet_v2(pretrained=False,
                    features=False,
                    n_class=1000,
                    last_channel=1280,
                    remote=True):

    if not features:
        model = SEMobileNetV2(n_class=n_class, last_channel=last_channel)
    else:
        model = SEMobileNetV2Features(n_class=n_class,
                                      last_channel=last_channel)

    if pretrained:
        print('Loading Imagenet pre-trained SE-MobileNet-v2')

        # Load checkpoint
        if remote:
            checkpoint = load_state_dict_from_url(
                model_urls['se_mobilenet_v2_1280'],
                map_location='cpu',
                progress=True)
        else:
            checkpoint = torch.load(os.path.join(Path.models_dir(),
                                                 'se_mobilenet_v2_1280.pth'),
                                    map_location=lambda storage, loc: storage)
        checkpoint = checkpoint['model_state']

        # Handle DataParallel
        if 'module.' in list(checkpoint.keys())[0]:
            new_state_dict = OrderedDict()
            for k, v in checkpoint.items():
                name = k.replace('module.', '')  # remove `module.`
                new_state_dict[name] = v
        else:
            new_state_dict = checkpoint

        # Load pre-trained IN model
        model.load_state_dict(new_state_dict)

    return model
Example #14
def get_state_dict_se(model_name, remote=True):
    # Load checkpoint
    if remote:
        checkpoint = load_state_dict_from_url(model_urls[model_name],
                                              map_location='cpu',
                                              progress=True)
    else:
        checkpoint = torch.load(os.path.join(Path.models_dir(),
                                             model_name + '.pth'),
                                map_location=lambda storage, loc: storage)
    checkpoint = checkpoint['model_state']

    # Handle DataParallel
    if 'module.' in list(checkpoint.keys())[0]:
        new_state_dict = OrderedDict()
        for k, v in checkpoint.items():
            name = k.replace('module.', '')  # remove `module.`
            new_state_dict[name] = v
    else:
        new_state_dict = checkpoint

    return new_state_dict
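This 'module.' stripping appears verbatim in several examples above; it undoes the prefix that torch.nn.DataParallel adds to parameter names. A generic sketch of the same idea (the helper name is illustrative), using replace(..., 1) so only the leading prefix is removed:

from collections import OrderedDict

def strip_dataparallel_prefix(state_dict):
    # Remove the leading 'module.' that nn.DataParallel prepends to keys
    if not any(k.startswith('module.') for k in state_dict):
        return state_dict
    return OrderedDict((k.replace('module.', '', 1), v)
                       for k, v in state_dict.items())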
Example #15
def resnet26(pretrained=False, features=False, remote=True, **kwargs):
    """Constructs a ResNet-26 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if not features:
        model = ResNet(Bottleneck, [2, 2, 2, 2], **kwargs)
    else:
        model = ResNetFeatures(Bottleneck, [2, 2, 2, 2], **kwargs)

    if pretrained:
        print('Loading resnet26 Imagenet')

        # Load checkpoint
        if remote:
            checkpoint = load_state_dict_from_url(model_urls['resnet26'],
                                                  map_location='cpu',
                                                  progress=True)
        else:
            checkpoint = torch.load(os.path.join(Path.models_dir(),
                                                 'resnet26.pth'),
                                    map_location=lambda storage, loc: storage)
        checkpoint = checkpoint['model_state']

        # Handle DataParallel
        if 'module.' in list(checkpoint.keys())[0]:
            new_state_dict = OrderedDict()
            for k, v in checkpoint.items():
                name = k.replace('module.', '')  # remove `module.`
                new_state_dict[name] = v
        else:
            new_state_dict = checkpoint

        # Load pre-trained IN model
        model.load_state_dict(new_state_dict)

    return model
Example #16
    def __init__(self, root=Path.db_root_dir('MNIST'), train=True, transform=None, target_transform=None, download=False,
                 multitask=False):
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.train = train  # training set or test set
        self.multitask = multitask

        if download:
            self.download()

        if not self._check_exists():
            raise RuntimeError('Dataset not found.' +
                               ' You can use download=True to download it')

        if self.train:
            self.train_data, self.train_labels = torch.load(
                os.path.join(self.root, self.processed_folder, self.training_file))
        else:
            self.test_data, self.test_labels = torch.load(
                os.path.join(self.root, self.processed_folder, self.test_file))

        if multitask:
            self._process_labels()
Example #17
def mobilenet_v2(pretrained=False,
                 features=False,
                 n_class=1000,
                 last_channel=1280,
                 remote=True):

    if not features:
        model = MobileNetV2(n_class=n_class, last_channel=last_channel)
    else:
        model = MobileNetV2Features(n_class=n_class, last_channel=last_channel)

    if pretrained:
        if remote:
            checkpoint = load_state_dict_from_url(
                model_urls['mobilenet_v2_1280'],
                map_location='cpu',
                progress=True)
        else:
            checkpoint = torch.load(os.path.join(Path.models_dir(),
                                                 'mobilenet_v2.pth'),
                                    map_location='cpu')

        model.load_state_dict(checkpoint)
    return model
Example #18
def sync_and_evaluate_one_folder(database,
                                 save_dir,
                                 exp_name,
                                 prefix=None,
                                 all_tasks_present=False):
    # dataset specific parameters
    if database == 'BSDS500':
        num_req_files = 200
        gt_set = ''
    elif database == 'PASCALContext':
        if all_tasks_present:
            num_req_files = 1853
            gt_set = 'val_all_tasks_present'
        else:
            num_req_files = 5105
            gt_set = 'val'
    elif database == 'NYUD':
        num_req_files = 654
        gt_set = 'val'
    else:
        raise NotImplementedError

    if prefix is None:
        res_exp_name = exp_name
    else:
        res_exp_name = prefix + '_' + exp_name

    # Check whether results of experiments exist
    chk_dir = os.path.join(save_dir, exp_name, 'Results_' + database, 'edge')
    if not os.path.exists(chk_dir):
        print('Experiment {} is not yet ready. Omitting this directory'.format(
            exp_name))
        return

    # Check for filenames
    fnames = sorted(glob.glob(os.path.join(chk_dir, '*')))
    if len(fnames) < num_req_files:
        print('Something is wrong with this directory. Check required: {}'.
              format(exp_name))
        return
    elif len(fnames) > num_req_files:
        print('Already synced: {}'.format(exp_name))
    else:
        # Seism path
        seism_cluster_dir = Path.seism_root_dir()

        # rsync to seism
        rsync_str = 'rsync -aP {}/ '.format(chk_dir)
        rsync_str += '[email protected]:{}/datasets/{}/{} '.format(
            seism_cluster_dir, database, res_exp_name)
        rsync_str += '--exclude=models --exclude=*.txt'
        print(rsync_str)
        os.system(rsync_str)

        # Submit the job
        subm_job_str = 'ssh [email protected] "source /home/sgeadmin/BIWICELL/common/settings.sh;'
        subm_job_str += 'cp {}/parameters/HED.txt {}/parameters/{}.txt; ' \
                        ''.format(seism_cluster_dir, seism_cluster_dir, res_exp_name)
        subm_job_str += 'qsub -N evalFb -t 1-102 {}/eval_in_cluster.py {} read_one_cont_png fb 1 102 {} {}"' \
                        ''.format(seism_cluster_dir, res_exp_name, database, gt_set)
        print(subm_job_str)
        os.system(subm_job_str)

        # Leave the proof of submission
        os.system('touch {}/SYNCED_TO_REINHOLD'.format(chk_dir))
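A design note: os.system with interpolated strings is fragile with spaces and quoting. The rsync step could equivalently use subprocess.run with an argument list (the paths and remote below are placeholders, not values from the codebase):

import subprocess

chk_dir = '/path/to/Results_PASCALContext/edge'            # placeholder
remote = 'user@host:/path/to/seism/datasets/DB/exp_name'   # placeholder

# Same rsync call; the list form avoids shell quoting pitfalls
subprocess.run(['rsync', '-aP', chk_dir + '/', remote,
                '--exclude=models', '--exclude=*.txt'],
               check=True)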
Example #19
    def __init__(
        self,
        root=Path.db_root_dir('NYUD_MT'),
        download=True,
        split='val',
        transform=None,
        retname=True,
        overfit=False,
        do_edge=True,
        do_semseg=False,
        do_normals=False,
        do_depth=False,
    ):

        self.root = root

        if download:
            self._download()

        self.transform = transform

        if isinstance(split, str):
            self.split = [split]
        else:
            split.sort()
            self.split = split

        self.retname = retname

        # Original Images
        self.im_ids = []
        self.images = []
        _image_dir = os.path.join(root, 'images')

        # Edge Detection
        self.do_edge = do_edge
        self.edges = []
        _edge_gt_dir = os.path.join(root, 'edge')

        # Semantic segmentation
        self.do_semseg = do_semseg
        self.semsegs = []
        _semseg_gt_dir = os.path.join(root, 'segmentation')

        # Surface Normals
        self.do_normals = do_normals
        self.normals = []
        _normal_gt_dir = os.path.join(root, 'normals')

        # Depth
        self.do_depth = do_depth
        self.depths = []
        _depth_gt_dir = os.path.join(root, 'depth')

        # train/val/test splits are pre-cut
        _splits_dir = os.path.join(root, 'gt_sets')

        print('Initializing dataloader for NYUD {} set'.format(''.join(
            self.split)))
        for splt in self.split:
            with open(os.path.join(_splits_dir, splt + '.txt'), 'r') as f:
                lines = f.read().splitlines()

            for ii, line in enumerate(lines):

                # Images
                _image = os.path.join(_image_dir, line + '.jpg')
                assert os.path.isfile(_image)
                self.images.append(_image)
                self.im_ids.append(line.rstrip('\n'))

                # Edges
                _edge = os.path.join(_edge_gt_dir, line + '.png')
                assert os.path.isfile(_edge)
                self.edges.append(_edge)

                # Semantic Segmentation
                _semseg = os.path.join(_semseg_gt_dir, line + '.mat')
                assert os.path.isfile(_semseg)
                self.semsegs.append(_semseg)

                # Surface Normals
                _normal = os.path.join(_normal_gt_dir, line + '.jpg')
                assert os.path.isfile(_normal)
                self.normals.append(_normal)

                # Depth
                _depth = os.path.join(_depth_gt_dir, line + '.mat')
                assert os.path.isfile(_depth)
                self.depths.append(_depth)

        if self.do_edge:
            assert (len(self.images) == len(self.edges))
        if self.do_semseg:
            assert (len(self.images) == len(self.semsegs))
        if self.do_normals:
            assert (len(self.images) == len(self.normals))
        if self.do_depth:
            assert (len(self.images) == len(self.depths))

        # Overfit to n_of images
        if overfit:
            n_of = 64
            self.images = self.images[:n_of]
            self.im_ids = self.im_ids[:n_of]

        # Display stats
        print('Number of dataset images: {:d}'.format(len(self.images)))
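These dataset constructors are meant to be wrapped in a standard PyTorch DataLoader. Hypothetical usage (the class name NYUD_MT is assumed; only keyword arguments shown in the snippet are used):

from torch.utils.data import DataLoader

db = NYUD_MT(split='val', do_edge=True, do_semseg=True)   # class name assumed
loader = DataLoader(db, batch_size=8, shuffle=False, num_workers=4)
for sample in loader:
    pass  # typically a dict of batched tensors, one entry per task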
Example #20
    def __init__(
        self,
        root=Path.db_root_dir('FSV'),
        split='test',
        mini=True,
        transform=None,
        retname=True,
        overfit=False,
        do_semseg=False,
        do_albedo=False,
        do_depth=False,
        prune_rare_classes=True,
    ):

        self.root = root
        self.transform = transform
        self.prune = []
        if prune_rare_classes:
            self.prune = [1, 4, 5, 6, 7]

        self.split = split

        self.retname = retname

        # Original Images
        self.im_ids = []
        self.images = []
        _image_dir = os.path.join(root, 'gta_' + split)

        # Semantic segmentation
        self.do_semseg = do_semseg
        self.semsegs = []

        # Albedo
        self.do_albedo = do_albedo
        self.albedos = []

        # Depth
        self.do_depth = do_depth
        self.depths = []

        # train/val/test splits are pre-cut
        _splits_dir = os.path.join(root, 'gt_sets')

        print("Initializing dataloader for FSV GTA {} set".format(self.split))
        with open(os.path.join(_splits_dir, 'gta_' + self.split + '.txt'),
                  "r") as f:
            lines = f.read().splitlines()

        if split == 'test' and mini:
            # Subsample to roughly 5000 lines; max(1, ...) guards against a zero step
            lines = lines[0:len(lines):max(1, len(lines) // 5000)]

        for ii, line in enumerate(lines):

            # Images
            _image = os.path.join(_image_dir, line + "_final.webp")
            # assert os.path.isfile(_image)
            self.images.append(_image)
            self.im_ids.append(line.rstrip('\n'))

            # Semantic Segmentation
            _semseg = os.path.join(_image_dir, line + "_object_id.png")
            # assert os.path.isfile(_semseg)
            self.semsegs.append(_semseg)

            # Albedo
            _albedo = os.path.join(_image_dir, line + "_albedo.webp")
            # assert os.path.isfile(_albedo)
            self.albedos.append(_albedo)

            # Depth Estimation
            _depth = os.path.join(_image_dir, line + "_disparity.webp")
            # assert os.path.isfile(_depth)
            self.depths.append(_depth)

        if self.do_semseg:
            assert (len(self.images) == len(self.semsegs))
        if self.do_albedo:
            assert (len(self.images) == len(self.albedos))
        if self.do_depth:
            assert (len(self.images) == len(self.depths))

        # Overfit to n_of images
        if overfit:
            n_of = 64
            self.images = self.images[:n_of]
            self.im_ids = self.im_ids[:n_of]

        # Display stats
        print('Number of dataset images: {:d}'.format(len(self.images)))
Example #21
    def __init__(
        self,
        root=Path.db_root_dir('PASCAL_MT'),
        download=True,
        split='val',
        transform=None,
        area_thres=0,
        retname=True,
        overfit=False,
        do_edge=True,
        do_human_parts=False,
        do_semseg=False,
        do_normals=False,
        do_sal=False,
        num_human_parts=6,
    ):

        self.root = root
        if download:
            self._download()

        image_dir = os.path.join(self.root, 'JPEGImages')
        self.transform = transform

        if isinstance(split, str):
            self.split = [split]
        else:
            split.sort()
            self.split = split

        self.area_thres = area_thres
        self.retname = retname

        # Edge Detection
        self.do_edge = do_edge
        self.edges = []
        edge_gt_dir = os.path.join(self.root, 'pascal-context', 'trainval')

        # Semantic Segmentation
        self.do_semseg = do_semseg
        self.semsegs = []

        # Human Part Segmentation
        self.do_human_parts = do_human_parts
        part_gt_dir = os.path.join(self.root, 'human_parts')
        self.parts = []
        self.human_parts_category = 15
        self.cat_part = json.load(
            open(
                os.path.join(os.path.dirname(__file__),
                             '../util/db_info/pascal_part.json'), 'r'))
        self.cat_part["15"] = self.HUMAN_PART[num_human_parts]
        self.parts_file = os.path.join(
            os.path.join(self.root, 'ImageSets', 'Parts'),
            ''.join(self.split) + '.txt')

        # Surface Normal Estimation
        self.do_normals = do_normals
        _normal_gt_dir = os.path.join(self.root, 'normals_distill')
        self.normals = []
        if self.do_normals:
            with open(
                    os.path.join(PROJECT_ROOT_DIR,
                                 'util/db_info/nyu_classes.json')) as f:
                cls_nyu = json.load(f)
            with open(
                    os.path.join(PROJECT_ROOT_DIR,
                                 'util/db_info/context_classes.json')) as f:
                cls_context = json.load(f)

            self.normals_valid_classes = []
            for cl_nyu in cls_nyu:
                if cl_nyu in cls_context and cl_nyu != 'unknown':
                    self.normals_valid_classes.append(cls_context[cl_nyu])

            # Custom additions due to incompatibilities
            self.normals_valid_classes.append(cls_context['tvmonitor'])

        # Saliency
        self.do_sal = do_sal
        _sal_gt_dir = os.path.join(self.root, 'sal_distill')
        self.sals = []

        # train/val/test splits are pre-cut
        _splits_dir = os.path.join(self.root, 'ImageSets', 'Context')

        self.im_ids = []
        self.images = []

        print("Initializing dataloader for PASCAL {} set".format(''.join(
            self.split)))
        for splt in self.split:
            with open(os.path.join(_splits_dir, splt + '.txt'), "r") as f:
                lines = f.read().splitlines()

            for ii, line in enumerate(lines):
                # Images
                _image = os.path.join(image_dir, line + ".jpg")
                assert os.path.isfile(_image)
                self.images.append(_image)
                self.im_ids.append(line.rstrip('\n'))

                # Edges
                _edge = os.path.join(edge_gt_dir, line + ".mat")
                assert os.path.isfile(_edge)
                self.edges.append(_edge)

                # Semantic Segmentation
                _semseg = self._get_semseg_fname(line)
                assert os.path.isfile(_semseg)
                self.semsegs.append(_semseg)

                # Human Parts
                _human_part = os.path.join(part_gt_dir, line + ".mat")
                assert os.path.isfile(_human_part)
                self.parts.append(_human_part)

                # Surface Normals
                _normal = os.path.join(_normal_gt_dir, line + ".png")
                assert os.path.isfile(_normal)
                self.normals.append(_normal)

                # Saliency
                _sal = os.path.join(_sal_gt_dir, line + ".png")
                assert os.path.isfile(_sal)
                self.sals.append(_sal)

        if self.do_edge:
            assert (len(self.images) == len(self.edges))
        if self.do_human_parts:
            assert (len(self.images) == len(self.parts))
        if self.do_semseg:
            assert (len(self.images) == len(self.semsegs))
        if self.do_normals:
            assert (len(self.images) == len(self.normals))
        if self.do_sal:
            assert (len(self.images) == len(self.sals))

        if not self._check_preprocess_parts():
            print('Pre-processing PASCAL dataset for human parts. '
                  'This may take a while, but it is done only once.')
            self._preprocess_parts()

        if self.do_human_parts:
            # Find images which have human parts
            self.has_human_parts = []
            for ii in range(len(self.im_ids)):
                if self.human_parts_category in self.part_obj_dict[
                        self.im_ids[ii]]:
                    self.has_human_parts.append(1)
                else:
                    self.has_human_parts.append(0)

            # If the other tasks are disabled, select only the images that contain human parts, to allow batching
            if not self.do_edge and not self.do_semseg and not self.do_sal and not self.do_normals:
                print('Ignoring images that do not contain human parts')
                for i in range(len(self.parts) - 1, -1, -1):
                    if self.has_human_parts[i] == 0:
                        del self.im_ids[i]
                        del self.images[i]
                        del self.parts[i]
                        del self.has_human_parts[i]
            print('Number of images with human parts: {:d}'.format(
                np.sum(self.has_human_parts)))

        #  Overfit to n_of images
        if overfit:
            n_of = 64
            self.images = self.images[:n_of]
            self.im_ids = self.im_ids[:n_of]
            if self.do_edge:
                self.edges = self.edges[:n_of]
            if self.do_semseg:
                self.semsegs = self.semsegs[:n_of]
            if self.do_human_parts:
                self.parts = self.parts[:n_of]
            if self.do_normals:
                self.normals = self.normals[:n_of]
            if self.do_sal:
                self.sals = self.sals[:n_of]

        # Display stats
        print('Number of dataset images: {:d}'.format(len(self.images)))
Example #22
    def __init__(
        self,
        root=Path.db_root_dir('NYUD_raw'),
        split='train',
        transform=None,
        do_normals=True,
        do_depth=False,
        retname=True,
        overfit=False,
    ):

        self.root = root
        self.transform = transform

        self.split = split

        self.retname = retname

        self.do_normals = do_normals
        self.do_depth = do_depth

        # Original Images
        self.im_ids = []
        self.images = []
        _image_dir = os.path.join(root, self.split, 'images')
        _mask_gt_dir = os.path.join(root, self.split, 'masks')

        # Surface Normals
        self.normals = []
        nrm_ext = '.png' if self.split == 'train' else '.jpg'
        self.masks = []
        _normal_gt_dir = os.path.join(root, self.split, 'normals')

        # Monocular depth
        self.depths = []
        _depth_gt_dir = os.path.join(root, self.split, 'depth')

        # train/val/test splits are pre-cut
        _splits_dir = os.path.join(root, 'gt_sets')

        print('Initializing dataloader for NYUD Raw, {} set'.format(
            self.split))
        with open(os.path.join(_splits_dir, self.split + '.txt'), 'r') as f:
            lines = f.read().splitlines()

        for ii, line in enumerate(lines):

            # Images
            _image = os.path.join(_image_dir, line + '.jpg')
            assert os.path.isfile(_image)
            self.images.append(_image)
            self.im_ids.append(line.rstrip('\n'))

            if self.do_normals:
                # Normals
                _normal = os.path.join(_normal_gt_dir, line + nrm_ext)
                assert os.path.isfile(_normal)
                self.normals.append(_normal)

            if self.do_depth:
                # Depth
                _depth = os.path.join(_depth_gt_dir, line + '.mat')
                assert os.path.isfile(_depth)
                self.depths.append(_depth)

            if self.split == 'train':
                # Masks (only available for train data)
                _mask = os.path.join(_mask_gt_dir, line + '.png')
                assert os.path.isfile(_mask)
                self.masks.append(_mask)

        if self.do_normals:
            assert (len(self.images) == len(self.normals))
        if self.do_depth:
            assert (len(self.images) == len(self.depths))

        if self.split == 'train':
            assert (len(self.images) == len(self.masks))

        # Overfit to n_of images
        if overfit:
            n_of = 64
            self.images = self.images[:n_of]
            self.im_ids = self.im_ids[:n_of]

        # Display stats
        print('Number of dataset images: {:d}'.format(len(self.images)))
Example #23
    def __init__(self,
                 root=Path.db_root_dir('BSDS500'),
                 download=True,
                 split=['train', 'val'],
                 transform=None,
                 retname=True,
                 n_votes=1,
                 overfit=False):

        if download:
            self._download()

        self.transform = transform

        self.retname = retname
        self.n_votes = n_votes

        self.root = root
        self.gt_dir = os.path.join(self.root, 'data', 'groundTruth')
        self.image_dir = os.path.join(self.root, 'data', 'images')

        _splits_dir = os.path.join(self.root, 'lists')
        if not os.path.exists(_splits_dir):
            os.mkdir(_splits_dir)

        self.split = split
        self._get_images_trainval()

        if isinstance(self.split, str):
            self.split = [self.split]

        self.images = []
        self.gts = []
        self.im_ids = []

        for sp in self.split:
            with open(os.path.join(_splits_dir, sp + '.txt'), "r") as f:
                lines = f.readlines()

            for line in lines:
                line = line.strip()

                _image = os.path.join(self.image_dir, sp, line + ".jpg")
                _gt = os.path.join(self.gt_dir, sp, line + ".mat")

                assert os.path.isfile(_image)
                assert os.path.isfile(_gt)
                self.im_ids.append(line)
                self.images.append(_image)
                self.gts.append(_gt)

        assert (len(self.images) == len(self.gts) == len(self.im_ids))

        if overfit:
            n_of = 16
            self.images = self.images[:n_of]
            self.gts = self.gts[:n_of]
            self.im_ids = self.im_ids[:n_of]

        # Display stats
        print('Number of images: {:d}'.format(len(self.im_ids)))
Example #24
def main():
    args = parse_args()
    best_prec1 = 0

    if not args.group_norm:
        save_dir = os.path.join(Path.exp_dir(), 'imagenet', args.arch)
    else:
        save_dir = os.path.join(Path.exp_dir(), 'imagenet', args.arch + '-GN')
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    log = open(
        os.path.join(save_dir, '{}.{}.log'.format(args.arch, args.prefix)),
        'w')

    # create model
    print_log("=> creating model '{}'".format(args.arch), log)

    resol = 224
    if args.arch == 'res26':
        model = resnet.resnet26(pretrained=False, group_norm=args.group_norm)
    elif args.arch == 'res50':
        model = resnet.resnet50(pretrained=False, group_norm=args.group_norm)
    elif args.arch == 'res101':
        model = resnet.resnet101(pretrained=False, group_norm=args.group_norm)
    elif args.arch == 'x50':
        model = resnext.resnext50_32x4d(pretrained=False)
    elif args.arch == 'x101':
        model = resnext.resnext101_32x4d(pretrained=False)
    elif args.arch == 'res26-se':
        model = se_resnet.se_resnet26(num_classes=1000)
    elif args.arch == 'res50-se':
        model = se_resnet.se_resnet50(num_classes=1000)
    elif args.arch == 'res101-se':
        model = se_resnet.se_resnet101(num_classes=1000)
    elif args.arch == 'mobilenet-v2':
        model = mobilenet_v2.mobilenet_v2(pretrained=False,
                                          n_class=1000,
                                          last_channel=2048)
    else:
        raise ValueError("unknown architecture '{}'".format(args.arch))

    print_log("=> Model : {}".format(model), log)
    print_log("=> parameter : {}".format(args), log)

    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features,
                                               device_ids=list(
                                                   range(args.n_gpu)))
        model.cuda()
    else:
        model = torch.nn.DataParallel(model,
                                      device_ids=list(range(
                                          args.n_gpu))).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print_log("=> loading checkpoint '{}'".format(args.resume), log)
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print_log(
                "=> loaded checkpoint '{}' (epoch {})".format(
                    args.resume, checkpoint['epoch']), log)
        else:
            raise ValueError("=> no checkpoint found at '{}'".format(
                args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(resol),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=None)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(resol),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    filename = os.path.join(
        save_dir, 'checkpoint.{}.{}.pth.tar'.format(args.arch, args.prefix))
    bestname = os.path.join(
        save_dir, 'best.{}.{}.pth.tar'.format(args.arch, args.prefix))

    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(args.start_epoch, args.epochs):
        lr = adjust_learning_rate(optimizer, epoch, args)

        need_hour, need_mins, need_secs = convert_secs2time(
            epoch_time.val * (args.epochs - epoch))
        need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(
            need_hour, need_mins, need_secs)
        print_log(
            ' [{:s}] :: {:3d}/{:3d} ----- [{:s}] {:s} LR={:}'.format(
                args.arch, epoch, args.epochs, time_string(), need_time, lr),
            log)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, log, args)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, log, args)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
                'args': copy.deepcopy(args),
            }, is_best, filename, bestname)
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()
    log.close()
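adjust_learning_rate is referenced above but not shown. A common step-decay implementation, given purely as an assumption about what it might do (divide the initial LR by 10 every 30 epochs, the standard ImageNet recipe):

def adjust_learning_rate(optimizer, epoch, args):
    # Assumed step-decay schedule: lr = args.lr * 0.1 ** (epoch // 30)
    lr = args.lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr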
Example #25
    def __init__(self, root=Path.db_root_dir('MNIST'), train=True, transform=None, target_transform=None, download=False):
        super(MultiMNIST, self).__init__(root, train, transform, target_transform, download, multitask=False)
Example #26
def resnext_101_32x4d():
    model = resnext_101_32x4d_model
    model.load_state_dict(torch.load(os.path.join(Path.models_dir(), 'resnext_101_32x4d.pth')))
    return model
Example #27
def parse_folder(exp_root=Path.exp_dir(),
                 exp_group='pascal_se',
                 tasks=None,
                 db_name='PASCALContext',
                 query='*',
                 dic=None):

    # Avoid the mutable-default pitfall: results accumulate in dic
    if dic is None:
        dic = {}

    if tasks is None:
        tasks = ['edge', 'semseg', 'human_parts', 'normals', 'sal', 'depth']

    exp_group_dir = os.path.join(exp_root, exp_group)
    dirs = os.listdir(exp_group_dir)
    dirs.sort()

    best_perf = {task: 0 for task in tasks}
    for task in {'normals', 'depth', 'albedo'}:
        if task in tasks:
            best_perf[task] = 100

    # Examine all subdirectories
    for d in dirs:
        dir_in = os.path.join(exp_group_dir, d)

        # No dir or dir without subdirs
        if not os.path.isdir(dir_in) or not exists_dir(dir_in):
            continue

        # If results folder in dir, print results
        if ('Results_' + db_name) in os.listdir(dir_in):
            perf = {}
            task_counter = 0
            # Iterate through all tasks
            for i, task in enumerate(tasks):
                fnames = glob.glob(dir_in + '/Results_' + db_name + '/' +
                                   query + task + '.json')

                if not fnames:
                    perf[task] = -1
                    continue
                task_counter += 1

                with open(fnames[0], 'r') as f:
                    data = json.load(f)

                if task == 'edge':
                    perf[task] = 100 * data['ods_f']
                    if perf[task] > best_perf[task]:
                        best_perf[task] = perf[task]
                elif task == 'semseg':
                    perf[task] = 100 * data['mIoU']
                    if perf[task] > best_perf[task]:
                        best_perf[task] = perf[task]
                elif task == 'human_parts':
                    perf[task] = 100 * data['mIoU']
                    if perf[task] > best_perf[task]:
                        best_perf[task] = perf[task]
                elif task == 'normals':
                    perf[task] = data['mean']
                    if perf[task] < best_perf[task]:
                        best_perf[task] = perf[task]
                elif task == 'depth':
                    perf[task] = data['rmse']
                    if perf[task] < best_perf[task]:
                        best_perf[task] = perf[task]
                elif task == 'albedo':
                    perf[task] = data['rmse']
                    if perf[task] < best_perf[task]:
                        best_perf[task] = perf[task]
                elif task == 'sal':
                    perf[task] = 100 * data['mIoU']
                    if perf[task] > best_perf[task]:
                        best_perf[task] = perf[task]

            perf_str = [
                task + ' ' + '%06.3f' % perf[task] + '   '
                for i, task in enumerate(tasks)
            ]
            perf_str = "".join(perf_str)
            if task_counter > 0:
                print('{}: {}'.format(perf_str, d))
                dic[d] = perf

        elif 'models' in os.listdir(dir_in):
            # Results are not ready yet
            continue
        else:
            # Examine subdirectories recursively
            print('\n\n{}\n'.format(d))
            parse_folder(exp_group=os.path.join(exp_group, d),
                         tasks=tasks,
                         query=query,
                         db_name=db_name,
                         dic=dic)

    print(best_perf)
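Results accumulate in dic, so a caller would typically pass a fresh dict and read it back. Hypothetical usage:

results = {}
parse_folder(exp_group='pascal_se', db_name='PASCALContext', dic=results)
for exp_name, perf in results.items():
    print(exp_name, perf)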
Example #28
def parse_args():
    def str2bool(v):
        return v.lower() in ("yes", "true", "t", "1")

    parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
    parser.add_argument('--data',
                        default=Path.db_root_dir('Imagenet'),
                        help='path to dataset')
    parser.add_argument('--arch', '-a', metavar='ARCH', default='x50')
    parser.add_argument('-j',
                        '--workers',
                        default=16,
                        type=int,
                        metavar='N',
                        help='number of data loading workers (default: 16)')
    parser.add_argument('--epochs',
                        default=100,
                        type=int,
                        metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('--start-epoch',
                        default=0,
                        type=int,
                        metavar='N',
                        help='manual epoch number (useful on restarts)')
    parser.add_argument('-b',
                        '--batch-size',
                        default=256,
                        type=int,
                        metavar='N',
                        help='mini-batch size (default: 256)')
    parser.add_argument('--lr',
                        '--learning-rate',
                        default=0.1,
                        type=float,
                        metavar='LR',
                        help='initial learning rate')
    parser.add_argument('--momentum',
                        default=0.9,
                        type=float,
                        metavar='M',
                        help='momentum')
    parser.add_argument('--weight-decay',
                        '--wd',
                        default=1e-4,
                        type=float,
                        metavar='W',
                        help='weight decay (default: 1e-4)')
    parser.add_argument('--print-freq',
                        '-p',
                        default=200,
                        type=int,
                        metavar='N',
                        help='print frequency (default: 200)')
    parser.add_argument('--resume',
                        default='',
                        type=str,
                        metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('-e',
                        '--evaluate',
                        dest='evaluate',
                        action='store_true',
                        help='evaluate model on validation set')
    parser.add_argument('--n_gpu', type=int, default=8, help='number of GPUs')
    parser.add_argument('--group_norm',
                        type=str2bool,
                        default=False,
                        help='Group Normalization')

    args = parser.parse_args()
    args.prefix = time_file_str()

    return args
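A hypothetical invocation, showing the derived prefix attribute that later code uses for log and checkpoint names:

# e.g. python train.py --arch res50 -b 128 --n_gpu 4 --group_norm true
args = parse_args()
print(args.arch, args.batch_size, args.group_norm)
print(args.prefix)   # timestamp string from time_file_str()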
Example #29
def create_config():
    cfg = edict()

    args = parse_args()

    # Parse tasks
    assert (len(args.active_tasks) == 5)
    args.do_edge = args.active_tasks[0]
    args.do_semseg = args.active_tasks[1]
    args.do_human_parts = args.active_tasks[2]
    args.do_normals = args.active_tasks[3]
    args.do_sal = args.active_tasks[4]

    print('\nThis script was run with the following parameters:')
    for x in vars(args):
        print('{}: {}'.format(x, str(getattr(args, x))))

    cfg.resume_epoch = args.resume_epoch

    cfg.DO_EDGE = args.do_edge
    cfg.DO_SEMSEG = args.do_semseg
    cfg.DO_HUMAN_PARTS = args.do_human_parts
    cfg.DO_NORMALS = args.do_normals
    cfg.DO_SAL = args.do_sal

    if not cfg.DO_EDGE and not cfg.DO_SEMSEG and not cfg.DO_HUMAN_PARTS and not cfg.DO_NORMALS and not cfg.DO_SAL:
        raise ValueError("Select at least one task")

    cfg['arch'] = args.arch
    cfg['pretr'] = args.pretr
    cfg['trBatch'] = args.trBatch
    cfg['lr'] = args.lr
    cfg['lr_dec'] = args.lr_dec
    cfg['wd'] = args.wd
    cfg['cls'] = args.cls
    cfg['epochs'] = args.epochs
    cfg['stride'] = args.stride
    cfg['trNorm'] = args.trNorm
    cfg['dec_w'] = args.dec_w

    # Set Modulation (Squeeze and Excitation, Residual Adapters) parameters
    cfg['seenc'] = args.seenc
    cfg['sedec'] = args.sedec
    cfg['adapters'] = args.adapt

    if cfg['sedec']:
        cfg['norm_per_task'] = True
    else:
        cfg['norm_per_task'] = False

    if args.dscr == 'None':
        args.dscr = None

    cfg['dscr_type'] = args.dscr
    cfg['lr_dscr'] = args.lr_dscr
    cfg['dscr_w'] = args.dscr_w
    cfg['dscrd'] = args.dscrd
    cfg['dscrk'] = args.dscrk

    task_args, name_args = get_exp_name(args)

    cfg['exp_folder_name'] = 'pascal_resnet'
    cfg['exp_name'] = "_".join(name_args)
    cfg['tasks_name'] = "_".join(task_args)
    cfg['save_dir_root'] = os.path.join(Path.exp_dir(), cfg['exp_folder_name'],
                                        cfg['tasks_name'])
    if args.onlyVOC:
        cfg['train_db_name'] = ['VOC12', 'SBD']
        cfg['test_db_name'] = 'VOC12'
        cfg['infer_db_names'] = [
            'VOC12',
        ]
    else:
        cfg['train_db_name'] = [
            'PASCALContext',
        ]
        cfg['test_db_name'] = 'PASCALContext'
        cfg['infer_db_names'] = [
            'PASCALContext',
        ]

    # Which tasks?
    cfg.TASKS = edict()
    cfg.TASKS.NAMES = []
    cfg.TASKS.NUM_OUTPUT = {}  # How many outputs per task?
    cfg.TASKS.TB_MIN = {}
    cfg.TASKS.TB_MAX = {}
    cfg.TASKS.LOSS_MULT = {}
    cfg.TASKS.FLAGVALS = {'image': cv2.INTER_CUBIC}
    cfg.TASKS.INFER_FLAGVALS = {}

    if cfg.DO_EDGE:
        # Edge Detection
        print('Adding task: Edge Detection')
        tmp = 'edge'
        cfg.TASKS.NAMES.append(tmp)
        cfg.TASKS.NUM_OUTPUT[tmp] = 1
        cfg.TASKS.TB_MIN[tmp] = 0
        cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp]
        cfg.TASKS.LOSS_MULT[tmp] = 50
        cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
        cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR

        # Add task-specific parameters from parser
        cfg['edge_w'] = args.edge_w
        cfg['eval_edge'] = False

    if cfg.DO_SEMSEG:
        # Semantic Segmentation
        print('Adding task: Semantic Segmentation')
        tmp = 'semseg'
        cfg.TASKS.NAMES.append(tmp)
        cfg.TASKS.NUM_OUTPUT[tmp] = 21
        cfg.TASKS.TB_MIN[tmp] = 0
        cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp] - 1
        cfg.TASKS.LOSS_MULT[tmp] = 1
        cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
        cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_NEAREST

    if cfg.DO_HUMAN_PARTS:
        # Human Parts Segmentation
        print('Adding task: Human Part Segmentation')
        tmp = 'human_parts'
        cfg.TASKS.NAMES.append(tmp)
        cfg.TASKS.NUM_OUTPUT[tmp] = 7
        cfg.TASKS.TB_MIN[tmp] = 0
        cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp] - 1
        cfg.TASKS.LOSS_MULT[tmp] = 2
        cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
        cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_NEAREST

    if cfg.DO_NORMALS:
        # Surface Normal Estimation
        print('Adding task: Normals')
        tmp = 'normals'
        cfg.TASKS.NAMES.append(tmp)
        cfg.TASKS.NUM_OUTPUT[tmp] = 3
        cfg.TASKS.TB_MIN[tmp] = -1
        cfg.TASKS.TB_MAX[tmp] = 1
        cfg.TASKS.LOSS_MULT[tmp] = 10
        cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_CUBIC
        cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR

        cfg['normloss'] = 1  # Hard-coded L1 loss for normals

    if cfg.DO_SAL:
        # Saliency Estimation
        print('Adding task: Saliency')
        tmp = 'sal'
        cfg.TASKS.NAMES.append(tmp)
        cfg.TASKS.NUM_OUTPUT[tmp] = 1
        cfg.TASKS.TB_MIN[tmp] = 0
        cfg.TASKS.TB_MAX[tmp] = 1
        cfg.TASKS.LOSS_MULT[tmp] = 5
        cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
        cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR

    cfg['lr_tsk'] = len(cfg.TASKS.NAMES) if args.lr_tsk < 0 else args.lr_tsk

    cfg.NETWORK = edict()

    # Visualize the network on Tensorboard / pdf?
    cfg.NETWORK.VIS_NET = False

    cfg.TRAIN = edict()
    cfg.TRAIN.SCALE = (512, 512)
    cfg.TRAIN.MOMENTUM = 0.9
    cfg.TRAIN.TENS_VIS = True
    cfg.TRAIN.TENS_VIS_INTER = 1000
    cfg.TRAIN.TEMP_LOSS_INTER = 1000

    cfg.TEST = edict()

    # See evolution of the test set when training?
    cfg.TEST.USE_TEST = True
    cfg.TEST.TEST_INTER = 10
    cfg.TEST.SCALE = (512, 512)

    cfg.SEED = 0
    cfg.EVALUATE = True
    cfg.DEBUG = False

    cfg['overfit'] = args.overfit
    if cfg['overfit']:
        cfg['save_dir_root'] = os.path.join(Path.exp_dir(),
                                            cfg['exp_folder_name'])
        cfg['exp_name'] = 'test'

    cfg['save_dir'] = os.path.join(cfg['save_dir_root'], cfg['exp_name'])
    return cfg
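Hypothetical usage, assuming the command line activates edge detection and semantic segmentation (the flag name is inferred from args.active_tasks above):

cfg = create_config()
print(cfg.TASKS.NAMES)        # e.g. ['edge', 'semseg']
print(cfg.TASKS.NUM_OUTPUT)   # e.g. {'edge': 1, 'semseg': 21}
print(cfg['save_dir'])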
Example #30
    def __init__(
        self,
        root=Path.db_root_dir('PASCAL'),
        download=True,
        split='val',
        transform=None,
        retname=True,
        do_semseg=True,
        overfit=False,
    ):

        self.root = root
        _sbd_root = os.path.join(self.root, self.BASE_DIR)
        _inst_dir = os.path.join(_sbd_root, 'inst')
        _cat_dir = os.path.join(_sbd_root, 'cls')
        _image_dir = os.path.join(_sbd_root, 'img')

        if download:
            self._download()

        self.transform = transform

        if isinstance(split, str):
            self.split = [split]
        else:
            split.sort()
            self.split = split

        self.retname = retname

        self.do_semseg = do_semseg
        if self.do_semseg:
            self.semsegs = []

        # train/val/test splits are pre-cut
        _splits_dir = os.path.join(_sbd_root)

        self.im_ids = []
        self.images = []

        print("Initializing dataloader for SBD {} set".format(''.join(
            self.split)))
        for splt in self.split:
            with open(os.path.join(_splits_dir, splt + '.txt'), "r") as f:
                lines = f.read().splitlines()

            for ii, line in enumerate(lines):

                # Images
                _image = os.path.join(_image_dir, line + ".jpg")
                assert os.path.isfile(_image)
                self.images.append(_image)
                self.im_ids.append(line.rstrip('\n'))

                # Semantic Segmentation
                if self.do_semseg:
                    _semseg = os.path.join(_cat_dir, line + '.mat')
                    assert os.path.isfile(_semseg)
                    self.semsegs.append(_semseg)

        if self.do_semseg:
            assert (len(self.images) == len(self.semsegs))

        # Overfit to n_of images
        if overfit:
            n_of = 32
            self.im_ids = self.im_ids[:n_of]
            self.images = self.images[:n_of]
            if self.do_semseg:
                self.semsegs = self.semsegs[:n_of]

        # Display stats
        print('Number of dataset images: {:d}'.format(len(self.images)))