Example 1
    def dict_performance(self, gt):
        '''Evaluate against ground truth on a number of criteria.'''
        res = ezdict(
            # Accuracy
            # acc=self.Accuracy(gt),
            # Error
            Err=1 - self.Accuracy(gt),
            # Negative log-likelihood
            NLL=self.Clip().NLL(gt),
            # Per-sample NLL
            # sNLL=self.Clip().NLL(gt, reduce=False),
            # Mean Average Precision.
            # Note that VOC's flawed version is used for better comparison.
            mAP=self.mAP(gt),
            # The vanilla Brier score:
            # Bri=self.Bri(gt),
            # Our modified Brier loss:
            Bri=np.sqrt(self.Bri(gt)),
            # Expected Calibration Error
            ECE=self.ECE(gt),
            # Error rate among 99%-confident predictions
            E99=self.E99(gt),
            # Entropy
            Ent=self.Ent(gt),
        )
        return res
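ezdict is not defined in any of these snippets, but from its usage (keyword construction plus attribute access such as res.Err or meta.images) it behaves like an EasyDict-style dictionary. A minimal sketch under that assumption:

class ezdict(dict):
    '''Sketch of the assumed ezdict behavior: a dict whose keys
    can also be read and written as attributes.'''

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value

# e.g. d = ezdict(Err=0.12); d.NLL = 0.5; assert d.Err == d['Err']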
Example 2
    def __init__(self,
                 paths=None,
                 train=True,
                 transform=None,
                 target_transform=None,
                 split='val',
                 loader=default_loader):

        paths = paths if paths is not None else default_paths
        root = paths.LFWproot
        root = os.path.join(root, '')  # add trailing slash
        imgroot = os.path.join(root, 'lfw+_jpg24')

        meta = MU.cached(os.path.join(root, '__meta__.pkl'),
                         type(self)._load_meta, root)
        meta = ezdict(meta)

        assert split in {'val', 'test'}
        if split == 'val':
            trainf = [0, 1]
            valf = [2]
        else:
            trainf = [0, 1, 2]
            valf = [3, 4]

        folds = trainf if train else valf
        names = [xx for x in folds for xx in meta.images[x]]
        labels = np.concatenate([meta.labels[x] for x in folds], axis=0)

        # initialize self
        imgs = list(zip(names, labels))
        classes = meta.attrs
        super().__init__(imgroot, imgs, classes, None, transform,
                         target_transform, loader)
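MU.cached (also used in Example 7) is not shown either; the call pattern cached(cache_path, fn, *args) suggests a compute-once pickle cache. A minimal sketch under that assumption:

import os
import pickle

def cached(cache_path, fn, *args):
    '''Sketch of the assumed MU.cached helper: return the pickled result
    at cache_path if it exists, otherwise compute fn(*args) and cache it.'''
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as f:
            return pickle.load(f)
    result = fn(*args)
    with open(cache_path, 'wb') as f:
        pickle.dump(result, f)
    return result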
Example 3
    def __init__(self, root=None):
        # decide root based on input and machine name
        if root is None:
            hostname = socket.gethostname()
            if hostname == 'cashew':
                self.dsetroot = '/home/zhizhong/tmp_dset/'
            elif hostname.startswith('vision-'):
                self.dsetroot = '/home/zli115/tmp_dset/'
            else:
                raise Exception(
                    'Please configure your dataset root and paths in this code file'
                )
        else:
            self.dsetroot = root

        # dataset root folders
        self.ImageNetroot = os.path.join(self.dsetroot, 'ILSVRC2012')
        # self.Places365root = os.path.join(self.dsetroot, 'Places365')
        self.VOCroot = os.path.join(self.dsetroot, 'VOC2012')
        self.Cocoroot = os.path.join(self.dsetroot, 'MSCOCO')
        self.OIIITPetsroot = os.path.join(self.dsetroot, 'Oxford_IIIT_Pets')
        self.LFWproot = os.path.join(self.dsetroot, 'LFW+_Release')

        # pretrained models from https://github.com/CSAILVision/places365 for training
        self.pretrainedroot = os.path.join(self.dsetroot, 'pretrained')
        self.prePlaces365 = ezdict()
        for x in ['resnet18', 'resnet50', 'densenet161']:
            self.prePlaces365[x] = os.path.join(
                self.pretrainedroot,
                '%s_places365.pth.tar' % (x.lower()),
            )
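The snippet shows only __init__, so the class name is unknown; calling it Paths purely for illustration, usage would look like:

# Hypothetical usage; 'Paths' stands in for the unnamed class above.
paths = Paths()                    # pick dsetroot from the machine's hostname
paths = Paths(root='/data/dsets')  # or supply the dataset root explicitly
print(paths.ImageNetroot)          # /data/dsets/ILSVRC2012
print(paths.prePlaces365['resnet18'])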
Example 4
def multiple_runs_evaluate(safe_pred_allruns, labels):
    '''Evaluate logits from multiple runs and average the performance.'''
    res_allruns = [
        x.dict_performance(labels.numpy()) for x in safe_pred_allruns
    ]
    res = ezdict()
    for k in res_allruns[0].keys():
        res[k] = np.mean([x[k] for x in res_allruns])
    return res
Example 5
    def register_module(self, name, path):
        _META_.MODULES[name] = ezdict(path=path, classes=ezdict())
Example 6
class _META_(type):
    # real argparser instance
    PARSER = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # parsed arguments
    ARGS = ezdict()
    # monitored modules
    MODULES = ezdict()

    def require_args(self):

        # args for config file
        _META_.PARSER.add_argument(
            '--cfgs', type=str, nargs='*',
            default=['/home/xifeng/Workspace/2020/PICA/configs/base.yaml',
                     '/home/xifeng/Workspace/2020/PICA/configs/cifar100.yaml'],
            help='config files to load')

    def register_module(self, name, path):
        _META_.MODULES[name] = ezdict(path=path, classes=ezdict())

    def register_class(self, module, name, obj):
        assert module in _META_.MODULES.keys(), ('No module '
                                                 'named [%s] has been registered' % module)
        _META_.MODULES[module].classes[name] = obj

    def get_class_name(self, module):
        assert module in _META_.MODULES.keys(), ('No module '
                                                 'named [%s] has been registered' % module)
        return [name for name, _ in _META_.MODULES[module].classes.items()]

    def get_class(self, module, name=None):
        assert module in _META_.MODULES.keys(), ('No module '
                                                 'named [%s] has been registered' % module)
        if name is None:
            return [obj for name, obj in _META_.MODULES[module].classes.items()]
        assert name in _META_.MODULES[module].classes.keys(), ('No class named [%s] '
                                                               'has been registered in module [%s]' % (name, module))
        return _META_.MODULES[module].classes[name]

    def parse(self):
        # collect self args
        self.require_args()

        # load default args from config file
        self.from_files(self.known_args().cfgs)

        # collect args for packages
        for module in _META_.MODULES.keys():
            # setup module arguments
            mod = importlib.import_module(_META_.MODULES[module].path)
            if hasattr(mod, 'require_args'):
                mod.require_args()

        # re-update default value for new args
        self.from_files(self.known_args().cfgs)

        for module in _META_.MODULES.keys():
            # setup class arguments
            if hasattr(self.known_args(), module):
                cls = self.get_class(module, self.known_args().__dict__[module])
                if hasattr(cls, 'require_args'):
                    cls.require_args()
            else:
                cls_list = self.get_class(module)
                for cls in cls_list:
                    if hasattr(cls, 'require_args'):
                        cls.require_args()

        # re-update default value for new args
        self.from_files(self.known_args().cfgs)

        # parse args
        _META_.ARGS = _META_.PARSER.parse_args()

    def known_args(self):
        args, _ = _META_.PARSER.parse_known_args()
        return args

    def from_files(self, files):

        # if no config file is provided, skip
        if files is None or len(files) <= 0:
            return None

        for file in files:
            assert os.path.exists(file), "Config file not found: [%s]" % file
            with open(file, 'r') as f:
                configs = yaml.safe_load(f)
            _META_.PARSER.set_defaults(**configs)

    def get(self, attr, default=None):
        if hasattr(_META_.ARGS, attr):
            return getattr(_META_.ARGS, attr)
        return default

    def set(self, key, val):
        setattr(_META_.ARGS, key, val)

    def yaml(self):
        config = {k: v for k, v in sorted(vars(_META_.ARGS).items())}
        return yaml.safe_dump(config, default_flow_style=False)

    def __getattr__(self, attr):
        try:
            return _META_.PARSER.__getattribute__(attr)
        except AttributeError:
            return _META_.ARGS.__getattribute__(attr)
        except:
            traceback.print_exc()
            exit(-1)

    def __str__(self):
        MAX_WIDTH = 20
        table = PrettyTable(["#", "Key", "Value", "Default"])
        table.align = 'l'
        for i, (k, v) in enumerate(sorted(vars(_META_.ARGS).items())):
            v = str(v)
            default = str(_META_.PARSER.get_default(k))
            if default == v:
                default = '--'
            table.add_row([i, k, v[:MAX_WIDTH] + ('...' if len(v) > MAX_WIDTH else ''), default])
        return table.get_string()
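Since _META_ subclasses type, it is meant to be used as a metaclass: its methods are invoked on the configuration class itself. An illustrative sketch (the Config name and the registered module path are assumptions):

class Config(metaclass=_META_):
    pass

# Register a monitored module (the path is illustrative), then parse.
Config.register_module('datasets', 'mypackage.datasets')
Config.parse()          # collects args, applies YAML defaults, parses the CLI
print(Config)           # PrettyTable of key / value / default
lr = Config.get('lr', default=0.01)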
Example 7
def CocoExistence(paths=None,
                  train=True,
                  split='val',
                  transform=None,
                  target_transform=None,
                  loader=default_loader):
    '''MSCOCO dataset, treated as a multi-label problem: classify the existence of each class.

    !!!
    WARNING: the labels may contain -100 as an ignore value, which PyTorch's
    binary_cross_entropy_with_logits cannot handle.'''

    paths = paths if paths is not None else default_paths

    # deal with splits
    assert split in {'val', 'test'}
    if split == 'val':
        phase = 'train2014' if train else 'val2014'
        annFile = 'instances_%s.json' % phase
    else:
        phase = 'trainval' if train else 'test2015'
        annFile = 'image_info_%s.json' % phase
    if phase == 'trainval':
        ret_ = [
            CocoExistence(paths, True, 'val', transform, target_transform),
            CocoExistence(paths, False, 'val', transform, target_transform),
        ]
        ret = ret_[0]
        # Do not use data.ConcatDataset(ret_)
        ret.extend_imgs(ret_[1])
        return ret

    root = os.path.join(paths.Cocoroot, phase)

    def __cached_fn(annFile, root):
        # get dataset object
        annFile = os.path.join(paths.Cocoroot, 'annotations', annFile)

        with MU.nostdout(on=True):
            ret = CocoDetection(root, annFile, transform, target_transform)
        coco = ret.coco

        # add label parsing
        clsCoco = sorted(coco.cats.keys())
        ttf = coco_anno2existence(coco, clsCoco)
        ret.classes = ([
            '{:s}:{:2d}'.format(coco.cats[x]['name'], x) for x in clsCoco
        ])
        # also provide labels for convenience
        # This is redundant code. No idea how to use CocoDetection without loading image...
        ret.imgs = [(coco.loadImgs(x)[0]['file_name'],
                     ttf(coco.loadAnns(coco.getAnnIds(imgIds=x))))
                    for x in sorted(ret.ids)]
        return dict(imgs=ret.imgs, classes=ret.classes)

    cache_file = os.path.join(
        paths.Cocoroot, '__cache_{split}split_{train}__.pkl'.format(
            split=split, train='train' if train else 'eval'))
    ret = ezdict(MU.cached(cache_file, __cached_fn, annFile, root))

    # cast as ImagePathsDataset for uniform interfacing
    ret = ImagePathsDataset(root,
                            ret.imgs,
                            classes=ret.classes,
                            class_to_idx=None,
                            transform=transform,
                            target_transform=target_transform,
                            loader=loader)

    return ret
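Typical usage, based on the signature above (the transform is illustrative); the returned ImagePathsDataset presumably yields (image, multi-hot label) pairs:

import torchvision.transforms as T

ds = CocoExistence(train=False, split='val', transform=T.ToTensor())
img, target = ds[0]   # target may contain -100 entries (ignored labels)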
Example 8
lookup = ezdict(
    # Number of classes
    num_outputs=dict(
        lfwp_gender=2,
        cat_vs_dog=2,
        imnet_animal=4,
        voc_to_coco=20,
    ),
    # Release model files
    release_model_file=dict(
        lfwp_gender='release_models/lfwp_gender/net.pkl',
        cat_vs_dog='release_models/cat_vs_dog/net.pkl',
        imnet_animal='release_models/imnet_animal/net.pkl',
        voc_to_coco='release_models/voc_to_coco/net.pkl',
    ),
    # Numerically-safe probability computation class
    # MC for softmax logit in multi-class, ML for sigmoid logit in multi-label
    SafeProbClass=dict(
        lfwp_gender=SafeProbsMC,
        cat_vs_dog=SafeProbsMC,
        imnet_animal=SafeProbsMC,
        voc_to_coco=SafeProbsML,
    ),
    # Dataset class / function handles
    dataset=dict(
        lfwp_gender=mydatasets.LFWpGenderFamUnfDataset,
        cat_vs_dog=mydatasets.PetsFamUnfDataset,
        imnet_animal=mydatasets.ImageNetFamUnfDataset,
        voc_to_coco=mydatasets.VOCCocoFamUnfDataset,
    ),
    # Calibration temperature for baseline ResNet18 models
    # (obtained on a familiar data validation split)
    calibrate_T_baseline_resnet18=dict(
        lfwp_gender=1.60,
        cat_vs_dog=1.90,
        imnet_animal=1.68,
        voc_to_coco=1.02,
    ))
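With ezdict's attribute access, the top-level tables can be read as attributes, while the nested plain dicts keep item access (assuming ezdict does not recurse into nested dicts):

n_classes = lookup.num_outputs['voc_to_coco']           # 20
T = lookup.calibrate_T_baseline_resnet18['cat_vs_dog']  # 1.90
DatasetCls = lookup.dataset['imnet_animal']             # mydatasets.ImageNetFamUnfDataset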
Example 9
def load_config(config_path):
    with open(config_path, 'r', encoding='utf-8') as file:
        config = ezdict(yaml.load(file, Loader=yaml.SafeLoader))
    return config
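Illustrative usage (the file name and keys below are assumptions):

cfg = load_config('configs/base.yaml')  # hypothetical YAML with lr/batch_size keys
print(cfg.lr, cfg.batch_size)           # attribute access provided by ezdict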
Example 10
lookup = ezdict(
    resnet18=dict(
        lfwp_gender=dict(
            # Number of classes
            num_outputs=2,
            # Dataset class / function handles
            DS='LFWpGenderFamUnfDataset',
            # learning rate
            lr=0.0080,
            # num of epochs
            max_epochs=24,
            # batch size
            batch_size=128,
            # label style
            label_style='MC',
        ),
        cat_vs_dog=dict(
            num_outputs=2,
            DS='PetsFamUnfDataset',
            lr=0.0600,
            max_epochs=128,
            batch_size=128,
            label_style='MC',
        ),
        imnet_animal=dict(
            num_outputs=4,
            DS='ImageNetFamUnfDataset',
            lr=0.0030,
            max_epochs=64,
            batch_size=128,
            label_style='MC',
        ),
        voc_to_coco=dict(
            num_outputs=20,
            DS='VOCCocoFamUnfDataset',
            lr=0.0300,
            max_epochs=32,
            batch_size=128,
            label_style='ML',
        ),
    ),
    densenet161=dict(
        lfwp_gender=dict(
            num_outputs=2,
            DS='LFWpGenderFamUnfDataset',
            lr=0.0040,
            max_epochs=96,
            batch_size=32,
            label_style='MC',
        ),
    ),
)