Ejemplo n.º 1
0
def adversarial_attack(args, model, inv_factors, results_path, fig_path):
    """Evaluate the model (and its Laplace/BNN variant) under FGSM attacks.

    With ``args.epsilon > 0`` a single attack at that strength is evaluated
    and its statistics printed.  Otherwise a sweep over epsilon values in
    [0, 1] is run for both the deterministic network and the Bayesian variant;
    per-step statistics are saved to ``<results_path>_fgsm.npz`` after every
    step, then tabulated and plotted to ``fig_path``.
    """
    print("Loading data")
    if args.data == 'cifar10':
        test_loader = datasets.cifar10(args.torch_data, splits='test')
    elif args.data == 'gtsrb':
        test_loader = datasets.gtsrb(args.data_dir, batch_size=args.batch_size, splits='test')
    elif args.data == 'mnist':  # fix: was a bare `if`, which broke the elif chain
        test_loader = datasets.mnist(args.torch_data, splits='test')
    elif args.data == 'tiny':
        test_loader = datasets.imagenet(args.data_dir, img_size=64, batch_size=args.batch_size, splits='test',
                                        tiny=True)
    elif args.data == 'imagenet':
        # Inception-style architectures expect larger input images.
        img_size = 224
        if args.model in ['googlenet', 'inception_v3']:
            img_size = 299
        test_loader = datasets.imagenet(args.data_dir, img_size, args.batch_size, workers=args.workers, splits='test')

    if args.epsilon > 0:
        print(eval_fgsm(model, test_loader, args.epsilon, args.device)[-1])
    else:
        stats_dict = {"eps": [], "acc": [], "ece1": [], "ece2": [], "nll": [], "ent": []}
        bnn_stats_dict = {"eps": [], "acc": [], "ece1": [], "ece2": [], "nll": [], "ent": []}
        # Dense steps in [0, 0.2], coarser steps up to 1.0.
        steps = np.concatenate([np.linspace(0, 0.2, 11), np.linspace(0.3, 1, 8)])
        for step in steps:
            stats = eval_fgsm(model, test_loader, step, args.device, verbose=False)[-1]
            bnn_stats = eval_fgsm_bnn(model, test_loader, inv_factors, args.estimator, args.samples, step,
                                      device=args.device)[-1]
            for (k1, v1), (k2, v2) in zip(stats.items(), bnn_stats.items()):
                stats_dict[k1].append(v1)
                bnn_stats_dict[k2].append(v2)
            # Save after every step so partial results survive an interruption.
            np.savez(results_path + "_fgsm.npz", stats=stats_dict, bnn_stats=bnn_stats_dict)
        print(tabulate.tabulate(stats_dict, headers="keys"))
        print(tabulate.tabulate(bnn_stats_dict, headers="keys"))

        plot.adversarial_results(steps, stats_dict, bnn_stats_dict, fig_path)
Ejemplo n.º 2
0
def test(args, model, fig_path=""):
    """Evaluate the deterministic model on the test split of ``args.data``.

    Runs a forward pass over the test loader and writes a reliability diagram
    to ``<fig_path>_reliability.pdf``.
    """
    print("Loading data")
    if args.data == 'cifar10':
        test_loader = datasets.cifar10(args.torch_data, splits='test')
    elif args.data == 'gtsrb':
        test_loader = datasets.gtsrb(args.data_dir, batch_size=args.batch_size, splits='test')
    elif args.data == 'mnist':  # fix: was a bare `if`, which broke the elif chain
        test_loader = datasets.mnist(args.torch_data, splits='test')
    elif args.data == 'tiny':
        test_loader = datasets.imagenet(args.data_dir, img_size=64, batch_size=args.batch_size, splits='test',
                                        tiny=True)
    elif args.data == 'imagenet':
        # Inception-style architectures expect larger input images.
        img_size = 224
        if args.model in ['googlenet', 'inception_v3']:
            img_size = 299
        test_loader = datasets.imagenet(args.data_dir, img_size, args.batch_size, workers=args.workers, splits='test')

    predictions, labels = eval_nn(model, test_loader, args.device, args.verbose)

    print("Plotting results")
    plot.reliability_diagram(predictions, labels, path=fig_path + "_reliability.pdf")
Ejemplo n.º 3
0
        i = 1
        for ap in aps:
            print('{}: {:.3f}'.format(self._classes[i], ap))
            i += 1
        print('{:.3f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')

    def evaluate_detections(self, all_boxes, output_dir):
        # Write detections to the competition-format results file (storing the
        # generated comp id), then score them with the Python evaluation code.
        self._comp_id = self._write_imagenet_results_file(all_boxes)
        self._do_python_eval(output_dir)

    def competition_mode(self, on):
        """Toggle competition mode: both config flags become False when on."""
        flag = not on
        self.config['use_salt'] = flag
        self.config['cleanup'] = flag

if __name__ == '__main__':
    # Ad-hoc smoke test: build the 'val1' ROI database and drop into IPython.
    d = datasets.imagenet('val1', '')
    res = d.roidb
    from IPython import embed; embed()
Ejemplo n.º 4
0
def main():
    """Compute curvature factors (kfac/efb/diag/inf) for a pre-trained model.

    Loads the model selected by ``args.model``, the training split of
    ``args.data`` (skipped for the 'inf' estimator, which loads no data here),
    runs the requested estimator and saves the resulting factors under
    ``<root_dir>/factors``.
    """
    args = setup()

    print("Preparing directories")
    os.makedirs(os.path.join(args.root_dir, "factors"), exist_ok=True)
    filename = f"{args.prefix}{args.model}_{args.data}_{args.estimator}{args.suffix}"
    factors_path = os.path.join(args.root_dir, "factors", filename)

    print("Loading model")
    if args.model == 'lenet5':
        model = lenet5.lenet5(pretrained=args.data, device=args.device)
    elif args.model == 'resnet18' and args.data != 'imagenet':
        model = resnet.resnet18(pretrained=os.path.join(
            args.root_dir, 'weights', f"{args.model}_{args.data}.pth"),
                                num_classes=43 if args.data == 'gtsrb' else 10,
                                device=args.device)
    else:
        # Fall back to a torchvision model; aux logits are disabled where they
        # exist so the forward pass returns a single tensor.
        model_class = getattr(torchvision.models, args.model)
        if args.model in ['googlenet', 'inception_v3']:
            model = model_class(pretrained=True, aux_logits=False)
        else:
            model = model_class(pretrained=True)
    model.to(args.device).train()
    if args.parallel:
        model = torch.nn.parallel.DataParallel(model)

    if args.estimator != 'inf':
        print("Loading data")  # fix: dropped pointless f-prefix
        if args.data == 'cifar10':
            data = datasets.cifar10(args.torch_data,
                                    args.batch_size,
                                    args.workers,
                                    args.augment,
                                    splits='train')
        elif args.data == 'mnist':
            data = datasets.mnist(args.torch_data,
                                  args.batch_size,
                                  args.workers,
                                  args.augment,
                                  splits='train')
        elif args.data == 'gtsrb':
            data = datasets.gtsrb(args.data_dir,
                                  batch_size=args.batch_size,
                                  workers=args.workers,
                                  splits='train')
        elif args.data == 'tiny':
            img_size = 64
            data = datasets.imagenet(args.data_dir,
                                     img_size,
                                     args.batch_size,
                                     splits='train',
                                     tiny=True)
        elif args.data == 'imagenet':
            img_size = 224
            if args.model in ['googlenet', 'inception_v3']:
                img_size = 299
            data = datasets.imagenet(args.data_dir,
                                     img_size,
                                     args.batch_size,
                                     workers=args.workers,
                                     splits='train')
        else:
            # Fail early instead of a NameError when `data` is used below.
            raise ValueError(f"Unknown dataset: {args.data}")
    torch.backends.cudnn.benchmark = True

    print("Computing factors")
    if args.estimator == 'inf':
        est = compute_inf(args)
    elif args.estimator == 'efb':
        # EFB builds on previously computed KFAC factors.
        factors = torch.load(factors_path.replace("efb", "kfac") + '.pth')
        est = compute_factors(args, model, data, factors)
    else:
        est = compute_factors(args, model, data)

    print("Saving factors")
    if args.estimator == "inf":
        torch.save(est.state, f"{factors_path}{args.rank}.pth")
    elif args.estimator == "efb":
        torch.save(list(est.state.values()), factors_path + '.pth')
        # EFB also yields diagonal terms, stored under the "diag" name.
        torch.save(list(est.diags.values()),
                   factors_path.replace("efb", "diag") + '.pth')
    else:
        torch.save(list(est.state.values()), factors_path + '.pth')
Ejemplo n.º 5
0
def out_of_domain(args, model, inv_factors, results_path="", fig_path=""):
    """Evaluates the model on in- and out-of-domain data.

    Each dataset has its own out-of-domain dataset which is loaded automatically alongside the in-domain dataset
    specified in `args.data`. For each image (batch) in the in- and out-of-domain data a forward pass through the
    provided `model` is performed and the predictions are stored under `results_path`. This is repeated for the Bayesian
    variant of the model (Laplace approximation).

    Parameters
    ----------
    args : Todo: Check type
        The arguments provided to the script on execution.
    model : torch.nn.Module Todo: Verify
        A `torchvision` or custom neural network (a `torch.nn.Module` or `torch.nn.Sequential` instance)
    inv_factors : list
        A list KFAC factors, Eigenvectors of KFAC factors or diagonal terms. Todo: INF
    results_path : string, optional
        The path where results (in- and out-of-domain predictions) should be stored. Results are not stored if
        argument `args.no_results` is provided.
    fig_path : string, optional
        The path where figures should be stored. Figures are only generated if argument `args.plot` is provided.
    """
    print("Loading data")
    if args.data == 'cifar10':
        in_data = datasets.cifar10(args.torch_data, splits='test')
        out_data = datasets.svhn(args.torch_data, splits='test')
    elif args.data == 'mnist':
        in_data = datasets.mnist(args.torch_data, splits='test')
        out_data = datasets.kmnist(args.torch_data, splits='test')
    elif args.data == 'gtsrb':
        in_data = datasets.gtsrb(args.data_dir, batch_size=args.batch_size, splits='test')
        out_data = datasets.cifar10(args.torch_data, splits='test')
    elif args.data == 'tiny':
        in_data = datasets.imagenet(args.data_dir, img_size=64, batch_size=args.batch_size, splits='test', tiny=True,
                                    use_cache=True)
        out_data = datasets.art(args.data_dir, img_size=64, batch_size=args.batch_size, use_cache=True)
    elif args.data == 'imagenet':
        img_size = 224
        if args.model in ['googlenet', 'inception_v3']:
            img_size = 299
        in_data = datasets.imagenet(args.data_dir, img_size, args.batch_size, workers=args.workers, splits='test',
                                    use_cache=True)
        out_data = datasets.art(args.data_dir, img_size, args.batch_size, workers=args.workers, use_cache=True)

    # Compute NN and BNN predictions on validation set of training data
    predictions, bnn_predictions, labels, stats = eval_nn_and_bnn(model, in_data, inv_factors, args.estimator,
                                                                  args.samples, args.stats, args.device, verbose=True)

    # Compute NN and BNN predictions on out-of-distribution data
    ood_predictions, bnn_ood_predictions, _, _ = eval_nn_and_bnn(model, out_data, inv_factors, args.estimator,
                                                                 args.samples, False, args.device, verbose=True)

    if not args.no_results:
        print("Saving results")
        np.savez_compressed(results_path,
                            stats=stats,
                            labels=labels,
                            predictions=predictions,
                            bnn_predictions=bnn_predictions,
                            ood_predictions=ood_predictions,
                            bnn_ood_predictions=bnn_ood_predictions)

    if args.plot:
        print("Plotting results")
        fig, ax = plt.subplots(figsize=(12, 7), tight_layout=True)
        plot.inv_ecdf_vs_pred_entropy(predictions, color='dodgerblue', linestyle='--', axis=ax)
        plot.inv_ecdf_vs_pred_entropy(ood_predictions, color='crimson', linestyle='--', axis=ax)
        plot.inv_ecdf_vs_pred_entropy(bnn_predictions, color='dodgerblue', axis=ax)
        plot.inv_ecdf_vs_pred_entropy(bnn_ood_predictions, color='crimson', axis=ax)
        ax.legend([f"NN {args.data.upper()} | Acc.: {accuracy(predictions, labels):.2f}%",
                   "NN OOD",
                   f"BNN {args.data.upper()} | Acc.: {accuracy(bnn_predictions, labels):.2f}%",
                   "BNN OOD"], fontsize=16, frameon=False)
        # fix: keyword was misspelled `forma`, which savefig rejects.
        plt.savefig(fig_path + "_ecdf.pdf", format='pdf', dpi=1200)

        plot.reliability_diagram(predictions, labels, path=fig_path + "_reliability.pdf")
        plot.reliability_diagram(bnn_predictions, labels, path=fig_path + "_bnn_reliability.pdf")

        plot.entropy_hist(predictions, ood_predictions, path=fig_path + "_entropy.pdf")
        plot.entropy_hist(bnn_predictions, bnn_ood_predictions, path=fig_path + "_bnn_entropy.pdf")
Ejemplo n.º 6
0
        assert os.path.exists(IJCV_path), \
               'Selective search IJCV data not found at: {}'.format(IJCV_path)

        top_k = self.config['top_k']
        box_list = []
        for i in xrange(self.num_images):
            filename = os.path.join(IJCV_path, self.image_index[i] + '.mat')
            raw_data = sio.loadmat(filename)
            box_list.append(
                (raw_data['boxes'][:top_k, :] - 1).astype(np.uint16))

        return self.create_roidb_from_box_list(box_list, gt_roidb)

    def competition_mode(self, on):
        """Enable or disable competition mode via the config flags."""
        for key in ('use_salt', 'cleanup'):
            self.config[key] = not on

    def _write_inria_results_file(self, all_boxes):
        # No-op: no results file is written for this dataset.
        return


if __name__ == '__main__':
    # Ad-hoc smoke test: build the 'train' ROI database and drop into IPython.
    d = datasets.imagenet('train', '')
    res = d.roidb
    from IPython import embed
    embed()
Ejemplo n.º 7
0
        return self.create_roidb_from_box_list(box_list, gt_roidb)   
        
    def competition_mode(self, on):
        """Flip the 'use_salt' and 'cleanup' configuration entries together."""
        self.config['use_salt'], self.config['cleanup'] = (not on, not on)
    
    def _write_inria_results_file(self, all_boxes):
        # No-op: no results file is written for this dataset.
        return
       
if __name__ == '__main__':
    # Ad-hoc smoke test: build the 'train' ROI database and inspect it.
    d = datasets.imagenet('train', '')
    res = d.roidb
    from IPython import embed
    embed()
        
        
        
        
        
        
        
        
        
        
        
        
        
Ejemplo n.º 8
0
def main():
    """Tune curvature norm/scale hyper-parameters that minimize error + ECE.

    Loads a pre-trained model, a validation loader and pre-computed curvature
    factors, then optimizes log10 norm/scale parameters (three pairs when
    ``args.layer`` is set — layers are grouped by the Frobenius norm of their
    factor — otherwise one shared pair) using the optimizer selected via
    ``args.optimizer``.  Search statistics are saved under the results
    directory and optionally plotted.
    """
    args = setup()

    print("Preparing directories")
    filename = f"{args.prefix}{args.model}_{args.data}_{args.estimator}{args.suffix}"
    factors_path = os.path.join(args.root_dir, "factors", filename)
    weights_path = os.path.join(args.root_dir, "weights",
                                f"{args.model}_{args.data}.pth")
    if args.exp_id == -1:
        if not args.no_results:
            os.makedirs(os.path.join(args.results_dir, args.model, "data",
                                     args.estimator, args.optimizer),
                        exist_ok=True)
        if args.plot:
            os.makedirs(os.path.join(args.results_dir, args.model, "figures",
                                     args.estimator, args.optimizer),
                        exist_ok=True)
        results_path = os.path.join(args.results_dir, args.model, "data",
                                    args.estimator, args.optimizer, filename)
    else:
        # Experiment-specific sub-directory when an experiment id is given.
        if not args.no_results:
            os.makedirs(os.path.join(args.results_dir, args.model, "data",
                                     args.estimator, args.optimizer,
                                     args.exp_id),
                        exist_ok=True)
        if args.plot:
            os.makedirs(os.path.join(args.results_dir, args.model, "figures",
                                     args.estimator, args.optimizer,
                                     args.exp_id),
                        exist_ok=True)
        results_path = os.path.join(args.results_dir, args.model, "data",
                                    args.estimator, args.optimizer,
                                    args.exp_id, filename)

    print("Loading model")
    if args.model == 'lenet5':
        model = lenet5(pretrained=args.data, device=args.device)
    elif args.model == 'resnet18' and args.data != 'imagenet':
        model = resnet18(pretrained=weights_path,
                         num_classes=43 if args.data == 'gtsrb' else 10,
                         device=args.device)
    else:
        model_class = getattr(torchvision.models, args.model)
        if args.model in ['googlenet', 'inception_v3']:
            model = model_class(pretrained=True, aux_logits=False)
        else:
            model = model_class(pretrained=True)
    model.to(args.device).eval()
    if args.parallel:
        model = torch.nn.parallel.DataParallel(model)

    print("Loading data")
    if args.data == 'mnist':
        val_loader = datasets.mnist(args.torch_data, splits='val')
    elif args.data == 'cifar10':
        val_loader = datasets.cifar10(args.torch_data, splits='val')
    elif args.data == 'gtsrb':
        val_loader = datasets.gtsrb(args.data_dir,
                                    batch_size=args.batch_size,
                                    splits='val')
    elif args.data == 'imagenet':
        img_size = 224
        if args.model in ['googlenet', 'inception_v3']:
            img_size = 299
        val_loader = datasets.imagenet(args.data_dir,
                                       img_size,
                                       args.batch_size,
                                       args.workers,
                                       splits='val',
                                       use_cache=True,
                                       pre_cache=True)

    print("Loading factors")
    if args.estimator in ["diag", "kfac"]:
        factors = torch.load(factors_path + '.pth')
    elif args.estimator == 'efb':
        # EFB combines eigenvectors of the KFAC factors with scaling terms.
        kfac_factors = torch.load(factors_path.replace("efb", "kfac") + '.pth')
        lambdas = torch.load(factors_path + '.pth')

        factors = list()
        eigvecs = get_eigenvectors(kfac_factors)

        for eigvec, lambda_ in zip(eigvecs, lambdas):
            factors.append((eigvec[0], eigvec[1], lambda_))
    elif args.estimator == 'inf':
        factors = torch.load(f"{factors_path}{args.rank}.pth")
    torch.backends.cudnn.benchmark = True

    # Search box (in log10 space) for both norm and scale parameters.
    norm_min = -10
    norm_max = 10
    scale_min = -10
    scale_max = 10
    if args.boundaries:
        # Seed the optimizer with corner/edge/center points of the box.
        x0 = list()
        boundaries = [[norm_min, scale_min], [norm_max, scale_max],
                      [norm_min, scale_max], [norm_max, scale_min],
                      [norm_min / 2., scale_min], [norm_max / 2., scale_max],
                      [norm_min, scale_max / 2.], [norm_max, scale_min / 2.],
                      [norm_min / 2., scale_min / 2.],
                      [norm_max / 2., scale_max / 2.],
                      [norm_min / 2., scale_max / 2.],
                      [norm_max / 2., scale_min / 2.]]
        for b in boundaries:
            tmp = list()
            for _ in range(3 if args.layer else 1):
                tmp.extend(b)
            x0.append(tmp)
    else:
        x0 = None
    f_norms = np.array([factor.norm().cpu().numpy() for factor in factors])

    space = list()
    for i in range(3 if args.layer else 1):
        space.append(
            skopt.space.Real(norm_min,
                             norm_max,
                             name=f"norm{i}",
                             prior='uniform'))
        space.append(
            skopt.space.Real(scale_min,
                             scale_max,
                             name=f"scale{i}",
                             prior='uniform'))

    stats = {
        "norms": [],
        "scales": [],
        "acc": [],
        "ece": [],
        "nll": [],
        "ent": [],
        "cost": []
    }

    @skopt.utils.use_named_args(dimensions=space)
    def objective(**params):
        # One evaluation of the search objective: invert the factors with the
        # proposed norms/scales, run the BNN on the val set, return err + ECE.
        norms = list()
        scales = list()
        for f in f_norms:
            if args.layer:
                # Assign each layer the parameter pair of the statistic
                # (max / min / mean of all factor norms) it is closest to.
                if abs(f_norms.max() - f) < abs(f_norms.min() - f) and abs(
                        f_norms.max() - f) < abs(f_norms.mean() - f):
                    norms.append(10**params['norm0'])
                    scales.append(10**params['scale0'])
                # Closest to min
                elif abs(f_norms.min() - f) < abs(f_norms.max() - f) and abs(
                        f_norms.min() - f) < abs(f_norms.mean() - f):
                    norms.append(10**params['norm1'])
                    scales.append(10**params['scale1'])
                # Closest to mean
                else:
                    norms.append(10**params['norm2'])
                    scales.append(10**params['scale2'])
            else:
                norms.append(10**params['norm0'])
                scales.append(10**params['scale0'])
        if args.layer:
            print(
                tabulate.tabulate(
                    {
                        'Layer': np.arange(len(factors)),
                        'F-Norm:': f_norms,
                        'Norms': norms,
                        'Scales': scales
                    },
                    headers='keys',
                    numalign='right'))
        else:
            print("Norm:", norms[0], "Scale:", scales[0])
        try:
            inv_factors = invert_factors(factors, norms,
                                         args.pre_scale * scales,
                                         args.estimator)
        except (RuntimeError, np.linalg.LinAlgError):
            # Penalize parameter choices that make the factors singular.
            print("Error: Singular matrix")  # fix: dropped pointless f-prefix
            return 200

        predictions, labels, _ = eval_bnn(model,
                                          val_loader,
                                          inv_factors,
                                          args.estimator,
                                          args.samples,
                                          stats=False,
                                          device=args.device,
                                          verbose=False)

        err = 100 - accuracy(predictions, labels)
        ece = 100 * expected_calibration_error(predictions, labels)[0]
        nll = negative_log_likelihood(predictions, labels)
        ent = predictive_entropy(predictions, mean=True)
        stats["norms"].append(norms)
        stats["scales"].append(scales)
        stats["acc"].append(100 - err)
        stats["ece"].append(ece)
        stats["nll"].append(nll)
        stats["ent"].append(ent)
        stats["cost"].append(err + ece)
        print(
            f"Err.: {err:.2f}% | ECE: {ece:.2f}% | NLL: {nll:.3f} | Ent.: {ent:.3f}"
        )

        return err + ece

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=FutureWarning)

        if args.optimizer == "gbrt":
            res = skopt.gbrt_minimize(func=objective,
                                      dimensions=space,
                                      n_calls=args.calls,
                                      x0=x0,
                                      verbose=True,
                                      n_jobs=args.workers,
                                      n_random_starts=0 if x0 else 10,
                                      acq_func='EI')

        # EI (neg. expected improvement)
        # LCB (lower confidence bound)
        # PI (neg. prob. of improvement): Usually favours exploitation over exploration
        # gp_hedge (choose probabilistically between all)
        elif args.optimizer == "gp":  # fix: was `if`, breaking the elif chain
            res = skopt.gp_minimize(func=objective,
                                    dimensions=space,
                                    n_calls=args.calls,
                                    x0=x0,
                                    verbose=True,
                                    n_jobs=args.workers,
                                    n_random_starts=0 if x0 else 1,
                                    acq_func='gp_hedge')

        # acq_func: EI (neg. expected improvement), LCB (lower confidence bound), PI (neg. prob. of improvement)
        # xi: how much improvement one wants over the previous best values.
        # kappa: Importance of variance of predicted values. High: exploration > exploitation
        # base_estimator: RF (random forest), ET (extra trees)
        elif args.optimizer == "forest":
            res = skopt.forest_minimize(func=objective,
                                        dimensions=space,
                                        n_calls=args.calls,
                                        x0=x0,
                                        verbose=True,
                                        n_jobs=args.workers,
                                        n_random_starts=0 if x0 else 1,
                                        acq_func='EI')

        elif args.optimizer == "random":
            res = skopt.dummy_minimize(func=objective,
                                       dimensions=space,
                                       n_calls=args.calls,
                                       x0=x0,
                                       verbose=True)

        elif args.optimizer == "grid":
            space = [
                np.arange(norm_min, norm_max + 1, 10),
                np.arange(scale_min, scale_max + 1, 10)
            ]
            res = grid(func=objective, dimensions=space)

        print(f"Minimal cost of {min(stats['cost'])} found at:")
        if args.layer:
            print(
                tabulate.tabulate(
                    {
                        'Layer': np.arange(len(factors)),
                        'F-Norm:': f_norms,
                        'Norms': stats['norms'][np.argmin(stats['cost'])],
                        'Scales': stats['scales'][np.argmin(stats['cost'])]
                    },
                    headers='keys',
                    numalign='right'))
        else:
            print("Norm:", stats['norms'][np.argmin(stats['cost'])][0],
                  "Scale:", stats['scales'][np.argmin(stats['cost'])][0])

    if not args.no_results:
        print("Saving results")
        # The objective closure is not picklable; drop it before dumping.
        # NOTE(review): assumes `res` has skopt's `.specs` attribute — the
        # custom `grid` result may not; confirm for the "grid" optimizer.
        del res.specs['args']['func']
        np.save(
            results_path +
            f"_best_params{'_layer.npy' if args.layer else '.npy'}", [
                stats['norms'][np.argmin(stats['cost'])],
                stats['scales'][np.argmin(stats['cost'])]
            ])
        np.save(
            results_path +
            f"_hyperopt_stats{'_layer.npy' if args.layer else '.npy'}", stats)
        skopt.dump(
            res, results_path +
            f"_hyperopt_dump{'_layer.pkl' if args.layer else '.pkl'}")

    if args.plot:
        print("Plotting results")
        hyperparameters(args)
Ejemplo n.º 9
0
        path = os.path.join(os.path.dirname(__file__),
                            'VOCdevkit-matlab-wrapper')
        cmd = 'cd {} && '.format(path)
        cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
        cmd += '-r "dbstop if error; '
        cmd += 'setenv(\'LC_ALL\',\'C\'); imagenet_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\',{:d}); quit;"' \
               .format(self._devkit_path, comp_id,
                       self._image_set, output_dir, int(rm_results))
        print('Running:\n{}'.format(cmd))
        status = subprocess.call(cmd, shell=True)

    def evaluate_detections(self, all_boxes, output_dir):
        # Write detections in the ImageNet results format, then score them
        # with the MATLAB evaluation code.
        comp_id = self._write_imagenet_results_file(all_boxes)
        self._do_matlab_eval(comp_id, output_dir)

    def competition_mode(self, on):
        # Toggle the 'use_salt' and 'cleanup' config flags together:
        # both False in competition mode, both True otherwise.
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True


if __name__ == '__main__':
    # Ad-hoc smoke test: build the 'DET_val' ROI database and drop into IPython.
    d = datasets.imagenet('DET_val', '')
    res = d.roidb
    from IPython import embed
    embed()
Ejemplo n.º 10
0
        cache_filename = self._name + '_evaluation_cache.mat'
        cache_filepath = os.path.join(self.cache_path, cache_filename)

        gt_dir = os.path.join(self._data_path, 'bbox', self._image_set)

        path = os.path.join(self._devkit_path, 'evaluation')
        cmd = 'cd {} && '.format(path)
        cmd += '{:s} -nodisplay -nodesktop '.format(datasets.MATLAB)
        cmd += '-r "dbstop if error; '
        cmd += 'evaluate_frcn(\'{:s}\', \'{:s}\', \'{:s}\'); quit;"' \
                .format(filename, gt_dir, cache_filepath)
        print('Running:\n{}'.format(cmd))
        status = subprocess.call(cmd, shell=True)

    def evaluate_detections(self, all_boxes, output_dir=None):
        # Write VOC-format result files and score them via MATLAB.
        # NOTE(review): output_dir is accepted but never used — confirm callers.
        filename = self._write_voc_results_file(all_boxes)
        self._do_matlab_eval(filename)

    def competition_mode(self, on):
        # Toggle the 'use_salt' and 'cleanup' config flags together:
        # both False in competition mode, both True otherwise.
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True

if __name__ == '__main__':
    # Ad-hoc smoke test: build the 'train'/'2014' ROI database and inspect it.
    d = datasets.imagenet('train', '2014')
    res = d.roidb
    from IPython import embed; embed()
Ejemplo n.º 11
0
        print('~~~~~~~~')
        print('Results:')
        for ap in aps:
            print('{:.3f}'.format(ap))
        print('{:.3f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')

    def evaluate_detections(self, all_boxes, output_dir):
        # Write detections to the competition-format results file (storing the
        # generated comp id), then score them with the Python evaluation code.
        self._comp_id = self._write_imagenet_results_file(all_boxes)
        self._do_python_eval(output_dir)

    def competition_mode(self, on):
        # Toggle the 'use_salt' and 'cleanup' config flags together:
        # both False in competition mode, both True otherwise.
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True

if __name__ == '__main__':
    # Ad-hoc smoke test: build the 'val1' ROI database and drop into IPython.
    d = datasets.imagenet('val1', '')
    res = d.roidb
    from IPython import embed; embed()
Ejemplo n.º 12
0
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)

        # Load object bounding boxes into a data frame.
        for ix, obj in enumerate(objs):
            x1 = float(get_data_from_tag(obj, "xmin"))
            y1 = float(get_data_from_tag(obj, "ymin"))
            x2 = float(get_data_from_tag(obj, "xmax"))
            y2 = float(get_data_from_tag(obj, "ymax"))
            cls = self._wnid_to_ind[str(get_data_from_tag(obj, "name")).lower().strip()]
            boxes[ix, :] = [x1, y1, x2, y2]
            gt_classes[ix] = cls
            overlaps[ix, cls] = 1.0

        overlaps = scipy.sparse.csr_matrix(overlaps)

        return {
            "boxes": boxes,
            "gt_classes": gt_classes,
            "gt_overlaps": overlaps,
            "flipped": False,
        }


if __name__ == "__main__":
    # Ad-hoc smoke test: build the "val" ROI database and inspect interactively.
    d = datasets.imagenet("val", "")
    res = d.roidb
    from IPython import embed

    embed()
Ejemplo n.º 13
0
def main(argv):
    patch_conv2d_4_size()

    ## Parsing arguments
    parser = argparse.ArgumentParser(prog="main.py")
    parser.add_argument("--model", required=True, help="model name")
    parser.add_argument("--gpu",
                        default="0",
                        help="gpu ids, seperate by comma")
    parser.add_argument(
        "--resume",
        "-r",
        help="resume from checkpoint,specify folder containing the ckpt.t7")
    parser.add_argument("--dataset",
                        default="cifar",
                        type=str,
                        help="The Dataset")
    parser.add_argument("--no-cuda",
                        action="store_true",
                        default=False,
                        help="do not use gpu")
    parser.add_argument("--seed", default=None, help="random seed", type=int)
    parser.add_argument("--path", default=None, help="imagenet dataset path")
    args = parser.parse_args(argv)

    if args.seed is not None:
        if torch.cuda.is_available():
            torch.cuda.manual_seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        random.seed(args.seed)

    device = "cuda" if torch.cuda.is_available(
    ) and not args.no_cuda else "cpu"

    if device == "cuda":
        logging.info("Using GPU! Available gpu count: {}".format(
            torch.cuda.device_count()))
    else:
        logging.info("\033[1;3mWARNING: Using CPU!\033[0m")

    ## Dataset
    if args.dataset == "cifar":
        trainloader, validloader, ori_trainloader, testloader, _ = datasets.cifar10(
            train_bs=128,
            test_bs=100,
            train_transform=None,
            test_transform=None,
            train_val_split_ratio=0.9)
    elif args.dataset == "imagenet":
        trainloader, validloader, ori_trainloader, testloader, _ = datasets.imagenet(
            128, 32, None, None, train_val_split_ratio=None, path=args.path)

    ## Build model
    logging.info("==> Building model..")
    gpus = [int(d) for d in args.gpu.split(",")]
    torch.cuda.set_device(gpus[0])
    net = get_model(args.model)()
    net = net.to(device)

    if device == "cuda":
        cudnn.benchmark = True
        if len(gpus) > 1:
            p_net = torch.nn.DataParallel(net, gpus)
        else:
            p_net = net

    tester = Tester(net,
                    p_net, [trainloader, validloader, ori_trainloader],
                    testloader,
                    cfg={"dataset": args.dataset},
                    log=print)
    tester.init(device=device, resume=args.resume, pretrain=True)
    # tester.test(save=False)
    keep_ratios, sparsity = tester.check_sparsity()
    print("The final Sparsity is {:.3}, Keep Ratios Are:\n{}".format(
        sparsity, keep_ratios))
    for pc in tester.comp_primals.pc_list:
        print(pc.comp_names, pc.get_keep_ratio())
    _, keep_ratios = tester.get_true_flops()