def __init__(self,
             eps,
             lr,
             num_iter,
             loss_type,
             rand_eps=1e-3,
             num_classes=2,
             bounds=(0., 1.),
             minimal=False,
             restarts=1,
             device=None):
    self.eps = eps
    self.lr = lr
    self.num_iter = num_iter
    self.B = bounds
    self.restarts = restarts
    self.rand_eps = rand_eps
    self.device = device or gu.get_device(None)
    self.loss_type = loss_type
    self.num_classes = num_classes
    self.classes = list(range(self.num_classes))
    self.delta = None
    self.minimal = minimal  # early stop + no eps
    self.project = not self.minimal
    self.loss = -np.inf
def parse_exp_linear_model(data):
    """Cosine of each weight row of a linear model with the first coordinate axis."""
    device = gu.get_device()
    model = data['model'].to(device)
    # move parameters to CPU before converting to numpy (the model may live on a GPU)
    p = W, b = list(map(lambda x: x.detach().cpu().numpy(), model.parameters()))
    s = {}
    s['cosine0'], s['cosine1'] = W[:, 0] / np.linalg.norm(W, axis=1)
    return s
def parse_exp_depth1_model(data):
    """Cosine + w2: first-layer cosines/norms and second-layer weights of a depth-1 model."""
    device = gu.get_device()
    model = data['model'].to(device)
    # move parameters to CPU before converting to numpy (the model may live on a GPU)
    p = W1, b1, w2, b2 = list(
        map(lambda x: x.detach().cpu().numpy(), model.parameters()))
    s = {}
    s['params'] = p
    s['cosine'] = W1[:, 0] / np.linalg.norm(W1, axis=1)
    s['l2'] = np.linalg.norm(W1, axis=1)
    s['w2'] = w2
    s['corr0'] = np.corrcoef(s['cosine'], w2[0, :])[0, 1]
    s['corr1'] = np.corrcoef(s['cosine'], w2[1, :])[0, 1]
    s['max_weight_cosine'] = s['cosine'][np.argmax(s['w2'][1, :])]
    return s
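# Usage sketch for the two parsers above. The models here are stand-ins built
# with plain torch.nn; the real experiments presumably pass their own trained
# 2-class models inside data['model'].
import torch.nn as nn

linear_model = nn.Linear(20, 2)                    # parameters(): W (2, 20), b (2,)
depth1_model = nn.Sequential(nn.Linear(20, 200),   # W1 (200, 20), b1 (200,)
                             nn.ReLU(),
                             nn.Linear(200, 2))    # w2 (2, 200), b2 (2,)
# stats_lin = parse_exp_linear_model({'model': linear_model})
# stats_d1 = parse_exp_depth1_model({'model': depth1_model})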
def get_logits_given_tensor(X, model, device=None, bs=250, softmax=False):
    """Run `model` over tensor `X` in batches of size `bs` and return the logits
    (or softmax probabilities if `softmax=True`)."""
    if device is None: device = gu.get_device(None)
    sampler = torch.utils.data.SequentialSampler(X)
    sampler = torch.utils.data.BatchSampler(sampler, bs, False)

    logits = []

    with torch.no_grad():
        model = model.to(device)
        for idx in sampler:
            xb = X[idx].to(device)
            out = model(xb)
            logits.append(out)

    L = torch.cat(logits)
    if softmax: return F.softmax(L, 1)
    return L
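# Usage sketch for get_logits_given_tensor with a toy model and random inputs
# (purely illustrative; passing an explicit device avoids the gu helper):
import torch
import torch.nn as nn

X_demo = torch.rand(1000, 20)
toy_model = nn.Linear(20, 2)
# logits = get_logits_given_tensor(X_demo, toy_model, device=torch.device('cpu'))
# probs = get_logits_given_tensor(X_demo, toy_model, device=torch.device('cpu'),
#                                 softmax=True)   # each row sums to 1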
def parse_exp_data(data, load_X=False):
    s = {}
    model = data['model'].to(gu.get_device())
    data = data['data']
    X, Y = data['X'], data['Y']

    # detach tensors to CPU if they are not already numpy arrays
    if type(X) != np.ndarray:
        X = data['X'].detach().cpu()

    if type(Y) != np.ndarray:
        Y = data['Y'].detach().cpu()

    s['Y'] = Y
    if load_X: s['X'] = X
    s['Y_'] = get_yhat(model, X)
    s['model'] = model
    return s
def get_cifar10_models(device=None, pretrained=True):
    if c10_not_found: return {}
    device = gu.get_device(None) if device is None else device
    get_lmbda = lambda cls: (lambda: cls(pretrained=pretrained).eval().to(
        device))
    return {
        'vgg11_bn': get_lmbda(c10.vgg11_bn),
        'vgg13_bn': get_lmbda(c10.vgg13_bn),
        'vgg16_bn': get_lmbda(c10.vgg16_bn),
        'vgg19_bn': get_lmbda(c10.vgg19_bn),
        'resnet18': get_lmbda(c10.resnet18),
        'resnet34': get_lmbda(c10.resnet34),
        'resnet50': get_lmbda(c10.resnet50),
        'densenet121': get_lmbda(c10.densenet121),
        'densenet161': get_lmbda(c10.densenet161),
        'densenet169': get_lmbda(c10.densenet169),
        'mobilenet_v2': get_lmbda(c10.mobilenet_v2),
        'googlenet': get_lmbda(c10.googlenet),
        'inception_v3': get_lmbda(c10.inception_v3)
    }
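# Usage sketch: the returned dict maps model names to zero-argument constructors,
# so nothing is instantiated or moved to `device` until a constructor is called.
# `c10` is assumed to be a pretrained CIFAR-10 model zoo imported elsewhere.
# ctors = get_cifar10_models(device=torch.device('cpu'), pretrained=False)
# if 'resnet18' in ctors:             # empty dict when the c10 package is missing
#     resnet18 = ctors['resnet18']()  # model is built lazily here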
def __init__(self,
             eps,
             lr,
             num_iter,
             shape,
             num_classes=2,
             bounds=(0., 1.),
             loss_type='untargeted',
             rand_eps=0.,
             device=None):
    self.device = device if device else gu.get_device(None)
    self.loss_type = loss_type
    self.B = bounds
    self.rand_eps = rand_eps
    self.eps = eps
    self.lr = lr
    self.num_iter = num_iter
    self.num_classes = num_classes
    self.classes = list(range(self.num_classes))
    self.shape = shape
    self._init_delta()
def get_lms_data(**kw):

    c = config = {
        'num_train': 100_000,
        'dim': 20,
        'lin_margin': 0.1,
        'slab_margin': 0.1,
        'same_margin': False,
        'random_transform': False,
        'width': 1,  # data width
        'bs': 256,
        'corrupt_lin': 0.0,
        'corrupt_lin_margin': False,
        'corrupt_slab': 0.0,
        'num_test': 2_000,
        'hdim': 200,  # model width
        'hl': 2,  # model depth
        'device': gu.get_device(0),
        'input_dropout': 0,
        'num_lin': 1,
        'num_slabs': 19,
        'num_slabs7': 0,
        'num_slabs3': 0,
    }
def get_accuracy_given_tensor(X, Y, model, device=None, bs=250):
    if device is None: device = gu.get_device(None)
    Y = torch.LongTensor(Y).to(device)
    yhat = get_predictions_given_tensor(X, model, device=device, bs=bs)
    return (Y == yhat).float().mean().item()
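# Usage sketch (illustrative names): compare labels against batched predictions.
# Relies on get_predictions_given_tensor, assumed to be defined alongside these helpers.
# acc = get_accuracy_given_tensor(X_test, Y_test, model, bs=250)
# print(f'accuracy: {acc:.3f}')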
def get_feature_deps(dl,
                     model,
                     W=None,
                     dep_type='random',
                     only_linear=False,
                     coords=None,
                     metric='accuracy',
                     use_model_pred=False,
                     print_info=False,
                     sample_pct=1.0,
                     device_id=None):
    """Compute feature dependencies using randomization or swapping"""
    def _randomize(X, Y, coords):
        p = torch.randperm(len(X))
        for c in coords:
            X[:, c] = X[p, c]
        return X

    def _swap(X, Y, coords):
        # labels may live on the GPU; bring them to CPU before using numpy
        idx0, idx1 = map(lambda c: (Y.cpu().numpy() == c).nonzero()[0], [0, 1])
        idx0_new = np.random.choice(idx1, size=len(idx0), replace=True)
        idx1_new = np.random.choice(idx0, size=len(idx1), replace=True)
        for c in coords:
            X[idx0, c], X[idx1, c] = X[idx0_new, c], X[idx1_new, c]
        return X

    def _get_dep_data(X, Y, coords):
        return dict(random=_randomize, swap=_swap)[dep_type](X, Y, coords)

    assert metric in {'accuracy', 'loss', 'auc'}

    # setup data
    device = gu.get_device(device_id)
    model = model.to(device)
    X, Y = map(lambda Z: Z.to(device), dl.dataset.tensors)
    Yh = get_yhat(model, X)
    dim = X.shape[1]
    if W is None: W = np.eye(dim)
    W = torch.Tensor(W).to(device)
    rt_X = torch.mm(X, torch.transpose(W, 0, 1))

    # subsample data
    n_samp = int(round(sample_pct * len(rt_X)))
    perm = torch.randperm(len(rt_X))[:n_samp]
    rt_X, Y, Yh = rt_X[perm, :], Y[perm], Yh[perm]

    # compute deps
    deps = {}

    dims = list(range(dim))
    if coords is None and not only_linear: coords = dims
    if coords is None and only_linear: coords = [0, 1]

    for idx, coord in enumerate(coords):
        if print_info: print('{}/{}'.format(idx, len(coords)), end=' ')
        rt_X_ = rt_X.clone().to(device)
        rt_X_ = _get_dep_data(
            rt_X_, Y, coord if type(coord) in (list, tuple) else [coord])
        X_ = torch.mm(rt_X_, W)
        Ys = get_yhat(model, X_)

        key = tuple(coord) if type(coord) in (list, tuple) else coord

        if metric == 'auc':
            L = utils.get_logits_given_tensor(X_, model, device=device, bs=250)
            S = L[:, 1] - L[:, 0]
            auc = roc_auc_score(Y.cpu().numpy(), S.cpu().numpy())
            deps[key] = auc
        elif metric == 'accuracy':
            deps[key] = get_acc(Yh if use_model_pred else Y, Ys)
        elif metric == 'loss':
            L = utils.get_logits_given_tensor(X_, model, device=device, bs=250)
            with torch.no_grad():
                loss_val = F.cross_entropy(L, Y).item()
            deps[key] = loss_val

    return deps
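# Usage sketch for get_feature_deps (hypothetical DataLoader whose dataset is a
# TensorDataset(X, Y)): each entry of `deps` is the metric recomputed after the
# listed coordinate(s) are randomized or class-swapped, so a large accuracy drop
# indicates the model relies on that feature.
# deps = get_feature_deps(dl, model, dep_type='random', metric='accuracy',
#                         coords=[0, 1, [2, 3]], sample_pct=0.5)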
def __init__(self, attack, bounds=(0., 1.), device=None, num_classes=2):
    self.device = device if device else gu.get_device(None)
    self.attack = attack
    self.attack.device = self.device
    self.B = bounds
    self.num_classes = num_classes