Example 1
    def _train_a_query(self, indice):
        # Row g of `indexer` holds the dataset indices assigned to group g.
        indexer = np.stack(
            [np.array(indice[i::self.num_groups])
             for i in range(self.num_groups)])
        sampler = SubsetRandomSampler(indice)
        loader = DataLoader(self.dataset, batch_size=128, sampler=sampler,
                            num_workers=4, pin_memory=True)
        # One exponential random weight per group, redrawn every epoch.
        alpha_generator = Exponential(torch.ones([1, self.num_groups]))
        t_iter = tqdm(range(self.num_epoch),
                      total=self.num_epoch,
                      desc="Training")
        for t in t_iter:
            alpha = alpha_generator.sample().cuda()
            for img, label, index in loader:
                self.model.train()
                n0 = img.size(0)
                # Group id of every sample in the batch.
                u_is = [np.where(indexer == i.item())[0][0] for i in index]
                w = alpha[0, u_is]
                output = self.model(img.cuda(), alpha.repeat_interleave(n0, 0))
                loss = self.loss_fn(output, label.cuda(), w)
                self.optim.zero_grad()
                loss.backward()
                self.optim.step()
            self.sched.step()
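The call self.loss_fn(output, label, w) assumes a loss that accepts per-sample weights. A minimal sketch of a compatible loss, assuming a weighted cross-entropy (the project's actual loss is defined elsewhere):

import torch.nn.functional as F

def weighted_ce(output, label, w):
    # per-sample cross-entropy scaled by each sample's bootstrap weight
    per_sample = F.cross_entropy(output, label, reduction="none")
    return (per_sample * w).mean()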
Example 2
def odin_infer(loader,
               model,
               num_bs,
               num_classes,
               with_acc=False,
               seed=0,
               T=1000,
               eps=0.0014):
    loss_fn = torch.nn.CrossEntropyLoss()
    torch.manual_seed(seed)
    model.eval()
    a_test_ = Exponential(torch.ones([1, 400]))
    a_test = a_test_.sample((num_bs, ))
    acc = 0.
    outputs = np.zeros([num_bs, len(loader.dataset), num_classes + 1])
    beg = 0
    for img, label in loader:
        index = list(range(beg, beg + img.size(0)))
        beg = beg + img.size(0)
        label = label.numpy().reshape(-1, 1)
        img_ = img.cuda()
        img_.requires_grad = True
        # Zero weights for the initial forward pass, moved to GPU to match img_.
        output = model(img_, torch.zeros([img.shape[0], 400]).cuda())

        output = output / T
        pseudo_label = output.argmax(-1).cuda()
        loss = loss_fn(output, pseudo_label)
        loss.backward()

        # Take the sign of the input gradient and map it to {-1, +1}.
        gradient = torch.ge(img_.grad.data, 0)
        gradient = (gradient.float() - 0.5) * 2

        # Rescale each channel by the CIFAR-10 normalization stds so the eps
        # step is applied in normalized pixel space.
        for c, std in enumerate([0.2023, 0.1994, 0.2010]):
            idx = torch.LongTensor([c]).cuda()
            gradient.index_copy_(1, idx, gradient.index_select(1, idx) / std)

        img_new = img_.data - eps * gradient  # step against the gradient sign
        for b in range(num_bs):
            w_test = a_test[b].repeat_interleave(img.shape[0], dim=0)
            output_new = model(img_new, w_test.cuda()).cpu().detach().numpy()
            outputs[b, index] = np.concatenate([output_new, label], axis=1)

    if with_acc:
        pred = outputs.sum(0)[:, :-1].argmax(1)
        label = outputs[0][:, -1]
        acc = pred == label
        print(f"[Test] acc : {acc.mean()}")
    return outputs
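A hypothetical call, assuming a model that takes (image, weight) pairs and a loader over a normalized CIFAR-10-style test set (the 0.2023/0.1994/0.2010 divisors above are the standard CIFAR-10 per-channel stds):

from scipy.special import softmax

outputs = odin_infer(test_loader, model, num_bs=8, num_classes=10, with_acc=True)
probs = softmax(outputs[..., :-1], axis=-1).mean(0)  # average over bootstrap draws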
Example 3
def infer_a_sample(img, model, num_classes, num_bs, fac):
    model.eval()
    a_test = Exponential(torch.ones([1, 400]))
    w_test = a_test.sample((num_bs, ))
    output = np.zeros([num_bs, num_classes])
    # no_grad keeps the graph out of inference and lets .numpy() succeed
    with torch.no_grad():
        for b in range(num_bs):
            output[b] = model(img, w_test[b], fac).cpu().numpy()
    return output
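A sketch of one way to aggregate the num_bs returned rows into an uncertainty score (the aggregation is not part of the snippet): average the softmax probabilities over draws and take the predictive entropy.

probs = np.exp(output - output.max(-1, keepdims=True))
probs = (probs / probs.sum(-1, keepdims=True)).mean(0)    # [num_classes]
entropy = -(probs * np.log(probs + 1e-12)).sum()          # higher = less certain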
Example 4
class HomogeneousPoissonProcess:
    def __init__(self, rate=1.0):
        self.rate = rate
        # Inter-arrival gaps of a homogeneous Poisson process are i.i.d.
        # Exponential(rate).
        self.exp = Exponential(rate)

    def sample(self, size, max_seq_len, max_time=math.inf):
        gaps = self.exp.sample((size, max_seq_len))
        times = torch.cumsum(gaps, dim=1)       # event times = cumulative gaps
        masks = (times <= max_time).float()     # drop events past the horizon
        return times, masks
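The construction works because the inter-arrival gaps of a Poisson process with intensity rate are i.i.d. Exponential(rate). A quick hypothetical draw:

hpp = HomogeneousPoissonProcess(rate=1.0)
times, masks = hpp.sample(size=32, max_seq_len=100, max_time=50.0)
print(masks.sum(1).mean())  # events per sequence, close to rate * max_time = 50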
Example 5
    def __init__(self, in_features, alpha=None, alpha_trainable=True):
        """
        Init method.
        """
        super(Snake, self).__init__()
        self.in_features = in_features
        if alpha is not None:
            self.alpha = Parameter(torch.ones(in_features) * alpha)
        else:
            # sample() expects a shape tuple; squeeze the trailing batch dim
            m = Exponential(torch.tensor([0.1]))
            self.alpha = Parameter(m.sample((in_features,)).squeeze(-1))

        self.alpha.requires_grad = alpha_trainable
Example 6
    def _infer_gbs(self, with_acc=False, seed=0):
        torch.manual_seed(seed)
        a_test_ = Exponential(torch.ones([1, self.args.n_a]))
        a_test = a_test_.sample([self.args.num_bs, ])
        outputs = np.zeros([self.args.num_bs,
                            len(self.loader.dataset),
                            self.args.num_classes + 1])
        beg = 0
        for img, label in self.loader:
            end = beg + img.size(0)
            label = label.numpy().reshape(-1, 1)
            for b in range(self.args.num_bs):
                w_test = a_test[b].repeat_interleave(img.size(0), dim=0)
                output = self._infer_a_batch_a_bs(img, w_test)
                outputs[b, beg: end] = np.concatenate([output, label], axis=1)
            beg = end

        if with_acc:
            pred = outputs.sum(0)[:, :-1].argmax(1)
            label = outputs[0][:, -1]
            acc = (pred == label).mean()
            print(f"[Test] acc : {acc}")
        return outputs
Example 7
def odin_infer_a_sample(img,
                        model,
                        num_classes,
                        num_bs,
                        fac,
                        T=1000,
                        eps=0.0001):
    model.eval()
    loss_fn = torch.nn.CrossEntropyLoss()
    img = img.cuda()
    model.zero_grad()
    a_test = Exponential(torch.ones([1, 400]))
    w_test = a_test.sample((num_bs, ))
    outputs = np.zeros([num_bs, num_classes])
    for b in range(num_bs):
        # Fresh leaf tensor for each bootstrap draw so input gradients do
        # not accumulate across iterations.
        img_ = img.detach().clone()
        img_.requires_grad = True
        output = model(img_, w_test[b].cuda(), fac)

        output = output / T
        pseudo_label = output.argmax(-1).cuda()
        loss = loss_fn(output, pseudo_label)
        loss.backward()

        gradient = torch.ge(img_.grad.data, 0).float()  # sign pattern of the input gradient
        # gradient = (gradient.float() - 0.5) * 2

        # gradient.index_copy_(1, torch.LongTensor([0]).cuda(),
        #  gradient.index_select(1, torch.LongTensor([0]).cuda()) / (0.2023))
        # gradient.index_copy_(1, torch.LongTensor([1]).cuda(),
        #  gradient.index_select(1, torch.LongTensor([1]).cuda()) / (0.1994))
        # gradient.index_copy_(1, torch.LongTensor([2]).cuda(),
        #  gradient.index_select(1, torch.LongTensor([2]).cuda()) / (0.2010))

        img_new = img_.data - eps * gradient  # step against the gradient sign
        output_new = model(img_new, w_test[b].cuda(), fac).cpu().detach().numpy()
        outputs[b] = output_new
    return outputs
Example 8
def infer(loader,
          model,
          num_bs,
          num_classes,
          with_acc=False,
          with_indice=False,
          is_mcd=False,
          seed=0):
    torch.manual_seed(seed)
    model.eval()
    a_test_ = Exponential(torch.ones([1, 400]))
    a_test = a_test_.sample((num_bs, ))
    acc = 0.
    outputs = np.zeros([num_bs, len(loader.dataset), num_classes + 1])
    beg = 0
    indice = []
    ret = []
    for img, label, idx in loader:
        img = img.cuda()
        index = list(range(beg, beg + img.size(0)))
        beg = beg + img.size(0)
        label = label.numpy().reshape(-1, 1)
        indice += [idx]
        for b in range(num_bs):
            w_test = a_test[b].repeat_interleave(img.shape[0], dim=0)
            if is_mcd:
                # keep dropout sampling active for MC-dropout
                model.apply(apply_dropout)
            output = model(img, w_test.cuda()).detach().cpu().numpy()
            outputs[b, index] = np.concatenate([output, label], axis=1)
    ret += [outputs]
    if with_acc:
        pred = outputs.sum(0)[:, :-1].argmax(1)
        label = outputs[0][:, -1]
        acc = pred == label
        print(f"[Test] acc : {acc.mean()}")
        ret += [acc.mean()]

    if with_indice:
        ret += [torch.cat(indice)]
    return ret
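apply_dropout is defined elsewhere in the source project; a common definition for the MC-dropout branch, assumed here, re-enables only the dropout modules while the rest of the network stays in eval mode:

def apply_dropout(m):
    if isinstance(m, torch.nn.Dropout):
        m.train()  # keep sampling dropout masks at test time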
Example 9
    def __init__(self, in_features, alpha = None, alpha_trainable = True):
        '''
        Initialization.
        INPUT:
            - in_features: shape of the input
            - alpha: trainable parameter

            alpha is initialized to 1 by default. Higher values give a
            higher-frequency activation: 5-50 is a good starting point if
            you already think your data is periodic; consider starting
            lower (e.g. 0.5) if you think it is not. Either way, alpha is
            trained along with the rest of your model.
        '''
        super(Snake, self).__init__()
        self.in_features = in_features

        # initialize alpha
        if alpha is not None:
            self.alpha = Parameter(torch.ones(in_features) * alpha) # create a tensor out of alpha
        else:
            # Random init = mix of frequencies; sample() takes a shape tuple
            m = Exponential(torch.tensor([0.1]))
            self.alpha = Parameter(m.sample((in_features,)).squeeze(-1))

        self.alpha.requires_grad = alpha_trainable # usually we train alpha, but some experiments may freeze it
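With rate 0.1 the exponential draw has mean 1/0.1 = 10, so the random initialization spreads alpha over a broad range of frequencies. A minimal instantiation sketch (Snake's forward pass is defined elsewhere in the module):

layer = Snake(in_features=64)                        # random alpha ~ Exponential(0.1)
fixed = Snake(64, alpha=1.0, alpha_trainable=False)  # frozen alpha = 1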