import torch
import torch.nn.functional as F


# Scoring functions: each returns a per-position importance score that can be
# used to rank which tokens/characters of the input to perturb first.

def replaceone(model, inputs, pred, classes):
    """Score each position by the loss incurred when that token is replaced
    with the out-of-vocabulary index (assumed to be 2)."""
    losses = torch.zeros(inputs.size(0), inputs.size(1))
    for i in range(inputs.size(1)):
        tempinputs = inputs.clone()
        tempinputs[:, i] = 2                          # overwrite position i with the OOV/UNK id
        with torch.no_grad():
            tempoutput = model(tempinputs)            # model is assumed to return log-probabilities
        losses[:, i] = F.nll_loss(tempoutput, pred, reduction='none')
    return losses
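# Usage sketch for replaceone(), under assumptions not stated in the code above:
# the model returns log-probabilities, token id 2 is the OOV/UNK index, and the
# ToyTokenClassifier below is a hypothetical stand-in for the real target model.
def _example_replaceone():
    import torch.nn as nn

    class ToyTokenClassifier(nn.Module):
        def __init__(self, vocab=100, classes=4):
            super().__init__()
            self.emb = nn.Embedding(vocab, 16)
            self.fc = nn.Linear(16, classes)

        def forward(self, x):                         # x: (batch, seq_len) token ids
            return F.log_softmax(self.fc(self.emb(x).mean(dim=1)), dim=1)

    model = ToyTokenClassifier()
    inputs = torch.randint(3, 100, (8, 20))           # 8 sequences of 20 token ids
    pred = model(inputs).argmax(dim=1)                # current predictions to attack
    scores = replaceone(model, inputs, pred, classes=4)   # (8, 20) loss per masked position
    print(scores.argmax(dim=1))                       # most influential position per sequence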
def grad(model, inputs, pred, classes):
    """Score each position by the L2 norm of the loss gradient with respect to
    the (continuous, e.g. one-hot) input at that position."""
    inputs1 = inputs.clone()
    inputs1.requires_grad_(True)
    output = model(inputs1)                           # log-probabilities
    loss = F.nll_loss(output, pred)
    loss.backward()
    score = inputs1.grad.norm(2, dim=1)               # collapse the feature/alphabet dimension
    return score
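# Usage sketch for the input-gradient grad() above. Assumptions: inputs are
# continuous one-hot character tensors of shape (batch, alphabet, length), the
# model is differentiable w.r.t. them and returns log-probabilities, and the
# tiny conv classifier is hypothetical. The name is captured here because the
# embedding-based grad() defined further below shadows this one in a module.
_input_grad = grad

def _example_input_grad():
    import torch.nn as nn

    conv, fc = nn.Conv1d(70, 32, kernel_size=3, padding=1), nn.Linear(32, 4)

    def model(x):                                     # hypothetical char-level classifier
        return F.log_softmax(fc(F.relu(conv(x)).mean(dim=2)), dim=1)

    # One-hot encode 8 sequences of 50 characters over a 70-symbol alphabet.
    inputs = F.one_hot(torch.randint(0, 70, (8, 50)), 70).float().permute(0, 2, 1)
    pred = model(inputs).argmax(dim=1)
    scores = _input_grad(model, inputs, pred, classes=4)   # (8, 50) gradient magnitude per character
    print(scores.argmax(dim=1))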
def temporaltail(model, inputs, pred, classes):
    """Temporal tail score: the change in the predicted class probability when
    position i is revealed in addition to the suffix that follows it."""
    losses1 = torch.zeros(inputs.size(0), inputs.size(1))
    dloss = torch.zeros(inputs.size(0), inputs.size(1))
    for i in range(inputs.size(1)):
        tempinputs = inputs[:, i:]                    # suffix starting at position i
        with torch.no_grad():
            tempoutput = torch.exp(model(tempinputs))     # back from log-probabilities to probabilities
        losses1[:, i] = tempoutput.gather(1, pred.view(-1, 1)).view(-1)
    dloss[:, -1] = losses1[:, -1] - 1.0 / classes     # last position vs. the uniform prior
    for i in range(inputs.size(1) - 1):
        dloss[:, i] = losses1[:, i] - losses1[:, i + 1]
    return dloss
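# Usage sketch for temporaltail(). Assumptions: word-level token-id inputs of
# shape (batch, seq_len), and a hypothetical model that accepts the
# progressively shorter suffixes inputs[:, i:] and returns log-probabilities.
def _example_temporaltail():
    import torch.nn as nn

    emb, fc = nn.Embedding(100, 16), nn.Linear(16, 4)

    def model(x):                                     # mean-pooled toy classifier, any length
        return F.log_softmax(fc(emb(x).mean(dim=1)), dim=1)

    inputs = torch.randint(3, 100, (8, 20))
    pred = model(inputs).argmax(dim=1)
    scores = temporaltail(model, inputs, pred, classes=4)   # (8, 20) tail score per position
    print(scores.argmax(dim=1))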
def temporal(model, inputs, pred, classes):
    """Temporal head score (character-level): the change in the predicted class
    probability when position i is revealed in addition to the prefix before it.
    Inputs are (batch, alphabet, length); dimension 2 indexes positions."""
    losses1 = torch.zeros(inputs.size(0), inputs.size(2))
    dloss = torch.zeros(inputs.size(0), inputs.size(2))
    for i in range(inputs.size(2)):
        tempinputs = inputs.clone()
        if i != inputs.size(2) - 1:
            tempinputs[:, :, i + 1:].zero_()          # hide everything after position i
        with torch.no_grad():
            tempoutput = torch.exp(model(tempinputs))
        losses1[:, i] = tempoutput.gather(1, pred.view(-1, 1)).view(-1)
    dloss[:, 0] = losses1[:, 0] - 1.0 / classes       # first position vs. the uniform prior
    for i in range(1, inputs.size(2)):                # position axis is dim 2, not dim 1
        dloss[:, i] = losses1[:, i] - losses1[:, i - 1]
    return dloss
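# Usage sketch for temporal(). Assumptions: character-level one-hot inputs of
# shape (batch, alphabet, length), so zeroing tempinputs[:, :, i+1:] hides the
# characters after position i; the tiny conv classifier is hypothetical.
def _example_temporal():
    import torch.nn as nn

    conv, fc = nn.Conv1d(70, 32, kernel_size=3, padding=1), nn.Linear(32, 4)

    def model(x):                                     # hypothetical char-level classifier
        return F.log_softmax(fc(F.relu(conv(x)).mean(dim=2)), dim=1)

    inputs = F.one_hot(torch.randint(0, 70, (8, 50)), 70).float().permute(0, 2, 1)
    pred = model(inputs).argmax(dim=1)
    scores = temporal(model, inputs, pred, classes=4)     # (8, 50) prefix score per character
    print(scores.argmax(dim=1))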
def grad(model, inputs, pred, classes):
    """Gradient score on the embedding layer: the L2 norm of the loss gradient
    with respect to each token's embedding, with special tokens (id <= 2)
    pushed to a large negative score so they are never selected.
    Note: this definition shadows the input-gradient `grad` above if both are
    kept in the same module."""
    if isinstance(model, torch.nn.DataParallel):
        model = model.module
    model.train()
    embd, output = model(inputs, returnembd=True)     # model must expose its embedding output
    # embd.retain_grad()
    loss = F.nll_loss(output, pred)
    loss.backward()
    score = (inputs <= 2).float()                     # mask of pad/OOV/special positions
    score = -score
    score = embd.grad.norm(2, dim=2) + score * 1e9    # special positions get -1e9
    return score
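# Usage sketch for the embedding-gradient grad(). Assumptions: the target model
# accepts returnembd=True, returns (embedding, log-probabilities), and calls
# retain_grad() on the embedding internally (matching the commented-out call
# above); token ids <= 2 are special (pad/OOV). ToyEmbedModel is hypothetical.
def _example_embedding_grad():
    import torch.nn as nn

    class ToyEmbedModel(nn.Module):
        def __init__(self, vocab=100, classes=4):
            super().__init__()
            self.emb = nn.Embedding(vocab, 16)
            self.fc = nn.Linear(16, classes)

        def forward(self, x, returnembd=False):
            embd = self.emb(x)                        # (batch, seq_len, dim)
            out = F.log_softmax(self.fc(embd.mean(dim=1)), dim=1)
            if returnembd:
                embd.retain_grad()                    # so embd.grad is populated after backward()
                return embd, out
            return out

    model = ToyEmbedModel()
    inputs = torch.randint(3, 100, (8, 20))
    inputs[:, -2:] = 0                                # pretend the last two positions are padding
    pred = model(inputs).argmax(dim=1)
    scores = grad(model, inputs, pred, classes=4)     # (8, 20); padded positions get -1e9
    print(scores.argmax(dim=1))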