def calculate_sample_grad(self, net, x, adjs, sampled_nodes, targets, batch_nodes):
    """Compute per-sample gradient norms for `batch_nodes` via a full forward pass.

    Runs `net` through `forward_full`, backpropagates the batch loss, then
    queries `autograd_wl` for the per-sample gradient statistic captured at
    `net.gc_out`.

    Args:
        net: model exposing `loss_f` and the hooked output layer `gc_out`.
        x, adjs, sampled_nodes: inputs forwarded verbatim to `forward_full`.
        targets: label tensor indexed by `batch_nodes`.
        batch_nodes: indices of the samples in this batch.

    Returns:
        numpy array of per-sample gradient values (moved to CPU).
    """
    predictions = self.forward_full(net, x, adjs, sampled_nodes)
    batch_loss = net.loss_f(predictions, targets[batch_nodes])
    # Backward pass populates the per-sample statistics tracked by autograd_wl.
    batch_loss.backward()
    per_sample = autograd_wl.calculate_sample_grad(net.gc_out)
    return per_sample.cpu().numpy()
def calculate_sample_grad(self, x, adjs, targets, batch_nodes):
    """Compute per-sample gradient norms for `batch_nodes` using this model.

    Same idea as the `net`-parameterized variant, but the model is `self`:
    forward, backpropagate the batch loss, then read the per-sample gradient
    statistic that `autograd_wl` captured at `self.gc_out`.

    Args:
        x, adjs: inputs forwarded verbatim to `self.forward`.
        targets: label tensor indexed by `batch_nodes`.
        batch_nodes: indices of the samples in this batch.

    Returns:
        numpy array of per-sample gradient values (moved to CPU).
    """
    predictions = self.forward(x, adjs)
    batch_loss = self.loss_f(predictions, targets[batch_nodes])
    # Backward pass populates the per-sample statistics tracked by autograd_wl.
    batch_loss.backward()
    per_sample = autograd_wl.calculate_sample_grad(self.gc_out)
    return per_sample.cpu().numpy()
def partial_grad_with_norm(self, x, adjs, targets, weight):
    """Backpropagate an importance-weighted loss and return unweighted per-sample grads.

    The per-sample losses are scaled by `weight` before summing (importance
    sampling); the gradient statistic read back from `autograd_wl` is then
    rescaled by 1/(weight * num_samples) to undo that weighting.

    Args:
        x, adjs: inputs forwarded verbatim to `self.forward`.
        targets: label tensor, one row per sample.
        weight: per-sample importance weights (assumed nonzero — the grads
            are divided by it).

    Returns:
        (detached scalar loss, numpy array of rescaled per-sample grads).
    """
    num_samples = targets.size(0)
    predictions = self.forward(x, adjs)
    # Vectorized (per-sample) loss; multi-class targets are averaged over
    # the class dimension before importance weighting.
    per_sample_loss = self.loss_f_vec(predictions, targets)
    if self.multi_class:
        per_sample_loss = per_sample_loss.mean(1)
    weighted_loss = (per_sample_loss * weight).sum()
    weighted_loss.backward()
    per_sample = autograd_wl.calculate_sample_grad(self.gc_out)
    # Undo the importance weighting and normalize by batch size.
    per_sample = per_sample * (1 / weight / num_samples)
    return weighted_loss.detach(), per_sample.cpu().numpy()
def partial_grad_with_norm(self, net, x, adjs, sampled_nodes, x_exact, adjs_exact, input_exact_nodes, targets, weight):
    """Backprop an importance-weighted loss through a mini forward pass.

    Mini-batch counterpart of the plain variant: the forward pass mixes
    sampled and exact inputs via `forward_mini`. Per-sample losses are
    scaled by `weight` before summing, and the per-sample gradient
    statistic from `autograd_wl` is rescaled by 1/(weight * num_samples)
    to undo that weighting.

    Args:
        net: model exposing `multi_class`, `loss_f_vec`, and the hooked
            output layer `gc_out`.
        x, adjs, sampled_nodes, x_exact, adjs_exact, input_exact_nodes:
            inputs forwarded verbatim to `forward_mini`.
        targets: label tensor, one row per sample.
        weight: per-sample importance weights (assumed nonzero — the grads
            are divided by it).

    Returns:
        (detached scalar loss, numpy array of rescaled per-sample grads).
    """
    num_samples = targets.size(0)
    predictions = self.forward_mini(net, x, adjs, sampled_nodes, x_exact, adjs_exact, input_exact_nodes)
    # Vectorized (per-sample) loss; multi-class targets are averaged over
    # the class dimension before importance weighting.
    per_sample_loss = net.loss_f_vec(predictions, targets)
    if net.multi_class:
        per_sample_loss = per_sample_loss.mean(1)
    weighted_loss = (per_sample_loss * weight).sum()
    weighted_loss.backward()
    per_sample = autograd_wl.calculate_sample_grad(net.gc_out)
    # Undo the importance weighting and normalize by batch size.
    per_sample = per_sample * (1 / weight / num_samples)
    return weighted_loss.detach(), per_sample.cpu().numpy()