def backward(ctx, gradoutput):
    """Backward pass of the interpolation layer.

    Dispatches to the GPU or CPU C-extension kernel depending on where the
    saved inputs live, and returns freshly allocated zero-initialized
    gradient buffers filled in by the kernel.

    Args:
        ctx: autograd context holding the tensors saved in forward().
        gradoutput: gradient of the loss w.r.t. the layer output.

    Returns:
        (gradinput1, gradinput2): gradients w.r.t. the two saved inputs.
    """
    input1, input2 = ctx.saved_tensors

    # Select the tensor factory and the native kernel for the device the
    # inputs are on; both branches use float32 buffers, matching the
    # extension's expectations.
    if input1.is_cuda:
        tensor_factory = torch.cuda.FloatTensor
        kernel = my_lib.InterpolationLayer_gpu_backward
    else:
        tensor_factory = torch.FloatTensor
        kernel = my_lib.InterpolationLayer_cpu_backward

    gradinput1 = tensor_factory().resize_(input1.size()).zero_()
    gradinput2 = tensor_factory().resize_(input2.size()).zero_()

    # The kernel writes gradients in place and returns a status code;
    # nonzero codes are reported (not raised), preserving best-effort style.
    err = kernel(input1, input2, gradoutput, gradinput1, gradinput2)
    if err != 0:
        print(err)

    return gradinput1, gradinput2
def backward(ctx, gradoutput):
    """Compute input gradients for the interpolation layer.

    Allocates zero-filled float32 gradient buffers on the same device as
    the saved inputs, then invokes the matching C-extension backward
    kernel (GPU or CPU) which populates them in place.

    Args:
        ctx: autograd context; ``ctx.saved_tensors`` holds the two
            tensors stashed during the forward pass.
        gradoutput: gradient flowing in from the layer's output.

    Returns:
        Tuple ``(gradinput1, gradinput2)`` of gradients for the inputs.
    """
    input1, input2 = ctx.saved_tensors
    on_gpu = input1.is_cuda

    def make_grad_buffer(reference):
        # Zero-filled float buffer shaped like `reference`, on the
        # appropriate device for the native kernel to write into.
        factory = torch.cuda.FloatTensor if on_gpu else torch.FloatTensor
        return factory().resize_(reference.size()).zero_()

    gradinput1 = make_grad_buffer(input1)
    gradinput2 = make_grad_buffer(input2)

    backward_kernel = (my_lib.InterpolationLayer_gpu_backward if on_gpu
                       else my_lib.InterpolationLayer_cpu_backward)
    err = backward_kernel(input1, input2, gradoutput, gradinput1, gradinput2)
    if err != 0:
        # Status codes are printed rather than raised, matching the
        # file's existing best-effort error-reporting convention.
        print(err)

    return gradinput1, gradinput2