def backward(ctx, gradoutput):
    """Backward pass of the flow-projection layer.

    Dispatches to the CUDA or CPU C-extension kernel depending on where
    the saved input lives, and accumulates the gradient w.r.t. ``input1``.

    Args:
        ctx: autograd context holding the tensors saved in ``forward``
            (``input1``, ``count``, ``output`` — ``output`` is saved but
            not needed by the C backward kernels).
        gradoutput: gradient of the loss w.r.t. this layer's output.

    Returns:
        Tuple ``(gradinput1, None)`` — gradient for ``input1`` and a
        placeholder for the non-differentiable second forward argument.
    """
    # ``output`` is unpacked only to match what forward saved; the
    # C kernels take (input, count, grad_out, grad_in).
    input1, count, _output = ctx.saved_tensors

    # Allocate the gradient buffer on the same device as input1 (the
    # legacy torch.cuda.FloatTensor() constructor always used the
    # default GPU, which is wrong under multi-GPU).
    gradinput1 = torch.zeros(input1.size(), dtype=torch.float32,
                             device=input1.device)

    if input1.is_cuda:
        err = my_lib.FlowProjectionLayer_gpu_backward(
            input1, count, gradoutput, gradinput1)
    else:
        err = my_lib.FlowProjectionLayer_cpu_backward(
            input1, count, gradoutput, gradinput1)

    # The C extension signals failure via a non-zero status code; keep
    # the original report-and-continue behavior (callers may tolerate
    # partial results), but surface the code.
    if err != 0:
        print(err)

    return gradinput1, None
def backward(ctx, gradoutput):
    """Compute the gradient of the flow-projection layer w.r.t. its input.

    Selects the GPU or CPU C-extension kernel based on the device of the
    saved input tensor. Returns ``(gradinput1, None)``: the second slot
    corresponds to the non-differentiable ``count`` argument.
    """
    input1, count = ctx.saved_tensors

    on_gpu = input1.is_cuda
    if on_gpu:
        # Legacy constructor: empty CUDA float tensor, sized and zeroed.
        gradinput1 = torch.cuda.FloatTensor().resize_(input1.size()).zero_()
        status = my_lib.FlowProjectionLayer_gpu_backward(
            input1, count, gradoutput, gradinput1)
    else:
        gradinput1 = torch.FloatTensor().resize_(input1.size()).zero_()
        status = my_lib.FlowProjectionLayer_cpu_backward(
            input1, count, gradoutput, gradinput1)

    # Non-zero status means the C kernel failed; report it but keep the
    # original best-effort behavior of returning the buffer anyway.
    if status != 0:
        print(status)

    return gradinput1, None