import torch
import correlation_cuda  # compiled CUDA extension providing the correlation kernels

# Variant 1: new-style (static, ctx-based) torch.autograd.Function backward.
def backward(ctx, grad_output):
    input1, input2 = ctx.saved_tensors

    with torch.cuda.device_of(input1):
        # Scratch buffers and gradient tensors allocated on input1's device.
        rbot1 = input1.new()
        rbot2 = input2.new()
        grad_input1 = input1.new()
        grad_input2 = input2.new()

        correlation_cuda.backward(
            input1, input2, rbot1, rbot2,
            grad_output,
            grad_input1, grad_input2,
            ctx.pad_size, ctx.kernel_size, ctx.max_displacement,
            ctx.stride1, ctx.stride2, ctx.corr_multiply,
        )

    # One gradient per forward input: two tensors, then a None for each of
    # the six non-tensor hyper-parameters.
    return grad_input1, grad_input2, None, None, None, None, None, None
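# A minimal forward sketch (not in the original) that variant 1's backward
# pairs with, assuming a flownet2-style correlation_cuda.forward signature;
# the class name and buffer handling are illustrative. It shows where the ctx
# attributes come from and why backward returns six trailing Nones.
class CorrelationFunction(torch.autograd.Function):

    @staticmethod
    def forward(ctx, input1, input2, pad_size, kernel_size,
                max_displacement, stride1, stride2, corr_multiply):
        ctx.save_for_backward(input1, input2)
        # Stash the six hyper-parameters on the context for backward.
        ctx.pad_size = pad_size
        ctx.kernel_size = kernel_size
        ctx.max_displacement = max_displacement
        ctx.stride1 = stride1
        ctx.stride2 = stride2
        ctx.corr_multiply = corr_multiply

        with torch.cuda.device_of(input1):
            rbot1 = input1.new()
            rbot2 = input2.new()
            output = input1.new()
            correlation_cuda.forward(
                input1, input2, rbot1, rbot2, output,
                pad_size, kernel_size, max_displacement,
                stride1, stride2, corr_multiply,
            )
        return output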
# Variant 2: legacy (pre-0.4, instance-method) Function API; the
# hyper-parameters live on self and only the two tensor gradients are returned.
def backward(self, grad_output):
    input1, input2 = self.saved_tensors

    with torch.cuda.device_of(input1):
        rbot1 = input1.new()
        rbot2 = input2.new()
        grad_input1 = input1.new()
        grad_input2 = input2.new()

        correlation_cuda.backward(
            input1, input2, rbot1, rbot2,
            grad_output,
            grad_input1, grad_input2,
            self.pad_size, self.kernel_size, self.max_displacement,
            self.stride1, self.stride2, self.corr_multiply,
        )

    return grad_input1, grad_input2
# Variant 3: ctx-based backward that unpacks all six hyper-parameters from a
# single tuple (ctx.arg) stored during forward.
def backward(ctx, grad_output):
    input1, input2 = ctx.saved_tensors
    pad, kernel, max_d, stride1, stride2, corr_m = ctx.arg

    with torch.cuda.device_of(input1):
        rbot1 = input1.new()
        rbot2 = input2.new()
        grad_input1 = input1.new()
        grad_input2 = input2.new()

        correlation_cuda.backward(
            input1, input2, rbot1, rbot2,
            grad_output,
            grad_input1, grad_input2,
            pad, kernel, max_d, stride1, stride2, corr_m,
        )

    # Note: with the static ctx API, backward must return one value per forward
    # input; if forward also took the hyper-parameters as arguments, a None
    # should be returned for each of them (as in variant 1).
    return grad_input1, grad_input2
# Variant 4: a different extension signature in which correlation_cuda.backward
# computes and returns both gradients itself, parameterized per axis
# (kernel, patch, padding, patch dilation, stride).
def backward(self, grad_output):
    input1, input2 = self.saved_variables
    kH, kW = self.kernel_size
    patchH, patchW = self.patch_size
    padH, padW = self.padding
    dilation_patchH, dilation_patchW = self.dilation_patch
    dH, dW = self.stride

    grad_input1, grad_input2 = correlation_cuda.backward(
        input1, input2, grad_output,
        kH, kW,
        patchH, patchW,
        padH, padW,
        dilation_patchH, dilation_patchW,
        dH, dW,
    )
    return grad_input1, grad_input2
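# A hedged check sketch (not in the original): torch.autograd.gradcheck can
# validate any of these backward implementations against numerical gradients.
# `corr_fn` stands in for whichever wrapper exposes the op as a callable of the
# two input tensors; double precision and tiny inputs keep the
# finite-difference comparison tractable (assuming the kernel supports float64).
def check_correlation(corr_fn):
    input1 = torch.randn(1, 1, 8, 8, dtype=torch.double,
                         device="cuda", requires_grad=True)
    input2 = torch.randn(1, 1, 8, 8, dtype=torch.double,
                         device="cuda", requires_grad=True)
    return torch.autograd.gradcheck(corr_fn, (input1, input2), eps=1e-4)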
# Variant 5: same as variant 1, with the default hyper-parameter values noted
# for reference; the live call reads them from ctx.
def backward(ctx, grad_output):
    input1, input2 = ctx.saved_tensors
    # Defaults: pad_size, kernel_size, max_displacement, stride1, stride2,
    # corr_multiply = 3, 3, 20, 1, 2, 1

    with torch.cuda.device_of(input1):
        rbot1 = input1.new()
        rbot2 = input2.new()
        grad_input1 = input1.new()
        grad_input2 = input2.new()

        correlation_cuda.backward(
            input1, input2, rbot1, rbot2,
            grad_output,
            grad_input1, grad_input2,
            ctx.pad_size, ctx.kernel_size, ctx.max_displacement,
            ctx.stride1, ctx.stride2, ctx.corr_multiply,
        )

    return grad_input1, grad_input2, None, None, None, None, None, None
# Variant 6: ctx-based backward that copies each hyper-parameter into a local
# variable before the kernel call.
def backward(ctx, grad_output):
    input1, input2 = ctx.saved_tensors
    pad_size = ctx.pad_size
    kernel_size = ctx.kernel_size
    max_displacement = ctx.max_displacement
    stride1 = ctx.stride1
    stride2 = ctx.stride2
    corr_multiply = ctx.corr_multiply

    with torch.cuda.device_of(input1):
        rbot1 = input1.new()
        rbot2 = input2.new()
        grad_input1 = input1.new()
        grad_input2 = input2.new()

        correlation_cuda.backward(
            input1, input2, rbot1, rbot2,
            grad_output,
            grad_input1, grad_input2,
            pad_size, kernel_size, max_displacement,
            stride1, stride2, corr_multiply,
        )

    # As in variant 3, a None per non-tensor forward input would be needed if
    # forward received the hyper-parameters as arguments.
    return grad_input1, grad_input2
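# A usage sketch (assumed, not from the original): driving the ctx-style
# Function from the forward sketch above via .apply, so autograd routes
# gradients through a backward like variant 1's. The hyper-parameter values
# mirror the defaults noted in variant 5.
input1 = torch.randn(1, 64, 48, 64, device="cuda", requires_grad=True)
input2 = torch.randn(1, 64, 48, 64, device="cuda", requires_grad=True)
output = CorrelationFunction.apply(input1, input2, 3, 3, 20, 1, 2, 1)
output.sum().backward()  # gradients arrive in input1.grad and input2.grad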