Example No. 1
    @staticmethod
    def forward(ctx, input1, input2):

        # The C extension kernels assume contiguous memory layouts.
        assert input1.is_contiguous()
        assert input2.is_contiguous()


        if input1.is_cuda:
            # Allocate a zero-filled CUDA tensor for the GPU kernel to write into.
            output = torch.cuda.FloatTensor().resize_(input1.size()).zero_()
            my_lib.InterpolationLayer_gpu_forward(input1, input2, output)
        else:
            # CPU path: allocate a zeroed CPU tensor for the CPU kernel.
            output = torch.FloatTensor(input1.size()).zero_()
            my_lib.InterpolationLayer_cpu_forward(input1, input2, output)

        # Cache the inputs; backward() retrieves them via ctx.saved_tensors.
        ctx.save_for_backward(input1, input2)

        return output
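
Since forward() calls ctx.save_for_backward, a matching backward() is expected on the same Function subclass. Below is a minimal sketch; the *_gpu_backward / *_cpu_backward entry points are assumed by analogy with the forward calls and are not shown in the original example.

    @staticmethod
    def backward(ctx, grad_output):
        # Retrieve the tensors cached by forward() via ctx.save_for_backward.
        input1, input2 = ctx.saved_tensors
        grad_input1 = torch.zeros_like(input1)
        grad_input2 = torch.zeros_like(input2)
        if grad_output.is_cuda:
            my_lib.InterpolationLayer_gpu_backward(
                input1, input2, grad_output.contiguous(), grad_input1, grad_input2)
        else:
            my_lib.InterpolationLayer_cpu_backward(
                input1, input2, grad_output.contiguous(), grad_input1, grad_input2)
        # Return one gradient per forward() input, in the same order.
        return grad_input1, grad_input2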
Example No. 2
    @staticmethod
    def forward(ctx, input1, input2):

        assert input1.is_contiguous()
        assert input2.is_contiguous()
        # self.input1 = input1.contiguous() # need to use in the backward process, so we need to cache it
        # self.input2 = input2.contiguous() # TODO: Note that this is simply a shallow copy?
        # if input1.is_cuda:
        #     self.device = torch.cuda.current_device()
        # else:
        #     self.device = -1

        # output =  torch.zeros(input1.size())

        if input1.is_cuda:
            # Allocate a zero-filled CUDA tensor for the GPU kernel to write into.
            output = torch.cuda.FloatTensor().resize_(input1.size()).zero_()
            my_lib.InterpolationLayer_gpu_forward(input1, input2, output)
        else:
            # CPU path: allocate a zeroed CPU tensor for the CPU kernel.
            output = torch.FloatTensor(input1.size()).zero_()
            my_lib.InterpolationLayer_cpu_forward(input1, input2, output)

        # Cache the inputs; backward() retrieves them via ctx.saved_tensors.
        ctx.save_for_backward(input1, input2)

        # The function returns the output to its caller.
        return output
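
A usage sketch, assuming the forward above lives on a torch.autograd.Function subclass named InterpolationLayer (the class name and tensor shapes are assumptions; the snippets only show the method body):

import torch

# Placeholder shapes; the real shapes depend on what the extension expects.
input1 = torch.randn(1, 3, 8, 8, requires_grad=True)
input2 = torch.randn(1, 3, 8, 8, requires_grad=True)

# Custom Functions are invoked through .apply(), which routes the call
# to forward() and registers the node in the autograd graph.
output = InterpolationLayer.apply(input1, input2)
output.sum().backward()  # invokes the Function's backward()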