Example #1
0
    def testTorchInterop(self):
        """CPU interop: a torch tensor fetched from the workspace shares
        storage with the blob; a fed tensor must be re-fetched after the
        blob is reshaped."""
        workspace.RunOperatorOnce(core.CreateOperator(
            "ConstantFill", [], "foo", shape=(4,), value=2, dtype=10))
        view = workspace.FetchTorch("foo")
        view.resize_(5)
        view[4] = view[2] = 777
        np.testing.assert_array_equal(
            view.numpy(), np.array([2, 2, 777, 2, 777]))
        # Known limitation: resize_() on the torch view does not reshape the
        # underlying Caffe2 tensor, so the blob still reports 4 elements
        # (though the in-range write of 777 is visible through it).
        np.testing.assert_array_equal(
            workspace.FetchBlob("foo"), np.array([2, 2, 777, 2]))

        fed = torch.ones((4,), dtype=torch.int64)
        workspace.FeedBlob('bar', fed)
        workspace.RunOperatorOnce(core.CreateOperator(
            "Reshape", ['bar'], ['bar', '_'], shape=(2, 2)))
        # NOTE: FeedBlob stores a shallow copy of `fed` in the workspace, so
        # the size/stride change performed by Reshape on 'bar' is not
        # reflected in the original tensor; re-fetch to pick up the new
        # shape.  Once non-grad tensors can be passed into the workspace
        # directly, this re-fetch will no longer be needed and the fetched
        # tensor will be the very same object that was fed.
        fed = workspace.FetchTorch("bar")
        fed[0, 1] = 123
        np.testing.assert_array_equal(
            workspace.FetchBlob("bar"), np.array([[1, 123], [1, 1]]))
        np.testing.assert_array_equal(fed, np.array([[1, 123], [1, 1]]))
Example #2
0
    def testTorchInterop(self):
        """GPU variant of the blob<->tensor interop test, additionally
        verifying that no CUDA memory is leaked across the round trip."""
        # CUDA exposes allocator stats; snapshot them so we can check at the
        # end that everything created here was released.
        baseline = torch.cuda.memory_allocated()

        gpu_fill = core.CreateOperator(
            "ConstantFill", [], "foo", shape=(4,), value=2, dtype=10,
            device_option=core.DeviceOption(workspace.GpuDeviceType))
        workspace.RunOperatorOnce(gpu_fill)

        fetched = workspace.FetchTorch("foo")
        fetched.resize_(5)
        self.assertTrue(fetched.is_cuda)
        fetched[4] = fetched[2] = 777
        np.testing.assert_array_equal(
            fetched.cpu().numpy(), np.array([2, 2, 777, 2, 777]))
        # Known limitation: resize_() on the torch view is not propagated to
        # the underlying Caffe2 tensor, so the blob still holds 4 elements.
        np.testing.assert_array_equal(
            workspace.FetchBlob("foo"), np.array([2, 2, 777, 2]))

        shared = torch.ones((4,), dtype=torch.int64, device="cuda")
        workspace.FeedBlob('bar', shared)
        gpu_reshape = core.CreateOperator(
            "Reshape", ['bar'], ['bar', '_'], shape=(2, 2),
            device_option=core.DeviceOption(workspace.GpuDeviceType))
        workspace.RunOperatorOnce(gpu_reshape)
        # NOTE: FeedBlob('bar', shared) stores a shallow copy in the Caffe2
        # workspace; size/stride changes made to 'bar' (e.g. by Reshape) are
        # not reflected in the original tensor, so it must be re-fetched.
        # Once non-grad tensors can be passed into the workspace directly,
        # this re-fetch will become unnecessary.
        shared = workspace.FetchTorch("bar")
        shared[0, 1] = 123
        np.testing.assert_array_equal(
            workspace.FetchBlob("bar"), np.array([[1, 123], [1, 1]]))
        np.testing.assert_array_equal(
            shared.cpu(), np.array([[1, 123], [1, 1]]))

        self.assertGreater(torch.cuda.memory_allocated(), baseline)
        # Drop our references and clear the workspace; allocator usage should
        # return exactly to the baseline if nothing leaked.
        del fetched
        del shared
        workspace.ResetWorkspace()
        self.assertEqual(torch.cuda.memory_allocated(), baseline)
Example #3
0
    def testTorchInterop(self):
        """CPU interop: fetched tensors share storage with the blob, and a
        fed tensor aliases the blob so reshapes and writes flow both ways."""
        fill = core.CreateOperator(
            "ConstantFill", [], "foo", shape=(4,), value=2, dtype=10)
        workspace.RunOperatorOnce(fill)
        view = workspace.FetchTorch("foo")
        view.resize_(5)
        view[4] = view[2] = 777
        np.testing.assert_array_equal(
            view.numpy(), np.array([2, 2, 777, 2, 777]))
        # Known limitation: the resize_() above does not reshape the
        # underlying Caffe2 tensor, so the blob still reports 4 elements
        # (the in-range write of 777 is still visible).
        np.testing.assert_array_equal(
            workspace.FetchBlob("foo"), np.array([2, 2, 777, 2]))

        fed = torch.ones((4,), dtype=torch.int64)
        workspace.FeedBlob('bar', fed)
        reshape = core.CreateOperator(
            "Reshape", ['bar'], ['bar', '_'], shape=(2, 2))
        workspace.RunOperatorOnce(reshape)
        # The blob reshape is visible through `fed` (it now indexes as 2x2),
        # and writes through `fed` are visible in the blob.
        fed[0, 1] = 123
        np.testing.assert_array_equal(
            workspace.FetchBlob("bar"), np.array([[1, 123], [1, 1]]))
        np.testing.assert_array_equal(fed, np.array([[1, 123], [1, 1]]))
Example #4
0
    def testTorchInterop(self):
        """GPU interop test with an aliasing fed tensor, plus a check that
        CUDA memory use returns to its starting point after reset."""
        # Snapshot CUDA allocator usage so leaks can be detected at the end.
        baseline = torch.cuda.memory_allocated()
        workspace.RunOperatorOnce(core.CreateOperator(
            "ConstantFill", [], "foo", shape=(4,), value=2, dtype=10,
            device_option=core.DeviceOption(workspace.GpuDeviceType)))
        alias = workspace.FetchTorch("foo")
        alias.resize_(5)
        self.assertTrue(alias.is_cuda)
        alias[4] = alias[2] = 777
        np.testing.assert_array_equal(
            alias.cpu().numpy(), np.array([2, 2, 777, 2, 777]))
        # Known limitation: the resize_() is not propagated back to the
        # Caffe2 tensor, so the blob still holds the original 4 elements.
        np.testing.assert_array_equal(
            workspace.FetchBlob("foo"), np.array([2, 2, 777, 2]))

        fed = torch.ones((4,), dtype=torch.int64, device="cuda")
        workspace.FeedBlob('bar', fed)
        workspace.RunOperatorOnce(core.CreateOperator(
            "Reshape", ['bar'], ['bar', '_'], shape=(2, 2),
            device_option=core.DeviceOption(workspace.GpuDeviceType)))
        # The blob reshape is visible through `fed`, and writes through `fed`
        # are visible in the blob.
        fed[0, 1] = 123
        np.testing.assert_array_equal(
            workspace.FetchBlob("bar"), np.array([[1, 123], [1, 1]]))
        np.testing.assert_array_equal(
            fed.cpu(), np.array([[1, 123], [1, 1]]))

        self.assertGreater(torch.cuda.memory_allocated(), baseline)
        # Drop references and clear the workspace; allocator usage should
        # return exactly to the baseline if nothing leaked.
        del alias
        del fed
        workspace.ResetWorkspace()
        self.assertEqual(torch.cuda.memory_allocated(), baseline)
    def testTorchInterop(self):
        """CPU interop for the fully zero-copy path: a fetched torch tensor
        and its Caffe2 blob share storage and metadata in both directions."""
        workspace.RunOperatorOnce(core.CreateOperator(
            "ConstantFill", [], "foo", shape=(4,), value=2, dtype=10))
        alias = workspace.FetchTorch("foo")
        alias.resize_(5)
        alias[4] = alias[2] = 777
        np.testing.assert_array_equal(
            alias.numpy(), np.array([2, 2, 777, 2, 777]))
        # Unlike the legacy behavior, both the resize_() and the element
        # writes above are visible when reading the blob back.
        np.testing.assert_array_equal(
            workspace.FetchBlob("foo"), np.array([2, 2, 777, 2, 777]))

        fed = torch.ones((4,), dtype=torch.int64)
        workspace.FeedBlob('bar', fed)
        workspace.RunOperatorOnce(core.CreateOperator(
            "Reshape", ['bar'], ['bar', '_'], shape=(2, 2)))
        # The blob reshape is reflected in `fed` (now 2x2), and writes
        # through `fed` are reflected in the blob.
        fed[0, 1] = 123
        np.testing.assert_array_equal(
            workspace.FetchBlob("bar"), np.array([[1, 123], [1, 1]]))
        np.testing.assert_array_equal(fed, np.array([[1, 123], [1, 1]]))