Ejemplo n.º 1
0
 def test_sparse_lengths_ops(self):
     # Drive the shared reference suite through the 'SparseLengths' operator
     # family on randomly generated (values, indices, lengths) inputs.
     strategy = hu.sparse_lengths_tensor(
         dtype=np.float32,
         min_value=1,
         max_value=10,
         allow_empty=True,
     )
     LengthsTester().test('SparseLengths', strategy, REFERENCES_ALL)(self)
Ejemplo n.º 2
0
 def test_sparse_lengths_ops(self):
     # Exercise the 'SparseLengths' ops with both supported index dtypes.
     for index_type in [np.int32, np.int64]:
         strategy = hu.sparse_lengths_tensor(
             dtype=np.float32,
             min_value=1,
             max_value=5,
             allow_empty=True,
             itype=index_type,
         )
         LengthsTester()._test('SparseLengths', strategy, REFERENCES_ALL)(self)
Ejemplo n.º 3
0
 def test_sparse_lengths_ops(self):
     # Run the shared reference suite against the 'SparseLengths' operators.
     input_strategy = hu.sparse_lengths_tensor(
         dtype=np.float32, min_value=1, max_value=10, allow_empty=True)
     tester = LengthsTester()
     tester.test('SparseLengths', input_strategy, REFERENCES_ALL)(self)
Ejemplo n.º 4
0
 def test_sparse_lengths_ops(self):
     # Repeat the sparse-lengths reference suite for int32 and int64 indices.
     for itype in [np.int32, np.int64]:
         tensor_strategy = hu.sparse_lengths_tensor(
             dtype=np.float32,
             min_value=1,
             max_value=5,
             allow_empty=True,
             itype=itype,
         )
         tester = LengthsTester()
         tester._test('SparseLengths', tensor_strategy, REFERENCES_ALL)(self)
Ejemplo n.º 5
0
class TestSegmentOps(hu.HypothesisTestCase):
    def test_sorted_segment_ops(self):
        # 'SortedSegment' supports the shared reference set plus the
        # sorted-only reference set.
        strategy = hu.segmented_tensor(
            dtype=np.float32, is_sorted=True, allow_empty=True)
        references = REFERENCES_ALL + REFERENCES_SORTED
        SegmentsTester()._test('SortedSegment', strategy, references)(self)

    def test_unsorted_segment_ops(self):
        # 'UnsortedSegment' is checked against the shared reference set only.
        strategy = hu.segmented_tensor(
            dtype=np.float32, is_sorted=False, allow_empty=True)
        SegmentsTester()._test('UnsortedSegment', strategy, REFERENCES_ALL)(self)

    def test_unsorted_segment_ops_gpu(self):
        # GPU flavour of the unsorted-segment suite: the GPU path is only
        # enabled when a GPU is available, and gradient checks are skipped.
        strategy = hu.segmented_tensor(
            dtype=np.float32, is_sorted=False, allow_empty=True)
        SegmentsTester()._test(
            'UnsortedSegment',
            strategy,
            REFERENCES_ALL,
            gpu=workspace.has_gpu_support,
            grad_check=False,
        )(self)

    def test_sparse_sorted_segment_ops(self):
        # Sparse (index-gathering) variant of the sorted segment ops.
        strategy = hu.sparse_segmented_tensor(
            dtype=np.float32, is_sorted=True, allow_empty=True)
        tester = SegmentsTester()
        tester._test('SparseSortedSegment', strategy, REFERENCES_ALL)(self)

    def test_sparse_unsorted_segment_ops(self):
        # Sparse (index-gathering) variant of the unsorted segment ops.
        strategy = hu.sparse_segmented_tensor(
            dtype=np.float32, is_sorted=False, allow_empty=True)
        tester = SegmentsTester()
        tester._test('SparseUnsortedSegment', strategy, REFERENCES_ALL)(self)

    def test_lengths_ops(self):
        # Lengths-based reducers support the shared references plus the
        # lengths-only reference set.
        strategy = hu.lengths_tensor(
            dtype=np.float32, min_value=1, max_value=5, allow_empty=True)
        references = REFERENCES_ALL + REFERENCES_LENGTHS_ONLY
        LengthsTester()._test('Lengths', strategy, references)(self)

    def test_sparse_lengths_ops(self):
        # Repeat the sparse-lengths suite for both supported index dtypes.
        for index_type in [np.int32, np.int64]:
            strategy = hu.sparse_lengths_tensor(
                dtype=np.float32,
                min_value=1,
                max_value=5,
                allow_empty=True,
                itype=index_type,
            )
            LengthsTester()._test('SparseLengths', strategy, REFERENCES_ALL)(self)

    @unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
    @given(**hu.gcs)
    def test_unsorted_sums_large(self, gc, dc):
        # Large random problem to check device agreement of UnsortedSegmentSum.
        data = np.random.rand(10000, 32, 12).astype(np.float32)
        seg_ids = np.random.randint(0, 10000, size=10000).astype(np.int32)
        op = core.CreateOperator("UnsortedSegmentSum", ["X", "segments"], "out")
        self.assertDeviceChecks(dc, op, [data, seg_ids], [0])

    @unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
    @given(**hu.gcs)
    def test_sorted_segment_range_mean(self, gc, dc):
        # Fixed segmentation over 6 rows: segment sizes 2, 2, 1, 1.
        data = np.random.rand(6, 32, 12).astype(np.float32)
        seg_ids = np.array([0, 0, 1, 1, 2, 3], dtype=np.int32)
        op = core.CreateOperator(
            "SortedSegmentRangeMean", ["X", "segments"], "out")
        self.assertDeviceChecks(dc, op, [data, seg_ids], [0])
        self.assertGradientChecks(gc, op, [data, seg_ids], 0, [0])

    @unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
    @given(**hu.gcs)
    def test_sorted_segment_range_log_mean_exp(self, gc, dc):
        # Fixed segmentation over 7 rows: segment sizes 2, 2, 2, 1.
        data = np.random.rand(7, 32, 12).astype(np.float32)
        seg_ids = np.array([0, 0, 1, 1, 2, 2, 3], dtype=np.int32)
        op = core.CreateOperator(
            "SortedSegmentRangeLogMeanExp", ["X", "segments"], "out")
        self.assertDeviceChecks(dc, op, [data, seg_ids], [0])
        self.assertGradientChecks(gc, op, [data, seg_ids], 0, [0])

    @unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
    @given(**hu.gcs)
    def test_unsorted_means_large(self, gc, dc):
        # Large random problem to check device agreement of UnsortedSegmentMean.
        data = np.random.rand(10000, 31, 19).astype(np.float32)
        seg_ids = np.random.randint(0, 10000, size=10000).astype(np.int32)
        op = core.CreateOperator("UnsortedSegmentMean", ["X", "segments"], "out")
        self.assertDeviceChecks(dc, op, [data, seg_ids], [0])

    @given(
        inputs=hu.lengths_tensor(
            dtype=np.float32,
            min_value=1,
            max_value=5,
            allow_empty=True,
        ),
        **hu.gcs
    )
    def test_lengths_sum(self, inputs, gc, dc):
        # LengthsSum: out[g] is the sum of the next L[g] consecutive rows of X.
        data, lengths = inputs
        op = core.CreateOperator("LengthsSum", ["X", "Y"], "out")

        def ref(D, L):
            # Accumulate rows one at a time; indexing out[seg] works for both
            # 1-D and N-D data, so no rank-specific branches are needed.
            out = np.zeros(shape=(L.size, ) + D.shape[1:], dtype=D.dtype)
            row = 0
            for seg in range(L.size):
                for _ in range(L[seg]):
                    out[seg] += D[row]
                    row += 1
            return [out]

        self.assertReferenceChecks(gc, op, [data, lengths], ref)
        self.assertDeviceChecks(dc, op, [data, lengths], [0])
        self.assertGradientChecks(gc, op, [data, lengths], 0, [0])

    @given(
        inputs=hu.sparse_lengths_tensor(
            dtype=np.float32,
            min_value=1,
            max_value=5,
            allow_empty=True
        ),
        **hu.gcs
    )
    def test_sparse_lengths_sum(self, inputs, gc, dc):
        # SparseLengthsSum: out[g] sums the gathered rows D[I[k]] for the
        # next L[g] positions k.
        data, indices, lengths = inputs
        op = core.CreateOperator("SparseLengthsSum", ["X", "Y", "Z"], "out")

        def ref(D, I, L):
            # Accumulate gathered rows one at a time; out[seg] indexing covers
            # both 1-D and N-D data.
            out = np.zeros(shape=(L.size, ) + D.shape[1:], dtype=D.dtype)
            cursor = 0
            for seg in range(L.size):
                for _ in range(L[seg]):
                    out[seg] += D[I[cursor]]
                    cursor += 1
            return [out]

        self.assertReferenceChecks(gc, op, [data, indices, lengths], ref)
        self.assertDeviceChecks(dc, op, [data, indices, lengths], [0])
        self.assertGradientChecks(gc, op, [data, indices, lengths], 0, [0])

    @given(
        inputs=hu.lengths_tensor(
            dtype=np.float32,
            min_value=1,
            max_value=5,
            allow_empty=True,
        ),
        **hu.gcs
    )
    def test_lengths_mean(self, inputs, gc, dc):
        # LengthsMean: per-segment sum divided by the segment length.
        data, lengths = inputs
        op = core.CreateOperator("LengthsMean", ["X", "Y"], "out")

        def ref(D, L):
            out = np.zeros(shape=(L.size, ) + D.shape[1:], dtype=D.dtype)
            row = 0
            for seg in range(L.size):
                for _ in range(L[seg]):
                    out[seg] += D[row]
                    row += 1
                # Skip division for empty segments (avoid 0/0); for a
                # single-row segment dividing by 1 would be a no-op anyway.
                if L[seg] > 1:
                    out[seg] = out[seg] / L[seg]
            return [out]

        self.assertReferenceChecks(gc, op, [data, lengths], ref)
        self.assertDeviceChecks(dc, op, [data, lengths], [0])
        self.assertGradientChecks(gc, op, [data, lengths], 0, [0])

    @given(
        inputs=hu.sparse_lengths_tensor(
            dtype=np.float32,
            min_value=1,
            max_value=5,
            allow_empty=True
        ),
        **hu.gcs
    )
    def test_sparse_lengths_mean(self, inputs, gc, dc):
        # SparseLengthsMean: mean of the gathered rows D[I[k]] per segment.
        data, indices, lengths = inputs
        op = core.CreateOperator("SparseLengthsMean", ["X", "Y", "Z"], "out")

        def ref(D, I, L):
            out = np.zeros(shape=(L.size, ) + D.shape[1:], dtype=D.dtype)
            cursor = 0
            for seg in range(L.size):
                for _ in range(L[seg]):
                    out[seg] += D[I[cursor]]
                    cursor += 1
                # Skip division for empty segments; dividing a single-row
                # segment by 1 would change nothing.
                if L[seg] > 1:
                    out[seg] = out[seg] / L[seg]
            return [out]

        self.assertReferenceChecks(gc, op, [data, indices, lengths], ref)
        self.assertDeviceChecks(dc, op, [data, indices, lengths], [0])
        self.assertGradientChecks(gc, op, [data, indices, lengths], 0, [0])

    @given(
        grad_on_weights=st.booleans(),
        inputs=hu.sparse_lengths_tensor(
            dtype=np.float32,
            min_value=1,
            max_value=5,
            allow_empty=True
        ),
        seed=st.integers(min_value=0, max_value=100),
        **hu.gcs
    )
    def test_sparse_lengths_weighted_sum(
            self, grad_on_weights, inputs, seed, gc, dc):
        # Weighted variant of SparseLengthsSum; optionally also checks the
        # gradient w.r.t. the weights when grad_on_weights is set.
        data, indices, lengths = inputs

        # Seed before drawing the weights so the weight vector is
        # reproducible for a given hypothesis example.
        np.random.seed(int(seed))
        weights = np.random.rand(indices.size).astype(np.float32)

        op = core.CreateOperator(
            "SparseLengthsWeightedSum",
            ["D", "W", "I", "L"],
            "out",
            grad_on_weights=grad_on_weights)

        blobs = [data, weights, indices, lengths]
        self.assertDeviceChecks(dc, op, blobs, [0])
        grad_reference = partial(
            sparse_lengths_weighted_sum_grad_ref,
            grad_on_weights=grad_on_weights)
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=blobs,
            reference=sparse_lengths_weighted_sum_ref,
            threshold=1e-4,
            output_to_grad='out',
            grad_reference=grad_reference,
        )
        self.assertGradientChecks(gc, op, blobs, 0, [0])
        if grad_on_weights:
            self.assertGradientChecks(gc, op, blobs, 1, [0])

    @given(**hu.gcs)
    def test_sparse_lengths_indices_in_gradient_sum_gpu(self, gc, dc):
        # Device-agreement smoke test for the in-gradient sum gradient op.
        x = np.random.rand(3, 3, 4, 5).astype(np.float32)
        y = np.asarray([3, 3, 2]).astype(np.int32)
        z = np.random.randint(0, 50, size=8).astype(np.int64)
        op = core.CreateOperator(
            "SparseLengthsIndicesInGradientSumGradient", ["X", "Y", "Z"], "out")
        self.assertDeviceChecks(dc, op, [x, y, z], [0])

    @given(**hu.gcs)
    def test_sparse_lengths_indices_in_gradient_mean_gpu(self, gc, dc):
        # Device-agreement smoke test for the in-gradient mean gradient op.
        x = np.random.rand(3, 3, 4, 5).astype(np.float32)
        y = np.asarray([3, 3, 2]).astype(np.int32)
        z = np.random.randint(0, 50, size=8).astype(np.int64)
        op = core.CreateOperator(
            "SparseLengthsIndicesInGradientMeanGradient", ["X", "Y", "Z"], "out")
        self.assertDeviceChecks(dc, op, [x, y, z], [0])

    @given(**hu.gcs_cpu_only)
    def test_legacy_sparse_and_lengths_sum_gradient(self, gc, dc):
        # SparseLengthsSumGradient and LengthsSumGradient must produce
        # identical outputs when run on the same inputs.
        grad = np.random.rand(3, 64).astype(np.float32)
        lens = np.asarray([20, 20, 10]).astype(np.int32)
        workspace.FeedBlob("X", grad)
        workspace.FeedBlob("Y", lens)
        net = core.Net("test_net")
        net.SparseLengthsSumGradient(["X", "Y"], "out1")
        net.LengthsSumGradient(["X", "Y"], "out2")
        workspace.RunNetOnce(net)
        sparse_result = workspace.FetchBlob("out1")
        dense_result = workspace.FetchBlob("out2")
        self.assertTrue(np.array_equal(sparse_result, dense_result))

    @given(**hu.gcs)
    def test_sparse_lengths_sum_invalid_index(self, gc, dc):
        # Every index is >= 10000 while the data tensor only has 50 rows,
        # so running the operator must raise.
        data = np.random.rand(50, 3, 4, 5).astype(np.float32)
        bad_indices = (np.random.randint(0, 10000, size=10) + 10000).astype(np.int64)
        lens = np.asarray([4, 4, 2]).astype(np.int32)
        op = core.CreateOperator("SparseLengthsSum", ["D", "I", "L"], "out")
        workspace.FeedBlob('D', data)
        workspace.FeedBlob('I', bad_indices)
        workspace.FeedBlob('L', lens)
        with self.assertRaises(RuntimeError):
            workspace.RunOperatorOnce(op)

    @given(**hu.gcs_cpu_only)
    def test_sparse_lengths_positional_weighted_sum(
            self, gc, dc):
        # Checks SparseLengthsPositionalWeightedSum against a reference built
        # from existing operators. As constructed below, each entry's weight is
        # looked up by its position within its segment rather than by the
        # gather index itself.
        D = np.random.rand(50, 3, 4, 5).astype(np.float32)
        W = np.random.rand(50).astype(np.float32)
        indices = np.random.randint(0, 50, size=10).astype(np.int64)
        L = np.asarray([4, 4, 2]).astype(np.int32)
        op = core.CreateOperator(
            "SparseLengthsPositionalWeightedSum",
            ["D", "W", "indices", "L"],
            "out")

        def ref_sparse(D, W, indices, L):
            # NOTE(review): this reference runs ops against the live global
            # workspace, reusing the same blob names the outer op uses — the
            # feed/run order below is load-bearing.
            # Step 1: expand L into per-segment position sequences
            # (presumably [0..L[g]-1] per segment — see LengthsRangeFill docs).
            workspace.FeedBlob("L", L)
            lengths_range_fill_op = core.CreateOperator(
                "LengthsRangeFill", ["L"], ["L_pos_seq"])
            workspace.RunOperatorOnce(lengths_range_fill_op)

            # Step 2: gather a weight for each entry by its position.
            workspace.FeedBlob("W", W)
            gather_op = core.CreateOperator(
                "Gather", ["W", "L_pos_seq"], ["W_gathered"])
            workspace.RunOperatorOnce(gather_op)

            # Step 3: the plain weighted sum with positional weights must
            # match the positional op's output.
            workspace.FeedBlob("D", D)
            workspace.FeedBlob("indices", indices)
            sparse_op = core.CreateOperator(
                "SparseLengthsWeightedSum",
                ["D", "W_gathered", "indices", "L"],
                "out_ref")
            workspace.RunOperatorOnce(sparse_op)

            return (workspace.FetchBlob("out_ref"),)

        self.assertReferenceChecks(
            gc, op, [D, W, indices, L], ref_sparse)