    def test_batchmatmul(self):
        # define TC
        lang = """
        def batch_matmul(float(B, N, M) X, float(B, M, K) Y) -> (Z) {
          Z(b, n, k) +=! X(b, n, mm) * Y(b, mm, k)
        }
        """

        # create input tensors
        B, K, M, N = 500, 26, 72, 26
        X = torch.randn(B, N, M).cuda()
        Y = torch.randn(B, M, K).cuda()
        inputs = [X, Y]

        # define the mapping_options
        options = Options("naive")
        options.useSharedMemory(True)
        options.usePrivateMemory(True)
        options.unrollCopyShared(True)
        options.outerScheduleFusionStrategy("Preserve3Coincident")
        options.fixParametersBeforeScheduling(True)
        options.tile([1])
        options.tileImperfectlyNested(False)
        options.mapToBlocks([72, 16, 1])
        options.mapToThreads([7, 26])
        options.unroll(128)

        # run with TC, get the outputs and check against reference implementation
        outputs = self.check(lang, "batch_matmul", options, inputs)
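        # The comment above mentions a reference check, but none is shown for
        # this test. A minimal sketch, assuming self.check returns the TC
        # results as a list of tensors (as in test_tmm below): the TC reduces
        # over mm, so torch.bmm is the natural PyTorch reference.
        expected = torch.bmm(X, Y)
        diff = outputs[0] - expected
        # M reduction steps per output element; the tolerance is an assumption.
        self.assert_almost_equal(diff, inputs, M, 3e-7)
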
    def test_tmm(self):
        # define TC
        lang = """
        def tmm(float(M,K) A, float(N,K) B) -> (C) {
          C(m, n) +=! A(m, kk) * B(n, kk)
        }
        """

        # create input tensors
        M, N, K = 128, 256, 32
        A = torch.randn(M, K).cuda()
        B = torch.randn(N, K).cuda()
        inputs = [A, B]

        # define the mapping_options
        options = Options("naive")
        options.useSharedMemory(True)
        options.usePrivateMemory(True)
        options.unrollCopyShared(False)
        options.outerScheduleFusionStrategy("Preserve3Coincident")
        options.fixParametersBeforeScheduling(False)
        options.tile([4, 32])
        options.tileImperfectlyNested(False)
        options.mapToBlocks([64, 128])
        options.mapToThreads([1, 32])
        options.unroll(4)

        # run with TC, get the outputs and check against reference implementation
        outputs = self.check(lang, "tmm", options, inputs)
        expected = torch.mm(A, torch.transpose(B, 0, 1))
        diff = outputs[0] - expected
        self.assert_almost_equal(diff, inputs, M * N, 3e-7)

    def test_C3(self):
        # define TC
        lang = """
        def _C3(float(B, WX) I, float(WY, WX) W) -> (C3) {
            C3(b, wy) +=! I(b, wxx) * W(wy, wxx)
        }
        """

        # create input tensors
        B, WX, WY = 128, 1000, 1024
        I = torch.randn(B, WX).cuda()
        W = torch.randn(WY, WX).cuda()
        inputs = [I, W]

        # define the mapping_options
        options = Options("naive")
        options.useSharedMemory(True)
        options.usePrivateMemory(True)
        options.unrollCopyShared(True)
        options.outerScheduleFusionStrategy("Preserve3Coincident")
        options.fixParametersBeforeScheduling(True)
        options.tile([8, 32, 32])
        options.tileImperfectlyNested(False)
        options.mapToBlocks([128, 128])
        options.mapToThreads([1, 32])
        options.unroll(256)

        # run with TC, get the outputs and check against reference implementation
        outputs = self.check(lang, "_C3", options, inputs)
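        # A minimal reference sketch (an assumption, mirroring test_tmm above):
        # _C3 contracts I(b, wxx) with W(wy, wxx), i.e. a matmul against W^T.
        expected = torch.mm(I, torch.transpose(W, 0, 1))
        diff = outputs[0] - expected
        # WX reduction steps per output element; the tolerance is an assumption.
        self.assert_almost_equal(diff, inputs, WX, 3e-7)
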
    def test_group_convolution(self):
        # define TC
        lang = """
        def group_convolution(float(N,G,C,H,W) I, float(G,F,C,KH,KW) W1, float(G,F) B)
        -> (O)
        {
          O(n, g, f, h, w) +=! I(n, g, c, h + kh, w + kw) * W1(g, f, c, kh, kw)
          O(n, g, f, h, w) = O(n, g, f, h, w) + B(g, f)
        }
        """

        # create input tensors
        N, G, C, F, H, W, KH, KW = 32, 32, 32, 32, 7, 7, 3, 3
        tI = torch.randn(N, G, C, H, W).cuda()
        tW = torch.randn(G, F, C, KH, KW).cuda()
        tB = torch.randn(G, F).cuda()
        inputs = [tI, tW, tB]

        # define the mapping_options
        options = Options("naive")
        options.useSharedMemory(True)
        options.usePrivateMemory(False)
        options.unrollCopyShared(True)
        options.outerScheduleFusionStrategy("Preserve3Coincident")
        options.fixParametersBeforeScheduling(False)
        options.tile([1, 1])
        options.tileImperfectlyNested(False)
        options.mapToBlocks([32, 32, 3])
        options.mapToThreads([8, 7, 7])
        options.unroll(256)

        # run with TC, get the outputs and check against reference implementation
        outputs = self.check(lang, "group_convolution", options, inputs)
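        # A hedged reference sketch (not part of the original test): the TC is
        # a grouped valid cross-correlation, so torch.nn.functional.conv2d with
        # groups=G should match once the tensors are reshaped. The fully
        # qualified name avoids clashing with the size variable F above.
        expected = torch.nn.functional.conv2d(
            tI.view(N, G * C, H, W),
            tW.view(G * F, C, KH, KW),
            bias=tB.view(G * F),
            groups=G,
        ).view(N, G, F, H - KH + 1, W - KW + 1)
        diff = outputs[0] - expected
        # C * KH * KW reduction steps per output element; tolerance assumed.
        self.assert_almost_equal(diff, inputs, C * KH * KW, 3e-6)
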
    def test_options(self):
        print('\nCreating mapping_options')
        options = Options("naive")
        options.useSharedMemory(True)           # promote reused data to shared memory
        options.unrollCopyShared(False)         # do not unroll the copies to/from shared memory
        options.mapToBlocks([256, 8])           # CUDA grid dimensions
        options.mapToThreads([4, 16, 4])        # CUDA block dimensions
        options.tile([2, 8, 64, 128])           # loop tile sizes, outermost first
        options.unroll(128)                     # maximum unrolling factor
        options.fixParametersBeforeScheduling(False)  # keep sizes parametric during scheduling
        options.scheduleFusionStrategy("Max")   # fuse loops as much as possible
        options.outerScheduleFusionStrategy("Preserve3Coincident")  # fuse only if 3 parallel dims survive
        print('Mapping options created successfully')

    def test_mlp(self):
        # define TC
        lang = """
        def mlp3(float(B,N) I, float(O,N) W2, float(O) B2, float(P,O) W3,
                 float(P) B3, float(Q,P) W4, float(Q) B4) -> (O2, O3, O4)
        {
            O2(b, o) +=! I(b, n) * W2(o, n)
            O2(b, o) = O2(b, o) + B2(o)
            O2(b, o) = fmax(O2(b, o), 0)
            O3(b, p) +=! O2(b, o) * W3(p, o)
            O3(b, p) = O3(b, p) + B3(p)
            O3(b, p) = fmax(O3(b, p), 0)
            O4(b, q) +=! O3(b, p) * W4(q, p)
            O4(b, q) = O4(b, q) + B4(q)
            O4(b, q) = fmax(O4(b, q), 0)
        }
        """

        # create input tensors
        B, N, O, P, Q = 128, 128, 64, 32, 2
        I = torch.randn(B, N).cuda()
        W2 = torch.randn(O, N).cuda()
        B2 = torch.randn(O).cuda()
        W3 = torch.randn(P, O).cuda()
        B3 = torch.randn(P).cuda()
        W4 = torch.randn(Q, P).cuda()
        B4 = torch.randn(Q).cuda()
        inputs = [I, W2, B2, W3, B3, W4, B4]

        # define the mapping_options
        options = Options("naive")
        options.useSharedMemory(False)
        options.usePrivateMemory(False)
        options.unrollCopyShared(True)
        options.outerScheduleFusionStrategy("Max")
        options.fixParametersBeforeScheduling(False)
        options.tile([4])
        options.tileImperfectlyNested(False)
        options.mapToBlocks([128])
        options.mapToThreads([64])
        options.unroll(128)

        # run with TC, get the outputs and check against reference implementation
        outputs = self.check(lang, "mlp3", options, inputs)
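        # A hedged reference sketch (assumed; the TC computes three fused
        # linear + bias + ReLU layers, and outputs[2] is taken to be O4):
        r2 = torch.relu(torch.mm(I, W2.t()) + B2)
        r3 = torch.relu(torch.mm(r2, W3.t()) + B3)
        r4 = torch.relu(torch.mm(r3, W4.t()) + B4)
        diff = outputs[2] - r4
        # N + O + P accumulated reduction steps; the tolerance is an assumption.
        self.assert_almost_equal(diff, inputs, N + O + P, 3e-6)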