Code example #1
 def add(a, b, c):
     d = hcl.compute(a.shape, lambda *x: a[x] + b[x], "d")
     hcl.assert_(True, "assert error 1")
     hcl.print(0, "print1\n")
     hcl.update(c, lambda *x: d[x] + 1, "u")
     hcl.assert_(False, "assert error 2")
     hcl.print(0, "print2")
Code example #2
 def add(a, b, c):
     d = hcl.compute(a.shape, lambda *x: a[x] + b[x])
     hcl.assert_(False)
     hcl.print(0, "print1")
     hcl.update(c, lambda *x: d[x] + 1)
     hcl.assert_(False)
     hcl.print(0, "print2")
Code example #3
 def mul(A, B, x):
     temp = hcl.scalar(0)
     with hcl.for_(0, x) as i:
         hcl.assert_(x < 5, "assert in for")
         temp[0] += add(A, B, x)
         hcl.print(0, "in for\n")
     hcl.return_(temp[0])
Code example #4
    def update(l, prototype, prototypeCounter, max):
        hcl.print((l + 1),
                  "%d:Use hard examples to update the prototype counters.\n")

        ###data preparation
        distance = hcl.compute((hdTrainData.shape[1], ), lambda x: 0,
                               'distance')
        hamming_dist = hcl.compute((numClasses, ), lambda x: 0, "hamming_dist")
        m = hcl.reduce_axis(0, hdTrainData.shape[1], "m")
        ###

        with hcl.for_(0, hdTrainData.shape[0]) as i:
            with hcl.for_(0, numClasses) as n:
                #Do hdc multiplication(XOR) on sample[i]'s hdv and prototype[n]'s hdv (elementwise on the high-bit data)
                hcl.update(distance,
                           lambda x: hdTrainData[i][x] ^ prototype[n][x])
                #Calculate the hamming distance of the two vectors by adding 1s
                hamming_dist[n] = hcl.sum(distance[m], axis=m)

            #Find the one with the least hamming distance and choose its label as the predicted label
            pred = hcl.scalar(0, 'pred')
            with hcl.for_(0, hamming_dist.shape[0]) as j:
                with hcl.if_(hamming_dist[j] < hamming_dist[pred]):
                    pred.v = j

            #Adjust the proto vectors by adding the sample vector to its label's proto hdv and subtracting it from its predicted proto hdv
            with hcl.if_(pred.v != trainLabels[i]):
                max[trainLabels[i]] += 1
                max[pred] -= 1
                with hcl.for_(0, hdTrainData.shape[1]) as m:
                    prototypeCounter[trainLabels[i]][m] += hdTrainData[i][m]
                    prototypeCounter[pred][m] -= hdTrainData[i][m]
                    with hcl.if_(max[trainLabels[i]] % 2 == 0):
                        with hcl.if_(prototypeCounter[trainLabels[i]][m] -
                                     max[trainLabels[i]] / 2 == 0):
                            prototype[trainLabels[i]][m] &= 1
                    with hcl.else_():
                        prototype[trainLabels[i]][m] = hcl.select(
                            prototypeCounter[trainLabels[i]][m] -
                            max[trainLabels[i]] / 2 > 0, 1, 0)

                    with hcl.if_(max[pred] % 2 == 0):
                        with hcl.if_(prototypeCounter[pred][m] -
                                     max[pred] / 2 == 0):
                            prototype[pred][m] &= 1
                    with hcl.else_():
                        prototype[pred][m] = hcl.select(
                            prototypeCounter[pred][m] - max[pred] / 2 > 0, 1,
                            0)

        #print the accuracy
        hcl.mutate(
            (1, ),
            lambda x: test_hdc_accu(prototype, hdTrainData, trainLabels, 1),
            'training_update')
        hcl.mutate(
            (1, ),
            lambda x: test_hdc_accu(prototype, hdTestData, testLabels, 2),
            'testing_update')
Code example #5
 def test_encoding(m, preTestData):
     test_temp = hcl.compute((testData.shape[1], dim), lambda x, y: itemMem[testData[m][x]][y]^idMem[x][y], name = "test_temp")
     k2 = hcl.reduce_axis(0, testData.shape[1], 'k2')
     test_result = hcl.compute((dim,), lambda x: hcl.sum(test_temp[k2, x], axis = k2, dtype=hcl.Int()), name = "test_result")
     with hcl.for_(0, dim) as n:
         preTestData[m][n] = test_result[n]
     with hcl.if_((m+1)%100 == 0):
         hcl.print((m+1), "Finish encoding %d testing data\n")
Code example #6
 def train_encoding(m, preTrainData):
     train_temp = hcl.compute((trainData.shape[1], dim), lambda x, y: itemMem[trainData[m][x]][y] ^ idMem[x][y], name = "train_temp")
     k1 = hcl.reduce_axis(0, trainData.shape[1], 'k1')
     train_result = hcl.compute((dim,), lambda x: hcl.sum(train_temp[k1, x], axis = k1, dtype=hcl.Int()), name = "train_result")
     with hcl.for_(0, dim) as n:
         preTrainData[m][n] = train_result[n]
     with hcl.if_((m + 1) % 1000 == 0):
         hcl.print((m+1), "Finish encoding %d training data\n")
Code example #7
 def update_B(A, x):
     with hcl.for_(0, 10) as i:
         hcl.assert_(i < 20)
         hcl.print(0, "in for loop\n")
         with hcl.if_(A[x] == i):
             hcl.assert_(A[x] > 10, "assert in if")
             hcl.print(0, "this should not be printed")
             hcl.return_(1)
     hcl.return_(A[x])
Code example #8
    def algorithm(a, b, c):
        @hcl.def_([a.shape, b.shape, c.shape])
        def add(a, b, c):
            hcl.update(c, lambda *x: a[x] + b[x])
            hcl.assert_(False)
            hcl.print(0, "print add")

        hcl.print(0, "print1\n")
        add(a, b, c)
        hcl.print(0, "print end\n")
Code example #9
  def kernel(matrix_1, matrix_2):
      return_matrix = hcl.compute((m,k), lambda x, y: matrix_1[x,y] + matrix_2[x,y], "return_matrix")
 
      with hcl.if_(matrix_2[0,0] == 0):
          hcl.assert_(matrix_2[1,1] == 0, "assert message in if statement") #result is true
          hcl.print(0, "in the if statement\n") #should be printed
          
      hcl.assert_(matrix_1[0,0] != 0, "customized assert message 1") #result is false
      hcl.print(0, "this shouldn't be printed")
      return return_matrix
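
Kernels like this one read the shape constants m and k from the enclosing module, and because the kernel returns return_matrix the built function takes an extra output buffer. A possible driver sketch follows; it is an assumption rather than part of the original test, and the all-zero inputs are chosen so the inline comments above hold (the first assert passes, the second one fires).

    import heterocl as hcl
    import numpy as np

    m, k = 10, 10                      # assumed module-level shape constants
    hcl.init(hcl.Int())
    matrix_1 = hcl.placeholder((m, k), "matrix_1")
    matrix_2 = hcl.placeholder((m, k), "matrix_2")

    s = hcl.create_schedule([matrix_1, matrix_2], kernel)
    f = hcl.build(s)

    hcl_m1 = hcl.asarray(np.zeros((m, k)))
    hcl_m2 = hcl.asarray(np.zeros((m, k)))
    out = hcl.asarray(np.zeros((m, k)))
    f(hcl_m1, hcl_m2, out)   # the failing assert stops the run before the last print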
Code example #10
 def kernel(A, M):
     def loop_body(x):
         with hcl.if_(A[x]> M[0]):
             with hcl.if_(A[x]> M[1]):
                 hcl.assert_(x == 2, "assert error in if--value of x: %d", x)
                 M[0] = M[1]
                 M[1] = A[x]
             with hcl.else_():
                 M[0] = A[x]
     hcl.mutate(A.shape, lambda x : loop_body(x))
     hcl.print(0, "this should not be printed\n")
Code example #11
 def two_stage(A):
     var = hcl.scalar(0, "v", dtype=hcl.UInt(32))
     var.v = 1
     with hcl.if_(var == 0):
         hcl.print((), "A\n")
     with hcl.else_():
         var.v = var - 1
         # this condition should not be optimized away
         with hcl.if_(var == 0):
             hcl.print((), "B\n")
     A[0] = var
     return A
Code example #12
    def kernel(matrix_1, matrix_2):
        return_matrix = hcl.compute((m, k), lambda x, y : matrix_1[x, y] + matrix_2[x, y], "return_matrix")

        with hcl.if_(matrix_2[0, 0] == 0):
            matrix_A = hcl.compute((m, k), lambda x, y : matrix_1[x, y] + matrix_2[x, y], "matrix_A")
        with hcl.else_():
            matrix_B = hcl.compute((m, k), lambda x, y : matrix_1[x, y] + matrix_2[x, y] + 2, "matrix_B")

        hcl.assert_(matrix_1[0, 0] != 0, "customized assert message 1") #result is false

        matrix_C = hcl.compute((m, k), lambda x, y : matrix_1[x, y] + matrix_2[x, y], "matrix_C")
        hcl.print(0, "this shouldn't be printed")
        return return_matrix
Code example #13
 def kernel(matrix_1, matrix_2):
     first_matrix = hcl.compute((m,k), lambda x, y: matrix_1[x,y] + matrix_2[x,y], "first_matrix")
     return_matrix = hcl.compute((m,k), lambda x, y: matrix_1[x,y] + matrix_2[x,y] + 7, "return_matrix")
     
     hcl.assert_(matrix_1[0,0] == 0, "assert %d message % d", [matrix_1[0,0], matrix_2[0,0]]) #assert is true
     hcl.assert_(matrix_1[0,0] == 10, "assert %d message % d number 2", [matrix_1[0,0], matrix_2[0,0]]) #assert is false
  
     matrix_C = hcl.compute((m,k), lambda x, y: matrix_1[x,y] + matrix_2[x,y] + 9, "matrix_C")
     matrix_D = hcl.compute((m,k), lambda x, y: matrix_1[x,y] + matrix_2[x,y] + 10, "matrix_D")
     
     hcl.assert_(matrix_1[0,0] == 0, "assert %d message % d number 3", [matrix_1[0,0], matrix_2[0,0]]) #assert is true
     hcl.print(0, "this should not be printed\n") #should not be printed
     return return_matrix
Code example #14
    def algorithm(a, b, c):
        @hcl.def_([a.shape, b.shape, c.shape])
        def add(a, b, c):
            d = hcl.compute(a.shape, lambda *x: a[x] + b[x], "d")
            hcl.assert_(True, "assert error 1")
            hcl.print(0, "print1\n")
            hcl.update(c, lambda *x: d[x] + 1, "u")
            hcl.assert_(False, "assert error 2")
            hcl.print(0, "print2")

        tmp = hcl.compute((64, 64), lambda x, y: 4 + 8)
        add(a, b, c)
        hcl.print(0, "print end")
Code example #15
    def kernel(matrix_1, matrix_2):
        first_matrix = hcl.compute((m, k), lambda x, y : matrix_1[x, y] + matrix_2[x, y], "first_matrix")
        return_matrix = hcl.compute((m, k), lambda x, y : matrix_1[x, y] + matrix_2[x, y] + 7, "return_matrix")
        ax = hcl.scalar(0)
        with hcl.while_(ax.v < 3):
            matrix_A = hcl.compute((m, k), lambda x, y : matrix_1[x, y] + matrix_2[x, y] + 7, "matrix_A")

            with hcl.for_(0, 2, name="for_loop_in") as h:
                matrix_B = hcl.compute((m, k), lambda x, y : matrix_1[x, y] + matrix_2[x, y] + 8, "matrix_B")

                with hcl.if_(matrix_1[0, 2] >= 0):
                    matrix_C = hcl.compute((m, k), lambda x, y : matrix_1[x, x] + matrix_2[x, x] + 9, "matrix_C")
                    hcl.assert_(matrix_1[0, 0]> 0, "assert message in the if statement %d", matrix_C[0, 0])
                    matrix_D = hcl.compute((m, k), lambda x, y : matrix_1[x, x] + matrix_2[x, x] + 9, "matrix_D")
                    hcl.print(0, "in if statement\n")

                hcl.assert_(matrix_1[0, 0]> 1, "assert message for loop")
                matrix_F = hcl.compute((m, k), lambda x, y : matrix_1[x, y] + matrix_2[x, y] + 8, "matrix_F")
                hcl.print(0, "in for loop\n")

            hcl.assert_(matrix_1[0, 0]> 2, "assert error, matrix_A[1, 1]: %d matrix_A[2, 1]: %d matrix_A[3, 1]: %d", [matrix_A[1, 1], matrix_A[2, 1], matrix_A[3, 1]])
            hcl.print(0, "in the while loop\n")
            ax.v = ax.v + 1

        hcl.assert_(matrix_1[0, 0]> 3, "assert message end")
        matrix_E = hcl.compute((m, k), lambda x, y : matrix_1[x, y] + matrix_2[x, y] + 10, "matrix_E")
        hcl.print(0, "this should not be printed\n")
        return return_matrix
Code example #16
    def algorithm(A, B):
        @hcl.def_([A.shape, ()])
        def update_B(A, x):
            hcl.print(0, "print1\n")
            hcl.assert_(A[x] != 7)
            hcl.print(0, "print2\n")
            hcl.return_(A[x] + 1)

        matrix_B = hcl.compute((m, k), lambda x, y: A[x] + B[x] + 7,
                               "matrix_B")
        hcl.update(B, lambda x: update_B(A, x))
        matrix_C = hcl.compute((m, k), lambda x, y: A[x] + B[x] + 7,
                               "matrix_C")

        hcl.print(0, "should not print\n")
Code example #17
    def algorithm(A, B):
        @hcl.def_([A.shape, ()])
        def update_B(A, x):
            with hcl.if_(A[x] > 5):
                hcl.print(0, "print if 1\n")
                hcl.assert_(A[x] <= 5, "assert in if")
                hcl.print(0, "print if 2\n")
                hcl.return_(-1)
            with hcl.else_():
                hcl.print(0, "print else 1\n")
                hcl.assert_(A[x] <= 5, "assert in else")
                hcl.print(0, "print else 2\n")
                hcl.return_(A[x] + 1)

        hcl.update(B, lambda x: update_B(A, x))
        hcl.print(0, "shouldn't be printed")
Code example #18
 def update_B(A, x):
     with hcl.if_(A[x] > 5):
         hcl.print(0, "print if 1\n")
         hcl.assert_(A[x] <= 5, "assert in if")
         hcl.print(0, "print if 2\n")
         hcl.return_(-1)
     with hcl.else_():
         hcl.print(0, "print else 1\n")
         hcl.assert_(A[x] <= 5, "assert in else")
         hcl.print(0, "print else 2\n")
         hcl.return_(A[x] + 1)
Code example #19
    def test_hdc_accu(proto, pack_data, labels, type):
        #pack the prototype
        pack_proto = hcl.pack(proto,
                              axis=1,
                              dtype=hcl.UInt(bw),
                              name="pack_proto")

        ###data preparation
        distance1 = hcl.compute((pack_data.shape[1], ),
                                lambda x: 0,
                                'distance1',
                                dtype=hcl.UInt(bw))
        pre_hamming = hcl.compute((pack_data.shape[1], ), lambda x: 0,
                                  "pre_hamming")
        hamming_dist1 = hcl.compute((numClasses, ), lambda x: 0,
                                    "hamming_dist1")
        m1 = hcl.reduce_axis(0, pack_data.shape[1], "m1")
        correct1 = hcl.scalar(0, 'correct1')
        ###

        with hcl.for_(0, pack_data.shape[0]) as i:
            hcl.print((i), "%d suc\n")
            with hcl.for_(0, numClasses) as n:
                #Do hdc multiplication(XOR) on sample[i]'s hdv and prototype[n]'s hdv (elementwise on the high-bit data)
                hcl.update(distance1,
                           lambda x: pack_data[i][x] ^ pack_proto[n][x])
                #Calculate the hamming distance of the two vectors by adding 1s
                hcl.update(pre_hamming, lambda x: popcount(distance1[x]))
                hcl.print((), "sum of 1s suc")
                # NOTE: seg fault observed here
                hamming_dist1[n] = hcl.sum(pre_hamming[m1], axis=m1)

            #Find the one with the least hamming distance and choose its label as the predicted label
            pred1 = hcl.scalar(0, 'pred1')
            with hcl.for_(0, hamming_dist1.shape[0]) as j:
                with hcl.if_(hamming_dist1[j] < hamming_dist1[pred1]):
                    pred1.v = j

            with hcl.if_(pred1.v == labels[i]):
                correct1.v += 1

        #Print the accuracy
        all1 = hcl.scalar(pack_data.shape[0], "all1", dtype=hcl.Float(32))
        accuracy1 = hcl.compute((1, ),
                                lambda x: correct1.v / all1.v * 100,
                                "accuracy1",
                                dtype=hcl.Float(32))
        with hcl.if_(type == 1):
            hcl.print((correct1, pack_data.shape[0], accuracy1[0]),
                      "Training accu: %d/%d (%.2f%%)\n")
        with hcl.else_():
            hcl.print((correct1, pack_data.shape[0], accuracy1[0]),
                      "Testing accu: %d/%d (%.2f%%)\n")
Code example #20
    def algorithm(A, B):
        @hcl.def_([A.shape, B.shape, ()])
        def add(A, B, x):
            hcl.assert_(x < 3, "assert in add")
            hcl.print(0, "in add\n")
            hcl.return_(A[x] + B[x])

        @hcl.def_([A.shape, B.shape, ()])
        def mul(A, B, x):
            temp = hcl.scalar(0)
            with hcl.for_(0, x) as i:
                hcl.assert_(x < 5, "assert in for")
                temp[0] += add(A, B, x)
                hcl.print(0, "in for\n")
            hcl.return_(temp[0])

        tmp = hcl.compute(A.shape, lambda x: mul(A, B, x))
        hcl.print(0, "shouldn't print\n")
        return tmp
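
A possible driver for this example, again an assumption rather than code from the source: the placeholder shapes match the @hcl.def_ signatures, and the returned tmp tensor becomes an extra argument of the built function. The asserts inside add and mul cut the run short, so the final "shouldn't print" message never appears.

    import heterocl as hcl
    import numpy as np

    hcl.init(hcl.Int())
    A = hcl.placeholder((10,), "A")
    B = hcl.placeholder((10,), "B")

    s = hcl.create_schedule([A, B], algorithm)
    f = hcl.build(s)

    hcl_A = hcl.asarray(np.random.randint(10, size=(10,)))
    hcl_B = hcl.asarray(np.random.randint(10, size=(10,)))
    hcl_out = hcl.asarray(np.zeros(10))
    f(hcl_A, hcl_B, hcl_out)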
Code example #21
    def kernel(matrix_1, matrix_2):
        return_matrix = hcl.compute(
            (m, k), lambda x, y: matrix_1[x, y] + matrix_2[x, y],
            "return_matrix")
        matrix_A = hcl.compute(
            (m, k), lambda x, y: matrix_1[x, y] + matrix_2[x, y] + 7,
            "matrix_A")
        matrix_B = hcl.compute(
            (m, k), lambda x, y: matrix_1[x, y] + matrix_2[x, y] + 8,
            "matrix_B")

        with hcl.for_(0, 7, name="for_loop") as f:
            with hcl.if_(matrix_1[0, f] == 0):
                hcl.assert_(
                    matrix_2[f, 2] == 0,
                    "assert message in the first for loop")  #assert true
                hcl.print(0, "in the first for loop and if statement\n"
                          )  #should be printed 7 times

            hcl.print(0, "in the first for loop, outside if statement\n"
                      )  #should be printed 7 times

        with hcl.for_(0, 7, name="for_loop") as f:
            with hcl.if_(matrix_1[0, f] == 0):
                hcl.assert_(
                    matrix_2[f, 2] != 0,
                    "assert message in the second for loop")  #assert false
                hcl.print(0, "in the second for loop and if statement\n"
                          )  #should not be printed

            hcl.print(0, "in the second for loop, outside if statement\n"
                      )  #should not be printed

        hcl.print(0, "this should not be printed\n")  #should not be printed
        matrix_C = hcl.compute(
            (m, k), lambda x, y: matrix_1[x, y] + matrix_2[x, y] + 9,
            "matrix_C")
        matrix_D = hcl.compute(
            (m, k), lambda x, y: matrix_1[x, y] + matrix_2[x, y] + 10,
            "matrix_D")
        return return_matrix
Code example #22
 def update_B(A, B, x):
     hcl.print(0, "print1\n")
     hcl.assert_(x < 2, "assert error 1")
     hcl.print(0, "print2\n")
     hcl.assert_(x < 1, "assert error 2")
     hcl.print(x, "print3\n")
     B[x] = A[x] + 1
Code example #23
    def test_hdc_accu(proto, hyper_dataset, labels, type):
        ###data preparation
        distance1 = hcl.compute((hyper_dataset.shape[1], ), lambda x: 0,
                                'distance1')
        hamming_dist1 = hcl.compute((numClasses, ), lambda x: 0,
                                    "hamming_dist1")
        m1 = hcl.reduce_axis(0, hyper_dataset.shape[1], "m1")
        correct1 = hcl.scalar(0, 'correct1')
        ###

        with hcl.for_(0, hyper_dataset.shape[0]) as i:
            with hcl.for_(0, numClasses) as n:
                #Do hdc multiplication(XOR) on sample[i]'s hdv and prototype[n]'s hdv (elementwise on the high-bit data)
                hcl.update(distance1,
                           lambda x: hyper_dataset[i][x] ^ proto[n][x])
                #Calculate the hamming distance of the two vectors by adding 1s
                hamming_dist1[n] = hcl.sum(distance1[m1], axis=m1)

            #Find the one with the least hamming distance and choose its label as the predicted label
            pred1 = hcl.scalar(0, 'pred1')
            with hcl.for_(0, hamming_dist1.shape[0]) as j:
                with hcl.if_(hamming_dist1[j] < hamming_dist1[pred1]):
                    pred1.v = j

            with hcl.if_(pred1.v == labels[i]):
                correct1.v += 1

        #Print the accuracy
        all1 = hcl.scalar(hyper_dataset.shape[0], "all1", dtype=hcl.Float(32))
        accuracy1 = hcl.compute((1, ),
                                lambda x: correct1.v / all1.v * 100,
                                "accuracy1",
                                dtype=hcl.Float(32))
        with hcl.if_(type == 1):
            hcl.print((correct1, hyper_dataset.shape[0], accuracy1[0]),
                      "Training accu: %d/%d (%.2f%%)\n")
        with hcl.else_():
            hcl.print((correct1, hyper_dataset.shape[0], accuracy1[0]),
                      "Testing accu: %d/%d (%.2f%%)\n")
Code example #24
 def update_B(A, x):
     with hcl.if_(A[x] > 5):
         with hcl.if_(A[x] > 7):
             hcl.print(0, "in if 1\n")
             hcl.assert_(A[x] == 1, "assert in if")
             hcl.print(0, "in if 2\n")
             hcl.return_(-2)
         hcl.return_(-1)
     with hcl.else_():
         with hcl.if_(A[x] > 3):
             hcl.print(0, "in else 1\n")
             hcl.assert_(A[x] == 4, "assert in else")
             hcl.print(2, "in else 2\n")
             hcl.return_(-3)
     hcl.return_(A[x] + 1)
Code example #25
def kernel(trainData, testData, itemMem, idMem, rdv1, rdv2):
    def train_encoding(m, preTrainData):
        train_temp = hcl.compute((trainData.shape[1], dim), lambda x, y: itemMem[trainData[m][x]][y] ^ idMem[x][y], name = "train_temp")
        k1 = hcl.reduce_axis(0, trainData.shape[1], 'k1')
        train_result = hcl.compute((dim,), lambda x: hcl.sum(train_temp[k1, x], axis = k1, dtype=hcl.Int()), name = "train_result")
        with hcl.for_(0, dim) as n:
            preTrainData[m][n] = train_result[n]
        with hcl.if_((m + 1) % 1000 == 0):
            hcl.print((m+1), "Finish encoding %d training data\n")

    def test_encoding(m, preTestData):
        test_temp = hcl.compute((testData.shape[1], dim), lambda x, y: itemMem[testData[m][x]][y]^idMem[x][y], name = "test_temp")
        k2 = hcl.reduce_axis(0, testData.shape[1], 'k2')
        test_result = hcl.compute((dim,), lambda x: hcl.sum(test_temp[k2, x], axis = k2, dtype=hcl.Int()), name = "test_result")
        with hcl.for_(0, dim) as n:
            preTestData[m][n] = test_result[n]
        with hcl.if_((m+1)%100 == 0):
            hcl.print((m+1), "Finish encoding %d testing data\n")

    #Encoding
    hcl.print((), "Encoding the training data into HDVs.\n")
    preTrainData = hcl.compute((trainData.shape[0], dim), lambda x, y: 0, "preTrainData")
    hcl.mutate((trainData.shape[0], ), lambda x: train_encoding(x, preTrainData))

    hdTrainData = hcl.compute((trainData.shape[0], dim), lambda x, y: 0, "hdTrainData", dtype=hcl.UInt(1))
    with hcl.Stage("S1"):
        with hcl.if_(trainData.shape[1] % 2 == 0):
            hcl.print((), "Use the random vector\n")
            hcl.update(hdTrainData, lambda x, y: hcl.select(preTrainData[x][y] + rdv1[x][y] - trainData.shape[1]/2 > 0, 1, 0))
        with hcl.else_():
            hcl.update(hdTrainData, lambda x, y: hcl.select(preTrainData[x][y] - trainData.shape[1]/2 > 0, 1, 0))

    hcl.print((),"Encoding the testing data into HDVs.\n")
    preTestData = hcl.compute((testData.shape[0], dim), lambda x, y: 0, "preTestData")
    hcl.mutate((testData.shape[0], ), lambda x: test_encoding(x, preTestData))

    hdTestData = hcl.compute((testData.shape[0], dim), lambda x, y: 0, "hdTestData", dtype=hcl.UInt(1))
    with hcl.Stage("S2"):
        with hcl.if_(testData.shape[1] % 2 == 0):
            hcl.print((), "Use the random vector\n")
            hcl.update(hdTestData, lambda x, y: hcl.select(preTestData[x][y] + rdv2[x][y] - testData.shape[1]/2 > 0, 1, 0))
        with hcl.else_():
            hcl.update(hdTestData, lambda x, y: hcl.select(preTestData[x][y] - testData.shape[1]/2 > 0, 1, 0))

    ###data_packing
    pack_train = hcl.pack(hdTrainData, axis=1, dtype=hcl.UInt(bw), name="pack_train")
    pack_test = hcl.pack(hdTestData, axis=1, dtype=hcl.UInt(bw), name="pack_test")
    return pack_train, pack_test
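
This kernel reads two module-level constants, dim (the hypervector dimensionality) and bw (the bit width used by hcl.pack); everything else arrives as a tensor argument. The build-only sketch below illustrates one way to set it up; all shapes and values are assumptions, not taken from the source, and dim must be divisible by bw for the final pack step.

    import heterocl as hcl

    dim = 1024   # assumed hypervector dimensionality
    bw = 32      # assumed packing bit width

    hcl.init(hcl.Int())
    trainData = hcl.placeholder((1000, 64), "trainData")
    testData = hcl.placeholder((200, 64), "testData")
    itemMem = hcl.placeholder((256, dim), "itemMem")
    idMem = hcl.placeholder((64, dim), "idMem")
    rdv1 = hcl.placeholder((1000, dim), "rdv1")
    rdv2 = hcl.placeholder((200, dim), "rdv2")

    s = hcl.create_schedule(
        [trainData, testData, itemMem, idMem, rdv1, rdv2], kernel)
    f = hcl.build(s)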
Code example #26
    def algorithm(A, B):
        @hcl.def_([A.shape, B.shape, ()])
        def update_B(A, B, x):
            hcl.print(0, "print1\n")
            hcl.assert_(x < 2, "assert error 1")
            hcl.print(0, "print2\n")
            hcl.assert_(x < 1, "assert error 2")
            hcl.print(x, "print3\n")
            B[x] = A[x] + 1

        with hcl.Stage():
            matrix_B = hcl.compute((m, k), lambda x, y: A[x] + B[x] + 1,
                                   "matrix_B")
            with hcl.for_(0, 10) as i:
                matrix_C = hcl.compute((m, k), lambda x, y: A[x] + B[x] + 2,
                                       "matrix_C")
                with hcl.for_(0, 10) as z:
                    matrix_D = hcl.compute(
                        (m, k), lambda x, y: A[x] + B[x] + 3, "matrix_D")
                    update_B(A, B, i)
                    matrix_E = hcl.compute(
                        (m, k), lambda x, y: A[x] + B[x] + 4, "matrix_E")
            hcl.print(0, "end\n")
Code example #27
File: test.py (project: ezw2/hcl_practice)
    def update_knn(train_inst, test_inst, dists, labels, label):
        # XOR the test instance against this training instance, then count set bits
        diff = hcl.compute((DIGIT_WIDTH,), lambda x: test_inst[x] ^ train_inst[x], "diff")
        dist = hcl.compute((1,), lambda x: popcount64(diff), "dist")

        # Find the largest distance currently held in the k-nearest lists
        max_dist = hcl.scalar(0)
        max_dist_id = hcl.scalar(K_CONST + 1)

        with hcl.for_(0, K_CONST) as k:
            with hcl.if_(dists[k] > max_dist.v):
                max_dist.v = dists[k]
                max_dist_id.v = k

        # If this candidate is closer, replace the current worst neighbor
        with hcl.if_(dist[0] < max_dist.v):
            dists[max_dist_id.v] = dist[0]
            labels[max_dist_id.v] = label[0]
        hcl.print(dist)
        return dist
Code example #28
 def update_B(A, x):
     with hcl.if_(A[x] < 5):
         hcl.print(0, "print1\n")
         hcl.assert_(A[x] < 4, "assert message 1")
         hcl.print(0, "print2\n")
         hcl.return_(-1)
     hcl.assert_(A[x] >= 5, "assert message 2")
     hcl.print(0, "not in if\n")
     hcl.return_(A[x] + 1)
Code example #29
    def update(l, prototype, prototypeCounter, max):
        hcl.print((l+1),"%d:Use hard examples to update the prototype counters.\n")

        ###data preparation
        distance = hcl.compute((in_train.shape[1],), lambda x: 0, 'distance', dtype=hcl.UInt(in_bw))
        pre_dist = hcl.compute((in_train.shape[1],), lambda x: 0, "pre_dist")
        hamming_dist = hcl.compute((numClasses,), lambda x: 0, "hamming_dist")
        m = hcl.reduce_axis(0, in_train.shape[1], "m")
        ###

        with hcl.for_(0, in_train.shape[0]) as i:
            hcl.print((i),"%d suc\n")
            # pack_proto = hcl.pack(prototype, axis=1, dtype=hcl.UInt(in_bw), name="pack_proto") 
            with hcl.for_(0, numClasses) as n:
                #Do hdc multiplication(XOR) on sample[i]'s hdv and prototype[n]'s hdv (elementwise on the high-bit data)
                hcl.update(distance, lambda x: in_train[i][x] ^ prototype[n][x])
                #Calculate the hamming distance of the two vectors by adding 1s
                hcl.update(pre_dist, lambda x: popcount(distance[x]))
                hcl.print((),"sum of 1s suc")
                hamming_dist[n] = hcl.sum(pre_dist[m], axis=m)

            #Find the one with the least hamming distance and choose its label as the predicted label
            pred = hcl.scalar(0, 'pred')
            with hcl.for_(0, hamming_dist.shape[0]) as j:
                with hcl.if_(hamming_dist[j] < hamming_dist[pred]):
                    pred.v = j

            #Adjust the proto vectors by adding the sample vector to its label's proto hdv and subtracting it from its predicted proto hdv
            with hcl.if_(pred.v != trainLabels[i]):
                max[trainLabels[i]] += 1
                max[pred] -= 1
                with hcl.for_(0, in_train.shape[1]) as m:
                    with hcl.for_(0, in_bw) as bit:
                        # with hcl.if_(in_train[i][m][bit] == 1):
                        #     ###########
                        #     prototypeCounter[trainLabels[i]][m*in_bw+bit] += 1
                        #     prototypeCounter[pred][m*in_bw+bit] -= 1
                        prototypeCounter[trainLabels[i]][m*in_bw+bit] += in_train[i][m][bit]
                        prototypeCounter[pred][m*in_bw+bit] -= in_train[i][m][bit]
                        with hcl.if_(max[trainLabels[i]] % 2 == 0):
                            with hcl.if_(prototypeCounter[trainLabels[i]][m*in_bw+bit] - max[trainLabels[i]]/2 == 0):
                                prototype[trainLabels[i]][m][bit] &= 1
                        with hcl.else_():
                            prototype[trainLabels[i]][m][bit] = hcl.select(prototypeCounter[trainLabels[i]][m*in_bw+bit] - max[trainLabels[i]]/2 > 0, 1, 0)

                        with hcl.if_(max[pred] % 2 == 0):
                            with hcl.if_(prototypeCounter[pred][m*in_bw+bit] - max[pred]/2 == 0):
                                prototype[pred][m][bit] &= 1
                        with hcl.else_():
                            prototype[pred][m][bit] = hcl.select(prototypeCounter[pred][m*in_bw+bit] - max[pred]/2 > 0, 1, 0)

        #print the accuracy
        hcl.mutate((1,), lambda x: test_hdc_accu(prototype, in_train, trainLabels, 1), 'training_update')
        hcl.mutate((1,), lambda x: test_hdc_accu(prototype, in_test, testLabels, 2), 'testing_update')
Code example #30
    def kernel(matrix_1, matrix_2):
        return_matrix = hcl.compute((m,k), lambda x, y: matrix_1[x,y] + matrix_2[x,y], "return_matrix")
   
        with hcl.for_(0, 7, name="for_loop") as f:
            hcl.assert_(matrix_2[f,2] == 0, "assert message in the first for loop") #assert true
            hcl.print(0, "in the first for loop\n") #should be printed
        
        with hcl.for_(0, 7, name="for_loop") as f: 
            hcl.assert_(matrix_2[f,2] != 0, "assert message in the second for loop") #assert false 
            hcl.print(0, "in the second for loop\n") #should not be printed

        hcl.print(0, "this should not be printed\n") #should not be printed
        return return_matrix