Example #1
 def freduce(x, Y):
     with hcl.for_(0, Y.shape[0]) as i:
         with hcl.if_(x < Y[i]):
             with hcl.for_(Y.shape[0]-1, i, -1) as j:
                 Y[j] = Y[j-1]
             Y[i] = x
             hcl.break_()
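
A minimal driver sketch for the example above (the placeholder name A, the sentinel value 11, and the stage name "sort" are assumptions, not part of the original): freduce inserts one element into the sorted buffer Y, so mutating over A sorts the whole input.

    import heterocl as hcl
    import numpy as np

    hcl.init(hcl.Int())
    A = hcl.placeholder((10,), "A")

    def kernel(A):
        # a sentinel larger than any expected input keeps unfilled slots last
        Y = hcl.compute((10,), lambda x: 11, "Y")

        def freduce(x, Y):
            with hcl.for_(0, Y.shape[0]) as i:
                with hcl.if_(x < Y[i]):
                    with hcl.for_(Y.shape[0] - 1, i, -1) as j:
                        Y[j] = Y[j - 1]
                    Y[i] = x
                    hcl.break_()

        # insert every element of A into Y, yielding a sorted buffer
        hcl.mutate(A.shape, lambda x: freduce(A[x], Y), "sort")
        return Y

    s = hcl.create_schedule([A], kernel)
    f = hcl.build(s)
    a = hcl.asarray(np.random.randint(0, 10, (10,)))
    y = hcl.asarray(np.zeros((10,), dtype=np.int32))
    f(a, y)  # y.asnumpy() is now sorted in ascending order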
Example #2
        def loop_kernel(labels):
            # assign cluster
            with hcl.for_(0, N, name="n") as n:
                min_dist = hcl.scalar(100000)
                new_label = hcl.scalar(labels[n])
                with hcl.for_(0, K) as k:
                    dist = hcl.scalar(0)
                    with hcl.for_(0, dim) as d:
                        dist_ = hcl.scalar(points[n, d] - means[k, d], "temp")
                        dist.v += dist_.v * dist_.v
                    with hcl.if_(dist.v < min_dist.v):
                        min_dist.v = dist.v
                        new_label.v = k
                # write back the scalar's value, not the scalar object
                labels[n] = new_label.v
            # update mean
            num_k = hcl.compute((K, ), lambda x: 0, "num_k")
            sum_k = hcl.compute((K, dim), lambda x, y: 0, "sum_k")

            def calc_sum(n):
                num_k[labels[n]] += 1
                with hcl.for_(0, dim) as d:
                    sum_k[labels[n], d] += points[n, d]

            hcl.mutate((N, ), lambda n: calc_sum(n), "calc_sum")
            hcl.update(means, lambda k, d: sum_k[k, d] // num_k[k],
                       "update_mean")
Example #3
    def fft(X_real, X_imag, IndexTable, F_real, F_imag):
        L = X_real.shape[0]
        if np.log2(L) % 1 > 0:
            raise ValueError("Length of input vector (1-D tensor) must be a power of 2")
        num_stages = int(np.log2(L))

        # bit-reversal permutation
        hcl.update(F_real, lambda i: X_real[IndexTable[i]], name='F_real_update')
        hcl.update(F_imag, lambda i: X_imag[IndexTable[i]], name='F_imag_update')

        with hcl.Stage("Out"):
            one = hcl.scalar(1, dtype="int32")
            with hcl.for_(0, num_stages) as stage:
                DFTpts = one[0] << (stage + 1)
                numBF = DFTpts / 2
                e = -2 * np.pi / DFTpts
                a = hcl.scalar(0)
                with hcl.for_(0, numBF) as j:
                    c = hcl.scalar(hcl.cos(a[0]))
                    s = hcl.scalar(hcl.sin(a[0]))
                    a[0] = a[0] + e
                    with hcl.for_(j, L + DFTpts - 1, DFTpts) as i:
                        i_lower = i + numBF
                        temp_r = hcl.scalar(F_real[i_lower] * c - F_imag[i_lower] * s)
                        temp_i = hcl.scalar(F_imag[i_lower] * c + F_real[i_lower] * s)
                        F_real[i_lower] = F_real[i] - temp_r[0]
                        F_imag[i_lower] = F_imag[i] - temp_i[0]
                        F_real[i] = F_real[i] + temp_r[0]
                        F_imag[i] = F_imag[i] + temp_i[0]
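
The kernel assumes IndexTable already holds the bit-reversed index of each position. One way to precompute it on the host (a sketch; the helper name is made up):

    import numpy as np

    def bit_reverse_table(L):
        # reverse the log2(L)-bit binary representation of each index
        bits = int(np.log2(L))
        return np.array([int(format(i, "0{}b".format(bits))[::-1], 2)
                         for i in range(L)])

    # bit_reverse_table(8) -> array([0, 4, 2, 6, 1, 5, 3, 7])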
Example #4
 def absolute(A, B):
     with hcl.for_(0, A.shape[0], name="x") as x:
         with hcl.for_(0, A.shape[1], name="y") as y:
             with hcl.if_(A[x, y] >= 0):
                 B[x, y] = A[x, y]
             with hcl.else_():
                 B[x, y] = -A[x, y]
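
The same element-wise absolute value can also be written declaratively; a sketch using hcl.select in a single hcl.compute (the output name is assumed):

    def absolute_compute(A):
        # one compute with a select expression instead of explicit loops
        return hcl.compute(A.shape,
                           lambda x, y: hcl.select(A[x, y] >= 0,
                                                   A[x, y], -A[x, y]),
                           "B")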
Example #5
 def kernel(A):
     with hcl.Stage():
         with hcl.for_(0, 10) as i:
             with hcl.for_(0, 10) as j:
                 with hcl.if_(j >= i):
                     hcl.break_()
                 A[i] += j
Example #6
 def freduce(x, Y):
     with hcl.for_(0, 10) as i:
         with hcl.if_(x < Y[i]):
             with hcl.for_(9, i, -1) as j:
                 Y[j] = Y[j-1]
             Y[i] = x
             hcl.break_()
Example #7
 def learn(k, hdTrainData, prototype, prototypeCounter):
     # find the samples that have label k ('trainLabels', 'max', and
     # 'rdv3' are captured from the enclosing scope)
     match = hcl.compute(
         hdTrainData.shape,
         lambda x, y: hcl.select(trainLabels[x] == k, hdTrainData[x][y], 0),
         "match")
     #Record the number of these samples
     with hcl.for_(0, hdTrainData.shape[0]) as a:
         with hcl.if_(trainLabels[a] == k):
             max[k] += 1
     #Do hdc sum on these samples' hdv
     r = hcl.reduce_axis(0, hdTrainData.shape[0], 'r')
     result = hcl.compute((hdTrainData.shape[1], ),
                          lambda y: hcl.sum(match[r][y], axis=r), "result")
     #Do the binary voting
     sum1 = hcl.compute((hdTrainData.shape[1], ), lambda x: 0, "sum1")
     with hcl.if_(max[k] % 2 == 0):
         hcl.update(
             sum1, lambda x: hcl.select(
                 result[x] + rdv3[k][x] - max[k] / 2 > 0, 1, 0))
     with hcl.else_():
         hcl.update(sum1,
                    lambda x: hcl.select(result[x] - max[k] / 2 > 0, 1, 0))
     #Push the binary sum to prototype and the original sum to prototypeCounter
     with hcl.for_(0, hdTrainData.shape[1]) as t:
         prototype[k][t] = sum1[t]
         prototypeCounter[k][t] = result[t]
Example #8
 def func(data):
     out = hcl.compute((4, 4), lambda x, y: 0, "out", dtype)
     with hcl.Stage("S"):
         with hcl.for_(0, 4, name="i") as i:
             with hcl.for_(0, 4, name="j") as j:
                 out[i, j] = data[i, j] + 1
     return out
Example #9
    def update(l, prototype, prototypeCounter, max):
        hcl.print((l + 1),
                  "%d:Use hard examples to update the prototype counters.\n")

        ###data preparation
        distance = hcl.compute((hdTrainData.shape[1], ), lambda x: 0,
                               'distance')
        hamming_dist = hcl.compute((numClasses, ), lambda x: 0, "hamming_dist")
        m = hcl.reduce_axis(0, hdTrainData.shape[1], "m")
        ###

        with hcl.for_(0, hdTrainData.shape[0]) as i:
            with hcl.for_(0, numClasses) as n:
                #Do hdc multiplication(XOR) on sample[i]'s hdv and prototype[n]'s hdv (elementwise on the high-bit data)
                hcl.update(distance,
                           lambda x: hdTrainData[i][x] ^ prototype[n][x])
                #Calculate the hamming distance of the two vectors by adding 1s
                hamming_dist[n] = hcl.sum(distance[m], axis=m)

            # find the prototype with the smallest Hamming distance and take its label as the prediction
            pred = hcl.scalar(0, 'pred')
            with hcl.for_(0, hamming_dist.shape[0]) as j:
                with hcl.if_(hamming_dist[j] < hamming_dist[pred]):
                    pred.v = j

            # adjust the prototype vectors: add the sample vector to its
            # label's prototype and subtract it from the predicted one
            with hcl.if_(pred.v != trainLabels[i]):
                max[trainLabels[i]] += 1
                max[pred.v] -= 1
                # use a fresh loop variable so the reduce axis 'm' defined
                # above is not shadowed
                with hcl.for_(0, hdTrainData.shape[1]) as mm:
                    prototypeCounter[trainLabels[i]][mm] += hdTrainData[i][mm]
                    prototypeCounter[pred.v][mm] -= hdTrainData[i][mm]
                    with hcl.if_(max[trainLabels[i]] % 2 == 0):
                        with hcl.if_(prototypeCounter[trainLabels[i]][mm] -
                                     max[trainLabels[i]] / 2 == 0):
                            prototype[trainLabels[i]][mm] &= 1
                    with hcl.else_():
                        prototype[trainLabels[i]][mm] = hcl.select(
                            prototypeCounter[trainLabels[i]][mm] -
                            max[trainLabels[i]] / 2 > 0, 1, 0)

                    with hcl.if_(max[pred.v] % 2 == 0):
                        with hcl.if_(prototypeCounter[pred.v][mm] -
                                     max[pred.v] / 2 == 0):
                            prototype[pred.v][mm] &= 1
                    with hcl.else_():
                        prototype[pred.v][mm] = hcl.select(
                            prototypeCounter[pred.v][mm] -
                            max[pred.v] / 2 > 0, 1, 0)

        #print the accuracy
        hcl.mutate(
            (1, ),
            lambda x: test_hdc_accu(prototype, hdTrainData, trainLabels, 1),
            'training_update')
        hcl.mutate(
            (1, ),
            lambda x: test_hdc_accu(prototype, hdTestData, testLabels, 2),
            'testing_update')
Example #10
 def _mvpodd_reduce(*args):
     """compute {1, -1} dot product on packed data."""
     temp = hcl.local(0, name='mvpodd_acc', dtype=hcl.Int(64))
     with hcl.for_(0, in_block_num) as o:
         with hcl.for_(0, block_size) as i:
             temp[0] += tvm.popcount(d_packed[args[0], i+block_size*o] ^ w_packed[args[1], i+block_size*o])
     temp[0] = ppac_config.elem_num - temp[0]*2
     return temp[0]
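
This relies on the standard identity for {1, -1} vectors packed as bits: matching positions contribute +1 and mismatches -1, so the dot product equals elem_num - 2 * popcount(x XOR y). A plain-Python reference (a sketch; names are assumed):

    def mvpodd_ref(x_bits, y_bits, n):
        # XOR marks the mismatching positions; each mismatch turns a +1
        # contribution into a -1, lowering the dot product by 2
        mismatches = bin((x_bits ^ y_bits) & ((1 << n) - 1)).count("1")
        return n - 2 * mismatches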
Example #11
 def kernel(A, B):
     C = hcl.compute(A.shape, lambda *args : 0, "C")
     with hcl.Stage("stage"):
         with hcl.for_(0, 10, name="i") as i:
             with hcl.for_(0, 32, name="j") as j:
                 B[i, j] = A[i, j] + B[i, j]
                 C[i, j] = 2 * B[i, j]
     return C
Example #12
    def update(l, prototype, prototypeCounter, max):
        hcl.print((l+1),"%d:Use hard examples to update the prototype counters.\n")

        ###data preparation
        distance = hcl.compute((in_train.shape[1],), lambda x: 0, 'distance', dtype=hcl.UInt(in_bw))
        pre_dist = hcl.compute((in_train.shape[1],), lambda x: 0, "pre_dist")
        hamming_dist = hcl.compute((numClasses,), lambda x: 0, "hamming_dist")
        m = hcl.reduce_axis(0, in_train.shape[1], "m")
        ###

        with hcl.for_(0, in_train.shape[0]) as i:
            hcl.print((i),"%d suc\n")
            # pack_proto = hcl.pack(prototype, axis=1, dtype=hcl.UInt(in_bw), name="pack_proto") 
            with hcl.for_(0, numClasses) as n:
                #Do hdc multiplication(XOR) on sample[i]'s hdv and prototype[n]'s hdv (elementwise on the high-bit data)
                hcl.update(distance, lambda x: in_train[i][x] ^ prototype[n][x])
                #Calculate the hamming distance of the two vectors by adding 1s
                hcl.update(pre_dist, lambda x: popcount(distance[x]))
                hcl.print((),"sum of 1s suc")
                hamming_dist[n] = hcl.sum(pre_dist[m], axis=m)

            # find the prototype with the smallest Hamming distance and take its label as the prediction
            pred = hcl.scalar(0, 'pred')
            with hcl.for_(0, hamming_dist.shape[0]) as j:
                with hcl.if_(hamming_dist[j] < hamming_dist[pred]):
                    pred.v = j

            # adjust the prototype vectors: add the sample vector to its
            # label's prototype and subtract it from the predicted one
            with hcl.if_(pred.v != trainLabels[i]):
                max[trainLabels[i]] += 1
                max[pred.v] -= 1
                # use a fresh loop variable so the reduce axis 'm' defined
                # above is not shadowed
                with hcl.for_(0, in_train.shape[1]) as mm:
                    with hcl.for_(0, in_bw) as bit:
                        # with hcl.if_(in_train[i][mm][bit] == 1):
                        #     prototypeCounter[trainLabels[i]][mm*in_bw+bit] += 1
                        #     prototypeCounter[pred.v][mm*in_bw+bit] -= 1
                        prototypeCounter[trainLabels[i]][mm*in_bw+bit] += in_train[i][mm][bit]
                        prototypeCounter[pred.v][mm*in_bw+bit] -= in_train[i][mm][bit]
                        with hcl.if_(max[trainLabels[i]] % 2 == 0):
                            with hcl.if_(prototypeCounter[trainLabels[i]][mm*in_bw+bit] - max[trainLabels[i]]/2 == 0):
                                prototype[trainLabels[i]][mm][bit] &= 1
                        with hcl.else_():
                            prototype[trainLabels[i]][mm][bit] = hcl.select(prototypeCounter[trainLabels[i]][mm*in_bw+bit] - max[trainLabels[i]]/2 > 0, 1, 0)

                        with hcl.if_(max[pred.v] % 2 == 0):
                            with hcl.if_(prototypeCounter[pred.v][mm*in_bw+bit] - max[pred.v]/2 == 0):
                                prototype[pred.v][mm][bit] &= 1
                        with hcl.else_():
                            prototype[pred.v][mm][bit] = hcl.select(prototypeCounter[pred.v][mm*in_bw+bit] - max[pred.v]/2 > 0, 1, 0)

        #print the accuracy
        hcl.mutate((1,), lambda x: test_hdc_accu(prototype, in_train, trainLabels, 1), 'training_update')
        hcl.mutate((1,), lambda x: test_hdc_accu(prototype, in_test, testLabels, 2), 'testing_update')
Example #13
    def knn_vote(labels, max_label):
        max_vote = hcl.scalar(0)

        votes = hcl.compute((10, ), lambda x: 0, "votes")
        with hcl.for_(0, K_CONST) as i:
            votes[labels[i]] += 1

        with hcl.for_(0, 10) as i:
            with hcl.if_(votes[i] > max_vote.v):
                max_vote.v = votes[i]
                max_label[0] = i
Example #14
    def test_hdc_accu(proto, pack_data, labels, type):
        #pack the prototype
        pack_proto = hcl.pack(proto,
                              axis=1,
                              dtype=hcl.UInt(bw),
                              name="pack_proto")

        ###data preparation
        distance1 = hcl.compute((pack_data.shape[1], ),
                                lambda x: 0,
                                'distance1',
                                dtype=hcl.UInt(bw))
        pre_hamming = hcl.compute((pack_data.shape[1], ), lambda x: 0,
                                  "pre_hamming")
        hamming_dist1 = hcl.compute((numClasses, ), lambda x: 0,
                                    "hamming_dist1")
        m1 = hcl.reduce_axis(0, pack_data.shape[1], "m1")
        correct1 = hcl.scalar(0, 'correct1')
        ###

        with hcl.for_(0, pack_data.shape[0]) as i:
            hcl.print((i), "%d suc\n")
            with hcl.for_(0, numClasses) as n:
                #Do hdc multiplication(XOR) on sample[i]'s hdv and prototype[n]'s hdv (elementwise on the high-bit data)
                hcl.update(distance1,
                           lambda x: pack_data[i][x] ^ pack_proto[n][x])
                #Calculate the hamming distance of the two vectors by adding 1s
                hcl.update(pre_hamming, lambda x: popcount(distance1[x]))
                hcl.print((), "sum of 1s suc")
                # note: a seg fault was previously observed at this reduction
                hamming_dist1[n] = hcl.sum(pre_hamming[m1], axis=m1)

            # find the prototype with the smallest Hamming distance and take its label as the prediction
            pred1 = hcl.scalar(0, 'pred1')
            with hcl.for_(0, hamming_dist1.shape[0]) as j:
                with hcl.if_(hamming_dist1[j] < hamming_dist1[pred1]):
                    pred1.v = j

            with hcl.if_(pred1.v == labels[i]):
                correct1.v += 1

        #Print the accuracy
        all1 = hcl.scalar(pack_data.shape[0], "all1", dtype=hcl.Float(32))
        accuracy1 = hcl.compute((1, ),
                                lambda x: correct1.v / all1.v * 100,
                                "accuracy1",
                                dtype=hcl.Float(32))
        with hcl.if_(type == 1):
            hcl.print((correct1, pack_data.shape[0], accuracy1[0]),
                      "Training accu: %d/%d (%.2f%%)\n")
        with hcl.else_():
            hcl.print((correct1, pack_data.shape[0], accuracy1[0]),
                      "Testing accu: %d/%d (%.2f%%)\n")
Example #15
def updateVopt(i, j, k, l, m, n, o, iVals, sVals, actions, Vopt, intermeds,
               trans, interpV, gamma, bounds, goal, ptsEachDim, useNN):
    p = hcl.scalar(0, "p")

    with hcl.for_(0, actions.shape[0], name="a") as a:
        # set iVals equal to (i,j,k,l,m,n,o) and sVals equal to the corresponding state values (si,sj,sk,sl,sm,sn,so)
        updateStateVals(i, j, k, l, m, n, o, iVals, sVals, bounds, ptsEachDim)
        # call the transition function to obtain the outcome(s) of action a from state (si,sj,sk,sl,sm,sn,so)
        UD.transition(sVals, actions[a], bounds, trans, goal)
        # initialize the value of the action Q value with the immediate reward of taking that action
        intermeds[a] = UD.reward(sVals, actions[a], bounds, goal, trans)
        # add the value of each possible successor state to the Q value
        with hcl.for_(0, trans.shape[0], name="si") as si:
            p[0] = trans[si, 0]
            sVals[0] = trans[si, 1]
            sVals[1] = trans[si, 2]
            sVals[2] = trans[si, 3]
            sVals[3] = trans[si, 4]
            sVals[4] = trans[si, 5]
            sVals[5] = trans[si, 6]
            sVals[6] = trans[si, 7]

            # Nearest neighbour
            with hcl.if_(useNN[0] == 1):
                # convert the state values of the successor state (si,sj,sk,sl,sm,sn,so) into indices (ia,ja,ka,la,ma,na,oa)
                stateToIndex(sVals, iVals, bounds, ptsEachDim)
                # if (ia,ja,ka,la,ma,na,oa) is within the state space, add its discounted value to the Q value
                with hcl.if_(
                        hcl.and_(iVals[0] < Vopt.shape[0],
                                 iVals[1] < Vopt.shape[1],
                                 iVals[2] < Vopt.shape[2])):
                    with hcl.if_(
                            hcl.and_(iVals[3] < Vopt.shape[3],
                                     iVals[4] < Vopt.shape[4],
                                     iVals[5] < Vopt.shape[5],
                                     iVals[6] < Vopt.shape[6])):
                        with hcl.if_(
                                hcl.and_(iVals[0] >= 0, iVals[1] >= 0,
                                         iVals[2] >= 0, iVals[3] >= 0,
                                         iVals[4] >= 0, iVals[5] >= 0,
                                         iVals[6] >= 0)):
                            intermeds[a] += (
                                gamma[0] *
                                (p[0] *
                                 Vopt[iVals[0], iVals[1], iVals[2], iVals[3],
                                      iVals[4], iVals[5], iVals[6]]))

        # maximize over each Q value to obtain the optimal value
        Vopt[i, j, k, l, m, n, o] = -1000000
        with hcl.for_(0, intermeds.shape[0], name="r") as r:
            with hcl.if_(Vopt[i, j, k, l, m, n, o] < intermeds[r]):
                Vopt[i, j, k, l, m, n, o] = intermeds[r]
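
The loop nest is the standard Bellman backup of value iteration over a 7-dimensional grid: trans supplies the (p, s') outcome pairs, intermeds[a] accumulates the bracketed Q-value, and the final loop takes the maximum:

    V(s) \leftarrow \max_{a} \Big[ R(s, a) + \gamma \sum_{s'} p(s' \mid s, a)\, V(s') \Big]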
Example #16
    def kernel(matrix_1, matrix_2):
        return_matrix = hcl.compute((m,k), lambda x, y: matrix_1[x,y] + matrix_2[x,y], "return_matrix")
   
        with hcl.for_(0, 7, name="for_loop") as f:
            hcl.assert_(matrix_2[f,2] == 0, "assert message in the first for loop") #assert true
            hcl.print(0, "in the first for loop\n") #should be printed
        
        with hcl.for_(0, 7, name="for_loop") as f: 
            hcl.assert_(matrix_2[f,2] != 0, "assert message in the second for loop") #assert false 
            hcl.print(0, "in the second for loop\n") #should not be printed

        hcl.print(0, "this should not be printed\n") #should not be printed
        return return_matrix
Example #17
def updateVopt(obj, i, j, k, iVals, sVals, actions, Vopt, intermeds, trans,
               interpV, gamma, bounds, goal, ptsEachDim, useNN, fillVal):
    p = hcl.scalar(0, "p")

    with hcl.for_(0, actions.shape[0], name="a") as a:
        # set iVals equal to (i,j,k) and sVals equal to the corresponding state values (si,sj,sk)
        updateStateVals(i, j, k, iVals, sVals, bounds, ptsEachDim)
        # call the transition function to obtain the outcome(s) of action a from state (si,sj,sk)
        obj.transition(sVals, actions[a], bounds, trans, goal)
        # initialize the value of the action using the immediate reward of taking that action
        intermeds[a] = obj.reward(sVals, actions[a], bounds, goal, trans)
        Vopt[i, j, k] = intermeds[a]
        # add the value of each possible successor state to the estimated value of taking action a
        with hcl.for_(0, trans.shape[0], name="si") as si:
            p[0] = trans[si, 0]
            sVals[0] = trans[si, 1]
            sVals[1] = trans[si, 2]
            sVals[2] = trans[si, 3]
            # Nearest neighbour
            with hcl.if_(useNN[0] == 1):
                # convert the state values of the successor state (si,sj,sk) into indices (ia,ij,ik)
                stateToIndex(sVals, iVals, bounds, ptsEachDim)
                # if (ia, ij, ik) is within the state space, add its discounted value to action a
                with hcl.if_(
                        hcl.and_(iVals[0] < Vopt.shape[0],
                                 iVals[1] < Vopt.shape[1],
                                 iVals[2] < Vopt.shape[2])):
                    with hcl.if_(
                            hcl.and_(iVals[0] >= 0, iVals[1] >= 0,
                                     iVals[2] >= 0)):
                        intermeds[a] += (
                            gamma[0] *
                            (p[0] * Vopt[iVals[0], iVals[1], iVals[2]]))
            # Linear interpolation
            with hcl.if_(useNN[0] == 0):
                # if (sia, sja, ska) is within the state space, add its discounted value to action a
                with hcl.if_(
                        hcl.and_(sVals[0] <= bounds[0, 1],
                                 sVals[1] <= bounds[1, 1],
                                 sVals[2] <= bounds[2, 1])):
                    with hcl.if_(
                            hcl.and_(sVals[0] >= bounds[0, 0],
                                     sVals[1] >= bounds[1, 0],
                                     sVals[2] >= bounds[2, 0])):
                        stateToIndexInterpolants(Vopt, sVals, bounds,
                                                 ptsEachDim, interpV, fillVal)
                        intermeds[a] += (gamma[0] * (p[0] * interpV[0]))
        # maximize over each possible action in intermeds to obtain the optimal value
        with hcl.for_(0, intermeds.shape[0], name="r") as r:
            with hcl.if_(Vopt[i, j, k] < intermeds[r]):
                Vopt[i, j, k] = intermeds[r]
Example #18
def updateQopt(i, j, k, a, iVals, sVals, Qopt, actions, intermeds, trans,
               interpV, gamma, bounds, goal, ptsEachDim, useNN, fillVal):
    r = hcl.scalar(0, "r")
    p = hcl.scalar(0, "p")
    # set iVals equal to (i,j,k) and sVals equal to the corresponding state values at (i,j,k)
    updateStateVals(i, j, k, iVals, sVals, bounds, ptsEachDim)
    # call the transition function to obtain the outcome(s) of action a from state (si,sj,sk)
    transition(sVals, actions[a], bounds, trans, goal)
    # initialize Qopt[i,j,k,a] with the immediate reward
    r[0] = reward(sVals, actions[a], bounds, goal, trans)
    Qopt[i, j, k, a] = r[0]
    # maximize over successor Q-values
    with hcl.for_(0, trans.shape[0], name="si") as si:
        p[0] = trans[si, 0]
        sVals[0] = trans[si, 1]
        sVals[1] = trans[si, 2]
        sVals[2] = trans[si, 3]
        # Nearest neighbour
        with hcl.if_(useNN[0] == 1):
            # obtain the nearest neighbour successor state
            stateToIndex(sVals, iVals, bounds, ptsEachDim)
            # maximize over successor state Q-values
            with hcl.if_(
                    hcl.and_(iVals[0] < Qopt.shape[0],
                             iVals[1] < Qopt.shape[1],
                             iVals[2] < Qopt.shape[2])):
                with hcl.if_(
                        hcl.and_(iVals[0] >= 0, iVals[1] >= 0, iVals[2] >= 0)):
                    with hcl.for_(0, actions.shape[0], name="a_") as a_:
                        with hcl.if_(
                            (r[0] +
                             (gamma[0] *
                              (p[0] * Qopt[iVals[0], iVals[1], iVals[2], a_]))
                             ) > Qopt[i, j, k, a]):
                            Qopt[i, j, k, a] = r[0] + (gamma[0] * (
                                p[0] * Qopt[iVals[0], iVals[1], iVals[2], a_]))
        # Linear interpolation
        with hcl.if_(useNN[0] == 0):
            with hcl.if_(
                    hcl.and_(sVals[0] <= bounds[0, 1],
                             sVals[1] <= bounds[1, 1],
                             sVals[2] <= bounds[2, 1])):
                with hcl.if_(
                        hcl.and_(sVals[0] >= bounds[0, 0],
                                 sVals[1] >= bounds[1, 0],
                                 sVals[2] >= bounds[2, 0])):
                    stateToIndexInterpolants(Qopt, sVals, actions, bounds,
                                             ptsEachDim, interpV, fillVal)
                    Qopt[i, j, k, a] += (gamma[0] * (p[0] * interpV[0]))
        r[0] += Qopt[i, j, k, a]
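
This variant writes the backup into a Q-table. Note that the nearest-neighbour branch maximizes r + gamma * p * Q(s', a') over successor entries rather than accumulating a sum, so it is a variant of the textbook backup

    Q(s, a) \leftarrow R(s, a) + \gamma \sum_{s'} p(s' \mid s, a) \max_{a'} Q(s', a')

while the interpolation branch accumulates with += as in the formula.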
Example #19
    def kernel_digit_rec(training_set, test_set, result):

        with hcl.for_(0, NUM_TEST) as t:
            dists = hcl.compute((K_CONST, ), lambda x: 0, "dists")
            labels = hcl.compute((K_CONST, ), lambda x: 0, "labels")
            test = get_data(test_set, t)

            with hcl.for_(0, NUM_TRAINING) as i:
                training = get_data(training_set, i)
                label = hcl.compute((1, ), lambda x: 0, "label")
                update_knn(training, test, dists, labels, label)

            max_label = hcl.compute((1, ), lambda x: 0, "max_label")
            knn_vote(labels, max_label)
Example #20
 def genpack(nn, cc, hh, ww):
     out = hcl.scalar(0, name=name + "_pack", dtype=hcl.UInt(bitwidth))
     with hcl.for_(0, bitwidth) as k:
         out[0][(k + 1):k] = hcl.select(
             data[nn, cc * bitwidth + k, hh, ww] + alpha[cc * bitwidth + k]
             > 0, 1, 0)
     return out[0]
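
Reference semantics of genpack in plain Python (a sketch): bit k of the returned word is the thresholded sign of channel cc * bitwidth + k after adding its alpha offset.

    def genpack_ref(data, alpha, nn, cc, hh, ww, bitwidth):
        # pack 'bitwidth' thresholded channel values into one unsigned word
        word = 0
        for k in range(bitwidth):
            ch = cc * bitwidth + k
            bit = 1 if data[nn, ch, hh, ww] + alpha[ch] > 0 else 0
            word |= bit << k
        return word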
Example #21
 def mul(A, B, x):
     temp = hcl.scalar(0)
     with hcl.for_(0, x) as i:
         hcl.assert_(x < 5, "assert in for")
         temp[0] += add(A, B, x)
         hcl.print(0, "in for\n")
     hcl.return_(temp[0])
Example #22
    def kernel(matrix_1, matrix_2):
        first_matrix = hcl.compute((m, k), lambda x, y : matrix_1[x, y] + matrix_2[x, y], "first_matrix")
        return_matrix = hcl.compute((m, k), lambda x, y : matrix_1[x, y] + matrix_2[x, y] + 7, "return_matrix")
        ax = hcl.scalar(0)
        with hcl.while_(ax.v < 3):
            matrix_A = hcl.compute((m, k), lambda x, y : matrix_1[x, y] + matrix_2[x, y] + 7, "matrix_A")

            with hcl.for_(0, 2, name="for_loop_in") as h:
                matrix_B = hcl.compute((m, k), lambda x, y : matrix_1[x, y] + matrix_2[x, y] + 8, "matrix_B")

                with hcl.if_(matrix_1[0, 2] >= 0):
                    matrix_C = hcl.compute((m, k), lambda x, y : matrix_1[x, x] + matrix_2[x, x] + 9, "matrix_C")
                    hcl.assert_(matrix_1[0, 0] > 0, "assert message in the if statement %d", matrix_C[0, 0])
                    matrix_D = hcl.compute((m, k), lambda x, y : matrix_1[x, x] + matrix_2[x, x] + 9, "matrix_D")
                    hcl.print(0, "in if statement\n")

                hcl.assert_(matrix_1[0, 0] > 1, "assert message for loop")
                matrix_F = hcl.compute((m, k), lambda x, y : matrix_1[x, y] + matrix_2[x, y] + 8, "matrix_F")
                hcl.print(0, "in for loop\n")

            hcl.assert_(matrix_1[0, 0] > 2, "assert error, matrix_A[1, 1]: %d matrix_A[2, 1]: %d matrix_A[3, 1]: %d", [matrix_A[1, 1], matrix_A[2, 1], matrix_A[3, 1]])
            hcl.print(0, "in the while loop\n")
            ax.v = ax.v + 1

        hcl.assert_(matrix_1[0, 0] > 3, "assert message end")
        matrix_E = hcl.compute((m, k), lambda x, y : matrix_1[x, y] + matrix_2[x, y] + 10, "matrix_E")
        hcl.print(0, "this should not be printed\n")
        return return_matrix
Example #23
    def kernel(A, B, C, O):
        dtype_xyz = hcl.Struct({
            "x": hcl.Int(),
            "y": hcl.Int(),
            "z": hcl.Int()
        })
        dtype_out = hcl.Struct({
            "v0": hcl.Int(),
            "v1": hcl.Int(),
            "v2": hcl.Int(),
            "v3": hcl.Int(),
            "v4": hcl.Int(),
            "v5": hcl.Int()
        })

        D = hcl.compute(A.shape, lambda x: (A[x], B[x], C[x]), dtype=dtype_xyz)
        E = hcl.compute(A.shape,
                        lambda x:
                        (D[x].x * D[x].x, D[x].y * D[x].y, D[x].z * D[x].z, D[
                            x].x * D[x].y, D[x].y * D[x].z, D[x].x * D[x].z),
                        dtype=dtype_out)
        with hcl.Stage():
            with hcl.for_(0, 100) as i:
                for j in range(0, 6):
                    O[i][j] = E[i].__getattr__("v" + str(j))
Example #24
def kernel(A, B, C):
    with hcl.Stage("S"):
        with hcl.for_(0, 10) as i:
            # set the LSB of B to be the same as A
            B[i][0] = A[i][0]
            # set the lower 4-bit of C
            C[i][4:0] = A[i]
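
In plain integer arithmetic the two bit-level writes mean the following (a sketch; HeteroCL's [4:0] slice addresses bits 0 through 3):

    def kernel_ref(A, B, C):
        for i in range(10):
            # B[i][0] = A[i][0]: copy the least-significant bit of A[i]
            B[i] = (B[i] & ~1) | (A[i] & 1)
            # C[i][4:0] = A[i]: overwrite the low 4 bits of C[i]
            C[i] = (C[i] & ~0xF) | (A[i] & 0xF)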
Example #25
 def update_knn(dist, knn_mat, i, j):
     max_id = hcl.local(0, "max_id")
     with hcl.for_(0, 3) as k:
         with hcl.if_(knn_mat[i][k] > knn_mat[i][max_id[0]]):
             max_id[0] = k
     with hcl.if_(dist[i][j] < knn_mat[i][max_id[0]]):
         knn_mat[i][max_id[0]] = dist[i][j]
Example #26
 def update_knn(dist, knn_mat, i, j):
     max_id = hcl.scalar(0, "max_id")
     with hcl.for_(0, 3) as k:
         with hcl.if_(knn_mat[i][k] > knn_mat[i][max_id.v]):
             max_id.v = k
     with hcl.if_(dist[i][j] < knn_mat[i][max_id.v]):
         knn_mat[i][max_id.v] = dist[i][j]
Example #27
 def add(a, b, c):
     with hcl.for_(0, 10) as i:
         a[i] = 0
         hcl.assert_(i < 10, "assert error 1")
     d = hcl.compute(a.shape, lambda *x: a[x] + b[x])
     hcl.assert_(a[0] == 0, "assert error 2")
     hcl.update(c, lambda *x: d[x] + 1)
     hcl.assert_(a[0] == 0, "assert error 3")
Example #28
def simple_compute(a, A):
    B = hcl.compute(A.shape, lambda x, y: A[x, y], "B")
    index = 2
    # the NumPy index arithmetic below runs at Python trace time,
    # building the index expressions for the generated kernel
    with hcl.for_(0, A.shape[0], name="i") as i:
        with hcl.for_(0, A.shape[1], name="j") as j:
            ind = np.array([index, j])
            with hcl.if_(A[ind] > 10):
                ind = ind + np.array([0, 1])
                ind = ind % np.array([3, 3])
                ind = tuple(ind)
                B[i, j] = B[i, j] - 1000
            with hcl.else_():
                ind = ind - np.array([0, 1])
                ind = ind % np.array([3, 3])
                ind = tuple(ind)
                B[i, j] = B[i, j] + 1000
    return B
Example #29
    def algorithm(A, B):
        @hcl.def_([A.shape, B.shape, ()])
        def update_B(A, B, x):
            B[x] = A[x] + 1

        with hcl.Stage():
            with hcl.for_(0, 10) as i:
                update_B(A, B, i)
Example #30
 def find_max(A, len_):
     max_ = hcl.local(A[0], "max")
     act_ = hcl.local(0, "act")
     with hcl.for_(0, len_) as i:
         with hcl.if_(A[i] > max_[0]):
             max_[0] = A[i]
             act_[0] = i
     return max_[0], act_[0]