Example #1

import heterocl as hcl

# NUM_TRAINING and NUM_FEATURES are assumed to be module-level constants
# defined elsewhere in the sample.
def SgdLR(data, label, theta, lut):

    # Unpack the bit-packed inputs into logical fixed-point vectors
    label_local = hcl.unpack(label, name="label_local")
    theta_local = hcl.unpack(theta, name="theta_local")
    data_local = hcl.unpack(data, name="data_local")

    FTYPE = theta_local.dtype

    def Sigmoid(exponent):
        # LUT-based sigmoid: saturate outside [-4, 4], otherwise index the
        # lookup table with bits sliced from the fixed-point exponent.
        ret = hcl.scalar(0.0, "sigmoid", FTYPE)
        with hcl.if_(exponent > hcl.cast(FTYPE, 4.0)):
            ret[0] = 1.0
        with hcl.elif_(exponent < hcl.cast(FTYPE, -4.0)):
            ret[0] = 0.0
        with hcl.else_():
            with hcl.if_(exponent < hcl.cast(FTYPE, 0.0)):
                # Negative input: take the magnitude bits, negate via two's
                # complement, and index the LUT from the top end downward.
                num = hcl.scalar(0, dtype=hcl.UFixed(18, 8))
                num[0][18:0] = exponent[29:11]
                num[0] = ~(num[0] << 8) + 1
                index = 2047.0 - num[0]
                ret[0] = lut[hcl.cast(hcl.Int(32), index)]
            with hcl.else_():
                # Non-negative input: the index bits address the LUT directly
                index = exponent[21:11]
                ret[0] = lut[hcl.cast(hcl.Int(32), index)]
        return ret[0]

    with hcl.stage("M"):
        with hcl.for_(0, NUM_TRAINING) as train_id:
            training_instance = hcl.compute(
                (NUM_FEATURES, ),
                lambda x: data_local[train_id * NUM_FEATURES + x],
                "training_instance", data_local.dtype)

            # Main Computation
            k = hcl.reduce_axis(0, NUM_FEATURES, "k")
            dot = hcl.compute(
                (1, ),
                lambda x: hcl.sum(theta_local[k] * training_instance[k],
                                  axis=k,
                                  dtype=FTYPE),
                "dot",
                dtype=FTYPE)
            gradient = hcl.compute((NUM_FEATURES, ),
                                   lambda x: (Sigmoid(dot[0]) - label_local[
                                       train_id]) * training_instance[x],
                                   "gradient",
                                   dtype=FTYPE)
            # SGD step: update theta in place with a fixed step-size constant
            hcl.update(
                theta_local,
                lambda x: theta_local[x] - 2565.0 * gradient[x],
                name="update")

    theta_pack = hcl.pack(theta_local, name="theta_pack", dtype=theta.dtype)
    stream_out = hcl.update(theta, lambda x: theta_pack[x], name="stream_out")

    return stream_out
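
A minimal host-side sketch of how this kernel might be built and run. The sizes, the 2x packing factor, and the fixed-point format below are illustrative assumptions, not values taken from the original sample:

import heterocl as hcl
import numpy as np

NUM_TRAINING = 100   # assumed sample count, for illustration only
NUM_FEATURES = 1024  # assumed feature count
LUT_SIZE = 2048      # matches the 11-bit (0..2047) LUT indexing in Sigmoid

hcl.init(hcl.Fixed(32, 13))  # global fixed-point type; exact Q-format is an assumption

# Each 64-bit word is assumed to pack two 32-bit fixed-point values, so the
# unpack() calls inside SgdLR recover the logical vectors.
data = hcl.placeholder((NUM_TRAINING * NUM_FEATURES // 2,), "data", dtype=hcl.UInt(64))
label = hcl.placeholder((NUM_TRAINING // 2,), "label", dtype=hcl.UInt(64))
theta = hcl.placeholder((NUM_FEATURES // 2,), "theta", dtype=hcl.UInt(64))
lut = hcl.placeholder((LUT_SIZE,), "lut", dtype=hcl.Fixed(32, 13))

s = hcl.create_schedule([data, label, theta, lut], SgdLR)
f = hcl.build(s)  # call f with hcl.asarray buffers matching the placeholders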

# Standalone bitwidth-conversion snippets; each is an independent top-level kernel.
def unpack(A):
    return hcl.unpack(A, factor=4, name="B")

def unpack(A, B):
    C = hcl.unpack(A, name="C", dtype=B.dtype)
    hcl.update(B, lambda x: C[x])

def pack_unpack(A):
    C = hcl.pack(A, factor=4)
    return hcl.unpack(C, factor=4)
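
A quick sketch of driving one of these snippets end to end; the shapes and values are assumptions. Packing with factor=4 fuses four 8-bit elements into one 32-bit word, and unpacking with the same factor restores them:

import heterocl as hcl
import numpy as np

hcl.init()
A = hcl.placeholder((40,), "A", dtype=hcl.UInt(8))
s = hcl.create_schedule([A], pack_unpack)
f = hcl.build(s)

a_np = np.arange(40) % 256
a = hcl.asarray(a_np, dtype=hcl.UInt(8))
b = hcl.asarray(np.zeros(40), dtype=hcl.UInt(8))
f(a, b)
# pack followed by unpack with the same factor is the identity on the data
assert np.array_equal(b.asnumpy(), a_np)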

# numClasses is assumed to be a module-level constant defined before this kernel.
def kernel(pack_train, trainLabels, pack_test, testLabels, rdv3, epoch):
    def learn(k, hdTrainData, prototype, prototypeCounter):
        #Find samples that have the label k
        match = hcl.compute(
            hdTrainData.shape,
            lambda x, y: hcl.select(trainLabels[x] == k, hdTrainData[x][y], 0),
            "match")
        #Record the number of these samples
        with hcl.for_(0, hdTrainData.shape[0]) as a:
            with hcl.if_(trainLabels[a] == k):
                max[k] += 1
        #HDC bundling: accumulate the matching samples' HDVs dimension-wise
        r = hcl.reduce_axis(0, hdTrainData.shape[0], 'r')
        result = hcl.compute((hdTrainData.shape[1], ),
                             lambda y: hcl.sum(match[r][y], axis=r), "result")
        #Binary vote: majority over the accumulated samples; for even counts,
        #add the random vector rdv3 to break ties
        sum1 = hcl.compute((hdTrainData.shape[1], ), lambda x: 0, "sum1")
        with hcl.if_(max[k] % 2 == 0):
            hcl.update(
                sum1, lambda x: hcl.select(
                    result[x] + rdv3[k][x] - max[k] / 2 > 0, 1, 0))
        with hcl.else_():
            hcl.update(sum1,
                       lambda x: hcl.select(result[x] - max[k] / 2 > 0, 1, 0))
        #Store the binary vote in prototype and the raw counts in prototypeCounter
        with hcl.for_(0, hdTrainData.shape[1]) as t:
            prototype[k][t] = sum1[t]
            prototypeCounter[k][t] = result[t]

    # Classify each sample: XOR its HDV with every class prototype and pick the
    # class with the smallest Hamming distance. mode selects the printed banner.
    def test_hdc_accu(proto, hyper_dataset, labels, mode):
        ###data preparation
        distance1 = hcl.compute((hyper_dataset.shape[1], ), lambda x: 0,
                                'distance1')
        hamming_dist1 = hcl.compute((numClasses, ), lambda x: 0,
                                    "hamming_dist1")
        m1 = hcl.reduce_axis(0, hyper_dataset.shape[1], "m1")
        correct1 = hcl.scalar(0, 'correct1')
        ###

        with hcl.for_(0, hyper_dataset.shape[0]) as i:
            with hcl.for_(0, numClasses) as n:
                #HDC binding: elementwise XOR of sample[i]'s HDV with prototype[n]'s HDV
                hcl.update(distance1,
                           lambda x: hyper_dataset[i][x] ^ proto[n][x])
                #Hamming distance = number of ones in the XOR result
                hamming_dist1[n] = hcl.sum(distance1[m1], axis=m1)

            #Find the prototype with the smallest Hamming distance; its class
            #is the predicted label
            pred1 = hcl.scalar(0, 'pred1')
            with hcl.for_(0, hamming_dist1.shape[0]) as j:
                with hcl.if_(hamming_dist1[j] < hamming_dist1[pred1.v]):
                    pred1.v = j

            with hcl.if_(pred1.v == labels[i]):
                correct1.v += 1

        #Print the accuracy
        all1 = hcl.scalar(hyper_dataset.shape[0], "all1", dtype=hcl.Float(32))
        accuracy1 = hcl.compute((1, ),
                                lambda x: correct1.v / all1.v * 100,
                                "accuracy1",
                                dtype=hcl.Float(32))
        with hcl.if_(mode == 1):
            hcl.print((correct1.v, hyper_dataset.shape[0], accuracy1[0]),
                      "Training accu: %d/%d (%.2f%%)\n")
        with hcl.else_():
            hcl.print((correct1.v, hyper_dataset.shape[0], accuracy1[0]),
                      "Testing accu: %d/%d (%.2f%%)\n")

    # Retraining pass: for each misclassified sample, shift its HDV from the
    # mispredicted class's counters to the true class's, then re-vote the bits.
    def update(l, prototype, prototypeCounter, max):
        hcl.print((l + 1),
                  "%d: Use hard examples to update the prototype counters.\n")

        ###data preparation
        distance = hcl.compute((hdTrainData.shape[1], ), lambda x: 0,
                               'distance')
        hamming_dist = hcl.compute((numClasses, ), lambda x: 0, "hamming_dist")
        m = hcl.reduce_axis(0, hdTrainData.shape[1], "m")
        ###

        with hcl.for_(0, hdTrainData.shape[0]) as i:
            with hcl.for_(0, numClasses) as n:
                #HDC binding: elementwise XOR of sample[i]'s HDV with prototype[n]'s HDV
                hcl.update(distance,
                           lambda x: hdTrainData[i][x] ^ prototype[n][x])
                #Hamming distance = number of ones in the XOR result
                hamming_dist[n] = hcl.sum(distance[m], axis=m)

            #Find the prototype with the smallest Hamming distance; its class
            #is the predicted label
            pred = hcl.scalar(0, 'pred')
            with hcl.for_(0, hamming_dist.shape[0]) as j:
                with hcl.if_(hamming_dist[j] < hamming_dist[pred.v]):
                    pred.v = j

            #Adjust the prototypes: add the sample's HDV to its true class's
            #counters and subtract it from the mispredicted class's counters
            with hcl.if_(pred.v != trainLabels[i]):
                max[trainLabels[i]] += 1
                max[pred.v] -= 1
                with hcl.for_(0, hdTrainData.shape[1]) as d:
                    prototypeCounter[trainLabels[i]][d] += hdTrainData[i][d]
                    prototypeCounter[pred.v][d] -= hdTrainData[i][d]
                    with hcl.if_(max[trainLabels[i]] % 2 == 0):
                        with hcl.if_(prototypeCounter[trainLabels[i]][d] -
                                     max[trainLabels[i]] / 2 == 0):
                            prototype[trainLabels[i]][d] &= 1
                    with hcl.else_():
                        prototype[trainLabels[i]][d] = hcl.select(
                            prototypeCounter[trainLabels[i]][d] -
                            max[trainLabels[i]] / 2 > 0, 1, 0)

                    with hcl.if_(max[pred.v] % 2 == 0):
                        with hcl.if_(prototypeCounter[pred.v][d] -
                                     max[pred.v] / 2 == 0):
                            prototype[pred.v][d] &= 1
                    with hcl.else_():
                        prototype[pred.v][d] = hcl.select(
                            prototypeCounter[pred.v][d] - max[pred.v] / 2 > 0,
                            1, 0)

        #Report the accuracy after this retraining pass
        hcl.mutate(
            (1, ),
            lambda x: test_hdc_accu(prototype, hdTrainData, trainLabels, 1),
            'training_update')
        hcl.mutate(
            (1, ),
            lambda x: test_hdc_accu(prototype, hdTestData, testLabels, 2),
            'testing_update')

    ###unpack
    hdTrainData = hcl.unpack(pack_train,
                             axis=1,
                             dtype=hcl.UInt(1),
                             name="hdTrainData")
    hdTestData = hcl.unpack(pack_test,
                            axis=1,
                            dtype=hcl.UInt(1),
                            name="hdTestData")

    ###learn
    hcl.print((), "Learning the prototype HDVs.\n")
    prototype = hcl.compute(
        (numClasses, hdTrainData.shape[1]),
        lambda x, y: 0,
        "prototype",
    )
    prototypeCounter = hcl.compute(
        (numClasses, hdTrainData.shape[1]), lambda x, y: 0,
        "prototypeCounter")  #each entry holds the per-dimension sum over that class's samples

    #max[k] counts how many training samples were folded into class k's
    #prototype; used later for the binary vote
    max = hcl.compute((numClasses, ), lambda x: 0, "max")
    hcl.mutate((numClasses, ),
               lambda k: learn(k, hdTrainData, prototype, prototypeCounter),
               "learn")

    #Test the accuracy after learning
    hcl.mutate((1, ),
               lambda x: test_hdc_accu(prototype, hdTrainData, trainLabels, 1),
               "test_train_accu")
    hcl.mutate((1, ),
               lambda x: test_hdc_accu(prototype, hdTestData, testLabels, 2),
               "test_test_accu")

    ###update
    hcl.mutate((epoch[0], ),
               lambda x: update(x, prototype, prototypeCounter, max), "update")
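
A sketch of instantiating this HDC kernel; all sizes, dtypes, and the packing factor below are assumptions made for illustration:

import heterocl as hcl
import numpy as np

numClasses = 10             # assumed number of classes
DIM = 2048                  # assumed hypervector dimension
N_TRAIN, N_TEST = 100, 20   # assumed sample counts

hcl.init()
pack_train = hcl.placeholder((N_TRAIN, DIM // 32), "pack_train", dtype=hcl.UInt(32))
trainLabels = hcl.placeholder((N_TRAIN,), "trainLabels")
pack_test = hcl.placeholder((N_TEST, DIM // 32), "pack_test", dtype=hcl.UInt(32))
testLabels = hcl.placeholder((N_TEST,), "testLabels")
rdv3 = hcl.placeholder((numClasses, DIM), "rdv3")  # random tie-breaking vectors
epoch = hcl.placeholder((1,), "epoch")

s = hcl.create_schedule(
    [pack_train, trainLabels, pack_test, testLabels, rdv3, epoch], kernel)
f = hcl.build(s)  # call f with matching hcl.asarray buffers to train and evaluate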