All examples assume import heterocl as hcl (and import numpy as np where NumPy is used).

Example #1
    def sobel_kernel(imgF, Gx, Gy):
        def pad(x, y, z):
            out = hcl.scalar(0, "out")
            with hcl.if_(hcl.and_(x > 0, x < height + 1, y > 0, y < width + 1)):
                out.v = imgF[x - 1, y - 1, z]
            with hcl.else_():
                out.v = 0
            return out.v

        P = hcl.compute((height + 2, width + 2, 3),
                        lambda x, y, z: pad(x, y, z), "P")

        A = hcl.compute((height + 2, width + 2),
                        lambda x, y: P[x][y][0] + P[x][y][1] + P[x][y][2], "A")

        r = hcl.reduce_axis(0, 3)
        c = hcl.reduce_axis(0, 3)
        resX = hcl.compute((height, width), lambda x, y: hcl.sum(
            A[x + r, y + c] * Gx[r, c], axis=[r, c], name="sum1"), "X")

        t = hcl.reduce_axis(0, 3)
        g = hcl.reduce_axis(0, 3)
        resY = hcl.compute((height, width), lambda x, y: hcl.sum(
            A[x + t, y + g] * Gy[t, g], axis=[t, g], name="sum2"), "Y")

        R = hcl.compute((height, width),
                        lambda x, y: hcl.sqrt(resX[x][y] * resX[x][y] + resY[x][y] * resY[x][y]), "R")

        norm = hcl.scalar(255 / 4328)

        return hcl.compute((height, width), lambda x, y: R[x][y] * norm.v, "F")
Example #2
    def sobel(A, Gx, Gy):
        r = hcl.reduce_axis(0,3)
        c = hcl.reduce_axis(0,3)

        A1 = hcl.compute((height,width), lambda y, x: 
            A[y][x][0] + A[y][x][1] + A[y][x][2], "A1")

        B1 = hcl.compute((height-2,width-2), 
                lambda x,y: hcl.sum(A1[x+r,y+c]*Gx[r,c], axis=[r,c], name="sum1"),
                name="B1", dtype=hcl.Float())

        t = hcl.reduce_axis(0,3)
        g = hcl.reduce_axis(0,3)

        B2 = hcl.compute((height-2,width-2), 
                lambda x,y: hcl.sum(A1[x+t,y+g]*Gy[t,g], axis=[t,g], name="sum2"),
                name="B2", dtype=hcl.Float())

        def avg(in1, in2):
            ll = hcl.scalar(in1, "in1")
            lr = hcl.scalar(in2, "in2")
            return hcl.sqrt(ll.v * ll.v + lr.v * lr.v)/4328*255

        return hcl.compute((height-2,width-2), 
                   lambda x, y : avg(B1[x,y], B2[x,y]),
                   name="output", dtype=hcl.Float())
Example #3
def sobel(A, Gx, Gy):

    B = hcl.compute((height, width),
                    lambda x, y: A[x][y][0] + A[x][y][1] + A[x][y][2],
                    "B",
                    dtype=hcl.Float())
    r = hcl.reduce_axis(0, 3)
    c = hcl.reduce_axis(0, 3)
    D = hcl.compute(
        (height - 2, width - 2),
        lambda x, y: hcl.sum(B[x + r, y + c] * Gx[r, c], axis=[r, c]),
        "D",
        dtype=hcl.Float())
    t = hcl.reduce_axis(0, 3)
    g = hcl.reduce_axis(0, 3)

    E = hcl.compute(
        (height - 2, width - 2),
        lambda x, y: hcl.sum(B[x + t, y + g] * Gy[t, g], axis=[t, g]),
        "E",
        dtype=hcl.Float())
    return hcl.compute((height - 2, width - 2),
                       lambda x, y: hcl.sqrt(D[x][y] * D[x][y] + E[x][y] * E[x][y]) / 4328 * 255,
                       dtype=hcl.Float())
Example #4
def sobelAlgo(A, Fx, Fy):
    B = hcl.compute((height+2, width+2), lambda x,y:A[x][y][0]+A[x][y][1]+A[x][y][2], "B")
    r = hcl.reduce_axis(0, 3)
    c = hcl.reduce_axis(0, 3)
    Gx = hcl.compute((height, width), lambda y,x:hcl.sum(B[y+r, x+c]*Fx[r,c], axis = [r,c]), "Gx")
    t = hcl.reduce_axis(0, 3)
    g = hcl.reduce_axis(0, 3)
    Gy = hcl.compute((height, width), lambda y,x:hcl.sum(B[y+t, x+g]*Fy[t,g], axis = [t,g]), "Gy")
    return hcl.compute((height, width), lambda y,x:(hcl.sqrt(Gx[y][x]*Gx[y][x]+Gy[y][x]*Gy[y][x]))/4328*255)
Example #5
def seidel(input_image, output_image):
  dtype = hcl.Float()
  rx = hcl.reduce_axis(0, 3, "rx")
  ry = hcl.reduce_axis(0, 3, "ry")

  tmp = hcl.compute(output_image.shape, lambda x, y: hcl.sum(
      input_image[x, ry+y], axis=[ry], dtype=dtype)/3, dtype=dtype, name='tmp')

  return hcl.update(output_image, lambda x, y: hcl.sum(
      tmp[rx+x, y], axis=[rx], dtype=dtype)/3, name=output_image.name)
Example #6
def sobel(A,Gx,Gy):   
   B = hcl.compute((height,width), lambda x,y: A[x][y][0]+A[x][y][1]+A[x][y][2], "B") 
   r = hcl.reduce_axis(0,3)
   c = hcl.reduce_axis(0,3)
  # D = hcl.compute((height, width), lambda x,y: hcl.select(hcl.and_(x>0,x<(height-1),y>0,y<(width-1)), hcl.sum(B[x+r,y+c]*Gx[r,c],axis=[r,c]), B[x,y]), "xx")
   D = hcl.compute((height-2, width-2), lambda x,y: hcl.sum(B[x+r, y+c]*Gx[r,c], axis=[r,c], name="sum1"), "xx")

   t = hcl.reduce_axis(0, 3)
   g = hcl.reduce_axis(0, 3)
  # E = hcl.compute((height, width), lambda x,y: hcl.select(hcl.and_(x>0,x<(height-1),y>0,y<(width-1)), hcl.sum(B[x+t,y+g]*Gy[t,g],axis=[t,g]), B[x,y]), "yy")
   E = hcl.compute((height-2, width-2), lambda x,y: hcl.sum(B[x+t, y+g]*Gy[t,g], axis=[t,g]), "yy")

   return  hcl.compute((height-2,width-2), lambda x,y:hcl.sqrt(D[x][y]*D[x][y]+E[x][y]*E[x][y])*0.05891867,"Fimg")
Example #7
    def sobel(RGB,Gx,Gy):
       B = hcl.compute((height,width), lambda x,y: RGB[x][y][8:0] + RGB[x][y][16:8] + RGB[x][y][24:16], "B")
       r = hcl.reduce_axis(0,3)
       c = hcl.reduce_axis(0,3)
       D = hcl.compute((height-2, width-2),
            lambda x,y: hcl.sum(B[x+r, y+c]*Gx[r,c], axis=[r,c], name="sum1"), "xx")

       t = hcl.reduce_axis(0, 3)
       g = hcl.reduce_axis(0, 3)
       E = hcl.compute((height-2, width-2),
            lambda x,y: hcl.sum(B[x+t, y+g]*Gy[t,g], axis=[t,g], name="sum2"), "yy")
       return  hcl.compute((height-2,width-2),
            lambda x,y:hcl.sqrt(D[x][y]*D[x][y]+E[x][y]*E[x][y])*0.05891867, "Fimg")
Example #8
def sobel(A, Gx, Gy):

   r = hcl.reduce_axis(0,3)
   c = hcl.reduce_axis(0,3)
   B = hcl.compute((height-2,width-2), 
           lambda x,y: hcl.sum(A[x+r,y+c]*Gx[r,c], axis=[r,c], name="sum1"),
           name="B", dtype=hcl.Float())
   t = hcl.reduce_axis(0,3)
   g = hcl.reduce_axis(0,3)

   C = hcl.compute((height-2,width-2), 
           lambda x,y: hcl.sum(A[x+t,y+g]*Gy[t,g], axis=[t,g], name="sum2"),
           name="C", dtype=hcl.Float())
   return hcl.compute((height-2,width-2), 
              lambda x, y :hcl.sqrt(B[x,y]*B[x,y] + C[x,y]*C[x,y])/4328*255,
              name="output", dtype=hcl.Float())
Example #9
 def kernel(matrix_1, matrix_2):
     r = hcl.reduce_axis(0, k, 'k')
     out_matrix = hcl.compute((m, n),
             lambda x, y: hcl.sum(matrix_1[x, r] * matrix_2[r, y],
                                  axis=r, dtype=dtype), dtype=dtype,
             name="out_matrix")
     return out_matrix
Example #10
 def learn(k, hdTrainData, prototype, prototypeCounter):
     #Find samples that have the label k
     match = hcl.compute(
         hdTrainData.shape,
         lambda x, y: hcl.select(trainLabels[x] == k, hdTrainData[x][y], 0),
         "match")
     #Record the number of these samples
     with hcl.for_(0, hdTrainData.shape[0]) as a:
         with hcl.if_(trainLabels[a] == k):
             max[k] += 1
     #Do hdc sum on these samples' hdv
     r = hcl.reduce_axis(0, hdTrainData.shape[0], 'r')
     result = hcl.compute((hdTrainData.shape[1], ),
                          lambda y: hcl.sum(match[r][y], axis=r), "result")
     #Do the binary voting
     sum1 = hcl.compute((hdTrainData.shape[1], ), lambda x: 0, "sum1")
     with hcl.if_(max[k] % 2 == 0):
         hcl.update(
             sum1, lambda x: hcl.select(
                 result[x] + rdv3[k][x] - max[k] / 2 > 0, 1, 0))
     with hcl.else_():
         hcl.update(sum1,
                    lambda x: hcl.select(result[x] - max[k] / 2 > 0, 1, 0))
     #Push the binary sum to prototype and the original sum to prototypeCounter
     with hcl.for_(0, hdTrainData.shape[1]) as t:
         prototype[k][t] = sum1[t]
         prototypeCounter[k][t] = result[t]
Example #11
def test_conv2D_lb():
    hcl.init()
    A = hcl.placeholder((10, 10))
    r = hcl.reduce_axis(0, 3)
    c = hcl.reduce_axis(0, 3)
    B = hcl.compute((8, 8), lambda y, x: hcl.sum(A[y + r, x + c], axis=[r, c]))
    s = hcl.create_schedule([A, B])
    LB = s.reuse_at(A, s[B], B.axis[0])
    f = hcl.build(s)

    np_A = np.random.randint(0, 10, size=(10, 10))
    np_B = np.zeros((8, 8), dtype="int")
    np_C = np.zeros((8, 8), dtype="int")

    for y in range(0, 8):
        for x in range(0, 8):
            for r in range(0, 3):
                for c in range(0, 3):
                    np_C[y][x] += np_A[y + r][x + c]

    hcl_A = hcl.asarray(np_A)
    hcl_B = hcl.asarray(np_B)

    f(hcl_A, hcl_B)

    np_B = hcl_B.asnumpy()

    assert np.array_equal(np_B, np_C)
Example #12
 def kernel(A, B):
     k = hcl.reduce_axis(0, K, "k")  # K (the reduction size) comes from an enclosing scope in the original
     C = hcl.compute(
         (M, N),
         lambda x, y: hcl.sum(A[x, k] * B[k, y], axis=k, dtype=dtype),
         "C",
         dtype=dtype)
     return C
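This snippet relies on M, N, K, and dtype from an enclosing scope. Below is a minimal self-contained sketch of the same GEMM pattern, modeled on the build-and-run harness in Example #11; the sizes and dtype are illustrative assumptions, not from the original.

import heterocl as hcl
import numpy as np

M, N, K = 16, 16, 16   # hypothetical sizes
dtype = hcl.Int(32)

hcl.init(dtype)
A = hcl.placeholder((M, K), "A")
B = hcl.placeholder((K, N), "B")

def kernel(A, B):
    k = hcl.reduce_axis(0, K, "k")
    return hcl.compute((M, N),
                       lambda x, y: hcl.sum(A[x, k] * B[k, y], axis=k, dtype=dtype),
                       "C", dtype=dtype)

s = hcl.create_schedule([A, B], kernel)
f = hcl.build(s)

np_A = np.random.randint(0, 10, size=(M, K))
np_B = np.random.randint(0, 10, size=(K, N))
hcl_C = hcl.asarray(np.zeros((M, N), dtype="int"))
f(hcl.asarray(np_A), hcl.asarray(np_B), hcl_C)
assert np.array_equal(hcl_C.asnumpy(), np_A @ np_B)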
Example #13
 def kernel(A):
     r = hcl.reduce_axis(0, KERNEL_SIZE)
     c = hcl.reduce_axis(0, KERNEL_SIZE)
     F = hcl.copy(np.random.randint(0, 10, (KERNEL_SIZE, KERNEL_SIZE)), "F")
     return hcl.compute(
         (SIZE - KERNEL_SIZE + 1, SIZE - KERNEL_SIZE + 1),
         lambda y, x: hcl.sum(A[y + r, x + c] * F[r, c], axis=[r, c]), "B")
Example #14
    def update(l, prototype, prototypeCounter, max):
        hcl.print((l + 1),
                  "%d:Use hard examples to update the prototype counters.\n")

        ###data preparation
        distance = hcl.compute((hdTrainData.shape[1], ), lambda x: 0,
                               'distance')
        hamming_dist = hcl.compute((numClasses, ), lambda x: 0, "hamming_dist")
        m = hcl.reduce_axis(0, hdTrainData.shape[1], "m")
        ###

        with hcl.for_(0, hdTrainData.shape[0]) as i:
            with hcl.for_(0, numClasses) as n:
                #Do HDC multiplication (XOR) on sample[i]'s HDV and prototype[n]'s HDV (elementwise on the high-bit data)
                hcl.update(distance,
                           lambda x: hdTrainData[i][x] ^ prototype[n][x])
                #Calculate the Hamming distance of the two vectors by counting the 1s
                hamming_dist[n] = hcl.sum(distance[m], axis=m)

            #Find the one with the smallest Hamming distance and choose its label as the predicted label
            pred = hcl.scalar(0, 'pred')
            with hcl.for_(0, hamming_dist.shape[0]) as j:
                with hcl.if_(hamming_dist[j] < hamming_dist[pred]):
                    pred.v = j

            #Adjust the prototype vectors: add the sample vector to its true-label prototype and subtract it from its predicted-label prototype
            with hcl.if_(pred.v != trainLabels[i]):
                max[trainLabels[i]] += 1
                max[pred] -= 1
                with hcl.for_(0, hdTrainData.shape[1]) as m:
                    prototypeCounter[trainLabels[i]][m] += hdTrainData[i][m]
                    prototypeCounter[pred][m] -= hdTrainData[i][m]
                    with hcl.if_(max[trainLabels[i]] % 2 == 0):
                        with hcl.if_(prototypeCounter[trainLabels[i]][m] -
                                     max[trainLabels[i]] / 2 == 0):
                            prototype[trainLabels[i]][m] &= 1
                    with hcl.else_():
                        prototype[trainLabels[i]][m] = hcl.select(
                            prototypeCounter[trainLabels[i]][m] -
                            max[trainLabels[i]] / 2 > 0, 1, 0)

                    with hcl.if_(max[pred] % 2 == 0):
                        with hcl.if_(prototypeCounter[pred][m] -
                                     max[pred] / 2 == 0):
                            prototype[pred][m] &= 1
                    with hcl.else_():
                        prototype[pred][m] = hcl.select(
                            prototypeCounter[pred][m] - max[pred] / 2 > 0, 1,
                            0)

        #print the accuracy
        hcl.mutate(
            (1, ),
            lambda x: test_hdc_accu(prototype, hdTrainData, trainLabels, 1),
            'training_update')
        hcl.mutate(
            (1, ),
            lambda x: test_hdc_accu(prototype, hdTestData, testLabels, 2),
            'testing_update')
Example #15
def unsharp(input_image, output_image):
    """
    Helper Functions
    """
    def clamp(val, min_, max_):
        local = hcl.scalar(val)
        with hcl.if_(val < min_):
            local[0] = min_
        with hcl.elif_(val > max_):
            local[0] = max_
        return local[0]

    def clamp2D(tensor, min_, max_):
        return hcl.compute(tensor.shape,
                           lambda x, y: clamp(tensor[x, y], min_, max_),
                           name="clamped_" + tensor.name)

    def clamp3D(tensor, min_, max_):
        return hcl.compute(tensor.shape,
                           lambda x, y, c: clamp(tensor[x, y, c], min_, max_),
                           name="clamped_" + tensor.name)

    def kernel_f(x):
        return hcl.exp(-(x * x) / (2 * 1.5 * 1.5)) / sqrt(2 * 3.14159 * 1.5)

    def kernel(x):
        return kernel_f(x) * 255 / (kernel_f(0) + kernel_f(1) * 2 +
                                    kernel_f(2) * 2 + kernel_f(3) * 2 +
                                    kernel_f(4) * 2)

    rx = hcl.reduce_axis(-4, 5, "rx")
    ry = hcl.reduce_axis(-4, 5, "ry")
    my = hcl.reduce_axis(0, 640, "my")

    gray = hcl.compute((480, 640),
                       lambda x, y: (input_image[x, y, 0] * 77 + input_image[x, y, 1] * 150 + input_image[x, y, 2] * 29) >> 8,
                       name="gray")
    blur = hcl.compute(
        gray.shape,
        lambda x, y: hcl.sum(gray[rx + x, ry + y] * kernel(rx) * kernel(ry),
                             axis=[rx, ry]),
        name="blur")
    sharpen = clamp2D(
        hcl.compute(gray.shape,
                    lambda x, y: gray[x, y] * 2 - blur[x, y],
                    name="sharpen"), 0, 255)
    ratio = clamp2D(
        hcl.compute(
            gray.shape,
            lambda x, y: sharpen[x, y] * 32 / hcl.max(gray[x, my], axis=my),
            name="ratio"), 0, 255)
    out = clamp3D(
        hcl.compute(output_image.shape,
                    lambda x, y, c: ratio[x, y] * input_image[x, y, c] >> 5,
                    name="out"), 0, 255)
    U = hcl.update(output_image, lambda x, y, c: out[x, y, c])

    return U
Example #16
 def test_encoding(m, preTestData):
     test_temp = hcl.compute((testData.shape[1], dim), lambda x, y: itemMem[testData[m][x]][y]^idMem[x][y], name = "test_temp")
     k2 = hcl.reduce_axis(0, testData.shape[1], 'k2')
     test_result = hcl.compute((dim,), lambda x: hcl.sum(test_temp[k2, x], axis = k2, dtype=hcl.Int()), name = "test_result")
     with hcl.for_(0, dim) as n:
         preTestData[m][n] = test_result[n]
     with hcl.if_((m+1)%100 == 0):
         hcl.print((m+1), "Finish encoding %d testing data\n")
Example #17
def gemm_compute(matrix_1, matrix_2):
    m = matrix_1.shape[0]
    n = matrix_2.shape[1]
    k = matrix_1.shape[1]
    assert matrix_1.shape[1] == matrix_2.shape[0]
    r = hcl.reduce_axis(0, k, 'k')
    temp = hcl.compute((m, n), lambda x, y: hcl.sum(matrix_1[x, r] * matrix_2[r, y], axis=r), name='matrix_3')
    return temp
Example #18
 def train_encoding(m, preTrainData):
     train_temp = hcl.compute((trainData.shape[1], dim), lambda x, y: itemMem[trainData[m][x]][y] ^ idMem[x][y], name = "train_temp")
     k1 = hcl.reduce_axis(0, trainData.shape[1], 'k1')
     train_result = hcl.compute((dim,), lambda x: hcl.sum(train_temp[k1, x], axis = k1, dtype=hcl.Int()), name = "train_result")
     with hcl.for_(0, dim) as n:
         preTrainData[m][n] = train_result[n]
     with hcl.if_((m + 1) % 1000 == 0):
         hcl.print((m+1), "Finish encoding %d training data\n")
Example #19
def sobel(A, Gx, Gy):
    B = hcl.compute((height, width),
                    lambda x, y: A[x][y][0] + A[x][y][1] + A[x][y][2], "B")

    r = hcl.reduce_axis(0, 3)
    c = hcl.reduce_axis(0, 3)
    D = hcl.compute((height - 2, width - 2), lambda x, y: hcl.sum(
        B[x + r, y + c] * Gx[r, c], axis=[r, c], name="sum1"), "D")

    t = hcl.reduce_axis(0, 3)
    g = hcl.reduce_axis(0, 3)
    E = hcl.compute((height - 2, width - 2), lambda x, y: hcl.sum(
        B[x + t, y + g] * Gy[t, g], axis=[t, g], name="sum2"), "E")

    # constant factor to normalize the output
    return hcl.compute((height - 2, width - 2),
                       lambda x, y: hcl.sqrt(D[x][y] * D[x][y] + E[x][y] * E[x][y]) * 0.05891867, "Fimg")
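Assuming the sobel kernel above and its height/width globals are in scope, here is a hedged driver sketch following the build-and-run pattern of Example #11; the image size and filter values are illustrative assumptions.

import heterocl as hcl
import numpy as np

height, width = 64, 64   # hypothetical image size
hcl.init(hcl.Float())    # float default dtype so hcl.sqrt is well-defined

A = hcl.placeholder((height, width, 3), "A")
Gx = hcl.placeholder((3, 3), "Gx")
Gy = hcl.placeholder((3, 3), "Gy")

s = hcl.create_schedule([A, Gx, Gy], sobel)
f = hcl.build(s)

np_A = np.random.rand(height, width, 3)
np_gx = np.array([[-1., 0., 1.], [-2., 0., 2.], [-1., 0., 1.]])
np_gy = np_gx.T
hcl_F = hcl.asarray(np.zeros((height - 2, width - 2)))
f(hcl.asarray(np_A), hcl.asarray(np_gx), hcl.asarray(np_gy), hcl_F)
print(hcl_F.asnumpy())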
Example #20
def batch_matmul(x, y, name="batch_matmul"):
    out_shape = (x.shape[0], x.shape[1], y.shape[1])  # y is laid out (batch, N, K); see the note below
    k = hcl.reduce_axis(0, x.shape[2], "k")
    return hcl.compute(
        out_shape,
        lambda b, m, n: hcl.sum(x[b, m, k] * y[b, n, k], axis=[k]),
        name=name,
        dtype=x.dtype)
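Note that the second operand is indexed as y[b, n, k], i.e. it is expected in transposed (batch, N, K) layout, which is why out_shape takes N from y.shape[1]. A short NumPy sketch of the equivalent computation (shapes are illustrative):

import numpy as np

b, m, n, k = 2, 4, 5, 3                 # hypothetical sizes
x = np.random.rand(b, m, k)
y = np.random.rand(b, n, k)             # second operand stored transposed
out = np.einsum("bmk,bnk->bmn", x, y)   # what batch_matmul computes
assert np.allclose(out, x @ y.transpose(0, 2, 1))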
Example #21
def sobel(A, Gx, Gy):

    B = hcl.compute((height, width),
                    lambda x, y: A[x][y][0] + A[x][y][1] + A[x][y][2], "B")
    r = hcl.reduce_axis(0, 3)
    c = hcl.reduce_axis(0, 3)
    D = hcl.compute((height, width), lambda x, y: hcl.select(
        hcl.and_(x > 0, x < (height - 1), y > 0, y < (width - 1)),
        hcl.sum(B[x + r, y + c] * Gx[r, c], axis=[r, c]), B[x, y]), "Gx")
    t = hcl.reduce_axis(0, 3)
    g = hcl.reduce_axis(0, 3)
    E = hcl.compute((height, width), lambda x, y: hcl.select(
        hcl.and_(x > 0, x < (height - 1), y > 0, y < (width - 1)),
        hcl.sum(B[x + t, y + g] * Gy[t, g], axis=[t, g]), B[x, y]), "Gy")

    return hcl.compute(
        (height, width), lambda x, y:
        (hcl.sqrt(D[x][y] * D[x][y] + E[x][y] * E[x][y])) / 4328 * 255)
Example #22
def SgdLR(data, label, theta, lut):

    label_local = hcl.unpack(label, name="label_local")
    theta_local = hcl.unpack(theta, name="theta_local")
    data_local = hcl.unpack(data, name="data_local")

    FTYPE = theta_local.dtype

    def Sigmoid(exponent):
        ret = hcl.scalar(0.0, "sigmoid", FTYPE)
        with hcl.if_(exponent > hcl.cast(FTYPE, 4.0)):
            ret[0] = 1.0
        with hcl.elif_(exponent < hcl.cast(FTYPE, -4.0)):
            ret[0] = 0.0
        with hcl.else_():
            with hcl.if_(exponent < hcl.cast(FTYPE, 0.0)):
                num = hcl.scalar(0, dtype=hcl.UFixed(18, 8))
                num[0][18:0] = exponent[29:11]
                num[0] = ~(num[0] << 8) + 1
                index = 2047.0 - num[0]
                ret[0] = lut[hcl.cast(hcl.Int(32), index)]
            with hcl.else_():
                index = exponent[21:11]
                ret[0] = lut[hcl.cast(hcl.Int(32), index)]
        return ret[0]

    with hcl.stage("M"):
        with hcl.for_(0, NUM_TRAINING) as train_id:
            training_instance = hcl.compute(
                (NUM_FEATURES, ),
                lambda x: data_local[train_id * NUM_FEATURES + x],
                "training_instance", data_local.dtype)

            # Main Computation
            k = hcl.reduce_axis(0, NUM_FEATURES, "k")
            dot = hcl.compute(
                (1, ),
                lambda x: hcl.sum(theta_local[k] * training_instance[k],
                                  axis=k,
                                  dtype=FTYPE),
                "dot",
                dtype=FTYPE)
            gradient = hcl.compute((NUM_FEATURES, ),
                                   lambda x: (Sigmoid(dot[0]) - label_local[train_id]) * training_instance[x],
                                   "gradient",
                                   dtype=FTYPE)
            update = hcl.update(
                theta_local,
                lambda x: theta_local[x] - 2565.0 * gradient[x],
                name="update")

    theta_pack = hcl.pack(theta_local, name="theta_pack", dtype=theta.dtype)
    stream_out = hcl.update(theta, lambda x: theta_pack[x], name="stream_out")

    return stream_out
Example #23
def softmax(out, x):
    assert len(x.shape) == 2, "only support 2-dim softmax"
    m, n = x.shape
    k = hcl.reduce_axis(0, n)
    max_elem = hcl.compute((m, ), lambda i: hcl.max(x[i, k], axis=k))
    k = hcl.reduce_axis(0, n)
    expsum = hcl.compute(
        (m, ), lambda i: hcl.sum(hcl.exp(x[i, k] - max_elem[i]), axis=k))
    return hcl.update(out,
                      lambda i, j: hcl.exp(x[i, j] - max_elem[i]) / expsum[i])
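The two reduce axes implement a numerically stable softmax: the per-row maximum is subtracted before exponentiation, which helps hcl.exp avoid overflow. For reference, an equivalent NumPy sketch:

import numpy as np

def softmax_ref(x):
    # subtract the per-row max first, exactly as the kernel above does
    e = np.exp(x - x.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)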
Example #24
def sobelAlgo(A, B, Fx, Fy):
    def rgb_sum(x, y):
        B[x][y] = A[x][y][0] + A[x][y][1] + A[x][y][2]

    hcl.mutate(B.shape, lambda x, y: rgb_sum(x, y))
    #B = hcl.compute((height+2, width+2), lambda x,y:A[x][y][0]+A[x][y][1]+A[x][y][2], "B")
    r = hcl.reduce_axis(0, 3)
    c = hcl.reduce_axis(0, 3)
    Gx = hcl.compute(
        (height, width),
        lambda y, x: hcl.sum(B[y + r, x + c] * Fx[r, c], axis=[r, c]), "Gx")
    t = hcl.reduce_axis(0, 3)
    g = hcl.reduce_axis(0, 3)
    Gy = hcl.compute(
        (height, width),
        lambda y, x: hcl.sum(B[y + t, x + g] * Fy[t, g], axis=[t, g]), "Gy")
    return hcl.compute(
        (height, width), lambda y, x:
        (hcl.sqrt(Gx[y][x] * Gx[y][x] + Gy[y][x] * Gy[y][x]) * 0.05891867))
Example #25
def test_reuse_compute_sum():
    hcl.init()
    rx = hcl.reduce_axis(0, 3, name="rx")
    A = hcl.placeholder((10, 10), name="A")
    B = hcl.compute((10, 10), lambda y, x: A[y, x], "B")
    C = hcl.compute((10, 8), lambda y, x: hcl.sum(B[y, x + rx], axis=rx), "C")
    s = hcl.create_schedule([A, B, C])
    RB = s.reuse_at(B, s[C], C.axis[1])
    print(hcl.lower(s))
    f = hcl.build(s)
Example #26
def sobel(imgF, Gx, Gy):
    A = hcl.compute((height + 2, width + 2),
                    lambda x, y: imgF[x][y][0] + imgF[x][y][1] + imgF[x][y][2],
                    "A")

    r = hcl.reduce_axis(0, 3)
    c = hcl.reduce_axis(0, 3)

    resX = hcl.compute((height, width), lambda x, y: hcl.sum(
        A[x + r, y + c] * Gx[r, c], axis=[r, c], name="sum1"), "X")

    t = hcl.reduce_axis(0, 3)
    g = hcl.reduce_axis(0, 3)

    resY = hcl.compute((height, width), lambda x, y: hcl.sum(
        A[x + t, y + g] * Gy[t, g], axis=[t, g], name="sum2"), "Y")

    return hcl.compute((height, width),
                       lambda x, y: hcl.sqrt(resX[x][y] * resX[x][y] + resY[x][y] * resY[x][y]) / 4328 * 255, "R")
Example #27
def sobel(B, G):
    r = hcl.reduce_axis(0, 3)
    c = hcl.reduce_axis(0, 3)
    return hcl.compute(
        (height, width),
        lambda x, y: hcl.select(
            hcl.and_(x > 0, x < (height - 1), y > 0, y < (width - 1)),
            hcl.sum(B[x + r, y + c] * G[r, c], axis=[r, c]), B[x, y]),
        "D",
        dtype=hcl.Float())
Example #28
    def update(l, prototype, prototypeCounter, max):
        hcl.print((l+1),"%d:Use hard examples to update the prototype counters.\n")

        ###data preparation
        distance = hcl.compute((in_train.shape[1],), lambda x: 0, 'distance', dtype=hcl.UInt(in_bw))
        pre_dist = hcl.compute((in_train.shape[1],), lambda x: 0, "pre_dist")
        hamming_dist = hcl.compute((numClasses,), lambda x: 0, "hamming_dist")
        m = hcl.reduce_axis(0, in_train.shape[1], "m")
        ###

        with hcl.for_(0, in_train.shape[0]) as i:
            hcl.print((i),"%d suc\n")
            # pack_proto = hcl.pack(prototype, axis=1, dtype=hcl.UInt(in_bw), name="pack_proto") 
            with hcl.for_(0, numClasses) as n:
                #Do HDC multiplication (XOR) on sample[i]'s HDV and prototype[n]'s HDV (elementwise on the high-bit data)
                hcl.update(distance, lambda x: in_train[i][x] ^ prototype[n][x])
                #Calculate the Hamming distance of the two vectors by counting the 1s
                hcl.update(pre_dist, lambda x: popcount(distance[x]))
                hcl.print((),"sum of 1s suc")
                hamming_dist[n] = hcl.sum(pre_dist[m], axis=m)

            #Find the one with the smallest Hamming distance and choose its label as the predicted label
            pred = hcl.scalar(0, 'pred')
            with hcl.for_(0, hamming_dist.shape[0]) as j:
                with hcl.if_(hamming_dist[j] < hamming_dist[pred]):
                    pred.v = j

            #Adjust the prototype vectors: add the sample vector to its true-label prototype and subtract it from its predicted-label prototype
            with hcl.if_(pred.v != trainLabels[i]):
                max[trainLabels[i]] += 1
                max[pred] -= 1
                with hcl.for_(0, in_train.shape[1]) as m:
                    with hcl.for_(0, in_bw) as bit:
                        # with hcl.if_(in_train[i][m][bit] == 1):
                        #     ###########
                        #     prototypeCounter[trainLabels[i]][m*in_bw+bit] += 1
                        #     prototypeCounter[pred][m*in_bw+bit] -= 1
                        prototypeCounter[trainLabels[i]][m*in_bw+bit] += in_train[i][m][bit]
                        prototypeCounter[pred][m*in_bw+bit] -= in_train[i][m][bit]
                        with hcl.if_(max[trainLabels[i]] % 2 == 0):
                            with hcl.if_(prototypeCounter[trainLabels[i]][m*in_bw+bit] - max[trainLabels[i]]/2 == 0):
                                prototype[trainLabels[i]][m][bit] &= 1
                        with hcl.else_():
                            prototype[trainLabels[i]][m][bit] = hcl.select(prototypeCounter[trainLabels[i]][m*in_bw+bit] - max[trainLabels[i]]/2 > 0, 1, 0)

                        with hcl.if_(max[pred] % 2 == 0):
                            with hcl.if_(prototypeCounter[pred][m*in_bw+bit] - max[pred]/2 == 0):
                                prototype[pred][m][bit] &= 1
                        with hcl.else_():
                            prototype[pred][m][bit] = hcl.select(prototypeCounter[pred][m*in_bw+bit] - max[pred]/2 > 0, 1, 0)

        #print the accuracy
        hcl.mutate((1,), lambda x: test_hdc_accu(prototype, in_train, trainLabels, 1), 'training_update')
        hcl.mutate((1,), lambda x: test_hdc_accu(prototype, in_test, testLabels, 2), 'testing_update')
Example #29
def guassian(A, G):
    h = hcl.reduce_axis(0, size)
    w = hcl.reduce_axis(0, size)
    return hcl.compute(
        (height, width),
        lambda x, y: hcl.select(
            hcl.and_(x > (size - 1), x < (height - size), y > (size - 1), y <
                     (width - size)),
            hcl.sum(A[x + h, y + w] * G[h, w], axis=[h, w]), A[x, y]),
        "F",
        dtype=hcl.Float())
Example #30
def test_reuse_compute_nd():
    hcl.init()
    nz = 1
    rx = hcl.reduce_axis(0, 3, name="rx")
    rz = hcl.reduce_axis(0, nz, name="rz")
    A = hcl.placeholder((nz, 10, 10), name="A")
    B = hcl.compute((10, 8),
                    lambda y, x: hcl.sum(A[rz, y, x + rx], axis=[rz, rx]), "B")
    s = hcl.create_schedule([A, B])
    RB = s.reuse_at(A, s[B], B.axis[1])
    print(hcl.lower(s))
    f = hcl.build(s)