Example 1
import halide as hl


def main():
    hl.load_plugin("autoschedule_li2018")

    x = hl.Var('x')
    f_in = hl.Func('in')
    f_in[x] = hl.f32(x)  # Cast to float 32
    f_0 = hl.Func('f_0')
    f_0[x] = 2 * f_in[x]
    f_1 = hl.Func('f_1')
    f_1[x] = hl.sin(f_0[x])
    f_2 = hl.Func('f_2')
    f_2[x] = f_1[x] * f_1[x]

    # Setup
    f_2.set_estimate(x, 0, 1000)
    p = hl.Pipeline(f_2)
    target = hl.Target()
    # Only first parameter is used (number of cores on CPU)
    params = hl.MachineParams(32, 0, 0)
    result = p.auto_schedule('Li2018', target, params)
    print('Schedule:')
    print(result.schedule_source)

    p.compile_jit()  # compile
    buf = p.realize(1000)  # compute and get the buffer
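
# Not part of the original listing: a standard entry-point guard so the
# example can be run directly as a script.
if __name__ == '__main__':
    main()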
Example 2
    def gen_pipeline(self):
        '''define the Halide pipeline that generates the outputs'''

        logging.info("generating halide pipeline")

        rv, g_fock_out = [self.outputs[f] for f in ["rv", "g_fock_out"]]

        # Build the pipeline over both outputs and return it together with the I/O maps
        self.pipeline = hl.Pipeline([rv, g_fock_out])
        return self.pipeline, self.outputs, self.inputs
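The pattern worth noting here is that hl.Pipeline accepts a list of output Funcs and compiles them together. A minimal, self-contained sketch of that pattern, with illustrative definitions standing in for the class's real rv and g_fock_out Funcs:

import halide as hl

x = hl.Var('x')
rv = hl.Func('rv')
g_fock_out = hl.Func('g_fock_out')
rv[x] = x * 2
g_fock_out[x] = x + 1

# A single Pipeline wraps both outputs so they can be compiled and realized together.
multi_output = hl.Pipeline([rv, g_fock_out])
multi_output.compile_jit()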
Example 3
def test_basics5():
    # Test Func.in_()
    x, y = hl.Var('x'), hl.Var('y')
    f = hl.Func('f')
    g = hl.Func('g')
    h = hl.Func('h')
    f[x, y] = y
    r = hl.RDom([(0, 100)])
    g[x] = 0
    g[x] += f[x, r]
    h[x] = 0
    h[x] += f[x, r]
    f.in_(g).compute_at(g, x)
    f.in_(h).compute_at(h, x)
    g.compute_root()
    h.compute_root()
    p = hl.Pipeline([g, h])
    p.compile_jit()
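    # Sketch (not part of the original test): realize each output and check
    # the reduction; the extent of 10 is an arbitrary choice.
    buf_g = g.realize(10)
    buf_h = h.realize(10)
    # Each entry is the sum over r of f(x, r) = 0 + 1 + ... + 99 = 4950.
    assert buf_g[0] == 4950
    assert buf_h[0] == 4950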
Example 4
def test_vector_tile():
    # Test Func.tile() and Stage.tile() with vector arguments
    x, y, z = [hl.Var(c) for c in 'xyz']
    xi, yi, zi = [hl.Var(c + "i") for c in 'xyz']
    xo, yo, zo = [hl.Var(c + "o") for c in 'xyz']
    f = hl.Func('f')
    g = hl.Func('g')
    h = hl.Func('h')
    f[x, y] = y
    f[x, y] += x
    g[x, y, z] = x + y
    g[x, y, z] += z
    f.tile([x, y], [xo, yo], [x, y], [8, 8])
    f.update(0).tile([x, y], [xo, yo], [xi, yi], [8, 8])
    g.tile([x, y], [xo, yo], [x, y], [8, 8], hl.TailStrategy.RoundUp)
    g.update(0).tile([x, y], [xo, yo], [xi, yi], [8, 8],
                     hl.TailStrategy.GuardWithIf)
    p = hl.Pipeline([f, g])
    p.compile_jit()
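    # Sketch (not part of the original test): exercise the tiled schedules.
    # The 16x16 extents and the depth of 4 are arbitrary multiples of the 8x8
    # tile size; the positional-size realize() form follows the first example.
    buf_f = f.realize(16, 16)
    buf_g = g.realize(16, 16, 4)
    assert buf_f[3, 5] == 3 + 5         # f = y, then += x
    assert buf_g[3, 5, 2] == 3 + 5 + 2  # g = x + y, then += z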
Example 5
import math
from pprint import pprint

import halide as hl


# Helper functions used below (f32, mirror, gaussian, reduce_layer,
# expand_layer, laplacian, region_energy, energy_maxes, merge_laplacian,
# entropy, deviation) are defined elsewhere in the source module.
def focus_stack_pipeline():
    outputs = []
    start_w, start_h = 3000, 2000
    number_of_layers = 5
    layer_sizes = [[start_w, start_h]]

    for i in range(number_of_layers):
        # Take the size of the previous layer and halve it for the next one
        w, h = layer_sizes[-1]
        layer_sizes.append([int(math.ceil(w / 2.0)), int(math.ceil(h / 2.0))])

    # Append the last size once more: the top (gaussian) layer is emitted
    # twice, once merged on entropy and once merged on deviation.
    layer_sizes.append(layer_sizes[-1])

    input = hl.ImageParam(hl.UInt(8), 3)
    input.dim(0).set_estimate(0, start_w)
    input.dim(1).set_estimate(0, start_h)
    input.dim(2).set_estimate(0, 3)

    lap_inputs = []
    max_energy_inputs = []

    for i in range(number_of_layers + 1):
        lap_layer = hl.ImageParam(hl.Float(32), 3, "lap{}".format(i))
        lap_inputs.append(lap_layer)
        w, h = layer_sizes[i]
        lap_layer.dim(0).set_estimate(0, w)
        lap_layer.dim(1).set_estimate(0, h)
        lap_layer.dim(2).set_estimate(0, 3)

        if i == number_of_layers:
            # last (top - small) layer
            # Add the last laplacian (really direct from gaussian) layer
            # in twice. We output one maxed on entropies and one maxed on
            # deviations.
            lap_layer = hl.ImageParam(hl.Float(32), 3, "lap{}".format(i+1))
            lap_inputs.append(lap_layer)
            lap_layer.dim(0).set_estimate(0, w)
            lap_layer.dim(1).set_estimate(0, h)
            lap_layer.dim(2).set_estimate(0, 3)


            entropy_layer = hl.ImageParam(hl.Float(32), 2, "entropy{}".format(i))
            max_energy_inputs.append(entropy_layer)
            entropy_layer.dim(0).set_estimate(0, w)
            entropy_layer.dim(1).set_estimate(0, h)

            deviation_layer = hl.ImageParam(hl.Float(32), 2, "deviation{}".format(i))
            max_energy_inputs.append(deviation_layer)
            deviation_layer.dim(0).set_estimate(0, w)
            deviation_layer.dim(1).set_estimate(0, h)
        else:
            max_energy_layer = hl.ImageParam(hl.Float(32), 2, "max_energy{}".format(i))
            max_energy_inputs.append(max_energy_layer)
            max_energy_layer.dim(0).set_estimate(0, w)
            max_energy_layer.dim(1).set_estimate(0, h)

    x, y, c = hl.Var("x"), hl.Var("y"), hl.Var("c")
    hist_index = hl.Var('hist_index')
    clamped = f32(x, y, c, mirror(input, start_w, start_h))

    f = hl.Func("input32")
    f[x, y, c] = clamped[x, y, c]

    energy_outputs = []
    gaussian_layers = [f]

    laplacian_layers = []
    merged_laps = []

    for layer_num in range(number_of_layers):
        # Size of the current (source) layer
        w, h = layer_sizes[layer_num]

        start_layer = gaussian_layers[-1]

        # Blur the image
        gaussian_layer = gaussian(x, y, c, start_layer)

        # Grab next layer size
        # w,h = layer_sizes[layer_num+1]

        # Reduce the layer size and add it into the list
        next_layer = reduce_layer(x, y, c, gaussian_layer)
        gaussian_layers.append(next_layer)

        # Expand back up
        expanded = expand_layer(x, y, c, next_layer)

        # Generate the laplacian: the original layer minus its
        # blurred/reduced/expanded version
        laplacian_layer = laplacian(x, y, c, start_layer, expanded)
        laplacian_layers.append(laplacian_layer)

        # Calculate energies for the gaussian layer
        prev_energies = mirror(max_energy_inputs[layer_num], w, h)
        next_energies = region_energy(x, y, c, laplacian_layer)

        prev_laplacian = mirror(lap_inputs[layer_num], w, h)
        merged_energies = energy_maxes(x, y, c, prev_energies, next_energies)

        merged_lap = merge_laplacian(x, y, c, merged_energies, next_energies, prev_laplacian, laplacian_layer)

        energy_outputs.append([[w,h,True],merged_energies])
        merged_laps.append(merged_lap)

        # Add estimates
        next_layer.set_estimate(x, 0, w)
        next_layer.set_estimate(y, 0, h)
        next_layer.set_estimate(c, 0, 3)

    # Handle last layer differently
    w,h = layer_sizes[-1]

    # The next_lap is really just the last gaussian layer
    next_lap = gaussian_layers[-1]

    prev_entropy_laplacian = mirror(lap_inputs[-2], w, h)
    prev_entropy = mirror(max_energy_inputs[-2], w, h)
    next_entropy = entropy(x, y, c, next_lap, w, h, hist_index)
    merged_entropy = energy_maxes(x, y, c, prev_entropy, next_entropy)
    merged_lap_on_entropy = merge_laplacian(x, y, c, merged_entropy, next_entropy, prev_entropy_laplacian, next_lap)
    merged_laps.append(merged_lap_on_entropy)

    prev_deviation_laplacian = mirror(lap_inputs[-1], w, h)
    prev_deviation = mirror(max_energy_inputs[-1], w, h)
    next_deviation = deviation(x, y, c, next_lap)
    merged_deviation = energy_maxes(x, y, c, prev_deviation, next_deviation)
    merged_lap_on_deviation = merge_laplacian(x, y, c, merged_deviation, next_deviation, prev_deviation_laplacian, next_lap)
    merged_laps.append(merged_lap_on_deviation)

    energy_outputs.append([[w,h,True],merged_entropy])
    energy_outputs.append([[w,h,True],merged_deviation])


    print("NUM LAYERS: ", len(gaussian_layers), len(laplacian_layers), layer_sizes)
    
    # Add all of the laplacian layers to the output first
    for i, merged_lap in enumerate(merged_laps):
        w, h = layer_sizes[i]
        mid = (i < (len(merged_laps) - 2))
        outputs.append([[w, h, False, mid], merged_lap])

    # Then energies
    for energy_output in energy_outputs:
        outputs.append(energy_output)

    new_outputs = []
    for size, output in outputs:
        w = size[0]
        h = size[1]
        gray = len(size) > 2 and size[2]
        mid = len(size) > 3 and size[3]

        # Mid and final layers are passed through unchanged here.
        uint8_output = output

        uint8_output.set_estimate(x, 0, w)
        uint8_output.set_estimate(y, 0, h)
        if not gray:
            uint8_output.set_estimate(c, 0, 3)

        new_outputs.append([size, uint8_output])

    outputs = new_outputs

    print("OUTPUT LAYERS: ")
    pprint(outputs)

    output_funcs = [output for _, output in outputs]
    
    pipeline = hl.Pipeline(output_funcs)

    return {
        'pipeline': pipeline,
        'inputs': [input] + lap_inputs + max_energy_inputs
    }
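A hypothetical driver for focus_stack_pipeline(), showing how the returned dictionary might be consumed; the target choice and the compile step are assumptions, and concrete input buffers would still have to be bound to each ImageParam before realizing:

result = focus_stack_pipeline()
pipeline = result['pipeline']
image_params = result['inputs']

# JIT-compile for the host target; each ImageParam in image_params would be
# bound to a concrete hl.Buffer (via ImageParam.set) before calling realize.
target = hl.get_jit_target_from_environment()
pipeline.compile_jit(target)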