Example #1
def add(options, input1, input2, output):
    shift1, multiplier1 = quantization.compute_multiplier_for_conv2d(
        input1.scale, 1.0, output.scale)
    shift2, multiplier2 = quantization.compute_multiplier_for_conv2d(
        input2.scale, 1.0, output.scale)
    p('computing residual ....')
    x = _add(input1.data, input1.zero_point, shift1, multiplier1, input2.data,
             input2.zero_point, shift2, multiplier2, output.shape,
             output.zero_point)
    print 'done'
    return x
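The two (shift, multiplier) pairs encode the real rescaling factors input1.scale / output.scale and input2.scale / output.scale in fixed point. The quantization module itself is not shown in these examples; the following is a minimal sketch, assuming a gemmlowp-style decomposition (the function name and the Q31 format are assumptions, not the module's actual API):

def compute_multiplier_sketch(weights_scale, inputs_scale, output_scale):
    # Decompose M = (weights_scale * inputs_scale) / output_scale,
    # assuming 0 < M < 1, into a right shift and a Q31 multiplier.
    m = (weights_scale * inputs_scale) / output_scale
    shift = 0
    while m < 0.5:  # normalize into [0.5, 1)
        m *= 2.0
        shift += 1
    return shift, int(round(m * (1 << 31)))

# e.g. rescaling an input with scale 0.0078125 to an output with scale
# 0.023528477177023888, as add() does with 1.0 as the middle factor:
shift1, multiplier1 = compute_multiplier_sketch(0.0078125, 1.0,
                                                0.023528477177023888)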
Example #2
def conv2d(options, inputs, weights, bias, output):
    shift, multiplier = quantization.compute_multiplier_for_conv2d(
        weights.scale, inputs.scale, output.scale)
    padding_h, padding_w = (weights.shape[1] // 2, weights.shape[2] // 2)
    # We flatten the weights and subtract the zero-point offset before
    # passing them to the _conv2d function. This could also be done in a
    # preprocessing step.
    p('flattening weights ....')
    _wd = np.array(weights.data, dtype='int64')
    wd = _flatten_weights(_wd - weights.zero_point, weights.shape)
    print 'done'
    p('computing conv2d ....')
    x = _conv2d(
        # input
        inputs.data,
        inputs.zero_point,
        inputs.shape,
        # output
        output.zero_point,
        output.shape,
        # weights
        wd,
        weights.shape,
        # bias
        bias.data,
        # options
        options.stride,
        (padding_h, padding_w),
        shift,
        multiplier)
    print 'done'
    return x
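_flatten_weights is not shown in these examples. A hypothetical stand-in, assuming it produces the layout that the later examples build by hand (one flat kernel of length h * w per (output channel, input channel) pair), could look like this:

import numpy as np

def flatten_weights_sketch(wd, w_shape):
    # Hypothetical stand-in for _flatten_weights: reshape a
    # (n_out, h, w, n_in) weight tensor so that each (output channel,
    # input channel) pair holds one flat h * w kernel, letting the
    # conv2d body reduce to dot products against input windows.
    n_out, h, w, n_in = w_shape
    wd = np.asarray(wd, dtype='int64').reshape(w_shape)
    return wd.transpose(0, 3, 1, 2).reshape(n_out, n_in, h * w)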
Example #3
def dwconv2d(options, inputs, weights, bias, output):
    shift, multiplier = quantization.compute_multiplier_for_conv2d(
        weights.scale, inputs.scale, output.scale)
    padding_h, padding_w = (weights.shape[1] // 2, weights.shape[2] // 2)
    wd = np.array(weights.data, dtype='int64')
    p('computing depthwise convolution ....')
    x = _dwconv2d(
        # input
        inputs.data,
        inputs.zero_point,
        inputs.shape,
        # output
        output.zero_point,
        output.shape,
        # weights
        wd - weights.zero_point,
        weights.shape,
        # bias
        bias.data,
        # options
        options.stride,
        (padding_h, padding_w),
        shift,
        multiplier)
    print 'done'
    return x
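For contrast with the regular conv2d above: a depthwise convolution filters every input channel with its own kernel and never sums across channels. A toy numeric illustration (hypothetical values, a single VALID-style window):

import numpy as np

x = np.arange(18, dtype='int64').reshape(3, 3, 2)  # HWC input, 2 channels
k = np.ones((3, 3, 2), dtype='int64')              # one 3x3 kernel per channel
out = np.array([(x[..., c] * k[..., c]).sum() for c in range(2)])
# out[c] depends only on channel c of the input: no cross-channel sum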
Example #4
def dwconv2d(options, inputs, weights, bias, output):
    # assume number of batches == 1
    depth_mult, weights_h, weights_w, n_channels_in = weights.shape
    _, inputs_h, inputs_w, n_channels_in_ = inputs.shape

    assert n_channels_in_ == n_channels_in

    # quantization params
    inputs_S, inputs_Z = inputs.scale, inputs.zero_point
    weights_S, weights_Z = weights.scale, weights.zero_point
    bias_S, bias_Z = bias.scale, bias.zero_point
    output_S, output_Z = output.scale, output.zero_point

    n, qm = quantization.compute_multiplier_for_conv2d(weights_S, inputs_S,
                                                       output_S)

    multiplier = (weights_S * inputs_S) / output_S

    # other options
    stride_h, stride_w = options.stride
    # a bit crude, but this provides the maximum padding we need, since we
    # assume "SAME" padding type.
    padding_h, padding_w = (weights_h // 2, weights_w // 2)

    output_shape = output.shape
    _, output_h, output_w, n_channels_out = output_shape

    print 'input shape: ', inputs.shape
    print 'weights shape: ', weights.shape
    print 'output shape: ', output.shape

    sys.stdout.write('prepping input ... ')
    sys.stdout.flush()

    # Same as with conv2d: convert each channel into a matrix where each
    # row corresponds to a window.
    inputs_per_channel = list()
    for c in range(n_channels_in):
        Z = list()
        for i in range(output_h):
            for j in range(output_w):
                x = i * stride_h
                y = j * stride_w
                z = list()
                for a in range(weights_h):
                    for b in range(weights_w):
                        xx = x + a - padding_h
                        yy = y + b - padding_w
                        if (0 <= xx < inputs_h) and (0 <= yy < inputs_w):
                            z.append(int64(inputs[0][xx][yy][c] - inputs_Z))
                        else:
                            z.append(int64(0))
                Z.append(z)
        inputs_per_channel.append(Z)
    inputs_per_channel = np.array(inputs_per_channel, dtype='int64')
    print inputs_per_channel.shape

    print 'done'

    # Since we don't need to sum across channels in this case, we could
    # easily merge this computation with the step above. We keep them
    # separate here, since that more closely reflects the MPC
    # implementation.

    sys.stdout.write('flattening weights ... ')
    sys.stdout.flush()

    flat_weights = list()
    for d in range(depth_mult):
        ww = list()
        for c_ in range(n_channels_in):
            w = list()
            for a in range(weights_h):
                for b in range(weights_w):
                    w.append(weights[d][a][b][c_] - weights_Z)
            ww.append(np.array(w, dtype='int64'))
        flat_weights.append(ww)

    print 'done'

    sys.stdout.write('computing dwconv2d ... ')
    sys.stdout.flush()

    max_print = 10
    print_counter = 0

    output_list = list()
    for d in range(depth_mult):
        for c in range(n_channels_in):
            W_flat = flat_weights[d][c]
            I_c = inputs_per_channel[c]
            if print_counter <= max_print:
                print 'I_c:\n----------------------------------'
                print I_c
                print '----------------------------------------'
                print 'W_flat:\n-------------------------------'
                print W_flat
            print_counter += 1
            r = I_c.dot(W_flat).reshape(output_shape[1:-1])
            r += bias[c]  # assumes depth_mult == 1 so channel order lines up
            output_list.append(r)

    print 'done'

    sys.stdout.write('scaling outputs ... ')
    sys.stdout.flush()

    output_final = np.zeros(output_shape)
    for c in range(n_channels_out):
        for i in range(output_shape[1]):
            for j in range(output_shape[2]):
                v = output_list[c][i][j]
                # v = mult_by_quant_mult(v, qm, n)
                v = rounded_mult(v, multiplier)
                v += output_Z
                v = 255 if v > 255 else v
                v = 0 if v < 0 else v
                output_final[0][i][j][c] = v

    print 'done'
    return output_final
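The "one row per window" input prep above can be stated compactly with numpy padding. A sketch for a single channel with stride 1 and SAME padding (the quantized version subtracts inputs_Z first, so the zero padding contributes nothing to the dot products):

import numpy as np

def im2col_channel(img, kh, kw):
    # One row per output position; zeros where the window overhangs.
    h, w = img.shape
    ph, pw = kh // 2, kw // 2
    padded = np.pad(img.astype('int64'), ((ph, ph), (pw, pw)))
    rows = [padded[i:i + kh, j:j + kw].ravel()
            for i in range(h) for j in range(w)]
    return np.stack(rows)  # shape (h * w, kh * kw)

m = im2col_channel(np.arange(16).reshape(4, 4), 3, 3)  # shape (16, 9)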
Example #5
def conv2d(options, inputs, weights, bias, output):

    # assume number of batches == 1
    n_channels_out, weights_h, weights_w, n_channels_in = weights.shape
    _, inputs_h, inputs_w, n_channels_in_ = inputs.shape

    assert n_channels_in_ == n_channels_in

    # quantization params
    inputs_S, inputs_Z = inputs.scale, inputs.zero_point
    weights_S, weights_Z = weights.scale, weights.zero_point
    bias_S, bias_Z = bias.scale, bias.zero_point
    output_S, output_Z = output.scale, output.zero_point

    n, qm = quantization.compute_multiplier_for_conv2d(weights_S, inputs_S,
                                                       output_S)

    # other options
    stride_h, stride_w = options.stride
    # a bit crude, but this provides the maximum padding we need, since we
    # assume "SAME" padding type.
    padding_h, padding_w = (weights_h // 2, weights_w // 2)

    output_shape = (int(math.ceil(float(inputs_h) / stride_h)),
                    int(math.ceil(float(inputs_w) / stride_w)), n_channels_out)

    output_h, output_w, _ = output_shape

    print 'input shape: ', inputs.shape
    print 'weights shape: ', weights.shape
    print 'output shape: ', output_shape

    sys.stdout.write('input prep ... ')
    sys.stdout.flush()

    # Input prep. This is essentially the Conv function from
    # reference_ops.h, except we don't perform any computation. This too
    # could be preprocessed, by marking which entries correspond to
    # padding and which don't. (Note that this doesn't leak information.)

    inputs_per_channel = list()
    for c in range(n_channels_in):
        Z = list()
        for i in range(output_h):
            for j in range(output_w):
                x = i * stride_h
                y = j * stride_w
                z = list()
                for a in range(weights_h):
                    for b in range(weights_w):
                        xx = x + a - padding_h
                        yy = y + b - padding_w
                        if (0 <= xx < inputs_h) and (0 <= yy < inputs_w):
                            z.append(int64(inputs[0][xx][yy][c]))
                        else:
                            z.append(int64(0))
                Z.append(z)
        inputs_per_channel.append(Z)
    inputs_per_channel = np.array(inputs_per_channel, dtype='int64')

    print 'done'

    sys.stdout.write('flattening weights ... ')
    sys.stdout.flush()
    # flatten and reshape weights. Again, this can be preprocessed.
    flat_weights = list()
    for c in range(n_channels_out):
        ww = list()
        for c_ in range(n_channels_in):
            w = list()
            for a in range(weights_h):
                for b in range(weights_w):
                    w.append(weights[c][a][b][c_])
            ww.append(np.array(w, dtype='int64'))
        flat_weights.append(ww)

    print 'done'

    sys.stdout.write('computing conv2d ... ')
    sys.stdout.flush()

    # This step is very cheap
    output_list = list()
    for c in range(n_channels_out):
        output_for_c = np.zeros(output_shape[:-1])
        for c_ in range(n_channels_in):
            W_flat = flat_weights[c][c_]
            I_c = inputs_per_channel[c_]
            r = (I_c - inputs_Z).dot(W_flat - weights_Z)
            r = r.reshape(output_shape[:-1])
            output_for_c += r
        output_list.append(output_for_c + bias[c])

    print 'done'

    sys.stdout.write('scaling results ... ')
    sys.stdout.flush()

    multiplier = (weights_S * inputs_S) / output_S

    # this step is very expensive (probably due to the per-element
    # rounding and clamping)
    output_final = np.zeros(output.shape)
    for c in range(n_channels_out):
        for i in range(output_shape[0]):
            for j in range(output_shape[1]):
                v = output_list[c][i][j]
                v = rounded_mult(v, multiplier)
                # v = mult_by_quant_mult(v, qm, n)
                v += output_Z
                v = 255 if v > 255 else v
                v = 0 if v < 0 else v
                output_final[0][i][j][c] = v

    print 'done'

    return output_final
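rounded_mult and quantization.quantized_multiplier_mult are referenced here but never defined in these examples. An assumed reading, consistent with how the float multiplier and the (qm, n) pair are used above:

def rounded_mult_sketch(v, multiplier):
    # float path: scale the accumulator by the real multiplier M < 1
    return int(round(v * multiplier))

def quantized_multiplier_mult_sketch(v, qm, n):
    # integer path: Q31 multiply with rounding, then a rounding right
    # shift by n -- a gemmlowp-style approximation of round(v * M)
    rounded = (v * qm + (1 << 30)) >> 31
    if n > 0:
        rounded = (rounded + (1 << (n - 1))) >> n
    return rounded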
Example #6
def dwconv2d(options, inputs, weights, bias, output):
    depth_multiplier = options.depth_multiplier

    # from here everything is essentially the same as before
    _, weights_h, weights_w, output_depth = weights.shape
    _, inputs_h, inputs_w, n_channels_in = inputs.shape
    _, output_h, output_w, output_depth_ = output.shape

    assert output_depth_ == output_depth
    assert options.dilation_factor == (1, 1)

    inputs_S, inputs_Z = inputs.scale, inputs.zero_point
    weights_S, weights_Z = weights.scale, weights.zero_point
    bias_S, bias_Z = bias.scale, bias.zero_point
    output_S, output_Z = output.scale, output.zero_point

    # multiplier = (inputs_S * weights_S) / output_S
    n, qm = quantization.compute_multiplier_for_conv2d(weights_S, inputs_S,
                                                       output_S)
    quant_mult = quantization.quantized_multiplier_mult

    stride_h, stride_w = options.stride
    padding_h, padding_w = (weights_h // 2, weights_w // 2)

    print 'input shape: ', inputs.shape
    print 'output shape:', output.shape
    print 'weights shape:', weights.shape

    print 'quantization params:'
    print 'input: %s, %s' % (inputs_S, inputs_Z)
    print 'weights: %s, %s' % (weights_S, weights_Z)
    print 'bias: %s, %s' % (bias_S, bias_Z)
    print 'output: %s, %s' % (output_S, output_Z)

    output.data = np.zeros(output.shape, dtype='uint8')

    for out_y in range(output_h):
        for out_x in range(output_w):
            for in_c in range(n_channels_in):
                for m in range(depth_multiplier):
                    oc = m + in_c * depth_multiplier
                    in_x_origin = (out_x * stride_w) - padding_w
                    in_y_origin = (out_y * stride_h) - padding_h
                    acc = 0
                    for filter_y in range(weights_h):
                        for filter_x in range(weights_w):
                            in_x = in_x_origin + filter_x
                            in_y = in_y_origin + filter_y
                            if (0 <= in_x < inputs_w) and \
                               (0 <= in_y < inputs_h):
                                iv = int32(inputs[0][in_y][in_x][in_c])
                                wv = int32(weights[0][filter_y][filter_x][oc])
                                acc += (iv - inputs_Z) * (wv - weights_Z)
                    acc += bias[oc]
                    # acc = int32(rounded_mult(acc, multiplier))
                    acc = quant_mult(acc, qm, n)
                    acc += output_Z
                    acc = 0 if acc < 0 else acc
                    acc = 255 if acc > 255 else acc
                    output[0][out_y][out_x][oc] = uint8(acc)

    return output.data
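The only non-obvious bookkeeping above is the output-channel layout oc = m + in_c * depth_multiplier. Spelled out for depth_multiplier = 2 and three input channels:

depth_multiplier, n_channels_in = 2, 3
layout = [(in_c, m, m + in_c * depth_multiplier)
          for in_c in range(n_channels_in)
          for m in range(depth_multiplier)]
# [(0, 0, 0), (0, 1, 1), (1, 0, 2), (1, 1, 3), (2, 0, 4), (2, 1, 5)]
# consecutive output channels come from the same input channel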
Example #7
def conv2d(options, inputs, weights, bias, output):

    n_channels_out, weights_h, weights_w, n_channels_in = weights.shape
    _, inputs_h, inputs_w, n_channels_in_ = inputs.shape
    _, output_h, output_w, n_channels_out_ = output.shape

    assert n_channels_in_ == n_channels_in
    assert n_channels_out_ == n_channels_out

    # dilated convolution is not supported.
    assert options.dilation_factor == (1, 1)

    # quantization params
    inputs_S, inputs_Z = inputs.scale, inputs.zero_point
    weights_S, weights_Z = weights.scale, weights.zero_point
    bias_S, bias_Z = bias.scale, bias.zero_point
    output_S, output_Z = output.scale, output.zero_point

    print 'quantization params:'
    print 'input: %s, %s' % (inputs_S, inputs_Z)
    print 'weights: %s, %s' % (weights_S, weights_Z)
    print 'bias: %s, %s' % (bias_S, bias_Z)
    print 'output: %s, %s' % (output_S, output_Z)

    # other options
    stride_h, stride_w = options.stride
    # a bit crude, but this provides the maximum padding we need, since we
    # assume "SAME" padding type.
    padding_h, padding_w = (weights_h // 2, weights_w // 2)

    # multiplier = (inputs_S * weights_S) / output_S
    n, qm = quantization.compute_multiplier_for_conv2d(weights_S, inputs_S,
                                                       output_S)
    quant_mult = quantization.quantized_multiplier_mult

    output.data = np.zeros(output.shape, dtype='uint8')

    for out_y in range(output_h):
        for out_x in range(output_w):
            for out_c in range(n_channels_out):
                in_x_origin = (out_x * stride_w) - padding_w
                in_y_origin = (out_y * stride_h) - padding_h
                acc = int32(0)
                for filter_y in range(weights_h):
                    for filter_x in range(weights_w):
                        for in_c in range(n_channels_in):
                            in_x = in_x_origin + filter_x
                            in_y = in_y_origin + filter_y
                            if (0 <= in_x < inputs_w) and \
                               (0 <= in_y < inputs_h):
                                iv = int32(inputs[0][in_y][in_x][in_c])
                                wv = int32(
                                    weights[out_c][filter_y][filter_x][in_c])
                                acc += (iv - inputs_Z) * (wv - weights_Z)
                acc += bias[out_c]
                # acc = int32(rounded_mult(acc, multiplier))
                acc = quant_mult(acc, qm, n)
                acc += output_Z
                acc = 255 if acc > 255 else acc
                acc = 0 if acc < 0 else acc
                output[0][out_y][out_x][out_c] = uint8(acc)
    return output.data
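Both reference loops rely on the same SAME-padding bookkeeping: the output extent is ceil(input / stride), and weights_h // 2 is the maximum one-sided padding for an odd kernel. With the shapes used in the driver below:

import math

inputs_h, stride_h, weights_h = 128, 2, 3
output_h = int(math.ceil(float(inputs_h) / stride_h))  # 64
padding_h = weights_h // 2                             # 1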
Example #8
    return output_data


if __name__ == '__main__':
    np.set_printoptions(threshold=np.nan)
    i_shape = (1, 128, 128, 3)
    w_shape = (8, 3, 3, 3)
    o_shape = (1, 64, 64, 8)
    padding_h, padding_w = (w_shape[1] // 2, w_shape[2] // 2)
    # i = np.random.randint(0, 255, size=i_shape, dtype='int32')
    i = np.array(np.arange(np.prod(i_shape), dtype='uint8').reshape(i_shape),
                 dtype='int32')
    # w = np.random.randint(0, 255, size=w_shape, dtype='int32')
    w = np.array(np.arange(np.prod(w_shape), dtype='uint8').reshape(w_shape),
                 dtype='int32')
    # b = np.random.randint(-5000, 5000, size=(8,), dtype='int32')
    b = np.arange(o_shape[3], dtype='int32')
    w_offset = 157
    w_scale = 0.008882409892976284
    i_offset = 128
    i_scale = 0.0078125
    b_scale = 0.00006939382728887722  # == i_scale * w_scale
    b_offset = 0
    o_offset = 0.0
    o_scale = 0.023528477177023888
    shift, mult = quantization.compute_multiplier_for_conv2d(
        i_scale, w_scale, o_scale)
    w = _flatten_weights(w - w_offset, w_shape)
    print _conv2d(i, i_offset, i_shape, o_offset, o_shape, w, w_shape, b,
                  (2, 2), (padding_h, padding_w), shift, mult)
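One consistency check on the constants above: in this quantization scheme the bias scale is the product of the input and weight scales, which the driver's numbers satisfy exactly:

i_scale, w_scale = 0.0078125, 0.008882409892976284
b_scale = 0.00006939382728887722
assert abs(i_scale * w_scale - b_scale) < 1e-15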