import math
from os.path import join
from typing import Tuple

import numpy as np

# The fixed point helpers used below (random_fixed_array, v_float2fixedint,
# v_fixedint2ffloat, float2ffloat, float2fixedint, flatten, zero_pad) are
# assumed to come from the project's own helper module; adjust the import
# to the actual module path.


def create_arrays(root, w, h, ch):
    id_ = "one" if ch == 1 else "multiple"
    a_rand = random_fixed_array((1, ch, h, w), 8, 0)
    a_in = v_float2fixedint(a_rand, 8, 0)
    np.savetxt(join(root, "src", "input_%s.csv" % id_), flatten(a_in),
               delimiter=", ", fmt="%3d")

    a_out = v_float2fixedint(zero_pad(a_rand), 8, 0)
    np.savetxt(join(root, "src", "output_%s.csv" % id_), flatten(a_out),
               delimiter=", ", fmt="%3d")
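# Usage sketch for create_arrays; the root path is illustrative and the
# <root>/src directory must exist before the CSV files can be written.
import os


def _demo_create_arrays(tmp_root="/tmp/zero_pad_tb"):
    os.makedirs(join(tmp_root, "src"), exist_ok=True)
    # one channel -> input_one.csv / output_one.csv
    create_arrays(tmp_root, w=4, h=4, ch=1)
    # three channels -> input_multiple.csv / output_multiple.csv
    create_arrays(tmp_root, w=4, h=4, ch=3)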
def create_stimuli(root, pool_dim, total_bits, frac_bits):
    # vunit import from csv can only handle datatype integer.
    # Therefore the random fixed point values have to be converted to
    # corresponding integer values.
    int_bits = total_bits - frac_bits
    a_rand = random_fixed_array((pool_dim, pool_dim), int_bits, frac_bits)
    a_in = v_float2fixedint(a_rand, int_bits, frac_bits)
    np.savetxt(join(root, "src", "input%d.csv" % pool_dim), a_in,
               delimiter=", ", fmt="%3d")

    # use atleast_1d to fulfill 1d requirement of savetxt
    a_out = np.atleast_1d(
        v_float2fixedint(np.max(a_rand), int_bits, frac_bits))
    np.savetxt(join(root, "src", "output%d.csv" % pool_dim), a_out,
               delimiter=", ", fmt="%3d")
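# Sketch of the float to integer conversion that v_float2fixedint is assumed
# to perform (scaling by 2**frac_bits), shown with plain numpy so the CSV
# contents written above are easy to reason about; names are illustrative.
def _demo_fixed_to_int():
    frac_bits = 4
    values = np.array([0.25, -1.5, 3.0625])
    print(np.round(values * 2 ** frac_bits).astype(int))  # [  4 -24  49]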
def avg_pool(array_in, bitwidth: Tuple[int, int]):
    """Global average pooling layer."""
    _, _, height, width = array_in.shape
    array_in_float = v_fixedint2ffloat(array_in, *bitwidth)
    # quantize the reciprocal for the average manually; otherwise the
    # scaling factor would deviate too much from the hardware implementation
    reciprocal = float2ffloat(1. / (width * height), 1, 16)
    return v_float2fixedint(
        np.sum(np.sum(array_in_float, axis=2), axis=2) * reciprocal,
        *bitwidth)
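# Worked sketch of the reciprocal quantization in avg_pool: float2ffloat is
# assumed to round to the nearest multiple of 2**-16 (1 integer bit, 16
# fractional bits), so the model multiplies by the same constant a hardware
# multiplier would use instead of the exact reciprocal.
def _demo_reciprocal():
    width = height = 7
    exact = 1. / (width * height)                 # 0.02040816...
    quantized = round(exact * 2 ** 16) / 2 ** 16  # 1337 / 65536 = 0.02040100...
    print(exact, quantized)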
def create_stimuli(root, stage, ksize, total_bits_data, frac_bits_data,
                   total_bits_weight, frac_bits_weight):
    # vunit import from csv can only handle datatype integer.
    # Therefore the random fixed point values have to be converted to
    # corresponding integer values.
    int_bits_data = total_bits_data - frac_bits_data
    a_rand = random_fixed_array((ksize, ksize), int_bits_data,
                                frac_bits_data, signed=stage != 1)
    # manually extend the bitwidth to implicitly create unsigned values
    sign_bit = 1 if stage == 1 else 0
    a_in = v_float2fixedint(a_rand, int_bits_data + sign_bit, frac_bits_data)
    name = "input_data%s.csv" % ("_stage1" if stage == 1 else str(ksize))
    np.savetxt(join(root, "src", name), a_in, delimiter=", ", fmt="%3d")

    int_bits_weight = total_bits_weight - frac_bits_weight
    a_weights_rand = random_fixed_array((ksize, ksize), int_bits_weight,
                                        frac_bits_weight)
    a_weights_in = v_float2fixedint(a_weights_rand, int_bits_weight,
                                    frac_bits_weight)
    name = "input_weights%s.csv" % ("_stage1" if stage == 1 else str(ksize))
    np.savetxt(join(root, "src", name), a_weights_in, delimiter=", ",
               fmt="%3d")

    sum_ = np.sum(a_rand * a_weights_rand)
    additions = 0 if ksize == 1 else int(math.log2(ksize - 1) * 2)
    # use atleast_1d to fulfill 1d requirement of savetxt
    a_out = np.atleast_1d(float2fixedint(
        sum_, int_bits_data + int_bits_weight + additions + 1 + sign_bit,
        frac_bits_data + frac_bits_weight))
    name = "output%s.csv" % ("_stage1" if stage == 1 else str(ksize))
    np.savetxt(join(root, "src", name), a_out, delimiter=", ", fmt="%d")
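# Worked example of the accumulator width computed above (parameter values
# are hypothetical): the output bitwidth grows with the product width plus
# the extra integer bits reserved for the adder tree.
def _demo_conv_output_width():
    ksize = 3
    int_bits_data, frac_bits_data = 4, 4      # e.g. 8 bit data
    int_bits_weight, frac_bits_weight = 3, 5  # e.g. 8 bit weights
    sign_bit = 0                              # stage != 1
    additions = 0 if ksize == 1 else int(math.log2(ksize - 1) * 2)
    total_int = int_bits_data + int_bits_weight + additions + 1 + sign_bit
    total_frac = frac_bits_data + frac_bits_weight
    print(total_int, total_frac)  # 10 integer and 9 fractional bits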
def max_pool(array_in, ksize: int, stride: int, bitwidth: Tuple[int, int]):
    """Local maximum pooling layer."""
    # pylint: disable=too-many-locals
    batch, channel, height, width = array_in.shape
    assert batch == 1, "batch size != 1 not supported"
    array_in_flt = v_fixedint2ffloat(array_in, *bitwidth)
    out = np.zeros((1, channel, (height - ksize) // stride + 1,
                    (width - ksize) // stride + 1), dtype=np.uint8)
    # subtract (stride - 1) so that only positions where the full kernel
    # fits produce an output
    max_height = height - (ksize - stride) - (stride - 1)
    max_width = width - (ksize - stride) - (stride - 1)
    for row_out, row_in in enumerate(range(0, max_height, stride)):
        for col_out, col_in in enumerate(range(0, max_width, stride)):
            roi = array_in_flt[0, :, row_in:row_in + ksize,
                               col_in:col_in + ksize]
            out[0, :, row_out, col_out] = v_float2fixedint(
                np.amax(roi.reshape(channel, -1), axis=1), *bitwidth)
    return out
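# Minimal sketch exercising max_pool; bitwidth is assumed to be
# (integer bits, fractional bits), so the uint8 values 0..15 represent
# 0.0 .. 0.9375 in Q4.4.
def _demo_max_pool():
    arr = np.arange(16, dtype=np.uint8).reshape(1, 1, 4, 4)
    # each 2x2 window keeps its maximum: [[[[ 5  7] [13 15]]]]
    print(max_pool(arr, ksize=2, stride=2, bitwidth=(4, 4)))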
def leaky_relu(array_in, alpha: float, bitwidth: Tuple[int, int]):
    """Leaky rectified linear unit activation."""
    array_in_float = v_fixedint2ffloat(array_in, *bitwidth)
    # positive values keep their fixed point representation unchanged;
    # negative values are scaled by alpha and requantized
    return np.where(array_in_float > 0, array_in,
                    v_float2fixedint(array_in_float * alpha, *bitwidth))
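# Sketch for leaky_relu, assuming a signed Q3.5 representation (scale 32):
# -32 encodes -1.0 and is scaled by alpha to -0.125 (-4), while positive
# values pass through unchanged.
def _demo_leaky_relu():
    arr = np.array([[-32, 16]])  # -1.0 and 0.5
    print(leaky_relu(arr, alpha=0.125, bitwidth=(3, 5)))  # [[-4 16]]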
def relu(array_in, bitwidth: Tuple[int, int]):
    """Rectified linear unit activation."""
    array_in_float = v_fixedint2ffloat(array_in, *bitwidth)
    return v_float2fixedint(np.maximum(array_in_float, 0), *bitwidth)
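# Companion sketch for relu under the same assumed Q3.5 representation:
# negative values clamp to zero, positive values are unchanged.
def _demo_relu():
    arr = np.array([[-32, 16]])  # -1.0 and 0.5
    print(relu(arr, bitwidth=(3, 5)))  # [[ 0 16]]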