def _run_encode_op_and_add(
        op: str, kop: str, model: delay_model_pb2.DelayModel,
        stub: synthesis_service_pb2_grpc.SynthesisServiceStub) -> None:
    """Runs characterization for the encode op."""
    _new_regression_op_model(model, kop, operand_bit_counts=[0])

    # input_bits should be at least 2 bits; a 1-bit input would yield a
    # 0-bit encode result.
    for input_bits in _bitwidth_sweep(0):
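        # The encode result needs just enough bits to represent the highest
        # input bit index (input_bits - 1).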
        node_bits = bits.min_bit_count_unsigned(input_bits - 1)
        model.data_points.append(
            _build_data_point_bit_types(op, kop, node_bits, [input_bits],
                                        stub))
        logging.info('# encode_op: %s, %s input bits --> %s', op,
                     str(input_bits), str(model.data_points[-1].delay))

    # Validate model
    delay_model.DelayModel(model)


def _run_dynamic_bit_slice_op_and_add(
        op: str, kop: str, model: delay_model_pb2.DelayModel,
        stub: synthesis_service_pb2_grpc.SynthesisServiceStub) -> None:
    """Runs characterization for the dynamic bit slice op."""
    add_op_model = _new_regression_op_model(model, kop)

    # ~= result_bit_count * operand_bit_count[1] (start bits)
    # Hard to model this well - in theory, this should be something
    # more like result_bit_count * 2 ^ start bits.  However,
    # as we add more result bits, more work gets eliminated / reduced
    # (iff 2 ^ start bits + result width > input bits).
    mul_expr = _new_expression(add_op_model)
    _set_multiply_expression(mul_expr)
    _set_result_bit_count_expression_factor(mul_expr.lhs_expression)
    _set_operand_bit_count_expression_factor(mul_expr.rhs_expression, 1)

    # input_bits should be at least 2 bits
    idx = 0
    for input_bits in _bitwidth_sweep(2):
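        # Sweep start-operand widths up to the width needed to address every
        # bit of the input.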
        for start_bits in range(
                3,
                bits.min_bit_count_unsigned(input_bits - 1) + 1):
            for node_bits in range(1, input_bits, BITWIDTH_STRIDE_DEGREES[2]):
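                # The 'width' attribute sets the result width of the generated
                # dynamic_bit_slice node.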
                model.data_points.append(
                    _build_data_point_bit_types(op,
                                                kop,
                                                node_bits,
                                                [input_bits, start_bits],
                                                stub,
                                                attributes=[('width',
                                                             str(node_bits))]))
                logging.info(
                    '# idx: %s, dynamic_bit_slice_op: %s, %s start bits, '
                    '%s input bits, %s width --> %s', str(idx), op,
                    str(start_bits), str(input_bits), str(node_bits),
                    str(model.data_points[-1].delay))
                idx = idx + 1

    # Validate model
    delay_model.DelayModel(model)


def _run_select_op_and_add(
        op: str, kop: str, model: delay_model_pb2.DelayModel,
        stub: synthesis_service_pb2_grpc.SynthesisServiceStub) -> None:
    """Runs characterization for the select op."""
    add_op_model = _new_regression_op_model(model, kop)

    # operand_count * result_bit_count
    # Alternatively, try pow(2, operand_bit_count(0)) * result_bit_count
    expr = _new_expression(add_op_model)
    _set_multiply_expression(expr)
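    # add_constant=-2 roughly discounts the non-case operands (the selector
    # and, when present, the default value) from the operand count.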
    _set_operand_count_expression_factor(expr.lhs_expression, add_constant=-2)
    _set_result_bit_count_expression_factor(expr.rhs_expression)

    # Enumerate cases and bitwidth.
    # Note: at 7 and 8 cases, there is a weird dip in LUTs at around 40 bits
    # wide. Why? No idea...
    for num_cases in _operand_count_sweep():
        for bit_count in _bitwidth_sweep(0):

            # If num_cases exactly fills the selector's range (a power of 2),
            # no default operand is needed; otherwise add one extra operand
            # for the default value.
            select_bits = bits.min_bit_count_unsigned(num_cases - 1)
            if math.pow(2, select_bits) == num_cases:
                model.data_points.append(
                    _build_data_point_bit_types(
                        op, kop, bit_count,
                        [select_bits] + ([bit_count] * num_cases), stub))
            else:
                model.data_points.append(
                    _build_data_point_bit_types(
                        op, kop, bit_count,
                        [select_bits] + ([bit_count] * (num_cases + 1)), stub))
            logging.info('# select_op: %s, %s bits, %s cases --> %s', op,
                         str(bit_count), str(num_cases),
                         str(model.data_points[-1].delay))

    # Validate model
    delay_model.DelayModel(model)


def _run_array_update_op_and_add(
        op: str, kop: str, model: delay_model_pb2.DelayModel,
        stub: synthesis_service_pb2_grpc.SynthesisServiceStub) -> None:
    """Runs characterization for the ArrayUpdate op."""
    add_op_model = _new_regression_op_model(model, kop)

    # Area is a function of #elements*weight + #elements*bitwidth*weight.
    #
    # This seems to hold across a range of element counts, bitwidths, and
    # numbers of dimensions.
    #
    # The weight isn't an artifact of where we sampled data - it is actually
    # ~constant rather than being something like the ratio of #elements to
    # #bitwidths or similar.

    def _set_addressable_element_count_expression(elm_expr):
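        """Addressable element count: array bits (op 0) / element bits (op 1)."""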
        _set_divide_expression(elm_expr)
        _set_operand_bit_count_expression_factor(elm_expr.lhs_expression, 0)
        _set_operand_bit_count_expression_factor(elm_expr.rhs_expression, 1)

    elm_expr = _new_expression(add_op_model)
    _set_addressable_element_count_expression(elm_expr)
    mul_expr = _new_expression(add_op_model)
    _set_multiply_expression(mul_expr)
    _set_addressable_element_count_expression(mul_expr.lhs_expression)
    _set_operand_bit_count_expression_factor(mul_expr.rhs_expression, 1)

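    # Sweep 1-D and 2-D arrays over a range of element counts and bit widths.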
    for num_dims in range(1, 3):
        for array_dimension_sizes in _yield_array_dimension_sizes(num_dims):

            # If single-dimension array, increase number of elements.
            if num_dims == 1:
                assert len(array_dimension_sizes) == 1
                array_dimension_sizes[0] = array_dimension_sizes[0] * 2

            for element_bit_count in _bitwidth_sweep(3):
                array_and_element_dimensions = (
                    [element_bit_count] + array_dimension_sizes)

                # Format dimension args: operand 0 is the array, operand 1 is
                # the update value, and the remaining operands are the
                # per-dimension index operands.
                operand_dimensions = [array_and_element_dimensions]
                operand_dimensions.append([element_bit_count])
                for dim in reversed(array_dimension_sizes):
                    operand_dimensions.append(
                        [bits.min_bit_count_unsigned(dim - 1)])

                # Record data point
                result = _build_data_point(op, kop,
                                           array_and_element_dimensions,
                                           operand_dimensions, stub)
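                # Record the array and update-value operands on the operation
                # so the regression expressions above can read operand bit
                # counts 0 and 1.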
                array_operand = result.operation.operands.add()
                array_operand.bit_count = functools.reduce(
                    operator.mul, array_and_element_dimensions, 1)
                new_elm_operand = result.operation.operands.add()
                new_elm_operand.bit_count = element_bit_count
                model.data_points.append(result)

                logging.info(
                    '%s: %s --> %s', str(kop),
                    ','.join(str(item) for item in operand_dimensions),
                    str(result.delay))

    # Validate model
    delay_model.DelayModel(model)