class DefaultEntityArgTemplate(DefaultArgTemplate):
    """Default argument values for HDL entity generation.

    Each class attribute is a default that can be overridden per-entity
    (typically through command-line parsing or ``get_default_args``
    helpers). Instances are consumed as ``arg_template`` by entity
    constructors such as ``ML_EntityBasis.__init__``.
    """
    # Naming: generated entity identifier and output file names
    base_name = "unknown_entity"
    entity_name = "unknown_entity"
    output_file = "entity.vhd"
    debug_file = None
    # Specification: I/O formats and required accuracy
    precision = HdlVirtualFormat(ML_Binary32)
    io_precisions = None
    io_formats = None
    accuracy = ML_Faithful
    # Optimization parameters: code-generation backend
    backend = VHDLBackend()
    # Debug verbosity
    debug = False
    language = VHDL_Code
    # Functional test related parameters
    auto_test = False
    auto_test_range = Interval(0, 1)
    auto_test_std = False
    embedded_test = True
    externalized_test_data = False
    # Exit after test
    exit_after_test = True
    # RTL elaboration
    build_enable = False
    # RTL elaboration & simulation tool
    simulator = "vsim"
    # Pipelined design
    pipelined = False
    # Pipeline register control: (reset enabled, synchronous) pair
    reset_pipeline = (False, True)
    negate_reset = False
    reset_name = "reset"
    # Pipeline recirculation (stall) control
    recirculate_pipeline = False
    recirculate_signal_map = {}
class DefaultEntityArgTemplate(DefaultArgTemplate):
    """Default argument values for HDL entity generation.

    NOTE(review): this is a second, older-looking variant of
    ``DefaultEntityArgTemplate`` (it lacks ``io_formats``,
    ``reset_name``, ``simulator``, ... present in the other variant) —
    confirm which definition is current in the project.
    """
    # Naming: generated entity identifier and output file names
    base_name = "unknown_entity"
    entity_name = "unknown_entity"
    output_file = "entity.vhd"
    debug_file = None
    # Specification: I/O format and required accuracy
    precision = ML_Binary32
    io_precisions = None
    accuracy = ML_Faithful
    libm_compliant = False
    # Optimization parameters: code-generation backend
    backend = VHDLBackend()
    fuse_fma = None
    fast_path_extract = False
    # Debug verbosity
    debug = False
    language = VHDL_Code
    # Functional test related parameters
    auto_test = False
    auto_test_range = Interval(0, 1)
    auto_test_std = False
    # Exit after test
    exit_after_test = True
    # RTL elaboration
    build_enable = False
    # Pipelined design
    pipelined = False
    # Pipeline register control
    reset_pipeline = False
    recirculate_pipeline = False
def __init__(self, arg_template=DefaultEntityArgTemplate, precision=ML_Binary32, accuracy=ML_Faithful, libm_compliant=True, debug_flag=False, fuse_fma=True, fast_path_extract=True, target=VHDLBackend(), output_file="fp_fma.vhd", entity_name="fp_fma", language=VHDL_Code, vector_size=1):
    """Build a floating-point FMA entity generator.

    The effective I/O format is taken from ``arg_template.precision``
    when set, falling back to the ``precision`` keyword.
    """
    # resolve the working format, the argument template takes precedence
    resolved_format = ArgDefault.select_value(
        [arg_template.precision, precision])
    # identical format for both I/O slots
    format_list = [resolved_format, resolved_format]
    # delegate the shared entity setup to the base class
    ML_EntityBasis.__init__(
        self,
        base_name="fp_fma",
        entity_name=entity_name,
        output_file=output_file,
        io_precisions=format_list,
        abs_accuracy=None,
        backend=target,
        fuse_fma=fuse_fma,
        fast_path_extract=fast_path_extract,
        debug_flag=debug_flag,
        language=language,
        arg_template=arg_template)
    self.accuracy = accuracy
    self.precision = resolved_format
def __init__(self, arg_template=DefaultEntityArgTemplate, precision=ML_Binary32, libm_compliant=True, debug_flag=False, target=VHDLBackend(), output_file="fp_adder.vhd", entity_name="fp_adder", language=VHDL_Code):
    """Build a floating-point adder entity generator.

    ``arg_template.precision`` overrides the ``precision`` keyword when
    it is set.
    """
    # resolve the working format, the argument template takes precedence
    resolved_format = ArgDefault.select_value(
        [arg_template.precision, precision])
    # identical format for both I/O slots
    format_list = [resolved_format, resolved_format]
    # delegate the shared entity setup to the base class
    ML_EntityBasis.__init__(
        self,
        base_name="fp_adder",
        entity_name=entity_name,
        output_file=output_file,
        io_precisions=format_list,
        backend=target,
        debug_flag=debug_flag,
        language=language,
        arg_template=arg_template)
    self.precision = resolved_format
def __init__(self, arg_template=DefaultEntityArgTemplate, precision=HdlVirtualFormat(ML_Binary32), accuracy=ML_Faithful, debug_flag=False, target=VHDLBackend(), output_file="fp_mpfma.vhd", entity_name="fp_mpfma", language=VHDL_Code, acc_prec=None, pipelined=False):
    """Build a mixed-precision FMA entity generator.

    ``acc_prec`` selects a dedicated accumulator format; when left as
    ``None`` the accumulator reuses the main working format.
    """
    # resolve the working format, the argument template takes precedence
    resolved_format = ArgDefault.select_value(
        [arg_template.precision, precision])
    # identical format for both I/O slots
    format_list = [resolved_format, resolved_format]
    # delegate the shared entity setup to the base class
    ML_EntityBasis.__init__(
        self,
        base_name="fp_mpfma",
        entity_name=entity_name,
        output_file=output_file,
        io_precisions=format_list,
        backend=target,
        debug_flag=debug_flag,
        language=language,
        arg_template=arg_template)
    self.accuracy = accuracy
    # main precision (used for product operand and default for accumulator)
    self.precision = resolved_format
    # accumulator precision: explicit override or the main format
    self.acc_precision = acc_prec if acc_prec is not None else resolved_format
    # enable operator pipelining
    self.pipelined = pipelined
def get_default_args(**kw):
    """Generate the default argument template for the mult_array entity.

    Keyword overrides in ``kw`` take precedence over the built-in
    defaults.
    """
    default_dict = {
        "precision": fixed_point(32, 0),
        "target": VHDLBackend(),
        "output_file": "mult_array.vhd",
        "entity_name": "mult_array",
        "language": VHDL_Code,
        "pipelined": False,
        "dummy_mode": False,
        "booth_mode": False,
        # reduction method, consumed through arg_template.method by the
        # entity constructor; a stale duplicate key "Method"
        # (ReductionMethod.Wallace_4to2) has been dropped since nothing
        # reads the capitalized spelling
        "method": ReductionMethod.Wallace,
        # default multiplication operand descriptor
        "op_expr": multiplication_descriptor_parser("FS9.0xFS13.0"),
        # per-stage compression height limits (None = unbounded)
        "stage_height_limit": [None],
        "passes": [
            "beforepipelining:size_datapath",
            "beforepipelining:rtl_legalize",
            "beforepipelining:unify_pipeline_stages",
        ],
    }
    default_dict.update(kw)
    return DefaultEntityArgTemplate(**default_dict)
def get_default_args(width=32):
    """Build the default argument template for the my_lzc entity."""
    config = dict(
        precision=ML_Int32,
        debug_flag=False,
        target=VHDLBackend(),
        output_file="my_lzc.vhd",
        entity_name="my_lzc",
        language=VHDL_Code,
        width=width,
    )
    return DefaultEntityArgTemplate(**config)
def get_default_args(**kw):
    """Build the default argument template for the my_fp_div entity.

    Entries in ``kw`` override the defaults below.
    """
    defaults = dict(
        precision=ML_Binary32,
        target=VHDLBackend(),
        output_file="my_fp_div.vhd",
        entity_name="my_fp_div",
        language=VHDL_Code,
        pipelined=False,
    )
    defaults.update(kw)
    return DefaultEntityArgTemplate(**defaults)
def get_default_args(**kw):
    """Build the default argument template for the ut_rtl_report entity.

    Entries in ``kw`` override the defaults below.
    """
    defaults = dict(
        precision=ML_Int32,
        debug_flag=False,
        target=VHDLBackend(),
        output_file="ut_rtl_report.vhd",
        entity_name="ut_rtl_report",
        language=VHDL_Code,
    )
    defaults.update(kw)
    return DefaultEntityArgTemplate(**defaults)
def get_default_args(width=32, **kw):
    """Generate the default argument template for ut_fixed_point_position.

    Fix: ``**kw`` was previously accepted but silently discarded; extra
    keyword overrides now take precedence over the defaults, matching
    the behavior of the other ``get_default_args`` helpers.
    """
    default_dict = {
        "precision": ML_Int32,
        "debug_flag": False,
        "target": VHDLBackend(),
        "output_file": "ut_fixed_point_position.vhd",
        "entity_name": "ut_fixed_point_position",
        "language": VHDL_Code,
        "width": width,
        "passes": ["beforecodegen:size_datapath"],
    }
    default_dict.update(kw)
    return DefaultEntityArgTemplate(**default_dict)
def get_default_args(width=32, **kw):
    """Generate the default argument template for the adaptative entity.

    Fix: ``**kw`` was previously accepted but silently discarded; extra
    keyword overrides now take precedence over the defaults, matching
    the behavior of the other ``get_default_args`` helpers.

    NOTE(review): output_file ("my_adapative_entity.vhd") and
    entity_name ("my_adaptative_entity") are spelled differently —
    confirm whether the mismatch is intentional before renaming either.
    """
    default_dict = {
        "precision": ML_Int32,
        "debug_flag": False,
        "target": VHDLBackend(),
        "output_file": "my_adapative_entity.vhd",
        "entity_name": "my_adaptative_entity",
        "language": VHDL_Code,
        "width": width,
        "passes": [
            "beforecodegen:size_datapath",
            "beforecodegen:rtl_legalize",
            "beforecodegen:dump",
        ],
    }
    default_dict.update(kw)
    return DefaultEntityArgTemplate(**default_dict)
def get_default_args(width=32, **kw):
    """Generate the default argument template for ut_sub_component.

    Fix: ``**kw`` was previously accepted but silently discarded; extra
    keyword overrides now take precedence over the defaults, matching
    the behavior of the other ``get_default_args`` helpers.
    """
    default_dict = {
        "precision": ML_Int32,
        "debug_flag": False,
        "target": VHDLBackend(),
        "output_file": "ut_sub_component.vhd",
        "entity_name": "ut_sub_component",
        "language": VHDL_Code,
        "width": width,
        "passes": [
            "beforepipelining:size_datapath",
            "beforepipelining:rtl_legalize",
            "beforepipelining:unify_pipeline_stages",
        ],
    }
    default_dict.update(kw)
    return DefaultEntityArgTemplate(**default_dict)
def get_default_args(**kw):
    """Generate default argument structure for BipartiteApprox.

    Entries in ``kw`` override the defaults below.
    """
    cfg = dict(
        target=VHDLBackend(),
        output_file="my_bipartite_approx.vhd",
        # NOTE(review): entity name spelling differs from output_file
        # ("bipartie" vs "bipartite") — confirm intended before renaming
        entity_name="my_bipartie_approx",
        language=VHDL_Code,
        # approximated function and its input interval
        function=lambda x: 1.0 / x,
        interval=Interval(1, 2),
        pipelined=False,
        precision=fixed_point(1, 15, signed=False),
        disable_sub_testing=False,
        disable_sv_testing=False,
        # bipartite decomposition parameters
        alpha=6,
        beta=5,
        gamma=5,
        guard_bits=3,
        passes=[
            "beforepipelining:size_datapath",
            "beforepipelining:rtl_legalize",
            "beforepipelining:unify_pipeline_stages",
        ],
    )
    cfg.update(kw)
    return DefaultEntityArgTemplate(**cfg)
def __init__(self, arg_template=DefaultEntityArgTemplate, precision=fixed_point(32, 0, signed=False), accuracy=ML_Faithful, debug_flag=False, target=VHDLBackend(), output_file="mult_array.vhd", entity_name="mult_array", language=VHDL_Code, acc_prec=None, pipelined=False):
    """Build a multiplier-array entity generator from an argument template."""
    # NOTE(review): unlike sibling constructors, the ``precision``
    # keyword is ignored here and the template value is always used —
    # confirm this asymmetry is intentional
    resolved_format = arg_template.precision
    # identical format for both I/O slots
    format_list = [resolved_format, resolved_format]
    # delegate the shared entity setup to the base class
    ML_EntityBasis.__init__(
        self,
        base_name="mult_array",
        entity_name=entity_name,
        output_file=output_file,
        io_precisions=format_list,
        backend=target,
        debug_flag=debug_flag,
        language=language,
        arg_template=arg_template)
    self.accuracy = accuracy
    # main precision (used for product operand and default for accumulator)
    self.precision = resolved_format
    # enable operator pipelining
    self.pipelined = pipelined
    # multiplication input descriptor
    self.op_expr = arg_template.op_expr
    self.dummy_mode = arg_template.dummy_mode
    self.booth_mode = arg_template.booth_mode
    # partial-product reduction method
    self.reduction_method = arg_template.method
    # limit of height for each compression stage
    self.stage_height_limit = arg_template.stage_height_limit
def __init__(self, arg_template=DefaultEntityArgTemplate, precision=ML_Binary32, target=VHDLBackend(), debug_flag=False, output_file="fp_fixed_mpfma.vhd", entity_name="fp_fixed_mpfma", language=VHDL_Code, vector_size=1):
    """Build a floating-point/fixed-point mixed-precision FMA generator."""
    # resolve the working format, the argument template takes precedence
    resolved_format = ArgDefault.select_value(
        [arg_template.precision, precision])
    # identical format for both I/O slots
    format_list = [resolved_format, resolved_format]
    # delegate the shared entity setup to the base class
    ML_EntityBasis.__init__(
        self,
        base_name="fp_fixed_mpfma",
        entity_name=entity_name,
        output_file=output_file,
        io_precisions=format_list,
        abs_accuracy=None,
        backend=target,
        debug_flag=debug_flag,
        language=language,
        arg_template=arg_template)
    self.precision = resolved_format
    # number of extra bits to add to the accumulator fixed precision
    self.extra_digit = arg_template.extra_digit
    # accumulator LSB index: the minimal product exponent, i.e. twice
    # the subnormal minimum exponent of the working format
    self.acc_lsb_index = self.precision.get_emin_subnormal() * 2
    # select sign-magnitude encoded accumulator
    self.sign_magnitude = arg_template.sign_magnitude
    # enable/disable operator pipelining
    self.pipelined = arg_template.pipelined
def __init__(
        self,
        # Naming
        base_name=ArgDefault("unknown_entity", 2),
        entity_name=ArgDefault(None, 2),
        output_file=ArgDefault(None, 2),
        # Specification
        io_precisions=ArgDefault([ML_Binary32], 2),
        abs_accuracy=ArgDefault(None, 2),
        libm_compliant=ArgDefault(True, 2),
        # Optimization parameters
        backend=ArgDefault(VHDLBackend(), 2),
        fast_path_extract=ArgDefault(True, 2),
        # Debug verbosity
        debug_flag=ArgDefault(False, 2),
        language=ArgDefault(VHDL_Code, 2),
        arg_template=DefaultEntityArgTemplate):
    """Initialize the shared state of an HDL entity generator.

    Argument resolution: for most parameters the value carried by
    ``arg_template`` takes precedence over the corresponding keyword
    (via ``ArgDefault.select_value``). The constructor then derives
    naming (entity name, output file, debug file), instantiates the
    code-generation objects (``CodeEntity``, ``VHDLCodeGenerator``,
    ``NestedCode``) and registers the optimization passes listed in
    ``arg_template.passes``.

    NOTE(review): ``libm_compliant`` is accepted but never read in this
    body — confirm whether it is consumed elsewhere.
    """
    # selecting argument values among defaults
    base_name = ArgDefault.select_value([base_name])
    Log.report(
        Log.Info,
        "pre entity_name: %s %s " % (entity_name, arg_template.entity_name))
    # the template's entity name, when set, overrides the keyword
    entity_name = ArgDefault.select_value(
        [arg_template.entity_name, entity_name])
    Log.report(Log.Info, "entity_name: %s " % entity_name)
    Log.report(
        Log.Info,
        "output_file: %s %s " % (arg_template.output_file, output_file))
    Log.report(Log.Info, "debug_file: %s " % arg_template.debug_file)
    output_file = ArgDefault.select_value(
        [arg_template.output_file, output_file])
    # debug file always comes from the template (no keyword fallback)
    debug_file = arg_template.debug_file
    # Specification
    io_precisions = ArgDefault.select_value([io_precisions])
    abs_accuracy = ArgDefault.select_value([abs_accuracy])
    # Optimization parameters
    backend = ArgDefault.select_value([arg_template.backend, backend])
    fast_path_extract = ArgDefault.select_value(
        [arg_template.fast_path_extract, fast_path_extract])
    # Debug verbosity
    debug_flag = ArgDefault.select_value([arg_template.debug, debug_flag])
    language = ArgDefault.select_value([arg_template.language, language])
    auto_test = arg_template.auto_test
    auto_test_std = arg_template.auto_test_std
    self.precision = arg_template.precision
    self.pipelined = arg_template.pipelined
    # io_precisions must be a list
    # -> with a single element
    # XOR -> with as many elements as function arity (input + output arities)
    self.io_precisions = io_precisions
    ## enable the generation of numeric/functional auto-test
    # (either a number of random tests or the standard test set)
    self.auto_test_enable = (auto_test != False or 
auto_test_std != False)
    self.auto_test_number = auto_test
    self.auto_test_range = arg_template.auto_test_range
    self.auto_test_std = auto_test_std
    # enable/disable automatic exit once functional test is finished
    self.exit_after_test = arg_template.exit_after_test
    # enable post-generation RTL elaboration
    self.build_enable = arg_template.build_enable
    # enable post-elaboration simulation
    self.execute_trigger = arg_template.execute_trigger
    self.language = language
    # Naming logic, using provided information if available,
    # otherwise deriving from base_name:
    #   base_name is e.g. exp
    #   entity_name is e.g. expf or expd or whatever
    self.entity_name = entity_name if entity_name else generic_naming(
        base_name, self.io_precisions)
    self.output_file = output_file if output_file else self.entity_name + ".vhd"
    self.debug_file = debug_file if debug_file else "{}_dbg.do".format(
        self.entity_name)
    # debug version
    self.debug_flag = debug_flag
    # debug display
    self.display_after_gen = arg_template.display_after_gen
    self.display_after_opt = arg_template.display_after_opt
    # TODO: FIX which i/o precision to select
    # TODO: incompatible with fixed-point formats
    # self.sollya_precision = self.get_output_precision().get_sollya_object()
    # default absolute accuracy: one ulp of the output format
    self.abs_accuracy = abs_accuracy if abs_accuracy else S2**(
        -self.get_output_precision().get_precision())
    # target selection
    self.backend = backend
    # register control
    self.reset_pipeline = arg_template.reset_pipeline
    self.recirculate_pipeline = arg_template.recirculate_pipeline
    # optimization parameters
    self.fast_path_extract = fast_path_extract
    self.implementation = CodeEntity(self.entity_name)
    self.vhdl_code_generator = VHDLCodeGenerator(
        self.backend,
        declare_cst=False,
        disable_debug=not self.debug_flag,
        language=self.language)
    uniquifier = self.entity_name
    self.main_code_object = NestedCode(self.vhdl_code_generator,
                                       static_cst=False,
                                       uniquifier="{0}_".format(
                                           self.entity_name),
                                       code_ctor=VHDLCodeObject)
    # when debugging, attach a dedicated code object pre-loaded with the
    # debug utility library
    if self.debug_flag:
        self.debug_code_object = 
CodeObject(self.language)
        self.debug_code_object << debug_utils_lib
        self.vhdl_code_generator.set_debug_code_object(
            self.debug_code_object)
    # pass scheduler instanciation
    self.pass_scheduler = PassScheduler()
    # recursive pass dependency
    pass_dep = PassDependency()
    # each entry of arg_template.passes is "<slot>:<pass tag>"
    for pass_uplet in arg_template.passes:
        pass_slot_tag, pass_tag = pass_uplet.split(":")
        pass_slot = PassScheduler.get_tag_class(pass_slot_tag)
        pass_class = Pass.get_pass_by_tag(pass_tag)
        pass_object = pass_class(self.backend)
        self.pass_scheduler.register_pass(pass_object,
                                          pass_dep=pass_dep,
                                          pass_slot=pass_slot)
        # linearly linking pass in the order they appear
        pass_dep = AfterPassById(pass_object.get_pass_id())
    # TODO/FIXME: can be overloaded
    # NOTE(review): the signal name is hard-coded to "reset" here even
    # though the argument template declares a reset_name field — confirm
    if self.reset_pipeline:
        self.reset_signal = self.implementation.add_input_signal(
            "reset", ML_StdLogic)
    self.recirculate_signal_map = {}