Example 1
def check_requirements():
    if not distutils.spawn.find_executable('vivado'):
        msg.error('vivado not found. Please set PATH correctly')

    vivado_version = str(subprocess.check_output(['vivado -version | head -n1 | sed "s/\(Vivado.\+v\)\(\([0-9]\|\.\)\+\).\+/\\2/"'], shell=True), 'utf-8').strip()
    if vivado_version < MIN_VIVADO_VERSION:
        msg.error('Installed Vivado version ({}) not supported (>= {})'.format(vivado_version, MIN_VIVADO_VERSION))
Example 2
def check_board_support(chip_part):

    check_requirements()

    tmp_dir = os.popen('mktemp -d --suffix=_ait').read().rstrip()
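    # Point MYVIVADO at the temporary directory so the Vivado_init.tcl written below is
    # picked up when Vivado starts (assumption: standard MYVIVADO init-script lookup)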
    os.environ['MYVIVADO'] = tmp_dir

    os.mkdir(tmp_dir + '/scripts')

    os.system('echo "enable_beta_device ' + chip_part + '" >' + tmp_dir +
              '/scripts/Vivado_init.tcl')
    os.system('echo "if {[llength [get_parts ' + chip_part +
              ']] == 0} {exit 1}" > ' + tmp_dir +
              '/scripts/ait_part_check.tcl')
    p = subprocess.Popen('vivado -nojournal -nolog -mode batch -source ' +
                         tmp_dir + '/scripts/ait_part_check.tcl',
                         shell=True,
                         stdout=open(os.devnull, 'w'),
                         cwd=tmp_dir + '/scripts')
    retval = p.wait()
    os.system('rm -rf ' + tmp_dir)
    del os.environ['MYVIVADO']
    if (int(retval) == 1):
        msg.error('Your current version of Vivado does not support part ' +
                  chip_part)
Example 3
 def check_args(self, args):
     if args.debug_intfs == 'custom' and args.debug_intfs_list is None:
         msg.error('A file specifying which interfaces to mark for debug is required when choosing \'custom\' value on --debug_intfs argument')
     if args.interconnect_regslice is not None:
         for opt in args.interconnect_regslice:
             if opt == 'all' and len(args.interconnect_regslice) != 1:
                 msg.error('Invalid combination of values for --interconnect_regslice')
     if args.jobs > getNumJobs():
         msg.warning('Using more Vivado jobs ({}) than the recommended default ({}). Performance of the compilation process might be affected'.format(args.jobs, getNumJobs()))
Example 4
def synthesize_accelerator(acc):
    shutil.rmtree(project_backend_path + '/HLS/' + acc.name,
                  ignore_errors=True)
    os.makedirs(project_backend_path + '/HLS/' + acc.name)

    shutil.copy2(
        acc.full_path,
        project_backend_path + '/HLS/' + acc.name + '/' + acc.filename)

    acc_tcl_script = '# Script automatically generated by the Accelerator Integration Tool. Edit at your own risk.\n' \
                     + 'open_project ' + acc.name + '\n' \
                     + 'set_top ' + acc.name + '_wrapper\n' \
                     + 'add_files ' + acc.name + '/' + acc.filename + ' -cflags "-I' + os.getcwd() + '"\n' \
                     + 'open_solution "solution1"\n' \
                     + 'set_part {' + chip_part + '} -tool vivado\n' \
                     + 'create_clock -period ' + str(args.clock) + 'MHz -name default\n' \
                     + 'config_rtl -reset control -reset_level low -reset_async\n'

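    # ZynqMP and Alveo use physical addresses wider than 32 bits, so the HLS m_axi interfaces are configured with 64-bit addresses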
    if board.arch.device == 'zynqmp' or board.arch.device == 'alveo':
        acc_tcl_script += 'config_interface -m_axi_addr64\n'

    acc_tcl_script += 'csynth_design\n' \
                      + 'export_design -rtl verilog -format ip_catalog -vendor bsc -library ompss -display_name ' + acc.name + ' -taxonomy /BSC/OmpSs\n' \
                      + 'exit\n'

    acc_tcl_script_file = open(
        project_backend_path + '/HLS/' + acc.name + '/HLS_' + acc.name +
        '.tcl', 'w')
    acc_tcl_script_file.write(acc_tcl_script)
    acc_tcl_script_file.close()

    msg.info('Synthesizing \'' + acc.name + '\'')

    p = subprocess.Popen('vivado_hls ' + project_backend_path + '/HLS/' +
                         acc.name + '/HLS_' + acc.name + '.tcl -l ' +
                         project_backend_path + '/HLS/' + acc.name + '/HLS_' +
                         acc.name + '.log',
                         cwd=project_backend_path + '/HLS',
                         stdout=sys.stdout.subprocess,
                         stderr=sys.stdout.subprocess,
                         shell=True)
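    # sys.stdout is assumed to be AIT's Logger; its 'subprocess' attribute is expected to
    # be a pipe when --verbose is set, so p.stdout can be read below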
    if args.verbose:
        for line in iter(p.stdout.readline, b''):
            sys.stdout.write(line.decode('utf-8'))

    retval = p.wait()
    if retval:
        if not args.keep_files:
            shutil.rmtree(project_backend_path + '/HLS/' + acc.name,
                          ignore_errors=True)
        msg.error('Synthesis of \'' + acc.name + '\' failed', start_time,
                  False)
    else:
        msg.success('Finished synthesis of \'' + acc.name + '\'')

    update_resource_utilization(acc)
Example 5
    def check_flow_args(self, args):
        # Validate flow args
        if args.from_step not in generation_steps[args.backend]:
            msg.error('Initial step \'' + args.from_step +
                      '\' is not a valid generation step for \'' +
                      args.backend + '\' backend. Set it correctly')

        if args.to_step not in generation_steps[args.backend]:
            msg.error('Final step \'' + args.to_step +
                      '\' is not a valid generation step for \'' +
                      args.backend + '\' backend. Set it correctly')

        if generation_steps[args.backend].index(
                args.from_step) > generation_steps[args.backend].index(
                    args.to_step):
            msg.error('Initial step \'' + args.from_step +
                      '\' is posterior to the final step \'' + args.to_step +
                      '\'. Set them correctly')

        if not args.disable_IP_caching and not os.path.isdir(
                args.IP_cache_location):
            if self.is_default('IP_cache_location', args.backend):
                # Create cache folder and set perms to allow all users writing there
                os.makedirs(args.IP_cache_location)
                os.chmod(args.IP_cache_location, 0o777)
            else:
                msg.error('Cache location (' + args.IP_cache_location +
                          ') does not exist or is not a folder')
Example 6
    def check_required_args(self, args):
        # Validate required args
        if not re.match('^[A-Za-z][A-Za-z0-9_]*$', args.name):
            msg.error(
                'Invalid project name. Must start with a letter and contain only letters, numbers or underscores'
            )

        if args.wrapper_version and args.wrapper_version < MIN_WRAPPER_VERSION:
            msg.error('Unsupported wrapper version (' +
                      str(args.wrapper_version) + '). Minimum version is ' +
                      str(MIN_WRAPPER_VERSION))

        if not os.path.exists(args.dir + '/' + args.name + '_ait'):
            os.mkdir(args.dir + '/' + args.name + '_ait')
Example 7
    def check_board_args(self, args, board):
        if args.memory_interleaving_stride is not None:
            if board.arch.device == 'zynq' or board.arch.device == 'zynqmp':
                msg.error('Memory interleaving is not available on either Zynq or ZynqMP boards')
            elif math.log2(decimalFromHumanReadable(board.mem.bank_size)) - math.log2(args.memory_interleaving_stride) < math.ceil(math.log2(board.mem.num_banks)):
                msg.error('Max allowed interleaving stride in current board: ' + decimalToHumanReadable(2**(math.log2(decimalFromHumanReadable(board.mem.bank_size)) - math.ceil(math.log2(board.mem.num_banks))), 2))

        if args.simplify_interconnection and (board.arch.device == 'zynq' or board.arch.device == 'zynqmp'):
            msg.error('Simplify memory interconnection is not available on either Zynq or ZynqMP boards')
        if args.simplify_interconnection and board.mem.type != 'ddr':
            msg.error('Simplify memory interconnection is only available for DDR memories')

        if (args.memory_interleaving_stride is not None and board.arch.device != 'alveo'):
            msg.error('Memory interleaving is only available on Alveo boards')
Example 8
def load_acc_placement(accList, args):
    # Read placement info from file
    if args.placement_file and os.path.exists(args.placement_file):
        usrPlacement = json.load(open(args.placement_file))
        for acc in accList:
            if acc.name not in usrPlacement:
                msg.warning('No placement given for acc ' + acc.name)
            else:
                placeList = usrPlacement[acc.name]
                if len(placeList) != acc.num_instances:
                    msg.warning('Placement list does not match the number of instances; placing only matching instances')
                acc.SLR = placeList

    elif args.placement_file:
        msg.error('Placement file not found: ' + args.placement_file)
Example 9
def check_requirements():
    if not distutils.spawn.find_executable('vivado'):
        msg.error('vivado not found. Please set PATH correctly')
    elif board.arch.device == 'zynq' and not distutils.spawn.find_executable(
            'bootgen'):
        msg.warning('bootgen not found. .bit.bin file will not be generated')

    vivado_version = str(
        subprocess.check_output([
            'vivado -version | head -n1 | sed "s/\(Vivado.\+v\)\(\([0-9]\|\.\)\+\).\+/\\2/"'
        ],
                                shell=True), 'utf-8').strip()
    if vivado_version < MIN_VIVADO_VERSION:
        msg.error('Installed Vivado version ({}) not supported (>= {})'.format(
            vivado_version, MIN_VIVADO_VERSION))
Example 10
def update_resource_utilization(acc):
    global available_resources
    global used_resources

    report_file = project_backend_path + '/HLS/' + acc.name + '/solution1/syn/report/' + acc.name + '_wrapper_csynth.xml'

    tree = cET.parse(report_file)
    root = tree.getroot()

    for resource in root.find('AreaEstimates').find('AvailableResources'):
        available_resources[resource.tag] = int(resource.text)

    if args.verbose_info:
        res_msg = 'Resources estimation for \'' + acc.name + '\': '
        res_msg += ', '.join(
            sorted(
                map(lambda r: r.tag + ' ' + r.text,
                    list(root.find('AreaEstimates').find('Resources')))))
        msg.log(res_msg)

    depleted_resources = False
    error_message = 'Resource utilization over 100%\nResources estimation summary\n'
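    # Accumulate the estimated usage of every accelerator instance and flag any resource that exceeds what the selected FPGA provides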
    for resource in root.find('AreaEstimates').find('Resources'):
        used_resources[resource.tag] = int(
            resource.text) * acc.num_instances + (int(used_resources[
                resource.tag]) if resource.tag in used_resources else 0)
        if used_resources[resource.tag] > available_resources[resource.tag]:
            if available_resources[resource.tag] == 0:
                msg.error(
                    'The HLS code is using resources not available in the selected FPGA'
                )
            utilization_percentage = str(
                round(
                    float(used_resources[resource.tag]) /
                    float(available_resources[resource.tag]) * 100, 2))
            report_string = '{0:<9} {1:>7} used | {2:>7} available - {3:>6}% utilization\n'
            report_string_formatted = report_string.format(
                resource.tag, used_resources[resource.tag],
                available_resources[resource.tag], utilization_percentage)
            error_message += report_string_formatted
            depleted_resources = True

    if not args.disable_utilization_check and depleted_resources:
        msg.error(error_message.rstrip())
Example 11
    def check_hardware_runtime_args(self, args, num_accs):
        def prev_power_of_2(num):
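            # Round num down to the previous power of two; powers of two are returned unchanged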
            if num & (num - 1) != 0:
                num = int(log2(num))
                num = int(pow(2, num))
            return num

        if args.cmdin_subqueue_len is not None and args.cmdin_queue_len is not None:
            msg.error(
                '--cmdin_subqueue_len and --cmdin_queue_len are mutually exclusive'
            )
        if args.cmdout_subqueue_len is not None and args.cmdout_queue_len is not None:
            msg.error(
                '--cmdout_subqueue_len and --cmdout_queue_len are mutually exclusive'
            )

        if args.cmdin_queue_len is not None:
            args.cmdin_subqueue_len = prev_power_of_2(
                int(args.cmdin_queue_len / num_accs))
            msg.info('Setting --cmdin_subqueue_len to {}'.format(
                args.cmdin_subqueue_len))
        elif args.cmdin_subqueue_len is None:
            args.cmdin_subqueue_len = max(
                64, prev_power_of_2(int(1024 / num_accs)))
        if args.cmdout_queue_len is not None:
            args.cmdout_subqueue_len = prev_power_of_2(
                int(args.cmdout_queue_len / num_accs))
            msg.info('Setting --cmdout_subqueue_len to {}'.format(
                args.cmdout_subqueue_len))
        elif args.cmdout_subqueue_len is None:
            args.cmdout_subqueue_len = max(
                64, prev_power_of_2(int(1024 / num_accs)))

        # The subqueue length has to be checked here in the case the user provides the cmdin queue length
        if args.cmdin_subqueue_len < 34:
            msg.warning(
                'Value of --cmdin_subqueue_len={} is less than 34, which is the length of the longest possible command. This design might not work with tasks that have many arguments.'
                .format(args.cmdin_subqueue_len))
        if args.spawnout_queue_len < 79:
            msg.warning(
                'Value of --spawnout_queue_len={} is less than 79, which is the length of the longest possible task. This design might not work if an accelerator creates SMP tasks with many copies, dependencies and/or arguments.'
                .format(args.spawnout_queue_len))
Example 12
def check_requirements():
    global project_backend_path
    global petalinux_build_path
    global petalinux_install_path

    if (not os.path.exists(petalinux_build_path)
            or not os.path.exists(petalinux_install_path)):
        msg.error(
            'PETALINUX_BUILD (' +
            (petalinux_build_path if petalinux_build_path else 'empty') +
            ') or PETALINUX_INSTALL (' +
            (petalinux_install_path if petalinux_install_path else 'empty') +
            ') variables not properly set')
        msg.error('Generation of petalinux boot files failed')

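    # Source the PetaLinux settings.sh in a child bash and dump its environment on exit ('trap env exit');
    # only the relevant variables are imported into this process below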
    env = str(
        subprocess.Popen(
            'bash -c "trap \'env\' exit; source ' + petalinux_install_path +
            '/settings.sh > /dev/null 2>&1"',
            shell=True,
            stdout=subprocess.PIPE).communicate()[0], 'utf-8').strip('\n')

    # NOTE: Only importing some environment variables as there may be complex functions/expansions that
    #       we do not need to handle here
    for line in env.split('\n'):
        splitted = line.split('=', 1)
        if splitted[0] == 'PATH' or splitted[0].find('PETALINUX') != -1:
            os.environ.update(dict([line.split('=', 1)]))

    if not distutils.spawn.find_executable('petalinux-config'):
        msg.error(
            'petalinux commands not found. Please check PETALINUX_INSTALL environment variable'
        )
Example 13
 def check_picos_args(self, args):
     # Validate Picos args
     if (args.picos_dm_hash == 'P_PEARSON'
             and args.picos_hash_t_size != 64):
         msg.error(
             'With P_PEARSON hash function, --picos_hash_t_size must be 64')
     if (args.picos_hash_t_size > args.picos_dm_size):
         msg.error(
             'Invalid --picos_hash_t_size ({}), maximum value is --picos_dm_size ({})'
             .format(args.picos_hash_t_size, args.picos_dm_size))
     if (ceil(log2(args.picos_hash_t_size)) +
             ceil(log2(args.picos_num_dcts)) > 8):
         msg.error(
             'Invalid combination of --picos_hash_t_size and --picos_num_dcts, ceil(log2(args.picos_hash_t_size))+ceil(log2(args.picos_num_dcts)) <= 8'
         )
Example 14
    def check_bitstream_args(self, args):
        # Validate bitstream args
        if (args.slr_slices == 'acc'
                or args.slr_slices == 'all') and args.placement_file is None:
            msg.error(
                '--placement_file argument required when enabling SLR-crossing register slices on accelerators'
            )
        elif (args.floorplanning_constr == 'acc' or args.floorplanning_constr
              == 'all') and args.placement_file is None:
            msg.error(
                '--placement_file argument required when setting floorplanning constraints on accelerators'
            )

        if len(args.bitinfo_note) > 256:
            msg.error(
                'Length of bitInfo note must not exceed 256 ASCII chars')
Example 15
def run_implementation_step(project_args):
    global args
    global board
    global chip_part
    global start_time
    global ait_backend_path
    global project_backend_path

    args = project_args['args']
    board = project_args['board']
    start_time = project_args['start_time']
    project_path = project_args['path']

    chip_part = board.chip_part + ('-' + board.es if
                                   (board.es
                                    and not args.ignore_eng_sample) else '')
    ait_backend_path = ait_path + '/backend/' + args.backend
    project_backend_path = project_path + '/' + args.backend
    project_step_path = project_backend_path + '/scripts/' + script_folder

    # Check if the requirements are met
    check_requirements()

    # Remove old directories used on the implementation step
    shutil.rmtree(project_step_path, ignore_errors=True)

    # Create directories and copy necessary files for implementation step
    shutil.copytree(ait_backend_path + '/scripts/' + script_folder,
                    project_step_path,
                    ignore=shutil.ignore_patterns('*.py*'))

    if os.path.isfile(project_backend_path + '/' + args.name + '/' +
                      args.name + '.xpr'):
        # Enable beta device on Vivado init script
        if board.board_part:
            p = subprocess.Popen(
                'echo "enable_beta_device ' + chip_part +
                '\nset_param board.repoPaths [list ' + project_backend_path +
                '/board/' + board.name + '/board_files]" > ' +
                project_backend_path + '/scripts/Vivado_init.tcl',
                shell=True)
            retval = p.wait()
        else:
            p = subprocess.Popen('echo "enable_beta_device ' + chip_part +
                                 '" > ' + project_backend_path +
                                 '/scripts/Vivado_init.tcl',
                                 shell=True)
            retval = p.wait()

        os.environ['MYVIVADO'] = project_backend_path + '/scripts'

        p = subprocess.Popen(
            'vivado -nojournal -nolog -notrace -mode batch -source ' +
            project_step_path + '/implement_design.tcl',
            cwd=project_backend_path + '/scripts/',
            stdout=sys.stdout.subprocess,
            stderr=sys.stdout.subprocess,
            shell=True)

        if args.verbose:
            for line in iter(p.stdout.readline, b''):
                sys.stdout.write(line.decode('utf-8'))

        retval = p.wait()
        del os.environ['MYVIVADO']
        if retval:
            msg.error('Hardware implementation failed', start_time, False)
        else:
            msg.success('Hardware implemented')
    else:
        msg.error(
            'No Vivado .xpr file exists for the current project. Hardware implementation failed'
        )
Example 16
def generate_Vivado_variables_tcl():
    global accs
    global args

    vivado_project_variables = '# File automatically generated by the Accelerator Integration Tool. Edit at your own risk.\n' \
                               + '\n' \
                               + '## AIT messages procedures\n' \
                               + '# Error\n' \
                               + 'proc aitError {msg} {\n' \
                               + '   puts "\[AIT\] ERROR: $msg"\n' \
                               + '   exit 1\n' \
                               + '}\n' \
                               + '# Warning\n' \
                               + 'proc aitWarning {msg} {\n' \
                               + '   puts "\[AIT\] WARNING: $msg"\n' \
                               + '}\n' \
                               + '\n' \
                               + '# Info\n' \
                               + 'proc aitInfo {msg} {\n' \
                               + '   puts "\[AIT\] INFO: $msg"\n' \
                               + '}\n' \
                               + '\n' \
                               + '# Log\n' \
                               + 'proc aitLog {msg} {\n' \
                               + '   puts "\[AIT\]: $msg"\n' \
                               + '}\n' \
                               + '\n' \
                               + '# Paths\n' \
                               + 'variable path_Project ' + os.path.relpath(project_backend_path, project_backend_path + '/scripts') + '\n' \
                               + 'variable path_Repo ' + os.path.relpath(project_backend_path + '/HLS/', project_backend_path + '/scripts') + '\n' \
                               + '\n' \
                               + '# Project variables\n' \
                               + 'variable name_Project ' + args.name + '\n' \
                               + 'variable name_Design ' + args.name + '_design\n' \
                               + 'variable target_lang ' + args.target_language + '\n' \
                               + 'variable num_accs ' + str(num_instances) + '\n' \
                               + 'variable num_acc_creators ' + str(num_acc_creators) + '\n' \
                               + 'variable num_jobs ' + str(args.jobs) + '\n' \
                               + 'variable ait_call "' + str(re.escape(os.path.basename(sys.argv[0]) + ' ' + ' '.join(sys.argv[1:]))) + '"\n' \
                               + 'variable bitInfo_note ' + str(re.escape(args.bitinfo_note)) + '\n' \
                               + 'variable version_major_ait ' + str(VERSION_MAJOR) + '\n' \
                               + 'variable version_minor_ait ' + str(VERSION_MINOR) + '\n' \
                               + 'variable version_bitInfo ' + str(BITINFO_VERSION).lower() + '\n' \
                               + 'variable version_wrapper ' + (str(args.wrapper_version).lower() if args.wrapper_version else '0') + '\n' \
                               + '\n' \
                               + '# IP caching variables\n' \
                               + 'variable IP_caching ' + str(not args.disable_IP_caching).lower() + '\n'

    if not args.disable_IP_caching:
        vivado_project_variables += 'variable path_CacheLocation ' + os.path.realpath(args.IP_cache_location) + '\n'

    regslice_all = '0'
    regslice_mem = '0'
    regslice_hwruntime = '0'
    if args.interconnect_regslice is not None:
        for opt in args.interconnect_regslice:
            if opt == 'all':
                regslice_all = '1'
            elif opt == 'mem':
                regslice_mem = '1'
            elif opt == 'hwruntime':
                regslice_hwruntime = '1'

    vivado_project_variables += '\n' \
                                + '# Bitstream variables\n' \
                                + 'variable interconOpt ' + str(args.interconnect_opt + 1) + '\n' \
                                + 'variable debugInterfaces ' + str(args.debug_intfs) + '\n' \
                                + 'variable interconRegSlice_all ' + regslice_all + '\n' \
                                + 'variable interconRegSlice_mem ' + regslice_mem + '\n' \
                                + 'variable interconRegSlice_hwruntime ' + regslice_hwruntime + '\n' \
                                + 'variable interleaving_stride ' + (hex(args.memory_interleaving_stride) if args.memory_interleaving_stride is not None else str(args.memory_interleaving_stride)) + '\n'\
                                + 'variable simplify_interconnection ' + str(args.simplify_interconnection).lower() + '\n' \
                                + 'variable floorplanning_constr ' + str(args.floorplanning_constr) + '\n' \
                                + 'variable slr_slices ' + str(args.slr_slices) + '\n' \
                                + '\n' \
                                + '# ' + board.name + ' board variables\n' \
                                + 'variable board ' + board.name + '\n' \
                                + 'variable chipPart ' + chip_part + '\n' \
                                + 'variable clockFreq ' + str(args.clock) + '\n' \
                                + 'variable arch_device ' + board.arch.device + '\n'

    if args.slr_slices is not None or args.floorplanning_constr is not None:
        vivado_project_variables += 'variable board_slr_num ' + str(board.arch.slr.num) + '\n' \
                                    + 'variable board_slr_master ' + str(board.arch.slr.master) + '\n'

    vivado_project_variables += 'variable address_map [dict create]\n' \
                                + 'dict set address_map "ompss_base_addr" ' + board.address_map.ompss_base_addr + '\n' \
                                + 'dict set address_map "mem_base_addr" ' + board.address_map.mem_base_addr + '\n' \
                                + 'dict set address_map "mem_type" ' + board.mem.type + '\n'

    if board.arch.device == 'zynq' or board.arch.device == 'zynqmp':
        vivado_project_variables += 'dict set address_map "mem_size" ' + hex(decimalFromHumanReadable(board.mem.size)) + '\n'
    elif board.arch.device == 'alveo':
        vivado_project_variables += 'dict set address_map "mem_num_banks" ' + str(board.mem.num_banks) + '\n' \
                                    + 'dict set address_map "mem_bank_size" ' + hex(decimalFromHumanReadable(board.mem.bank_size)) + '\n'

    if board.board_part:
        vivado_project_variables += '\n' \
                                    + 'variable boardPart [list ' + ' '.join(board.board_part) + ']\n'
    vivado_project_variables += '\n' \
                                + '# Hardware Instrumentation variables\n' \
                                + 'variable hwcounter ' + str(args.hwcounter) + '\n' \
                                + 'variable hwinst ' + str(args.hwinst) + '\n'

    vivado_project_variables += '\n' \
                                + '# HW runtime variables\n' \
                                + 'variable hwruntime ' + str(args.hwruntime) + '\n' \
                                + 'variable extended_hwruntime ' + str(args.extended_hwruntime) + '\n' \
                                + 'variable lock_hwruntime ' + str(args.lock_hwruntime) + '\n' \
                                + 'variable cmdInSubqueue_len ' + str(args.cmdin_subqueue_len) + '\n' \
                                + 'variable cmdOutSubqueue_len ' + str(args.cmdout_subqueue_len) + '\n' \
                                + 'variable spawnInQueue_len ' + str(args.spawnin_queue_len) + '\n' \
                                + 'variable spawnOutQueue_len ' + str(args.spawnout_queue_len) + '\n' \
                                + 'variable hwruntime_interconnect ' + str(args.hwruntime_interconnect) + '\n' \
                                + 'variable enable_spawn_queues ' + str(not args.disable_spawn_queues) + '\n'

    vivado_project_variables += '\n' \
                                + '# List of accelerators\n' \
                                + 'set accs [list'

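    # Each accelerator is encoded in the Tcl list as 'type:num_instances:name'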
    for acc in accs[0:num_accs]:
        acc_name = str(acc.type) + ':' + str(acc.num_instances) + ':' + acc.name

        vivado_project_variables += ' ' + acc_name

    vivado_project_variables += ']\n'

    # Generate acc instance list with SLR info
    # Placement info is only needed for registers, constraints are dumped into constraint file
    if (args.slr_slices == 'acc') or (args.slr_slices == 'all'):
        acc_pl_dict = 'set acc_placement [dict create '
        for acc in accs[0:num_accs]:
            acc_pl_dict += ' ' + str(acc.type) + ' [list'
            for slrnum in acc.SLR:
                acc_pl_dict += ' ' + str(slrnum)
            acc_pl_dict += ']'
        acc_pl_dict += ']'
        vivado_project_variables += acc_pl_dict + '\n'

    # Generate acc constraint file
    if (args.floorplanning_constr == 'acc') or (args.floorplanning_constr == 'all'):
        accConstrFiles = open(f'{project_backend_path}/board/{board.name}/constraints/acc_floorplan.xdc', 'w')
        for acc in accs[0:num_accs]:
            # Instantiate each accelerator with a single instance and placement info
            for instanceNumber in range(acc.num_instances):
                accBlock = f'{acc.name}_{instanceNumber}'
                accConstrFiles.write(f'add_cells_to_pblock [get_pblocks slr{acc.SLR[instanceNumber]}_pblock] '
                                     + '[get_cells {'
                                     + f'*/{accBlock}/Adapter_outStream '
                                     + f'*/{accBlock}/Adapter_inStream '
                                     + f'*/{accBlock}/accID '
                                     + f'*/{accBlock}/{acc.name}_ompss'
                                     + '}]\n')
        accConstrFiles.close()

    if args.hwruntime == 'pom':
        picos_args_hash = '{}-{}-{}-{}-{}-{}-{}-{}-{}-{}'.format(
                          args.picos_max_args_per_task,
                          args.picos_max_deps_per_task,
                          args.picos_max_copies_per_task,
                          args.picos_num_dcts,
                          args.picos_tm_size,
                          args.picos_dm_size,
                          args.picos_vm_size,
                          args.picos_dm_ds,
                          args.picos_dm_hash,
                          args.picos_hash_t_size)
        vivado_project_variables += '\n' \
                                    + '# Picos parameter hash\n' \
                                    + 'variable picos_args_hash {}'.format(picos_args_hash)

    if args.datainterfaces_map and os.path.exists(args.datainterfaces_map):
        if args.verbose_info:
            msg.log('Parsing user data interfaces map: ' + args.datainterfaces_map)

        vivado_project_variables += '\n' \
                                    + '# List of datainterfaces map\n' \
                                    + 'set dataInterfaces_map [list'

        with open(args.datainterfaces_map) as map_file:
            map_data = map_file.readlines()
            for map_line in map_data:
                elems = map_line.strip().replace('\n', '').split('\t')
                if len(elems) >= 2 and len(elems[0]) > 0 and elems[0][0] != '#':
                    vivado_project_variables += ' {' + elems[0] + ' ' + elems[1] + '}'

        vivado_project_variables += ']\n'
    elif args.datainterfaces_map:
        msg.error('User data interfaces map not found: ' + args.datainterfaces_map)
    else:
        vivado_project_variables += '\n' \
                                    + '# List of datainterfaces map\n' \
                                    + 'set dataInterfaces_map [list]\n'

    if args.debug_intfs == 'custom' and os.path.exists(args.debug_intfs_list):
        if args.verbose_info:
            msg.log('Parsing user-defined interfaces to debug: ' + args.debug_intfs_list)

        vivado_project_variables += '\n' \
                                    + '# List of debugInterfaces list\n' \
                                    + 'set debugInterfaces_list [list'

        with open(args.debug_intfs_list) as map_file:
            map_data = map_file.readlines()
            for map_line in map_data:
                elems = map_line.strip().replace('\n', '')
                if len(elems) > 0 and elems[0] != '#':
                    vivado_project_variables += ' ' + str(elems)

        vivado_project_variables += ']\n'
    elif args.debug_intfs == 'custom':
        msg.error('User-defined interfaces to debug file not found: ' + args.debug_intfs_list)

    vivado_project_variables_file = open(project_backend_path + '/scripts/projectVariables.tcl', 'w')
    vivado_project_variables_file.write(vivado_project_variables)
    vivado_project_variables_file.close()
Example 17
def run_bitstream_step(project_args):
    global args
    global board
    global chip_part
    global start_time
    global ait_backend_path
    global project_backend_path

    args = project_args['args']
    board = project_args['board']
    start_time = project_args['start_time']
    project_path = project_args['path']

    chip_part = board.chip_part + ('-' + board.es if
                                   (board.es
                                    and not args.ignore_eng_sample) else '')
    ait_backend_path = ait_path + '/backend/' + args.backend
    project_backend_path = project_path + '/' + args.backend
    project_step_path = project_backend_path + '/scripts/' + script_folder

    # Check if the requirements are met
    check_requirements()

    # Remove old directories used on the bitstream step
    shutil.rmtree(project_step_path, ignore_errors=True)

    # Create directories and copy necessary files for bitstream step
    shutil.copytree(ait_backend_path + '/scripts/' + script_folder,
                    project_step_path,
                    ignore=shutil.ignore_patterns('*.py*'))

    if os.path.isfile(project_backend_path + '/' + args.name + '/' +
                      args.name + '.xpr'):
        # Enable beta device on Vivado init script
        if board.board_part:
            p = subprocess.Popen(
                'echo "enable_beta_device ' + chip_part +
                '\nset_param board.repoPaths [list ' + project_backend_path +
                '/board/' + board.name + '/board_files]" > ' +
                project_backend_path + '/scripts/Vivado_init.tcl',
                shell=True)
            retval = p.wait()
        else:
            p = subprocess.Popen('echo "enable_beta_device ' + chip_part +
                                 '" > ' + project_backend_path +
                                 '/scripts/Vivado_init.tcl',
                                 shell=True)
            retval = p.wait()

        os.environ['MYVIVADO'] = project_backend_path + '/scripts'

        p = subprocess.Popen(
            'vivado -nojournal -nolog -notrace -mode batch -source ' +
            project_step_path + '/generate_bitstream.tcl',
            cwd=project_backend_path + '/scripts/',
            stdout=sys.stdout.subprocess,
            stderr=sys.stdout.subprocess,
            shell=True)

        if args.verbose:
            for line in iter(p.stdout.readline, b''):
                sys.stdout.write(line.decode('utf-8'))

        retval = p.wait()
        del os.environ['MYVIVADO']
        if retval:
            msg.error('Bitstream generation failed', start_time, False)
        else:
            if board.arch.device == 'zynq':
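                # Write a minimal .bif and run bootgen to convert the bitstream into a .bit.bin
                # (used to program the PL from Linux)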
                bif_file = open(
                    project_backend_path + '/' + args.name + '/' + args.name +
                    '.runs/impl_1/bitstream.bif', 'w')
                bif_file.write('all:\n' + '{\n' + '\t' + args.name +
                               '_design_wrapper.bit\n' + '}')
                bif_file.close()
                p = subprocess.Popen(
                    'bootgen -image bitstream.bif -arch zynq -process_bitstream bin -w',
                    cwd=project_backend_path + '/' + args.name + '/' +
                    args.name + '.runs/impl_1',
                    stdout=sys.stdout.subprocess,
                    stderr=sys.stdout.subprocess,
                    shell=True)

                if args.verbose:
                    for line in iter(p.stdout.readline, b''):
                        sys.stdout.write(line.decode('utf-8'))

                retval = p.wait()
                if retval:
                    msg.warning('Could not create .bit.bin file')
                else:
                    shutil.copy2(
                        glob.glob(project_backend_path + '/' + args.name +
                                  '/' + args.name + '.runs/impl_1/' +
                                  args.name + '*.bit.bin')[0],
                        project_path + '/' + args.name + '.bit.bin')

            shutil.copy2(
                glob.glob(project_backend_path + '/' + args.name + '/' +
                          args.name + '.runs/impl_1/' + args.name +
                          '*.bit')[0], project_path + '/' + args.name + '.bit')
            shutil.copy2(
                glob.glob(project_backend_path + '/' + args.name + '/' +
                          args.name + '.runs/impl_1/' + args.name +
                          '*.bin')[0], project_path + '/' + args.name + '.bin')
            gen_utilization_report(project_path + '/' + args.name +
                                   '.resources-impl.txt')
            gen_wns_report(project_path + '/' + args.name + '.timing-impl.txt')
            msg.success('Bitstream generated')
    else:
        msg.error(
            'No Vivado .xpr file exists for the current project. Bitstream generation failed'
        )
Example 18
def run_design_step(project_args):
    global args
    global board
    global accs
    global chip_part
    global start_time
    global num_accs
    global num_instances
    global num_acc_creators
    global ait_backend_path
    global project_backend_path

    args = project_args['args']
    board = project_args['board']
    accs = project_args['accs']
    start_time = project_args['start_time']
    num_accs = project_args['num_accs']
    num_instances = project_args['num_instances']
    num_acc_creators = project_args['num_acc_creators']
    project_path = project_args['path']

    chip_part = board.chip_part + ('-' + board.es if (board.es and not args.ignore_eng_sample) else '')
    ait_backend_path = ait_path + '/backend/' + args.backend
    project_backend_path = project_path + '/' + args.backend
    project_step_path = project_backend_path + '/scripts/' + script_folder

    # Check if the requirements are met
    check_requirements()

    # Load accelerator placement info
    load_acc_placement(accs, args)

    # Remove old directories used on the design step
    shutil.rmtree(project_step_path, ignore_errors=True)
    shutil.rmtree(project_backend_path + '/board', ignore_errors=True)
    shutil.rmtree(project_backend_path + '/IPs', ignore_errors=True)
    shutil.rmtree(project_backend_path + '/templates', ignore_errors=True)

    # Create directories and copy necessary files for design step
    shutil.copytree(ait_backend_path + '/scripts/' + script_folder, project_step_path, ignore=shutil.ignore_patterns('*.py*'))
    shutil.copytree(ait_backend_path + '/board/' + board.name, project_backend_path + '/board/' + board.name)

    os.makedirs(project_backend_path + '/IPs')
    os.makedirs(project_backend_path + '/templates')
    shutil.copy2(ait_backend_path + '/templates/dummy_acc.tcl', project_backend_path + '/templates')

    ip_list = [ip for ip in os.listdir(ait_backend_path + '/IPs/') if re.search(r'.*\.(zip|v|vhdl)$', ip)]
    for ip in ip_list:
        shutil.copy2(ait_backend_path + '/IPs/' + ip, project_backend_path + '/IPs')

    for template in glob.glob(ait_backend_path + '/templates/hwruntime/' + args.hwruntime
                              + '/' + ('extended/' if args.extended_hwruntime else '')
                              + '*.tcl'):
        shutil.copy2(template, project_backend_path + '/templates')

    for ipdef in glob.glob(ait_backend_path + '/IPs/hwruntime/' + args.hwruntime + '/*.zip'):
        shutil.copy2(ipdef, project_backend_path + '/IPs')

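    # Patch the addrInterleaver IP in place: enable the interleaver and set its address width to the board's memory address width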
    if args.memory_interleaving_stride is not None:
        subprocess.check_output(['sed -i "s/\`undef __ENABLE__/\`define __ENABLE__/" ' + project_backend_path + '/IPs/addrInterleaver.v'], shell=True)
        subprocess.check_output(['sed -i "s/\`define __WIDTH__ 64/\`define __WIDTH__ ' + str(board.mem.addr_width) + '/" ' + project_backend_path + '/IPs/addrInterleaver.v'], shell=True)

    if args.user_constraints and os.path.exists(args.user_constraints):
        constraints_path = project_backend_path + '/board/' + board.name + '/constraints'
        if not os.path.exists(constraints_path):
            os.mkdir(constraints_path)
        if args.verbose_info:
            msg.log('Adding user constraints file: ' + args.user_constraints)
        shutil.copy2(args.user_constraints, constraints_path + '/')
    elif args.user_constraints:
        msg.error('User constraints file not found: ' + args.user_constraints)

    if args.user_pre_design and os.path.exists(args.user_pre_design):
        user_pre_design_ext = args.user_pre_design.split('.')[-1] if len(args.user_pre_design.split('.')) > 1 else ''
        if user_pre_design_ext != 'tcl':
            msg.error('Invalid extension for PRE design TCL script: ' + args.user_pre_design)
        elif args.verbose_info:
            msg.log('Adding pre design user script: ' + args.user_pre_design)
        shutil.copy2(args.user_pre_design, project_step_path + '/userPreDesign.tcl')
    elif args.user_pre_design:
        msg.error('User PRE design TCL script not found: ' + args.user_pre_design)

    if args.user_post_design and os.path.exists(args.user_post_design):
        user_post_design_ext = args.user_post_design.split('.')[-1] if len(args.user_post_design.split('.')) > 1 else ''
        if user_post_design_ext != 'tcl':
            msg.error('Invalid extension for POST design TCL script: ' + args.user_post_design)
        elif args.verbose_info:
            msg.log('Adding post design user script: ' + args.user_post_design)
        shutil.copy2(args.user_post_design, project_step_path + '/userPostDesign.tcl')
    elif args.user_post_design:
        msg.error('User POST design TCL script not found: ' + args.user_post_design)

    # Generate tcl file with project variables
    generate_Vivado_variables_tcl()

    # Enable beta device on Vivado init script
    if os.path.exists(project_backend_path + '/board/' + board.name + '/board_files'):
        p = subprocess.Popen('echo "enable_beta_device ' + chip_part + '\nset_param board.repoPaths [list '
                             + project_backend_path + '/board/' + board.name + '/board_files]" > '
                             + project_backend_path + '/scripts/Vivado_init.tcl', shell=True)
        retval = p.wait()
    else:
        p = subprocess.Popen('echo "enable_beta_device ' + chip_part + '" > '
                             + project_backend_path + '/scripts/Vivado_init.tcl', shell=True)
        retval = p.wait()

    os.environ['MYVIVADO'] = project_backend_path

    p = subprocess.Popen('vivado -nojournal -nolog -notrace -mode batch -source '
                         + project_step_path + '/generate_design.tcl',
                         cwd=project_backend_path + '/scripts', stdout=sys.stdout.subprocess,
                         stderr=sys.stdout.subprocess, shell=True)

    if args.verbose:
        for line in iter(p.stdout.readline, b''):
            sys.stdout.write(line.decode('utf-8'))

    retval = p.wait()
    del os.environ['MYVIVADO']
    if retval:
        msg.error('Block Design generation failed', start_time, False)
    else:
        msg.success('Block Design generated')

    if (args.hwruntime == 'pom'):
        regex_strings = [
            (r'MAX_ARGS_PER_TASK = [0-9]*', 'MAX_ARGS_PER_TASK = {}'.format(args.picos_max_args_per_task)),
            (r'MAX_DEPS_PER_TASK = [0-9]*', 'MAX_DEPS_PER_TASK = {}'.format(args.picos_max_deps_per_task)),
            (r'MAX_COPIES_PER_TASK = [0-9]*', 'MAX_COPIES_PER_TASK = {}'.format(args.picos_max_copies_per_task)),
            (r'NUM_DCTS = [0-9]*', 'NUM_DCTS = {}'.format(args.picos_num_dcts)),
            (r'TM_SIZE = [0-9]*', 'TM_SIZE = {}'.format(args.picos_tm_size)),
            (r'DM_SIZE = [0-9]*', 'DM_SIZE = {}'.format(args.picos_dm_size)),
            (r'VM_SIZE = [0-9]*', 'VM_SIZE = {}'.format(args.picos_vm_size)),
            (r'DM_DS = "[a-zA-Z_]*"', 'DM_DS = "{}"'.format(args.picos_dm_ds)),
            (r'DM_HASH = "[a-zA-Z_]*"', 'DM_HASH = "{}"'.format(args.picos_dm_hash)),
            (r'HASH_T_SIZE = [0-9]*', 'HASH_T_SIZE = {}'.format(args.picos_hash_t_size))]

        config_file_path = glob.glob(project_backend_path + '/IPs/bsc_ompss_picosompssmanager_*/')[0] + 'src/config.sv'

        with open(config_file_path, 'r') as config_file:
            config_str = config_file.read()
        for regex_str in regex_strings:
            config_str = re.sub(regex_str[0], regex_str[1], config_str, count=1)
        with open(config_file_path, 'w') as config_file:
            config_file.write(config_str)
Example 19
def check_requirements():
    if not distutils.spawn.find_executable('vivado'):
        msg.error('vivado not found. Please set PATH correctly')
Example 20
def get_accelerators(project_path):
    global accs
    global num_accs
    global num_instances
    global num_acc_creators

    if args.verbose_info:
        msg.log('Searching accelerators in folder: ' + os.getcwd())

    accs = []
    acc_types = []
    acc_names = []
    num_accs = 0
    num_instances = 0
    num_acc_creators = 0
    args.extended_hwruntime = False  # Can't be enabled if no accelerator requires it
    args.lock_hwruntime = False  # Will not be enabled if no accelerator requires it

    for file_ in sorted(glob.glob(os.getcwd() + '/ait_*.json')):
        acc_config_json = json.load(open(file_))
        for acc_config in acc_config_json:
            acc = Accelerator(acc_config)

            if not re.match('^[A-Za-z][A-Za-z0-9_]*$', acc.name):
                msg.error(
                    '\'' + acc.name +
                    '\' is an invalid accelerator name. Must start with a letter and contain only letters, numbers or underscores'
                )

            msg.info('Found accelerator \'' + acc.name + '\'')

            num_accs += 1
            num_instances += acc.num_instances

            if acc.type in acc_types:
                msg.error('Two accelerators use the same type: \'' +
                          str(acc.type) +
                          '\' (maybe you should use the onto clause)')
            elif acc.name in acc_names:
                msg.error(
                    'Two accelerators use the same name: \'' + str(acc.name) +
                    '\' (maybe you should change the fpga task definition)')
            acc_types.append(acc.type)
            acc_names.append(acc.name)

            # Check if the acc is a task creator
            if acc.task_creation:
                args.extended_hwruntime = True
                num_acc_creators += acc.num_instances
                accs.insert(0, acc)
            else:
                accs.append(acc)

            # Check if the acc needs instrumentation support
            if acc.instrumentation:
                args.hwinst = True

            # Check if the acc needs lock support
            if acc.lock:
                args.lock_hwruntime = True

    if num_accs == 0:
        msg.error('No accelerators found')

    # Generate the .xtasks.config file
    xtasks_config_file = open(
        project_path + '/' + args.name + '.xtasks.config', 'w')
    xtasks_config = 'type\t#ins\tname\t    \n'
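    # Each entry: type zero-padded to 19 digits, instance count to 3 digits, name padded/truncated to 31 chars, plus a trailing '000' field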
    for acc in accs:
        xtasks_config += str(acc.type).zfill(19) + '\t' + str(
            acc.num_instances).zfill(3) + '\t' + acc.name.ljust(
                31)[:31] + '\t000\n'
    xtasks_config_file.write(xtasks_config)
    xtasks_config_file.close()

    if args.hwinst:
        hwinst_acc_json_string = json.dumps(
            {
                'full_path': ait_path + '/backend/' + args.backend +
                '/HLS/src/Adapter_instr.cpp',
                'filename': 'Adapter_instr.cpp',
                'name': 'Adapter_instr',
                'type': 0,
                'num_instances': 1,
                'task_creation': 'false',
                'instrumentation': 'false',
                'periodic': 'false',
                'lock': 'false'
            },
            indent=4)
        hwinst_acc_json = json.loads(hwinst_acc_json_string)
        hwinst_acc = Accelerator(hwinst_acc_json)
        accs.append(hwinst_acc)
Example 21
def run_boot_step(project_args):
    global start_time
    global project_backend_path
    global petalinux_build_path
    global petalinux_install_path

    start_time = project_args['start_time']
    project_path = project_args['path']
    board = project_args['board']
    args = project_args['args']

    project_backend_path = project_path + '/' + args.backend
    project_step_path = project_backend_path + '/scripts/' + script_folder
    ait_backend_path = ait_path + '/backend/' + args.backend

    petalinux_build_path = os.path.realpath(
        os.getenv('PETALINUX_BUILD')) if os.getenv('PETALINUX_BUILD') else ''
    petalinux_install_path = os.path.realpath(os.getenv(
        'PETALINUX_INSTALL')) if os.getenv('PETALINUX_INSTALL') else ''

    check_requirements()

    # During the execution of this step disable the Vivado_init.tcl script
    disable_init_scripts()

    path_hdf = project_backend_path + '/' + args.name + '/' + args.name + '.sdk/'

    if os.path.exists(petalinux_install_path + '/.version-history'):
        # Seems to be petalinux 2019.1 or later (may match other untested versions)
        command = 'petalinux-config --silentconfig --get-hw-description=' + path_hdf
    else:
        # Seems to be petalinux 2018.3 or previous (may match other untested versions)
        command = 'petalinux-config --oldconfig --get-hw-description=' + path_hdf

    if args.verbose_info:
        msg.log('> ' + command)
    p = subprocess.Popen(command,
                         stdout=sys.stdout.subprocess,
                         stderr=sys.stdout.subprocess,
                         cwd=petalinux_build_path,
                         shell=True)

    if args.verbose:
        for line in iter(p.stdout.readline, b''):
            sys.stdout.write(line.decode('utf-8'))

    retval = p.wait()
    if retval:
        restore_init_scripts()
        msg.error('Generation of petalinux boot files failed', start_time,
                  False)

    if os.path.exists(petalinux_build_path +
                      '/subsystems/linux/configs/device-tree/'):
        # Seems to be petalinux 2016.3 (may match other untested versions)
        if args.verbose_info:
            msg.log('Fixing devicetree (2016.3 mode)')

        petalinux_build_dts_path = petalinux_build_path + '/subsystems/linux/configs/device-tree/'
        shutil.copy2(
            project_backend_path + '/' + args.name + '/pl_ompss_at_fpga.dtsi',
            petalinux_build_dts_path)

        content_dtsi = None
        with open(petalinux_build_dts_path + '/system-conf.dtsi', 'r') as file:
            content_dtsi = file.read().splitlines()

        line = [
            idx for idx in range(len(content_dtsi))
            if content_dtsi[idx].find('/include/ "pl.dtsi"') != -1
        ]
        content_dtsi.insert(line[0] + 1, '/include/ \"pl_ompss_at_fpga.dtsi\"')

        board_dtsi_fix_file = project_backend_path + '/board/' + board.name + '/' + board.name + '_boot.dtsi'
        if os.path.exists(board_dtsi_fix_file):
            shutil.copy2(board_dtsi_fix_file, petalinux_build_dts_path)
            content_dtsi.insert(line[0] + 2,
                                '/include/ \"' + board.name + '_boot.dtsi\"')

        with open(petalinux_build_dts_path + '/system-conf.dtsi', 'w') as file:
            file.write('\n'.join(content_dtsi))

        command = 'petalinux-build -c bootloader -x mrproper'
        if args.verbose_info:
            msg.log('> ' + command)
        p = subprocess.Popen(command,
                             stdout=sys.stdout.subprocess,
                             stderr=sys.stdout.subprocess,
                             cwd=petalinux_build_path,
                             shell=True)
        if args.verbose:
            for line in iter(p.stdout.readline, b''):
                sys.stdout.write(line.decode('utf-8'))

        retval = p.wait()
        if retval:
            restore_init_scripts()
            msg.error('Generation of petalinux boot files failed', start_time,
                      False)
    elif os.path.exists(
            petalinux_build_path +
            '/project-spec/meta-user/recipes-bsp/device-tree/files/'):
        # Seems to be petalinux 2018.3 or 2019.1 (may match other untested versions)
        if args.verbose_info:
            msg.log('Fixing devicetree (2018.3 mode)')

        petalinux_build_dts_path = petalinux_build_path + '/project-spec/meta-user/recipes-bsp/device-tree/files/'

        content_dtsi = None
        with open(petalinux_build_dts_path + '/system-user.dtsi', 'r') as file:
            content_dtsi = file.read().splitlines()

        # Remove old includes to pl_bsc.dtsi and insert the new one
        line = [
            idx for idx in range(len(content_dtsi))
            if content_dtsi[idx].find('pl_bsc.dtsi') != -1
        ]
        if len(line) == 1:
            content_dtsi.pop(line[0])
        elif len(line) > 1:
            restore_init_scripts()
            msg.error(
                'Uncontrolled path in run_boot_step: more than 1 line of system-user.dtsi contains pl_bsc.dtsi'
            )

        # Remove old includes to pl_ompss_at_fpga.dtsi and insert the new one
        line = [
            idx for idx in range(len(content_dtsi))
            if content_dtsi[idx].find('pl_ompss_at_fpga.dtsi') != -1
        ]
        if len(line) == 1:
            content_dtsi.pop(line[0])
        elif len(line) > 1:
            restore_init_scripts()
            msg.error(
                'Uncontrolled path in run_boot_step: more than 1 line of system-user.dtsi contains pl_ompss_at_fpga.dtsi'
            )

        line = [
            idx for idx in range(len(content_dtsi))
            if content_dtsi[idx].find('pl_ompss_at_fpga.dtsi') != -1
        ]
        content_dtsi.insert(
            len(content_dtsi), '/include/ \"' + project_backend_path + '/' +
            args.name + '/pl_ompss_at_fpga.dtsi' + '\"')

        # Remove old includes to <board>_boot.dtsi and insert the new one
        line = [
            idx for idx in range(len(content_dtsi))
            if content_dtsi[idx].find(board.name + '_boot.dtsi') != -1
        ]
        if len(line) == 1:
            content_dtsi.pop(line[0])
        elif len(line) > 1:
            restore_init_scripts()
            msg.error(
                'Uncontrolled path in run_boot_step: more than 1 line of system-user.dtsi contains <board>_boot.dtsi'
            )

        board_dtsi_fix_file = project_backend_path + '/board/' + board.name + '/' + board.name + '_boot.dtsi'
        if os.path.exists(board_dtsi_fix_file):
            shutil.copy2(board_dtsi_fix_file, petalinux_build_dts_path)
            content_dtsi.insert(len(content_dtsi),
                                '/include/ \"' + board_dtsi_fix_file + '\"')

        with open(petalinux_build_dts_path + '/system-user.dtsi', 'w') as file:
            file.write('\n'.join(content_dtsi))
    else:
        msg.warning(
            'Devicetree fix failed. Petalinux version cannot be determined. Continuing anyway...'
        )

    command = 'petalinux-build'
    if args.verbose_info:
        msg.log('> ' + command)
    p = subprocess.Popen(command,
                         stdout=sys.stdout.subprocess,
                         stderr=sys.stdout.subprocess,
                         cwd=petalinux_build_path,
                         shell=True)
    if args.verbose:
        for line in iter(p.stdout.readline, b''):
            sys.stdout.write(line.decode('utf-8'))

    retval = p.wait()
    if retval:
        restore_init_scripts()
        msg.error('Generation of petalinux boot files failed', start_time,
                  False)

    path_bit = project_path + '/' + args.name + '.bit'
    command = 'petalinux-package --force --boot --fsbl ./images/linux/*_fsbl.elf'
    command += ' --fpga ' + path_bit + ' --u-boot ./images/linux/u-boot.elf'
    if args.verbose_info:
        msg.log('> ' + command)
    p = subprocess.Popen(command,
                         stdout=sys.stdout.subprocess,
                         stderr=sys.stdout.subprocess,
                         cwd=petalinux_build_path,
                         shell=True)
    if args.verbose:
        for line in iter(p.stdout.readline, b''):
            sys.stdout.write(line.decode('utf-8'))

    retval = p.wait()
    if retval:
        msg.error('Generation of petalinux boot files failed', start_time,
                  False)
    else:
        shutil.copy2(petalinux_build_path + '/images/linux/BOOT.BIN',
                     project_path)
        shutil.copy2(petalinux_build_path + '/images/linux/image.ub',
                     project_path)
        msg.success('Petalinux boot files generated')

    restore_init_scripts()
Example 22
def ait_main():
    global args

    start_time = time.time()

    args = None

    parser = ArgParser()

    args = parser.parse_args()
    msg.setProjectName(args.name)
    msg.setPrintTime(args.verbose_info)
    msg.setVerbose(args.verbose)

    msg.info('Using ' + args.backend + ' backend')

    board = json.load(open(ait_path + '/backend/' + args.backend + '/board/' +
                           args.board + '/basic_info.json'),
                      object_hook=JSONObject)

    # Check vendor-related board arguments
    parser.vendor_parser[args.backend].check_board_args(args, board)

    if not int(board.frequency.min) <= args.clock <= int(board.frequency.max):
        msg.error('Clock frequency requested (' + str(args.clock) +
                  'MHz) is not within the board range (' +
                  str(board.frequency.min) + '-' + str(board.frequency.max) +
                  'MHz)')

    if (args.slr_slices is not None or args.floorplanning_constr
            is not None) and not hasattr(board.arch, 'slr'):
        msg.error(
            'Use of placement constraints is only available for boards with SLRs'
        )

    project_path = os.path.normpath(
        os.path.realpath(args.dir + '/' + args.name + '_ait'))
    project_backend_path = os.path.normpath(project_path + '/' + args.backend)

    # Add backend to python import path
    sys.path.insert(0, ait_path + '/backend/' + args.backend + '/scripts')

    # Check for backend support for the given board
    if not args.disable_board_support_check:
        check_board_support(board)

    sys.stdout = Logger(project_path)
    sys.stdout.log.write(
        os.path.basename(sys.argv[0]) + ' ' + ' '.join(sys.argv[1:]) + '\n\n')

    get_accelerators(project_path)

    parser.check_hardware_runtime_args(args, max(2, num_instances))

    project_args = {
        'path': os.path.normpath(os.path.realpath(args.dir) + '/' + args.name + '_ait'),
        'num_accs': num_accs,
        'num_instances': num_instances,
        'num_acc_creators': num_acc_creators,
        'accs': accs,
        'board': board,
        'args': args
    }

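    # Run every generation step of the selected backend that lies between --from_step and --to_step (inclusive); the rest are reported as disabled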
    for step in generation_steps[args.backend]:
        if generation_steps[args.backend].index(
                args.from_step) <= generation_steps[args.backend].index(
                    step) <= generation_steps[args.backend].index(
                        args.to_step):
            generation_step_package = os.path.basename(
                os.path.dirname(
                    glob.glob(ait_path + '/backend/' + args.backend +
                              '/scripts/*-' + step + '/')[0]))
            generation_step_module = '%s.%s' % (generation_step_package, step)
            module = importlib.import_module(generation_step_module)
            step_func = getattr(module, 'STEP_FUNC')
            msg.info('Starting \'' + step + '\' step')
            step_start_time = time.time()
            project_args['start_time'] = step_start_time
            step_func(project_args)
            msg.success(
                'Step \'' + step + '\' finished. ' +
                secondsToHumanReadable(int(time.time() - step_start_time)) +
                ' elapsed')
        else:
            msg.warning('Step \'' + step + '\' is disabled')

    msg.success('Accelerator automatic integration finished. ' +
                secondsToHumanReadable(int(time.time() - start_time)) +
                ' elapsed')