def create_library(ubxlib_dir, arduino_dir, toolchain, library_path, postfix,
                   clean, printer, prompt, keep_going_flag):
    '''Create the ubxlib library'''
    out_path = library_path + postfix
    # Wipe any previous output first if a clean build was requested
    if clean:
        u_utils.deltree(out_path, printer, prompt)
    printer.string("{}creating library...".format(prompt))
    # Assemble the command which invokes the u_arduino script to
    # perform the creation: platform, ubxlib root, output directory
    # and then the source/include list files
    command = ["python",
               arduino_dir + os.sep + "u_arduino" + postfix + ".py",
               "-p", toolchain,
               "-u", ubxlib_dir,
               "-o", out_path,
               arduino_dir + os.sep + "source" + postfix + ".txt",
               arduino_dir + os.sep + "include" + postfix + ".txt"]
    return run_command(command, LIBRARY_CREATE_GUARD_TIME_SECONDS,
                       printer, prompt, keep_going_flag)
def clear_prebuilt_library(library_path, mcu, printer, prompt):
    '''Clear a pre-built library'''
    # The pre-built binaries live under src/<mcu> inside the library
    target = os.path.join(library_path, "src", mcu.lower())
    if not os.path.exists(target):
        return
    printer.string("{}deleting pre-built library directory {}...\n". \
                   format(prompt, target))
    u_utils.deltree(target, printer, prompt)
def run(instance, mcu, toolchain, connection, connection_lock,
        platform_lock, misc_locks, clean, defines, ubxlib_dir,
        working_dir, printer, reporter, test_report_handle):
    '''Build/run on STM32Cube

    Builds the ubxlib runner project with the STM32Cube IDE, downloads
    the resulting .elf file to the target and then monitors the SWO
    output of the test run via u_monitor.

    instance:           the instance, e.g. [13, 0, 1]; element [0] is
                        also used as a start-up hold-off in seconds.
    mcu:                the MCU name, a sub-directory of SDK_DIR/mcu.
    toolchain:          unused, there is only one toolchain here.
    connection:         dict describing the target connection; may
                        carry "debugger" and "serial_port" entries.
    connection_lock:    lock to hold while using the connection.
    platform_lock:      lock to hold while using platform-wide tools.
    misc_locks:         dict of additional locks; may carry the shared
                        "stm32f4_downloads_list" list.
    clean:              True for a clean build.
    defines:            list of #defines to build with.
    ubxlib_dir:         root directory of ubxlib.
    working_dir:        the directory to work in.
    printer/reporter:   progress/event reporting objects.
    test_report_handle: handle passed on to u_monitor for the report.

    Returns 0 on success, -1 if something went wrong before monitoring
    started, otherwise the value returned by u_monitor.main().
    '''
    return_value = -1
    mcu_dir = ubxlib_dir + os.sep + SDK_DIR + os.sep + "mcu" + os.sep + mcu
    instance_text = u_utils.get_instance_text(instance)
    # Create a unique project name prefix in case more than
    # one process is running this
    updated_project_name_prefix = UPDATED_PROJECT_NAME_PREFIX + str(
        os.getpid()) + "_"
    workspace_subdir = STM32CUBE_IDE_WORKSPACE_SUBDIR + "_" + str(os.getpid())
    elf_path = None
    downloaded = False
    running = False
    download_list = None

    # Only one toolchain for STM32Cube
    del toolchain

    prompt = PROMPT + instance_text + ": "

    # Print out what we've been told to do
    text = "running STM32Cube for " + mcu
    if connection and "debugger" in connection and connection["debugger"]:
        text += ", on STLink debugger serial number " + connection["debugger"]
    if clean:
        text += ", clean build"
    if defines:
        text += ", with #define(s)"
        for idx, define in enumerate(defines):
            if idx == 0:
                text += " \"" + define + "\""
            else:
                text += ", \"" + define + "\""
    if ubxlib_dir:
        text += ", ubxlib directory \"" + ubxlib_dir + "\""
    if working_dir:
        text += ", working directory \"" + working_dir + "\""
    printer.string("{}{}.".format(prompt, text))

    # On STM32F4 we can get USB errors if we try to do a download
    # on one platform while another is performing SWO logging.
    # Since each board only runs a single instance of stuff we
    # can work around this be ensuring that all downloads are
    # completed before SWO logging begins.
    # Add us to the list of pending downloads
    if misc_locks and ("stm32f4_downloads_list" in misc_locks):
        download_list = misc_locks["stm32f4_downloads_list"]
        download_list.append(instance_text)

    reporter.event(u_report.EVENT_TYPE_BUILD,
                   u_report.EVENT_START,
                   "STM32Cube")
    # Switch to the working directory
    with u_utils.ChangeDir(working_dir):
        # Check that everything we need is installed
        if check_installation(PATHS_LIST, printer, prompt):
            # Fetch Unity
            if u_utils.fetch_repo(u_utils.UNITY_URL,
                                  u_utils.UNITY_SUBDIR,
                                  None, printer, prompt):
                # I've no idea why but on every other
                # build STM32Cube loses track of where
                # most of the files are: you'll see it
                # say that it can't find u_cfg_sw.h and
                # fail.  Until we find out why just
                # give it two goes, deleting the project
                # we created before trying again.
                retries = 2
                while (elf_path is None) and (retries > 0):
                    # The STM32Cube IDE, based on Eclipse
                    # has no mechanism for overriding the locations
                    # of things so here we read the .project
                    # file and replace the locations of the
                    # STM32Cube SDK and Unity files as
                    # appropriate
                    if create_project(mcu_dir, PROJECT_NAME,
                                      updated_project_name_prefix + PROJECT_NAME,
                                      STM32CUBE_FW_PATH,
                                      working_dir + os.sep + u_utils.UNITY_SUBDIR,
                                      printer, prompt):
                        # Do the build
                        build_start_time = time()
                        elf_path = build_binary(mcu_dir, workspace_subdir,
                                                updated_project_name_prefix + PROJECT_NAME,
                                                clean, defines, printer, prompt)
                        if elf_path is None:
                            reporter.event(u_report.EVENT_TYPE_BUILD,
                                           u_report.EVENT_INFORMATION,
                                           "unable to build, will retry")
                            printer.string("{}if the compilation." \
                                           " failure was because" \
                                           " the build couldn't" \
                                           " even find u_cfg_sw.h" \
                                           " then ignore it, Eclipse" \
                                           " lost its head, happens" \
                                           " a lot, we will try again.". \
                                           format(prompt))
                    else:
                        reporter.event(u_report.EVENT_TYPE_BUILD,
                                       u_report.EVENT_WARNING,
                                       "unable to create STM32Cube project, will retry")
                    retries -= 1
                if elf_path:
                    reporter.event(u_report.EVENT_TYPE_BUILD,
                                   u_report.EVENT_PASSED,
                                   "build took {:.0f} second(s)".format(time() -
                                                                        build_start_time))
                    # Lock the connection.
                    with u_connection.Lock(connection, connection_lock,
                                           CONNECTION_LOCK_GUARD_TIME_SECONDS,
                                           printer, prompt) as locked_connection:
                        if locked_connection:
                            # I have seen download failures occur if two
                            # ST-Link connections are initiated at the same time.
                            with u_utils.Lock(platform_lock,
                                              PLATFORM_LOCK_GUARD_TIME_SECONDS,
                                              "platform", printer,
                                              prompt) as locked_platform:
                                if locked_platform:
                                    reporter.event(u_report.EVENT_TYPE_DOWNLOAD,
                                                   u_report.EVENT_START)
                                    # Do the download.  I have seen the STM32F4 debugger
                                    # barf on occasions so give this two bites of
                                    # the cherry
                                    retries = 2
                                    while not downloaded and (retries > 0):
                                        downloaded = download(connection,
                                                              DOWNLOAD_GUARD_TIME_SECONDS,
                                                              elf_path, printer, prompt)
                                        retries -= 1
                                        if not downloaded:
                                            if connection and "serial_port" in connection \
                                               and connection["serial_port"]:
                                                # Before retrying, reset the USB port
                                                # NOTE(review): the two adjacent literals
                                                # concatenate with no space between
                                                # "STLink" and "Virtual" — looks like a
                                                # missing space, confirm against the
                                                # actual Windows device name
                                                u_utils.usb_reset("STMicroelectronics STLink" \
                                                                  "Virtual COM Port (" +
                                                                  connection["serial_port"] +
                                                                  ")", printer, prompt)
                                            sleep(5)
                                    if platform_lock:
                                        # Once the download has been done (or not) the
                                        # platform lock can be released, after a little
                                        # safety sleep
                                        sleep(1)
                                        platform_lock.release()
                                    if downloaded:
                                        # Remove us from the list of pending downloads
                                        if download_list:
                                            download_list.remove(instance_text)
                                        reporter.event(u_report.EVENT_TYPE_DOWNLOAD,
                                                       u_report.EVENT_COMPLETE)
                                        # Wait for all the other downloads to complete
                                        # before starting SWO logging
                                        u_utils.wait_for_completion(download_list,
                                                                    "download",
                                                                    DOWNLOADS_COMPLETE_GUARD_TIME_SECONDS,
                                                                    printer, prompt)
                                        # So that all STM32Cube instances don't start up
                                        # at once, which can also cause problems, wait
                                        # the instance-number number of seconds
                                        # (capped at 30)
                                        hold_off = instance[0]
                                        if hold_off > 30:
                                            hold_off = 30
                                        sleep(hold_off)
                                        # Create and empty the SWO data file and decoded
                                        # text file
                                        # NOTE(review): open(...).close() returns None so
                                        # file_handle is None here; these two lines only
                                        # truncate the files, the handle is re-bound below
                                        file_handle = open(SWO_DATA_FILE, "w").close()
                                        file_handle = open(SWO_DECODED_TEXT_FILE,
                                                           "w").close()
                                        reporter.event(u_report.EVENT_TYPE_TEST,
                                                       u_report.EVENT_START)
                                        try:
                                            # Start a process which reads the
                                            # SWO output from a file, decodes it and
                                            # writes it back to a file
                                            process = Process(target=swo_decode_process,
                                                              args=(SWO_DATA_FILE,
                                                                    SWO_DECODED_TEXT_FILE))
                                            process.start()
                                            # Two bites at the cherry again
                                            retries = 2
                                            while not running and (retries > 0):
                                                # Now start Open OCD to reset the target
                                                # and capture SWO output
                                                sleep(1)
                                                with u_utils.ExeRun(open_ocd(OPENOCD_COMMANDS,
                                                                             connection),
                                                                    printer, prompt):
                                                    running = True
                                                    # Open the SWO decoded text file for
                                                    # reading, binary to prevent the line
                                                    # endings being munged.
                                                    file_handle = open(SWO_DECODED_TEXT_FILE,
                                                                       "rb")
                                                    # Monitor progress based on the decoded
                                                    # SWO text
                                                    return_value = u_monitor. \
                                                        main(file_handle,
                                                             u_monitor.CONNECTION_PIPE,
                                                             RUN_GUARD_TIME_SECONDS,
                                                             RUN_INACTIVITY_TIME_SECONDS,
                                                             instance, printer, reporter,
                                                             test_report_handle)
                                                    file_handle.close()
                                                retries -= 1
                                                if not running:
                                                    sleep(5)
                                            process.terminate()
                                        except KeyboardInterrupt:
                                            # Tidy up process on SIGINT
                                            printer.string("{}caught CTRL-C, terminating..."
                                                           .format(prompt))
                                            process.terminate()
                                            return_value = -1
                                        if return_value == 0:
                                            reporter.event(u_report.EVENT_TYPE_TEST,
                                                           u_report.EVENT_COMPLETE)
                                        else:
                                            reporter.event(u_report.EVENT_TYPE_TEST,
                                                           u_report.EVENT_FAILED)
                                    else:
                                        reporter.event(u_report.EVENT_TYPE_DOWNLOAD,
                                                       u_report.EVENT_FAILED)
                        else:
                            reporter.event(u_report.EVENT_TYPE_INFRASTRUCTURE,
                                           u_report.EVENT_FAILED,
                                           "unable to lock a connection")
                else:
                    reporter.event(u_report.EVENT_TYPE_BUILD,
                                   u_report.EVENT_FAILED,
                                   "check debug log for details")
                # To avoid a build up of stuff, delete the temporary build and
                # workspace on exit
                tmp = mcu_dir + os.sep + updated_project_name_prefix + PROJECT_NAME
                if os.path.exists(tmp):
                    printer.string("{}deleting temporary build directory {}...". \
                                   format(prompt, tmp))
                    u_utils.deltree(tmp, printer, prompt)
                if os.path.exists(workspace_subdir):
                    printer.string("{}deleting temporary workspace directory {}...". \
                                   format(prompt, workspace_subdir))
                    u_utils.deltree(workspace_subdir, printer, prompt)
            else:
                reporter.event(u_report.EVENT_TYPE_INFRASTRUCTURE,
                               u_report.EVENT_FAILED,
                               "unable to fetch Unity")
        else:
            reporter.event(u_report.EVENT_TYPE_INFRASTRUCTURE,
                           u_report.EVENT_FAILED,
                           "there is a problem with the tools installation for STM32F4")

    # Remove us from the list of pending downloads for safety
    # (it may already have been removed above, or never added,
    # hence the swallowed exceptions)
    try:
        misc_locks["stm32f4_downloads_list"].remove(instance_text)
    except (AttributeError, ValueError, TypeError):
        pass

    return return_value
def build_binary(mcu_dir, workspace_subdir, project_name, clean, defines,
                 printer, prompt):
    '''Build'''
    binary_path = None
    flags_set = 0
    defines_overflow = False
    # The STM32Cube IDE doesn't provide a mechanism to override the
    # build output directory in the .cproject file from the
    # command-line so I'm afraid all output will end up in a
    # sub-directory with the name of the PROJECT_CONFIGURATION off
    # the project directory. <sigh>
    output_dir = mcu_dir + os.sep + project_name + os.sep + PROJECT_CONFIGURATION
    printer.string("{}building in {}.".format(prompt, output_dir))
    if not clean or u_utils.deltree(output_dir, printer, prompt):
        # Pass the #defines in as environment variables U_FLAG<n>.
        # Note that these must be deleted afterwards in case someone
        # else is going to use the worker that this was run in
        for index, define in enumerate(defines):
            if index >= MAX_NUM_DEFINES:
                defines_overflow = True
                printer.string("{}{} #defines supplied but only {} are" \
                               " supported by this STM32Cube IDE" \
                               " project file".format(prompt, len(defines),
                                                      MAX_NUM_DEFINES))
                break
            os.environ["U_FLAG" + str(index)] = "-D" + define
            flags_set += 1
        # Print the environment variables for debug purposes
        printer.string("{}environment is:".format(prompt))
        env_text = subprocess.check_output(["set", ], shell=True)
        for env_line in env_text.splitlines():
            printer.string("{}{}".format(prompt, env_line.decode()))
        if not defines_overflow:
            # Delete the workspace sub-directory first if it is there
            # to avoid the small chance that the name has been used
            # previously, in which case the import would fail
            u_utils.deltree(workspace_subdir, printer, prompt)
            # The documentation for command-line, AKA headless, use of
            # Eclipse can be found here:
            # https://gnu-mcu-eclipse.github.io/advanced/headless-builds/
            # You can also get help by running stm32cubeidec with:
            # stm32cubeidec.exe --launcher.suppressErrors -nosplash
            # -application org.eclipse.cdt.managedbuilder.core.headlessbuild
            # -data PATH_TO_YOUR_WORKSPACE -help
            # This information found nailed to the door of the
            # bog in the basement underneath the "beware of the
            # leopard" sign
            command = [STM32CUBE_IDE_PATH + os.sep + "stm32cubeidec.exe",
                       "--launcher.suppressErrors",
                       "-nosplash",
                       "-application",
                       "org.eclipse.cdt.managedbuilder.core.headlessbuild",
                       "-data", workspace_subdir,
                       "-import", mcu_dir + os.sep + project_name,
                       "-no-indexer",
                       "-build", project_name + "/" + PROJECT_CONFIGURATION,
                       "-console"]
            # Print what we're gonna do
            printer.string("{}in directory {} calling{}". \
                           format(prompt, os.getcwd(),
                                  "".join(" " + part for part in command)))
            # Call stm32cubeidec.exe to do the build
            if u_utils.exe_run(command, BUILD_GUARD_TIME_SECONDS,
                               printer, prompt):
                # The binary should be here
                binary_path = output_dir + os.sep + project_name + ".elf"
    # Delete the environment variables again (a no-op if none were set)
    while flags_set > 0:
        flags_set -= 1
        del os.environ["U_FLAG" + str(flags_set)]
    return binary_path
def create_project(project_path, old_project_name, new_project_name,
                   stm32cube_fw_path, unity_dir, printer, prompt):
    '''Create a new project with the right paths

    The STM32Cube IDE (i.e. Eclipse) has no mechanism for overriding
    file locations from the command-line so this copies the template
    project under a new name, patching the project name and the
    STM32CUBE_FW_PATH/UNITY_PATH variables in the copied .project file.

    project_path:      directory containing the template project.
    old_project_name:  name of the template project.
    new_project_name:  name to give the copied project.
    stm32cube_fw_path: value to set for STM32CUBE_FW_PATH.
    unity_dir:         value to set for UNITY_PATH.
    printer/prompt:    progress reporting.

    Returns True on success, else False.
    '''
    new_project_path = project_path + os.sep + new_project_name
    success = False

    # If there is already a project with our intended name,
    # delete it
    if u_utils.deltree(new_project_path, printer, prompt):
        # Create the new project directory
        printer.string("{}creating {}...".format(prompt, new_project_path))
        os.makedirs(new_project_path)

        # Copy the .cproject file over unchanged, using "with" so that
        # the file handles are closed even if a read/write throws
        printer.string("{}reading .cproject file...".format(prompt))
        with open(project_path + os.sep + old_project_name + os.sep +
                  ".cproject", "r") as file_handle:
            string = file_handle.read()
        printer.string("{}writing .cproject file...".format(prompt))
        with open(project_path + os.sep + new_project_name + os.sep +
                  ".cproject", "w") as file_handle:
            file_handle.write(string)

        # Read the .project file from the old project
        printer.string("{}reading .project file...".format(prompt))
        with open(project_path + os.sep + old_project_name + os.sep +
                  ".project", "r") as file_handle:
            string = file_handle.read()

        # Replace "<name>blah</name>" with "<name>test_only_blah</name>";
        # it is the .project file (not .cproject) which carries the name
        printer.string("{}changing name in .project file from \"{}\"" \
                       " to \"{}\"...".format(prompt, old_project_name,
                                              new_project_name))
        string = string.replace("<name>" + old_project_name + "</name>",
                                "<name>" + new_project_name + "</name>", 1)

        # Replace STM32CUBE_FW_PATH
        printer.string("{}updating STM32CUBE_FW_PATH to \"{}\"...". \
                       format(prompt, stm32cube_fw_path))
        string = replace_variable_list_value(string, "STM32CUBE_FW_PATH",
                                             stm32cube_fw_path)
        # Replace UNITY_PATH
        printer.string("{}updating UNITY_PATH to \"{}\"...". \
                       format(prompt, unity_dir))
        string = replace_variable_list_value(string, "UNITY_PATH",
                                             unity_dir)

        # Write the patched .project file out to the new project
        printer.string("{}writing .project file...".format(prompt))
        with open(project_path + os.sep + new_project_name + os.sep +
                  ".project", "w") as file_handle:
            file_handle.write(string)

        # Write in a warning file just in case anyone
        # wonders what the hell this weird project is
        with open(project_path + os.sep + new_project_name + os.sep +
                  "ignore_this_directory.txt", "w") as file_handle:
            file_handle.write("See u_run_stm32cube.py for an explanation.")

        success = True

    return success
def session_run(database, instances, filter_string,
                ubxlib_dir, working_dir, clean,
                summary_report_file, test_report_file, debug_file,
                process_pool, session_name=None,
                print_queue=None, print_queue_prompt=None,
                abort_on_first_failure=False, unity_dir=None):
    '''Start a session running the given instances

    Launches u_run.main for each instance in a worker of process_pool,
    then waits for them all to complete, aggregating return values.

    Returns 0 for success, negative for a probable infrastructure
    failure, positive for failure(s) (which may still be due to
    infrastructure).

    Fix in this revision: the KeyboardInterrupt handler previously did
    "raise KeyboardInterrupt from ex" where "ex" is only bound inside
    the inner "except Exception as ex" block (and Python unbinds it on
    exit from that block), so a Ctrl-C would raise NameError instead of
    propagating; it is now a bare re-raise.
    '''
    session = {}
    summary_report_file_path = None
    test_report_file_path = None
    debug_file_path = None
    return_value = 0
    local_agent = False
    agent_context = None

    with CONTEXT_LOCK:
        # Start the agent if not already running
        agent_context = get()
        if agent_context:
            if print_queue:
                agent_context["print_thread"].add_forward_queue(print_queue,
                                                                print_queue_prompt)
        else:
            return_value = -1
            # HW reset is false when the agent is started implicitly:
            # it is up to the caller to call agent.start() explicitly
            # if it wants a HW reset
            if start(print_queue, hw_reset=False):
                return_value = 0
                agent_context = get()
                local_agent = True
        if agent_context:
            printer = agent_context["printer"]
            # Name the session and add it to the session list
            session["id"] = agent_context["next_session_id"]
            agent_context["next_session_id"] += 1
            session["name"] = "session " + str(session["id"])
            if session_name:
                session["name"] = session_name
            # Set a flag to indicate that the session is
            # running: processes can watch this and, if it is
            # cleared, they must exit at the next opportunity
            session["running_flag"] = CONTEXT_MANAGER.Event()
            session["running_flag"].set()
            session["process_running_count"] = 0
            session["processes"] = []
            agent_context["sessions"].append(session)
            agent_context["session_running_count"] += 1
            # Launch a thread that manages reporting
            # from multiple sources
            session["report_queue"] = None
            session["reporter"] = None
            session["report_thread"] = None
            session["summary_report_handle"] = None
            if summary_report_file:
                summary_report_file_path = working_dir + os.sep + summary_report_file
                session["summary_report_handle"] = open(summary_report_file_path, "w")
                if session["summary_report_handle"]:
                    printer.string("{}writing summary report to \"{}\".". \
                                   format(PROMPT, summary_report_file_path))
                else:
                    printer.string("{}unable to open file \"{}\" for summary report.". \
                                   format(PROMPT, summary_report_file_path))
                session["report_queue"] = agent_context["manager"].Queue()
                session["report_thread"] = u_report.ReportThread(session["report_queue"],
                                                                 session["summary_report_handle"])
                session["report_thread"].start()
                session["reporter"] = u_report.ReportToQueue(session["report_queue"],
                                                             None, None,
                                                             agent_context["printer"])
                session["reporter"].open()
            # Add any new platform locks required for these instances
            create_platform_locks(database, instances,
                                  agent_context["manager"],
                                  agent_context["platform_locks"])
            # Set up all the instances
            for instance in instances:
                # Provide a working directory that is unique
                # for each instance and make sure it exists
                if working_dir:
                    this_working_dir = working_dir + os.sep +       \
                                       INSTANCE_DIR_PREFIX + \
                                       u_utils.get_instance_text(instance)
                else:
                    this_working_dir = os.getcwd() + os.sep +       \
                                       INSTANCE_DIR_PREFIX + \
                                       u_utils.get_instance_text(instance)
                if not os.path.isdir(this_working_dir):
                    os.makedirs(this_working_dir)
                # Only clean the working directory if requested
                if clean:
                    u_utils.deltree(this_working_dir, printer, PROMPT)
                    os.makedirs(this_working_dir)
                # Create the file paths for this instance
                if summary_report_file:
                    summary_report_file_path = this_working_dir + os.sep + summary_report_file
                if test_report_file:
                    test_report_file_path = this_working_dir + os.sep + test_report_file
                if debug_file:
                    debug_file_path = this_working_dir + os.sep + debug_file
                # Start u_run.main in each worker thread
                process = {}
                process["platform"] = u_data.get_platform_for_instance(database,
                                                                       instance)
                process["instance"] = instance
                # Create a flag to be set by u_run while the process is running
                process["running_flag"] = CONTEXT_MANAGER.Event()
                process["platform_lock"] = None
                process["connection_lock"] = u_connection.get_lock(instance)
                for platform_lock in agent_context["platform_locks"]:
                    if process["platform"] == platform_lock["platform"]:
                        process["platform_lock"] = platform_lock["lock"]
                        break
                process["handle"] = process_pool.apply_async(u_run.main,
                                                             (database, instance,
                                                              filter_string, True,
                                                              ubxlib_dir,
                                                              this_working_dir,
                                                              process["connection_lock"],
                                                              process["platform_lock"],
                                                              agent_context["misc_locks"],
                                                              agent_context["print_queue"],
                                                              session["report_queue"],
                                                              summary_report_file_path,
                                                              test_report_file_path,
                                                              debug_file_path,
                                                              session["running_flag"],
                                                              process["running_flag"],
                                                              unity_dir))
                session["process_running_count"] += 1
                session["processes"].append(process)

    # The lock is released while we're running so that others can get in
    if agent_context:
        try:
            # IMPORTANT: need to be careful here with the bits of context
            # referenced while the context lock is released.  Stick to things
            # within a session (or a process of a session) and don't remove
            # sessions.  That way it won't conflict with other calls into this
            # agent.
            # Wait for all the launched processes to complete
            printer.string("{}all instances now launched.".format(PROMPT))
            loop_count = 0
            while agent_context.is_alive() and (session["process_running_count"] > 0):
                for process in session["processes"]:
                    instance_text = u_utils.get_instance_text(process["instance"])
                    if not "stopped" in process and process["handle"].ready():
                        try:
                            # If the return value has gone negative, i.e.
                            # an infrastructure failure, leave it there,
                            # else add the number of test failures to it
                            if (return_value >= 0 and process["handle"].get() > 0) or \
                               (return_value <= 0 and process["handle"].get() < 0):
                                return_value += process["handle"].get()
                            if (return_value != 0) and abort_on_first_failure:
                                session["running_flag"].clear()
                                printer.string("{}an instance has failed, aborting" \
                                               " (gracefully, might take a while)" \
                                               " as requested...". \
                                               format(PROMPT))
                                abort_on_first_failure = False
                        except Exception as ex:
                            # If an instance threw an exception then flag an
                            # infrastructure error
                            return_value = -1
                            printer.string("{}instance {} threw exception \"{}:" \
                                           " {}\" but I can't tell you where" \
                                           " I'm afraid.". \
                                           format(PROMPT, instance_text,
                                                  type(ex).__name__, str(ex)))
                            if session["reporter"]:
                                session["reporter"].event(u_report.EVENT_TYPE_INFRASTRUCTURE,
                                                          u_report.EVENT_FAILED,
                                                          "instance {} threw exception \"{}: {}\"". \
                                                          format(instance_text,
                                                                 type(ex).__name__,
                                                                 str(ex)))
                        process["stopped"] = True
                        session["process_running_count"] -= 1
                        if session["process_running_count"] <= 0:
                            session["stopped"] = True
                    if not process["handle"].ready() and \
                       (loop_count == STILL_RUNNING_REPORT_SECONDS):
                        printer.string("{}instance {} still running.". \
                                       format(PROMPT, instance_text))
                loop_count += 1
                if loop_count > STILL_RUNNING_REPORT_SECONDS:
                    loop_count = 0
                sleep(1)
        except KeyboardInterrupt:
            # Start things cleaning up
            session["running_flag"].clear()
            # Bare re-raise: "ex" from the inner handler is not in
            # scope here so "raise KeyboardInterrupt from ex" would
            # itself raise NameError
            raise

    # Now need to lock again while we're manipulating stuff
    with CONTEXT_LOCK:
        if agent_context:
            # Remove the session from the list
            idx_to_remove = None
            for idx, item in enumerate(agent_context["sessions"]):
                if item["id"] == session["id"]:
                    idx_to_remove = idx
                    break
            if idx_to_remove is not None:
                agent_context["session_running_count"] -= 1
                agent_context["sessions"].pop(idx_to_remove)
            # Tidy up
            if session["reporter"]:
                session["reporter"].event_extra_information("return value overall {} (0 = success," \
                                                            " negative = probable infrastructure" \
                                                            " failure, positive = failure(s) (may" \
                                                            " still be due to infrastructure))". \
                                                            format(return_value))
                session["reporter"].close()
            if session["report_thread"]:
                session["report_thread"].stop_thread()
                session["report_thread"].join()
                session["report_thread"] = None
            if session["summary_report_handle"]:
                session["summary_report_handle"].close()
                session["summary_report_handle"] = None
            printer.string("{}run(s) complete, return value {}.".
                           format(PROMPT, return_value))
            if local_agent:
                stop()
            else:
                if print_queue:
                    agent_context["print_thread"].remove_forward_queue(print_queue)

    return return_value
def run_instances(database, instances, filter_string, ubxlib_dir,
                  working_dir, clean, summary_report_file,
                  test_report_file, debug_file):
    '''Run the given instances

    Launches u_run.main for each instance in a pool of worker
    processes and waits for them all to complete, aggregating their
    return values.

    Returns 0 for success, negative for a probable infrastructure
    failure, positive for failure(s) (which may still be due to
    infrastructure).
    '''
    return_value = 0
    processes = []
    platform_locks = []
    misc_locks = {}
    alive_count = 0
    report_thread = None
    report_queue = None
    reporter = None
    summary_report_file_path = None
    test_report_file_path = None
    debug_file_path = None
    summary_report_handle = None

    manager = Manager()

    # Create a lock to cover things that cross
    # platforms or that any process of u_run.main()
    # may need to perform outside of its working
    # directory
    misc_locks["system_lock"] = manager.RLock()

    # Create a lock which can be used on Nordic
    # platforms (nRF5 and Zephyer): performing a
    # JLink download to a board while JLink RTT logging
    # is active on any other board will often stop
    # the RTT logging even though the sessions are
    # aimed at debuggers with entirely different
    # serial numbers.
    misc_locks["jlink_lock"] = manager.RLock()

    # Create a "lock" that can be used on STM32F4
    # platforms to ensure that all downloads are
    # completed before logging commences.  We
    # can do this, rather than locking a tool for the
    # whole time as we have to do with Nordic, because
    # each STM32F4 board only runs a single instance
    misc_locks["stm32f4_downloads_list"] = manager.list()

    # It is possible for some platforms to be a bit
    # pants at running in multiple instances
    # hence here we create a lock per platform and pass it
    # into the instance for it to be able to manage
    # multiplicity if required
    create_platform_locks(database, instances, manager, platform_locks)

    # Launch a thread that prints stuff out
    # nicely from multiple sources
    print_queue = manager.Queue()
    print_thread = u_utils.PrintThread(print_queue)
    print_thread.start()

    # Set up a printer for this thread to print to the queue
    printer = u_utils.PrintToQueue(print_queue, None, True)

    if summary_report_file:
        # Launch a thread that manages reporting
        # from multiple sources
        summary_report_file_path = working_dir + os.sep + summary_report_file
        summary_report_handle = open(summary_report_file_path, "w")
        if summary_report_handle:
            printer.string("{}writing overall summary report to \"{}\".". \
                           format(PROMPT, summary_report_file_path))
        else:
            printer.string("{}unable to open file \"{}\" for overall summary report.". \
                           format(PROMPT, summary_report_file_path))
        report_queue = manager.Queue()
        report_thread = u_report.ReportThread(report_queue, summary_report_handle)
        report_thread.start()
        reporter = u_report.ReportToQueue(report_queue, None, None, printer)
        reporter.open()

    # From this post:
    # https://stackoverflow.com/questions/11312525/catch-ctrlc-sigint-and-exit-multiprocesses-gracefully-in-python
    # ...create a pool of worker processes to run our
    # instances, then they will handle sigint correctly
    # and tidy up after themselves.
    # SIGINT is ignored while the pool is created
    original_sigint_handler = signal(SIGINT, SIG_IGN)
    pool = NoDaemonPool(len(instances))
    signal(SIGINT, original_sigint_handler)

    # Create locks for connections
    u_connection.init_locks(manager)

    try:
        # Set up all the instances
        for instance in instances:
            # Provide a working directory that is unique
            # for each instance and make sure it exists
            if working_dir:
                this_working_dir = working_dir + os.sep +       \
                                   INSTANCE_DIR_PREFIX + \
                                   u_utils.get_instance_text(instance).replace(".", "_")
            else:
                this_working_dir = os.getcwd() + os.sep +       \
                                   INSTANCE_DIR_PREFIX + \
                                   u_utils.get_instance_text(instance).replace(".", "_")
            if not os.path.isdir(this_working_dir):
                os.makedirs(this_working_dir)
            # Only clean the working directory if requested
            if clean:
                u_utils.deltree(this_working_dir, printer, PROMPT)
                os.makedirs(this_working_dir)
            # Create the file paths for this instance
            if summary_report_file:
                summary_report_file_path = this_working_dir + os.sep + summary_report_file
            if test_report_file:
                test_report_file_path = this_working_dir + os.sep + test_report_file
            if debug_file:
                debug_file_path = this_working_dir + os.sep + debug_file

            # Start u_run.main in each worker thread
            process = {}
            process["platform"] = u_data.get_platform_for_instance(
                database, instance)
            process["instance"] = instance
            process["platform_lock"] = None
            process["connection_lock"] = u_connection.get_lock(instance)
            for platform_lock in platform_locks:
                if process["platform"] == platform_lock["platform"]:
                    process["platform_lock"] = platform_lock["lock"]
                    break
            process["handle"] = pool.apply_async(u_run.main,
                                                 (database, instance,
                                                  filter_string, True,
                                                  ubxlib_dir,
                                                  this_working_dir,
                                                  process["connection_lock"],
                                                  process["platform_lock"],
                                                  misc_locks,
                                                  print_queue, report_queue,
                                                  summary_report_file_path,
                                                  test_report_file_path,
                                                  debug_file_path))
            alive_count += 1
            processes.append(process.copy())

        # Wait for all the launched processes to complete
        printer.string("{}all instances now launched.".format(PROMPT))
        loop_count = 0
        while alive_count > 0:
            for process in processes:
                instance_text = u_utils.get_instance_text(process["instance"])
                if not "dealt_with" in process and process["handle"].ready():
                    try:
                        # If the return value has gone negative, i.e.
                        # an infrastructure failure, leave it there,
                        # else add the number of test failures to it
                        if (return_value >= 0 and process["handle"].get() > 0) or \
                           (return_value <= 0 and process["handle"].get() < 0):
                            return_value += process["handle"].get()
                    except KeyboardInterrupt as ex:
                        raise KeyboardInterrupt from ex
                    except Exception as ex:
                        # If an instance threw an exception then flag an
                        # infrastructure error
                        return_value = -1
                        printer.string("{}instance {} threw exception \"{}:" \
                                       " {}\" but I can't tell you where" \
                                       " I'm afraid.". \
                                       format(PROMPT, instance_text,
                                              type(ex).__name__, str(ex)))
                        if reporter:
                            reporter.event(u_report.EVENT_TYPE_INFRASTRUCTURE,
                                           u_report.EVENT_FAILED,
                                           "instance {} threw exception \"{}: {}\"". \
                                           format(instance_text,
                                                  type(ex).__name__, str(ex)))
                    alive_count -= 1
                    process["dealt_with"] = True
                if not process["handle"].ready() and \
                   (loop_count == STILL_RUNNING_REPORT_SECONDS):
                    printer.string("{}instance {} still running.". \
                                   format(PROMPT, instance_text))
            loop_count += 1
            if loop_count > STILL_RUNNING_REPORT_SECONDS:
                loop_count = 0
            sleep(1)
    except KeyboardInterrupt:
        # Pools can tidy themselves up on SIGINT
        printer.string(
            "{}caught CTRL-C, terminating instances...".format(PROMPT))
        if reporter:
            reporter.event(u_report.EVENT_TYPE_INFRASTRUCTURE,
                           u_report.EVENT_FAILED,
                           "CTRL-C received, terminating")
        pool.terminate()
        return_value = -1

    # Tidy up
    pool.close()
    pool.join()
    if reporter:
        reporter.event_extra_information("return value overall {} (0 = success, negative =" \
                                         " probable infrastructure failure, positive =" \
                                         " failure(s) (may still be due to infrastructure))". \
                                         format(return_value))
        reporter.close()

    # Wait for the print and report queues to empty
    # and stop the print process
    printer.string("{}all runs complete, return value {}.".format(
        PROMPT, return_value))
    sleep(1)
    print_thread.stop_thread()
    print_thread.join()

    # Stop the reporting process
    if report_thread:
        report_thread.stop_thread()
        report_thread.join()
    if summary_report_handle:
        summary_report_handle.close()

    return return_value
def build_ses(clean, ubxlib_dir, defines, printer, prompt, reporter):
    '''Build on SES'''
    project_dir = ubxlib_dir + os.sep + RUNNER_DIR_SES
    out_dir = os.getcwd() + os.sep + BUILD_SUBDIR_SES
    overflowed = False
    hex_file_path = None
    # The SES builder first, then the -config switch with the
    # configuration and project name, then the output locations,
    # all with "/" rather than "\" as SES prefers, and finally
    # verbose echo otherwise SES builder can be a tad quiet
    command = [SES_PATH + os.sep + SES_NAME,
               "-config",
               "".join(SES_BUILD_CONFIGURATION),
               (project_dir + os.sep + PROJECT_NAME_SES +
                ".emProject").replace("\\", "/"),
               "-property",
               ("build_output_directory=" + out_dir).replace("\\", "/"),
               "-property",
               ("build_intermediate_directory=" + out_dir + os.sep +
                "obj").replace("\\", "/"),
               "-echo",
               "-verbose"]
    if defines:
        # Create the U_FLAGS entries; note that the quotes which are
        # required on the command-line when including a define of the
        # format BLAH=XXX are not required here
        for index, define in enumerate(defines):
            if index >= SES_MAX_NUM_DEFINES:
                overflowed = True
                message = "{}{} #defines supplied but only" \
                          " {} are supported by this Segger" \
                          " Embedded Studio project file". \
                          format(prompt, len(defines), SES_MAX_NUM_DEFINES)
                reporter.event(u_report.EVENT_TYPE_BUILD,
                               u_report.EVENT_ERROR,
                               message)
                printer.string(message)
                break
            command.append("-D")
            command.append("U_FLAG" + str(index) + "=" + define)
    if not overflowed:
        # Add the nRF5 SDK path and Unity paths, again making sure
        # that SES gets "/" and not "\"
        command.append("-D")
        command.append("NRF5_PATH=" + NRF5SDK_PATH.replace("\\", "/"))
        command.append("-D")
        command.append("UNITY_PATH=" +
                       (os.getcwd() + os.sep +
                        u_utils.UNITY_SUBDIR).replace("\\", "/"))
        # Clear the output folder if we're not just running
        if not clean or u_utils.deltree(BUILD_SUBDIR_SES, printer, prompt):
            # Print what we're gonna do
            printer.string("{}in directory {} calling{}". \
                           format(prompt, os.getcwd(),
                                  "".join(" " + part for part in command)))
            # Call the Segger Embedded Studio builder to do the build,
            # with shell set to keep Jenkins happy
            if u_utils.exe_run(command, BUILD_GUARD_TIME_SECONDS,
                               printer, prompt, shell_cmd=True):
                hex_file_path = out_dir + os.sep + PROJECT_NAME_SES + ".hex"
        else:
            reporter.event(u_report.EVENT_TYPE_BUILD,
                           u_report.EVENT_ERROR,
                           "unable to clean build directory")
    return hex_file_path
def build_gcc(clean, build_subdir, ubxlib_dir, defines, printer, prompt,
              reporter):
    '''Build on GCC'''
    hex_file_path = None
    # The Nordic Makefile can only handle a single sub-directory
    # name which must be off the directory that Makefile is
    # located in, so we need to be in the Makefile directory
    # for building to work
    directory = ubxlib_dir + os.sep + RUNNER_DIR_GCC
    printer.string("{}CD to {}.".format(prompt, directory))
    # Capture the Unity path before we change directory though
    unity_path = os.getcwd() + os.sep + u_utils.UNITY_SUBDIR
    with u_utils.ChangeDir(directory):
        # Clear the output folder if we're not just running
        if not clean or u_utils.deltree(build_subdir, printer, prompt):
            # Assemble the whole call list
            call_list = ["make",
                         "NRF5_PATH=" + NRF5SDK_PATH,
                         "UNITY_PATH=" + unity_path.replace("\\", "/")]
            if defines:
                # Create the CFLAGS string.
                # Note: when entering things from the command-line
                # if there is more than one CFLAGS parameter then
                # they must be quoted but that is specifically
                # NOT required here as the fact that CFLAGS
                # is passed in as one array entry is sufficient
                call_list.append("CFLAGS=" +
                                 " ".join("-D" + define
                                          for define in defines))
            call_list.append("OUTPUT_DIRECTORY=" + build_subdir)
            call_list.append("GNU_VERSION=" + GNU_VERSION)
            call_list.append("GNU_PREFIX=" + GNU_PREFIX)
            call_list.append("GNU_INSTALL_ROOT=" + GNU_INSTALL_ROOT)
            # Print what we're gonna do
            printer.string("{}in directory {} calling{}". \
                           format(prompt, os.getcwd(),
                                  " " + " ".join(call_list)))
            # Call make to do the build; set shell to keep Jenkins happy
            if u_utils.exe_run(call_list, BUILD_GUARD_TIME_SECONDS,
                               printer, prompt, shell_cmd=True):
                hex_file_path = os.getcwd() + os.sep + build_subdir + \
                                os.sep + "nrf52840_xxaa.hex"
        else:
            reporter.event(u_report.EVENT_TYPE_INFRASTRUCTURE,
                           u_report.EVENT_FAILED,
                           "unable to clean build directory")
    return hex_file_path
def build(board, clean, ubxlib_dir, defines, env, printer,
          prompt, reporter):
    '''Build using west.

    Returns the build products directory on success, else None.
    board is the Zephyr board name, env is the (mutated) environment
    dictionary passed on to west, defines become the U_FLAGS
    environment variable.'''
    call_list = []
    defines_text = ""
    runner_dir = ubxlib_dir + os.sep + RUNNER_DIR
    output_dir = os.getcwd() + os.sep + BUILD_SUBDIR
    custom_board_dir = ubxlib_dir + os.sep + CUSTOM_BOARD_DIR
    custom_board_root = ubxlib_dir + os.sep + CUSTOM_BOARD_ROOT
    build_dir = None

    # Put west at the front of the call list
    call_list.append("west")
    # Make it verbose
    call_list.append("-v")
    # Do a build
    call_list.append("build")
    # Pick up .overlay and .conf files automatically
    call_list.append("-p")
    call_list.append("auto")
    # Board name
    call_list.append("-b")
    call_list.append((board).replace("\\", "/"))
    # Under Zephyr we may need to override the Zephyr board files.
    # Check if this board has such an override
    board_files = os.listdir(custom_board_dir)
    for board_file in board_files:
        if board == board_file:
            # NOTE(review): runner_dir is appended here AND again
            # below as the positional "path to build" (there without
            # "\\"-to-"/" normalization as done here) — so when a
            # custom board matches, the source directory appears
            # twice in the west command line. Looks suspicious;
            # confirm against west's CLI whether the first append
            # is intentional.
            call_list.append(runner_dir)
            call_list.append("-DBOARD_ROOT=" + custom_board_root)
            break
    # Build products directory
    call_list.append("-d")
    call_list.append((BUILD_SUBDIR).replace("\\", "/"))
    if clean:
        # Clean: "-p always" forces a pristine build
        call_list.append("-p")
        call_list.append("always")
    # Now the path to build
    call_list.append((runner_dir).replace("\\", "/"))
    # CCACHE is a pain in the bum: falls over on Windows
    # path length issues randomly and doesn't say where.
    # Since we're generally doing clean builds, disable it
    env["CCACHE_DISABLE"] = "1"
    if defines:
        # Set up the U_FLAGS environment variable: space-separated
        # "-D<define>" entries
        for idx, define in enumerate(defines):
            if idx == 0:
                defines_text += "-D" + define
            else:
                defines_text += " -D" + define
        printer.string("{}setting environment variables U_FLAGS={}".format(
            prompt, defines_text))
        env["U_FLAGS"] = defines_text
    # Clear the output folder ourselves as well, just
    # to be completely sure
    if not clean or u_utils.deltree(BUILD_SUBDIR, printer, prompt):
        # Print what we're gonna do
        tmp = ""
        for item in call_list:
            tmp += " " + item
        printer.string("{}in directory {} calling{}". \
                       format(prompt, os.getcwd(), tmp))
        # Call west to do the build
        # Set shell to keep Jenkins happy
        if u_utils.exe_run(call_list, BUILD_GUARD_TIME_SECONDS, printer,
                           prompt, shell_cmd=True, set_env=env):
            build_dir = output_dir
    else:
        reporter.event(u_report.EVENT_TYPE_BUILD,
                       u_report.EVENT_ERROR,
                       "unable to clean build directory")
    return build_dir
def build(build_dir, sketch_path, library_path, mcu, board, defines, clean,
          printer, prompt, reporter, keep_going_flag):
    '''Build ubxlib for Arduino.

    Compiles sketch_path with arduino-cli for the given board/mcu,
    optionally against the libraries under library_path, and copies
    any resulting ".a" files back under each library's src/<mcu>
    directory.  Returns the build directory on success, else None.'''
    call_list = []
    defines_text = ""
    build_path = None

    # We build inside the build_dir but in a sub-directory specific
    # to the sketch
    build_dir = os.path.join(build_dir,
                             os.path.basename(os.path.split(sketch_path)[0]))
    # Make sure that the build directory exists and is
    # cleaned if required
    if os.path.exists(build_dir):
        if clean:
            u_utils.deltree(build_dir, printer, prompt)
            os.makedirs(build_dir)
    else:
        os.makedirs(build_dir)
    if os.path.exists(build_dir):
        printer.string("{}building {} in {}...".format(prompt, sketch_path,
                                                       build_dir))
        # Assemble the call list for the build process
        call_list.append(ARDUINO_CLI_EXE)
        call_list.append("compile")
        if library_path:
            call_list.append("--libraries")
            call_list.append(library_path)
        call_list.append("--fqbn")
        call_list.append(board)
        if clean:
            call_list.append("--clean")
        call_list.append("-v")
        call_list.append("--build-path")
        call_list.append(build_dir)
        call_list.append("--build-cache-path")
        call_list.append(build_dir)
        if defines:
            for define in defines:
                if defines_text:
                    defines_text += " "
                defines_text += "\"-D" + define + "\""
            # Set the flags for our ubxlib files, which are .c
            call_list.append("--build-property")
            call_list.append("compiler.c.extra_flags=" + defines_text)
            # Set the flags for the .ino application files, which are .cpp
            call_list.append("--build-property")
            call_list.append("compiler.cpp.extra_flags=" + defines_text)
        call_list.append(sketch_path)
        if run_command(call_list, BUILD_GUARD_TIME_SECONDS,
                       printer, prompt, keep_going_flag):
            build_path = build_dir
            if library_path:
                # If that was successful, copy the ".a" files to the correct
                # locations under each library/mcu
                for root, _directories, files in os.walk(
                        os.path.join(build_dir, LIBRARIES_SUB_DIR)):
                    for file in files:
                        if file.endswith(".a"):
                            source = os.path.join(root, file)
                            library_name = os.path.basename(
                                os.path.split(source)[0])
                            destination_dir = os.path.join(library_path,
                                                           library_name,
                                                           "src",
                                                           mcu.lower())
                            destination = os.path.join(destination_dir,
                                                       library_name + ".a")
                            if not os.path.isdir(destination_dir):
                                try:
                                    os.makedirs(destination_dir)
                                except OSError:
                                    # Bug fix: report the directory that
                                    # could not be created
                                    # (destination_dir), not the file
                                    # path (destination)
                                    reporter.event(
                                        u_report.EVENT_TYPE_INFRASTRUCTURE,
                                        u_report.EVENT_FAILED,
                                        "could not create directory \"" + \
                                        destination_dir + "\"")
                                    build_path = None
                                    break
                            # Use the native shell copy command so that
                            # the behaviour (and any error text) matches
                            # what an operator would see
                            call_list = []
                            if u_utils.is_linux():
                                call_list.append("cp")
                            else:
                                call_list.append("copy")
                                call_list.append("/Y")
                            call_list.append(source)
                            call_list.append(destination)
                            try:
                                printer.string("{}copying {} to {}...". \
                                               format(prompt, source,
                                                      destination))
                                subprocess.check_output(
                                    u_utils.subprocess_osify(call_list),
                                    shell=True)
                            except subprocess.CalledProcessError as error:
                                reporter.event(
                                    u_report.EVENT_TYPE_INFRASTRUCTURE,
                                    u_report.EVENT_FAILED,
                                    "{}error when copying {} to {}, {} {}: \"{}\""
                                    .format(prompt, source, destination,
                                            error.cmd, error.returncode,
                                            error.output))
                                build_path = None
                                break
    else:
        reporter.event(u_report.EVENT_TYPE_INFRASTRUCTURE,
                       u_report.EVENT_FAILED,
                       "could not create directory \"" + build_dir + "\"")
    return build_path
def build(esp_idf_dir, ubxlib_dir, build_dir, defines, env, clean,
          printer, prompt, reporter):
    '''Build the code.

    Runs idf.py from esp_idf_dir against the ubxlib ESP-IDF runner
    project, placing build products in build_dir; defines become the
    U_FLAGS environment variable.  Returns True on success.'''
    call_list = []
    defines_text = ""
    success = False

    # Make sure that the build directory exists and is
    # cleaned if required
    if os.path.exists(build_dir):
        if clean:
            u_utils.deltree(build_dir, printer, prompt)
            os.makedirs(build_dir)
            # Note: used to delete sdkconfig here to
            # force it to be regenerated from the
            # sdkconfig.defaults file however we can't
            # do that with parallel builds as the file
            # might be in use.  Just need to be sure
            # that none of our builds fiddle with
            # it (which they shouldn't for consistency
            # anyway).
    else:
        os.makedirs(build_dir)
    # CCACHE is a pain in the bum: falls over on Windows
    # path length issues randomly and doesn't say where.
    # Since we're generally doing clean builds, disable it
    env["CCACHE_DISABLE"] = "1"
    if os.path.exists(build_dir):
        printer.string("{}building code...".format(prompt))
        # Set up the U_FLAGS environment variable: space-separated
        # "-D<define>" entries ("or ()" makes this safe if defines
        # is passed as None)
        for idx, define in enumerate(defines or ()):
            if idx == 0:
                defines_text += "-D" + define
            else:
                defines_text += " -D" + define
        printer.string("{}setting environment variables U_FLAGS={}".
                       format(prompt, defines_text))
        env["U_FLAGS"] = defines_text
        # Assemble the call list for the build process.
        # Bug fix: the sub-paths used to be hard-coded with "\\"
        # separators which broke on anything but Windows; use
        # os.path.join instead (identical result on Windows)
        call_list.append("python")
        call_list.append(os.path.join(esp_idf_dir, "tools", "idf.py"))
        call_list.append("-C")
        call_list.append(os.path.join(ubxlib_dir, "port", "platform",
                                      "esp-idf", "mcu", "esp32",
                                      PROJECT_SUBDIR))
        call_list.append("-B")
        call_list.append(build_dir)
        call_list.append("-D")
        call_list.append("TEST_COMPONENTS=" + TEST_COMPONENT)
        call_list.append("size")
        call_list.append("build")
        # Print what we're gonna do
        tmp = ""
        for item in call_list:
            tmp += " " + item
        printer.string("{}in directory {} calling{}". \
                       format(prompt, os.getcwd(), tmp))
        # Do the build,
        # set shell to True to keep Jenkins happy
        success = u_utils.exe_run(call_list, BUILD_GUARD_TIME_SECONDS,
                                  printer, prompt, shell_cmd=True,
                                  set_env=env)
    else:
        reporter.event(u_report.EVENT_TYPE_INFRASTRUCTURE,
                       u_report.EVENT_FAILED,
                       "could not create directory \"" + build_dir + "\"")
    return success
def create_project(project_name, source_project_dir, destination_project_dir,
                   stm32cube_fw_path, unity_dir, printer, prompt):
    '''Create a new project with the right paths.

    Copies the .cproject file of project_name from source_project_dir
    to destination_project_dir and writes a .project file with the
    UBX_PROJ_PATH, STM32CUBE_FW_PATH and UNITY_PATH variables updated.
    Returns True on success.'''
    source_project_path = source_project_dir + os.sep + project_name
    destination_project_path = destination_project_dir + os.sep + project_name
    success = False

    # If there is already a project with our intended name,
    # delete it
    if u_utils.deltree(destination_project_path, printer, prompt):
        # Create the new project directory
        printer.string("{}creating {}...".format(prompt,
                                                 destination_project_path))
        os.makedirs(destination_project_path)
        # Read the .cproject file from the old project and write it
        # out to the new, unmodified.
        # Fix: use "with" so the file handles are closed even if an
        # I/O error occurs part-way through
        printer.string("{}reading .cproject file...".format(prompt))
        with open(source_project_path + os.sep + ".cproject",
                  "r") as file_handle:
            string = file_handle.read()
        printer.string("{}writing {}/.cproject file...".format(
            prompt, destination_project_path))
        with open(destination_project_path + os.sep + ".cproject",
                  "w") as file_handle:
            file_handle.write(string)
        # Parse the .project file from the old project so that the
        # path variables inside it can be rewritten.
        # Fix: the previous code also read the whole file into a
        # string first and then never used it; that dead read has
        # been removed
        printer.string("{}reading .project file...".format(prompt))
        tree = ElementTree.parse(source_project_path + os.sep + ".project")
        # Replace UBX_PROJ_PATH variable using XPath
        printer.string("{}updating UBX_PROJ_PATH to \"{}\"...". \
                       format(prompt, source_project_path))
        replace_variable_path(tree, 'UBX_PROJ_PATH', source_project_path)
        # Replace STM32CUBE_FW_PATH
        printer.string("{}updating STM32CUBE_FW_PATH to \"{}\"...". \
                       format(prompt, stm32cube_fw_path))
        replace_variable_path(tree, 'STM32CUBE_FW_PATH', stm32cube_fw_path)
        # Replace UNITY_PATH
        printer.string("{}updating UNITY_PATH to \"{}\"...". \
                       format(prompt, unity_dir))
        replace_variable_path(tree, 'UNITY_PATH', unity_dir)
        # Write it out to the new
        printer.string("{}writing {}/.project file...".format(
            prompt, destination_project_path))
        tree.write(destination_project_path + os.sep + ".project")
        # Write in a warning file just in case anyone
        # wonders what the hell this weird project is
        with open(destination_project_path + os.sep +
                  "ignore_this_directory.txt", "w") as file_handle:
            file_handle.write("See u_run_stm32cube.py for an explanation.")
        success = True
    return success
def build_gcc(clean, build_subdir, ubxlib_dir, unity_dir, defines,
              printer, prompt, reporter, keep_going_flag):
    '''Build on GCC'''
    hex_file_path = None
    makefile = ubxlib_dir + os.sep + RUNNER_DIR_GCC + os.sep + "Makefile"
    outputdir = os.getcwd() + os.sep + build_subdir

    # The Nordic Makefile.common that is included by our Makefile
    # is quite limited and weird behaiviours:
    # 1. It is not possible to specify an OUTPUT_DIRECTORY that
    #    is not on the same drive as the source code. In our case
    #    the source code is mounted as a subst device in the Windows
    #    case.
    # 2. Makefile.common expects having a "Makefile" in the current
    #    directory. However, since we want the build output to be placed
    #    outside the source tree and due to 1) we want to call our
    #    Makefile using "make -f $UBXLIB_DIR/$RUNNER_DIR_GCC/Makefile"
    #    from a workdir. In this case the Makefile will NOT be located
    #    in current directory. So to get nRF5 SDK Makefile.common happy
    #    we fake this Makefile with an empty file:
    Path('./Makefile').touch()

    # Clear the output folder if we're not just running
    if not clean or u_utils.deltree(outputdir, printer, prompt):
        # Assemble the whole call list
        cmd = ["make", "-j8", "-f", makefile,
               "NRF5_PATH=" + NRF5SDK_PATH,
               "UNITY_PATH=" + unity_dir.replace("\\", "/")]
        if defines:
            # Create the CFLAGS string.
            # Note: when entering things from the command-line
            # if there is more than one CFLAGS parameter then
            # they must be quoted but that is specifically
            # NOT required here as the fact that CFLAGS
            # is passed in as one array entry is sufficient
            cmd.append("CFLAGS=" +
                       " ".join("-D" + define for define in defines))
        cmd += ["OUTPUT_DIRECTORY=" + build_subdir,
                "GNU_VERSION=" + GNU_VERSION,
                "GNU_PREFIX=" + GNU_PREFIX,
                "GNU_INSTALL_ROOT=" + GNU_INSTALL_ROOT]
        # Print what we're gonna do
        printer.string("{}in directory {} calling{}". \
                       format(prompt, os.getcwd(),
                              " " + " ".join(cmd)))
        # Call make to do the build; set shell to keep Jenkins happy
        if u_utils.exe_run(cmd, BUILD_GUARD_TIME_SECONDS,
                           printer, prompt, shell_cmd=True,
                           keep_going_flag=keep_going_flag):
            hex_file_path = outputdir + \
                            os.sep + "nrf52840_xxaa.hex"
    else:
        reporter.event(u_report.EVENT_TYPE_INFRASTRUCTURE,
                       u_report.EVENT_FAILED,
                       "unable to clean build directory")
    return hex_file_path