def main(default=None, commands=None, config_spec="", tests=None, description=None):
    """Parses command line arguments and configuration, then runs the
    appropriate command.

    Args:
        default: Default command to run when none is named on the command line.
        commands: Optional iterable of additional commands to register.
        config_spec: Configuration specification passed through to `configure`.
        tests: Optional collection of tests; stored in the global TESTS.
        description: Epilog text for the argument parser; when None a default
            "Thanks for using %(prog)s." message is used.
    """
    start_time = datetime.now()
    log.debug("Start: '%s'", " ".join(sys.argv))
    log.debug("Time: '%s'", start_time)

    commands = collect_commands(default, commands or [])

    # Configure the application from the command line and get the
    # command to be run
    run_command, arguments, interactive = configure(
        default,
        commands,
        config_spec,
        "Thanks for using %(prog)s." if description is None else description)

    # Store the commands and tests globally
    # I believe global is justified here for simplicity
    if tests is not None:
        global TESTS  # pylint:disable=W0603
        TESTS = tests
    global COMMANDS  # pylint:disable=W0603
    COMMANDS = commands

    # Initialize the main logger based on the configuration
    # and handle the state safely
    with logging_context(), \
            StateHandler(filename=conf['pyexperiment.state_filename'],
                         load=conf['pyexperiment.load_state'],
                         save=conf['pyexperiment.save_state'],
                         rotate_n_files=conf[
                             'pyexperiment.rotate_n_state_files']):
        # BUGFIX: result must be bound even when no command runs, otherwise
        # the interactive branch below raises NameError for a bare
        # `--interactive` invocation.
        result = None
        # Run the command with the supplied arguments
        if run_command is not None:
            result = run_command(*arguments)
            if result is not None:
                print(result)
        # Drop to the interactive console if necessary, passing the result
        if interactive:
            embed_interactive(result=result)

    # After everything is done, print timings if necessary.  The option may
    # arrive either as a real bool or as the string 'True' from a config
    # file, hence the two-pronged check (note: `in (True, 'True')` would
    # wrongly accept the integer 1, so the explicit form is kept).
    if ((isinstance(conf['pyexperiment.print_timings'], bool)
         and conf['pyexperiment.print_timings'])
            or conf['pyexperiment.print_timings'] == 'True'):
        log.print_timings()

    end_time = datetime.now()
    log.debug("End: '%s'", " ".join(sys.argv))
    log.debug("Time: '%s'", end_time)
    log.debug("Took: %.3fs", (end_time - start_time).total_seconds())
def _replicate_single_thread(function,
                             no_replicates,
                             subkey_pattern=SUBSTATE_KEY_PATTERN):
    """Replicate the experiment defined by the function.

    Runs `function` `no_replicates` times sequentially, each run inside
    its own sub-state context named `subkey_pattern % i`.

    Args:
        function: Zero-argument callable defining one experiment run.
        no_replicates: Number of sequential runs to perform.
        subkey_pattern: %-format pattern producing the sub-state key
            for replicate i.
    """
    for i in range(no_replicates):
        with substate_context(subkey_pattern % i):
            # Lazy %-args logging: the message is only formatted when
            # the debug level is actually enabled (the original eagerly
            # concatenated the string on every iteration).
            log.debug("Running %s", subkey_pattern % i)
            function()
def __call__(self):
    """Run the wrapped target inside its sub-state context.

    Signals readiness through `self.ready_queue` before invoking the
    target so the spawning process can confirm the worker started.

    Returns:
        Whatever `self.target()` returns.

    Raises:
        Re-raises any exception from the target after logging it.
    """
    with substate_context(self.context):
        # Tell the parent process this worker is up and running
        self.ready_queue.put(True)
        log.debug("Running %s", self.context)
        try:
            result = self.target()
        except Exception:
            # Log the full traceback here: it would otherwise be lost
            # when the exception crosses the process boundary
            log.fatal("Error in sub-process: %s", traceback.format_exc())
            # Bare raise re-raises the active exception with its
            # original traceback intact
            raise
        return result
def test_basic_console_logging(self):
    """Test the most basic console logging at the debug level
    """
    log.initialize(console_level=logging.DEBUG)
    log.debug("Test string: %s, int: %s, float: %f", 'bla', 12, 3.14)
    log.close()
    # Something must have been written to the captured stream
    self.assertNotEqual(len(self.log_stream.getvalue()), 0)
    # assertRegexpMatches was deprecated in Python 3.2 and removed in
    # 3.12; assertRegex is the supported spelling.  The dots are escaped
    # so the pattern matches a literal '3.14' rather than any character.
    self.assertRegex(
        self.log_stream.getvalue(),
        r'Test string: bla, int: 12, float: 3\.14')
def _replicate_multiprocessing(function, no_replicates, no_processes,
                               subkey_pattern=SUBSTATE_KEY_PATTERN):
    """Replicate the experiment defined by the function in multiple processes

    Each replicate runs in a pool worker inside its own sub-state
    context (key `subkey_pattern % i`); a manager queue is used as a
    readiness handshake so the parent knows each worker actually
    started before submitting the next one.

    Args:
        function: Zero-argument callable defining one experiment run.
        no_replicates: Total number of replicates to execute.
        no_processes: Size of the worker process pool.
        subkey_pattern: %-format pattern producing each replicate's
            sub-state key.
    """
    with processing_state_context():
        pool = multiprocessing.Pool(processes=no_processes)
        # Manager-backed queue: plain multiprocessing.Queue cannot be
        # shipped through Pool.apply_async arguments
        manager = multiprocessing.Manager()
        ready_queue = manager.Queue()  # pylint: disable=no-member
        result_threads = []
        for i in range(no_replicates):
            # Create a target function to be run in a separate process
            # using a separate state context
            target = TargetCreator(function, ready_queue, subkey_pattern % i)
            result = pool.apply_async(target)
            # Use a thread to wait for the result; result.get() also
            # re-raises any exception from the worker in this process
            waiter = threading.Thread(target=result.get)
            waiter.start()
            result_threads.append(waiter)
            # Make sure the process is really running (probably not necessary)
            # — blocks until the worker puts True on the ready queue
            while not ready_queue.get():
                pass  # pragma: no cover
        # Wait for the pool, then join it (close() must precede join())
        log.debug("Closing pool")
        pool.close()
        log.debug("Joining pool")
        pool.join()
        # Make sure all the results are in before leaving the state context
        log.debug("Joining threads")
        for thread in result_threads:
            thread.join()
def hello():
    """Emits a debug-level greeting to the log
    """
    greeting = "Hello"
    log.debug(greeting)