def init_logger(default_level=pylogging.LogLevel.INFO, defaults=tuple(), add_help=False):
    """Initialize the pylogging system from command line arguments.

    Parses the known logging options from sys.argv, resets pylogging,
    applies the default configuration and then per-channel log levels.

    Arguments:
        default_level: default log level for the logging system
        defaults: iterable of (channel_name, level) tuples giving channel
            default levels; command line ``--logchannel`` options override them
        add_help: [bool] print help when called with --help, set to True
            if you don't have an own parser.

    Example:
        init_logger(pylogging.LogLevel.INFO, [('sthal', 'INFO'), ('halbe', 'WARN')])
    """
    parser = argparse.ArgumentParser(description='PSP visualization tool',
                                     add_help=add_help)
    add_logger_options(parser, default_level)
    # Only consume options this parser knows; leave the rest to the caller.
    parser_args, _ = parser.parse_known_args()
    pylogging.reset()
    pylogging.default_config(
        level=parser_args.loglevel,
        fname=parser_args.logfile if parser_args.logfile else "",
        print_location=parser_args.loglocation,
        color=parser_args.logcolor,
        date_format=parser_args.logdate_format)
    # Apply channel defaults first so command line settings win.
    for name, level in defaults:
        pylogging.set_loglevel(pylogging.get(name), to_level(level))
    for name, level in parser_args.logchannel:
        pylogging.set_loglevel(pylogging.get(name), to_level(level))
def loggerInit():
    """Reset pylog and attach a colored stderr console appender to the root logger."""
    pylog.reset()
    color_layout = pylog.ColorLayout(True, 'ABSOLUTE')
    console = pylog.ConsoleAppender(color_layout)
    console.setOption("target", pylog.ConsoleAppender.getSystemErr())
    console.activateOptions()
    root = pylog.get_root()
    root.addAppender(console)
def default_test_msg(print_location, date_format):
    """Emit a DEBUG message through pylogging's default configuration."""
    pylogging.reset()
    pylogging.default_config(
        level=pylogging.LogLevel.DEBUG,
        print_location=print_location,
        date_format=date_format,
        color=True)
    channel = pylogging.get("default_config")
    channel.debug("time format: {}".format(date_format))
def default_test_msg(print_location, date_format):
    """Log a single DEBUG line using the default pylogging configuration."""
    pylogging.reset()
    settings = dict(
        level=pylogging.LogLevel.DEBUG,
        print_location=print_location,
        date_format=date_format,
        color=True,
    )
    pylogging.default_config(**settings)
    log = pylogging.get("default_config")
    log.debug("time format: {}".format(date_format))
def test_reset(self):
    """After reset() no logger may keep any appender."""
    logger.log_to_cout(logger.LogLevel.WARN)
    named = [logger.get("test"), logger.get("xyz")]
    logger.reset()
    for inst in named:
        self.assertEqual(0, inst._get_number_of_appenders())
    self.assertEqual(0, logger.get_root()._get_number_of_appenders())
def test_reset(self):
    """logger.reset() must strip appenders from named loggers and the root."""
    logger.log_to_cout(logger.LogLevel.WARN)
    first = logger.get("test")
    second = logger.get("xyz")
    logger.reset()
    self.assertEqual(0, first._get_number_of_appenders())
    self.assertEqual(0, second._get_number_of_appenders())
    self.assertEqual(0, logger.get_root()._get_number_of_appenders())
def main():
    """Command line entry point: search L1 routes between two HICANNs."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--source-hicann', required=True, type=parse_hicann,
                        metavar='<enum>|<x>,<y>', help='source HICANNOnWafer')
    parser.add_argument('--source-hline', required=False, type=int,
                        help='source HLineOnHICANN')
    parser.add_argument('--source-vline', required=False, type=int,
                        help='source VLineOnHICANN')
    parser.add_argument('--target-hicann', required=True, type=parse_hicann,
                        metavar='<enum>|<x>,<y>', help='target HICANNOnWafer')
    parser.add_argument('--target-vertical', action='store_true',
                        help='target should be VLineOnHICANN')
    parser.add_argument('--without-hicann', default=[], type=parse_hicann,
                        nargs="+", metavar='<enum>|<x>,<y>',
                        help='unavailable HICANNOnWafer')
    args = parser.parse_args()

    pylogging.reset()
    pylogging.default_config(date_format='absolute')

    # Register every HICANN except those explicitly marked unavailable.
    alone = pyalone.Alone()
    unavailable = args.without_hicann
    for hicann in iter_all(HICANNOnWafer):
        if hicann not in unavailable:
            alone.add(hicann)

    if args.source_hline is not None:
        line = HLineOnHICANN(args.source_hline)
    elif args.source_vline is not None:
        line = VLineOnHICANN(args.source_vline)
    else:
        parser.error('Please specify one of --source-hline / --source-vline')

    source = pyalone.L1BusOnWafer(args.source_hicann, line)
    orientation = vertical if args.target_vertical else horizontal
    target = pyalone.Target(args.target_hicann, orientation)
    for route in alone.find_routes(source, target):
        print(route)
def test_msg(print_location, date_format):
    """Manually wire a colored stderr appender to the root and emit a WARN."""
    pylogging.reset()
    color_layout = pylogging.ColorLayout(True, date_format)
    color_layout.setOption("printlocation", str(print_location).lower())
    color_layout.activateOptions()
    stderr_appender = pylogging.ConsoleAppender(color_layout)
    stderr_appender.setOption("target", pylogging.ConsoleAppender.getSystemErr())
    stderr_appender.activateOptions()
    root = pylogging.get_root()
    pylogging.set_loglevel(root, pylogging.LogLevel.DEBUG)
    root.addAppender(stderr_appender)
    channel = pylogging.get("manual_config")
    channel.warn("time format: {}".format(date_format))
def test_file_logging(self):
    """File appender must honor each logger's individual log level."""
    log = os.path.join(self.temp, 'test_file_logging.log')
    logger.log_to_file(log, logger.LogLevel.WARN)
    logger1 = logger.get("test")
    logger2 = logger.get("xyz")
    logger3 = logger.get("xyz.test")
    logger.set_loglevel(logger1, logger.LogLevel.WARN)
    logger.set_loglevel(logger2, logger.LogLevel.DEBUG)
    logger.set_loglevel(logger3, logger.LogLevel.INFO)

    levels = ("FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE")
    # Emit one message per level via the macro-style helpers ...
    for name in levels:
        getattr(logger, "LOG4CXX_" + name)(logger1, name)
    # ... via the logger's own level methods ...
    for name in levels:
        getattr(logger2, name)(name)
    # ... and again via the helpers on the child logger.
    for name in levels:
        getattr(logger, "LOG4CXX_" + name)(logger3, name)
    logger.reset()

    with open(log) as f:
        expected = """FATAL test FATAL
ERROR test ERROR
WARN test WARN
FATAL xyz FATAL
ERROR xyz ERROR
WARN xyz WARN
INFO xyz INFO
DEBUG xyz DEBUG
FATAL xyz.test FATAL
ERROR xyz.test ERROR
WARN xyz.test WARN
INFO xyz.test INFO
"""
        self.assertEqualLogLines(expected, f.read())
def test_file_logging(self):
    """Messages below a logger's configured level must not reach the file."""
    path = os.path.join(self.temp, 'test_file_logging.log')
    logger.log_to_file(path, logger.LogLevel.WARN)
    test_log = logger.get("test")
    xyz_log = logger.get("xyz")
    xyz_test_log = logger.get("xyz.test")
    logger.set_loglevel(test_log, logger.LogLevel.WARN)
    logger.set_loglevel(xyz_log, logger.LogLevel.DEBUG)
    logger.set_loglevel(xyz_test_log, logger.LogLevel.INFO)

    names = ["FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"]
    # First logger: macro-style helpers.
    for name in names:
        getattr(logger, "LOG4CXX_%s" % name)(test_log, name)
    # Second logger: per-level member functions.
    for name in names:
        getattr(xyz_log, name)(name)
    # Child logger: macro-style helpers again.
    for name in names:
        getattr(logger, "LOG4CXX_%s" % name)(xyz_test_log, name)
    logger.reset()

    with open(path) as f:
        expected = """FATAL test FATAL
ERROR test ERROR
WARN test WARN
FATAL xyz FATAL
ERROR xyz ERROR
WARN xyz WARN
INFO xyz INFO
DEBUG xyz DEBUG
FATAL xyz.test FATAL
ERROR xyz.test ERROR
WARN xyz.test WARN
INFO xyz.test INFO
"""
        self.assertEqualLogLines(expected, f.read())
def setUp(self):
    """Configure logging, a temporary output directory and a PyMarocco instance."""
    pylogging.reset()
    pylogging.default_config(pylogging.LogLevel.ERROR)
    pylogging.set_loglevel(
        pylogging.get("marocco"), pylogging.LogLevel.INFO)
    self.log = pylogging.get(__name__)

    self.temporary_directory = tempfile.mkdtemp(prefix="marocco-test-")
    self.marocco = pymarocco.PyMarocco()
    # BUG FIX: `pymarocco.PyMarocco.None` is a SyntaxError on Python 3
    # (`None` is a reserved keyword); access the enum member via getattr.
    self.marocco.backend = getattr(pymarocco.PyMarocco, "None")
    self.marocco.persist = os.path.join(
        self.temporary_directory, "results.bin")
    self.marocco.neuron_placement.default_neuron_size(4)
def test_default_logger(self):
    """default_config on the root overrides the old default logger's level."""
    log_all = os.path.join(self.temp, 'test_default_logger_all.log')
    log_default = os.path.join(self.temp, 'test_default_logger_default.log')
    named_logger = logger.get("test")
    logger.default_config(logger.LogLevel.DEBUG, log_all, date_format="NULL")
    # Loglevel should be ignored, because the root logger is configured
    logger_default = logger.get_old_logger(logger.LogLevel.TRACE)
    logger.append_to_file(log_default, logger_default)

    for current in (logger_default, named_logger):
        for level in ("FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"):
            getattr(logger, "LOG4CXX_" + level)(current, level)
    logger.reset()

    with open(log_all) as f:
        expected = """FATAL PyLogging FATAL
ERROR PyLogging ERROR
WARN PyLogging WARN
INFO PyLogging INFO
DEBUG PyLogging DEBUG
FATAL test FATAL
ERROR test ERROR
WARN test WARN
INFO test INFO
DEBUG test DEBUG
"""
        self.assertEqualLogLines(expected, f.read())
    with open(log_default) as f:
        expected = """FATAL PyLogging FATAL
ERROR PyLogging ERROR
WARN PyLogging WARN
INFO PyLogging INFO
DEBUG PyLogging DEBUG
"""
        self.assertEqualLogLines(expected, f.read())
def setUp(self):
    """Reset logging and create a fresh marocco configuration per test."""
    pylogging.reset()
    pylogging.default_config(pylogging.LogLevel.ERROR)
    pylogging.set_loglevel(pylogging.get("marocco"),
                           pylogging.LogLevel.INFO)
    self.log = pylogging.get(__name__)
    self.temporary_directory = tempfile.mkdtemp(prefix="marocco-test-")

    configuration = pymarocco.PyMarocco()
    configuration.backend = pymarocco.PyMarocco.Without
    configuration.persist = os.path.join(self.temporary_directory,
                                         "results.bin")
    configuration.neuron_placement.default_neuron_size(4)
    configuration.continue_despite_synapse_loss = True
    configuration.calib_backend = pymarocco.PyMarocco.CalibBackend.Default
    configuration.defects.backend = pymarocco.Defects.Backend.Without
    self.marocco = configuration
def test_default_logger(self):
    """The old default logger must follow the root configuration's DEBUG level."""
    log_all = os.path.join(self.temp, 'test_default_logger_all.log')
    log_default = os.path.join(self.temp, 'test_default_logger_default.log')
    logger1 = logger.get("test")
    logger.default_config(logger.LogLevel.DEBUG, log_all, date_format="NULL")
    # Loglevel should be ignored, because the root logger is configured
    logger_default = logger.get_old_logger(logger.LogLevel.TRACE)
    logger.append_to_file(log_default, logger_default)

    emitters = (
        logger.LOG4CXX_FATAL,
        logger.LOG4CXX_ERROR,
        logger.LOG4CXX_WARN,
        logger.LOG4CXX_INFO,
        logger.LOG4CXX_DEBUG,
        logger.LOG4CXX_TRACE,
    )
    messages = ("FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE")
    for l in (logger_default, logger1):
        for emit, msg in zip(emitters, messages):
            emit(l, msg)
    logger.reset()

    with open(log_all) as f:
        expected = """FATAL PyLogging FATAL
ERROR PyLogging ERROR
WARN PyLogging WARN
INFO PyLogging INFO
DEBUG PyLogging DEBUG
FATAL test FATAL
ERROR test ERROR
WARN test WARN
INFO test INFO
DEBUG test DEBUG
"""
        self.assertEqualLogLines(expected, f.read())
    with open(log_default) as f:
        expected = """FATAL PyLogging FATAL
ERROR PyLogging ERROR
WARN PyLogging WARN
INFO PyLogging INFO
DEBUG PyLogging DEBUG
"""
        self.assertEqualLogLines(expected, f.read())
def runTest(self):
    """Map and simulate a pulse-packet stimulated population on the ESS.

    Builds a marocco configuration targeting the Executable System
    Specification, creates an excitatory population driven by a spike
    source array, runs the simulation and shuts pynn down.
    """
    pylogging.reset()
    pylogging.default_config(pylogging.LogLevel.INFO)

    marocco = pymarocco.PyMarocco()
    marocco.neuron_placement.default_neuron_size(8)
    marocco.calib_backend = pymarocco.PyMarocco.CalibBackend.Default
    marocco.defects.backend = pymarocco.Defects.Backend.Without
    marocco.hicann_configurator = pysthal.HICANNConfigurator()
    marocco.continue_despite_synapse_loss = True
    marocco.backend = pymarocco.PyMarocco.ESS
    marocco.experiment_time_offset = 5e-7

    n_exc = 100  # Number of excitatory neurons per group

    sim_duration = 200.
    pp_start = 50.  # start = center of pulse-packet

    weight_exc = 0.002  # uS weight for excitatory to excitatory connections
    # (double than in reference paper)

    pynn.setup(
        max_delay=20.,
        marocco=marocco,
    )

    # v_thresh close to v_rest to make sure there are some spikes
    neuron_params = {
        'v_rest': -65.,
        'v_thresh': -62.5,
    }

    exc_pop = pynn.Population(n_exc, pynn.IF_cond_exp, neuron_params)
    exc_pop.record()
    pop_stim = pynn.Population(n_exc, pynn.SpikeSourceArray,
                               {'spike_times': [pp_start]})
    conn = pynn.FixedNumberPreConnector(60, weights=weight_exc, delays=20.)
    pynn.Projection(pop_stim, exc_pop, conn, target='excitatory')
    pynn.run(sim_duration)
    pynn.end()
def test_file_logging_with_filter(self):
    """LevelRangeFilter must restrict which messages reach each file appender."""
    logger1 = logger.get("test")
    logger2 = logger.get("xyz")
    logger3 = logger.get("xyz.test")
    logger.set_loglevel(logger1, logger.LogLevel.WARN)
    logger.set_loglevel(logger2, logger.LogLevel.DEBUG)
    logger.set_loglevel(logger3, logger.LogLevel.INFO)

    # Test different filter
    def filtered_file(filename, attach_to, level_min, level_max):
        # Attach a file appender with a [level_min, level_max] range filter.
        path = os.path.join(self.temp, filename)
        appender = logger.append_to_file(path, attach_to)
        range_filter = logger.LevelRangeFilter()
        range_filter.setLevelMin(level_min)
        range_filter.setLevelMax(level_max)
        appender.addFilter(range_filter)
        return path

    log = filtered_file('test_file_logging_with_filter.log',
                        logger.get_root(),
                        logger.LogLevel.DEBUG, logger.LogLevel.WARN)
    log2 = filtered_file('test_file_logging_with_filter2.log',
                         logger.get_root(),
                         logger.LogLevel.ERROR, logger.LogLevel.FATAL)
    log3 = filtered_file('test_file_logging_with_filter3.log',
                         logger2,
                         logger.LogLevel.ERROR, logger.LogLevel.FATAL)

    for l in (logger1, logger2, logger3):
        for level in ("FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"):
            getattr(logger, "LOG4CXX_" + level)(l, level)
    logger.reset()

    with open(log) as f:
        expected = """WARN test WARN
WARN xyz WARN
INFO xyz INFO
DEBUG xyz DEBUG
WARN xyz.test WARN
INFO xyz.test INFO
"""
        self.assertEqualLogLines(expected, f.read())
    with open(log2) as f:
        expected = """FATAL test FATAL
ERROR test ERROR
FATAL xyz FATAL
ERROR xyz ERROR
FATAL xyz.test FATAL
ERROR xyz.test ERROR
"""
        self.assertEqualLogLines(expected, f.read())
    with open(log3) as f:
        expected = """FATAL xyz FATAL
ERROR xyz ERROR
FATAL xyz.test FATAL
ERROR xyz.test ERROR
"""
        self.assertEqualLogLines(expected, f.read())
def setUp(self):
    """Start every test from a clean logger state and a fresh temp dir."""
    logger.reset()
    temp_dir = tempfile.mkdtemp()
    self.temp = temp_dir
    self.addCleanup(shutil.rmtree, temp_dir)
def test_config_from_file(self):
    """Configure log4cxx from a properties file and verify the file output.

    Writes a log4j-style configuration, replays messages on three loggers
    and compares the log file against expectations recorded by ``logln``,
    which captures the caller's source line via ``inspect``.
    """
    import inspect
    log = os.path.join(self.temp, 'log')
    config = os.path.join(self.temp, 'config')
    with open(config, 'w') as f:
        f.write("""
# Set root logger level to DEBUG and its only appender to A1.
log4j.rootLogger=WARN, A1
# More detail from xyz, but only a bit more from xyz.test
log4j.logger.xyz=TRACE
log4j.logger.xyz.test=INFO
# A1 is set to be a ConsoleAppender.
log4j.appender.A1=org.apache.log4j.FileAppender
log4j.appender.A1.File={log}
#log4j.appender.A1=org.apache.log4j.ConsoleAppender
# A1 uses PatternLayout.
log4j.appender.A1.layout=org.apache.log4j.ColorLayout
log4j.appender.A1.layout.Color=true
log4j.appender.A1.layout.PrintLocation=true
""".format(log=log))
    logger.config_from_file(config)
    logger1 = logger.get("test")
    logger2 = logger.get("xyz")
    logger3 = logger.get("xyz.test")
    filename = inspect.getframeinfo(inspect.currentframe())[0]
    loglines = []

    def logln(msg):
        # Record the expected message plus the ColorLayout location suffix
        # computed from the caller's source line.
        f = inspect.stack()[1][0]
        l = inspect.getframeinfo(f)[1]
        loc = " -> [33m" + filename + "[0m:[31m" + str(l) + "[0m"
        loglines.append(msg)
        loglines.append('\n')
        loglines.append(loc)
        loglines.append('\n')

    # Messages below each logger's effective level are not recorded.
    logger.LOG4CXX_FATAL(logger1, "FATAL")
    logln("[31mFATAL [0mtest FATAL")
    logger.LOG4CXX_ERROR(logger1, "ERROR")
    logln("[31mERROR [0mtest ERROR")
    logger.LOG4CXX_WARN(logger1, "WARN")
    logln("[33mWARN [0mtest WARN")
    logger.LOG4CXX_INFO(logger1, "INFO")
    logger.LOG4CXX_DEBUG(logger1, "DEBUG")
    logger.LOG4CXX_TRACE(logger1, "TRACE")
    logger2.FATAL("FATAL")
    logln("[31mFATAL [0mxyz FATAL")
    logger2.ERROR("ERROR")
    logln("[31mERROR [0mxyz ERROR")
    logger2.WARN("WARN")
    logln("[33mWARN [0mxyz WARN")
    logger2.INFO("INFO")
    logln("[32mINFO [0mxyz INFO")
    logger2.DEBUG("DEBUG")
    logln("[32mDEBUG [0mxyz DEBUG")
    logger2.TRACE("TRACE")
    logln("[32mTRACE [0mxyz TRACE")
    logger.LOG4CXX_FATAL(logger3, "FATAL")
    logln("[31mFATAL [0mxyz.test FATAL")
    logger.LOG4CXX_ERROR(logger3, "ERROR")
    logln("[31mERROR [0mxyz.test ERROR")
    logger.LOG4CXX_WARN(logger3, "WARN")
    logln("[33mWARN [0mxyz.test WARN")
    logger.LOG4CXX_INFO(logger3, "INFO")
    logln("[32mINFO [0mxyz.test INFO")
    logger.LOG4CXX_DEBUG(logger3, "DEBUG")
    logger.LOG4CXX_TRACE(logger3, "TRACE")
    logger.reset()  # Hope this flushes the logger ;)
    with open(log) as f:
        expected = "".join(loglines)
        self.assertEqual(expected, f.read())
def test_file_logging_with_filter(self):
    """Each appender's LevelRangeFilter must pass only levels inside its range."""
    test_log = logger.get("test")
    xyz_log = logger.get("xyz")
    xyz_test_log = logger.get("xyz.test")
    logger.set_loglevel(test_log, logger.LogLevel.WARN)
    logger.set_loglevel(xyz_log, logger.LogLevel.DEBUG)
    logger.set_loglevel(xyz_test_log, logger.LogLevel.INFO)

    # Test different filter
    setups = [
        ('test_file_logging_with_filter.log', logger.get_root(),
         logger.LogLevel.DEBUG, logger.LogLevel.WARN),
        ('test_file_logging_with_filter2.log', logger.get_root(),
         logger.LogLevel.ERROR, logger.LogLevel.FATAL),
        ('test_file_logging_with_filter3.log', xyz_log,
         logger.LogLevel.ERROR, logger.LogLevel.FATAL),
    ]
    paths = []
    for fname, attach_to, level_min, level_max in setups:
        path = os.path.join(self.temp, fname)
        paths.append(path)
        appender = logger.append_to_file(path, attach_to)
        level_filter = logger.LevelRangeFilter()
        level_filter.setLevelMin(level_min)
        level_filter.setLevelMax(level_max)
        appender.addFilter(level_filter)
    log, log2, log3 = paths

    for current in (test_log, xyz_log, xyz_test_log):
        logger.LOG4CXX_FATAL(current, "FATAL")
        logger.LOG4CXX_ERROR(current, "ERROR")
        logger.LOG4CXX_WARN(current, "WARN")
        logger.LOG4CXX_INFO(current, "INFO")
        logger.LOG4CXX_DEBUG(current, "DEBUG")
        logger.LOG4CXX_TRACE(current, "TRACE")
    logger.reset()

    with open(log) as f:
        expected = """WARN test WARN
WARN xyz WARN
INFO xyz INFO
DEBUG xyz DEBUG
WARN xyz.test WARN
INFO xyz.test INFO
"""
        self.assertEqualLogLines(expected, f.read())
    with open(log2) as f:
        expected = """FATAL test FATAL
ERROR test ERROR
FATAL xyz FATAL
ERROR xyz ERROR
FATAL xyz.test FATAL
ERROR xyz.test ERROR
"""
        self.assertEqualLogLines(expected, f.read())
    with open(log3) as f:
        expected = """FATAL xyz FATAL
ERROR xyz ERROR
FATAL xyz.test FATAL
ERROR xyz.test ERROR
"""
        self.assertEqualLogLines(expected, f.read())
def setUp(self):
    """Reset the logging framework and provide a self-cleaning temp directory."""
    logger.reset()
    workdir = tempfile.mkdtemp()
    self.temp = workdir
    self.addCleanup(shutil.rmtree, workdir)
#!/usr/bin/env python
"""
Example Script for simulation of an AdEx neuron on the ESS

Note: Neuron and synapse parameters are chosen to be within the parameter
ranges of the default calibration.
"""

import pyhmf as pynn
#import pyNN.nest as pynn
from pymarocco import PyMarocco, Defects
import pylogging
import pysthal

# configure logging
pylogging.reset()
pylogging.default_config(level=pylogging.LogLevel.INFO,
                         fname="logfile.txt",
                         dual=False)

# Mapping config
marocco = PyMarocco()
marocco.backend = PyMarocco.ESS  # choose Executable System Specification instead of real hardware
marocco.calib_backend = PyMarocco.CalibBackend.Default
# BUG FIX: `Defects.Backend.None` is a SyntaxError on Python 3 (`None` is a
# reserved keyword); access the enum member via getattr instead.
marocco.defects.backend = getattr(Defects.Backend, "None")
marocco.hicann_configurator = pysthal.HICANNConfigurator()
marocco.experiment_time_offset = 5.e-7  # can be low for ESS, as no repeater locking required
marocco.neuron_placement.default_neuron_size(4)  # default number of hardware neuron circuits per pyNN neuron
marocco.persist = "nmpm1_adex_neuron_ess.bin"
marocco.param_trafo.use_big_capacitors = False
def setUp(self):
    """Reset pylogging and silence everything below WARN."""
    pylogging.reset()
    pylogging.default_config(pylogging.LogLevel.WARN)
""" Example Script for simulation of an AdEx neuron on the ESS Note: Neuron and synapse parameters are chosen to be within the parameter ranges of the default calibration. """ import pyhmf as pynn #import pyNN.nest as pynn from pymarocco import PyMarocco, Defects import pylogging import Coordinate as C import pysthal # configure logging pylogging.reset() pylogging.default_config(level=pylogging.LogLevel.INFO, fname="logfile.txt", dual=False) # Mapping config marocco = PyMarocco() marocco.backend = PyMarocco.ESS # choose Executable System Specification instead of real hardware marocco.calib_backend = PyMarocco.CalibBackend.Default marocco.defects.backend = Defects.Backend.None marocco.neuron_placement.skip_hicanns_without_neuron_blacklisting(False) marocco.hicann_configurator = pysthal.HICANNConfigurator() marocco.experiment_time_offset = 5.e-7 # can be low for ESS, as no repeater locking required marocco.neuron_placement.default_neuron_size(4) # default number of hardware neuron circuits per pyNN neuron marocco.persist = "nmpm1_adex_neuron_ess.bin" marocco.param_trafo.use_big_capacitors = False
def test_config_from_file(self):
    """Configure log4cxx from a properties file and verify the file output.

    NOTE: each log call and its matching ``logln(...)`` share one source
    line (joined by ';') so the line number recorded by ``logln`` matches
    the location the layout prints for the log call — do not split them.
    """
    import inspect
    log = os.path.join(self.temp, 'log')
    config = os.path.join(self.temp, 'config')
    with open(config, 'w') as f:
        f.write("""
# Set root logger level to DEBUG and its only appender to A1.
log4j.rootLogger=WARN, A1
# More detail from xyz, but only a bit more from xyz.test
log4j.logger.xyz=TRACE
log4j.logger.xyz.test=INFO
# A1 is set to be a ConsoleAppender.
log4j.appender.A1=org.apache.log4j.FileAppender
log4j.appender.A1.File={log}
#log4j.appender.A1=org.apache.log4j.ConsoleAppender
# A1 uses PatternLayout.
log4j.appender.A1.layout=org.apache.log4j.ColorLayout
log4j.appender.A1.layout.Color=true
log4j.appender.A1.layout.PrintLocation=true
""".format(log=log))
    logger.config_from_file(config)
    logger1 = logger.get("test")
    logger2 = logger.get("xyz")
    logger3 = logger.get("xyz.test")
    filename = inspect.getframeinfo(inspect.currentframe())[0]
    loglines = []

    def logln(msg):
        # Record the expected message plus the ColorLayout location suffix
        # computed from the caller's source line.
        f = inspect.stack()[1][0]
        l = inspect.getframeinfo(f)[1]
        loc = " -> [33m" + filename + "[0m:[31m" + str(l) + "[0m"
        loglines.append(msg)
        loglines.append('\n')
        loglines.append(loc)
        loglines.append('\n')

    logger.LOG4CXX_FATAL(logger1, "FATAL"); logln("[31mFATAL [0mtest FATAL")
    logger.LOG4CXX_ERROR(logger1, "ERROR"); logln("[31mERROR [0mtest ERROR")
    logger.LOG4CXX_WARN (logger1, "WARN"); logln("[33mWARN [0mtest WARN")
    logger.LOG4CXX_INFO (logger1, "INFO")
    logger.LOG4CXX_DEBUG(logger1, "DEBUG")
    logger.LOG4CXX_TRACE(logger1, "TRACE")
    logger2.FATAL("FATAL") ;logln("[31mFATAL [0mxyz FATAL")
    logger2.ERROR("ERROR") ;logln("[31mERROR [0mxyz ERROR")
    logger2.WARN ("WARN") ;logln("[33mWARN [0mxyz WARN")
    logger2.INFO ("INFO") ;logln("[32mINFO [0mxyz INFO")
    logger2.DEBUG("DEBUG") ;logln("[32mDEBUG [0mxyz DEBUG")
    logger2.TRACE("TRACE") ;logln("[32mTRACE [0mxyz TRACE")
    logger.LOG4CXX_FATAL(logger3, "FATAL") ;logln("[31mFATAL [0mxyz.test FATAL")
    logger.LOG4CXX_ERROR(logger3, "ERROR") ;logln("[31mERROR [0mxyz.test ERROR")
    logger.LOG4CXX_WARN (logger3, "WARN") ;logln("[33mWARN [0mxyz.test WARN")
    logger.LOG4CXX_INFO (logger3, "INFO") ;logln("[32mINFO [0mxyz.test INFO")
    logger.LOG4CXX_DEBUG(logger3, "DEBUG")
    logger.LOG4CXX_TRACE(logger3, "TRACE")
    logger.reset()  # Hope this flushes the logger ;)
    with open(log) as f:
        expected = "".join(loglines)
        self.assertEqual(expected, f.read())
import pyhaldls_v2 as hal
import pystadls_v2 as sta
import dlens_v2.halco
import pylogging as logger

# Configure logging: start from a clean state and log everything at DEBUG.
logger.reset()
logger.default_config(level=logger.LogLevel.DEBUG)
def inner_loop():
    """Run a spiking bandit agent on hardware and print the mean regret.

    Command line driven: loads calibration/DAC configurations, optionally
    restores the fittest individual from a pypet trajectory (``--load_from``),
    then plays ``--n_iter`` batches of two-armed bandits through the
    hardware ``Connector`` and optionally pickles the results to ``--out``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--cal', type=str, default='../adv/calibration_20.json')
    parser.add_argument('--dac', type=str, default='../adv/dac_07_chip_20.json')
    parser.add_argument('--load_from', type=str, default='')
    parser.add_argument('--out', type=str, default='')
    parser.add_argument('--pl', type=int, choices=range(32), default=4)
    parser.add_argument('--lr', type=str, choices=['q', 'greedy', 'ann'],
                        default='q')
    parser.add_argument('--generation', type=int, default=-1)
    parser.add_argument('--n_batch', type=int, default=1)
    parser.add_argument('--n_iter', type=int, default=1)
    parser.add_argument('--dependent', default=False, action='store_true')
    parser.add_argument('--verbose', default=False, action='store_true')
    args = parser.parse_args()

    with open(args.cal) as f:
        calibrated_config = json.load(f)
    with open(args.dac) as f:
        dac_config = json.load(f)

    pylogging.reset()
    pylogging.default_config(level=pylogging.LogLevel.INFO,
                             fname="",
                             print_location=False,
                             color=True,
                             date_format='RELATIVE')
    logger = pylogging.get('main')

    agent = SpikingBanditAgent(logger)
    n_batch = args.n_batch
    agent_hp = agent.default_hyperparameters
    if args.lr == 'q':
        learning_rule = IncrementalLearningRule()
    elif args.lr == 'ann':
        learning_rule = ANNLearningRule()
    else:
        # BUG FIX: '--lr greedy' is accepted by argparse but was never
        # handled, causing an UnboundLocalError on `learning_rule` further
        # down; fail fast with a clear message instead.
        parser.error('Learning rule {:s} not supported yet'.format(args.lr))

    if args.load_from != '':
        # Restore the fittest individual of the requested generation.
        traj = pp.Trajectory(filename=args.load_from)
        traj.v_auto_load = True
        traj.f_load(index=-1, force=True)
        pop_size = traj.parameters.pop_size
        n_iter = traj.parameters.n_iteration
        max_fitness = -100
        best_individual = None
        gen_index = n_iter - 1 if args.generation == -1 else args.generation
        for j in range(pop_size):
            traj.v_idx = gen_index * pop_size + j
            fitness = traj.results.crun.fitness
            if fitness > max_fitness:
                max_fitness = fitness
                best_individual = dict(
                    traj.parameters.individual.f_get_children())
                best_individual.pop('seed', None)
                for k in best_individual:
                    best_individual[k] = best_individual[k][traj.v_idx]
        print(best_individual)
        if args.lr == 'q':
            agent_hp = dict(
                action_inhibition=best_individual['action_inhibition'],
                stim_inhibition=best_individual['stim_inhibition'])
            lr_hp = dict(
                learning_rate=best_individual['learning_rate'],
                learning_rate_decay=best_individual['learning_rate_decay'],
                weight_prior=best_individual['weight_prior'])
            learning_rule = IncrementalLearningRule(lr_hp)
        elif args.lr == 'ann':
            lr_hp = dict(learning_rate=best_individual['learning_rate'],
                         ann_parameters=best_individual['ann_parameters'])
            agent_hp = agent.default_hyperparameters
            learning_rule = ANNLearningRule(lr_hp)
        else:
            logger.error('Learning rule {:s} not supported yet'.format(
                args.lr))
            quit()

    bps = []
    ar = []
    regrets = []
    with Connector(calibrated_config, dac_config, args.pl) as connector:
        for i in range(args.n_iter):
            bandit_probabilities = np.random.rand(n_batch, 2)
            if args.dependent:
                # Second arm is the complement of the first.
                bandit_probabilities[:, 1] = 1. - bandit_probabilities[:, 0]
            bandit_probabilities = bandit_probabilities.reshape((-1, ))
            try:
                r = agent.play_bandit_batch(bandit_probabilities, 100,
                                            n_batch, agent_hp,
                                            learning_rule, connector)
                regrets.append(r[0])
            # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit; keep the best-effort retry behavior but only for
            # ordinary errors, and make failures visible in the log.
            except Exception:
                logger.error('play_bandit_batch failed; skipping iteration')
                continue
            ar.append(r[1]['a_r'])
            bps.append(bandit_probabilities)
            logger.info('iteration made')
    print(np.mean(regrets))

    if args.verbose:
        spikes = r[1]['spikes']
        logger.info(spikes[:20, :])
        logger.info('')
        logger.info(spikes[-20:, :])
        logger.info('A total of {} spikes was received'.format(
            spikes.shape[0]))

    if args.out != '':
        with open(args.out, 'wb') as f:
            pickle.dump(dict(bandit_probabilities=bps, a_r=ar), f)
    logger.info('Finished')