def startLog(build=None):
    # Start capturing stdout into a Capturing() log object and return it.
    # NOTE(review): reconstructed from a flattened line -- the if/else
    # grouping below is the most plausible reading; confirm against VCS.
    log = Capturing()
    if debug:
        # Debug mode: print a visible marker instead of redirecting stdout,
        # so output stays on the console.
        print '>' * 10
    else:
        # Tag the log with the build it belongs to, then begin capture.
        # Caller is responsible for ending the capture (__exit__) later.
        log.build = build
        log.__enter__()
    return log
def test_emitter_csv_simple_stdout(self):
    """Run the simple CSV-to-stdout emitter helper and verify its output."""
    with Capturing() as captured:
        self._test_emitter_csv_simple_stdout()
    text = "%s" % captured
    # One metadata line plus one feature line.
    assert len(captured) == 2
    assert "dummy_feature" in text
    assert "metadata" in text
def test_emitter_csv_compressed_stdout(self):
    """Exercise the CSV emitter with compression enabled, writing to stdout."""
    with Capturing() as captured:
        emitter_kwargs = {'namespace': '123', 'compress': True}
        with Emitter(urls=['stdout://'], emitter_args=emitter_kwargs) as emitter:
            emitter.emit("dummy", {'test': 'bla'}, 'dummy')
    # Captured output is rendered but not asserted on (compressed payload).
    text = "%s" % captured
def test_emitter_csv_simple_stdout(): with Capturing() as _output: _test_emitter_csv_simple_stdout() output = "%s" % _output assert len(_output) == 2 assert "dummy_feature" in output assert "metadata" in output print sys._getframe().f_code.co_name, 1
def test_emitter_graphite_simple_stdout(): with Capturing() as _output: _test_emitter_graphite_simple_stdout() output = "%s" % _output # should look like this: # ['namespace777.dummy-feature.test3 3.000000 1449870719', # 'namespace777.dummy-feature.test2 2.000000 1449870719', # 'namespace777.dummy-feature.test4 4.000000 1449870719'] assert len(_output) == 3 assert "dummy_feature" not in output # can't have '_' assert "dummy-feature" in output # can't have '_' assert "metadata" not in output assert 'namespace777.dummy-feature.test2' in output assert 'namespace777.dummy-feature.test3' in output assert 'namespace777.dummy-feature.test4' in output assert len(_output[0].split(' ')) == 3 # three fields in graphite format assert len(_output[1].split(' ')) == 3 # three fields in graphite format assert len(_output[2].split(' ')) == 3 # three fields in graphite format assert float(_output[0].split(' ')[1]) == 12345.0 assert float(_output[1].split(' ')[1]) == 12345.0 assert float(_output[2].split(' ')[1]) == 12345.0 print sys._getframe().f_code.co_name, 1
def train(args):
    """Train a graph model end-to-end and return it.

    Creates a timestamped experiment directory under EXP_DIR, configures
    file (and optionally console) logging, loads data, builds or restores
    the model, then runs the epoch loop with periodic evaluation,
    checkpointing, and LR scheduling.

    Args:
        args: parsed CLI namespace (expects .debug, .verbosity, .gpu,
            .load, .lr, .weight_decay, .epochs, .randomize_nodes, ...).

    Returns:
        The trained model.
    """
    global DEBUG
    DEBUG = args.debug

    # Timestamped model id, e.g. "Dec-14/13-05-42", used as the run directory.
    dt = datetime.datetime.now()
    timestamp = '{}-{}/{:02d}-{:02d}-{:02d}'.format(
        dt.strftime("%b"), dt.day, dt.hour, dt.minute, dt.second)
    model_dir = os.path.join(EXP_DIR, timestamp)
    os.makedirs(model_dir)

    # Configure file logging for this run.
    logging.basicConfig(filename=os.path.join(model_dir, 'log.txt'),
                        level=logging.INFO,
                        format='%(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p')
    if args.verbosity >= 1:
        # Mirror the log to stdout as well.
        root = logging.getLogger()
        root.setLevel(logging.DEBUG)
        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s %(message)s',
                                      datefmt='%m/%d/%Y %I:%M:%S %p')
        ch.setFormatter(formatter)
        root.addHandler(ch)

    # Set device (if using CUDA).
    if torch.cuda.is_available():
        # BUG FIX: torch.cuda.device(...) is a context manager; calling it
        # bare has no effect. set_device actually selects the target GPU.
        torch.cuda.set_device(args.gpu)

    # Record all args in the log for reproducibility.
    for k, v in sorted(vars(args).items()):
        logging.info('{} : {}\n'.format(k, v))

    # Load data.
    training_set, validation_set = load_data(args)
    logging.info(
        'Loaded data: {} training examples, {} validation examples\n'.format(
            len(training_set.graphs), len(validation_set.graphs)))

    experiment_config = get_experiment_config(args, training_set)

    # Initialize model from scratch or restore from a checkpoint.
    if args.load is None:
        logging.info('Initializing model...\n')
        model = experiment_config.model_generator(
            experiment_config.model_config)
    else:
        logging.info('Loading model from {}\n'.format(args.load))
        model = torch.load(os.path.join(EXP_DIR, args.load, 'model.ckpt'))
    if torch.cuda.is_available():
        model.cuda()
    logging.info(model)
    logging.info('Training loss: {}\n'.format(experiment_config.loss_fn))

    # Optimizer and LR schedule: halve the LR after 3 stale evals, floor lr/32.
    lr = args.lr
    optimizer = optim.Adam(model.parameters(), lr=lr,
                           weight_decay=args.weight_decay)
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'min', verbose=True,
                                               factor=0.5, patience=3,
                                               min_lr=lr / 32)
    logging.info(optimizer)
    logging.info(scheduler)

    # Training loop.
    for epoch in range(1, args.epochs + 1):
        if args.randomize_nodes:
            training_set.randomize_nodes()
        train_results = train_one_epoch(model, training_set,
                                        experiment_config.loss_fn, optimizer,
                                        experiment_config.monitors, args.debug)
        logging.info(results_str(epoch, train_results, 'train'))
        if epoch % 5 == 0:
            # Periodic validation, checkpoint, and scheduler step.
            results = evaluate_one_epoch(model, validation_set,
                                         experiment_config.loss_fn,
                                         experiment_config.monitors)
            logging.info(results_str(epoch, results, 'eval'))
            torch.save(model, os.path.join(model_dir, 'model.ckpt'))
            logging.info("Saved model to {}\n".format(
                os.path.join(model_dir, 'model.ckpt')))
            logging.info("Training: processed {:.1f} graphs per second".format(
                len(training_set.graphs) / train_results['time']))
            # BUG FIX: step the scheduler only when a fresh validation loss
            # exists; `results` is undefined before the first eval epoch, so
            # stepping every epoch would raise NameError. ReduceLROnPlateau
            # prints LR changes; capture and forward them to the log.
            with Capturing() as output:
                scheduler.step(results['loss'])
            if len(output) > 0:
                logging.info(output[0])
    return model
def run_single(args, result):
    # Run test.py once with the given CLI arguments, capturing its stdout.
    # NOTE(review): `result` is unused in the visible portion; presumably the
    # captured output is stored into it further down -- confirm against the
    # full file.
    sys.argv = ['test.py'] + args
    with Capturing() as output:
        execfile('test.py')  # Python 2 only: executes the script in this scope
# Minimal Bottle web app: serves the static front-end and delegates camera
# work to the Capturing / Autopilot helpers.
from bottle import post, get, run, static_file, default_app
from capturing import Capturing
from autopilot import Autopilot

# App-wide settings: where photos are written and which port to serve on.
config = {
    'photos_directory': 'photos',
    'port': 3001
}

capturing = Capturing(config['photos_directory'])
autopilot = Autopilot()


@get('/')
@get('/index.html')
def index_html():
    # NOTE(review): `static` is not imported here (only `static_file` is);
    # presumably a wrapper defined later in the file -- confirm.
    return static('index.html', 'text/html')


@get('/<file_path:re:js/.*>')
def js(file_path):
    # Serve bundled JavaScript assets.
    return static(file_path, 'application/javascript')


@get('/<file_path:re:css/.*>')
def css(file_path):
    # Serve stylesheet assets.
    return static(file_path, 'text/css')


# obsolete
@get('/photos-count')