import logging
import os
import shutil

import healpy as hp
import numpy as np

import log


def combine_cmb_noise(config, n, cmb_dir, noise_dir, coupling_matrix, mask):
    combined_dir = os.path.join(config['toplevel_dir'],
                                'ffp8_mc_combined_{:04}'.format(n))
    if not os.path.exists(combined_dir):
        os.mkdir(combined_dir)
    logger = log.make_logger(
        'ffp8_combined_{}'.format(n),
        log_file=os.path.join(combined_dir, 'log.txt'),
        toplevel_log_file=os.path.join(
            config['toplevel_dir'],
            'lgmca_postprocessing_combination_log.txt'))

    with log.Timer(logger,
                   'Combining CMB {0} and noise {0}'.format(n)).with_level(logging.INFO):
        with log.Timer(logger, 'Reading CMB & noise maps and combining them'):
            cmb_map = hp.read_map(
                os.path.join(cmb_dir, 'FFP8_v1_aggregated_cmb.fits'),
                verbose=False)
            noise_map = hp.read_map(
                os.path.join(noise_dir, 'FFP8_v1_aggregated_cmb.fits'),
                verbose=False)
            combined_map_ring = cmb_map + noise_map
            # No need to waste memory
            del cmb_map
            del noise_map
            # Keep RING ordering for anafast; write the map to disk in NESTED.
            combined_map = hp.reorder(combined_map_ring, r2n=True)
            cls = hp.anafast(combined_map_ring, lmax=config['matmask_maxl'],
                             use_weights=True)
            hp.write_map(
                os.path.join(combined_dir, 'FFP8_v1_aggregated_map.fits'),
                combined_map, nest=True, overwrite=True)
            hp.write_cl(
                os.path.join(combined_dir, 'FFP8_v1_aggregated_cls.fits'),
                cls, overwrite=True)
            shutil.copyfile(
                os.path.join(cmb_dir, 'FFP8_v1_aggregated_beam.txt'),
                os.path.join(combined_dir, 'FFP8_v1_aggregated_beam.txt'))

        with log.Timer(logger, 'Computing masked pspec and decoupling'):
            # Undo the mask-induced mode coupling by solving
            # coupling_matrix @ cl = cl_masked for cl.
            masked_powerspec = hp.anafast(combined_map_ring * mask,
                                          lmax=config['matmask_maxl'],
                                          use_weights=True)
            recovered_pspec = np.linalg.solve(coupling_matrix, masked_powerspec)
            hp.write_cl(
                os.path.join(combined_dir, 'mask_corrected_spectra.fits'),
                recovered_pspec, overwrite=True)
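# Example driver (a sketch: the paths and config values below are
# hypothetical stand-ins, not taken from the original pipeline):
#
#     config = {'toplevel_dir': '/data/ffp8', 'matmask_maxl': 2000}
#     mask = hp.read_map('/data/ffp8/mask.fits', verbose=False)
#     coupling_matrix = np.load('/data/ffp8/coupling_matrix.npy')
#     for n in range(100):
#         combine_cmb_noise(
#             config, n,
#             cmb_dir='/data/ffp8/ffp8_mc_cmb_{:04}'.format(n),
#             noise_dir='/data/ffp8/ffp8_mc_noise_{:04}'.format(n),
#             coupling_matrix=coupling_matrix, mask=mask)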
import mirror_engine
import log

logger = log.make_logger("log")
logger.info("Starting.")

# Run forever: rebuild the engine and restart after any crash.
while True:
    engine = mirror_engine.MirrorEngine()
    engine.initialize()
    try:
        engine.run()
    except Exception:  # keep the mirror alive; log and restart
        logger.exception("Uncaught exception!")

# TODO:
# compare diffs on up and downstream
# update downstream dme with upstream
import logging
from collections import defaultdict

import numpy as np

# Assumed imports: the module path below is a guess; these names are used
# in this fragment but were not imported here.
from envs.BlackjackEnv import (Blackjack, State,
                               N_DEALER_CARD_SUM_POSSIBILITIES,
                               N_PLAYER_CARDS_SUM_POSSIBILITIES,
                               N_USABLE_ACE_LAYERS,
                               PLAYER_INIT_STICK_SUM, PLAYER_MIN)
from log import make_logger


def generate_episode(env: Blackjack, player_policy, ep_no):
    history = []
    done = False
    observation = env.reset()
    while not done:
        state = State(*observation)
        history.append(state)
        log.debug('Episode no {}: {}'.format(ep_no, state))
        observation, reward, done, auxiliary = env.step(
            player_policy[state.to_policy_key()])
    return history, reward


if __name__ == '__main__':
    log = make_logger(__name__, logging.DEBUG)
    env = Blackjack()
    # State values indexed by (dealer card, player sum, usable ace).
    state_value = np.zeros((N_DEALER_CARD_SUM_POSSIBILITIES,
                            N_PLAYER_CARDS_SUM_POSSIBILITIES,
                            N_USABLE_ACE_LAYERS))
    # Fixed policy: stick (action 0) once the player sum reaches
    # PLAYER_INIT_STICK_SUM, hit (action 1) otherwise.
    player_policy = np.ones(state_value.shape, dtype=np.int32)
    player_policy[:, (PLAYER_INIT_STICK_SUM - PLAYER_MIN):, :] = 0
    returns = defaultdict(list)
    # Every-visit Monte Carlo policy evaluation.
    for i in range(100000):
        episode, reward = generate_episode(env, player_policy, i)
        log.info('Episode no {} rewarded {:2}: {}'.format(i, reward, episode))
        for state in episode:
            key = state.to_policy_key()
            returns[key].append(reward)
            state_value[key] = np.mean(returns[key])
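    # Illustrative readout (not in the original): summarize the learned
    # value estimates over the states visited during training.
    log.info('Visited {} states; mean estimated value {:.3f}'.format(
        len(returns), float(np.mean([state_value[k] for k in returns]))))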
import logging
import os
from enum import Enum
from itertools import product

import numpy as np
from gym import Env
from gym.spaces import Tuple, Discrete

from log import make_logger

log = make_logger(__name__, logging.INFO)

# All acceleration actions: every combination of -1/0/+1 per axis.
ACTIONS = list(product((-1, 0, 1), (-1, 0, 1)))


class CellType(Enum):
    OFF = 0
    ROAD = 1
    START = 2
    STOP = 3

    @staticmethod
    def values():
        return list(CellType)


class Reward(Enum):
    WIN = 1
    STEP = -1
    LOOSE = -5
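# Illustrative sanity check (not in the original): inspect the action set
# and reward values when running this module directly.
if __name__ == '__main__':
    log.info('{} actions: {}'.format(len(ACTIONS), ACTIONS))
    log.info('Rewards: {}'.format({r.name: r.value for r in Reward}))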
import os
import traceback

import bottle
from bottle import route

# Assumed local modules: these names are used below but were not imported
# in this fragment.
import config
import dashboards
from alerting import Db, rule_from_form
from backend import Backend, make_config
from graphs import Graphs
from log import make_logger
import structured_metrics

# contains all errors as key:(title,msg) items.
# will be used throughout the runtime to track all encountered errors
errors = {}
# will contain the latest data
last_update = None

config = make_config(config)
logger = make_logger('app', config)
logger.debug('app starting')
backend = Backend(config, logger)
s_metrics = structured_metrics.StructuredMetrics(config, logger)
graphs_manager = Graphs()
graphs_manager.load_plugins()
graphs_all = graphs_manager.list_graphs()

bottle.TEMPLATE_PATH.insert(0, os.path.dirname(__file__))


@route('<path:re:/assets/.*>')
@route('<path:re:/timeserieswidget/.*(js|css)>')
@route('<path:re:/timeserieswidget/timezone-js/src/.*js>')
@route('<path:re:/timeserieswidget/tz/.*>')
import traceback

from bottle import route

# Assumed local modules: these names are used below but were not imported
# in this fragment.
import config
from alerting import Db, rule_from_form
from backend import Backend, make_config
from graphs import Graphs
from log import make_logger
import structured_metrics
from validation import RuleEditForm, RuleAddForm

# contains all errors as key:(title,msg) items.
# will be used throughout the runtime to track all encountered errors
errors = {}
# will contain the latest data
last_update = None

config = make_config(config)
logger = make_logger("app", config)
logger.debug("app starting")
backend = Backend(config, logger)
s_metrics = structured_metrics.StructuredMetrics(config, logger)
graphs_manager = Graphs()
graphs_manager.load_plugins()
graphs_all = graphs_manager.list_graphs()


@route("<path:re:/assets/.*>")
@route("<path:re:/timeserieswidget/.*(js|css)>")
@route("<path:re:/timeserieswidget/timezone-js/src/.*js>")
@route("<path:re:/timeserieswidget/tz/.*>")
@route("<path:re:/DataTables/media/js/.*js>")
@route("<path:re:/DataTablesPlugins/integration/bootstrap/.*(js|css)>")
import json

import requests
import tornado.ioloop

# config, creds and log are local modules; LUT, inf, test_connection and
# switch are defined elsewhere in this program (not shown in this fragment).
import config
import creds
import log


def put_request(c_s, pwr, duration):
    """ take a formatted color string and duration float
        and put that request to the LIFX API """
    inf('**** put request: {}, {}, {}s'.format(c_s, pwr, duration))
    data = json.dumps({
        'selector': 'all',
        'power': pwr,
        'color': c_s,
        'duration': duration,
    })
    r = requests.put(config.state_url(), data, headers=creds.headers)
    inf(r)


logger = log.make_logger()
inf('<<<<<<<<<<<<<<<<<< SYSTEM RESTART >>>>>>>>>>>>>>>>>>>>>')
test_connection()

# update sunrise / sunset every day
MS_DAY = 60 * 60 * 24 * 1000
# Pass the callable itself, not its result; calling refresh_solar() here
# would hand PeriodicCallback its return value instead of the function.
refresh_solar_info = tornado.ioloop.PeriodicCallback(LUT.refresh_solar, MS_DAY)
refresh_solar_info.start()

switch('on', False)
print('state now: ' + str(LUT.state_now()))
print('next state: ' + str(LUT.next_state()))
print('secs to next state: ' + str(LUT.secs_to_next_state()))
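# Example call (illustrative; the color string is a plausible LIFX-style
# value, not one taken from this program):
#     put_request('kelvin:2700 brightness:0.8', 'on', 5.0)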
#!/usr/bin/env python2
import os
import sys

import config
from backend import Backend, make_config
from log import make_logger
import structured_metrics

config = make_config(config)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
logger = make_logger('update_metrics', config)

try:
    backend = Backend(config, logger)
    s_metrics = structured_metrics.StructuredMetrics(config, logger)
    errors = s_metrics.load_plugins()
    if len(errors) > 0:
        logger.warn('errors encountered while loading plugins:')
        for e in errors:
            print '\t%s' % e
    logger.info("fetching/saving metrics from graphite...")
    backend.download_metrics_json()
    logger.info("generating structured metrics data...")
    backend.update_data(s_metrics)
    logger.info("success!")
except Exception, e:  # pylint: disable=W0703
    logger.error("sorry, something went wrong: %s", e)
    from traceback import print_exc
    print_exc()
from gym import Env

from envs.CliffWalkingEnv import CliffWalking
from log import make_logger
from windy_gridworld import Sarsa, generate_episode

log = make_logger(__name__)


class QLearning(Sarsa):
    def __init__(self, env: Env, alpha=0.5, gamma=1, epsilon=0.1):
        super().__init__(env, alpha, gamma, epsilon)

    def greedy_value(self, state):
        return self.action_value[state].max()

    def on_new_state(self, prev_state, action, reward, next_state, done):
        # Q-learning update: bootstrap from the greedy (max) action value of
        # the next state rather than the action actually taken, as Sarsa does.
        q = self.action_value[prev_state][action]
        q_next = self.greedy_value(next_state)
        self.action_value[prev_state][action] += self.alpha * (reward + self.gamma * q_next - q)


if __name__ == '__main__':
    env = CliffWalking()
    algorithm = QLearning(env, alpha=0.5, gamma=1, epsilon=0.1)
    for ep in range(int(1e2)):  # 1e4 for Sarsa
        moves = generate_episode(env, algorithm)
        log.info('Episode no. {} done in {} moves'.format(ep, moves))
    log.info('Done learning!')
    algorithm.epsilon = 0
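    # Illustrative follow-up (not in the original): with epsilon at 0 the
    # policy is fully greedy; run one more episode to check the learned path.
    moves = generate_episode(env, algorithm)
    log.info('Greedy run finished in {} moves'.format(moves))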