def run(self, config):
    logger.init_logger(config.log_uri)

    # Load market data
    log.info('Loading data...')
    data = market_data.load_market_data(config.tickers, config.ticker_types, config.data_sources,
                                        config.start_date, config.end_date, config.history_window,
                                        config.csv_data_uri)
    log.info('Data loaded!')
    print

    # Create the trading algorithm w/o parameters
    trading_algorithm = TradingAlgorithm.create_trading_algorithm(config.algorithm_uri, config.tickers,
                                                                  config.history_window)

    # Setup and run the optimizer
    optimizer = of.create_optimizer(config.num_processors, config.optimizer_name, trading_algorithm,
                                    config.commission, config.ticker_spreads, config.optimization_metric,
                                    config.optimization_metric_ascending, config.optimization_parameters,
                                    config.time_resolution)
    log.info('Running the optimizer...')
    optimizer.run(data, config.start_date, config.end_date)
    log.info('Ran optimizer!')
    print

    return optimizer.results

def main():
    # Command line arguments
    if len(sys.argv) >= 2 and sys.argv[1] == options.cookie_arg:
        options.rewrite_cookies = True
    logger.init_logger()
    options.read_profile()
    if options.run_tests and options.debug:
        tests.run_tests()
    tasks = []
    f = open('websites.txt', 'r')
    lines = f.readlines()
    for line in lines:
        if len(line) < 2:
            continue
        if line.split()[0] == 'SmythsToys':
            smyths_toys = websites.SmythsToys(line.split()[1])
            t = task.Task(smyths_toys)
            t.start()
            tasks.append(t)
        elif line.split()[0] == 'Target':
            target = websites.Target(line.split()[1])
            t = task.Task(target)
            t.start()
            tasks.append(t)

def __init__(self, ip, port, pid_file):
    self.register_options()
    common_config.init(sys.argv[1:])
    logger.init_logger()
    LOG.debug('Full set of CONF:')
    cfg.CONF.log_opt_values(LOG, logging.DEBUG)
    self._ip = ip
    self._port = port
    super(DeamonMain, self).__init__(pid_file)

def main():
    init_logger(logging.INFO)
    args = parser.parse_args()
    run(args)
    extract(EXPORT_FILE, int(args.nlarge))
    clean()

def __init__(self):
    self.global_config = None
    self.project_config = None
    self.keychain = None
    init_logger()
    self._load_global_config()
    self._load_project_config()
    self._load_keychain()
    self._add_repo_to_path()

def main(argv):
    logger.init_logger()
    logging.info('Welcome to GOGetter 3000 :) \nType -h for help')
    input_path, output_path = arguments_parser.parse_input(argv)
    fh = file_handler.FileHandler(input_path, output_path)
    dm = data_manipulate.DataManipulate(fh)
    res = dm.get_host_dict_from_csv()
    if res:
        fh.save_to_csv(res)

def create_app(app_name, config_setting):
    if os.getcwd() not in sys.path:
        sys.path.append(os.getcwd())
    app = Flask(app_name)
    init_logger(app_name, logging.DEBUG, logging.StreamHandler())
    # 1. load config setting, DEBUG flag is set here
    try:
        app.config.from_object(config_setting)
    except ImportError:
        pass
    app_context = app.app_context()
    app_context.push()
    return app

def tokenize(file_path):
    """
    Load and tokenize the tsv file

    Args:
        file_path(str): the tsv file path.

    Returns:
        index_dict (dict{word: list(ids)}): contains the inverse indices
    """
    logger = init_logger()
    logger.info("Will start loading the tsv corpus..")
    articles = _load_content(file_path)  # dict{id: text}
    index_dict = defaultdict(list)
    logger.info("Done loading corpus..")
    i = 0
    for article in articles:
        i = i + 1
        if i % 1000 == 0:
            logger.info("Now processing at %s", i)
        text = articles[article]
        words = extract(text, "en")
        index_dict = _add_to_inverse_index(index_dict, (article, words))
    logger.info("Done.. successfully created inverse index")
    return index_dict

def __init__(self, agent, init_balance=1000000, max_bets_per_session=5, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.logger = init_logger(self.__class__.__name__, show_debug=kwargs.get('debug', True))
    self.agent = agent
    self.init_balance = init_balance
    self.balance = init_balance
    self.max_bets_per_session = max_bets_per_session
    self.pending_bet_count = 0
    self.pending_bets: List[Bet] = []  # won't place bet until the last bet finished
    self.pending_bet_time: Optional[datetime] = None
    self.win_nb = 0
    self.draw_nb = 0
    self.total_bets = 0
    # take profit and stop loss
    self.tp_pct = kwargs.get('tp_pct', 0.2)
    self.sl_pct = kwargs.get('sl_pct', 0.03)
    self.restricted_odds = [0, .25]

def __init__(self, matches_by_date, strategy: BaseStrategy, **kwargs):
    self.matches_by_date = matches_by_date
    self.logger = init_logger(self.__class__.__name__, show_debug=kwargs.get('debug', True))
    self.strategy = strategy
    self.nb_win = 0
    self.nb_bets = 0

def main():
    logger = init_logger('test.aiohttp.bitcoin', log_level=logging.DEBUG, propagate=False)
    logger.info("start test aiohttp server.")
    profile_file = os.path.join(os.getcwd(), 'config.yml')
    with open(profile_file) as stream:
        config = yaml.load(stream)
    static_path = os.path.join(os.getcwd(), 'static')
    loop = uvloop.new_event_loop()
    asyncio.set_event_loop(loop)
    app = web.Application()
    app.router.add_get('/', index_handle)
    app.router.add_static('/css', os.path.join(static_path, "css"))
    app.router.add_static('/js', os.path.join(static_path, "js"))
    app.router.add_static('/fonts', os.path.join(static_path, "fonts"))
    app.router.add_static('/statics', os.path.join(static_path, "statics"))
    app.router.add_route('GET', '/api', api_handler)
    app.router.add_route('GET', '/ws', WebSocket, name='chat')
    app['websockets'] = weakref.WeakSet()
    app['queue'] = asyncio.Queue()
    app['config'] = config
    app.on_shutdown.append(on_shutdown)
    app.on_startup.append(start_background_tasks)
    app.on_cleanup.append(cleanup_background_tasks)
    web.run_app(app)

def __init__(self, main_logger=None):
    """Initializes the logging for the object.

    Args:
        main_logger: logger to which the logs should be sent, optional
    """
    self.log = main_logger or logger.init_logger("validator")

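Many of the snippets in this collection call an init_logger helper whose implementation is project-specific and not shown here; the signatures vary (some take a name, some a file path, some a queue). A minimal sketch of the simplest name-based variant, assuming only the standard library (the function name and formatter below are illustrative, not any project's actual code):

import logging

def init_logger(name, level=logging.INFO):
    # Return a named logger with a single console handler attached once.
    log = logging.getLogger(name)
    if not log.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s"))
        log.addHandler(handler)
    log.setLevel(level)
    return log
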
def __init__(self, **kwargs):
    self.logger = init_logger(__name__, show_debug=True)
    self.n = kwargs.get('action_size', constants.action_size)
    self.action_space = spaces.Discrete(self.n)
    self.obs_shape = (1, kwargs.get('observation', constants.observation_size))
    self.observation_space = spaces.Box(low=0, high=100, shape=self.obs_shape, dtype='float16')
    self.data_path = kwargs.get('data_path', 'csv_files/train.csv')
    self.matches: List[MatchLogRecord] = []
    self.activate_matches: List[MatchLogRecord] = []
    self.balance = kwargs.get('default_balance', 100)
    self.init_balance = kwargs.get('default_balance', 100)
    self.default_stake = kwargs.get('default_stake', 1)
    self.balance_history = []
    self.in_play_match: Optional[MatchLogRecord] = None
    self.odd_hdp: Optional[OddAsianHDP] = None
    self.odd_1x2: Optional[Odd1x2] = None
    self.current_step = 0
    self.step_left = 0
    self.window_index = 0
    self._init_matches()
    self.seed(kwargs.get('seed', 1))

def generate_site(source_folder, output_folder):
    log = init_logger(__name__, logging.INFO)
    log.info('Generating site from %r', source_folder)
    layouts_folder_path = source_folder + '/layout'
    log.info('Getting layouts from %r', layouts_folder_path)

    # Prepare jinja environment
    jinja_env = init_jinja_environment(layouts_folder_path)
    log.info('Jinja environment created successfully')

    try:
        for file_path in list_files(source_folder, '.rst'):
            # Get the html content and the template used to construct the html
            html, template_name = generate_html(file_path, jinja_env)
            # Write to the file and pass the information to the logger
            output_file_name = Path(file_path).stem + '.html'
            write_output(output_folder, output_file_name, html)
            log.info("Wrote %r with template %r", output_file_name, template_name)
        log.info("SUCCESS")
    except FileNotFoundError:
        log.critical("Input file or folder not found")
        log.critical("Could not find the necessary files")
        log.critical("Ending process ...")
    except ValueError:
        log.critical("Template not found in layout folder")
        log.critical("Could not find the necessary files ...")
        log.critical("Ending process ...")

async def get(self):
    self.logger = init_logger('test.aiohttp.bitcoin.ws', log_level=logging.DEBUG, propagate=False)
    self.logger.debug("get WebSocket")
    self.status = True
    ws = web.WebSocketResponse()
    await ws.prepare(self.request)
    self.request.app['websockets'].add(ws)
    while self.status:
        msg = await ws.receive()
        if msg.type == WSMsgType.close:
            self.logger.debug('Close ws')
            await ws.close()
            self.status = False
            break
        elif msg.type == WSMsgType.TEXT:
            self.logger.debug('send str')
            self.logger.debug(msg)
            await ws.send_str(msg.data)
        elif msg.type == WSMsgType.error:
            self.logger.debug('ws connection closed with exception %s' % ws.exception())
            break
    self.request.app['websockets'].remove(ws)
    self.logger.debug('websocket connection closed')
    return ws

def main(args, conf):
    logger = init_logger(conf['path']['log_file_path'])
    logger.info("=" * 10 + " Experiment Starts " + "=" * 10)
    show_conf(conf)
    client = establish_connection(conf['db']['connection_string'])
    db_name = conf['db']['db_name']
    collection_name = args.cname
    dataset_size = int(conf['db']['dataset_size'])
    granularity = int(conf['visual']['granularity'])
    if args.builddb:
        build_db(client=client,
                 db_name=db_name,
                 collection_name=collection_name,
                 distribution=args.builddb,
                 dataset_size=dataset_size,
                 dataset_dir=conf['path']['dataset_dir'])
    # if args.importdata:
    #     collection = client[db_name][collection_name]
    #     collection.drop_indexes()
    #     collection.drop()
    #     dataset_file_name = "{}_dist.txt".format(collection_name)
    #     dataset_path = join(conf['path']['dataset_dir'], dataset_file_name)
    #     import_dataset(collection=collection, dataset_path=dataset_path)
    if args.generatequery:
        generate_query(collection=client[db_name][collection_name],
                       collection_name=collection_name,
                       granularity=granularity,
                       dataset_size=dataset_size,
                       repetition=int(args.generatequery),
                       query_dir=conf['path']['query_dir'],
                       grid_dir=conf['path']['grid_dir'])
    if args.runexperiment:
        query_dir = join(conf['path']['query_dir'], collection_name)
        try:
            query_files_names = [fn for fn in listdir(query_dir) if isfile(join(query_dir, fn))]
            for fn in query_files_names:
                query = load_query(join(query_dir, fn))
                exec_query(collection=client[db_name][collection_name],
                           collection_name=collection_name,
                           granularity=granularity,
                           queries=query,
                           query_file_name=fn,
                           fig_dir=conf["path"]["fig_dir"],
                           grid_dir=conf["path"]["grid_dir"])
        except FileNotFoundError as e:
            logger.error(e)
    if client:
        client.close()
    logger.info("=" * 10 + " Experiment Finished " + "=" * 10)

def main():
    args = sys.argv[1:]
    config.parse_config(args)
    check_root_user()
    init_logger()
    setup_pid_file()
    from server import handle_sftp_session
    server = StreamServer(('0.0.0.0', int(config.options.get('sftp_port', 2200))),
                          handle_sftp_session)
    _logger.info('Solt SFTP server is running and waiting for connections...')
    try:
        server.serve_forever()
    except (SystemExit, KeyboardInterrupt):
        server.close()

def init_args():
    max_verbose_level = len(LEVELS)
    parser = argparse.ArgumentParser(
        description='%sCreate a docx file from a markdown file with a template file.' % __banner__,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-o', '--output',
                        help='Output filename',
                        default='mddot_file_%s.docx' % int(time.time()))
    parser.add_argument('-V', '--version',
                        action='version',
                        version="MDdot Mk.%s" % (__version__),
                        help='Print version')
    parser.add_argument('-v', '--verbose',
                        help='Change log output level from 0 to %s : %s' % (max_verbose_level, LEVELS),
                        default='0',
                        type=int)
    requiredNamed = parser.add_argument_group('Required arguments')
    requiredNamed.add_argument('-m', '--md', help='Input markdown filename', required=True)
    requiredNamed.add_argument('-d', '--docx', help='Input docx template filename', required=True)
    args = parser.parse_args()
    if args.verbose < 0:
        args.verbose = 0
    elif args.verbose >= max_verbose_level:
        args.verbose = max_verbose_level - 1
    init_logger(args.verbose)
    return args

def main():
    global logger
    init_logger(logger, 'logs/launcher.log', 10000000, 'info', 2)
    parser = parse_args()
    args = parser.parse_args()
    if args.install_environ:
        install_environ()
    if args.start_environ:
        start_environ()
    if args.build_code:
        build_code(CODE_DIR)
    if args.start_services:
        start_services(MOSIP_SERVICES, MOSIP_VERSION)
    if args.stop_services:
        stop_services(MOSIP_SERVICES, MOSIP_VERSION)

def ruce_log(self, msg, level='info'):
    log, handler = logger.init_logger(self.config.log_conf)
    if level == 'info':
        log.info(msg)
    elif level == 'warning':
        log.warning(msg)
    elif level == 'error':
        log.error(msg)
    log.removeHandler(handler)

def main():
    """
    Gobi's main function. Finds the test functions, runs the machine,
    connects to them, and runs the tests
    """
    init_logger()
    info("Welcome to gobi. Sit back and relax :)")
    vclient = vagrant.Vagrant()
    test_funcs = get_all_test_functions()
    assert len(test_funcs) > 0, "No tests found. What do you want me to run?"
    info("Found %d tests to run" % len(test_funcs))
    info("Setting up the environment...")
    vclient.up()
    info("Environment is up and ready")
    debug("Taking snapshot...")
    vagrant_take_snapshot()
    debug("Snapshot taken")
    init_fabric(vclient)
    counter = 1
    for task in test_funcs:
        # After the first test, clean - delete cache and revert to snapshot
        if counter != 1:
            clear_fabric_cache()
            debug("Reverting to snapshot...")
            vagrant_revert_to_snapshot()
            debug("Reverted!")
        info("Running test number %d - %s" % (counter, task.__name__))
        execute(task)
        counter += 1
    info("All tests finished")
    info("Destroying environment...")
    vclient.destroy()
    info("Environment has been destroyed...")
    info("Gobi, out")

def main():
    global logger
    parser = parse_args()
    args = parser.parse_args()
    update_db = True
    update_ldap = True
    if args.only_db:
        update_ldap = False
    if args.only_ldap:
        update_db = False
    init_logger(logger, 'logs/add_user.log', 10000000, 'info', 1)
    uinfo = parse_umc_csv(args.csv)
    conn = psycopg2.connect("dbname=mosip_master user=postgres port=9001 password=postgres")
    conn.autocommit = True
    cur = conn.cursor()
    ld = ldap.initialize('ldap://localhost:10389')
    ld.bind('uid=admin,ou=system', 'secret')
    for u in uinfo:
        if update_db:
            add_umc(u, cur)
        if update_ldap:
            try:
                add_role_in_ldap(u.role, u.country, ld)
            except ldap.ALREADY_EXISTS:
                logger.info('Role already exists in LDAP: %s' % (u.role))
            try:
                add_user_in_ldap(u, ld)
            except ldap.ALREADY_EXISTS:
                logger.info('User already exists in LDAP: %s' % (u.uid))
            try:
                add_user_to_role(u.uid, u.role, ld, u.country)
            except ldap.TYPE_OR_VALUE_EXISTS:
                logger.info('User-Role already in LDAP: %s-%s' % (u.uid, u.role))
    conn.commit()
    conn.close()

def main(dtime):
    '''
    Args:
        dtime: Updated dtime for the db script
    '''
    global logger
    init_logger(logger, 'logs/reprocess.log', 10000000, 'info', 1)
    conn = psycopg2.connect("dbname=mosip_regprc user=postgres")
    conn.autocommit = True
    cur = conn.cursor()
    rids = get_invalid_packets(cur, dtime)
    logger.info('Total invalid packets = %s' % len(rids))
    logger.info('Invalid packets = %s' % rids)
    for rid in rids:
        logger.info('reprocessing %s' % rid)
        err = reprocess_packet(rid, cur, HOST)
        if err:
            logger.error('Packet %s.zip not found' % rid)

def setUp(self):
    super(CurrencyHandlerTestCase, self).setUp()
    main_logger = logger.init_logger('main')
    test_csv_handler = csv_handler.CsvHandler(
        os.path.join('..', config.MAIN_CSV_PATH),
        config.RATES_FILE_NAME,
        main_logger
    )
    self.test_currency_handler = currency_handler.CurrencyHandler(
        test_csv_handler.get_csv_data(), main_logger)

def main():
    # parse command line arguments
    args = parse_arguments()

    # init logging
    init_logger(args.logfile, args.verbosity)
    logger = get_logger(os.path.basename(__file__))
    logger.info('Running the rawhttpget script in verbosity level: %d' % args.verbosity)

    # download the file at the given url
    logger.info('Downloading file at: %s' % args.url)
    with Timer() as t:
        try:
            filepath = urlretrieve(args.url, args.port, args.directory)
        except (ValueError, RuntimeError) as e:
            logger.error('%s, quit' % e.message)
            exit(1)
    logger.info('File is downloaded to: %s' % filepath)
    logger.info('Time taken: %ss' % t.duration)

def __init__(self, main_logger=None):
    """Initializes the logger for the object.

    Creates an async http client to communicate with the service.
    Initializes the api key from the config.

    Args:
        main_logger: logger to which the logs should be sent, optional
    """
    self.log = main_logger or logger.init_logger("mailgun")
    self.http_client = AsyncHTTPClient()
    self.key = config.MAILGUN_KEY

def __init__(self, cfg_path):
    '''Constructor of the Runner class. It expects the path to the config
    file to run as its only parameter.'''
    if isinstance(cfg_path, str):
        self.cfg_path = cfg_path
        self.cfg = Config.load_from_json(self.cfg_path)
    elif isinstance(cfg_path, Config):
        self.cfg_path = None
        self.cfg = cfg_path
    else:
        raise Exception('cfg_path must be either a path or a Config object')
    logger.init_logger(self.cfg)
    self.__prepare_summary_writers()
    self.__prepare_results_directory()
    self.__load_embeddings()
    self.data_loader = DataLoader(self.cfg)

def __init__(self, main_logger=None):
    """Initializes the logger for the object.

    Creates an async http client to communicate with the service.
    Initializes the api key from the config.

    Args:
        main_logger: logger to which the logs should be sent, optional
    """
    self.log = main_logger or logger.init_logger("mandrill")
    self.http_client = AsyncHTTPClient()
    self.key = config.MANDRILL_KEY

def run_threads(self, sim: bool) -> None:
    # Register Communication object to Base Manager
    BaseManager.register("Communication", Communication)
    # Create manager object
    manager: BaseManager = BaseManager()
    # Start manager
    manager.start()
    # Create Communication object from manager
    comm_obj = manager.Communication()

    log_queue: Queue = Queue(-1)
    logging_process = init_logger(log_queue)
    logging_process.start()
    worker_configurer(log_queue)

    # Create new processes
    logging.info("Spawning Processes")
    flight_args = (comm_obj, sim, log_queue, worker_configurer, self.state_settings)
    flight_process: Process = self.init_flight(flight_args)

    # Start flight function
    flight_process.start()
    logging.debug("Flight process with id %d started", flight_process.pid)
    logging.debug(f"Title: {self.state_settings.run_title}")
    logging.debug(f"Description: {self.state_settings.run_description}")

    try:
        while comm_obj.get_state() != "final":
            # If the process is no longer alive
            # (i.e. an error has been raised in this case),
            # then create a new instance and start the new process
            # (i.e. restart the process)
            if flight_process.is_alive() is not True:
                logging.error("Flight process terminated, restarting")
                flight_process = self.init_flight(flight_args)
                flight_process.start()
    except KeyboardInterrupt:
        # Ctrl-C was pressed
        # TODO send a message to the flight process to land instead of
        # basically overwriting the process
        logging.info("Ctrl-C Pressed, forcing drone to land")
        comm_obj.set_state("land")
        flight_process = self.init_flight(flight_args)
        flight_process.start()

    # Join flight process before exiting function
    flight_process.join()
    logging.info("All processes ended, Goodbye!")
    logging_process.stop()

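The snippet above relies on init_logger(log_queue) and worker_configurer(log_queue) helpers that are not shown. A minimal sketch of what they could look like, assuming the standard-library queue-based pattern (a QueueHandler in each worker process and a QueueListener that owns the real handlers and exposes the start()/stop() calls used above); the names, levels, and formatter are assumptions, not the project's actual code:

import logging
import logging.handlers

def worker_configurer(log_queue):
    # Each process routes its records to the shared queue.
    root = logging.getLogger()
    root.addHandler(logging.handlers.QueueHandler(log_queue))
    root.setLevel(logging.DEBUG)

def init_logger(log_queue):
    # A listener drains the queue and writes through a real handler.
    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter("%(asctime)s %(processName)s %(levelname)s %(message)s"))
    return logging.handlers.QueueListener(log_queue, console)
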
def run(self, config):
    logger.init_logger(config.log_uri)

    # Estimate data count needed prior to first out-of-sample period
    freq_factor = self._frequency_diff_factor(config.time_resolution, config.sample_period)
    data_request_history_window = (config.in_sample_periods * freq_factor) + config.history_window

    # Load market data
    log.info('Loading data...')
    data = market_data.load_market_data(config.tickers, config.ticker_types, config.data_sources,
                                        config.start_date, config.end_date, data_request_history_window,
                                        config.csv_data_uri)
    log.info('Data loaded!')
    print

    # Create the trading algorithm w/o parameters
    trading_algorithm = TradingAlgorithm.create_trading_algorithm(config.algorithm_uri, config.tickers,
                                                                  config.history_window)

    # Create the optimizer
    optimizer = of.create_optimizer(config.num_processors, config.optimizer_name, trading_algorithm,
                                    config.commission, config.ticker_spreads, config.optimization_metric,
                                    config.optimization_metric_ascending, config.optimization_parameters,
                                    config.time_resolution)

    # Create the backtester
    backtester = b.Backtester(-1, trading_algorithm, config.cash, config.commission, config.ticker_spreads)

    # Setup and run the walk forward analyzer
    walk_forward_analyzer = WalkForwardAnalyzer(config.in_sample_periods, config.out_of_sample_periods,
                                                config.sample_period, optimizer, backtester)
    log.info('Running the walk forward analyzer...')
    walk_forward_analyzer.run(data, config.start_date, config.end_date, config.cash)
    log.info('Ran the walk forward analyzer!')
    print

    return walk_forward_analyzer.results

def __init__(self, main_logger=None):
    """Initializes the logger for the object.

    Creates an async boto client to communicate with the aws service.
    API keys are derived from the environment (e.g. ~/.aws/credentials).

    Args:
        main_logger: logger to which the logs should be sent, optional
    """
    self.log = main_logger or logger.init_logger("ses")
    self.ses_client = Botocore(
        service='ses',
        operation='SendEmail',
        region_name='eu-west-1'
    )

def connect_to_kdbdb(host="localhost", port="10000"):
    # Create log handler
    log_handler = logger.init_logger()
    # Connect to the database, with exception handling
    try:
        q = qconnection.QConnection(host=host, port=int(port))
        q.open()
        return q
    except qconnection.QConnectionException as err:
        log_handler.error(err)
    except:
        log_handler.error("Unable to connect to KDB")

def __init__(self, app=None):
    self.logger = init_logger('test.aiohttp.bitcoin.rpcclient', log_level=logging.DEBUG, propagate=False)
    self.app = app
    config = app['config']
    customer_records = config.get('customer_records')[0]
    login = customer_records.get('bitcoin_rpc_username')
    password = customer_records.get('bitcoin_rpc_password')
    self.url = "http://{0}".format(customer_records.get('bitcoin_rpc_address'))
    self.auth = BasicAuth(login=login, password=password)

def __init__(self, main_logger=None):
    """Initializes the logger for the object.

    Creates an async boto client to communicate with the aws service.
    API keys are derived from the environment (e.g. ~/.aws/credentials).

    Args:
        main_logger: logger to which the logs should be sent, optional
    """
    self.log = main_logger or logger.init_logger("ses")
    self.ses_client = Botocore(service='ses', operation='SendEmail',
                               region_name='eu-west-1')

def run(self, config):
    logger.init_logger(config.log_uri)

    # Load market data
    log.info('Loading data...')
    data = market_data.load_market_data(config.tickers, config.ticker_types, config.data_sources,
                                        config.start_date, config.end_date, config.history_window,
                                        config.csv_data_uri)
    log.info('Data loaded!')
    print

    # Create the trading algorithm
    trading_algorithm = TradingAlgorithm.create_trading_algorithm(config.algorithm_uri, config.tickers,
                                                                  config.history_window,
                                                                  config.algorithm_parameters)

    # Setup and run the backtester
    backtester = Backtester(0, trading_algorithm, config.cash, config.commission, config.ticker_spreads)
    log.info('Running the backtester...')
    backtester.run(data, config.start_date, config.end_date)
    log.info('Ran backtester!')
    print

    return backtester.results

def main(config_file):
    # Read the configuration
    init_config(config_file)
    # Initialize logging
    logger.init_logger()
    # Initialize the db
    init_db()
    # Create indexes
    init_db_index()
    # Initialize the mail object
    mail_box.init_mail()
    # Initialize scheduled tasks
    task.init_task()
    # Add tasks
    myfund.init_fund_task()
    # Start running tasks
    taskobj = global_obj.get_obj("task_timer")
    taskobj.RunForever()

def __init__(self, db, main_logger=None):
    """Initializes the database and the logger for the object.

    Creates objects for all available email handlers.

    Args:
        main_logger: logger to which the logs should be sent, optional
        db: database connection object.
    """
    self.log = main_logger or logger.init_logger("email")
    self.db = db
    self.handlers = {
        config.EMAIL_HANDLERS.MANDRILL.value: MandrillEmailHandler(self.log),
        config.EMAIL_HANDLERS.MAILGUN.value: MailgunEmailHandler(self.log),
        config.EMAIL_HANDLERS.SES.value: SesEmailHandler(self.log)
    }
    self.db.init_email_handlers(config.EMAIL_HANDLERS)

def __init__(self, main_logger=None):
    """Initializes the database connection and logging for the object.

    Args:
        main_logger: logger to which the logs should be sent, optional

    Raises:
        RuntimeError: if redis is not connected properly.
    """
    self.log = main_logger or logger.init_logger("db")
    self.db_r = redis.StrictRedis(
        host=config.REDIS_HOST,
        port=config.REDIS_PORT,
        db=config.REDIS_DB
    )
    if not self.db_r or not self.test_redis_connection():
        # critical
        raise RuntimeError("Redis not connected properly.")
    self.log.debug("Redis initialization complete.")

def main():
    logger.init_logger()
    o = Oracle()
    o.run()

""" from crtc import Crtc from config import config import logger import datetime import time import re import check_offset import os import sys import shelve import subprocess #initialize the logger logfile = logger.init_logger('hipat_control') def check_running(): """will check if hipat_control is already running. returns: address to pidfile """ pid = str(os.getpid()) pidfile = "/mnt/tmpfs/check_offset.pid" if os.path.isfile(pidfile): #if a pidfile exists new_pid = file(pidfile, 'r').read() try: os.kill(int(new_pid),0) #check if a process is running except: file(pidfile, 'w').write(pid) #if not we write our own pid
def setUp(self):
    super(CsvHandlerTestCase, self).setUp()
    self.main_logger = logger.init_logger('main')

    bar.set_cb(get_process_c)
    bar.start_progress(fsize)
    resolve_rtcm3_c(buff)
    bar.stop_progress()
    f.close()
    log_close_c(0)
    log_close_c(1)
    log_close_c(2)
    t_e = time.time()
    print "done! time consumed:{}".format(t_e - t_s)


if __name__ == '__main__':
    from logger import init_logger
    fn = tkFileDialog.askopenfilename(initialdir=os.getcwd())
    f = open(fn, "rb")
    fo = open("pvt.sol")
    f_size = os.path.getsize(fn)
    init_logger(fo)
    parse_v1log(f, f_size)
    # f_out = open("pvt.sol", "w")
    # plot_f(f_out, "green")

returns: offset in ms to the reference server.
"""

from config import config
import subprocess
import time
import sys
import shelve
import re
import datetime
import math
import logger

# initialize the logger
logfile = logger.init_logger('check_offset')


def ntpd_running():
    """Will make sure ntpd is running.

    If ntpd has stopped, the offset to the reference server may have been too
    great, which means we will need to do a more direct time synchronization
    to the server. When ntpd_running() is complete the ntpd server is
    guaranteed to be running.

    returns: none if ntpd was running, returns "restarted" if there was a
    problem with ntpd.
    """
    ref_server = config["hipat_reference"]
    # if ntpd isn't running we set the date manually and restart the service.
    ntpd_status = subprocess.call(["pgrep", "ntpd"], stdout=subprocess.PIPE)
    if (ntpd_status != 0):
        logfile.warn("Ntpd not running, running ntpdate and restarting")
        subprocess.call(["/etc/rc.d/ntpd", "stop"])

from serial import Serial
from timeout import timeout  # import the timeout decorator
from config import config  # configuration dictionary
import logger
import re
import datetime
import time
import shelve
import math
import sys
import os
import check_offset
import subprocess

# initialize the logger
logfile = logger.init_logger('crtc')

ser_buffer = ''  # Global receive buffer


class Crtc():
    """Crtc is the class handling all the communication over the serial
    interface.
    """

    def __init__(self, address=config['serial_address']):
        """Initiating the serial port"""
        self.ser = Serial(address, 4800, timeout=3)
        self.ser.close()

    def __str__(self):
        """print serial buffer."""

            response.add_code(config.RESPONSE_ERROR)
            response.add_msg('Internal Error')
        finally:
            if 'favicon' not in self.request.uri:
                response = tornado.escape.json_encode(response.get())
                self.write(response)
                self.finish()


##############################################################################
# MAIN APPLICATION
##############################################################################
if __name__ == '__main__':
    global main_logger
    main_logger = logger.init_logger('main')

    global main_csv_handler
    main_csv_handler = csv_handler.CsvHandler(
        config.MAIN_CSV_PATH, config.RATES_FILE_NAME, main_logger)

    global main_currency_handler
    main_currency_handler = currency_handler.CurrencyHandler(
        main_csv_handler.get_csv_data(), main_logger)

    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(config.PORT)
    main_logger.debug("Application initialized.")
    tornado.ioloop.IOLoop.instance().start()