def main():
    log.setup()
    logger = logging.getLogger(__name__)
    logger.info("App starting")
    try:
        utils.check_python_version()

        logger.info('Reading config file "%s"', CONFIG_FILENAME)
        with open(CONFIG_FILENAME) as f:
            config_parser = configparser.ConfigParser()
            config_parser.read_file(f)
        logger.info('Config: %s', config_parser.items('general'))
        config = config_parser['general']

        fonts = utils.get_fonts(config)
        images = get_weather_images()

        logger.info('Importing EPD control library')
        epd_so, panel_size = utils.get_epd_data(config)

        logger.info('Initial refresh')
        refresh.refresh(panel_size, fonts, images, config, epd_so, True)  # Once in the beginning

        logger.info('Starting scheduler')
        scheduler = BlockingScheduler()
        scheduler.add_job(
            lambda: main_loop(panel_size, fonts, images, config, epd_so),
            'cron', minute='*/1')
        scheduler.start()
    except FileNotFoundError as e:
        logger.exception('Error opening file "%s": %s', CONFIG_FILENAME, e)
    except KeyboardInterrupt:
        logger.warning('Interrupted by user (KeyboardInterrupt)')
    except Exception as e:
        logger.exception('Unexpected error: %s', e)
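# main_loop() is scheduled above but not defined in this snippet. A minimal sketch of
# what it might do, assuming the last argument of refresh.refresh() is an
# "initial refresh" flag as the inline comment suggests (hypothetical, not the
# project's actual implementation):
def main_loop(panel_size, fonts, images, config, epd_so):
    # Regular once-a-minute refresh; False marks it as a non-initial refresh.
    refresh.refresh(panel_size, fonts, images, config, epd_so, False)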
import argparse
import searching
import time

from utils import check_python_version

SEARCH_LIMIT = 1000000
DISPLAY_LIMIT = 10

check_python_version()

se = searching.SearchEngine()


def clamp(val, min_val, max_val):
    return max(min(val, max_val), min_val)


def print_search_results_page(page_no, search_results):
    # Show only the DISPLAY_LIMIT results belonging to the requested page.
    end = page_no * DISPLAY_LIMIT
    start = end - DISPLAY_LIMIT
    for rank, doc in enumerate(search_results):
        if start <= rank < end:
            print("%d. %s" % (rank + 1, doc.url))


def is_valid_ranking(parser, ranking):
    if ranking not in se.rankings:
        parser.error("Invalid ranking algorithm: %s. Available ranking algorithms: %s"
                     % (ranking, se.rankings))
    else:
        return str(ranking)
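# Every snippet in this section imports check_python_version from a project-local
# utils module that is not shown here, and the call signatures vary between
# projects (no arguments, explicit (major, minor), a logger, or an external
# interpreter path). A minimal sketch of what the simplest variant might look
# like (hypothetical, not any project's actual implementation):
import sys


def check_python_version(major=3, minor=6):
    """Abort with a readable message if the running interpreter is too old (sketch)."""
    if sys.version_info < (major, minor):
        sys.exit("Python %d.%d+ is required, but %d.%d was found."
                 % (major, minor, sys.version_info.major, sys.version_info.minor))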
    # flask_app.proxy_server = proxy_server
    flask_app.proxy_api = ProxyServer.AsyncApi
    flask_app.proxy_execute = proxy_server_api
    flask_app.proxy_is_running = proxy_server.is_running
    flask_app.proxy_reset_statistic = proxy_server.reset_statistic
    flask_app.proxy_server_info = proxy_server._get_server_info
    flask_app.proxy_clients_info = proxy_server._get_clients_info

    thread_proxy.daemon = True
    thread_proxy.start()

    if start_proxy:
        flask_app.proxy_execute(ProxyServer.AsyncApi.STARTUP_SERVER)

    try:
        flask_app.run(host=host, port=web_port, debug=False)
    except KeyboardInterrupt:
        flask_app.proxy_execute(ProxyServer.AsyncApi.SHUTDOWN_SERVER)
    finally:
        proxy_loop.stop()


if __name__ == '__main__':
    check_python_version(3, 7)

    log.remove()
    log.add(sys.stderr, level="DEBUG")
    # log.add("any-proxy-server.log", level="INFO", rotation="1 month")

    # run_proxy_server('0.0.0.0', 10000, 'ProxyServer')
    run_proxy_server_with_web('0.0.0.0', 10000, 'ProxyServer', web_port=9999)
log_file = os.path.abspath(__file__).replace(".py", ".log")
logging.basicConfig(filename=log_file, level=logging.DEBUG,
                    format="%(asctime)s %(message)s",
                    datefmt="%m/%d/%Y %I:%M:%S %p")

# Set up the logger to write to the screen as well as to the log file.
# Define a handler which writes INFO messages or higher to sys.stderr.
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# Set a format which is simpler for console use.
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# Tell the handler to use this format.
console.setFormatter(formatter)
# Add the handler to the root logger.
logging.getLogger('').addHandler(console)

logger.info("")
logger.info("Start census-loader")

utils.check_python_version(logger)

if main():
    logger.info("Finished successfully!")
else:
    logger.fatal("Something bad happened!")

logger.info("")
logger.info("-------------------------------------------------------------------------------")
def generate(args):
    config = read_config(args.config)
    try:
        db_type = config.get("server", "type")
        mfs_path_in = config.getoutdirpath("output", "manifests")
        list_path_in = config.getoutdirpath("output", "lists")
        server_age_path_in = config.getoutdirpath("server", "age_directory")
        server_sdl_path_in = config.getoutdirpath("server", "sdl_directory")
        if args.stage:
            mfs_path_out = config.getoutdirpath("stage", "manifests")
            list_path_out = config.getoutdirpath("stage", "lists")
            server_age_path_out = config.getoutdirpath("stage", "age_directory")
            server_sdl_path_out = config.getoutdirpath("stage", "sdl_directory")
        else:
            mfs_path_out, list_path_out = mfs_path_in, list_path_in
            server_age_path_out, server_sdl_path_out = server_age_path_in, server_sdl_path_in
        game_data_path = config.getindirpath("source", "data_path")
        game_scripts_path = config.getindirpath("source", "scripts_path")
        gather_path = config.getindirpathopt("source", "gather_path")
        droid_key = utils.get_droid_key(config.get("server", "droid_key"))
        make_preloader_mfs = config.getboolean("server", "secure_manifest")
        py_version = (config.getint("python", "major"), config.getint("python", "minor"))
        py_exe = config.getinfilepathopt("python", "path")
    except Exception as e:
        # Re-raise as AssetError so config errors look sane.
        raise assets.AssetError(f"Config problem: {e}") from e

    # If we are staging, we'll want to clear out the contents of the staging paths.
    if args.stage:
        logging.info("Clearing staging directories...")
        staging_dirs = [mfs_path_out.iterdir(), list_path_out.iterdir()]
        if server_age_path_out:
            staging_dirs.append(server_age_path_out.iterdir())
        if server_sdl_path_out:
            staging_dirs.append(server_sdl_path_out.iterdir())
        for i in itertools.chain.from_iterable(staging_dirs):
            if i.exists() and i.is_dir():
                shutil.rmtree(i)
            else:
                i.unlink(missing_ok=True)
    else:
        logging.warning("No output will be staged for DELETED content.")

    # Find a Python 2-compatible interpreter.
    if py_exe and py_exe.is_file() and utils.check_python_version(py_exe, py_version):
        logging.debug(f"Using configured Python executable: {py_exe}")
    else:
        py_exe = utils.find_python_exe(py_version)
        if not utils.check_python_version(py_exe, py_version):
            py_exe = None
    if not py_exe:
        logging.critical(f"Could not find Python {py_version[0]}.{py_version[1]}")

    cached_db = assets.load_asset_database(mfs_path_in, list_path_in, db_type)
    prebuilts = assets.load_prebuilt_assets(game_data_path, game_scripts_path, py_exe)
    gathers = assets.load_gather_assets(gather_path)
    source_assets = assets.merge_asset_dicts(prebuilts, gathers)

    ncpus = args.threads if args.threads > 0 else None
    staged_assets = defaultdict(manifest.ManifestEntry)
    age_manifests = dependencies.find_age_dependencies(source_assets, staged_assets, ncpus)
    client_manifests = dependencies.find_client_dependencies(source_assets, staged_assets)
    dependencies.find_script_dependencies(source_assets, staged_assets)

    with tempfile.TemporaryDirectory() as td:
        temp_path = Path(td)
        if args.dry_run:
            list_path_out, mfs_path_out = temp_path, temp_path
            # A dry run forces these files to be copied for testing purposes.
            server_age_path_out = temp_path.joinpath("server_age_files")
            server_sdl_path_out = temp_path.joinpath("server_sdl_files")

        if args.reuse_python:
            # Dry runs can overwrite the list output path with a temp location. We want to use the
            # old output as input, so get the actual value.
            cfg_list_path = config.getoutdirpath("output", "lists")
            plasma_python.reuse(cached_db.lists, source_assets, staged_assets, cfg_list_path)
        else:
            plasma_python.process(source_assets, staged_assets, temp_path, droid_key,
                                  py_exe, py_version)

        commit.copy_server_assets(source_assets, staged_assets,
                                  server_age_path_in, server_sdl_path_in,
                                  server_age_path_out, server_sdl_path_out, ncpus)
        commit.encrypt_staged_assets(source_assets, staged_assets, temp_path, droid_key, ncpus)
        commit.hash_staged_assets(source_assets, staged_assets, ncpus)
        commit.find_dirty_assets(cached_db.assets, staged_assets)

        # Need to merge everything before we can begin the compress proc
        secure_manifests, secure_lists = commit.make_secure_downloads(staged_assets,
                                                                      make_preloader_mfs)
        manifests = commit.merge_manifests(age_manifests, client_manifests, secure_manifests)

        commit.compress_dirty_assets(manifests, cached_db.assets, source_assets, staged_assets,
                                     mfs_path_out, args.force, ncpus)
        commit.copy_secure_assets(secure_lists, source_assets, staged_assets,
                                  list_path_in, list_path_out, droid_key, ncpus)
        commit.nuke_unstaged_assets(cached_db, staged_assets, mfs_path_out, list_path_out)
        assets.nuke_dead_manifests(cached_db.manifests, cached_db.lists, manifests, secure_lists,
                                   mfs_path_out, list_path_out, db_type)
        assets.save_asset_database(cached_db.manifests, cached_db.lists, staged_assets, manifests,
                                   secure_lists, mfs_path_out, list_path_out, db_type, droid_key)

    return True
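# generate() above reads several attributes from its args namespace (config, stage,
# dry_run, reuse_python, threads, force). A hypothetical command-line entry point
# wiring those flags might look like this sketch; the option names and defaults are
# inferred from the attribute accesses, not taken from the actual project:
import argparse


def _parse_args():
    parser = argparse.ArgumentParser(description="Generate file server manifests and lists.")
    parser.add_argument("--config", default="config.ini", help="path to the build configuration file")
    parser.add_argument("--stage", action="store_true", help="write output to the staging directories")
    parser.add_argument("--dry-run", action="store_true", help="write output to a temporary directory")
    parser.add_argument("--reuse-python", action="store_true", help="reuse previously packed Python lists")
    parser.add_argument("--force", action="store_true", help="recompress assets even if unchanged")
    parser.add_argument("--threads", type=int, default=0, help="worker process count (0 = auto)")
    return parser.parse_args()


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    generate(_parse_args())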