def setup_logging_and_parse_arguments(blocktitle):
    # ----------------------------------------------------------------------------
    # Parse commandline and default arguments
    # ----------------------------------------------------------------------------
    args, defaults = _parse_arguments()

    # ----------------------------------------------------------------------------
    # Setup logbook before everything else
    # ----------------------------------------------------------------------------
    logger.configure_logging(os.path.join(args.save, 'logbook.txt'))

    # ----------------------------------------------------------------------------
    # Write arguments to file, as txt
    # ----------------------------------------------------------------------------
    tools.write_dictionary_to_file(
        sorted(vars(args).items()),
        filename=os.path.join(args.save, 'args.txt'))

    # ----------------------------------------------------------------------------
    # Log arguments
    # ----------------------------------------------------------------------------
    with logger.LoggingBlock(blocktitle, emph=True):
        for argument, value in sorted(vars(args).items()):
            reset = colorama.Style.RESET_ALL
            color = reset if value == defaults[argument] else colorama.Fore.CYAN
            logging.info('{}{}: {}{}'.format(color, argument, value, reset))

    # ----------------------------------------------------------------------------
    # Postprocess
    # ----------------------------------------------------------------------------
    args = postprocess_args(args)
    return args
def main():
    parser = argparse.ArgumentParser(description="Peeper")
    parser.add_argument("-c", "--config", default='conf.yaml',
                        help="path to the configuration file")
    parser.add_argument("-l", "--log", help="path to the log file")
    parser.add_argument("--log-level", choices=get_log_levels(), default='INFO',
                        help="logging level")
    args = parser.parse_args()

    configure_logging(args.log, args.log_level, 'sender')

    log("Loading configuration...")
    conf = get_config(args.config)
    requests = conf.get_requests()

    input_help = ['Select command:', "0: exit application"] + [
        '%s: %s' % (key, requests[key].title) for key in requests
    ]

    command_id = ''
    while command_id != '0':
        log(*input_help)
        try:
            command_id = input('Input command code: ')
        except:
            log("Stopping app because of: ")
            traceback.print_exc(file=sys.stdout)
            break

        if command_id in requests:
            try:
                connection = http.client.HTTPConnection(host=conf.get_host(),
                                                        port=conf.get_port())
                req_settings = requests[command_id]
                connection.request(req_settings.type, req_settings.path,
                                   json.dumps(req_settings.body),
                                   req_settings.headers)
                response = connection.getresponse()
                log("Status: {} and reason: {}".format(response.status,
                                                       response.reason))
                log("Response body:", response.read().decode(), '\n')
                connection.close()
            except Exception as exc:
                log('Unable to send request', lvl=logging.WARNING)
                log(str(exc), lvl=logging.WARNING)
        elif command_id != '0':
            log("Unknown command code!", '\n')
        else:
            log("'0' selected, exiting application...")
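# ------------------------------------------------------------------------------
# A minimal sketch (not the project's actual implementation) of the log()
# helper that the Peeper CLI above assumes: it accepts any number of message
# parts plus an optional `lvl` keyword, matching the call sites in main().
# ------------------------------------------------------------------------------
import logging


def log(*parts, lvl=logging.INFO):
    logging.log(lvl, ' '.join(str(part) for part in parts))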
def setup_logging_and_parse_arguments(blocktitle):
    # ----------------------------------------------------------------------------
    # Parse commandline and default arguments
    # ----------------------------------------------------------------------------
    args, defaults = _parse_arguments()

    # ----------------------------------------------------------------------------
    # Setup logbook before everything else
    # ----------------------------------------------------------------------------
    logger.configure_logging(os.path.join(args.save, "logbook.txt"))

    # ----------------------------------------------------------------------------
    # Write arguments to file, as json and txt
    # ----------------------------------------------------------------------------
    json.write_dictionary_to_file(
        vars(args), filename=os.path.join(args.save, "args.json"), sortkeys=True)
    json.write_dictionary_to_file(
        vars(args), filename=os.path.join(args.save, "args.txt"), sortkeys=True)

    # ----------------------------------------------------------------------------
    # Log arguments
    # ----------------------------------------------------------------------------
    with logger.LoggingBlock(blocktitle, emph=True):
        for argument, value in sorted(vars(args).items()):
            reset = colorama.Style.RESET_ALL
            color = reset if value == defaults[argument] else colorama.Fore.CYAN
            if isinstance(value, dict):
                for sub_argument, sub_value in collections.OrderedDict(value).items():
                    logging.info("{}{}_{}: {}{}".format(
                        color, argument, sub_argument, sub_value, reset))
            else:
                logging.info("{}{}: {}{}".format(color, argument, value, reset))

    # ----------------------------------------------------------------------------
    # Postprocess
    # ----------------------------------------------------------------------------
    args = postprocess_args(args)
    return args
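# ------------------------------------------------------------------------------
# A hedged sketch of the `_parse_arguments()` contract assumed by both
# setup_logging_and_parse_arguments() variants above: it returns the parsed
# namespace together with a dict of argparse defaults, so callers can color
# the values that differ from their defaults. The individual arguments here
# are hypothetical; only the (args, defaults) return shape comes from the
# call sites.
# ------------------------------------------------------------------------------
import argparse


def _parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument('--save', default='./output')          # hypothetical
    parser.add_argument('--batch_size', type=int, default=8)   # hypothetical
    args = parser.parse_args()
    # argparse exposes the registered default for every destination
    defaults = {key: parser.get_default(key) for key in vars(args)}
    return args, defaults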
def __init__(self, config_file):
    self.config_file = Path(config_file)
    self.config = read_json(self.config_file)

    experiment_name = self.config['experiment_name']
    out_dir = Path(self.config['trainer']['save_dir'])

    timestamp = datetime.now().strftime(r'%Y%m%d_%H%M%S')
    self.save_dir = out_dir / 'models' / experiment_name / timestamp
    self.log_dir = out_dir / 'log' / experiment_name / timestamp
    self.samples_dir = out_dir / 'samples' / experiment_name / timestamp

    self.save_dir.mkdir(parents=True, exist_ok=True)
    self.log_dir.mkdir(parents=True, exist_ok=True)
    self.samples_dir.mkdir(parents=True, exist_ok=True)

    configure_logging(self.log_dir)

    self.log_levels = {
        0: logging.WARNING,
        1: logging.INFO,
        2: logging.DEBUG,
    }
except OSError:
    print("BAZARR The configuration directory doesn't exist and Bazarr cannot "
          "create it (permission issue?).")
    exit(2)

if not os.path.exists(os.path.join(args.config_dir, 'config')):
    os.mkdir(os.path.join(args.config_dir, 'config'))
if not os.path.exists(os.path.join(args.config_dir, 'db')):
    os.mkdir(os.path.join(args.config_dir, 'db'))
if not os.path.exists(os.path.join(args.config_dir, 'log')):
    os.mkdir(os.path.join(args.config_dir, 'log'))
if not os.path.exists(os.path.join(args.config_dir, 'cache')):
    os.mkdir(os.path.join(args.config_dir, 'cache'))

configure_logging(settings.general.getboolean('debug') or args.debug)
import logging

# deploy requirements.txt
if not args.no_update:
    try:
        import lxml, numpy, webrtcvad
    except ImportError:
        try:
            import pip
        except ImportError:
            logging.info('BAZARR unable to install requirements (pip not installed).')
        else:
            if os.path.expanduser("~") == '/':
                logging.info(
    teledb.create_timeline()

#------------------------------------------------------------------------------
if __name__ == '__main__':
    if sys.version_info[0] < 3:
        sys.exit('Python 3 or a more recent version is required.')

    description = 'Telegram parser version {}'.format(VERSION)
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('infilename', help='input file cache4.db')
    parser.add_argument('outdirectory', help='output directory, must exist')
    parser.add_argument('-v', '--verbose', action='count',
                        help='verbose level, -v to -vvv')
    args = parser.parse_args()

    logger.configure_logging(args.verbose)

    if os.path.exists(args.infilename):
        if os.path.isdir(args.outdirectory):
            process(args.infilename, args.outdirectory)
        else:
            logger.error('Output directory [%s] does not exist!', args.outdirectory)
    else:
        logger.error('The provided input file does not exist!')
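# ------------------------------------------------------------------------------
# logger.configure_logging(args.verbose) above receives the -v count (None,
# 1, 2 or 3). One plausible mapping from that count to stdlib levels, as a
# hedged sketch; the project's actual logger module may differ.
# ------------------------------------------------------------------------------
import logging


def verbosity_to_level(count):
    levels = {None: logging.ERROR, 1: logging.WARNING,
              2: logging.INFO, 3: logging.DEBUG}
    return levels.get(count, logging.DEBUG)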
def save_settings(settings_items):
    from database import database

    configure_debug = False
    configure_captcha = False
    update_schedule = False
    update_path_map = False
    configure_proxy = False

    for key, value in settings_items:
        # Intercept database-stored settings
        if key == 'enabled_languages':
            database.execute("UPDATE table_settings_languages SET enabled=0")
            for item in value:
                database.execute(
                    "UPDATE table_settings_languages SET enabled=1 WHERE code2=?",
                    (item,))
            continue

        # Make sure that text-based form values aren't passed as a list unless
        # they are a language list
        if isinstance(value, list) and len(value) == 1 and key not in [
                'settings-general-serie_default_language',
                'settings-general-movie_default_language']:
            value = value[0]

        # Make sure empty language lists are stored correctly, due to a bug in
        # bootstrap-select
        if key in ['settings-general-serie_default_language',
                   'settings-general-movie_default_language'] and value == ['null']:
            value = []

        settings_keys = key.split('-')

        if value == 'true':
            value = 'True'
        elif value == 'false':
            value = 'False'

        if key == 'settings-auth-password':
            value = hashlib.md5(value.encode('utf-8')).hexdigest()

        if key == 'settings-general-debug':
            configure_debug = True

        if key in ['settings-general-anti_captcha_provider',
                   'settings-anticaptcha-anti_captcha_key',
                   'settings-deathbycaptcha-username',
                   'settings-deathbycaptcha-password']:
            configure_captcha = True

        if key in ['update_schedule', 'settings-general-use_sonarr',
                   'settings-general-use_radarr', 'settings-general-auto_update',
                   'settings-general-upgrade_subs']:
            update_schedule = True

        if key in ['settings-general-path_mappings',
                   'settings-general-path_mappings_movie']:
            update_path_map = True

        if key in ['settings-proxy-type', 'settings-proxy-url',
                   'settings-proxy-port', 'settings-proxy-username',
                   'settings-proxy-password']:
            configure_proxy = True

        if settings_keys[0] == 'settings':
            settings[settings_keys[1]][settings_keys[2]] = str(value)

    with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
        settings.write(handle)

    # Reconfigure Bazarr to reflect changes
    if configure_debug:
        from logger import configure_logging
        configure_logging(settings.general.getboolean('debug') or args.debug)

    if configure_captcha:
        configure_captcha_func()

    if update_schedule:
        from api import scheduler
        scheduler.update_configurable_tasks()

    if update_path_map:
        from helper import path_mappings
        path_mappings.update()

    if configure_proxy:
        configure_proxy_func()
    def load_proteins(self, species_id):
        proteins = []
        with open(self.storage_dir + str(species_id) + '-proteins.txt') as file:
            for line in file:
                r = line.split()
                proteins.append(Protein(int(r[0]), r[1], r[2]))
        return proteins

    def load_proteins_names(self, species_id):
        names = dict()
        with open(self.storage_dir + str(species_id) + '-proteins_names.txt') as file:
            for line in file:
                r = line.split()
                names[int(r[0])] = [n.strip() for n in r[1:]]
        return names


if __name__ == '__main__':
    import logger
    logger.configure_logging(loglevel='DEBUG')

    repository = StringDbFileRepository()
    proteins = repository.load_proteins(1148)
    print("{0} proteins loaded".format(len(proteins)))
    p1 = proteins[0]
    print("p1: {0}, species: {1}".format(p1, p1.speciesId))
    names = repository.load_proteins_names(1148)
    print("{0} names: {1}".format(p1.externalId, ','.join(names[p1.id])))
        raise ValueError("failed to get interactions file for {0}".format(speciesId))
    interactions_file = files[0]
    if not os.path.isfile(interactions_file):
        raise ValueError("failed to get interactions file for {0}".format(speciesId))
    logging.debug('using %s', interactions_file)
    return interactions_file


def read_score(score_file):
    with open(score_file) as s:
        return s.readline().strip()


def sort_abundances(dataset):
    cmd = "cat '{0}'".format(dataset)
    cmd = cmd + "| awk '{print $2,$1}' | sort -gr | awk '{print $2,$1}'"
    sorted_out = subprocess.check_output(cmd, shell=True).decode('utf8')
    sorted_abundances = [l + '\n' for l in sorted_out.split('\n')]
    return sorted_abundances


if __name__ == '__main__':
    logger.configure_logging()
    if not os.path.exists(TMP):
        os.makedirs(TMP)
    # ruffus.pipeline_printout(sys.stdout, [score, collect_scores],
    #                          verbose_abbreviated_path=6, verbose=3)
    ruffus.pipeline_run([score, collect_scores], verbose=6, multiprocess=1)
    # shutil.rmtree(TMP)
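# ------------------------------------------------------------------------------
# The shell pipeline in sort_abundances() prints column 2 first, sorts
# numerically in reverse (`sort -gr`), then restores the column order: in
# other words, it orders lines by their numeric second column, descending.
# A hedged, dependency-free equivalent in pure Python (assumes two
# whitespace-separated columns per line, as in the pipeline above):
# ------------------------------------------------------------------------------
def sort_abundances_py(dataset):
    with open(dataset) as f:
        rows = [line.split() for line in f if line.strip()]
    # highest abundance (column 2) first
    rows.sort(key=lambda cols: float(cols[1]), reverse=True)
    return ['{0} {1}\n'.format(name, abundance) for name, abundance in rows]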
from render_jinja import render_templates
from server import run_server

PARSER = argparse.ArgumentParser()
PARSER.add_argument('--path', type=str, required=True)
PARSER.add_argument('--collection', action='append', required=True, type=str)
PARSER.add_argument('--theme', type=str, required=True)
PARSER.add_argument('--static', type=str, action='append', required=False)
PARSER.add_argument('--serve', default=False, action='store_true')
PARSER.add_argument('--port', type=int, default=8000)
PARSER.add_argument('--verbose', default=False, action='store_true')
ARGS = PARSER.parse_args()

configure_logging(ARGS.verbose)
log_default = get_logger(__name__)


def build_website(path, dist_path, theme_path, theme, collections, statics):
    website = create_website_model(path, collections)

    log_default('Loading "%s" theme...', theme)
    templates = load_templates(theme_path)

    log_default('Rendering templates...')
    render_templates(dist_path, templates, website)

    # add static files
def main():
    # ---------------------------------------------------
    # Set working directory to folder containing main.py
    # ---------------------------------------------------
    os.chdir(os.path.dirname(os.path.realpath(__file__)))

    # ----------------------------------------------------------------
    # Activate syntax highlighting in tracebacks for better debugging
    # ----------------------------------------------------------------
    colored_traceback.add_hook()

    # -----------------------------------------------------------
    # Configure logging
    # -----------------------------------------------------------
    logging_filename = os.path.join(commandline.parse_save_dir(),
                                    constants.LOGGING_LOGBOOK_FILENAME)
    logger.configure_logging(logging_filename)

    # ----------------------------------------------------------------
    # Register type factories before parsing the commandline.
    # NOTE: We decided to explicitly call these init() functions, to
    #       have more precise control over the timeline
    # ----------------------------------------------------------------
    with logging.block("Registering factories", emph=True):
        augmentations.init()
        datasets.init()
        losses.init()
        models.init()
        optim.init()
        visualizers.init()
        logging.info('Done!')

    # -----------------------------------------------------------
    # Parse commandline after factories have been filled
    # -----------------------------------------------------------
    args = commandline.parse_arguments(blocktitle="Commandline Arguments")

    # -----------------------
    # Telegram configuration
    # -----------------------
    with logging.block("Telegram", emph=True):
        logger.configure_telegram(constants.LOGGING_TELEGRAM_MACHINES_FILENAME)

    # ----------------------------------------------------------------------
    # Log git repository hash and make a compressed copy of the source code
    # ----------------------------------------------------------------------
    with logging.block("Source Code", emph=True):
        logging.value("Git Hash: ", system.git_hash())
        # Zip source code and copy to save folder
        filename = os.path.join(args.save, constants.LOGGING_ZIPSOURCE_FILENAME)
        zipsource.create_zip(filename=filename, directory=os.getcwd())
        logging.value("Archived code: ", filename)

    # ----------------------------------------------------
    # Change process title for `top` and `pkill` commands
    # This is more "informative" in `nvidia-smi` ;-)
    # ----------------------------------------------------
    args = config.configure_proctitle(args)

    # -------------------------------------------------
    # Set random seed for python, numpy, torch, cuda..
    # -------------------------------------------------
    config.configure_random_seed(args)

    # -----------------------------------------------------------
    # Machine stats
    # -----------------------------------------------------------
    with logging.block("Machine Statistics", emph=True):
        if args.cuda:
            args.device = torch.device("cuda:0")
            logging.value("Cuda: ", torch.version.cuda)
            logging.value("Cuda device count: ", torch.cuda.device_count())
            logging.value("Cuda device name: ", torch.cuda.get_device_name(0))
            logging.value("CuDNN: ", torch.backends.cudnn.version())
            device_no = 0
            if 'CUDA_VISIBLE_DEVICES' in os.environ.keys():
                device_no = os.environ['CUDA_VISIBLE_DEVICES']
            args.actual_device = "gpu:%s" % device_no
        else:
            args.device = torch.device("cpu")
            args.actual_device = "cpu"
        logging.value("Hostname: ", system.hostname())
        logging.value("PyTorch: ", torch.__version__)
        logging.value("PyTorch device: ", args.actual_device)

    # ------------------------------------------------------
    # Fetch data loaders. Quit if no data loader is present
    # ------------------------------------------------------
    train_loader, validation_loader = config.configure_data_loaders(args)

    # -------------------------------------------------------------------------
    # Check whether any dataset could be found
    # -------------------------------------------------------------------------
    success = any(loader is not None for loader in [train_loader, validation_loader])
    if not success:
        logging.info("No dataset could be loaded successfully. "
                     "Please check dataset paths!")
        quit()

    # -------------------------------------------------------------------------
    # Configure runtime augmentations
    # -------------------------------------------------------------------------
    training_augmentation, validation_augmentation = \
        config.configure_runtime_augmentations(args)

    # ----------------------------------------------------------
    # Configure model and loss.
    # ----------------------------------------------------------
    model_and_loss = config.configure_model_and_loss(args)

    # --------------------------------------------------------
    # Print model visualization
    # --------------------------------------------------------
    if args.logging_model_graph:
        with logging.block("Model Graph", emph=True):
            logger.log_module_info(model_and_loss.model)
    if args.logging_loss_graph:
        with logging.block("Loss Graph", emph=True):
            logger.log_module_info(model_and_loss.loss)

    # -------------------------------------------------------------------------
    # Possibly resume from checkpoint
    # -------------------------------------------------------------------------
    checkpoint_saver, checkpoint_stats = config.configure_checkpoint_saver(
        args, model_and_loss)
    if checkpoint_stats is not None:
        with logging.block():
            logging.info("Checkpoint Statistics:")
            with logging.block():
                logging.values(checkpoint_stats)
        # ---------------------------------------------------------------------
        # Set checkpoint stats
        # ---------------------------------------------------------------------
        if args.checkpoint_mode in ["resume_from_best", "resume_from_latest"]:
            args.start_epoch = checkpoint_stats["epoch"]

    # ---------------------------------------------------------------------
    # Checkpoint and save directory
    # ---------------------------------------------------------------------
    with logging.block("Save Directory", emph=True):
        if args.save is None:
            logging.info("No 'save' directory specified!")
            quit()
        logging.value("Save directory: ", args.save)
        if not os.path.exists(args.save):
            os.makedirs(args.save)

    # ------------------------------------------------------------
    # If this is just an evaluation: overwrite savers and epochs
    # ------------------------------------------------------------
    if args.training_dataset is None and args.validation_dataset is not None:
        args.start_epoch = 1
        args.total_epochs = 1
        train_loader = None
        checkpoint_saver = None
        args.optimizer = None
        args.lr_scheduler = None

    # ----------------------------------------------------
    # Tensorboard summaries
    # ----------------------------------------------------
    logger.configure_tensorboard_summaries(args.save)

    # -------------------------------------------------------------------
    # From the PyTorch API:
    # If you need to move a model to GPU via .cuda(), please do so before
    # constructing optimizers for it. Parameters of a model after .cuda()
    # will be different objects from those before the call.
    # In general, you should make sure that optimized parameters live in
    # consistent locations when optimizers are constructed and used.
    # -------------------------------------------------------------------
    model_and_loss = model_and_loss.to(args.device)

    # ----------------------------------------------------------
    # Configure optimizer
    # ----------------------------------------------------------
    optimizer = config.configure_optimizer(args, model_and_loss)

    # ----------------------------------------------------------
    # Configure learning rate
    # ----------------------------------------------------------
    lr_scheduler = config.configure_lr_scheduler(args, optimizer)

    # --------------------------------------------------------------------------
    # Configure parameter scheduling
    # --------------------------------------------------------------------------
    param_scheduler = config.configure_parameter_scheduler(args, model_and_loss)

    # ----------------------------------------------------------
    # Cuda optimization
    # ----------------------------------------------------------
    if args.cuda:
        torch.backends.cudnn.benchmark = constants.CUDNN_BENCHMARK

    # ----------------------------------------------------------
    # Configure runtime visualization
    # ----------------------------------------------------------
    visualizer = config.configure_visualizers(
        args,
        model_and_loss=model_and_loss,
        optimizer=optimizer,
        param_scheduler=param_scheduler,
        lr_scheduler=lr_scheduler,
        train_loader=train_loader,
        validation_loader=validation_loader)
    if visualizer is not None:
        visualizer = visualizer.to(args.device)

    # ----------------------------------------------------------
    # Kick off training, validation and/or testing
    # ----------------------------------------------------------
    return runtime.exec_runtime(
        args,
        checkpoint_saver=checkpoint_saver,
        lr_scheduler=lr_scheduler,
        param_scheduler=param_scheduler,
        model_and_loss=model_and_loss,
        optimizer=optimizer,
        train_loader=train_loader,
        training_augmentation=training_augmentation,
        validation_augmentation=validation_augmentation,
        validation_loader=validation_loader,
        visualizer=visualizer)
from flask import Flask, make_response, render_template, jsonify, request
from logger import configure_logging
from mxcache import MxCache
from metrics import Metrics
from flask_mail import Mail

app = Flask('app')
app.config.from_object('app.config')

configure_logging(app.config.get('DEBUG'))

mxcache = MxCache(app)
metrics = Metrics(app)
mail = Mail(app)

from flask_rq2 import RQ

RQ_ASYNC = 'REDIS_URL' in app.config.keys()
RQ_TIMEOUT = app.config.get('RQ_TIMEOUT', 180)
# `async` became a reserved word in Python 3.7, so recent flask-rq2 releases
# take `is_async` instead
rq = RQ(is_async=RQ_ASYNC, default_timeout=RQ_TIMEOUT)
rq.app_worker_path = 'app.worker_preload'
rq.init_app(app)

from views import address
app.register_blueprint(address)


@app.route('/')
def index():
    return make_response('ok')


@app.route('/test')
        pin = int(pin_number)
        logging.info("Turning on pin {}".format(pin))
        GPIO.setup(pin, GPIO.OUT)
        GPIO.output(pin, GPIO.HIGH)
        return dumps({'success': True}), 200, {'ContentType': 'application/json'}
    except Exception:
        logging.exception("Error while trying to activate pin")
        return dumps({'success': False}), 500, {'ContentType': 'application/json'}


@app.route('/pin_off/<pin_number>')
def pin_off(pin_number):
    try:
        pin = int(pin_number)
        logging.info("Turning off pin {}".format(pin))
        GPIO.setup(pin, GPIO.OUT)
        GPIO.output(pin, GPIO.LOW)
        return dumps({'success': True}), 200, {'ContentType': 'application/json'}
    except Exception:
        logging.exception("Error while trying to deactivate pin")
        return dumps({'success': False}), 500, {'ContentType': 'application/json'}


if __name__ == '__main__':
    configure_logging("/home/rcarausu/", "led_control")
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    app.run(host="0.0.0.0", port=8081, debug=True)
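# ------------------------------------------------------------------------------
# Hedged usage sketch for the GPIO endpoints above, using only the stdlib.
# Pin 18 is a hypothetical example; any BCM pin number works with the routes.
# ------------------------------------------------------------------------------
import http.client

conn = http.client.HTTPConnection('localhost', 8081)
conn.request('GET', '/pin_on/18')
print(conn.getresponse().read().decode())  # {"success": true}
conn.close()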
def save_settings(settings_items):
    from database import database

    configure_debug = False
    configure_captcha = False
    update_schedule = False
    update_path_map = False
    configure_proxy = False
    exclusion_updated = False

    for key, value in settings_items:
        # Intercept database-stored settings
        if key == 'enabled_languages':
            database.execute("UPDATE table_settings_languages SET enabled=0")
            for item in value:
                database.execute(
                    "UPDATE table_settings_languages SET enabled=1 WHERE code2=?",
                    (item,))
            continue

        # Make sure that text-based form values aren't passed as a list unless
        # they are a language list
        if isinstance(value, list) and len(value) == 1 and key not in [
                'settings-general-serie_default_language',
                'settings-general-movie_default_language']:
            value = value[0]

        # Make sure empty language lists are stored correctly, due to a bug in
        # bootstrap-select
        if key in ['settings-general-serie_default_language',
                   'settings-general-movie_default_language'] and value == ['null']:
            value = []

        settings_keys = key.split('-')

        if value == 'true':
            value = 'True'
        elif value == 'false':
            value = 'False'

        if key == 'settings-auth-password':
            if value != settings.auth.password:
                value = hashlib.md5(value.encode('utf-8')).hexdigest()

        if key == 'settings-general-debug':
            configure_debug = True

        if key in ['settings-general-anti_captcha_provider',
                   'settings-anticaptcha-anti_captcha_key',
                   'settings-deathbycaptcha-username',
                   'settings-deathbycaptcha-password']:
            configure_captcha = True

        if key in ['update_schedule', 'settings-general-use_sonarr',
                   'settings-general-use_radarr', 'settings-general-auto_update',
                   'settings-general-upgrade_subs']:
            update_schedule = True

        if key in ['settings-general-path_mappings',
                   'settings-general-path_mappings_movie']:
            update_path_map = True

        if key in ['settings-proxy-type', 'settings-proxy-url',
                   'settings-proxy-port', 'settings-proxy-username',
                   'settings-proxy-password']:
            configure_proxy = True

        if key in ['settings-sonarr-excluded_tags', 'settings-sonarr-only_monitored',
                   'settings-sonarr-excluded_series_types',
                   'settings-radarr-excluded_tags', 'settings-radarr-only_monitored']:
            exclusion_updated = True

        if key == 'settings-addic7ed-username':
            if value != settings.addic7ed.username:
                region.delete('addic7ed_data')

        if key == 'settings-legendasdivx-username':
            if value != settings.legendasdivx.username:
                region.delete('legendasdivx_cookies2')

        if key == 'settings-opensubtitles-username':
            if value != settings.opensubtitles.username:
                region.delete('os_token')

        if key == 'settings-opensubtitlescom-username':
            if value != settings.opensubtitlescom.username:
                region.delete('oscom_token')

        if key == 'settings-subscene-username':
            if value != settings.subscene.username:
                region.delete('subscene_cookies2')

        if key == 'settings-titlovi-username':
            if value != settings.titlovi.username:
                region.delete('titlovi_token')

        if settings_keys[0] == 'settings':
            settings[settings_keys[1]][settings_keys[2]] = str(value)

    with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
        settings.write(handle)

    # Reconfigure Bazarr to reflect changes
    if configure_debug:
        from logger import configure_logging
        configure_logging(settings.general.getboolean('debug') or args.debug)

    if configure_captcha:
        configure_captcha_func()

    if update_schedule:
        from api import scheduler
        scheduler.update_configurable_tasks()

    if update_path_map:
        from helper import path_mappings
        path_mappings.update()

    if configure_proxy:
        configure_proxy_func()

    if exclusion_updated:
        from event_handler import event_stream
        event_stream(type='badges_series')
        event_stream(type='badges_movies')
from pathlib import Path
import os
from hashlib import md5
import zipfile
import logging
from logger import configure_logging

GTFS_FOLDER = 'cleaned_undefined_zombies'
data_dir_path = Path('../../data/gtfs')
in_dir_path = data_dir_path / GTFS_FOLDER
gtfs_paths = [filepath for filepath in in_dir_path.glob('*.zip')]

log_name = os.path.basename(__file__).rsplit('.', 1)[0]
configure_logging(log_path=Path('./'), log_name=log_name)
logger = logging.getLogger(log_name)

unique = []
filenames = []
for filename in gtfs_paths:
    with zipfile.ZipFile(filename, 'r') as myzip:
        # zip members yield bytes, so the accumulator must be bytes for md5()
        contents = b''
        for name in sorted(myzip.namelist()):
            with myzip.open(name) as myfile:
                contents += myfile.read()
    filehash = md5(contents).hexdigest()
    if filehash not in unique:
        unique.append(filehash)
        filenames.append(filename)
    else:
        index = unique.index(filehash)
# assumed imports, mirroring the sibling GTFS script above
import logging
import os
from pathlib import Path

import pandas as pd

from logger import configure_logging

OUTPUT_IMG_FOLDER_NAME = 'custom'
IMG_WIDTH = 1024
IMG_HEIGHT = IMG_WIDTH
ZOOM = 15
URL_TEMPLATE = 'https://api.maptiler.com/maps/ac954d00-25c8-4a7a-8773-40afb4d17a18/256/{z}/{x}/{y}.jpg?key=3rAT6TUcA56m3Ge4l5Xk'

in_dir_path = Path('../../data/gtfs/cleaned_undefined_zombies')
out_data_path = Path('../../data/route_imgs/')
out_dir_path = out_data_path / OUTPUT_IMG_FOLDER_NAME
out_dir_path.mkdir(parents=True, exist_ok=True)

log_name = str(os.path.basename(__file__).rsplit('.', 1)[0])
configure_logging(log_path=out_dir_path, log_name=log_name)
logger = logging.getLogger(log_name)

total_files = sum(1 for _ in in_dir_path.glob('*.zip'))
files_counter = 0
saved_routes_counter = 0

# Dataframe to hold all routes we produce for each file, and then write them
# to a file
routes_df = pd.DataFrame(
    columns=['img', 'gtfs', 'route_id', 'center_lon', 'center_lat', 'zoom'])
routes_df_filepath = out_dir_path / 'imgs_info.csv'
routes_df_csv_created = False

for gtfs_file_path in in_dir_path.glob('*.zip'):
    files_counter = files_counter + 1
import unittest

from testing import *
from logger import configure_logging

configure_logging()

if __name__ == '__main__':
    unittest.main()
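# ------------------------------------------------------------------------------
# Almost every snippet above funnels through some configure_logging() variant
# (no-arg, debug flag, verbosity count, log path + name). A minimal sketch of
# the simplest no-arg/level form, assuming the stdlib logging module as the
# backend; the real logger modules in these projects may differ.
# ------------------------------------------------------------------------------
import logging


def configure_logging(loglevel='INFO'):
    logging.basicConfig(
        level=getattr(logging, loglevel, logging.INFO),
        format='%(asctime)s %(levelname)-8s %(name)s: %(message)s')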
def save_settings(settings_items):
    from database import database

    configure_debug = False
    configure_captcha = False
    update_schedule = False
    update_path_map = False
    configure_proxy = False
    exclusion_updated = False

    # Subzero mods
    update_subzero = False
    subzero_mods = get_array_from(settings.general.subzero_mods)
    if len(subzero_mods) == 1 and subzero_mods[0] == '':
        subzero_mods = []

    for key, value in settings_items:
        settings_keys = key.split('-')

        # Make sure that text-based form values aren't passed as a list
        if isinstance(value, list) and len(value) == 1 and settings_keys[-1] not in array_keys:
            value = value[0]
            if value in empty_values:
                value = None

        # Make sure empty language lists are stored correctly
        if settings_keys[-1] in array_keys and value[0] in empty_values:
            value = []

        # Handle path mappings settings, since they are an array within an array
        if settings_keys[-1] in ['path_mappings', 'path_mappings_movie']:
            value = [v.split(',') for v in value]

        if value == 'true':
            value = 'True'
        elif value == 'false':
            value = 'False'

        if key == 'settings-auth-password':
            if value != settings.auth.password and value is not None:
                value = hashlib.md5(value.encode('utf-8')).hexdigest()

        if key == 'settings-general-debug':
            configure_debug = True

        if key in ['settings-general-anti_captcha_provider',
                   'settings-anticaptcha-anti_captcha_key',
                   'settings-deathbycaptcha-username',
                   'settings-deathbycaptcha-password']:
            configure_captcha = True

        if key in ['update_schedule', 'settings-general-use_sonarr',
                   'settings-general-use_radarr', 'settings-general-auto_update',
                   'settings-general-upgrade_subs', 'settings-sonarr-series_sync',
                   'settings-sonarr-episodes_sync', 'settings-radarr-movies_sync',
                   'settings-sonarr-full_update', 'settings-sonarr-full_update_day',
                   'settings-sonarr-full_update_hour', 'settings-radarr-full_update',
                   'settings-radarr-full_update_day', 'settings-radarr-full_update_hour',
                   'settings-general-wanted_search_frequency',
                   'settings-general-wanted_search_frequency_movie',
                   'settings-general-upgrade_frequency']:
            update_schedule = True

        if key in ['settings-general-path_mappings',
                   'settings-general-path_mappings_movie']:
            update_path_map = True

        if key in ['settings-proxy-type', 'settings-proxy-url',
                   'settings-proxy-port', 'settings-proxy-username',
                   'settings-proxy-password']:
            configure_proxy = True

        if key in ['settings-sonarr-excluded_tags', 'settings-sonarr-only_monitored',
                   'settings-sonarr-excluded_series_types',
                   'settings-radarr-excluded_tags', 'settings-radarr-only_monitored']:
            exclusion_updated = True

        if key == 'settings-addic7ed-username':
            if value != settings.addic7ed.username:
                region.delete('addic7ed_data')

        if key == 'settings-legendasdivx-username':
            if value != settings.legendasdivx.username:
                region.delete('legendasdivx_cookies2')

        if key == 'settings-opensubtitles-username':
            if value != settings.opensubtitles.username:
                region.delete('os_token')

        if key == 'settings-opensubtitlescom-username':
            if value != settings.opensubtitlescom.username:
                region.delete('oscom_token')

        if key == 'settings-subscene-username':
            if value != settings.subscene.username:
                region.delete('subscene_cookies2')

        if key == 'settings-titlovi-username':
            if value != settings.titlovi.username:
                region.delete('titlovi_token')

        if settings_keys[0] == 'settings':
            settings[settings_keys[1]][settings_keys[2]] = str(value)

        if settings_keys[0] == 'subzero':
            mod = settings_keys[1]
            enabled = value == 'True'
            if mod in subzero_mods and not enabled:
                subzero_mods.remove(mod)
            elif enabled:
                subzero_mods.append(mod)

            # Handle color mods, which replace any previously selected color
            if mod == 'color':
                previous = None
                for exist_mod in subzero_mods:
                    if exist_mod.startswith('color'):
                        previous = exist_mod
                        break
                if previous is not None:
                    subzero_mods.remove(previous)
                if value not in empty_values:
                    subzero_mods.append(value)

            update_subzero = True

    if update_subzero:
        settings.set('general', 'subzero_mods', ','.join(subzero_mods))

    with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
        settings.write(handle)

    # Reconfigure Bazarr to reflect changes
    if configure_debug:
        from logger import configure_logging
        configure_logging(settings.general.getboolean('debug') or args.debug)

    if configure_captcha:
        configure_captcha_func()

    if update_schedule:
        from api import scheduler
        scheduler.update_configurable_tasks()

    if update_path_map:
        from helper import path_mappings
        path_mappings.update()

    if configure_proxy:
        configure_proxy_func()

    if exclusion_updated:
        from event_handler import event_stream
        event_stream(type='badges_series')
        event_stream(type='badges_movies')