async def exportLog(self):
    """Periodically export chat logs from the database to a text file.

    Runs forever: prunes rows older than ELB_LOG_AGE, writes the
    remaining rows to the ELB_LOG_NAME file (one line per message line,
    HTML tags stripped and the rest HTML-escaped), then sleeps for
    ELB_EXPORT_LOG_TIME hours.
    """
    # Compile once instead of on every loop iteration; matches HTML
    # comments and tags so they can be stripped from logged messages.
    tag_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
    while True:
        self.dbManager.cleanDb(Config().get("ELB_LOG_AGE"))
        logs = self.dbManager.getLogs(Config().get("ELB_LOG_AGE"))
        with open(Config().get("ELB_LOG_NAME"), "w") as f:
            # row layout (from the insert side): (timestamp, author, text)
            for row in logs:
                for line in row[2].split("\n"):
                    try:
                        t = time.strftime("%a %d %b %H:%M:%S",
                                          time.gmtime(float(row[0])))
                        stripped = html.escape(tag_re.sub("", line).strip())
                        f.write("[%s] <%s> %s\n" % (t, row[1], stripped))
                    except Exception:
                        # Narrowed from a bare `except:` so task
                        # cancellation and Ctrl-C still propagate.
                        # Record the failure and keep exporting the rest.
                        with open("parse_errors.txt", "a") as g:
                            g.write(traceback.format_exc())
                            g.write("\n")
        # ELB_EXPORT_LOG_TIME is in hours.
        secsToSleep = (int(Config().get("ELB_EXPORT_LOG_TIME")) * 60) * 60
        await asyncio.sleep(secsToSleep)
def logs(cls):
    """Stream docker-compose logs for the active backend and/or frontend stacks."""
    config_object = Config()
    config = config_object.get_config()
    kobodocker_path = config.get("kobodocker_path")

    if config_object.primary_backend or config_object.secondary_backend:
        role = config.get("backend_server_role", "primary")
        backend_command = [
            "docker-compose",
            "-f", "docker-compose.backend.{}.yml".format(role),
            "-f", "docker-compose.backend.{}.override.yml".format(role),
            "-p", config_object.get_prefix("backend"),
            "logs", "-f",
        ]
        CLI.run_command(backend_command, kobodocker_path, True)

    if config_object.frontend:
        frontend_command = [
            "docker-compose",
            "-f", "docker-compose.frontend.yml",
            "-f", "docker-compose.frontend.override.yml",
            "-p", config_object.get_prefix("frontend"),
            "logs", "-f",
        ]
        CLI.run_command(frontend_command, kobodocker_path, True)
def logs(cls):
    """Stream docker-compose logs for the active backend and/or frontend stacks."""
    config = Config()
    dict_ = config.get_dict()

    if config.primary_backend or config.secondary_backend:
        role = dict_['backend_server_role']
        backend_command = [
            'docker-compose',
            '-f', 'docker-compose.backend.{}.yml'.format(role),
            '-f', 'docker-compose.backend.{}.override.yml'.format(role),
            '-p', config.get_prefix('backend'),
            'logs', '-f',
        ]
        CLI.run_command(backend_command, dict_['kobodocker_path'], True)

    if config.frontend:
        frontend_command = [
            'docker-compose',
            '-f', 'docker-compose.frontend.yml',
            '-f', 'docker-compose.frontend.override.yml',
            '-p', config.get_prefix('frontend'),
            'logs', '-f',
        ]
        CLI.run_command(frontend_command, dict_['kobodocker_path'], True)
def run(force_setup=False):
    """Entry point: check the OS, (re)build configuration when needed, start containers.

    Args:
        force_setup (bool): rebuild config/templates even when not a first run.
    """
    # Only Linux and macOS are supported.
    if platform.system() not in ['Linux', 'Darwin']:
        CLI.colored_print('Not compatible with this OS', CLI.COLOR_ERROR)
        return

    config = Config()
    dict_ = config.get_dict()
    if config.first_time:
        force_setup = True

    if force_setup:
        dict_ = config.build()
        Template.render(config)
        support = Support()
        support.copy_support_scripts()
        # # config.init_letsencrypt()
        # # Setup.update_hosts(dict_)
    else:
        print("Running smoothly")
        # if config.auto_detect_network():
        #     Template.render(config)
        #     Setup.update_hosts(dict_)

    Command.start()
def post_update(cls, cron):
    """Regenerate templates after an update; interactive unless run from cron.

    Args:
        cron (bool): when True, skip all prompts, rebuild files and exit.
    """
    config = Config()

    # When `cron` is True, we want to bypass question and just recreate
    # YML and environment files from new templates
    if cron is True:
        config.set_config(config.get_upgraded_dict())
        config.write_config()
        Template.render(config, force=True)
        sys.exit(0)

    message = ('After an update, it is strongly recommended to run\n'
               '`python3 run.py --setup` to regenerate environment files.')
    CLI.framed_print(message, color=CLI.COLOR_INFO)

    if CLI.yes_no_question('Do you want to proceed?') is True:
        current_dict = config.build()
        Template.render(config)
        config.init_letsencrypt()
        Setup.update_hosts(current_dict)
        if CLI.yes_no_question('Do you want to (re)start containers?') is True:
            Command.start()
class BadWordFilter(commands.Cog):
    """ Deletes The bad words from chat """

    def __init__(self, bot):
        """ Cog initialization

        Args:
            bot (discord.ext.commands.Bot): Instance of the bot
        """
        self.bot = bot
        # Load the configuration BEFORE reading values from it.  The
        # original read BAD_WORDS / IGNORE_CHANNELS first and called
        # load_config() afterwards, so the lists could be built from a
        # stale/empty config (the other cogs load first).
        Config().load_config()
        self.bad_words = Config().get("BAD_WORDS").split(",")
        self.ignore_chans = Config().get("IGNORE_CHANNELS").split(",")

    # Events
    @commands.Cog.listener()
    async def on_message(self, message):
        """on_message executes whenever a message is posted"""
        if message.channel.name in self.ignore_chans:
            return
        for word in message.content.split():
            if word.lower() in self.bad_words:
                await message.delete()
                # Stop after the first hit: a message can only be
                # deleted once — a second delete() would raise.
                break
def logs(cls):
    """Stream docker-compose logs for the active backend and/or frontend stacks."""
    config_object = Config()
    config = config_object.get_config()
    kobodocker_path = config.get("kobodocker_path")
    has_prefix = config.get("docker_prefix", "") != ""

    if config_object.master_backend or config_object.slave_backend:
        role = config.get("backend_server_role", "master")
        backend_command = [
            "docker-compose",
            "-f", "docker-compose.backend.{}.yml".format(role),
            "-f", "docker-compose.backend.{}.override.yml".format(role),
            "logs", "-f",
        ]
        if has_prefix:
            # Splice `-p <prefix>` in just before the trailing `logs -f`.
            backend_command[-2:-2] = ["-p", config.get("docker_prefix")]
        CLI.run_command(backend_command, kobodocker_path, True)

    if config_object.frontend:
        frontend_command = [
            "docker-compose",
            "-f", "docker-compose.frontend.yml",
            "-f", "docker-compose.frontend.override.yml",
            "logs", "-f",
        ]
        if has_prefix:
            frontend_command[-2:-2] = ["-p", config.get("docker_prefix")]
        CLI.run_command(frontend_command, kobodocker_path, True)
def task_initial(self, grab, task):
    """Read pagination from the landing page and queue one parse task per page."""
    self.logger.debug('[{}] Initial url: {}'.format(task.name, task.url))

    if self._check_body_errors(grab, task):
        self.logger.fatal('[{}] Err task with url {}, attempt {}'.format(
            task.name, task.url, task.task_try_count))
        return

    try:
        page_param = Config.get('SITE_PAGE_PARAM')
        xpath = ('//div[contains(@class, "pagination")]'
                 '//a[contains(@href, "{}")]').format(page_param)
        items = grab.doc.select(xpath)
        max_page = get_max_page(items, 0, -1)
        self.logger.info('[{}] Task: {}, max_page: {}'.format(
            task.name, task.url, max_page))

        url_gen = UrlGenerator(task.url, page_param)
        # Pages are numbered 0..max_page inclusive.
        for page_index in range(max_page + 1):
            yield Task('parse_page',
                       url=url_gen.get_page(page_index),
                       priority=90)
    except Exception as e:
        self._process_error(grab, task, e)

    self.logger.info('[{}] Tasks added...'.format(task.name))
def load_config():
    """Load the config profile named by the first CLI argument.

    Returns:
        bool: True when a profile argument was supplied and loaded,
        False otherwise.
    """
    if len(sys.argv) <= 1:
        return False
    config_dir = os.path.join(os.path.dirname(__file__), 'config')
    Config.load(config_dir, sys.argv[1])
    return True
def validate_already_run():
    """
    Validates that Setup has been run at least once and kobo-docker has
    been pulled and checked out before going further.
    """
    config_object = Config()
    config = config_object.get_config()

    def display_error_message(message):
        # Pad the message so the right border of the frame lines up,
        # then exit — setup must be run first.
        max_chars_count = 51
        spacer = " " * (max_chars_count - len(message))
        CLI.colored_print("╔═════════════════════════════════════════════════════╗", CLI.COLOR_ERROR)
        CLI.colored_print("║ {}{} ║".format(message, spacer), CLI.COLOR_ERROR)
        CLI.colored_print("║ Please run `./run.py --setup` first . ║", CLI.COLOR_ERROR)
        CLI.colored_print("╚═════════════════════════════════════════════════════╝", CLI.COLOR_ERROR)
        sys.exit(1)

    try:
        config['kobodocker_path']
    except KeyError:
        display_error_message('No configuration file found.')

    if not os.path.isdir(os.path.join(config["kobodocker_path"], ".git")):
        display_error_message('`kobo-docker` repository is missing!')
def compose_frontend(cls, args):
    """Run an arbitrary docker-compose command against the frontend stack."""
    config = Config()
    dict_ = config.get_dict()
    command = [
        'docker-compose',
        '-f', 'docker-compose.frontend.yml',
        '-f', 'docker-compose.frontend.override.yml',
        '-p', config.get_prefix('frontend'),
    ] + list(args)
    subprocess.call(command, cwd=dict_['kobodocker_path'])
def copy_support_scripts(self):
    """Copy the shiny and postgres support scripts into the kobo-docker tree."""
    settings = Config().get_dict()
    # Repository root: two directory levels above this file.
    root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    self.copy_shiny(settings, root_dir)
    self.copy_postgres(settings, root_dir)
def compose_frontend(cls, args):
    """Run an arbitrary docker-compose command against the frontend stack."""
    config_object = Config()
    config = config_object.get_config()
    command = [
        "docker-compose",
        "-f", "docker-compose.frontend.yml",
        "-f", "docker-compose.frontend.override.yml",
    ]
    # Add the project flag only when a prefix is configured.
    if config.get("docker_prefix", "") != "":
        command += ['-p', config.get("docker_prefix")]
    command += list(args)
    subprocess.call(command, cwd=config.get("kobodocker_path"))
def test_read_config():
    """read_config() should populate the config from the (mocked) JSON file."""
    expected = {"kobodocker_path": "/tmp"}
    with patch(builtin_open, mock_open(read_data=json.dumps(expected))) as mock_file:
        config_object = Config()
        config_object.read_config()
        actual = config_object.get_config().get("kobodocker_path")
        assert actual == expected.get("kobodocker_path")
        return config_object
def stop_nginx(cls):
    """Stop only the nginx container of the frontend stack."""
    config_object = Config()
    config = config_object.get_config()
    command = [
        "docker-compose",
        "-f", "docker-compose.frontend.yml",
        "-f", "docker-compose.frontend.override.yml",
        "-p", config_object.get_prefix("frontend"),
        "stop", "nginx",
    ]
    CLI.run_command(command, config.get("kobodocker_path"))
def __init__(self, thread_number: int, try_limit: int = 0) -> None:
    """Common spider setup: logging, regexes, stats, optional DB cache, debug mode.

    Args:
        thread_number: number of worker threads for the spider.
        try_limit: network retry limit (0 = framework default).
    """
    super().__init__(thread_number=thread_number,
                     network_try_limit=try_limit,
                     priority_mode='const')

    # Logger
    self.log = Log(DSpiderCommon.logger)
    self.logger = DSpiderCommon.logger

    # Re module init
    Ree.init()

    # Work data
    self.single_task_mode = False
    self.tasks_store = {}
    self.result = []
    self.cookie_jar = {}

    # Info
    self.info = StatCounter()
    self.info.add_task(StatCounter.TASK_FACTORY)

    # Common vars
    first_site_url = Config.get_seq('SITE_URL')[0]
    self.domain = UrlGenerator.get_host_from_url(first_site_url)
    self.err_limit = try_limit

    # Cache is enabled only when both the flag and a DB host are set.
    if Config.get('APP_CACHE_ENABLED', '') and Config.get('APP_CACHE_DB_HOST', ''):
        cache_kwargs = {
            'backend': Config.get('APP_CACHE_DB_TYPE', 'mysql'),
            'database': Config.get('APP_CACHE_DB_NAME', 'pythonparsers'),
            'host': Config.get('APP_CACHE_DB_HOST', ''),
            'port': int(Config.get('APP_CACHE_DB_PORT', '3306')),
        }
        user = Config.get('APP_CACHE_DB_USER', 'root')
        password = Config.get('APP_CACHE_DB_PASS', '')
        # Credentials are passed only when both user AND password are set.
        if user and password:
            cache_kwargs.update(user=user, password=password)
        self.setup_cache(**cache_kwargs)
        self.logger.info('!!! CACHE MODE ENABLED !!!')

    # Debug mode (only 1 iteration of each task)
    if Config.get('APP_SINGLE_TASK', ''):
        self.logger.info('!!! SINGLE MODE ENABLED !!!')
        self.single_task_mode = True

    self.logger.info('Init parser ok...')
def stop_nginx(cls):
    """Stop only the nginx container of the frontend stack."""
    config = Config()
    dict_ = config.get_dict()
    command = [
        'docker-compose',
        '-f', 'docker-compose.frontend.yml',
        '-f', 'docker-compose.frontend.override.yml',
        '-p', config.get_prefix('frontend'),
        'stop', 'nginx',
    ]
    CLI.run_command(command, dict_['kobodocker_path'])
def __init__(self, bot):
    """ Cog initialization

    Args:
        bot (discord.ext.commands.Bot): Instance of the bot
    """
    self.bot = bot
    # Refresh config before reading the ignore list.
    Config().load_config()
    ignore_csv = Config().get("FEEDER_IGNORE_USERS").lower()
    self.ignoreUsers = ignore_csv.split(",")
def upgrade(cls):
    """Upgrade kobo-docker via Setup, then pull the latest kobo-install itself."""
    config_object = Config()
    Setup.run(config_object.get_config())
    CLI.colored_print("KoBoToolbox has been upgraded", CLI.COLOR_SUCCESS)

    # update itself
    CLI.run_command(["git", "pull", "origin", "master"])
    CLI.colored_print("KoBoInstall has been upgraded", CLI.COLOR_SUCCESS)
def __init__(self, bot):
    """ Cog initialization

    Args:
        bot (discord.ext.commands.Bot): Instance of the bot
    """
    self.bot = bot
    # Load the configuration BEFORE reading values from it.  The
    # original read BAD_WORDS / IGNORE_CHANNELS first and called
    # load_config() afterwards, so the lists could be built from a
    # stale/empty config (the sibling cog loads first, then reads).
    Config().load_config()
    self.bad_words = Config().get("BAD_WORDS").split(",")
    self.ignore_chans = Config().get("IGNORE_CHANNELS").split(",")
def compose_backend(cls, args):
    """Run an arbitrary docker-compose command against the backend stack."""
    config = Config()
    dict_ = config.get_dict()
    role = dict_['backend_server_role']
    command = [
        'docker-compose',
        '-f', 'docker-compose.backend.{}.yml'.format(role),
        '-f', 'docker-compose.backend.{}.override.yml'.format(role),
        '-p', config.get_prefix('backend'),
    ] + list(args)
    subprocess.call(command, cwd=dict_['kobodocker_path'])
def start_maintenance(cls):
    """Bring up the maintenance container and report success."""
    config_object = Config()
    config = config_object.get_config()
    command = [
        "docker-compose",
        "-f", "docker-compose.maintenance.yml",
        "-p", config_object.get_prefix("maintenance"),
        "up", "-d", "maintenance",
    ]
    CLI.run_command(command, config.get("kobodocker_path"))
    CLI.colored_print("Maintenance mode has been started", CLI.COLOR_SUCCESS)
def __init__(self, thread_number, try_limit=0):
    """Spider setup: bind shared helpers, init regexes and output constants."""
    super().__init__(thread_number=thread_number,
                     network_try_limit=try_limit,
                     priority_mode='const')

    # Attach the shared helper implementations onto the class.
    DSpider._check_body_errors = check_body_errors
    DSpider._process_error = process_error
    DSpider._common_init = common_init
    self._common_init(try_limit)

    Ree.init()
    Ree.is_page_number(Config.get('SITE_PAGE_PARAM'))

    # Output-format constants read once from config.
    self.const_zero_stock = Config.get('APP_STOCK_ZERO')
    self.const_price_sep = Config.get('APP_PRICE_SEP')
    self.const_enc = Config.get('APP_OUTPUT_ENC')
def compose_backend(cls, args):
    """Run an arbitrary docker-compose command against the backend stack."""
    config_object = Config()
    config = config_object.get_config()
    role = config.get("backend_server_role", "master")
    command = [
        "docker-compose",
        "-f", "docker-compose.backend.{}.yml".format(role),
        "-f", "docker-compose.backend.{}.override.yml".format(role),
    ]
    # Add the project flag only when a prefix is configured.
    if config.get("docker_prefix", "") != "":
        command += ['-p', config.get("docker_prefix")]
    command += list(args)
    subprocess.call(command, cwd=config.get("kobodocker_path"))
def compose_backend(cls, args):
    """Run an arbitrary docker-compose command against the backend stack."""
    config_object = Config()
    config = config_object.get_config()
    role = config.get("backend_server_role", "primary")
    command = [
        "docker-compose",
        "-f", "docker-compose.backend.{}.yml".format(role),
        "-f", "docker-compose.backend.{}.override.yml".format(role),
        "-p", config_object.get_prefix("backend"),
    ] + list(args)
    subprocess.call(command, cwd=config.get("kobodocker_path"))
def start_maintenance(cls):
    """Bring up the maintenance stack and report success."""
    config = Config()
    dict_ = config.get_dict()
    command = [
        'docker-compose',
        '-f', 'docker-compose.maintenance.yml',
        '-f', 'docker-compose.maintenance.override.yml',
        '-p', config.get_prefix('maintenance'),
        'up', '-d',
    ]
    CLI.run_command(command, dict_['kobodocker_path'])
    CLI.colored_print('Maintenance mode has been started', CLI.COLOR_SUCCESS)
def __init__(self) -> None:
    """Wire up config and storage; runtime services are created later at startup."""
    self.cfg = Config()
    self.db = DB()
    self.tx_storage = TxStorage()
    # self.tx_storage = TxStorageMock()

    self.network_id = self.cfg.as_str('thorchain.network_id',
                                      NetworkIdents.TESTNET_MULTICHAIN)
    logging.info(f'Starting Chaosnetleaders backend for network {self.network_id!r}')

    # Populated during application startup, not here.
    self.scanner: Optional[TxScanner] = None
    self.thor: Optional[ThorConnector] = None
    self.value_filler: Optional[ValueFiller] = None
    self.api: Optional[API] = None
def __init__(self, thread_number, try_limit=0):
    """Spider setup: bind shared helpers, init regexes and output constants."""
    super().__init__(thread_number=thread_number,
                     network_try_limit=try_limit,
                     priority_mode='const')

    # Attach the shared helper implementations onto the class.
    DSpider._check_body_errors = check_body_errors
    DSpider._process_error = process_error
    DSpider._common_init = common_init
    self._common_init(try_limit)

    Ree.init()
    Ree.is_page_number(Config.get('SITE_PAGE_PARAM'))

    # Output constants read once from config.
    self.const_price_on_request = Config.get('APP_PRICE_ON_REQUEST')
    self.const_stock_zero = Config.get('APP_STOCK_ZERO')
    # NOTE(review): hard-coded default place name — presumably a pickup
    # location; confirm whether it should come from config instead.
    self.const_default_place = 'Полежаевская'
async def on_message(self, message):
    """on_message executes whenever a message is posted"""
    if not self.configValid:
        return

    # Only log the configured channel, and never system messages.
    if message.channel.name != Config().get("ELB_CHANNEL"):
        return
    if message.is_system():
        return

    # skip gifs
    if "tenor.com" in message.content:
        return

    # get rid of all emojis
    msg = self.stripEmoji(message.content).strip()
    long_enough = len(msg) >= int(Config().get("ELB_MIN_MSG_LEN"))
    if long_enough and message.author.name.lower() not in self.ignoredNicks:
        self.dbManager.insertLog(int(message.created_at.timestamp()),
                                 message.author.name, msg)
async def on_message(self, message):
    """on_message executes whenever a message is posted"""
    is_bump = (message.content == "!d bump"
               and message.channel.name == Config().get("DISBUMP_CHANNEL"))
    if not is_bump:
        return

    # Set our reminder channel and bumper role attributes for use later
    #
    # There is probably a much more elegant solution to this
    self.reminder_channel = message.channel
    for role in message.channel.guild.roles:
        if role.name == Config().get("DISBUMP_ROLE"):
            self.bumper_role = role

    # Start a single reminder cycle; don't stack reminders.
    if self.is_waiting is not True:
        self.is_waiting = True
        await self.remind()
def __init__(self, pca, plots_dir=None):
    """
    Expects a PCA object with 'results' and 'explained_variance'
    """
    self.pca = pca
    self.colors = Config('colors')  # FIXME use super()__init__()!
    # FIXME should be in super too
    if plots_dir is not None:
        self.base_dir = plots_dir
    else:
        self.base_dir = self.pca.dataset.source.plots_dir
    self.plot_settings = Config('plots')['PCA']
    self.explained_variance = self.pca.explained_variance
class PCAPlotter(BasePlotter):
    """Scatter-plots PCA results, two components per axis."""

    def __init__(self, pca, plots_dir=None):
        """
        Expects a PCA object with 'results' and 'explained_variance'
        """
        self.pca = pca
        self.colors = Config('colors')  # FIXME use super()__init__()!
        self.base_dir = plots_dir  # FIXME should be in super too
        if plots_dir is None:
            self.base_dir = self.pca.dataset.source.plots_dir
        self.plot_settings = Config('plots')['PCA']
        self.explained_variance = self.pca.explained_variance

    def draw_ax(self, ax, components_to_plot, show_ticks):
        """
        Draws a scatterplot of the first two columns in eigenvalues
        """
        if len(components_to_plot) != 2:
            error_msg = 'I only know how to plot exactly TWO components. '
            error_msg += 'I received: {}'.format(components_to_plot)
            raise ValueError(error_msg)

        selected_components = self.pca.result[components_to_plot]
        grouped_results = selected_components.groupby(level='population')
        for population, values in grouped_results:
            kwargs = self._plot_kwargs(population)
            x, y = components_to_plot
            values.plot(kind='scatter', x=x, y=y, ax=ax,
                        label=population, **kwargs)

        # Set the axes labels
        xlabel_prefix = '-' if self.pca.inverted_x else ''
        ylabel_prefix = '-' if self.pca.inverted_y else ''
        xlabel_suffix = ''
        if self.pca.rotated:
            xlabel_suffix = '\nRotated {}°'.format(int(self.pca.rotation_angle))

        # `.ix` was deprecated in pandas 0.20 and removed in 1.0;
        # `.loc` is the label-based equivalent for these lookups.
        xvariance = self.explained_variance.loc[x]['percentage']
        xlabel = "{}{}: {}{}".format(xlabel_prefix, x, xvariance, xlabel_suffix)
        ax.set_xlabel(xlabel)

        yvariance = self.explained_variance.loc[y]['percentage']
        ylabel = "{}{}: {}".format(ylabel_prefix, y, yvariance)
        ax.set_ylabel(ylabel)

        if not show_ticks:
            # Remove non-data ink.  tick_params expects booleans; the
            # old "off" strings are no longer accepted by matplotlib.
            ax.tick_params(axis="x", bottom=False, top=False)
            ax.tick_params(axis="y", left=False, right=False)
            hide_spines_and_ticks(ax, 'all')

        return ax

    def _plot_kwargs(self, population):
        """Build the scatter kwargs for one population from the plot settings."""
        primary = population in self.plot_settings['primary_populations']
        importance = 'primary' if primary else 'secondary'
        settings = self.plot_settings[importance]
        kwargs = {
            # Generate a new color for a population if there's no color
            # defined in the settings yml.
            # NOTE(review): _new_color() is evaluated eagerly even when the
            # population already has a color, consuming a palette entry —
            # confirm whether that is intentional before changing it.
            'color': self.colors.get(population, self._new_color()),
            'marker': settings['marker'],
            'lw': settings['linewidth'],
            'alpha': settings['alpha'],
            's': settings['markersize'],
            'zorder': settings['zorder'],
        }
        return kwargs

    def _new_color(self):
        """Hand out one color per call from a lazily-built qualitative palette."""
        if not hasattr(self, '_more_colors'):
            palette_name = self.colors['QualitativePalette']
            populations = self.pca.result.index.get_level_values('population')
            number_of_populations = len(populations.unique())
            self._more_colors = sns.color_palette(palette_name,
                                                  number_of_populations)
        return self._more_colors.pop(0)