def setup_module():
    with suppress(FileNotFoundError):
        shutil.rmtree(TEST_OUTPUT)
    with suppress(FileExistsError):
        os.makedirs(TEST_OUTPUT)
    with open(_make_output('blank.hocr'), 'w') as f:
        f.write(HOCR_TEMPLATE)
def __init__(self, config, container):
    """Construct a libertine application session for a container.

    :param config: A session configuration object.
    :param container: The container in which the application will be run.
    """
    super().__init__()
    self._app = None
    self._config = config
    self._container = container
    self._bridge_pairs = []
    self._child_processes = []
    self._selector = selectors.DefaultSelector()
    self._set_signal_handlers()
    self.callback(self._shutdown)
    self._ensure_paths_exist()

    with suppress(AttributeError):
        for bridge_config in self._config.socket_bridges:
            self._create_bridge_listener(bridge_config)

    with suppress(AttributeError):
        for task_config in self._config.prelaunch_tasks:
            if task_config.task_type == TaskType.LAUNCH_SERVICE:
                utils.get_logger().info(utils._("launching {launch_task}").format(
                    launch_task=task_config.datum[0]))
                task = LaunchServiceTask(task_config)
                self._child_processes.append(task)
                task.start(self._config.host_environ)
def diff_classes(self):
    """Generate the difference in object classes between the policies."""
    self.log.info(
        "Generating class differences from {0.left_policy} to {0.right_policy}".format(self))

    self.added_classes, self.removed_classes, matched_classes = self._set_diff(
        (SymbolWrapper(c) for c in self.left_policy.classes()),
        (SymbolWrapper(c) for c in self.right_policy.classes()))

    self.modified_classes = dict()

    for left_class, right_class in matched_classes:
        # Criteria for modified classes
        # 1. change to permissions (inherited common is expanded)
        left_perms = left_class.perms
        with suppress(NoCommon):
            left_perms |= left_class.common.perms

        right_perms = right_class.perms
        with suppress(NoCommon):
            right_perms |= right_class.common.perms

        added_perms, removed_perms, matched_perms = self._set_diff(left_perms, right_perms)

        if added_perms or removed_perms:
            self.modified_classes[left_class] = modified_classes_record(
                added_perms, removed_perms, matched_perms)
def put(self, body):
    """Update the solution.

    :statuscode 422: ``issue_id`` does not refer to a valid issue

    Returns the updated solution object on success.
    """
    obj = self.context
    with suppress(KeyError):
        obj.title = body['title']
    with suppress(KeyError):
        obj.content = body['content']
    with suppress(KeyError):
        obj.issue = issue_or_422(self.db, body['issue_id'])
    try:
        tags = body['tags']
    except KeyError:
        pass
    else:
        obj.tags = Tag.from_names(self.db, tags)
    obj.mtime = datetime.datetime.now()
    self.db.flush()
    return obj.as_dict(self.user)
def __init__(self, context):
    self.context = context
    self.uid_min = 90000001
    self.uid_max = 100000000
    self.dc = None
    self.enabled = False
    self.domain_info = None
    self.domain_name = None
    self.parameters = None
    self.directory = None
    self.ldap_servers = None
    self.ldap = None
    self.domain_users_guid = None
    self.user_dn = None
    self.group_dn = None
    self.cv = Condition()
    self.bind_thread = Thread(target=self.bind, daemon=True)
    self.bind_thread.start()
    os.environ['LOGNAME'] = 'root'

    # Remove winbind cache files
    with contextlib.suppress(FileNotFoundError):
        os.remove('/var/db/samba4/winbindd_cache.tdb')
    with contextlib.suppress(FileNotFoundError):
        os.remove('/var/db/samba4/winbindd_cache.tdb.bak')
    with contextlib.suppress(FileNotFoundError):
        os.remove('/var/db/samba4/winbindd_cache.tdb.old')
def quit(self):
    for clone in self.clones:
        with suppress(RemoteDisconnected):
            clone.quit()
    with suppress(RemoteDisconnected):
        super().quit()
def __init__(self, name, options, project=None):
    self.name = name
    self.build_packages = []
    self.stage_packages = []

    with contextlib.suppress(AttributeError):
        self.stage_packages = options.stage_packages
    with contextlib.suppress(AttributeError):
        self.build_packages = options.build_packages

    self.project = project
    self.options = options

    if project:
        self.partdir = os.path.join(project.parts_dir, self.name)
    else:
        self.partdir = os.path.join(os.getcwd(), 'parts', self.name)
    self.sourcedir = os.path.join(self.partdir, 'src')
    self.installdir = os.path.join(self.partdir, 'install')
    self.build_basedir = os.path.join(self.partdir, 'build')

    source_subdir = getattr(self.options, 'source_subdir', None)
    if source_subdir:
        self.builddir = os.path.join(self.build_basedir, source_subdir)
    else:
        self.builddir = self.build_basedir
def project_build(environment=ENVIRONMENT.get('default_env', 'dev')):
    """Build the project."""
    build_d = ENVIRONMENT['project']['build_d']
    build_log = os.path.join(build_d, '.build')
    proj_env = [
        x for x in ENVIRONMENT.get('environment', {})
        if x.get('name', '') == environment
    ][0]
    dirs = {
        '.': ENVIRONMENT['project']['src_d'],
        'lib': ENVIRONMENT['project']['lib_d'],
    }
    context = {}
    with suppress(AttributeError):
        context = {k: v for k, v in proj_env.get('variables', {}).items()}
    env.update_context(ENVIRONMENT, context)
    rendered = [
        os.path.join(build_d, origine.replace(path, name))
        for name, path in dirs.items()
        for origine in fs.lstree(path, recursive=True)
    ]
    msg.write(msg.INFORMATION, 'Building project', *rendered)
    env.render_tree(context, dirs, build_d)
    with suppress(OSError), open(build_log, 'w') as fp:
        fp.write('\n'.join(rendered))
def delete(self):
    with suppress(FileNotFoundError, IsADirectoryError):
        os.remove(self.hdf_file)
    for col in self._categorical_cols():
        with suppress(FileNotFoundError, IsADirectoryError):
            os.remove(self._get_category_file(col))
def _load_code(self, plugin_name, properties):
    module_name = plugin_name.replace('-', '_')
    module = None

    with contextlib.suppress(ImportError):
        module = _load_local('x-{}'.format(plugin_name))
        logger.info('Loaded local plugin for %s', plugin_name)

    if not module:
        with contextlib.suppress(ImportError):
            module = importlib.import_module(
                'snapcraft.plugins.{}'.format(module_name))

    if not module:
        logger.info('Searching for local plugin for %s', plugin_name)
        with contextlib.suppress(ImportError):
            module = _load_local(module_name)
        if not module:
            raise PluginError('unknown plugin: {}'.format(plugin_name))

    plugin = _get_plugin(module)
    options = _make_options(properties, plugin.schema())
    self.code = plugin(self.name, options)

    if common.host_machine != common.target_machine:
        logger.debug(
            'Setting {!r} as the compilation target for {!r}'.format(
                common.target_machine, plugin_name))
        self.code.set_target_machine(common.target_machine)
def _load_include(mapping, key, val):
    """Update the given dictionary with the given ``include`` directive item.

    An ``include`` directive is any dictionary key named ``include`` with a
    path or a list of paths for its value. The file targeted by those paths
    should be a YAML environment file to be added to the given *mapping*.

    This function does not loop over the dictionary and assumes *key* and
    *val* form an item of it.

    :param dict mapping: Dictionary to look in and to be updated by the
        ``include`` directives.
    :param key: A key from *mapping*.
    :param val: A value from *mapping*.
    """
    if key == 'include':
        with suppress(KeyError):
            del mapping[key]
        cls.update(mapping, cls.load(val))

    # Try to look for include directives if val is a list of dicts.
    with suppress(TypeError):
        cls.dmap(_load_include, *val, recurse=True)
async def check_streams(self):
    for stream in self.streams:
        with contextlib.suppress(Exception):
            try:
                embed = await stream.is_online()
            except OfflineStream:
                if not stream._messages_cache:
                    continue
                for message in stream._messages_cache:
                    with contextlib.suppress(Exception):
                        autodelete = await self.db.guild(message.guild).autodelete()
                        if autodelete:
                            await message.delete()
                stream._messages_cache.clear()
                await self.save_streams()
            else:
                if stream._messages_cache:
                    continue
                for channel_id in stream.channels:
                    channel = self.bot.get_channel(channel_id)
                    mention_str, edited_roles = await self._get_mention_str(channel.guild)

                    if mention_str:
                        content = _("{mention}, {stream.name} is live!").format(
                            mention=mention_str, stream=stream
                        )
                    else:
                        content = _("{stream.name} is live!").format(stream=stream)

                    m = await channel.send(content, embed=embed)
                    stream._messages_cache.append(m)
                    if edited_roles:
                        for role in edited_roles:
                            await role.edit(mentionable=False)
                    await self.save_streams()
def remove_listener(self, event, listener):
    """Remove a listener from the emitter.

    Args:
        event (str): The event name on which the listener is bound.
        listener: A reference to the same object given to add_listener.

    Returns:
        bool: True if a listener was removed, else False.

    This method only removes one listener at a time. If a listener is
    attached multiple times then this method must be called repeatedly.
    Additionally, this method removes listeners first from those
    registered with 'on' or 'add_listener'. If none are found, it then
    removes from those added with 'once'.
    """
    with contextlib.suppress(ValueError):
        self._listeners[event].remove(listener)
        return True

    with contextlib.suppress(ValueError):
        self._once[event].remove(listener)
        return True

    return False
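# A minimal, runnable sketch (names here are illustrative, not from the
# original library) of why the `return True` statements above work:
# list.remove raises ValueError when the listener is absent, suppress
# swallows it, and control falls through to the next registry instead of
# returning early.
import contextlib

class _DemoEmitter:
    def __init__(self):
        self._listeners = {'tick': []}
        self._once = {'tick': []}

    def remove_listener(self, event, listener):
        with contextlib.suppress(ValueError):
            self._listeners[event].remove(listener)
            return True
        with contextlib.suppress(ValueError):
            self._once[event].remove(listener)
            return True
        return False

emitter = _DemoEmitter()
callback = print
emitter._once['tick'].append(callback)
assert emitter.remove_listener('tick', callback) is True   # found in _once
assert emitter.remove_listener('tick', callback) is False  # nothing left to remove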
def get_outdated_report(self, step: steps.Step) -> OutdatedReport:
    """Return an OutdatedReport class describing why the step is outdated.

    A step is considered to be outdated if an earlier step in the lifecycle
    has been run more recently, or if the source code changed on disk.
    This means the step needs to be updated by taking modified files from
    the previous step. This is in contrast to a "dirty" step, which must
    be cleaned and run again.

    :param steps.Step step: The step to be checked.
    :returns: OutdatedReport if the step is outdated, None otherwise.
    """
    try:
        return getattr(self, "check_{}".format(step.name))()
    except AttributeError:
        with contextlib.suppress(errors.StepHasNotRunError):
            timestamp = self.step_timestamp(step)
            for previous_step in reversed(step.previous_steps()):
                # Has a previous step run since this one ran? Then this
                # step needs to be updated.
                with contextlib.suppress(errors.StepHasNotRunError):
                    if timestamp < self.step_timestamp(previous_step):
                        return OutdatedReport(previous_step_modified=previous_step)
        return None
def _prepare_files(self, src_rel_path='', tgt_rel_path=''):
    """Prepare src files and tgt filenames for a test."""
    with suppress(FileExistsError):
        os.mkdir(os.path.join(self.src_directory, src_rel_path))

    # Prepare the source directory structure under self.src_directory
    # .../<src_rel_path>/test.mp3
    # .../<src_rel_path>/cover.jpg
    old_filename = self._set_up_src_file(
        os.path.join('test', 'data', 'test.mp3'),
        os.path.join(src_rel_path, 'test.mp3'))
    old_additional_filename = self._set_up_src_file(
        os.path.join('test', 'data', 'mb.jpg'),
        os.path.join(src_rel_path, 'cover.jpg'))

    with suppress(FileExistsError):
        os.mkdir(os.path.join(self.tgt_directory, tgt_rel_path))

    # Prepare the target filenames under self.tgt_directory
    # .../<tgt_rel_path>/test.mp3
    # .../<tgt_rel_path>/cover.jpg
    new_filename = self._set_up_tgt_filename(
        os.path.join(tgt_rel_path, 'test.mp3'))
    new_additional_filename = self._set_up_tgt_filename(
        os.path.join(tgt_rel_path, 'cover.jpg'))

    return (old_filename, old_additional_filename,
            new_filename, new_additional_filename)
def fetch_altmetrics(cv):
    """For relevant items in cv with a DOI, fetch altmetrics and save the
    JSON docs in 'altmetrics/'; fetch badge images and save them as PNGs
    in 'latex/images/'.
    """
    URL = 'http://api.altmetric.com/v1/doi/{0}'
    with suppress(FileExistsError):
        ALTMETRICS.mkdir()
    with suppress(FileExistsError):
        LATEX_IMAGES.mkdir()
    for section in WITH_DOI:
        for item in cv[section]:
            doi = item.get('doi')
            if doi:
                res = requests.get(URL.format(doi))
                if 200 == res.status_code:
                    res = res.json()
                    with altmetrics_json_path(doi).open('w') as outfile:
                        json.dump(res, outfile)
                    # image = requests.get(res['images']['small'], stream=True)
                    image = requests.get(
                        'https://d1uo4w7k31k5mn.cloudfront.net/v1/{0}.png'.format(
                            int(res['score']))
                    )
                    with altmetrics_image_path(doi).open('wb') as outfile:
                        for chunk in image:
                            outfile.write(chunk)
def __init__(self, name, options, project=None):
    self.name = name
    self.build_packages = []
    self.stage_packages = []

    with contextlib.suppress(AttributeError):
        self.stage_packages = options.stage_packages.copy()
    with contextlib.suppress(AttributeError):
        self.build_packages = options.build_packages.copy()

    self.project = project
    self.options = options

    # The remote parts can have a '/' in them to separate the main project
    # part from its subparts. This is rather unfortunate as it affects the
    # layout of parts inside the parts directory, causing collisions
    # between the main project part and its subparts.
    part_dir = name.replace('/', '\N{BIG SOLIDUS}')
    if project:
        self.partdir = os.path.join(project.parts_dir, part_dir)
    else:
        self.partdir = os.path.join(os.getcwd(), 'parts', part_dir)
    self.sourcedir = os.path.join(self.partdir, 'src')
    self.installdir = os.path.join(self.partdir, 'install')
    self.build_basedir = os.path.join(self.partdir, 'build')

    source_subdir = getattr(self.options, 'source_subdir', None)
    if source_subdir:
        self.builddir = os.path.join(self.build_basedir, source_subdir)
    else:
        self.builddir = self.build_basedir
def get_dirty_report(self, step):
    """Return a DirtyReport class describing why the step is dirty.

    Returns None if the step is not dirty.
    """
    # Retrieve the stored state for this step (assuming it has already run)
    state = self.get_state(step)
    differing_properties = set()
    differing_options = set()

    with contextlib.suppress(AttributeError):
        # state.properties contains the old YAML that this step cares
        # about, and we're comparing it to those same keys in the current
        # YAML (self._part_properties). If they've changed, then this step
        # is dirty and needs to run again.
        differing_properties = state.diff_properties_of_interest(
            self._part_properties)

    with contextlib.suppress(AttributeError):
        # state.project_options contains the old project options that this
        # step cares about, and we're comparing it to those same options in
        # the current project. If they've changed, then this step is dirty
        # and needs to run again.
        differing_options = state.diff_project_options_of_interest(
            self._project_options)

    if differing_properties or differing_options:
        return DirtyReport(differing_properties, differing_options)

    return None
def show_conf(conf):
    conf = copy.deepcopy(conf)
    with suppress(KeyError):
        conf["driver"] = show_partial(conf["driver"])
    with suppress(KeyError):
        conf["population"] = [conf["population"][0], "…snip…"]
    return str(conf)
def write_snap_directory(self):
    # First migrate the snap directory. It will overwrite any conflicting
    # files.
    for root, directories, files in os.walk('snap'):
        for directory in directories:
            source = os.path.join(root, directory)
            destination = os.path.join(self._snap_dir, source)
            file_utils.create_similar_directory(source, destination)

        for file_path in files:
            source = os.path.join(root, file_path)
            destination = os.path.join(self._snap_dir, source)
            with contextlib.suppress(FileNotFoundError):
                os.remove(destination)
            file_utils.link_or_copy(source, destination)

    # Now copy the hooks contained within the snap directory directly into
    # meta (they don't get wrappers like the ones that come from parts).
    snap_hooks_dir = os.path.join('snap', 'hooks')
    hooks_dir = os.path.join(self._snap_dir, 'meta', 'hooks')
    if os.path.isdir(snap_hooks_dir):
        os.makedirs(hooks_dir, exist_ok=True)
        for hook_name in os.listdir(snap_hooks_dir):
            source = os.path.join(snap_hooks_dir, hook_name)
            destination = os.path.join(hooks_dir, hook_name)

            # First, verify that the hook is actually executable
            if not os.stat(source).st_mode & stat.S_IEXEC:
                raise CommandError('hook {!r} is not executable'.format(
                    hook_name))

            with contextlib.suppress(FileNotFoundError):
                os.remove(destination)

            file_utils.link_or_copy(source, destination)
def _build_app_dict(self, request, label=None):
    """
    Build the app dictionary. The optional `label` parameter filters models
    of a specific app.
    """
    app_dict = {}

    if label:
        models = {
            m: m_a for m, m_a in self._registry.items()
            if m._meta.app_label == label
        }
    else:
        models = self._registry

    for model, model_admin in models.items():
        app_label = model._meta.app_label

        has_module_perms = model_admin.has_module_permission(request)
        if not has_module_perms:
            continue

        perms = model_admin.get_model_perms(request)

        # Check whether user has any perm for this module.
        # If so, add the module to the model_list.
        if True not in perms.values():
            continue

        info = (app_label, model._meta.model_name)
        model_dict = {
            'name': capfirst(model._meta.verbose_name_plural),
            'object_name': model._meta.object_name,
            'perms': perms,
        }
        if perms.get('change'):
            with suppress(NoReverseMatch):
                model_dict['admin_url'] = reverse(
                    'admin:%s_%s_changelist' % info, current_app=self.name)
        if perms.get('add'):
            with suppress(NoReverseMatch):
                model_dict['add_url'] = reverse(
                    'admin:%s_%s_add' % info, current_app=self.name)

        if app_label in app_dict:
            app_dict[app_label]['models'].append(model_dict)
        else:
            app_dict[app_label] = {
                'name': apps.get_app_config(app_label).verbose_name,
                'app_label': app_label,
                'app_url': reverse(
                    'admin:app_list',
                    kwargs={'app_label': app_label},
                    current_app=self.name,
                ),
                'has_module_perms': has_module_perms,
                'models': [model_dict],
            }

    if label:
        return app_dict.get(label)
    return app_dict
def remove_garbage_lock_files():
    lockfilename = ""

    # HUB SINGLE INSTANCE MODE
    lockfilename = os.path.join(_find_home(), ".samp")
    hub_is_running, lockfiledict = check_running_hub(lockfilename)

    if not hub_is_running:
        # If lockfilename belongs to a dead hub, then it is deleted
        if os.path.isfile(lockfilename):
            with suppress(OSError):
                os.remove(lockfilename)

    # HUB MULTIPLE INSTANCE MODE
    lockfiledir = os.path.join(_find_home(), ".samp-1")

    if os.path.isdir(lockfiledir):
        for filename in os.listdir(lockfiledir):
            if filename.startswith('samp-hub'):
                lockfilename = os.path.join(lockfiledir, filename)
                hub_is_running, lockfiledict = check_running_hub(lockfilename)
                if not hub_is_running:
                    # If lockfilename belongs to a dead hub, then it is deleted
                    if os.path.isfile(lockfilename):
                        with suppress(OSError):
                            os.remove(lockfilename)
def _get_word_sentences(self, text, pronounce) -> dict:
    API = (
        "https://xn--fsqx9h.xn--v0qr21b.xn--kpry57d/%E7%9C%8B/"
        "?%E6%BC%A2%E5%AD%97={text}&%E8%87%BA%E7%BE%85={pronounce}"
    )
    url = API.format(text=text, pronounce=pronounce)
    try:
        _ = json.loads(requests.get(url).text)
    except Exception:
        return {}

    mandarin_sentence = None
    with suppress(KeyError, IndexError):
        mandarin_sentence = _["例句"][0]["華語"]

    chinese_sentence = None
    with suppress(KeyError, IndexError):
        chinese_sentence = _["例句"][0]["漢字"]

    taiwanese_sentence = None
    with suppress(KeyError, IndexError):
        taiwanese_sentence = _["例句"][0]["臺羅"]

    d = {
        'mandarin': mandarin_sentence,
        'chinese': chinese_sentence,
        'taiwanese': taiwanese_sentence,
    }
    return d
def wait_closed(self):
    if self.send_hb_task is not None:
        with suppress(asyncio.CancelledError):
            yield from self.send_hb_task
    if self.monitor_task is not None:
        with suppress(asyncio.CancelledError):
            yield from self.monitor_task
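# The method above is a generator-based (pre-async/await) coroutine. A
# rough modern-asyncio equivalent -- a sketch, not from the original code
# -- awaits each pending task under suppress(asyncio.CancelledError):
import asyncio
from contextlib import suppress

async def wait_closed(*tasks):
    for task in tasks:
        if task is not None:
            with suppress(asyncio.CancelledError):
                await task

async def main():
    hb = asyncio.ensure_future(asyncio.sleep(0))
    await wait_closed(hb, None)  # a finished task and a missing task are both fine

asyncio.run(main())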
def retry(self, *args, run_number: int = 0, **kwargs) -> int:
    """Update the number of times the program has run and relaunch it.

    :param args: additional arguments
    :param run_number: the number of times the benchmark has run
    :param kwargs: additional keyword arguments
    :return: 0|1|None on success|failure|unexpected result
    """
    with suppress(subprocess.CalledProcessError):
        launch_and_log(self.trigger.stop_cmd.split(" "))

    with suppress(FileNotFoundError), \
            open(os.path.join(self.trigger.conf.getdir(
                "install", "install_directory"))) as httpd_pid:
        pid = int(httpd_pid.read())
        launch_and_log(["kill", str(pid)])

    run_number += 1
    if run_number > self.maximum_tries:
        return 1

    logging.warning("An error occurred while launching apache, retrying")
    self.trigger.clean_logs()
    return self.run(*args, run_number=run_number, **kwargs)
def tearDown(self):
    # Clean up files created during this test
    with suppress(FileNotFoundError):
        os.remove(self.test_conf_file_path)

    # Give the cluster a bit of extra time to finish working (before
    # forcefully killing it and failing the test)
    with suppress(TestClusterTimeoutError):
        self.cluster.block_until_build_queue_empty(timeout=5)

    # Kill processes and make sure all processes exited with 0 exit code
    services = self.cluster.kill()
    # Only check the exit code if not on Windows, as Popen.terminate kills
    # the process on Windows and the exit code is not zero.
    # TODO: remove the is_windows() check after we can handle exit on
    # Windows gracefully.
    if not is_windows():
        for service in services:
            self.assertEqual(
                service.return_code, 0,
                'Service running on url: {} should exit with code 0, but '
                'exited with code {}.'.format(
                    service.url,
                    service.return_code,
                ),
            )

    # Remove the temp dir. This will delete the log files, so it should be
    # run after the cluster shuts down.
    self.test_app_base_dir.cleanup()
def gatherFrom(self, name, brow=None):
    d = brow or webdriver.PhantomJS()
    d.get(name)
    # if name == 'http://www.proxynova.com/proxy-server-list/elite-proxies/':
    #     # attrs = 'prx', type != 'Transparent', country, port, tmres, time
    #     with suppress(AttributeError):
    #         d.close()
    #     return [i.get_attribute('prx')
    #             for i in browser.find_elements_by_css_selector('.proxy')]
    txt = d.page_source
    allips = [ips for (ips, _) in
              re.findall(r'(([0-9]{1,3}\.){3}[0-9]{1,3}\:[0-9]+)', txt)]
    if allips:
        with suppress(AttributeError):
            d.close()
        return allips
    for i in d.find_elements_by_css_selector('tr'):
        try:
            ip = re.search(r'([0-9]{1,3}\.){3}[0-9]{1,3}', i.text)
            port = re.search(r'\s([0-9]{2,5})\s', i.text)
            allips.append(ip.group() + ':' + port.groups()[0])
        except Exception:
            pass
    with suppress(AttributeError):
        d.close()
    return allips
def read(self, table):
    # If the read kwarg `data_start` is 'guess' then the table may have
    # extraneous lines between the end of the header and the beginning of data.
    if self.data.start_line == 'guess':
        # Replicate the first part of BaseReader.read up to the point where
        # the table lines are initially read in.
        with suppress(TypeError):
            # For strings only
            if os.linesep not in table + '':
                self.data.table_name = os.path.basename(table)

        self.data.header = self.header
        self.header.data = self.data

        # Get a list of the lines (rows) in the table
        lines = self.inputter.get_lines(table)

        # Now try increasing data.start_line by one until the table reads
        # successfully. For efficiency use the in-memory list of lines
        # instead of `table`, which could be a file.
        for data_start in range(len(lines)):
            self.data.start_line = data_start
            with suppress(Exception):
                table = super().read(lines)
                return table
    else:
        return super().read(table)
def __enter__(self):
    # TODO: in flask 0.11 you can use .pop() as a dict
    with suppress(AttributeError):
        self._actual_user = copy.deepcopy(g._actual_user)
        del g._actual_user
    with suppress(AttributeError):
        self.mongo_prefix = copy.deepcopy(g.mongo_prefix)
        del g.mongo_prefix
def setUp(self):
    self.testfile = 'tiny'
    self.testfilepath = os.path.expanduser(os.sep.join(
        ('~', '.xarray_tutorial_data', self.testfile)))
    with suppress(OSError):
        os.remove('{}.nc'.format(self.testfilepath))
    with suppress(OSError):
        os.remove('{}.md5'.format(self.testfilepath))
"proxy", "pip", "run", "shell", "spm", "ssh", "support", "syndic", ) # tiamat pip breaks singlebin on Windows at the moment # https://gitlab.com/saltstack/pop/tiamat-pip/-/issues/4 if not sys.platform.startswith("win"): PIP_PATH = pathlib.Path(f"{os.sep}opt", "saltstack", "salt", "pypath") with contextlib.suppress(PermissionError): PIP_PATH.mkdir(mode=0o755, parents=True, exist_ok=True) tiamatpip.configure.set_user_site_packages_path(PIP_PATH) def redirect(argv): """ Change the args and redirect to another salt script """ if len(argv) < 2: msg = "Must pass in a salt command, available commands are:" for cmd in AVAIL: msg += f"\n{cmd}" print(msg, file=sys.stderr, flush=True) sys.exit(1) cmd = sys.argv[1]
        data.found_in[self.pages['en']] = [self.pages['fr'], self.pages['pl']]
        data.found_in[self.pages['fr']] = [self.pages['en'], self.pages['pl']]
        data.found_in[self.pages['pl']] = [self.pages['en'], self.pages['fr']]
        self.data = data

    def test_simple_graph(self):
        """Test that GraphDrawer.createGraph does not raise exception."""
        drawer = interwiki_graph.GraphDrawer(self.data)
        drawer.createGraph()

    def test_octagon(self):
        """Test octagon nodes."""
        self.data.found_in[self.pages['en2']] = [self.pages['fr']]
        drawer = interwiki_graph.GraphDrawer(self.data)
        self.assertEqual({self.pages['en'].site}, drawer._octagon_site_set())
        drawer.createGraph()
        nodes = drawer.graph.obj_dict['nodes']
        for node, shape in [('"pl:origin"', 'rectangle'),
                            ('"fr:origin"', 'rectangle'),
                            ('"en:origin"', 'octagon')]:
            with self.subTest(node=node):
                self.assertEqual(nodes[node][0]['attributes']['shape'], shape)


if __name__ == '__main__':  # pragma: no cover
    with suppress(SystemExit):
        unittest.main()
async def setup_start(message):
    text = await get_string(message.chat.id, "notes", "filters_setup_start")
    with suppress(MessageNotModified):
        await message.edit_text(text)
async def restore(self, tar_file: tarfile.TarFile) -> None:
    """Restore state of an add-on."""
    with TemporaryDirectory(dir=str(self.sys_config.path_tmp)) as temp:
        # extract snapshot
        def _extract_tarfile():
            """Extract tar snapshot."""
            with tar_file as snapshot:
                snapshot.extractall(path=Path(temp))

        try:
            await self.sys_run_in_executor(_extract_tarfile)
        except tarfile.TarError as err:
            _LOGGER.error("Can't read tarfile %s: %s", tar_file, err)
            raise AddonsError() from None

        # Read snapshot data
        try:
            data = read_json_file(Path(temp, "addon.json"))
        except JsonFileError:
            raise AddonsError() from None

        # Validate
        try:
            data = SCHEMA_ADDON_SNAPSHOT(data)
        except vol.Invalid as err:
            _LOGGER.error(
                "Can't validate %s, snapshot data: %s",
                self.slug,
                humanize_error(data, err),
            )
            raise AddonsError() from None

        # If available
        if not self._available(data[ATTR_SYSTEM]):
            _LOGGER.error("Add-on %s is not available for this platform", self.slug)
            raise AddonsNotSupportedError()

        # Restore local add-on information
        _LOGGER.info("Restore config for addon %s", self.slug)
        restore_image = self._image(data[ATTR_SYSTEM])
        self.sys_addons.data.restore(
            self.slug, data[ATTR_USER], data[ATTR_SYSTEM], restore_image)

        # Check version / restore image
        version = data[ATTR_VERSION]
        if not await self.instance.exists():
            _LOGGER.info("Restore/Install image for addon %s", self.slug)

            image_file = Path(temp, "image.tar")
            if image_file.is_file():
                with suppress(DockerAPIError):
                    await self.instance.import_image(image_file)
            else:
                with suppress(DockerAPIError):
                    await self.instance.install(version, restore_image)
                    await self.instance.cleanup()
        elif self.instance.version != version or self.legacy:
            _LOGGER.info("Restore/Update image for addon %s", self.slug)
            with suppress(DockerAPIError):
                await self.instance.update(version, restore_image)
        else:
            with suppress(DockerAPIError):
                await self.instance.stop()

        # Restore data
        def _restore_data():
            """Restore data."""
            shutil.copytree(str(Path(temp, "data")), str(self.path_data))

        _LOGGER.info("Restore data for addon %s", self.slug)
        if self.path_data.is_dir():
            await remove_data(self.path_data)
        try:
            await self.sys_run_in_executor(_restore_data)
        except shutil.Error as err:
            _LOGGER.error("Can't restore origin data: %s", err)
            raise AddonsError() from None

        # Restore AppArmor
        profile_file = Path(temp, "apparmor.txt")
        if profile_file.exists():
            try:
                await self.sys_host.apparmor.load_profile(self.slug, profile_file)
            except HostAppArmorError:
                _LOGGER.error("Can't restore AppArmor profile")
                raise AddonsError() from None

        # Run add-on
        if data[ATTR_STATE] == STATE_STARTED:
            return await self.start()

    _LOGGER.info("Finish restore for add-on %s", self.slug)
def unregister_folder_link(self, path: Path) -> None:
    with suppress(OSError):
        self._get_folder_link(path.name).unlink()
def tearDown(self) -> None:
    with suppress(OSError):
        os.remove(self.SUBDAG_TEST_FILEPATH)
def strip_dimensions(self, text_lines, location, ax):
    """
    Calculate the strip dimensions.

    Returns
    -------
    out : types.SimpleNamespace
        A structure with all the coordinates required to draw the
        strip text and the background box.
    """
    dpi = 72
    num_lines = len(text_lines)
    get_property = self.theme.themeables.property
    bbox = ax.get_window_extent().transformed(
        self.figure.dpi_scale_trans.inverted())
    ax_width, ax_height = bbox.width, bbox.height  # in inches
    strip_size = self.strip_size(location, num_lines)
    m1, m2 = self.inner_strip_margins(location)
    m1, m2 = m1 / dpi, m2 / dpi
    margin = 0  # default

    if location == 'right':
        box_x = 1
        box_y = 0
        box_width = strip_size / ax_width
        box_height = 1
        # y & height properties of the background slide and
        # shrink the strip vertically. The y margin slides
        # it horizontally.
        with suppress(KeyError):
            box_y = get_property('strip_background_y', 'y')
        with suppress(KeyError):
            box_height = get_property('strip_background_y', 'height')
        with suppress(KeyError):
            margin = get_property('strip_margin_y')
        x = 1 + (strip_size - m2 + m1) / (2 * ax_width)
        y = (2 * box_y + box_height) / 2
        # margin adjustment
        hslide = 1 + margin * strip_size / ax_width
        x *= hslide
        box_x *= hslide
    else:
        box_x = 0
        box_y = 1
        box_width = 1
        box_height = strip_size / ax_height
        # x & width properties of the background slide and
        # shrink the strip horizontally. The y margin slides
        # it vertically.
        with suppress(KeyError):
            box_x = get_property('strip_background_x', 'x')
        with suppress(KeyError):
            box_width = get_property('strip_background_x', 'width')
        with suppress(KeyError):
            margin = get_property('strip_margin_x')
        x = (2 * box_x + box_width) / 2
        y = 1 + (strip_size - m1 + m2) / (2 * ax_height)
        # margin adjustment
        vslide = 1 + margin * strip_size / ax_height
        y *= vslide
        box_y *= vslide

    dimensions = types.SimpleNamespace(
        x=x, y=y, box_x=box_x, box_y=box_y,
        box_width=box_width, box_height=box_height)
    return dimensions
def unsubscribe(self, name):
    with suppress(KeyError):
        self._subscribers.remove(name)
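# For a set, wrapping .remove() in suppress(KeyError) as above has the
# same net effect as calling .discard(); the suppress form also works for
# containers that have no discard(). A minimal check:
from contextlib import suppress

subscribers = {'alice', 'bob'}
with suppress(KeyError):
    subscribers.remove('carol')  # absent: no error, same as .discard('carol')
assert subscribers == {'alice', 'bob'}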
allowed_failures = set()
print("python exec:", sys.executable)
print("sys.path:", sys.path)

if "conda" in sys.executable:
    print("conda environment:")
    subprocess.run(["conda", "list"])
else:
    print("pip environment:")
    subprocess.run(["pip", "list"])

print(f"xarray: {xarray.__version__}, {xarray.__file__}")

with suppress(ImportError):
    import matplotlib

    matplotlib.use("Agg")

try:
    import rasterio  # noqa: F401
except ImportError:
    allowed_failures.update(
        ["gallery/plot_rasterio_rgb.py", "gallery/plot_rasterio.py"])

try:
    import cartopy  # noqa: F401
except ImportError:
    allowed_failures.update([
        "gallery/plot_cartopy_facetgrid.py",
class JsonMixin:
    '''
    Mixin to use a JSON serializer.
    '''
    _SERIALIZER = json


class IniMixin:
    '''
    Mixin to use an INI serializer.
    '''
    _SERIALIZER = ini


with contextlib.suppress(ImportError):
    from xdgconfig.serializers import xml

    __all__.append('XmlMixin')

    class XmlMixin:
        '''
        Mixin to use an XML serializer.
        '''
        _SERIALIZER = xml


with contextlib.suppress(ImportError):
    from xdgconfig.serializers import toml

    __all__.append('TomlMixin')

    class TomlMixin:
async def version(request):
    # Use the version.json file in the current dir.
    with suppress(IOError):
        with open(VERSION_FILE) as fd:
            return web.json_response(json.load(fd))
    return web.HTTPNotFound()
async def build(self):  # noqa: D102
    args = self.context.args
    logger.info(
        "Building ROS package in '{args.path}' with build type 'catkin'"
        .format_map(locals()))

    # reuse CMake build task with additional logic
    extension = CmakeBuildTask()
    extension.set_context(context=self.context)

    # additional arguments
    if args.cmake_args is None:
        args.cmake_args = []
    args.cmake_args += ['-DCATKIN_INSTALL_INTO_PREFIX_ROOT=0']
    if args.test_result_base:
        # catkin appends the project name itself
        args.cmake_args.append(
            '-DCATKIN_TEST_RESULTS_DIR=' +
            os.path.dirname(args.test_result_base))
    if args.symlink_install:
        args.cmake_args.append('-DCATKIN_SYMLINK_INSTALL=ON')
    if args.catkin_cmake_args:
        args.cmake_args += args.catkin_cmake_args

    # invoke the build
    additional_targets = []
    # if no specific target is specified consider building the 'tests'
    # target and continue if such a target doesn't exist
    if args.cmake_target is None:
        if not args.catkin_skip_building_tests:
            additional_targets.append('tests')
        args.cmake_target_skip_unavailable = True
    rc = await extension.build(
        skip_hook_creation=True, additional_targets=additional_targets)

    # if the build has failed getting targets might not be possible
    try:
        has_install_target = await has_target(args.build_base, 'install')
    except Exception:  # noqa: B902
        if not rc:
            raise
        has_install_target = False

    # for catkin packages add additional hooks after the package has
    # been built and installed depending on the installed files,
    # only if the package has an install target
    additional_hooks = []
    if has_install_target:
        additional_hooks += create_environment_hook(
            'ros_package_path', Path(args.install_base),
            self.context.pkg.name, 'ROS_PACKAGE_PATH', 'share',
            mode='prepend')
        additional_hooks += create_pythonpath_environment_hook(
            args.build_base, Path(args.install_base), self.context.pkg.name)
        additional_hooks += create_pkg_config_path_environment_hooks(
            Path(args.install_base), self.context.pkg.name)

        # register hooks created via catkin_add_env_hooks
        shell_extensions = get_shell_extensions()
        file_extensions = OrderedDict()
        for shell_extensions_same_prio in shell_extensions.values():
            for shell_extension in shell_extensions_same_prio.values():
                for file_ext in shell_extension.get_file_extensions():
                    file_extensions[file_ext] = shell_extension
        custom_hooks_path = Path(args.install_base) / \
            'share' / self.context.pkg.name / 'catkin_env_hook'
        for file_extension, shell_extension in file_extensions.items():
            file_extension_hooks = sorted(custom_hooks_path.glob(
                '*.{file_extension}'.format_map(locals())))
            if file_extension_hooks:
                # since not all shell extensions might implement this
                with suppress(NotImplementedError):
                    # try to set CATKIN_ENV_HOOK_WORKSPACE explicitly
                    # before sourcing these hooks
                    additional_hooks.append(
                        shell_extension.create_hook_set_value(
                            'catkin_env_hook_workspace',
                            Path(args.install_base), self.context.pkg.name,
                            'CATKIN_ENV_HOOK_WORKSPACE', ''))
                    additional_hooks += file_extension_hooks

    create_environment_scripts(
        self.context.pkg, args, additional_hooks=additional_hooks)

    # ensure that the install base has the marker file
    # identifying it as a catkin workspace
    marker = Path(args.install_base) / '.catkin'
    marker.touch(exist_ok=True)

    return rc
def _drop_table(self, name: str) -> None:
    # The database might have been dropped, so we suppress the
    # corresponding exception.
    with contextlib.suppress(ImpylaError):
        self.drop_table(name)
            break
        if not t.parallelisable:
            break
        transformers.append(t)

    pipes = fns[len(transformers):]

    if transformers and transformers[0].expect_json:
        transformers.insert(0, JsonReader())

    log = logging.getLogger(__name__).info
    with smart_open(file, "r") as f, smart_open(output, "w") as o:
        # SimpleIO implements "write" so print works, but not all the IO methods.
        text_o = cast(TextIO, o)
        # suppress BrokenPipeError to allow running inside a unix pipe.
        with contextlib.suppress(BrokenPipeError), contextlib.ExitStack() as stack:
            if not transformers:
                results: Iterable = f
            else:
                log(f"preparing {transformers}")
                transform = stack.enter_context(compose(transformers))
                if processes <= 0:
                    results = transform.map(f)
                else:
                    p = multiprocessing.current_process()
                    log(f"Will start {processes} processes from {p.name}, Pid: {p.pid}")
                    pool = stack.enter_context(
                        multiprocessing.Pool(
                            processes=processes,
                            initializer=set_global_transformer,
def deploy(self):
    with suppress(FileNotFoundError):
        self.dest.unlink()
    with suppress(FileNotFoundError):
        copy(self.src, self.dest)
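# The suppress(FileNotFoundError) idiom used above makes deletion
# idempotent without the check-then-remove race: an os.path.exists()
# guard can still raise if the file vanishes between the check and the
# remove. A minimal sketch with an illustrative path:
import os
from contextlib import suppress

def remove_if_exists(path):
    with suppress(FileNotFoundError):
        os.remove(path)

remove_if_exists('/tmp/definitely-not-there')  # no exception either way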
def on_status(self, status):
    # Use this if you plan to use the json functionality below
    # with open('tweets.json', 'a', encoding='utf-8') as f:

    # Suppress errors so if that specific tweet has an issue for whatever
    # reason, it will skip it. Similar to try/except.
    with suppress(Exception):
        userid = str(status.user.id)
        # "userid in ids" mentioned below removes all of the mentions and
        # retweets and makes sure it only comes from the original account.
        # Tweepy has no built-in way to exclude that to my knowledge, based
        # on stackoverflow answers.
        if FollowerMode == True and userid in ids:
            # You can do this for example - " if status.place.country == 'United States': ",
            # but most people don't have their country listed.
            # status.user.location often shows 'state' or 'city, state'
            # and/or country, but their location is user set so it can
            # really be something made up like 'outer space'. If it's that
            # important, you could always try and use an API to see if it's
            # a valid location.
            print('-' * 80)
            # Prints the name for each ID that's defined in 'idsdict'
            with suppress(Exception):
                print(list(idsdict.keys())[list(idsdict.values()).index(int(userid))])
            print('User: ' + status.user.screen_name)
            if status.user.location != 'None':
                print('Location: ' + status.user.location)
            with suppress(Exception):
                print('Country: ' + status.place.country)
            # Checks to see if the tweet is 'extended'/long. If it is, it
            # will display the full tweet.
            try:
                text = status.extended_tweet['full_text']
            except AttributeError:
                text = status.text
            print('Tweet: ' + text)
        elif FollowerMode == False:
            print('-' * 80)
            print('User: ' + status.user.screen_name)
            if status.user.location != 'None':
                print('Location: ' + status.user.location)
            with suppress(Exception):
                print('Country: ' + status.place.country)
            # print(status)
            try:
                text = status.extended_tweet['full_text']
            except AttributeError:
                text = status.text
            print('Tweet: ' + text)
            # OPTIONAL - Auto responder bot thingy
            if search[0] in text:
                try:
                    api.update_status(
                        f"@{status.user.screen_name} hmmm weird flex but ok..{status.id}",
                        status.user.id)
                    print(f'Replied to {status.user.screen_name}')
                    print(text)
                except Exception as e:
                    print(f'couldn\'t reply. \n Reason: {e}')
                sleep(0.015)
        # Prevents the display from hiccups and keeps the scrolling smooth
        # when scanning all
        sleep(0.016)
async def load(self) -> None:
    """Async initialize of object."""
    with suppress(DockerAPIError):
        await self.instance.attach(tag=self.version)
def _run_surface_impl(*controls,
                      system,
                      surface,
                      num_lines,
                      min_neighbour_dist,
                      save_file=None,
                      init_result=None,
                      serializer='auto'):
    r"""Implementation of the surface's run.

    :param controls: Control objects which govern the iteration.
    :type controls: AbstractControl

    The other parameters are the same as for :meth:`.run`.
    """
    start_time = time.time()

    # CONTROL SETUP

    def filter_ctrl(ctrl_type):
        return [ctrl for ctrl in controls if isinstance(ctrl, ctrl_type)]

    line_ctrl = filter_ctrl(LineControl)
    controls = filter_ctrl(SurfaceControl)
    stateful_ctrl = filter_ctrl(StatefulControl)
    data_ctrl = filter_ctrl(DataControl)
    convergence_ctrl = filter_ctrl(ConvergenceControl)

    # HELPER FUNCTIONS

    def get_line(t, init_line_result=None):
        """Runs a line calculation and returns its result."""
        return _line_run._run_line_impl(
            *copy.deepcopy(line_ctrl),
            system=system,
            line=lambda ky: surface(t, ky),
            init_result=init_line_result)

    # setting up async handler
    if save_file is not None:
        def handler(res):
            _LOGGER.info('Saving surface result to file {} (ASYNC)'.format(save_file))
            io.save(res, save_file, serializer=serializer)
    else:
        handler = None

    with AsyncHandler(handler) as save_thread:

        def add_line(t):
            """Adds a line to the Surface, unless it is closer than
            min_neighbour_dist to an existing line.
            """
            # find whether the line is allowed still
            dist = data.nearest_neighbour_dist(t)
            if dist < min_neighbour_dist:
                if dist == 0:
                    _LOGGER.info("Line at t = {} exists already.".format(t))
                else:
                    _LOGGER.warn(
                        "'min_neighbour_dist' reached: cannot add line at t = {}"
                        .format(t))
                return SurfaceResult(data, stateful_ctrl, convergence_ctrl)

            _LOGGER.info('Adding line at t = {}'.format(t))
            data.add_line(t, get_line(t))

            return update_result()

        def update_result():
            """Updates all data controls, then creates the result object,
            saves it to file if necessary and returns the result.
            """
            # update data controls
            for d_ctrl in data_ctrl:
                d_ctrl.update(data)

            result = SurfaceResult(data, stateful_ctrl, convergence_ctrl)
            save_thread.send(copy.deepcopy(result))
            return result

        def collect_convergence():
            """Calculates which neighbours are not converged."""
            res = np.array([True] * (len(data.lines) - 1))
            for c_ctrl in convergence_ctrl:
                res &= c_ctrl.converged
            _LOGGER.info(
                'Convergence criteria fulfilled for {} of {} neighbouring lines.'
                .format(sum(res), len(res)))
            return res

        # STEP 1 -- MAKE USE OF INIT_RESULT
        # initialize stateful controls from old result
        if init_result is not None:
            _LOGGER.info("Initializing result from 'init_result'.")
            # make sure old result doesn't change
            init_result = copy.deepcopy(init_result)

            # get states from pre-existing Controls
            for s_ctrl in stateful_ctrl:
                with contextlib.suppress(KeyError):
                    s_ctrl.state = init_result.ctrl_states[
                        s_ctrl.__class__.__name__]

            data = init_result.data

            # re-run lines with existing result as input
            _LOGGER.info('Re-running existing lines.')
            for line in data.lines:
                _LOGGER.info('Re-running line for t = {}'.format(line.t))
                line.result = get_line(line.t, line.result)
                update_result()
        else:
            data = SurfaceData()

        # STEP 2 -- PRODUCE REQUIRED STRINGS
        # create lines required by num_lines
        _LOGGER.info("Adding lines required by 'num_lines'.")
        for t in np.linspace(0, 1, num_lines):
            result = add_line(t)

        # STEP 3 -- MAIN LOOP
        N = len(data.lines)
        conv = collect_convergence()
        while not all(conv):
            # add lines for all non-converged values
            new_t = [(t1 + t2) / 2
                     for (t1, t2), c in zip(zip(data.t, data.t[1:]), conv)
                     if not c]
            for t in new_t:
                result = add_line(t)

            # check if new lines appeared
            N_new = len(data.lines)
            if N == N_new:
                break
            N = N_new
            conv = collect_convergence()

    end_time = time.time()
    _LOGGER.info(end_time - start_time, tags=('box', 'skip-before', 'timing'))
    _LOGGER.info(result.convergence_report, tags=('box', 'convergence_report', 'skip'))
    return result
async def restart(self) -> None:
    """Restart add-on."""
    with suppress(AddonsError):
        await self.stop()
    await self.start()
def __init__(self) -> None:
    self.matches: List[str] = []
    ddir = cache_dir()
    with suppress(FileExistsError):
        os.makedirs(ddir)
    self.history_path = os.path.join(ddir, 'shell.history')
async def _await_cancel(task):
    task.cancel()
    with suppress(asyncio.CancelledError):
        await task
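# Typical usage of the cancel-and-await helper above -- a sketch with an
# illustrative long-running task; suppress(asyncio.CancelledError) keeps
# the cancellation from propagating out of the await:
import asyncio
from contextlib import suppress

async def _await_cancel(task):
    task.cancel()
    with suppress(asyncio.CancelledError):
        await task

async def main():
    task = asyncio.ensure_future(asyncio.sleep(3600))
    await _await_cancel(task)  # returns promptly instead of waiting an hour

asyncio.run(main())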
async def welcome_security_passed(message: Union[CallbackQuery, Message], state, strings):
    user_id = message.from_user.id
    async with state.proxy() as data:
        chat_id = data['chat_id']
        msg_id = data['msg_id']
        verify_msg_id = data['verify_msg_id']
        to_delete = data['to_delete']

    with suppress(ChatAdminRequired):
        await unmute_user(chat_id, user_id)

    with suppress(MessageToDeleteNotFound, MessageCantBeDeleted):
        if to_delete:
            await bot.delete_message(chat_id, msg_id)
        await bot.delete_message(user_id, verify_msg_id)
    await state.finish()

    with suppress(MessageToDeleteNotFound, MessageCantBeDeleted):
        message_id = redis.get(f"welcome_security_users:{user_id}:{chat_id}")
        # Delete the person's real security button if it exists!
        if message_id:
            await bot.delete_message(chat_id, message_id)
            redis.delete(f"welcome_security_users:{user_id}:{chat_id}")

    with suppress(JobLookupError):
        scheduler.remove_job(f"wc_expire:{chat_id}:{user_id}")

    title = (await db.chat_list.find_one({'chat_id': chat_id}))['chat_title']

    if 'data' in message:
        await message.answer(strings['passed_no_frm'] % title, show_alert=True)
    else:
        await message.reply(strings['passed'] % title)

    db_item = await get_greetings_data(chat_id)

    if 'message' in message:
        message = message.message

    # Welcome
    if 'note' in db_item and not db_item.get("welcome_disabled", False):
        text, kwargs = await t_unparse_note_item(
            message.reply_to_message if message.reply_to_message is not None else message,
            db_item['note'],
            chat_id)
        await send_note(user_id, text, **kwargs)

    # Welcome mute
    if 'welcome_mute' in db_item and db_item['welcome_mute']['enabled'] is not False:
        user = await bot.get_chat_member(chat_id, user_id)
        if 'can_send_messages' not in user or user['can_send_messages'] is True:
            await restrict_user(
                chat_id, user_id,
                until_date=convert_time(db_item['welcome_mute']['time']))

    chat = await db.chat_list.find_one({'chat_id': chat_id})
    buttons = None
    if chat_nick := chat['chat_nick'] if chat.get('chat_nick', None) else None:
        buttons = InlineKeyboardMarkup().add(
            InlineKeyboardButton(text=strings['click_here'], url=f"t.me/{chat_nick}"))
def main(
    scheduler,
    host,
    worker_port,
    listen_address,
    contact_address,
    nanny_port,
    nthreads,
    nprocs,
    nanny,
    name,
    pid_file,
    resources,
    dashboard,
    bokeh,
    bokeh_port,
    scheduler_file,
    dashboard_prefix,
    tls_ca_file,
    tls_cert,
    tls_key,
    dashboard_address,
    worker_class,
    preload_nanny,
    **kwargs,
):
    g0, g1, g2 = gc.get_threshold()  # https://github.com/dask/distributed/issues/1653
    gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)

    enable_proctitle_on_current()
    enable_proctitle_on_children()

    if bokeh_port is not None:
        warnings.warn(
            "The --bokeh-port flag has been renamed to --dashboard-address. "
            "Consider adding ``--dashboard-address :%d`` " % bokeh_port)
        dashboard_address = bokeh_port
    if bokeh is not None:
        warnings.warn(
            "The --bokeh/--no-bokeh flag has been renamed to "
            "--dashboard/--no-dashboard. ")
        dashboard = bokeh

    sec = {
        k: v
        for k, v in [
            ("tls_ca_file", tls_ca_file),
            ("tls_worker_cert", tls_cert),
            ("tls_worker_key", tls_key),
        ]
        if v is not None
    }

    if nprocs == "auto":
        nprocs, nthreads = nprocesses_nthreads()
    else:
        nprocs = int(nprocs)

    if nprocs < 0:
        nprocs = CPU_COUNT + 1 + nprocs

    if nprocs <= 0:
        logger.error(
            "Failed to launch worker. Must specify --nprocs so that there's "
            "at least one process.")
        sys.exit(1)

    if nprocs > 1 and not nanny:
        logger.error(
            "Failed to launch worker. You cannot use the --no-nanny argument "
            "when nprocs > 1.")
        sys.exit(1)

    if contact_address and not listen_address:
        logger.error(
            "Failed to launch worker. "
            "Must specify --listen-address when --contact-address is given")
        sys.exit(1)

    if nprocs > 1 and listen_address:
        logger.error(
            "Failed to launch worker. "
            "You cannot specify --listen-address when nprocs > 1.")
        sys.exit(1)

    if (worker_port or host) and listen_address:
        logger.error(
            "Failed to launch worker. "
            "You cannot specify --listen-address when --worker-port or "
            "--host is given.")
        sys.exit(1)

    try:
        if listen_address:
            (host, worker_port) = get_address_host_port(listen_address, strict=True)

        if contact_address:
            # we only need this to verify it is getting parsed
            (_, _) = get_address_host_port(contact_address, strict=True)
        else:
            # if contact address is not present we use the listen_address for contact
            contact_address = listen_address
    except ValueError as e:
        logger.error("Failed to launch worker. " + str(e))
        sys.exit(1)

    if nanny:
        port = nanny_port
    else:
        port = worker_port

    if not nthreads:
        nthreads = CPU_COUNT // nprocs

    if pid_file:
        with open(pid_file, "w") as f:
            f.write(str(os.getpid()))

        def del_pid_file():
            if os.path.exists(pid_file):
                os.remove(pid_file)

        atexit.register(del_pid_file)

    if resources:
        resources = resources.replace(",", " ").split()
        resources = dict(pair.split("=") for pair in resources)
        resources = valmap(float, resources)
    else:
        resources = None

    loop = IOLoop.current()

    worker_class = import_term(worker_class)
    if nanny:
        kwargs["worker_class"] = worker_class
        kwargs["preload_nanny"] = preload_nanny

    if nanny:
        kwargs.update({
            "worker_port": worker_port,
            "listen_address": listen_address,
        })
        t = Nanny
    else:
        if nanny_port:
            kwargs["service_ports"] = {"nanny": nanny_port}
        t = worker_class

    if (not scheduler and not scheduler_file
            and dask.config.get("scheduler-address", None) is None):
        raise ValueError(
            "Need to provide scheduler address like\n"
            "dask-worker SCHEDULER_ADDRESS:8786")

    with suppress(TypeError, ValueError):
        name = int(name)

    nannies = [
        t(
            scheduler,
            scheduler_file=scheduler_file,
            nthreads=nthreads,
            loop=loop,
            resources=resources,
            security=sec,
            contact_address=contact_address,
            host=host,
            port=port,
            dashboard=dashboard,
            dashboard_address=dashboard_address,
            name=name if nprocs == 1 or name is None or name == ""
            else str(name) + "-" + str(i),
            **kwargs,
        )
        for i in range(nprocs)
    ]

    async def close_all():
        # Unregister all workers from scheduler
        if nanny:
            await asyncio.gather(*[n.close(timeout=2) for n in nannies])

    signal_fired = False

    def on_signal(signum):
        nonlocal signal_fired
        signal_fired = True
        if signum != signal.SIGINT:
            logger.info("Exiting on signal %d", signum)
        return asyncio.ensure_future(close_all())

    async def run():
        await asyncio.gather(*nannies)
        await asyncio.gather(*[n.finished() for n in nannies])

    install_signal_handlers(loop, cleanup=on_signal)

    try:
        loop.run_sync(run)
    except TimeoutError:
        # We already log the exception in nanny / worker. Don't do it again.
        if not signal_fired:
            logger.info("Timed out starting worker")
        sys.exit(1)
    except KeyboardInterrupt:
        pass
    finally:
        logger.info("End worker")
    # Welcome
    if 'note' not in db_item:
        db_item['note'] = {
            'text': strings['default_welcome'],
            'parse_mode': 'md'
        }
    reply_to = (message.message_id
                if 'clean_welcome' in db_item
                and db_item['clean_welcome']['enabled'] is not False
                else None)
    text, kwargs = await t_unparse_note_item(message, db_item['note'], chat_id)
    msg = await send_note(chat_id, text, reply_to=reply_to, **kwargs)

    # Clean welcome
    if 'clean_welcome' in db_item and db_item['clean_welcome']['enabled'] is not False:
        if 'last_msg' in db_item['clean_welcome']:
            with suppress(MessageToDeleteNotFound, MessageCantBeDeleted):
                if value := redis.get(_clean_welcome.format(chat=chat_id)):
                    await bot.delete_message(chat_id, value)
        redis.set(_clean_welcome.format(chat=chat_id), msg.id)

    # Welcome mute
    if user_id == BOT_ID:
        return
    if 'welcome_mute' in db_item and db_item['welcome_mute']['enabled'] is not False:
        user = await bot.get_chat_member(chat_id, user_id)
        if 'can_send_messages' not in user or user['can_send_messages'] is True:
            if not await check_admin_rights(message, chat_id, BOT_ID, ['can_restrict_members']):
                await message.reply(strings['not_admin_wm'])
                return
@get_strings_dec("notes")
async def note_btn(event, strings, regexp=None, **kwargs):
    chat_id = int(regexp.group(2))
    user_id = event.from_user.id
    note_name = regexp.group(1).lower()

    if not (note := await db.notes.find_one({
        "chat_id": chat_id,
        "names": {"$in": [note_name]}
    })):
        await event.answer(strings["no_note"])
        return

    with suppress(MessageCantBeDeleted):
        await event.message.delete()

    await get_note(
        event.message,
        db_item=note,
        chat_id=chat_id,
        send_id=user_id,
        rpl_id=None,
        event=event,
    )


@register(CommandStart(re.compile(r"btnnotesm")), allow_kwargs=True)
@get_strings_dec("notes")
async def note_start(message, strings, regexp=None, **kwargs):
    # Don't even ask what it means; mostly it's a workaround to support
    # note names with _
import contextlib
import os
import shutil

os.chdir(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))

shutil.rmtree('.pytest_cache', ignore_errors=True)
# shutil.rmtree(os.path.join('bpc_template', '__pycache__'), ignore_errors=True)
shutil.rmtree(os.path.join('tests', '__pycache__'), ignore_errors=True)

with contextlib.suppress(OSError):
    os.remove('.coverage')
def get_influx_connection(conf, test_write=False, test_read=False):  # noqa: C901
    """Create the correct influx connection for the API version."""
    kwargs = {
        CONF_TIMEOUT: TIMEOUT,
    }
    precision = conf.get(CONF_PRECISION)

    if conf[CONF_API_VERSION] == API_VERSION_2:
        kwargs[CONF_TIMEOUT] = TIMEOUT * 1000
        kwargs[CONF_URL] = conf[CONF_URL]
        kwargs[CONF_TOKEN] = conf[CONF_TOKEN]
        kwargs[INFLUX_CONF_ORG] = conf[CONF_ORG]
        kwargs[CONF_VERIFY_SSL] = conf[CONF_VERIFY_SSL]
        if CONF_SSL_CA_CERT in conf:
            kwargs[CONF_SSL_CA_CERT] = conf[CONF_SSL_CA_CERT]
        bucket = conf.get(CONF_BUCKET)
        influx = InfluxDBClientV2(**kwargs)
        query_api = influx.query_api()
        initial_write_mode = SYNCHRONOUS if test_write else ASYNCHRONOUS
        write_api = influx.write_api(write_options=initial_write_mode)

        def write_v2(json):
            """Write data to V2 influx."""
            data = {"bucket": bucket, "record": json}

            if precision is not None:
                data["write_precision"] = precision

            try:
                write_api.write(**data)
            except (urllib3.exceptions.HTTPError, OSError) as exc:
                raise ConnectionError(CONNECTION_ERROR % exc) from exc
            except ApiException as exc:
                if exc.status == CODE_INVALID_INPUTS:
                    raise ValueError(WRITE_ERROR % (json, exc)) from exc
                raise ConnectionError(CLIENT_ERROR_V2 % exc) from exc

        def query_v2(query, _=None):
            """Query V2 influx."""
            try:
                return query_api.query(query)
            except (urllib3.exceptions.HTTPError, OSError) as exc:
                raise ConnectionError(CONNECTION_ERROR % exc) from exc
            except ApiException as exc:
                if exc.status == CODE_INVALID_INPUTS:
                    raise ValueError(QUERY_ERROR % (query, exc)) from exc
                raise ConnectionError(CLIENT_ERROR_V2 % exc) from exc

        def close_v2():
            """Close V2 influx client."""
            influx.close()

        buckets = []
        if test_write:
            # Try to write b"" to influx. If we can connect and creds are
            # valid, then "invalid inputs" is returned; anything else is a
            # broken config.
            with suppress(ValueError):
                write_v2(b"")
            write_api = influx.write_api(write_options=ASYNCHRONOUS)

        if test_read:
            tables = query_v2(TEST_QUERY_V2)
            if tables and tables[0].records:
                buckets = [bucket.values["name"] for bucket in tables[0].records]
            else:
                buckets = []

        return InfluxClient(buckets, write_v2, query_v2, close_v2)

    # Else it's a V1 client
    if CONF_SSL_CA_CERT in conf and conf[CONF_VERIFY_SSL]:
        kwargs[CONF_VERIFY_SSL] = conf[CONF_SSL_CA_CERT]
    else:
        kwargs[CONF_VERIFY_SSL] = conf[CONF_VERIFY_SSL]
    if CONF_DB_NAME in conf:
        kwargs[CONF_DB_NAME] = conf[CONF_DB_NAME]
    if CONF_USERNAME in conf:
        kwargs[CONF_USERNAME] = conf[CONF_USERNAME]
    if CONF_PASSWORD in conf:
        kwargs[CONF_PASSWORD] = conf[CONF_PASSWORD]
    if CONF_HOST in conf:
        kwargs[CONF_HOST] = conf[CONF_HOST]
    if CONF_PATH in conf:
        kwargs[CONF_PATH] = conf[CONF_PATH]
    if CONF_PORT in conf:
        kwargs[CONF_PORT] = conf[CONF_PORT]
    if CONF_SSL in conf:
        kwargs[CONF_SSL] = conf[CONF_SSL]

    influx = InfluxDBClient(**kwargs)

    def write_v1(json):
        """Write data to V1 influx."""
        try:
            influx.write_points(json, time_precision=precision)
        except (
            requests.exceptions.RequestException,
            exceptions.InfluxDBServerError,
            OSError,
        ) as exc:
            raise ConnectionError(CONNECTION_ERROR % exc) from exc
        except exceptions.InfluxDBClientError as exc:
            if exc.code == CODE_INVALID_INPUTS:
                raise ValueError(WRITE_ERROR % (json, exc)) from exc
            raise ConnectionError(CLIENT_ERROR_V1 % exc) from exc

    def query_v1(query, database=None):
        """Query V1 influx."""
        try:
            return list(influx.query(query, database=database).get_points())
        except (
            requests.exceptions.RequestException,
            exceptions.InfluxDBServerError,
            OSError,
        ) as exc:
            raise ConnectionError(CONNECTION_ERROR % exc) from exc
        except exceptions.InfluxDBClientError as exc:
            if exc.code == CODE_INVALID_INPUTS:
                raise ValueError(QUERY_ERROR % (query, exc)) from exc
            raise ConnectionError(CLIENT_ERROR_V1 % exc) from exc

    def close_v1():
        """Close the V1 Influx client."""
        influx.close()

    databases = []
    if test_write:
        write_v1([])

    if test_read:
        databases = [db["name"] for db in query_v1(TEST_QUERY_V1)]

    return InfluxClient(databases, write_v1, query_v1, close_v1)
def _notify_release(self) -> None:
    callbacks, self._callbacks = self._callbacks[:], []

    for cb in callbacks:
        with suppress(Exception):
            cb()
async def fed_post_log(fed, text):
    if 'log_chat_id' not in fed:
        return
    chat_id = fed['log_chat_id']
    with suppress(Unauthorized, NeedAdministratorRightsInTheChannel, ChatNotFound):
        await bot.send_message(chat_id, text)
async def __auto_poll():
    logger.debug('Auto poll started.')
    with suppress(asyncio.CancelledError):
        while True:
            producer.poll(0)
            await asyncio.sleep(0.1)