def generate_ebuild_template(*, description: str, enable_python: bool, enable_dobin: bool, homepage: str, app_path: Path, app_name: str, ):
    """Render an ebuild from the module-level `ebuild` template.

    Optionally enables the distutils-r1 eclass/python deps and appends a
    src_install/dobin stanza for the named app.
    """
    ic(enable_python)
    # only pull in the python eclass and deps when requested
    inherit_python = 'inherit distutils-r1' if enable_python else ''
    rdepend_python = depend_python if enable_python else ''
    text = ebuild.format(
        description=description,
        inherit_python=inherit_python,
        depend_python=rdepend_python,
        homepage=homepage,
        app_path=app_path,
        year=str(date.today().year),
    )
    if enable_dobin:
        text += generate_src_install_dobin_template(app_name)
    return text
def generate_setup_py(*, url: str, package_name: str, command: str, license: str, owner: str, owner_email: str, description: str, ):
    """Render the module-level `setup_py` template with the project metadata."""
    ic(url, package_name, command, license, owner, owner_email, description,)
    rendered = setup_py.format(
        package_name=package_name,
        command=command,
        url=url,
        license=license,
        owner=owner,
        owner_email=owner_email,
        description=description,
    )
    return rendered
def dumpconfig( ctx, verbose: bool, debug: bool, ipython: bool, null: bool, ):
    """Pretty-print the click config, then dump the pubchem column schema.

    `null` is accepted for CLI interface parity but is unused here.
    """
    database = ctx.obj['database']
    config, config_mtime = click_read_config(
        click_instance=click,
        app_name=ctx.obj['appname'],
        verbose=verbose,
        debug=debug,
    )
    pprint.pprint(config)
    query = "select * from INFORMATION_SCHEMA.COLUMNS where table_name = 'pubchem'"
    with self_contained_session(db_url=database) as session:
        for index, match in enumerate(session.bind.execute(query).fetchall()):
            ic(index, match)
        if ipython:
            import IPython
            IPython.embed()
def cli(
    media: Optional[tuple[str]],
    novideo: bool,
    noaudio: bool,
    subtitles: bool,
    loop: bool,
    random: bool,
    skip_ahead: int,
    not_fullscreen,
    verbose: Union[bool, int, float],
    verbose_inf: bool,
    dict_input: bool,
):
    """Play each media path (from args or the unmp stream), skipping deferred channels.

    A PlayChanLaterError raised by play() carries a channel in args[0]; that
    channel is added to a skip set so later files from it are passed over.
    """
    # video = not novideo
    fullscreen = not not_fullscreen
    if verbose:
        # ic(fullscreen)
        ic(media, skip_ahead)
    skip_set = set()  # channels to skip for the rest of this run
    if media:
        iterator = media
    else:
        # no explicit paths given: read bytes paths from the unmp message stream
        iterator = unmp(
            verbose=verbose,
            valid_types=[
                bytes,
            ],
        )
    for index, m in enumerate(iterator):
        path = Path(os.fsdecode(m))
        if verbose:
            ic(path)
        try:
            chan = extract_chan(
                path=path,
                verbose=verbose,
            )
            if chan in skip_set:
                continue
        except ValueError:
            # path has no extractable channel; play it anyway
            pass
        try:
            play(
                media=path,
                novideo=novideo,
                noaudio=noaudio,
                subtitles=subtitles,
                loop=loop,
                verbose=verbose,
                fullscreen=fullscreen,
                skip_ahead=skip_ahead,
            )
        except PlayChanLaterError as e:
            # play() asked to defer this channel: remember it and skip its files
            chan = e.args[0]
            skip_set.add(chan)
def remote_add_origin(*, app_path: Path, local: bool, app_name: str, hg: bool, verbose: int, ):
    """Point the repo at its github origin and write an enable_github.sh helper."""
    if hg:
        raise NotImplementedError('hg')
    repo_config_command = f"git remote add origin [email protected]:jakeogh/{app_name}.git"
    ic(repo_config_command)
    if local:
        ic('local == True, skipping:', repo_config_command)
    else:
        with chdir(app_path):
            os.system(repo_config_command)
    script_lines = [
        "#!/bin/sh",
        'hub create {}'.format('jakeogh/' + app_name),
        repo_config_command,
        'git push --set-upstream origin master',
        'touch .push_enabled',
        "\n",
    ]
    output_file = app_path / Path('enable_github.sh')
    # 'x' mode refuses to clobber an existing helper script
    with open(output_file, 'x', encoding='utf8') as fh:
        fh.write("\n".join(script_lines))
def last_cid( ctx, verbose: bool, debug: bool, ipython: bool, ):
    """Print the highest pubchem_compound_cid currently in the database."""
    database = ctx.obj['database']
    config, config_mtime = click_read_config(
        click_instance=click,
        app_name=ctx.obj['appname'],
        verbose=verbose,
        debug=debug,
    )
    if verbose:
        ic(config, config_mtime)
    query = "SELECT MAX(pubchem.pubchem_compound_cid) from pubchem"
    with self_contained_session(db_url=database) as session:
        for index, match in enumerate(session.bind.execute(query).fetchone()):
            ic(index, match)
        if ipython:
            import IPython
            IPython.embed()
def dbquery( ctx, verbose: bool, debug: bool, ipython: bool, null: bool, ):
    '''open a session to the configured database for ad-hoc queries

    example:
        session.bind.execute("select column_name, data_type, character_maximum_length from INFORMATION_SCHEMA.COLUMNS where table_name = 'pubchem'").fetchall()
    '''
    database = ctx.obj['database']
    config, config_mtime = click_read_config(
        click_instance=click,
        app_name=ctx.obj['appname'],
        verbose=verbose,
        debug=debug,
    )
    if verbose:
        ic(config, config_mtime)
    with self_contained_session(db_url=database) as session:
        if verbose:
            ic(session)
        if ipython:
            import IPython
            IPython.embed()
def parse_rsync_log_to_list( *, email_address: str, gpgMaildir_archive_folder: Path, ):
    """Extract newly-transferred gpgMaildir message paths from the last rsync log.

    Reads /dev/shm/.gpgmda_rsync_last_new_mail_<email_address> and returns a
    list of Paths under gpgMaildir_archive_folder for every '>f…' transfer
    line that mentions gpgMaildir and is not an 'exists' line.
    """
    ic()
    # fixed: previously the same name held the log path and then its contents
    rsync_log_path = '/dev/shm/.gpgmda_rsync_last_new_mail_' + email_address
    full_path_list = []
    with open(rsync_log_path, 'r', encoding='utf8') as fh:
        for line in fh:  # stream instead of readlines()
            line = line.strip()  # remove newlines
            if 'exists' in line:
                continue
            if 'gpgMaildir' not in line:
                continue
            # '>f' marks an incoming file transfer in rsync --itemize-changes output
            if not line.startswith('>f'):
                continue
            new_gpgmda_file_path = gpgMaildir_archive_folder / Path(line.split(' ')[1])
            ic(new_gpgmda_file_path)
            full_path_list.append(new_gpgmda_file_path)
    return full_path_list
def indexes( ctx, verbose: bool, debug: bool, ipython: bool, ):
    """List the postgres indexes defined on the pubchem table."""
    database = ctx.obj['database']
    config, config_mtime = click_read_config(
        click_instance=click,
        app_name=ctx.obj['appname'],
        verbose=verbose,
        debug=debug,
    )
    if verbose:
        ic(config, config_mtime)
    query = "SELECT * FROM pg_indexes WHERE tablename = 'pubchem';"
    with self_contained_session(db_url=database) as session:
        for index, match in enumerate(session.bind.execute(query).fetchall()):
            ic(index, match)
        if ipython:
            import IPython
            IPython.embed()
def __iter__(self):
    """Yield every element of the backing redis key, whatever its type."""
    if not hasattr(self, 'type'):
        self._connect()
    if self.type in ['set', 'hash']:
        # sets and hashes are walked with their cursor-based SCAN variants
        if self.type == 'set':
            scan_name = 'sscan'
        elif self.type == 'hash':
            scan_name = 'hscan'
        else:
            raise ValueError(self.type)
        scan = getattr(self.r, scan_name)
        cursor, values = scan(self.key)
        if self.debug:
            ic(cursor, type(values), len(values))
        yield from values
        # redis signals the end of a SCAN sequence with cursor == 0
        while cursor != 0:
            cursor, values = scan(self.key, cursor)
            if self.debug:
                ic(cursor, type(values), len(values))
            yield from values
    elif self.type == 'list':
        yield from self.r.lrange(self.key, 0, -1)
    elif self.type == 'zset':
        yield from self.r.zrange(self.key, 0, -1, desc=True)
    else:
        raise RedisKeyTypeNotFoundError(self.type)
def raise_multiple():
    """Raise ValueError or TypeError at random (coin flip)."""
    choice = round(random.random())
    ic(choice)
    if choice == 0:
        raise ValueError
    elif choice == 1:
        raise TypeError
def move_terminal_text_up_one_page():
    """Scroll the terminal up one page by printing one blank line per row to stderr.

    Asks the terminal for its row count via `tput lines`.
    """
    ic('moving terminal text up one page')
    # subprocess.run replaces the manual Popen/communicate dance
    tput_stdout = subprocess.run(['tput', 'lines'], stdout=subprocess.PIPE, check=True).stdout
    line_count = int(tput_stdout.decode('utf8').strip())
    for _ in range(line_count):
        print('', file=sys.stderr)
def check_df(path: Path):
    """Raise ValueError unless `path` appears somewhere in `df -h` output."""
    path_str = path.as_posix()
    found = False
    for df_line in sh.df("-h").splitlines():
        if path_str in df_line:
            ic(df_line)
            found = True
    if not found:
        raise ValueError(f"{path_str} not in df -h output")
def validate_ram_size(ctx, param, vm_ram):
    """click callback: parse a human-friendly RAM size, reject sizes >= host RAM.

    Returns the requested size in bytes.
    """
    ic(vm_ram)
    sysram_bytes = virtual_memory().total
    # an int is already a byte count; anything else goes through the parser
    vm_ram_bytes = vm_ram if isinstance(vm_ram, int) else humanfriendly.parse_size(vm_ram)
    if vm_ram_bytes >= sysram_bytes:
        sysram_human = humanfriendly.format_size(sysram_bytes)
        vm_ram_human = humanfriendly.format_size(vm_ram_bytes)
        message = 'You entered {0} for --vm-ram but the host system only has {1}. Exiting.'.format(vm_ram_human, sysram_human)
        raise click.BadParameter(message)
    return vm_ram_bytes
def address_db_build( ctx, email_address: str, ):
    '''build address database for use with address_query'''
    ic()
    # build_paths returns the per-account folder/config layout
    paths = ctx.invoke(build_paths, email_address=email_address)
    update_notmuch_address_db_build(
        email_address=email_address,
        email_archive_folder=paths.email_archive_folder,
        gpgmaildir=paths.gpgmaildir,
        notmuch_config_file=paths.notmuch_config_file,
        notmuch_config_folder=paths.notmuch_config_folder,
    )
def parse_pubchem_sdtags(content, verbose=False):
    """Parse the PubChem SD-tag description document into {tag_name: description}.

    `content` is the utf8-encoded bytes of the document.  The document splits
    into a preamble, a body (after the "PubChem Substance Associated SD
    Fields" header) and a changelog (after "Document Version History"); tag
    names are the body lines indented by exactly one space before an
    uppercase letter.
    """
    assert isinstance(content, bytes)
    text = content.decode('utf8')
    if verbose:
        ic(text)
    sdf_format_dict = {"preamble": '', "body": '', "changelog": ''}
    section = "preamble"
    for raw_line in text.splitlines():
        raw_line = raw_line + '\r\n'
        assert isinstance(raw_line, str)
        if raw_line.startswith("PubChem Substance Associated SD Fields"):
            section = "body"
            continue
        if raw_line.startswith("Document Version History"):
            section = "changelog"
            continue
        sdf_format_dict[section] += raw_line

    sdf_keys_dict = {}
    current_key = False
    in_description = False
    for body_line in sdf_format_dict['body'].splitlines():
        # a tag name is one space followed by an uppercase letter
        if re.match(r" [A-Z]", body_line):
            current_key = body_line.strip().replace(' ', '_')
            sdf_keys_dict[current_key] = ''
            in_description = True
            continue
        if in_description:
            assert current_key
            sdf_keys_dict[current_key] += body_line
    return sdf_keys_dict
def parse_url(repo_url: str, *,
              apps_folder: Path,
              verbose: int,
              keep_underscore: bool = False,  # for rename
              ):
    """Derive (app_name, app_user, app_module_name, app_path) from a repo URL.

    app_name is lowercased and (unless keep_underscore) de-underscored;
    app_module_name is the importable form with dashes turned back into
    underscores; app_path is apps_folder / app_name.
    """
    if verbose:
        ic(repo_url)
    if repo_url.startswith('git:github.com:'):
        # scp-like syntax: everything after the last ':' is 'user/name.git'.
        # NOTE(review): this branch keeps 'user/name' (slash included) as
        # app_name, unlike the urlparse branch below which takes only the
        # last path component — confirm this is intentional.
        app_name = repo_url.split(':')[-1].split('.git')[0]
        app_user = repo_url.split(':')[-1].split('/')[0]
    else:
        url_parsed = urlparse(repo_url)
        if verbose:
            ic(url_parsed)
        repo_url_path = Path(url_parsed.path)
        app_name = repo_url_path.parts[-1]
        app_user = repo_url_path.parts[-2]
    app_name = app_name.lower()
    if not keep_underscore:
        app_name = app_name.replace('_', '-')
    # dashes are invalid in python import names
    app_module_name = app_name.replace('-', '_')
    ic(app_module_name)
    app_path = apps_folder / Path(app_name)
    ic(app_path)
    return app_name, app_user, app_module_name, app_path
def cli( ctx, hostnames: Sequence[str], verbose: Union[bool, int, float], verbose_inf: bool, dict_input: bool, ):
    """Walk the given hostnames, logging each (with its index) when verbose."""
    index = 0
    for index, hostname in enumerate(hostnames):
        if verbose:
            ic(index, hostname)
def replace_match_pairs_in_file(*, path: Path, match_pairs: tuple, verbose: int, ) -> None:
    """Apply each (old, new) replacement pair to the file at `path`.

    Pairs whose old and new text are identical are skipped.
    """
    assert isinstance(match_pairs, tuple)
    for old_text, new_text in match_pairs:
        if old_text == new_text:
            continue  # nothing to do for an identical pair
        ic(path, old_text, new_text)
        replace_text(
            path=path,
            match=old_text,
            replacement=new_text,
            verbose=verbose,
        )
def list_keys(ctx):
    """Print every redis key record produced by keys_and_sizes()."""
    obj = ctx.obj
    iterator = keys_and_sizes(r=obj['r'])
    for index, value in enumerate_input(
            iterator=iterator,
            null=obj['null'],
            progress=obj['progress'],
            skip=False,
            head=False,
            tail=False,
            debug=obj['debug'],
            verbose=obj['verbose'],):
        if obj['verbose']:
            ic(index, value)
        print(value, end=obj['end'])
def start_alot( *, email_address, email_archive_folder, verbose=False, ):
    """Generate alot config/theme for email_address and launch alot on the archive.

    Writes the generated config and theme under /dev/shm, then runs alot via
    os.system against the notmuch database under email_archive_folder.
    """
    ic()
    check_for_notmuch_database(email_archive_folder=email_archive_folder)
    alot_config = subprocess.Popen(
        ["gpgmda-client-make-alot-config.sh", email_address],
        stdout=subprocess.PIPE).communicate()
    alot_theme = subprocess.Popen(["gpgmda-client-make-alot-theme.sh"],
                                  stdout=subprocess.PIPE).communicate()
    # fixed: context managers instead of bare open()/close() pairs, so the
    # generated files are flushed and closed even if a write fails
    with open('/dev/shm/__alot_config_' + email_address, 'w', encoding='utf8') as alot_config_f:
        alot_config_f.write(alot_config[0].decode('UTF8'))
    with open('/dev/shm/__alot_theme_' + email_address, 'w', encoding='utf8') as alot_theme_f:
        alot_theme_f.write(alot_theme[0].decode('UTF8'))
    notmuch_config_folder = email_archive_folder / Path('_notmuch_config')
    notmuch_config_file = notmuch_config_folder / Path('.notmuch_config')
    maildirs_folder = email_archive_folder / Path('_Maildirs')
    ic('starting alot')
    os.system(' '.join(['alot', '--version']))
    move_terminal_text_up_one_page(
    )  # so alot does not overwrite the last messages on the terminal
    alot_config_file = Path('/dev/shm/__alot_config_' + email_address)
    if verbose:
        ic(alot_config_file)
    alot_command = ' '.join([
        '/usr/bin/alot', '-C', '256', '--debug-level=debug',
        '--logfile=/dev/shm/__alot_log', '--notmuch-config',
        notmuch_config_file.as_posix(), '--mailindex-path',
        maildirs_folder.as_posix(), '-c', alot_config_file.as_posix()
    ])
    if verbose:
        ic(alot_command)
    alot_p = os.system(alot_command)
    if verbose:
        ic(alot_p)
def namespaces_and_sizes(r):
    """Aggregate redis keys by '#'-delimited namespace and print a summary table.

    For each record from keys_and_sizes(r) the namespace is the text before
    the first '#'.  Prints per-namespace key count, total value count, memory
    (MB) and the set of key types — once as plain lines and once as a
    PrettyTable.  Keys with no '#' are reported to stderr as broken.
    """
    namespaces = set()
    namespace_count: DefaultDict[str, int] = defaultdict(int)
    namespace_size: DefaultDict[str, int] = defaultdict(int)
    namespace_values: DefaultDict[str, int] = defaultdict(int)
    namespace_types: DefaultDict[str, set] = defaultdict(set)
    broken_namespaces = set()
    for result in keys_and_sizes(r):
        #ic(result)
        key, key_type, length, key_memory_used_bytes, key_memory_used_kbytes, key_memory_used_mbytes = result[:]
        if '#' in key:
            namespace = key.split('#')[0]
            if namespace not in namespaces:  # redundant for a set, just to print each namespace once while working
                ic(namespace, key)
            namespaces.add(namespace)
            namespace_count[namespace] += 1
            #try:
            namespace_size[namespace] += key_memory_used_bytes
            #except TypeError:
            #    pass
            namespace_values[namespace] += length
            namespace_types[namespace].add(key_type)
        else:
            # no '#' separator: cannot assign this key to a namespace
            broken_namespaces.add(key)
    ns_list = list(namespaces)
    ns_list.sort()
    output_table = PrettyTable()
    output_table.field_names = ['name', 'count', 'values', 'size', 'types']
    for namespace in ns_list:
        type_list = [t for t in namespace_types[namespace]]
        print(namespace, namespace_count[namespace], namespace_values[namespace], str(int(namespace_size[namespace] / 1024 / 1024)) + 'MB', type_list)
        output_table.add_row([namespace, namespace_count[namespace], namespace_values[namespace], str(int(namespace_size[namespace] / 1024 / 1024)) + 'MB', type_list])
    print(output_table)
    if broken_namespaces:
        print("\n\nlen(broken_namesapces):", len(broken_namespaces), file=sys.stderr)
        for ns in broken_namespaces:
            print(ns, file=sys.stderr)
def nineify(ctx, app):
    """Copy a gentoo ebuild tree for group/name into the local jakeogh overlay."""
    not_root()
    assert '/' in app
    group, name = app.split('/')
    ic(group)
    ic(name)
    relative_destination = Path(group) / Path(name)
    template_path = Path("/var/db/repos/gentoo") / relative_destination
    ic(template_path)
    destination = Path("/home/cfg/_myapps/jakeogh") / relative_destination
    ic(template_path, destination)
    try:
        shutil.copytree(template_path, destination)
    except FileExistsError as e:
        # already nineified: report and carry on
        ic(e)
def notmuch_query( ctx, email_address: str, query: str, ):
    '''execute arbitrary notmuch query

    example: notmuch search --output=files "thread:000000000003c194"'''
    ic()
    paths = ctx.invoke(build_paths, email_address=email_address)
    ic(query)
    run_notmuch(
        mode="query_notmuch",
        email_address=email_address,
        query=query,
        email_archive_folder=paths.email_archive_folder,
        gpgmaildir=paths.gpgmaildir,
        notmuch_config_file=paths.notmuch_config_file,
        notmuch_config_folder=paths.notmuch_config_folder,
    )
def afew_query( ctx, email_address: str, query: str, ):
    '''execute arbitrary afew query'''
    ic()
    paths = ctx.invoke(build_paths, email_address=email_address)
    ic(query)
    run_notmuch(
        mode="query_afew",
        email_address=email_address,
        query=query,
        email_archive_folder=paths.email_archive_folder,
        gpgmaildir=paths.gpgmaildir,
        notmuch_config_file=paths.notmuch_config_file,
        notmuch_config_folder=paths.notmuch_config_folder,
    )
def _connect(self):
    """Open the redis connection and resolve/validate this key's type.

    If the key does not exist yet ('none'), self.key_type must have been
    supplied so we know what to create; if it exists, a supplied key_type
    must agree with what redis reports.
    """
    self.r = redis.StrictRedis(host=self.ip, port=self.port)
    self.type = self.r.type(self.key).decode('utf8')
    if self.type == 'none':
        # key not present server-side yet
        if self.verbose:
            ic('uncreated new key:', self.key, self.key_type)
        if self.key_type is None:
            raise ValueError(
                'key:', self.key, 'does not exist',
                'key_type must be specified to create a new key')
        self.type = self.key_type
    elif self.key_type is not None and self.key_type != self.type:
        raise ValueError(self.type, 'does not match', self.key_type)
def clone_repo(*, branch: str,
               repo_url: str,
               apps_folder: Path,
               template_repo_url: str,
               app_path: Path,
               app_group: str,
               hg: bool,
               local: bool,
               verbose: int,
               ):
    """Clone repo_url (or a template repo) into app_path, optionally on a new branch.

    When template_repo_url is given and its app name differs from repo_url's,
    the cloned tree is renamed in place instead of forked.
    """
    app_name, app_user, _, _ = parse_url(repo_url, apps_folder=apps_folder, verbose=verbose,)
    rename_cloned_repo = False
    if template_repo_url:
        template_app_name, template_app_user, _, _ = parse_url(template_repo_url, apps_folder=apps_folder, verbose=verbose,)
        repo_to_clone_url = template_repo_url
        if template_app_name != app_name:
            rename_cloned_repo = True
    else:
        repo_to_clone_url = repo_url
    if hg:
        sh.hg('clone', repo_to_clone_url, str(app_path))
    else:
        sh.git.clone(repo_to_clone_url, str(app_path))
    if branch != "master":
        branch_cmd = "git checkout -b " + '"' + branch + '"'
        ic(branch_cmd)
        # NOTE(review): this runs in the current working directory, not inside
        # app_path (compare remote_add_origin, which wraps os.system in
        # chdir(app_path)) — confirm the caller chdirs before calling this
        os.system(branch_cmd)
    if not rename_cloned_repo:  # when renaming a template repo, dont want to fork if its one of my repos
        git_fork_cmd = "hub fork"
        # NOTE(review): same working-directory caveat as the checkout above
        os.system(git_fork_cmd)
    else:
        rename_repo_at_app_path(app_path=app_path,
                                app_group=app_group,
                                local=local,
                                hg=hg,
                                old_name=template_app_name,
                                new_name=app_name,
                                verbose=verbose,
                                )
def replace_text(path: Path, match: str, replacement: str, verbose: int, ) -> None:
    """Replace every occurrence of `match` with `replacement` in the file at `path`."""
    if verbose:
        ic(match, replacement)
    # the underlying helper works on bytes, so encode both sides up front
    match_bytes = match.encode('utf8')
    replacement_bytes = replacement.encode('utf8')
    replace_text_in_file(
        path=path,
        match=match_bytes,
        replacement=replacement_bytes,
        output_fh=None,
        read_mode='rb',
        write_mode='wb',
        stdout=False,
        remove_match=False,
        verbose=verbose,
    )
def find( ctx, match: str, verbose: bool, cid: bool, debug: bool, ipython: bool, ):
    """Search the pubchem table by IUPAC-name substring (default) or exact CID.

    Prints each matching row as a humanized {column: value} dict via ic(),
    dropping empty columns.
    """
    assert match
    database = ctx.obj['database']
    config, config_mtime = click_read_config(
        click_instance=click,
        app_name=ctx.obj['appname'],
        verbose=verbose,
        debug=debug,
    )
    if verbose:
        ic(config, config_mtime)
    if not cid:
        # SECURITY: `match` is interpolated directly into the SQL string —
        # injectable; this should use bound query parameters instead of format()
        query = "SELECT * from pubchem WHERE pubchem.pubchem_iupac_name LIKE '%%{}%%' ORDER BY pubchem_exact_mass DESC".format(
            match)
    else:
        # same injection caveat applies here
        query = "SELECT * from pubchem WHERE pubchem_compound_cid = '{}'".format(
            match)
    with self_contained_session(db_url=database) as session:
        result = session.bind.execute(query)
        result_keys = result.keys()
        for index, match in enumerate(result.fetchall()):
            result_zip = zip(result_keys, match)
            #result_dict = {k.replace('pubchem_', ''): v for (k, v) in result_zip if v}
            # drop empty/None columns before display
            result_dict = {k: v for (k, v) in result_zip if v}
            humanized_result_dict = humanize_result_dict(result_dict)
            ic(index, humanized_result_dict)
            #ic(result_keys)
        if ipython:
            import IPython
            IPython.embed()
def add(self, *value: str, index=None, verbose=False):
    """Add value(s) to the backing redis key (zset, set, or list).

    For zsets a single value is added with score `index` (falling back to the
    current time when index is falsy).  When self.hash_values is set, the
    value is replaced by its truncated hash before storage.  Returns the
    redis reply; raises RedisKeyTypeNotFoundError for unsupported key types.
    """
    #ic(value)
    if not hasattr(self, 'type'):
        self._connect()  # lazily resolve the key type on first use
    if self.add_disabled:
        raise ValueError(
            'hash_length was not specified and hash_values is True, so adding to the key is disabled'
        )
    if self.hash_values:
        # NOTE(review): `value` is a tuple here but is passed as string= —
        # confirm generate_truncated_string_hash accepts a tuple
        value = generate_truncated_string_hash(
            string=value,
            algorithm=self.algorithm,
            length=self.hash_length,
            verbose=self.verbose,
            debug=self.debug,
        )
        #value = binascii.unhexlify(value)
    if self.type == 'zset':
        # zadd takes a {member: score} mapping; note index == 0 also falls
        # back to time.time() because of the truthiness test
        if index:
            mapping = {value[0]: index}
        else:
            mapping = {value[0]: time.time()}
        if verbose:
            ic(self.key, mapping)
        result = self.r.zadd(self.key, mapping)
        #ic('done adding to zset')
        return result
    if self.type == 'set':
        #ic(self.key, *value)
        result = self.r.sadd(self.key, *value)
        return result
    if self.type == 'list':
        #ic(self.key, *value)
        result = self.r.rpush(self.key, *value)
        return result
    #if self.type == 'hash':
    #    return result
    raise RedisKeyTypeNotFoundError(self.type)