def publish(self):
    # Build an authorized_keys file from the configured keys and upload it
    # to self.server with sftp.  In trial-run mode the file is uploaded as
    # authorized_keys.provisional instead of replacing the live file.
    narrate('publishing authorized_keys to', self.server)
    prov = '.provisional' if self.trial_run else ''
    entries = [
        fmt("# This file was generated by sshdeploy on {date}.")
    ]
    if self.include:
        # user-supplied verbatim content is placed ahead of generated keys
        entries += [
            '\n'.join([
                fmt('# Contents of {self.include_file}:'),
                self.include
            ])
        ]
    for name in sorted(self.keys.keys()):
        key = self.keys[name]
        comment = self.comment[name]
        # comment may be a single string or a list of comment lines
        comment = [comment] if is_str(comment) else comment
        restrictions = self.restrictions[name]
        if not is_str(restrictions):
            restrictions = ','.join(restrictions)
        # cull() drops the restrictions field when it is empty
        restricted_key = ' '.join(cull([restrictions, key]))
        entries.append('\n'.join(comment + [restricted_key]))

    # delete any pre-existing provisional files
    # the goal here is to leave a clean directory when not trial-run
    try:
        run_sftp(self.server, [
            fmt('rm .ssh/authorized_keys.provisional')
        ])
    except OSError as err:
        # best effort — the provisional file may simply not exist
        pass

    # now upload the new authorized_keys file
    try:
        authkey = to_path('authorized_keys.%s' % self.server)
        with authkey.open('w') as f:
            f.write('\n\n'.join(entries) + '\n')
        authkey.chmod(0o600)
        if self.bypass:
            # bypass mode: generate the file locally but do not upload it
            warn(
                'You must manually upload',
                fmt('<keydir>/authorized_keys.{self.server}.'),
                culprit=self.server
            )
        else:
            run_sftp(self.server, [
                fmt('put -p {authkey} .ssh/authorized_keys{prov}')
            ])
    except OSError as err:
        error(os_error(err))
def __init__(self, question, length = 3, *, answer = None, dictionary = None,
    master = None, version = None, sep = ' ', prefix = '', suffix = '',
    is_secret = True,
):
    # A security question whose answer is a generated pass-phrase unless an
    # explicit answer is provided.
    self.question = question
    try:
        self.length = int(length)
    except ValueError:
        raise PasswordError(
            'expecting an integer for length.',
            culprit=error_source()
        )
    if dictionary and not is_str(dictionary):
        # caller passed a pre-built word source
        self.alphabet = dictionary
    else:
        # None or a dictionary name: load the word list
        self.alphabet = Dictionary(dictionary).get_words
    self.master = master
    self.version = version
    self.shift_sort = False
    self.sep = sep
    self.prefix = prefix
    self.suffix = suffix
    self.is_secret = is_secret
    if answer:
        # an explicit answer overrides the generator; this is also used
        # when producing the archive
        self.secret = str(answer)
def render_command(cmd, option_args=True):
    """Format a command as a multi-line string suitable for logging.

    cmd may be a string (split with shlex) or a sequence of arguments.
    option_args maps each option name to the number of arguments it
    consumes; an option is kept on one line with its arguments.  Passing
    True selects a built-in table of duplicity options.
    """
    if is_str(cmd):
        cmd = shlex.split(cmd)
    else:
        # Cannot use to_str() because it can change some arguments when not intended.
        # This is particularly problematic for the duplicity arguments in embalm.
        cmd = [str(c) for c in cmd]
    if option_args is True:
        # default: argument counts for the duplicity options embalm uses
        duplicity_option_args = {
            '--gpg-binary': 1,
            '--log-file': 1,
            '--archive-dir': 1,
            '--name': 1,
            '--sftp-command': 1,
            '--file-to-restore': 1,
            '--ssh-backend': 1,
            '--exclude': 1,
            '--time': 1,
        }
        option_args = duplicity_option_args
    cmd.reverse()
    lines = []
    while cmd:
        opt = cmd.pop()
        num_args = option_args.get(opt, 0)
        argument = [opt]
        for i in range(num_args):
            if not cmd:
                # robustness fix: an option at the end of the command is
                # missing its arguments; render what we have rather than
                # raising IndexError
                break
            argument.append(quote(cmd.pop()))
        lines.append(' '.join(argument))
    return ' \\\n '.join(lines)
def check(self):
    """Validate settings, install defaults, and derive helper values.

    Raises Error when a required setting is missing or working_dir is
    invalid.
    """
    # add some possibly useful placeholders into settings
    home_dir = os.environ.get("HOME")
    if home_dir and "home_dir" not in self.settings:
        self.settings["home_dir"] = home_dir
    self.settings["config_dir"] = CONFIG_DIR
    self.settings["log_dir"] = DATA_DIR
    self.do_not_expand = Collection(self.settings.get("do_not_expand", ""))

    # gather the string valued settings together (can be used by resolve)
    self.str_settings = {k: v for k, v in self.settings.items() if is_str(v)}

    # complain about required settings that are missing
    missing = []
    required_settings = "repository".split()
    for each in required_settings:
        if not self.settings.get(each):
            missing.append(each)
    if missing:
        m = conjoin(missing)
        raise Error(f"{m}: no value given for {plural(m):setting}.")

    self.working_dir = to_path(self.settings.get("working_dir", "/"))
    if not self.working_dir.exists():
        # bug fix: the f prefix was missing, so the literal text
        # "{self.working_dir!s} not found." was reported to the user
        raise Error(f"{self.working_dir!s} not found.", culprit="working_dir")
    if not self.working_dir.is_absolute():
        raise Error("must be an absolute path.", culprit="working_dir")
def to_python(obj, _level=0):
    """Recursively convert object to string with reasonable formatting"""

    def pad(extra=0):
        # indentation appropriate for the requested relative nesting level
        return (_level + extra) * ' '

    lines = []
    kind = type(obj)
    if kind == dict:
        lines.append('{')
        for key in sorted(obj.keys()):
            lines.append(
                '%s%r: %s,' % (pad(1), key, to_python(obj[key], _level + 1))
            )
        lines.append('%s}' % pad(0))
    elif kind == list:
        lines.append('[')
        lines.extend(
            '%s%s,' % (pad(1), to_python(item, _level + 1)) for item in obj
        )
        lines.append('%s]' % pad(0))
    elif kind == tuple:
        lines.append('(')
        lines.extend(
            '%s%s,' % (pad(1), to_python(item, _level + 1)) for item in obj
        )
        lines.append('%s)' % pad(0))
    elif kind == set:
        lines.append('set([')
        lines.extend(
            '%s%s,' % (pad(1), to_python(item, _level + 1))
            for item in sorted(obj)
        )
        lines.append('%s])' % pad(0))
    elif is_str(obj) and '\n' in obj:
        # multi-line strings are rendered as triple-quoted blocks
        lines.append('"""' + indent(dedent(obj), pad(1)) + pad(0) + '"""')
    else:
        lines.append(repr(obj))
    return '\n'.join(lines)
def conceal(cls, plaintext, decorate=False, encoding=None, symmetric=False,
            gpg_ids=None):
    # Encrypt plaintext with GPG and return the armored ciphertext,
    # optionally wrapped in a GPG("""...""") decoration.
    plaintext = str(plaintext).encode(encoding or get_setting('encoding'))
    if not gpg_ids:
        gpg_ids = get_setting('gpg_ids', [])
    if is_str(gpg_ids):
        gpg_ids = gpg_ids.split()
    encrypted = cls.gpg.encrypt(
        plaintext, gpg_ids, armor=True, symmetric=bool(symmetric)
    )
    if not encrypted.ok:
        # cull() drops stderr when gpg did not provide it
        raise PasswordError(' '.join(
            cull(['unable to encrypt.', getattr(encrypted, 'stderr', None)])
        ))
    ciphertext = str(encrypted)
    if decorate:
        return 'GPG("""\n%s""")' % indent(ciphertext)
    return ciphertext
def run(cls, command, args, settings, options):
    # Implements the 'settings' command: either list the available
    # settings (--available) or show the values currently in effect.
    # read command line
    cmdline = docopt(cls.USAGE, argv=[command] + args)
    show_available = cmdline["--available"]
    unknown = Color("yellow")  # color used for unrecognized setting names
    known = Color("cyan")
    if show_available:
        output("Emborg settings:")
        for name, desc in EMBORG_SETTINGS.items():
            output(f"{known(name):>33s}: {desc}")
        output()
        output("Borg settings:")
        for name, attrs in BORG_SETTINGS.items():
            output(f"{known(name):>33s}: {attrs['desc']}")
        return 0
    if settings:
        for k, v in settings:
            is_known = k in EMBORG_SETTINGS or k in BORG_SETTINGS
            key = known(k) if is_known else unknown(k)
            if k == "passphrase":
                v = "<set>"  # never reveal the passphrase
            output(f"{key:>33}: {render(v, level=6)}")
            try:
                # also show the resolved value when it contains placeholders
                # NOTE(review): continuation pad (24) differs from the
                # 33-wide key column above — confirm the offset is intended
                if is_str(v) and "{" in v:
                    output(
                        f'{"":>24}{render(settings.resolve(v), level=6)}')
            except Error:
                pass  # resolution is best effort when displaying
def save(self, contents, gpg_ids=None):
    # Write contents to self.path, encrypting with GPG when the path
    # calls for it, then restrict the file permissions.
    path = self.path
    if not gpg_ids:
        gpg_ids = get_setting('gpg_ids', [])
    if is_str(gpg_ids):
        gpg_ids = gpg_ids.split()
    if not gpg_ids:
        raise Error('must specify GPG ID.')

    use_gpg, use_armor = self._choices()
    if not use_gpg:
        path.write_text(contents, get_setting('encoding'))
    else:
        try:
            encoded = contents.encode(get_setting('encoding'))
            encrypted = self.gpg.encrypt(encoded, gpg_ids, armor=use_armor)
            if encrypted.ok:
                if use_armor:
                    path.write_text(str(encrypted))
                else:
                    path.write_bytes(encrypted.data)
            else:
                # include gpg's stderr in the message when available
                msg = ' '.join(cull([
                    'unable to encrypt.',
                    getattr(encrypted, 'stderr', None)
                ]))
                raise Error(msg, culprit=path, sep='\n')
        except ValueError as err:
            raise Error(str(err), culprit=path)
    path.chmod(0o600)
def run_borg(self, cmd, args='', borg_opts=None, emborg_opts=()):
    # Assemble and execute one borg command with the configured
    # environment (passphrase, ssh command, etc.).
    # prepare the command
    os.environ.update(self.publish_passcode())
    os.environ['BORG_DISPLAY_PASSPHRASE'] = 'no'
    if self.ssh_command:
        os.environ['BORG_RSH'] = self.ssh_command
    executable = self.value('borg_executable', BORG)
    if borg_opts is None:
        borg_opts = self.borg_options(cmd, emborg_opts)
    command = (
        [executable]
        + cmd.split()
        + borg_opts
        + (args.split() if is_str(args) else args)
    )
    # log the borg-related environment, redacting the passphrase
    environ = {k: v for k, v in os.environ.items() if k.startswith('BORG_')}
    if 'BORG_PASSPHRASE' in environ:
        environ['BORG_PASSPHRASE'] = '<redacted>'
    narrate('setting environment variables:', render(environ))

    # check if ssh agent is present
    if self.needs_ssh_agent:
        for ssh_var in 'SSH_AGENT_PID SSH_AUTH_SOCK'.split():
            if ssh_var not in os.environ:
                warn(
                    'environment variable not found, is ssh-agent running?',
                    culprit=ssh_var)

    # run the command
    narrate('running:\n{}'.format(
        indent(render_command(command, borg_options_arg_count))))
    narrating = 'verbose' in emborg_opts or 'narrate' in emborg_opts
    # show subprocess output only when narrating
    modes = 'soeW' if narrating else 'sOEW'
    return Run(command, modes=modes, stdin='', env=os.environ, log=False)
def __init__(self, length = 4, *, dictionary = None, master = None,
    version = None, sep = ' ', prefix = '', suffix = '', is_secret = True,
):
    # A generated pass-phrase built from words drawn from a dictionary.
    try:
        self.length = int(length)
    except ValueError:
        raise PasswordError(
            'expecting an integer for length.',
            culprit=error_source()
        )
    if dictionary and not is_str(dictionary):
        # caller passed a pre-built word source
        self.alphabet = dictionary
    else:
        # None or a dictionary name: load the word list
        self.alphabet = Dictionary(dictionary).get_words
    self.master = master
    self.version = version
    self.shift_sort = False
    self.sep = sep
    self.prefix = prefix
    self.suffix = suffix
    self.is_secret = is_secret
def read_config():
    # Load the user's config, hashes, account-list, and user-key files
    # into the global Config, then initialize GPG.  Idempotent.
    if Config.get('READ'):
        return  # already read

    # First open the config file
    from .gpg import PythonFile
    path = get_setting('config_file')
    # the config file must be readable before GPG is initialized
    assert path.suffix.lower() not in ['.gpg', '.asc']
    config_file = PythonFile(path)
    try:
        contents = config_file.run()
        for k, v in contents.items():
            if k.startswith('_'):
                continue
            if k not in CONFIG_DEFAULTS:
                warn('%s: unknown.' % k, culprit=config_file)
                continue
            if k.endswith('_executable'):
                # sanity check: executables should be given by absolute path
                argv = v.split() if is_str(v) else list(v)
                path = to_path(argv[0])
                if not path.is_absolute():
                    warn(
                        'should use absolute path for executables.',
                        culprit=(config_file, k)
                    )
            Config[k] = v
        Config['READ'] = True
    except Error as err:
        # a missing config file is not fatal; defaults are used
        comment('not found.', culprit=config_file)

    # Now open the hashes file
    hashes_file = PythonFile(get_setting('hashes_file'))
    try:
        contents = hashes_file.run()
        Config.update({k.lower(): v for k,v in contents.items()})
    except Error as err:
        pass  # best effort

    # Now open the account list file
    account_list_file = PythonFile(get_setting('account_list_file'))
    try:
        contents = account_list_file.run()
        Config.update({k.lower(): v for k,v in contents.items()})
    except Error as err:
        pass  # best effort

    # initialize GPG
    from .gpg import GnuPG
    GnuPG.initialize()

    # Now read the user key file
    user_key_file = get_setting('user_key_file')
    if user_key_file:
        user_key_file = PythonFile(get_setting('user_key_file'))
        try:
            contents = user_key_file.run()
            Config.update({k.lower(): v for k,v in contents.items()})
        except Error as err:
            pass  # best effort
def __init__(self, collection):
    """Wrap *collection* so it can be treated uniformly.

    Strings are split at whitespace and real collections are kept as-is;
    any other value is treated as a scalar.
    """
    if is_str(collection):
        self.collection = collection.split()
    elif is_collection(collection):
        self.collection = collection
    else:
        # is scalar
        # bug fix: this was the set literal {None, collection}, which
        # disagrees with the {None: value} dict form used by the other
        # Collection variants in this file and would yield a spurious
        # None when iterated
        self.collection = {None: collection}
def __init__(self, collection, splitter=None):
    """Wrap *collection* so it can be treated uniformly.

    Strings are split using *splitter*, real collections are kept as-is,
    None becomes an empty list, and any other scalar is wrapped in a
    single-entry dict.
    """
    if is_str(collection):
        self.collection = collection.split(splitter)
        return
    if is_collection(collection):
        self.collection = collection
        return
    if collection is None:
        self.collection = []
        return
    # is scalar
    self.collection = {None: collection}
def flatten(collection, split=False):
    """Generator that yields the leaves of a nested collection.

    If *split* is true, a string passed at the top level is first split
    at whitespace (nested strings are not split).
    """
    if split and is_str(collection):
        collection = collection.split()
    if not is_collection(collection):
        yield collection
        return
    for member in collection:
        yield from flatten(member)
def pager(text):
    """Display *text* through the user's pager, falling back to plain output."""
    program = get_setting('use_pager')
    if not is_str(program):
        # a true but non-string setting means: use the environment's pager
        program = os.environ.get('PAGER', 'less')
    if program:
        try:
            Run([program], stdin=text, modes='WoEs')
        except Error as e:
            # report the failure, then fall back to direct output
            e.report(culprit=program)
        else:
            return
    output(text)
def p_type(p):
    "type : string kinds"
    # yacc action: build a named Type from the kind words, mappings, and
    # optional Struct collected in p[2]
    name = p[1]
    meta = {}
    kind = ' '.join(s for s in p[2] if is_str(s)).lower()
    if kind:
        meta['kind'] = kind
    for item in p[2]:
        if is_mapping(item):
            meta.update(item)
        elif isinstance(item, Struct):
            meta['struct'] = item
    p[0] = (name, Type(name=name, **meta))
def open_browser(cls, name, key=None):
    # Open one of the account's URLs in the requested browser.
    browser = cls.get_field("browser", default=None)
    if browser is None or is_str(browser):
        browser = StandardBrowser(name)

    # get the urls from the urls attribute
    if not key:
        key = getattr(cls, "default_url", None)
    urls = getattr(cls, "urls", [])
    if type(urls) != dict:
        if is_str(urls):
            urls = urls.split()
        # normalize to a dict keyed by None so keyless lookup works below
        urls = {None: urls}

    # get the urls from the url recognizers
    # currently urls from recognizers dominate over those from attributes
    discovery = getattr(cls, "discovery", ())
    for each in Collection(discovery):
        urls.update(each.all_urls())

    # select the urls
    try:
        urls = urls[key]
    except TypeError:
        # NOTE(review): when key is falsy this branch leaves urls
        # unselected and falls through — confirm intended
        if key:
            raise Error("keys are not supported with urls on this account.",
                        culprit=key)
    except KeyError:
        keys = cull(urls.keys())
        if keys:
            raise Error("unknown key, choose from %s." % conjoin(keys),
                        culprit=key)
        else:
            raise Error("keys are not supported with urls on this account.",
                        culprit=key)
    url = list(Collection(urls))[0]  # use the first url specified

    # open the url
    browser.run(url)
def __init__(self, args, expected, expected_type, cmp_dirs, remove):
    """Capture one emborg test case.

    args: arguments for the emborg command (string or list).
    expected: expected stdout/stderr, or None.
    expected_type: space-separated keywords 'regex', 'diff', 'error',
        'ignore' — regex: expected is a regular expression (otherwise the
        result must match verbatim); diff: exit status 1 expected;
        error: exit status 2 expected; ignore: output is not checked.
    cmp_dirs: pair of directories that, if given, should match exactly.
    remove: files or directories deleted before the test runs.

    args, expected, and cmp_dirs may contain the macros ⟪EMBORG⟫ (absolute
    path of the emborg directory) and ⟪DATE⟫ (today's date).
    """
    date = arrow.now().format("YYYY-MM-DD")

    def expand(text):
        # substitute the ⟪EMBORG⟫ and ⟪DATE⟫ macros
        text = text.replace("⟪EMBORG⟫", emborg_dir_wo_slash)
        return text.replace("⟪DATE⟫", date)

    if is_str(args):
        args = args.split()
    args = [expand(a) for a in args]
    if expected is not None:
        expected = expand(expected)
    if cmp_dirs:
        cmp_dirs = expand(cmp_dirs)

    self.args = args
    self.expected = expected
    self.expected_type = expected_type.split()
    self.cmp_dirs = cmp_dirs.split() if is_str(cmp_dirs) else cmp_dirs
    self.cmd = emborg_exe + self.args
    self.command = " ".join(self.cmd)
    self.remove = remove
    self.diffout = None
def __init__(self, collection, splitter=None, **kwargs):
    """Wrap *collection* so it can be treated uniformly.

    A callable *splitter* converts a string itself; otherwise strings are
    split with *splitter* unless it is False, in which case the string is
    treated as a scalar.  Real collections are kept as-is, None becomes an
    empty list, and any other scalar is wrapped in a single-entry dict.
    """
    if is_str(collection):
        if callable(splitter):
            self.collection = splitter(collection, **kwargs)
            return
        if splitter is not False:
            self.collection = collection.split(splitter)
            return
        # splitter is False: fall through and treat the string as a scalar
    if is_collection(collection):
        self.collection = collection
    elif collection is None:
        self.collection = []
    else:
        # is scalar
        self.collection = {None: collection}
def check(self):
    # Validate the settings, complaining about any required ones that are
    # missing.
    # gather the string valued settings together (can be used by resolve)
    self.str_settings = {
        k: v for k, v in self.settings.items() if is_str(v)
    }

    # complain about required settings that are missing
    required_settings = 'repository'.split()
    missing = [
        each for each in required_settings if not self.settings.get(each)
    ]
    if missing:
        missing = conjoin(missing)
        raise Error(f'{missing}: no value given for setting.')
def preprocess(cls, master, fileinfo, seen):
    # Prepare a newly loaded account class: dedent string attributes,
    # attach the master seed, normalize aliases, and register canonical
    # names while warning about duplicates.  Idempotent per class.
    # return if this account has already been processed
    if hasattr(cls, '_file_info_'):
        return  # account has already been processed

    # add fileinfo
    cls._file_info_ = fileinfo

    # dedent any string attributes
    for k, v in cls.__dict__.items():
        if is_str(v) and '\n' in v:
            setattr(cls, k, dedent(v))

    # add master seed
    # the name-mangled attribute is how an account opts out of the
    # file-level master seed
    if master and not hasattr(cls, '_%s__NO_MASTER' % cls.__name__):
        if not hasattr(cls, 'master_seed'):
            cls.master_seed = master
            cls._master_source_ = 'file'
        else:
            cls._master_source_ = 'account'

    # convert aliases to a list
    if hasattr(cls, 'aliases'):
        aliases = list(Collection(cls.aliases))
        cls.aliases = aliases
    else:
        aliases = []

    # canonicalize names and look for duplicates
    new = {}
    account_name = cls.get_name()
    path = cls._file_info_.path
    for name in [account_name] + aliases:
        canonical = canonicalize(name)
        Account._accounts[canonical] = cls
        if canonical in seen:
            if name == account_name:
                warn('duplicate account name.', culprit=name)
            else:
                warn('alias duplicates existing name.', culprit=name)
            # seen maps canonical name -> (account name, path)
            codicil('Seen in %s in %s.' % seen[canonical])
            codicil('And in %s in %s.' % (account_name, path))
            break
        else:
            new[canonical] = (account_name, path)
    seen.update(new)
def test_emborg_overdue(initialize, name, conf, args, expected, expected_type,
                        dependencies):
    # Run emborg-overdue with the given config and arguments and compare
    # its output (or error message) against the expectation.
    if skip_test(dependencies):
        return
    with cd(tests_dir):
        if conf:
            with open('.config/overdue.conf', 'w') as f:
                f.write(conf)
        try:
            argv = args.split() if is_str(args) else args
            overdue = Run(emborg_overdue_exe + argv, "sOEW")
            if 'regex' in expected_type.split():
                assert bool(re.fullmatch(expected, overdue.stdout)), name
            else:
                assert expected == overdue.stdout, name
        except Error as e:
            # a failing run must produce the expected error message
            assert str(e) == expected, name
def get_value(cls, field=None):
    """Get account value.

    Return a value from the account given a user friendly identifier or
    script.  Identifiers are *None* (the default field), *name* (a scalar
    value), or *name.key*/*name[key]* (a member of a dictionary or array;
    key is a string for dictionaries, an integer for arrays).  Scripts are
    strings with embedded attributes, e.g.
    *'username: {username}, password: {passcode}'*.

    Args:
        field (str): Field identifier or script.

    Returns:
        :class:`avendesora.AccountValue`: the desired value.
    """
    # fall back to the account's default field
    if not field:
        field = cls.get_scalar('default', default=None)

    # a string containing braces is treated as a script
    if is_str(field) and '{' in field and '}' in field:
        script = Script(field)
        script.initialize(cls)
        return AccountValue(str(script), script.is_secret, None, None, None)

    # otherwise field names a (possibly keyed) account attribute
    name, key = cls.split_field(field)
    value = cls.get_scalar(name, key)
    is_secret = cls.is_secret(name, key)
    try:
        desc = value.get_description()
    except AttributeError:
        desc = None
    return AccountValue(value, is_secret, name, key, desc)
def get_setting(name, default=None, expand=True):
    # Look up a setting, searching the user's Config first, then the
    # configurable defaults, then the non-configurable settings.
    name = name.lower()
    for table in (Config, CONFIG_DEFAULTS, NONCONFIG_SETTINGS):
        try:
            value = table[name]
            break
        except KeyError:
            continue
    else:
        return default
    if value is None:
        # an explicit None also selects the caller's default
        return default
    if name == 'gpg_ids':
        if is_str(value):
            value = value.split()
    elif expand and name.endswith('_file'):
        # file settings are resolved relative to the settings directory
        value = to_path(get_setting('settings_dir'), value)
    return value
def render(self, fmts=('{f} ({d}): {v}', '{f}: {v}')):
    """Return value formatted as a string.

    Args:
        fmts (collection of strings):
            A sequence of format strings tried in order; the first whose
            keys are all known is used.  Possible keys:

            |   n -- name (identifier for the first level of a field)
            |   k -- key (identifier for the second level of a field)
            |   f -- field (name.key)
            |   d -- description
            |   v -- value

            If none work, the value alone is returned.

    Returns:
        The value rendered as a string.
    """
    value = str(self.value)
    if '\n' in value:
        # display multi-line values indented on their own lines
        value = '\n' + indent(dedent(value),
                              get_setting('indent')).strip('\n')
    if is_str(fmts):
        fmts = (fmts,)

    # build list of arguments, deleting any that is not set
    candidates = [
        ('f', self.field), ('k', self.key), ('n', self.name),
        ('d', self.desc), ('v', value),
    ]
    args = {k: v for k, v in candidates if v}

    # format the arguments, use first format string that works
    for fmt in fmts:
        try:
            return fmt.format(**args)
        except KeyError:
            continue
    # nothing worked, just return the value
    return value
def render_key(s):
    # Render a dict key for output, quoting it when its content would
    # otherwise be misinterpreted.
    # NOTE(review): unlike the raises below, this one does not pass the
    # offending value as the first positional argument — confirm intended
    if not is_str(s):
        raise NestedTextError(template='keys must be strings.', culprit=s)
    stripped = s.strip(' ')
    if '\n' in s:
        raise NestedTextError(
            s, template='keys must not contain newlines.', culprit=repr(s))
    needs_quoting = (
        len(stripped) < len(s)      # leading or trailing spaces
        or s[:1] == "#"             # looks like a comment
        or s.startswith("- ")       # looks like a list item
        or s.startswith("> ")       # looks like a multiline string
        or ': ' in s                # contains a key/value separator
        or s[:1] + s[-1:] in ['""', "''"]  # already looks quoted
    )
    if needs_quoting:
        if '"' in s and "'" in s:
            raise NestedTextError(
                s,
                template=
                """keys that require quoting must not contain both " and '.""",
                culprit=s,
            )
        return repr(s)
    return s
def render_dict_item(key, dictionary, indent, rdumps):
    # Render one key/value pair of a dictionary, switching to the
    # multiline-key form when the key cannot be written inline.
    value = dictionary[key]
    if is_a_scalar(key):
        key = str(key)
    if not is_a_str(key):
        raise NestedTextError(template='keys must be strings.', culprit=key)
    needs_multiline_key = (
        not key
        or '\n' in key
        or key.strip(' ') != key
        or key[:1] == "#"
        or key[:2] in ["- ", "> ", ": "]
        or ': ' in key
    )
    if not needs_multiline_key:
        return add_prefix(key + ":", rdumps(value))
    key = "\n".join(": " + line if line else ":" for line in key.split('\n'))
    if is_str(value):
        # force use of multiline value with multiline keys
        return key + "\n" + add_leader(value, indent * ' ' + '> ')
    return key + rdumps(value)
def conceal(cls, plaintext, decorate=False, encoding=None, symmetric=False):
    # Encrypt plaintext with GPG using the configured IDs and return the
    # armored ciphertext, optionally wrapped in a GPG("""...""") decoration.
    plaintext = str(plaintext).encode(encoding or get_setting('encoding'))
    gpg_ids = get_setting('gpg_ids', [])
    if is_str(gpg_ids):
        gpg_ids = gpg_ids.split()
    encrypted = cls.gpg.encrypt(
        plaintext, gpg_ids, armor=True, symmetric=bool(symmetric)
    )
    if not encrypted.ok:
        # cull() drops stderr when gpg did not provide it
        raise Error(' '.join(cull([
            'unable to encrypt.',
            getattr(encrypted, 'stderr', None)
        ])))
    ciphertext = str(encrypted)
    if decorate:
        return 'GPG("""\n%s""")' % indent(ciphertext)
    return ciphertext
def get_setting(name, default=None, expand=True):
    # Look up a setting, searching the user's Config first, then the
    # configurable defaults, then the non-configurable settings.
    name = name.lower()
    for table in (Config, CONFIG_DEFAULTS, NONCONFIG_SETTINGS):
        try:
            value = table[name]
            break
        except KeyError:
            continue
    else:
        return default
    if value is None:
        # an explicit None also selects the caller's default
        return default
    if name == 'gpg_ids':
        if is_str(value):
            value = value.split()
    elif name.endswith('_dir'):
        value = Path(value)
    elif expand and name.endswith('_file'):
        # file settings are resolved relative to the settings directory
        value = get_setting('settings_dir') / value
    return value
def check(self):
    # Validate the settings, install defaults, and derive values needed
    # by the rest of the run.
    # gather the string valued settings together (can be used by resolve)
    self.str_settings = {
        k: v for k, v in self.settings.items() if is_str(v)
    }

    # complain about required settings that are missing
    missing = []
    for each in [
        'dest_server',
        'dest_dir',
        'src_dir',
        'ssh_backend_method',
    ]:
        if not self.settings.get(each):
            missing.append(each)
    if missing:
        missing = conjoin(missing)
        self.fail(f'{missing}: no value given.')

    # default the working_dir if it was not specified
    working_dir = self.settings.get('working_dir')
    if not working_dir:
        working_dir = self.resolve(DEFAULT_WORKING_DIR)
        self.settings['working_dir'] = working_dir
        self.str_settings['working_dir'] = working_dir

    # check the ssh_backend_method
    if self.ssh_backend_method not in ['option', 'protocol']:
        self.fail(
            f'{self.ssh_backend_method}:',
            'invalid value given for ssh_backend_method.',
        )

    # add the working directory to excludes
    # (so the backup never recurses into its own working area)
    excludes = self.settings.get('excludes', [])
    excludes.append(self.working_dir)
    self.settings['excludes'] = excludes
def save(self, contents, gpg_ids=None):
    # Write contents to self.path, encrypting with GPG when the path
    # calls for it; fall back to symmetric encryption when no GPG ID is
    # available.
    path = self.path
    if not gpg_ids:
        gpg_ids = get_setting('gpg_ids', [])
    if is_str(gpg_ids):
        gpg_ids = gpg_ids.split()
    if not gpg_ids:
        # raise PasswordError('must specify GPG ID.')
        log('no gpg id available, using symmetric encryption.')

    use_gpg, use_armor = self._choices()
    if use_gpg:
        try:
            encoded = contents.encode(get_setting('encoding'))
            if gpg_ids:
                encrypted = self.gpg.encrypt(encoded, gpg_ids,
                                             armor=use_armor)
            else:
                # no recipients: encrypt symmetrically instead
                encrypted = self.gpg.encrypt(encoded, None,
                                             symmetric='AES256',
                                             armor=use_armor)
            if not encrypted.ok:
                # include gpg's stderr in the message when available
                msg = ' '.join(
                    cull([
                        'unable to encrypt.',
                        getattr(encrypted, 'stderr', None)
                    ]))
                raise PasswordError(msg, culprit=path, sep='\n')
            else:
                path.write_bytes(encrypted.data)
        except ValueError as e:
            raise PasswordError(full_stop(e), culprit=path)
    else:
        path.write_text(contents, encoding=get_setting('encoding'))
    self.chmod()
def run(cls, command, args, settings, options):
    # Implements the 'create' command: run pre-backup scripts, create the
    # borg archive, run post-backup scripts, then optionally check and
    # prune the repository.
    # read command line
    cmdline = docopt(cls.USAGE, argv=[command] + args)

    # check for required settings
    src_dirs = render_paths(settings.src_dirs)
    if not src_dirs:
        raise Error('src_dirs: setting has no value.')

    # check the dependencies are available
    for each in settings.values('must_exist'):
        path = to_path(each)
        if not path.exists():
            raise Error(
                'does not exist, perform setup and restart.',
                culprit=each)

    # run prerequisites
    cmds = settings.value('run_before_backup')
    if is_str(cmds):
        cmds = [cmds]  # allow a single command to be given as a string
    for cmd in cull(cmds):
        narrate('running pre-backup script:', cmd)
        try:
            Run(cmd, 'SoEW')
        except Error as e:
            e.reraise(culprit=('run_before_backup', cmd.split()[0]))

    # run borg
    try:
        settings.run_borg(
            cmd='create',
            args=[settings.destination(True)]
                + render_paths(settings.src_dirs),
            emborg_opts=options,
        )
    except Error as e:
        if e.stderr and 'is not a valid repository' in e.stderr:
            e.reraise(
                codicil="Run 'emborg init' to initialize the repository.")
        else:
            raise

    # update the date files
    narrate('update date file')
    now = arrow.now()
    settings.date_file.write_text(str(now))

    # run any scripts specified to be run after a backup
    cmds = settings.value('run_after_backup')
    if is_str(cmds):
        cmds = [cmds]
    for cmd in cull(cmds):
        narrate('running post-backup script:', cmd)
        try:
            Run(cmd, 'SoEW')
        except Error as e:
            e.reraise(culprit=('run_after_backup', cmd.split()[0]))

    if cmdline['--fast']:
        return  # --fast skips the check and prune phases

    # prune the archives if requested
    try:
        # check the archives if requested
        # activity records which phase failed for the error codicil
        activity = 'checking'
        if settings.check_after_create:
            narrate('checking archive')
            check = CheckCommand()
            check.run('check', [], settings, options)

        activity = 'pruning'
        if settings.prune_after_create:
            narrate('pruning archives')
            prune = PruneCommand()
            prune.run('prune', [], settings, options)
    except Error as e:
        e.reraise(
            codicil=(
                f'This error occurred while {activity} the archives.',
                'No error was reported while creating the archive.'))
def main():
    """Check that backups of the configured repositories are not overdue.

    Reads the overdue settings file, examines the sentinel file of each
    repository, reports repositories whose backups are older than allowed,
    and optionally mails the maintainer.  Exits with a nonzero status when
    any repository is overdue or cannot be checked.
    """
    version = f'{__version__} ({__released__})'
    cmdline = docopt(__doc__, version=version)
    quiet = cmdline['--quiet']
    problem = False
    with Inform(flush=True, quiet=quiet, version=version) as inform:
        # read the settings file
        settings_file = PythonFile(CONFIG_DIR, OVERDUE_FILE)
        settings_filename = settings_file.path
        settings = settings_file.run()

        # gather needed settings
        default_maintainer = settings.get('default_maintainer')
        default_max_age = settings.get('default_max_age', 28)
        dumper = settings.get('dumper', f'{getusername()}@{gethostname()}')
        repositories = settings.get('repositories')
        root = settings.get('root')

        # process repositories table
        backups = []
        if is_str(repositories):
            # tabular text: one repository per line, fields separated by |
            for line in repositories.split('\n'):
                line = line.split('#')[0].strip()  # discard comments
                if not line:
                    continue
                backups.append([c.strip() for c in line.split('|')])
        else:
            for each in repositories:
                backups.append([
                    each.get('host'),
                    each.get('path'),
                    each.get('maintainer'),
                    each.get('max_age')
                ])

        def send_mail(recipient, subject, message):
            # deliver a report via mailx when --mail was requested
            if cmdline['--mail']:
                display(f'Reporting to {recipient}.\n')
                mail_cmd = ['mailx', '-r', dumper, '-s', subject, recipient]
                Run(mail_cmd, stdin=message, modes='soeW0')

        # check age of repositories
        now = arrow.now()
        display(f'current time = {now}')
        for host, path, maintainer, max_age in backups:
            maintainer = default_maintainer if not maintainer else maintainer
            max_age = int(max_age) if max_age else default_max_age
            try:
                path = to_path(root, path)
                if not path.is_dir():
                    raise Error(
                        'does not exist or is not a directory.',
                        culprit=path)
                paths = list(path.glob('index.*'))
                if not paths:
                    raise Error('no sentinel file found.', culprit=path)
                if len(paths) > 1:
                    raise Error('too many sentinel files.', *paths, sep='\n ')
                path = paths[0]
                mtime = arrow.get(path.stat().st_mtime)
                delta = now - mtime
                age = 24 * delta.days + delta.seconds / 3600
                report = age > max_age
                display(dedent(f"""
                    HOST: {host}
                    sentinel file: {path!s}
                    last modified: {mtime}
                    since last change: {age:0.1f} hours
                    maximum age: {max_age} hours
                    overdue: {report}
                """))
                if report:
                    problem = True
                    subject = f"backup of {host} is overdue"
                    msg = overdue_message.format(host=host, path=path, age=age)
                    send_mail(maintainer, subject, msg)
            except OSError as e:
                problem = True
                msg = os_error(e)
                error(msg)
                # bug fix: was `if maintaner:` (misspelled), which raised
                # NameError instead of mailing the error report
                if maintainer:
                    send_mail(maintainer, f'{get_prog_name()} error',
                              error_message.format(msg))
            except Error as e:
                problem = True
                e.report()
                # bug fix: same `maintaner` misspelling as above
                if maintainer:
                    send_mail(maintainer, f'{get_prog_name()} error',
                              error_message.format(str(e)))
        terminate(problem)
def main():
    # Check that backups of the configured repositories are not overdue,
    # reporting and optionally mailing the maintainer for each problem.
    version = f'{__version__} ({__released__})'
    cmdline = docopt(__doc__, version=version)
    quiet = cmdline['--quiet']
    problem = False
    use_color = Color.isTTY() and not cmdline['--no-color']
    passes = Color('green', enable=use_color)
    fails = Color('red', enable=use_color)

    # prepare to create logfile
    log = to_path(DATA_DIR, OVERDUE_LOG_FILE) if OVERDUE_LOG_FILE else False
    if log:
        data_dir = to_path(DATA_DIR)
        if not data_dir.exists():
            try:
                # data dir does not exist, create it
                data_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
            except OSError as e:
                # logging is optional; continue without it
                warn(os_error(e))
                log = False

    with Inform(flush=True, quiet=quiet, logfile=log, version=version):
        # read the settings file
        try:
            settings_file = PythonFile(CONFIG_DIR, OVERDUE_FILE)
            settings = settings_file.run()
        except Error as e:
            e.terminate()

        # gather needed settings
        default_maintainer = settings.get('default_maintainer')
        default_max_age = settings.get('default_max_age', 28)
        # NOTE(review): username and hostname are not defined in this
        # function — presumably module-level values; confirm
        dumper = settings.get('dumper', f'{username}@{hostname}')
        repositories = settings.get('repositories')
        root = settings.get('root')

        # process repositories table
        backups = []
        if is_str(repositories):
            # tabular text: one repository per line, fields separated by |
            for line in repositories.split('\n'):
                line = line.split('#')[0].strip()  # discard comments
                if not line:
                    continue
                backups.append([c.strip() for c in line.split('|')])
        else:
            for each in repositories:
                backups.append([
                    each.get('host'),
                    each.get('path'),
                    each.get('maintainer'),
                    each.get('max_age')
                ])

        def send_mail(recipient, subject, message):
            # deliver a report via mailx when --mail was requested
            if cmdline['--mail']:
                display(f'Reporting to {recipient}.\n')
                mail_cmd = ['mailx', '-r', dumper, '-s', subject, recipient]
                Run(mail_cmd, stdin=message, modes='soeW0')

        # check age of repositories
        for host, path, maintainer, max_age in backups:
            maintainer = default_maintainer if not maintainer else maintainer
            max_age = float(max_age) if max_age else default_max_age
            try:
                path = to_path(root, path)
                if path.is_dir():
                    paths = list(path.glob('index.*'))
                    if not paths:
                        raise Error('no sentinel file found.', culprit=path)
                    if len(paths) > 1:
                        raise Error(
                            'too many sentinel files.', *paths, sep='\n ')
                    path = paths[0]
                    mtime = arrow.get(path.stat().st_mtime)
                    # NOTE(review): `now` is not defined in this function —
                    # presumably a module-level arrow.now(); confirm
                    delta = now - mtime
                    age = 24 * delta.days + delta.seconds / 3600
                    report = age > max_age
                    color = fails if report else passes
                    if report or not cmdline['--no-passes']:
                        display(
                            color(
                                dedent(f"""
                                    HOST: {host}
                                    sentinel file: {path!s}
                                    last modified: {mtime}
                                    since last change: {age:0.1f} hours
                                    maximum age: {max_age} hours
                                    overdue: {report}
                                """).lstrip()))
                    if report:
                        problem = True
                        subject = f"backup of {host} is overdue"
                        msg = overdue_message.format(
                            host=host, path=path, age=age)
                        send_mail(maintainer, subject, msg)
            except OSError as e:
                problem = True
                msg = os_error(e)
                error(msg)
                if maintainer:
                    send_mail(maintainer, f'{get_prog_name()} error',
                              error_message.format(msg))
            except Error as e:
                problem = True
                e.report()
                if maintainer:
                    send_mail(maintainer, f'{get_prog_name()} error',
                              error_message.format(str(e)))
        terminate(problem)
def read_config():
    # Load the user's config, hashes, account-list, and user-key files
    # into the global Config, then initialize GPG and the user's colors.
    # Idempotent.
    if Config.get('READ'):
        return  # already read

    # First open the config file
    from .gpg import PythonFile
    path = get_setting('config_file')
    # the config file must be readable before GPG is initialized
    assert path.suffix.lower() not in ['.gpg', '.asc']
    config_file = PythonFile(path)
    if not config_file.exists():
        # have not yet initialized this account
        return
    try:
        contents = config_file.run()
        for k, v in contents.items():
            if k.startswith('_'):
                continue
            if k not in CONFIG_DEFAULTS:
                warn('%s: unknown.' % k, culprit=config_file)
                continue
            if k.endswith('_executable'):
                # sanity check: executables should be given by absolute path
                argv = v.split() if is_str(v) else list(v)
                path = Path(argv[0])
                if not path.is_absolute():
                    warn('should use absolute path for executables.',
                         culprit=(config_file, k))
            Config[k] = v
        Config['READ'] = True
    except PasswordError:
        # a missing config file is not fatal; defaults are used
        comment('not found.', culprit=config_file)

    # Now open the hashes file
    hashes_file = PythonFile(get_setting('hashes_file'))
    try:
        contents = hashes_file.run()
        Config.update({k.lower(): v for k, v in contents.items()})
    except PasswordError:
        pass  # best effort

    # Now open the account list file
    account_list_file = PythonFile(get_setting('account_list_file'))
    try:
        contents = account_list_file.run()
        Config.update({k.lower(): v for k, v in contents.items()})
    except PasswordError:
        pass  # best effort

    # initialize GPG
    from .gpg import GnuPG
    GnuPG.initialize()

    # Now read the user key file
    user_key_file = get_setting('user_key_file')
    if user_key_file:
        user_key_file = PythonFile(get_setting('user_key_file'))
        try:
            contents = user_key_file.run()
            Config.update({
                k.lower(): v
                for k, v in contents.items()
                if not k.startswith('__')
            })
        except PasswordError:
            pass  # best effort

    # Set the user-selected colors
    Config['_label_color'] = Color(color=get_setting('label_color'),
                                   scheme=get_setting('color_scheme'),
                                   enable=Color.isTTY())
    Config['_highlight_color'] = Color(color=get_setting('highlight_color'),
                                       scheme=get_setting('color_scheme'),
                                       enable=Color.isTTY())
def main():
    """Entry point: check the ages of backup sentinel files and report.

    Reads a settings file describing repositories to monitor, compares each
    repository's sentinel-file modification time against its allowed
    maximum age, displays a colored pass/fail report, and optionally mails
    the maintainer about overdue backups or errors.  Exits with a nonzero
    status via terminate() if any backup is overdue or any error occurs.

    Relies on module-level names not visible in this chunk (``now``,
    ``username``, ``hostname``, the message templates, DATA_DIR/CONFIG_DIR
    constants) — assumed to be defined at file scope; TODO confirm.
    """
    version = f"{__version__} ({__released__})"
    cmdline = docopt(__doc__, version=version)
    quiet = cmdline["--quiet"]
    problem = False
    # colorize output only on a TTY, and only if the user did not opt out
    use_color = Color.isTTY() and not cmdline["--no-color"]
    passes = Color("green", enable=use_color)
    fails = Color("red", enable=use_color)
    if cmdline["--verbose"]:
        overdue_message = verbose_overdue_message
    else:
        overdue_message = terse_overdue_message

    # prepare to create logfile
    log = to_path(DATA_DIR, OVERDUE_LOG_FILE) if OVERDUE_LOG_FILE else False
    if log:
        data_dir = to_path(DATA_DIR)
        if not data_dir.exists():
            try:
                # data dir does not exist, create it
                data_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
            except OSError as e:
                # cannot create the log directory; continue without logging
                warn(os_error(e))
                log = False

    with Inform(flush=True, quiet=quiet, logfile=log, version=version):
        # read the settings file
        try:
            settings_file = PythonFile(CONFIG_DIR, OVERDUE_FILE)
            settings = settings_file.run()
        except Error as e:
            e.terminate()

        # gather needed settings
        default_maintainer = settings.get("default_maintainer")
        default_max_age = settings.get("default_max_age", 28)
        dumper = settings.get("dumper", f"{username}@{hostname}")
        repositories = settings.get("repositories")
        root = settings.get("root")

        # process repositories table
        # two accepted forms: a multi-line '|'-separated text table, or a
        # list of dicts with host/path/maintainer/max_age keys
        backups = []
        if is_str(repositories):
            for line in repositories.split("\n"):
                line = line.split("#")[0].strip()  # discard comments
                if not line:
                    continue
                backups.append([c.strip() for c in line.split("|")])
        else:
            for each in repositories:
                backups.append([
                    each.get("host"),
                    each.get("path"),
                    each.get("maintainer"),
                    each.get("max_age"),
                ])

        def send_mail(recipient, subject, message):
            # deliver a report via the local mailx command, but only when
            # the user asked for mail with --mail
            if cmdline["--mail"]:
                if cmdline['--verbose']:
                    display(f"Reporting to {recipient}.\n")
                mail_cmd = ["mailx", "-r", dumper, "-s", subject, recipient]
                Run(mail_cmd, stdin=message, modes="soeW0")

        # check age of repositories
        for host, path, maintainer, max_age in backups:
            # fall back to the defaults for missing per-repository fields
            maintainer = default_maintainer if not maintainer else maintainer
            max_age = float(max_age) if max_age else default_max_age
            try:
                path = to_path(root, path)
                if path.is_dir():
                    # a directory must contain exactly one index.* sentinel
                    paths = list(path.glob("index.*"))
                    if not paths:
                        raise Error("no sentinel file found.", culprit=path)
                    if len(paths) > 1:
                        raise Error("too many sentinel files.", *paths, sep="\n ")
                    path = paths[0]
                mtime = arrow.get(path.stat().st_mtime)
                delta = now - mtime
                # delta.seconds excludes whole days, so this yields total hours
                age = 24 * delta.days + delta.seconds / 3600
                report = age > max_age
                overdue = ' -- overdue' if report else ''
                color = fails if report else passes
                if report or not cmdline["--no-passes"]:
                    # fmt() interpolates local variables (host, age, overdue,
                    # ...) into the message template — presumably inform.fmt;
                    # verify against the file's imports
                    display(color(fmt(overdue_message)))
                if report:
                    problem = True
                    subject = f"backup of {host} is overdue"
                    msg = fmt(mail_overdue_message)
                    send_mail(maintainer, subject, msg)
            except OSError as e:
                # e.g. sentinel file missing or unreadable
                problem = True
                msg = os_error(e)
                error(msg)
                if maintainer:
                    send_mail(
                        maintainer,
                        f"{get_prog_name()} error",
                        error_message.format(msg),
                    )
            except Error as e:
                problem = True
                e.report()
                if maintainer:
                    send_mail(
                        maintainer,
                        f"{get_prog_name()} error",
                        error_message.format(str(e)),
                    )
        terminate(problem)
def dumps(obj, *, width=0, sort_keys=False, indent=4, renderers=None, default=None, _level=0):
    """Recursively convert object to *NestedText* string.

    Args:
        obj:
            The object to convert to *NestedText*.
        width (int):
            Enables compact lists and dictionaries if greater than zero
            and if resulting line would be less than or equal to given
            width.
        sort_keys (bool or func):
            Dictionary items are sorted by their key if *sort_keys* is
            true.  If a function is passed in, it is used as the key
            function.
        indent (int):
            The number of spaces to use to represent a single level of
            indentation.  Must be one or greater.
        renderers (dict):
            A dictionary where the keys are types and the values are
            render functions (functions that take an object and convert
            it to a string).  These will be used to convert values to
            strings during the conversion.
        default (func or 'strict'):
            The default renderer.  Use to render otherwise unrecognized
            objects to strings.  If not provided an error will be raised
            for unsupported data types.  Typical values are *repr* or
            *str*.  If 'strict' is specified then only dictionaries,
            lists, strings, and those types specified in *renderers* are
            allowed.  If *default* is not specified then a broader
            collection of value types are supported, including *None*,
            *bool*, *int*, *float*, and *list*- and *dict*-like objects.
            In this case Booleans is rendered as 'True' and 'False' and
            None and empty lists and dictionaries are rendered as empty
            strings.
        _level (int):
            The number of indentation levels.  When dumps is invoked
            recursively this is used to increment the level and so the
            indent.  Should not be specified by the user.

    Returns:
        The *NestedText* content.

    Raises:
        NestedTextError: if there is a problem in the input data.

    Examples:

        .. code-block:: python

            >>> import nestedtext as nt

            >>> data = {
            ...     'name': 'Kristel Templeton',
            ...     'sex': 'female',
            ...     'age': '74',
            ... }

            >>> try:
            ...     print(nt.dumps(data))
            ... except nt.NestedTextError as e:
            ...     print(str(e))
            name: Kristel Templeton
            sex: female
            age: 74

        The *NestedText* format only supports dictionaries, lists, and
        strings and all leaf values must be strings.  By default, *dumps*
        is configured to be rather forgiving, so it will render many of
        the base Python data types, such as *None*, *bool*, *int*,
        *float* and list-like types such as *tuple* and *set* by
        converting them to the types supported by the format.  This
        implies that a round trip through *dumps* and *loads* could
        result in the types of values being transformed.  You can
        restrict *dumps* to only supporting the native types of
        *NestedText* by passing `default='strict'` to *dumps*.  Doing so
        means that values that are not dictionaries, lists, or strings
        generate exceptions; as do empty dictionaries and lists.

        .. code-block:: python

            >>> data = {'key': 42, 'value': 3.1415926, 'valid': True}

            >>> try:
            ...     print(nt.dumps(data))
            ... except nt.NestedTextError as e:
            ...     print(str(e))
            key: 42
            value: 3.1415926
            valid: True

            >>> try:
            ...     print(nt.dumps(data, default='strict'))
            ... except nt.NestedTextError as e:
            ...     print(str(e))
            42: unsupported type.

        Alternatively, you can specify a function to *default*, which is
        used to convert values to strings.  It is used if no other
        converter is available.  Typical values are *str* and *repr*.

        .. code-block:: python

            >>> class Color:
            ...     def __init__(self, color):
            ...         self.color = color
            ...     def __repr__(self):
            ...         return f'Color({self.color!r})'
            ...     def __str__(self):
            ...         return self.color

            >>> data['house'] = Color('red')
            >>> print(nt.dumps(data, default=repr))
            key: 42
            value: 3.1415926
            valid: True
            house: Color('red')

            >>> print(nt.dumps(data, default=str))
            key: 42
            value: 3.1415926
            valid: True
            house: red

        You can also specify a dictionary of renderers.  The dictionary
        maps the object type to a render function.

        .. code-block:: python

            >>> renderers = {
            ...     bool: lambda b: 'yes' if b else 'no',
            ...     int: hex,
            ...     float: lambda f: f'{f:0.3}',
            ...     Color: lambda c: c.color,
            ... }

            >>> try:
            ...     print(nt.dumps(data, renderers=renderers))
            ... except nt.NestedTextError as e:
            ...     print(str(e))
            key: 0x2a
            value: 3.14
            valid: yes
            house: red

        If the dictionary maps a type to *None*, then the default
        behavior is used for that type.  If it maps to *False*, then an
        exception is raised.

        .. code-block:: python

            >>> renderers = {
            ...     bool: lambda b: 'yes' if b else 'no',
            ...     int: hex,
            ...     float: False,
            ...     Color: lambda c: c.color,
            ... }

            >>> try:
            ...     print(nt.dumps(data, renderers=renderers))
            ... except nt.NestedTextError as e:
            ...     print(str(e))
            3.1415926: unsupported type.

        Both *default* and *renderers* may be used together.  *renderers*
        has priority over the built-in types and *default*.  When a
        function is specified as *default*, it is always applied as a
        last resort.
    """
    # define sort function {{{3
    if sort_keys:
        def sort(keys):
            # sort_keys may be a bool (natural order) or a key function
            return sorted(keys, key=sort_keys if callable(sort_keys) else None)
    else:
        def sort(keys):
            return keys

    # render_dict_item {{{3
    def render_dict_item(key, dictionary, indent, rdumps):
        # render one key/value pair of a dictionary in block form
        value = dictionary[key]
        if is_a_scalar(key):
            key = str(key)
        if not is_a_str(key):
            raise NestedTextError(template='keys must be strings.', culprit=key)
        # keys that are empty, span lines, carry surrounding spaces, or
        # start like comments/list items/other keys cannot be written
        # inline and require the multiline-key (': ') form
        multiline_key_required = (
            not key
            or '\n' in key
            or key.strip(' ') != key
            or key[:1] == "#"
            or key[:2] in ["- ", "> ", ": "]
            or ': ' in key
        )
        if multiline_key_required:
            key = "\n".join(": " + l if l else ":" for l in key.split('\n'))
            if is_str(value):
                # force use of multiline value with multiline keys
                return key + "\n" + add_leader(value, indent * ' ' + '> ')
            else:
                return key + rdumps(value)
        else:
            return add_prefix(key + ":", rdumps(value))

    # render_inline_dict {{{3
    def render_inline_dict(obj):
        # render a dictionary in compact {k: v, ...} form; raises
        # ValueError (via the scalar/value renderers) if not representable
        exclude = set('\n[]{}:,')
        items = []
        for k in sort(obj):
            v = render_inline_value(obj[k], exclude=exclude)
            k = render_inline_scalar(k, exclude=exclude)
            items.append(f'{k}: {v}')
        return '{' + ', '.join(items) + '}'

    # render_inline_list {{{3
    def render_inline_list(obj):
        # render a list in compact [v, ...] form
        items = []
        for v in obj:
            v = render_inline_value(v, exclude=set('\n[]{},'))
            items.append(v)
        # a trailing empty item needs ', ]' so it is not lost
        endcap = ', ]' if len(items) and items[-1] == '' else ']'
        return '[' + ', '.join(items) + endcap

    # render_inline_value {{{3
    def render_inline_value(obj, exclude):
        # dispatch to the appropriate compact renderer
        if is_a_dict(obj):
            return render_inline_dict(obj)
        if is_a_list(obj):
            return render_inline_list(obj)
        return render_inline_scalar(obj, exclude)

    # render_inline_scalar {{{3
    def render_inline_scalar(obj, exclude):
        # convert a leaf value to text; ValueError signals "cannot be
        # rendered inline" and is caught by the block-form fallbacks
        render = renderers.get(type(obj)) if renderers else None
        if render is False:
            raise ValueError()
        elif render:
            value = render(obj)
            if "\n" in value:
                raise ValueError()
        elif is_a_str(obj):
            value = obj
        elif is_a_scalar(obj):
            value = '' if obj is None else str(obj)
        elif default and callable(default):
            value = default(obj)
        else:
            raise ValueError()
        if exclude & set(value):
            raise ValueError()
        if value.strip(' ') != value:
            raise ValueError()
        return value

    # define object type identification functions {{{3
    if default == 'strict':
        # strict mode: only true dicts, lists, and strs are recognized
        is_a_dict = lambda obj: isinstance(obj, dict)
        is_a_list = lambda obj: isinstance(obj, list)
        is_a_str = lambda obj: isinstance(obj, str)
        is_a_scalar = lambda obj: False
    else:
        # forgiving mode: accept mappings, collections, and basic scalars
        is_a_dict = is_mapping
        is_a_list = is_collection
        is_a_str = is_str
        is_a_scalar = lambda obj: obj is None or isinstance(
            obj, (bool, int, float))
        if is_str(default):
            # only the string 'strict' is understood as a string default
            raise NotImplementedError(default)

    # define dumps function for recursion {{{3
    def rdumps(v):
        # recurse one level deeper, narrowing the available width
        return dumps(
            v,
            width=width - indent,
            sort_keys=sort_keys,
            indent=indent,
            renderers=renderers,
            default=default,
            _level=_level + 1
        )

    # render content {{{3
    assert indent > 0
    error = None
    need_indented_block = is_collection(obj)
    content = ''
    render = renderers.get(type(obj)) if renderers else None
    if render is False:
        error = "unsupported type."
    elif render:
        content = render(obj)
        if "\n" in content:
            need_indented_block = True
    elif is_a_dict(obj):
        # try the compact one-line form first; fall back to block form
        # when width is exceeded or an item cannot be rendered inline
        try:
            if obj and not (width > 0 and len(obj) <= width / 6):
                raise ValueError
            content = render_inline_dict(obj)
            if obj and len(content) > width:
                raise ValueError
        except ValueError:
            content = "\n".join(
                render_dict_item(k, obj, indent, rdumps)
                for k in sort(obj)
            )
    elif is_a_list(obj):
        try:
            if obj and not (width > 0 and len(obj) <= width / 6):
                raise ValueError
            content = render_inline_list(obj)
            if obj and len(content) > width:
                raise ValueError
        except ValueError:
            content = "\n".join(
                add_prefix("-", rdumps(v))
                for v in obj
            )
    elif is_a_str(obj):
        # normalize line endings; multiline strings (and a top-level
        # string) are emitted with the '> ' leader
        text = obj.replace('\r\n', '\n').replace('\r', '\n')
        if "\n" in text or _level == 0:
            content = add_leader(text, '> ')
            need_indented_block = True
        else:
            content = text
    elif is_a_scalar(obj):
        if obj is None:
            content = ''
        else:
            content = str(obj)
    elif default and callable(default):
        content = default(obj)
    else:
        error = 'unsupported type.'

    if need_indented_block and content and _level:
        # nested blocks start on a fresh line, indented one level
        content = "\n" + add_leader(content, indent * ' ')
    if error:
        raise NestedTextError(obj, template=error, culprit=repr(obj))
    return content
def value(self, name, default=""):
    """Return the value of a scalar setting.

    Looks *name* up in self.settings, substituting *default* when the
    setting is absent.  String values are passed through self.resolve()
    before being returned, unless the setting is listed in
    self.do_not_expand; non-string values are returned verbatim.
    """
    raw = self.settings.get(name, default)
    if is_str(raw) and name not in self.do_not_expand:
        return self.resolve(raw)
    return raw