def tag(options):
    """Tag the version.

    Merges defaults from ./.tagitrc with the command-line options, verifies
    the repository status, bumps the version in the source file and/or
    pyproject.toml, commits, tags, pushes, and optionally publishes via
    poetry.

    @params:
        options: The parsed command-line options; `options['c']` holds the
            config overrides and `options[POSITIONAL]` an explicit version.
    @raises:
        RuntimeError: When the `extra` shell command exits non-zero.
    """
    default_options = Config()
    default_options._load('./.tagitrc')
    default_options._use('TAGIT')
    default_options.update(vars(options['c']))
    publish = default_options.get('publish', False)
    increment = default_options.get('increment', 'patch')
    versource = default_options.get('versource', '')
    vertoml = default_options.get('vertoml', '')
    extra = default_options.get('extra', '')
    specver = options[POSITIONAL] or None
    # status() validates the repo and the requested version; bail out early
    # when the check fails.
    ret = status(default_options, specver, True)
    if not ret:
        return
    tagver = _get_version_from_gittag() or Tag((0, 0, 0))
    specver = specver or tagver.increment(increment)
    if versource:
        _log('Updating version in source file ...')
        _update_version_to_source(_getsrcfile(versource), specver)
    if vertoml:
        _log('Updating version in pyproject.toml ...')
        _update_version_to_toml(specver, vertoml)
    if extra:
        # user-provided hook command, run in the foreground
        cmd = bash(c=extra).fg
        if cmd.rc != 0:
            raise RuntimeError('Failed to run %r' % extra)
    _log('Committing the change ...')
    try:
        git.commit({'allow-empty': True}, a=True, m=str(specver)).fg
    except CmdyReturnCodeError:
        # pre-commit hooks may modify files and fail the first commit;
        # stage their changes and commit again
        _log('Pre-commit failed, try again ...')
        git.add('.')
        git.commit({'allow-empty': True}, a=True, m=str(specver)).fg
    _log('Pushing the commit to remote ...')
    git.push().fg
    _log('Adding tag %r ...' % specver)
    git.tag(str(specver)).fg
    _log('Pushing the tag to remote ...')
    git.push(tag=True).fg
    if publish:
        _log('Building the release ...')
        poetry.build().fg
        _log('Publishing the release ...')
        poetry.publish().fg
    _log('Done!')
def version(options):
    """Print the current local version read from pyproject.toml.

    @params:
        options: The parsed command-line options; `options.vertoml` is the
            path to the pyproject.toml to read the version from.
    """
    default_options = Config()
    default_options._load('./.tagitrc')
    default_options._use('TAGIT')
    # Fall back to the .tagitrc setting when no path is given on the command
    # line, consistent with how tag()/status() merge rc-file defaults.
    # (Previously the loaded config was never used.)
    vertoml = options.vertoml or default_options.get('vertoml', '')
    ver = _get_version_from_toml(vertoml)
    _log('Current version: %s' % ver)
def load_config(config, opts):
    """Load the configurations from file and merge them into `opts` in place.

    @params:
        config: Path to the configuration file.
        opts: The options mapping, updated in place.
    @raises:
        OSError: When the config file does not exist.
        ValueError: When an instance lacks `formula` or `title`.
    """
    if not path.isfile(config):
        raise OSError("Config file does not exist: {}".format(config))
    configs = Config(with_profile=False)
    configs._load(config)
    configs = configs.as_dict()
    # "instance" entries are handled separately below; everything else is
    # merged straight into opts
    ones = []
    if "instance" in configs:
        ones = configs["instance"]
        del configs["instance"]
    opts |= configs
    # pad the per-formula lists with None up to the number of formulas
    # (previously three copy-pasted statements)
    len_fml = len(opts["formula"])
    for key in ("figtype", "figfmt", "ggs"):
        opts[key].extend([None] * (len_fml - len(opts[key])))
    # devpars is either a single dict shared by all formulas, or a list
    # padded with its first element
    if isinstance(opts["devpars"], list):
        default_devpars = opts["devpars"][0]
        opts["devpars"].extend([default_devpars] * (len_fml - len(opts["devpars"])))
    else:
        default_devpars = opts["devpars"]
        opts["devpars"] = [opts["devpars"]] * len_fml
    for instance in ones:
        if "formula" not in instance:
            raise ValueError(
                "Formula not found in instance: {}".format(instance))
        if "title" not in instance:
            raise ValueError(
                "Title not found in instance: {}".format(instance))
        opts["formula"].append(instance["formula"])
        opts["title"].append(instance["title"])
        opts["figtype"].append(instance.get("figtype"))
        opts["figfmt"].append(instance.get("figfmt"))
        opts["ggs"].append(instance.get("ggs"))
        # each instance's devpars overrides a copy of the defaults
        def_devpars = default_devpars.copy()
        def_devpars.update(instance.get("devpars", {}))
        opts["devpars"].append(def_devpars)
def load_file(self, compfile):
    """Load commands and options from a configuration file.

    @params:
        compfile: Path to the configuration file to load.
    """
    loaded = Config(with_profile=False)
    loaded._load(compfile)
    self.load(loaded)
def status(options, specver=None, ret=False):
    """Get the status of the project.

    Reports the current tag version and, for each auto-increment level,
    whether the project is ready to be tagged with that version.

    @params:
        options: Command-line options, merged over ./.tagitrc defaults.
        specver: An explicitly requested version, default: `None`.
        ret (bool): When True, return the check result for the requested
            (or next) version instead of printing suggestions.
    @returns:
        The `_checkver` result when `ret` is True, otherwise None.
    @raises:
        NoChangesSinceLastTagException: When nothing changed since last tag.
        UncleanRepoException: When there are uncommitted/unpushed changes.
    """
    tagver = _get_version_from_gittag()
    exception = None
    gitstatus = git.status(s=True).str()
    cherry = git.cherry(v=True).str()
    if gitstatus or cherry:
        # defer raising so the version report below is still printed
        exception = UncleanRepoException(
            'You have changes uncommitted or unpushed.\n\n' +
            git.status().str())
    lastmsg = git.log('-1', pretty="format:%s", _sep='=').strip()
    if lastmsg == str(tagver):
        raise NoChangesSinceLastTagException('No changes since last tag.')
    tagver = tagver or Tag((0, 0, 0))
    rcoptions = Config()
    rcoptions._load('./.tagitrc')
    rcoptions._use('TAGIT')
    rcoptions.update(vars(options))
    changelog = rcoptions.get('changelog', '')
    increment = rcoptions.get('increment', 'patch')
    versource = rcoptions.get('versource', '')
    vertoml = rcoptions.get('vertoml', '')
    checksource = rcoptions.get('checksource', True)
    checktoml = rcoptions.get('checktoml', True)
    _log('Current version: %s' % str(tagver))
    if ret:
        nextver = tagver.increment(increment)
        _log('New version received: %r' % (specver or nextver))
        if exception:
            raise exception
        return _checkver(specver or nextver, changelog, versource, vertoml,
                         checksource, checktoml)
    # Suggest every auto-increment level; this loop replaces three
    # copy-pasted blocks for patch/minor/major (output is identical).
    for inc in ('patch', 'minor', 'major'):
        nextver = tagver.increment(inc)
        _log('Next auto %s version is: %s' % (inc, nextver))
        if _checkver(nextver, changelog, versource, vertoml,
                     checksource, checktoml):
            _log(' You are good to go with this version.')
            # offer the short form only when this level is the configured one
            shortcmd = '`tagit tag`, ' if increment == inc else ''
            _log(' Run %s`tagit tag -c.i %s` or `tagit tag %s`' % (
                shortcmd, inc, nextver))
    if exception:
        raise exception
class PyPPL (object):
    """@API
    The PyPPL class

    @static variables:
        TIPS     (list): The tips for users
        RUNNERS  (dict): Registered runners
        COUNTER  (int) : The counter for `PyPPL` instance
    """
    TIPS = [
        "You can find the stdout in <workdir>/<job.index>/job.stdout",
        "You can find the stderr in <workdir>/<job.index>/job.stderr",
        "You can find the script in <workdir>/<job.index>/job.script",
        "Check documentation at: https://pyppl.readthedocs.io/en/latest/",
        "You cannot have two processes with the same id and tag",
        "beforeCmd and afterCmd only run locally",
        "Workdir defaults to PyPPL.<id>.<tag>.<suffix> under default <ppldir>",
        "The default <ppldir> is './workdir'"]

    # Registered runner classes, keyed by lowercased name (see registerRunner)
    RUNNERS = {}
    # counter of PyPPL instances created in this interpreter session
    COUNTER = 0

    def __init__(self, conf = None, cfgfile = None):
        """@API
        PyPPL Constructor
        @params:
            conf (dict): the configurations for the pipeline, default: `None`
                - Remember the profile name should be included.
            cfgfile (file): the configuration file for the pipeline, default: `None`
        """
        # remember this instance's ordinal before bumping the class counter
        self.counter = PyPPL.COUNTER
        PyPPL.COUNTER += 1

        self.config = Config()
        # __noloading__ tells processes not load it as they have it initiated.
        # ('config' here is the module-level default config object, not a local)
        self.config._load({'default': config, '__noloading__': None})
        # precedence: defaults < cfgfile < conf dict
        if cfgfile:
            self.config._load(cfgfile)
        if conf and isinstance(conf, dict):
            self.config._load(conf)
        # log file set to literal True means "derive a default path from the
        # script name", suffixed with the instance counter after the first one
        if self.config._log.file is True:
            self.config._log.file = (Path('./') / Path(sys.argv[0]).stem).with_suffix(
                '%s.pyppl.log' % ('.' + str(self.counter) if self.counter else ''))
        # reinitiate logger according to new config
        logger.init(self.config)
        logger.pyppl('Version: %s', __version__)
        logger.tips(random.choice(PyPPL.TIPS))

        # report which configuration sources were (or could not be) read
        for cfile in DEFAULT_CFGFILES + (str(cfgfile), ):
            if cfile.endswith('.osenv'):
                # pseudo config file naming an environment-variable prefix
                logger.config('Read from environment variables with prefix: "%s_"',
                    Path(cfile).name[:-6])
            cfile = Path(cfile).expanduser()
            if not utils.fs.isfile(cfile):
                # only warn for the user-supplied file, not the defaults
                # NOTE(review): cfile is a Path here while cfgfile may be a
                # str, so this equality may never hold — confirm.
                if cfile == cfgfile:
                    logger.warning('Configuration file does not exist: %s', cfile)
                continue
            # NOTE(review): 'yml' lacks the leading dot and Path.suffix always
            # starts with '.', so '.yml' files skip this PyYAML check — confirm
            # whether '.yml' was intended.
            elif cfile.suffix in ('.yaml', 'yml'):
                try:
                    import yaml # pylint: disable=W0611
                except ImportError: # pragma: no cover
                    logger.warning('Module PyYAML not installed, config file ignored: %s', cfile)
            elif cfile.suffix == '.toml':
                try:
                    import toml # pylint: disable=W0611
                except ImportError: # pragma: no cover
                    logger.warning('Module toml not installed, config file ignored: %s', cfile)
            logger.config('Read from %s', cfile)

        for plgname, plugin in pluginmgr.list_name_plugin():
            logger.plugin('Loaded %s: v%s', plgname,
                plugin.__version__ if hasattr(plugin, '__version__') else 'Unknown')

        self.tree = ProcTree()
        # save the procs in order for plugin use
        self.procs = []
        pluginmgr.hook.pypplInit(ppl = self)

    @staticmethod
    def _procsSelector(selector):
        """
        Resolve a process selector into a Proxy of processes.
        A selector may be a Proc, a ProcSet (its start procs are taken),
        a list/tuple of selectors (resolved recursively), or a string matched
        against 'id', 'id.tag', or an fnmatch pattern on 'id.tag'.
        @params:
            selector: The selector to resolve.
        @returns:
            (Proxy): The matched processes.
        """
        ret = Proxy()
        if isinstance(selector, Proc):
            ret.add(selector)
        elif isinstance(selector, ProcSet):
            ret.add(selector.starts)
        elif isinstance(selector, (tuple, list)):
            for thing in selector:
                ret.add(PyPPL._procsSelector(thing))
        else:
            for proc in ProcTree.NODES:
                if selector == proc.id:
                    ret.add(proc)
                elif selector == proc.id + '.' + proc.tag:
                    ret.add(proc)
                elif fnmatch.fnmatch(proc.id + '.' + proc.tag, selector):
                    ret.add(proc)
        return ret

    def start (self, *args):
        """@API
        Set the starting processes of the pipeline
        @params:
            *args (Proc|str): process selectors
        @returns:
            (PyPPL): The pipeline object itself.
        """
        starts = set(PyPPL._procsSelector(args))
        PyPPL._registerProc(*starts)
        self.tree.init()
        nostart = set()
        for startproc in starts:
            # Let's check if we have any other procs on the path of start process;
            # a "start" that depends on another start is demoted with a warning
            paths = self.tree.getPaths(startproc)
            pristarts = [pnode for sublist in paths for pnode in sublist if pnode in starts]
            if pristarts:
                nostart.add(startproc)
                # show at most three offending names
                names = [pnode.name(True) for pnode in pristarts]
                names = names[:3] + ['...'] if len(names) > 3 else names
                logger.warning('Start process %s ignored, depending on [%s]',
                    startproc.name(True), ', '.join(names))
        self.tree.setStarts(starts - nostart)
        return self

    def _resume(self, *args, plus = False):
        """
        Mark processes as to be resumed
        @params:
            `args`: the processes to be marked. The last element is the mark
                for processes to be skipped.
            `plus`: use the 'resume+'/'skip+' flags instead of 'resume'/'skip'.
        """
        sflag = 'skip+' if plus else 'skip'
        rflag = 'resume+' if plus else 'resume'
        resumes = PyPPL._procsSelector(args)

        ends = self.tree.getEnds()
        #starts = self.tree.getStarts()
        # check whether all ends can be reached
        for end in ends:
            if end in resumes:
                continue
            paths = self.tree.getPathsToStarts(end)
            # a path with no resumed process on it cannot be reached
            failedpaths = [apath for apath in paths
                if not any(pnode in apath for pnode in resumes)]
            if not failedpaths:
                continue
            failedpath = failedpaths[0]
            raise PyPPLProcRelationError('%s <- [%s]' % (
                end.name(), ', '.join(pnode.name() for pnode in failedpath)),
                'One of the routes cannot be achived from resumed processes')

        # set prior processes to skip
        for rsproc in resumes:
            rsproc.resume = rflag
            paths = self.tree.getPathsToStarts(rsproc)
            for apath in paths:
                for pnode in apath:
                    if not pnode.resume:
                        pnode.resume = sflag

    def resume (self, *args):
        """@API
        Mark processes as to be resumed
        @params:
            *args (Proc|str): the processes to be marked
        @returns:
            (PyPPL): The pipeline object itself.
        """
        # no-op when called with nothing (or a single falsy selector)
        if not args or (len(args) == 1 and not args[0]):
            return self
        self._resume(*args)
        return self

    def resume2 (self, *args):
        """@API
        Mark processes as to be resumed (the 'resume+'/'skip+' variant)
        @params:
            *args (Proc|str): the processes to be marked
        @returns:
            (PyPPL): The pipeline object itself.
        """
        # no-op when called with nothing (or a single falsy selector)
        if not args or (len(args) == 1 and not args[0]):
            return self
        self._resume(*args, plus = True)
        return self

    def run(self, profile = 'default'):
        """@API
        Run the pipeline
        @params:
            profile (str|dict): the profile used to run, if not found,
                it'll be used as runner name.
                - default: 'default'
        @returns:
            (PyPPL): The pipeline object itself.
        """
        timer = time()
        pluginmgr.hook.pypplPreRun(ppl = self)
        proc = self.tree.getNextToRun()
        while proc:
            # banner: include the origin id when the proc was copied/renamed
            if proc.origin != proc.id:
                name = '{} ({}): {}'.format(proc.name(True), proc.origin, proc.desc)
            else:
                name = '{}: {}'.format(proc.name(True), proc.desc)
            #nlen = max(85, len(name) + 3)
            #logger.logger.info ('[PROCESS] +' + '-'*(nlen-3) + '+')
            #logger.logger.info ('[PROCESS] |%s%s|' % (name, ' '*(nlen - 3 - len(name))))
            decorlen = max(80, len(name))
            logger.process ('-' * decorlen)
            logger.process (name)
            logger.process ('-' * decorlen)
            logger.depends (
                '%s => %s => %s', ProcTree.getPrevStr(proc), proc.name(),
                ProcTree.getNextStr(proc), proc = proc.id)
            proc.run(profile, self.config)

            # save the procs in run order for plugin use
            self.procs.append(proc)
            proc = self.tree.getNextToRun()

        # unran = self.tree.unranProcs()
        # if unran:
        #     klen = max([len(key) for key, _ in unran.items()])
        #     for key, val in unran.items():
        #         fmtstr = "%-"+ str(klen) +"s won't run as path can't be reached: %s <- %s"
        #         logger.warning(fmtstr, key, key, ' <- '.join(val))

        pluginmgr.hook.pypplPostRun(ppl = self)
        logger.done('Total time: %s', utils.formatSecs(time() - timer))
        return self

    @staticmethod
    def _registerProc(*procs):
        """
        Register the process
        @params:
            `*procs`: The process
        """
        ProcTree.register(*procs)

    @staticmethod
    def _checkProc(proc):
        """
        Check processes, whether 2 processes have the same id and tag
        @params:
            `proc`: The process
        @returns:
            If there are 2 processes with the same id and tag, raise `ValueError`.
        """
        ProcTree.check(proc)

    @staticmethod
    def registerRunner(runner_to_reg):
        """@API
        Register a runner
        @params:
            `runner_to_reg`: The runner to be registered; its class name must
                start with "Runner" and it is keyed by the lowercased remainder.
        """
        runner_name = runner_to_reg.__name__
        if not runner_name.startswith('Runner'):
            raise RunnerClassNameError('The class name of a runner should start with "Runner"')
        # strip the 'Runner' prefix; first registration wins
        runner_name = runner_name[6:].lower()
        if runner_name not in PyPPL.RUNNERS:
            PyPPL.RUNNERS[runner_name] = runner_to_reg