from numpydoc.docscrape import NumpyDocString


def test_duplicate_signature():
    # Duplicate function signatures occur e.g. in ufuncs, when the
    # automatic mechanism adds one, and a more detailed one comes from
    # the docstring itself.
    doc = NumpyDocString(
        """
        z(x1, x2)

        z(a, theta)
        """)

    assert doc['Signature'].strip() == 'z(a, theta)'

import inspect


def add_workflow(self, workflow):
    specs = inspect.getfullargspec(workflow)
    doc = inspect.getdoc(workflow)
    self.doc = NumpyDocString(doc)['Parameters']
    self.outputs = NumpyDocString(doc)['Outputs']

    args = specs.args
    defaults = specs.defaults

    len_args = len(args)
    len_defaults = len(defaults)

    for i, arg in enumerate(args):
        prefix = ''
        is_optional = i >= len_args - len_defaults
        if is_optional:
            prefix = '--'

        typestr = self.doc[i][1]
        dtype, isnarg = self._select_dtype(typestr)
        help_msg = ''.join(self.doc[i][2])

        _args = ['{0}{1}'.format(prefix, arg)]
        _kwargs = {'help': help_msg, 'type': dtype, 'action': 'store'}

        if is_optional:
            _kwargs['metavar'] = dtype.__name__
            if dtype is bool:
                _kwargs['type'] = int
                _kwargs['choices'] = [0, 1]

        if isnarg:
            _kwargs['nargs'] = '*'

        self.add_argument(*_args, **_kwargs)

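# `_select_dtype` is not shown in these snippets. The sketch below is a
# hypothetical stand-in (an assumption, not dipy's actual implementation):
# it maps a numpydoc type string such as 'int, optional' or 'variable string'
# to a Python type plus a flag saying whether the argument takes a variable
# number of values.
def select_dtype_sketch(typestr):
    text = typestr.lower()
    # Assumed convention: 'variable' in the type string means nargs='*'/'+'.
    isnarg = 'variable' in text
    dtype = None
    # First match wins in this sketch; compound types are checked first.
    for name, pytype in (('tuple', tuple), ('str', str), ('int', int),
                         ('float', float), ('bool', bool)):
        if name in text:
            dtype = pytype
            break
    return dtype, isnarg

# e.g. select_dtype_sketch('float, optional') -> (float, False)
#      select_dtype_sketch('variable string') -> (str, True)
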
import numpy.testing as npt


def test_see_also():
    doc6 = NumpyDocString(
        """
        z(x,theta)

        See Also
        --------
        func_a, func_b, func_c
        func_d : some equivalent func
        foo.func_e : some other func over
                     multiple lines
        func_f, func_g, :meth:`func_h`, func_j,
        func_k
        :obj:`baz.obj_q`
        :class:`class_j`: fubar
            foobar
        """)

    npt.assert_equal(len(doc6['See Also']), 12)
    for func, desc, role in doc6['See Also']:
        if func in ('func_a', 'func_b', 'func_c', 'func_f', 'func_g',
                    'func_h', 'func_j', 'func_k', 'baz.obj_q'):
            assert not desc
        else:
            assert desc

        if func == 'func_h':
            assert role == 'meth'
        elif func == 'baz.obj_q':
            assert role == 'obj'
        elif func == 'class_j':
            assert role == 'class'
        else:
            assert role is None

        if func == 'func_d':
            assert desc == ['some equivalent func']
        elif func == 'foo.func_e':
            assert desc == ['some other func over', 'multiple lines']
        elif func == 'class_j':
            assert desc == ['fubar', 'foobar']

    >>> cov = [[1,0],[1,0]]
    >>> x = multivariate_normal(mean,cov,(3,3))
    >>> print x.shape
    (3, 3, 2)

    The following is probably true, given that 0.6 is roughly twice the
    standard deviation:

    >>> print list( (x[0,0,:] - mean) < 0.6 )
    [True, True]

    .. index:: random
       :refguide: random;distributions, random;gauss

    """
doc = NumpyDocString(doc_txt)

doc_yields_txt = """
Test generator

Yields
------
a : int
    The number of apples.
b : int
    The number of bananas.
int
    The number of unknowns.
"""
doc_yields = NumpyDocString(doc_yields_txt)

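# Quick sanity check of the parse above: each 'Yields' entry behaves like a
# (name, type, description) triple. Depending on the numpydoc version the
# entries are plain tuples or Parameter namedtuples; index access works for
# both, which is how the dipy snippets below use them.
for entry in doc_yields['Yields']:
    name, typestr, desc = entry[0], entry[1], entry[2]
    print(name, typestr, ' '.join(desc))
# Expected: three entries ('a' and 'b' typed as int, plus one unnamed int
# entry), mirroring the Yields block in doc_yields_txt.
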
def add_sub_flow_args(self, sub_flows):
    """Take an array of workflow objects and use introspection to extract
    the parameters, types and docstrings of their run method. Only the
    optional input parameters are extracted for these, as they are treated
    as sub workflows.

    Parameters
    ----------
    sub_flows : array of dipy.workflows.workflow.Workflow
        Workflows to inspect.

    Returns
    -------
    sub_flow_optionals : dictionary of all sub workflow optional parameters
    """
    sub_flow_optionals = dict()

    for name, flow, short_name in sub_flows:
        sub_flow_optionals[name] = {}
        doc = inspect.getdoc(flow)
        npds = NumpyDocString(doc)
        _doc = npds['Parameters']

        args, defaults = get_args_default(flow)

        len_args = len(args)
        len_defaults = len(defaults)

        flow_args = self.add_argument_group(
            '{0} arguments(optional)'.format(name))

        for i, arg_name in enumerate(args):
            is_not_optional = i < len_args - len_defaults
            if 'out_' in arg_name or is_not_optional:
                continue

            arg_name = '{0}.{1}'.format(short_name, arg_name)
            sub_flow_optionals[name][arg_name] = None
            prefix = '--'
            typestr = _doc[i][1]
            dtype, isnarg = self._select_dtype(typestr)
            help_msg = ''.join(_doc[i][2])

            _args = ['{0}{1}'.format(prefix, arg_name)]
            _kwargs = {'help': help_msg, 'type': dtype, 'action': 'store'}

            _kwargs['metavar'] = dtype.__name__
            if dtype is bool:
                _kwargs['action'] = 'store_true'
                default_ = dict()
                default_[arg_name] = False
                self.set_defaults(**default_)
                del _kwargs['type']
                del _kwargs['metavar']
            elif dtype is bool:
                _kwargs['type'] = int
                _kwargs['choices'] = [0, 1]

            if dtype is tuple:
                _kwargs['type'] = str

            if isnarg:
                _kwargs['nargs'] = '*'

            if _kwargs['action'] != 'store_true':
                _kwargs['type'] = none_or_dtype(_kwargs['type'])

            flow_args.add_argument(*_args, **_kwargs)

    return sub_flow_optionals

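# `get_args_default` is defined elsewhere in dipy.workflows. Hypothetical
# sketch of its contract (an assumption, not the verified implementation):
# return the run method's argument names (minus `self`) together with the
# default values of the keyword arguments, so that in the loops above
# `len(args) - len(defaults)` counts the leading positional parameters.
import inspect


def get_args_default_sketch(func):
    params = inspect.signature(func).parameters.values()
    names = [p.name for p in params if p.name != 'self']
    defaults = [p.default for p in params
                if p.default is not inspect.Parameter.empty]
    return names, defaults
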
def add_workflow(self, workflow): """Take a workflow object and use introspection to extract the parameters, types and docstrings of its run method. Then add these parameters to the current arparser's own params to parse. If the workflow is of type combined_workflow, the optional input parameters of its sub workflows will also be added. Parameters ---------- workflow : dipy.workflows.workflow.Workflow Workflow from which to infer parameters. Returns ------- sub_flow_optionals : dictionary of all sub workflow optional parameters """ doc = inspect.getdoc(workflow.run) npds = NumpyDocString(doc) self.doc = npds['Parameters'] self.description = '{0}\n\n{1}'.format( ' '.join(npds['Summary']), ' '.join(npds['Extended Summary'])) if npds['References']: ref_text = [text if text else "\n" for text in npds['References']] ref_idx = self.epilog.find('References: \n') + \ len('References: \n') self.epilog = "{0}{1}\n{2}".format(self.epilog[:ref_idx], ''.join(ref_text), self.epilog[ref_idx:]) self._output_params = [ param for param in npds['Parameters'] if 'out_' in param[0] ] self._positional_params = [ param for param in npds['Parameters'] if 'optional' not in param[1] and 'out_' not in param[0] ] self._optional_params = [ param for param in npds['Parameters'] if 'optional' in param[1] ] args, defaults = get_args_default(workflow.run) output_args = self.add_argument_group('output arguments(optional)') len_args = len(args) len_defaults = len(defaults) nb_positional_variable = 0 if len_args != len(self.doc): raise ValueError(self.prog + ": Number of parameters in the " "doc string and run method does not match. " "Please ensure that the number of parameters " "in the run method is same as the doc string.") for i, arg in enumerate(args): prefix = '' is_optional = i >= len_args - len_defaults if is_optional: prefix = '--' typestr = self.doc[i][1] dtype, isnarg = self._select_dtype(typestr) help_msg = ' '.join(self.doc[i][2]) _args = ['{0}{1}'.format(prefix, arg)] _kwargs = {'help': help_msg, 'type': dtype, 'action': 'store'} if is_optional: _kwargs['metavar'] = dtype.__name__ if dtype is bool: _kwargs['action'] = 'store_true' default_ = dict() default_[arg] = False self.set_defaults(**default_) del _kwargs['type'] del _kwargs['metavar'] elif dtype is bool: _kwargs['type'] = int _kwargs['choices'] = [0, 1] if dtype is tuple: _kwargs['type'] = str if isnarg: if is_optional: _kwargs['nargs'] = '*' else: _kwargs['nargs'] = '+' nb_positional_variable += 1 if 'out_' in arg: output_args.add_argument(*_args, **_kwargs) else: if _kwargs['action'] != 'store_true': _kwargs['type'] = none_or_dtype(_kwargs['type']) self.add_argument(*_args, **_kwargs) if nb_positional_variable > 1: raise ValueError(self.prog + " : All positional arguments present" " are gathered into a list. It does not make" "much sense to have more than one positional" " argument with 'variable string' as dtype." " Please, ensure that 'variable (type)'" " appears only once as a positional argument.") return self.add_sub_flow_args(workflow.get_sub_runs())
def add_workflow(self, workflow): """ Take a workflow object and use introspection to extract the parameters, types and docstrings of its run method. Then add these parameters to the current arparser's own params to parse. If the workflow is of type combined_workflow, the optional input parameters of its sub workflows will also be added. Parameters ----------- workflow : dipy.workflows.workflow.Workflow Workflow from which to infer parameters. Returns ------- sub_flow_optionals : dictionary of all sub workflow optional parameters """ doc = inspect.getdoc(workflow.run) npds = NumpyDocString(doc) self.doc = npds['Parameters'] self.description = ' '.join(npds['Extended Summary']) if npds['References']: ref_text = [text if text else "\n" for text in npds['References']] ref_idx = self.epilog.find('References: \n') + len( 'References: \n') self.epilog = "{0}{1}\n{2}".format( self.epilog[:ref_idx], ''.join([text for text in ref_text]), self.epilog[ref_idx:]) self.outputs = [ param for param in npds['Parameters'] if 'out_' in param[0] ] args, defaults = get_args_default(workflow.run) len_args = len(args) len_defaults = len(defaults) output_args = \ self.add_argument_group('output arguments(optional)') for i, arg in enumerate(args): prefix = '' is_optionnal = i >= len_args - len_defaults if is_optionnal: prefix = '--' typestr = self.doc[i][1] dtype, isnarg = self._select_dtype(typestr) help_msg = ''.join(self.doc[i][2]) _args = ['{0}{1}'.format(prefix, arg)] _kwargs = {'help': help_msg, 'type': dtype, 'action': 'store'} if is_optionnal: _kwargs['metavar'] = dtype.__name__ if dtype is bool: _kwargs['action'] = 'store_true' default_ = dict() default_[arg] = False self.set_defaults(**default_) del _kwargs['type'] del _kwargs['metavar'] elif dtype is bool: _kwargs['type'] = int _kwargs['choices'] = [0, 1] if dtype is tuple: _kwargs['type'] = str if isnarg: _kwargs['nargs'] = '*' if 'out_' in arg: output_args.add_argument(*_args, **_kwargs) else: self.add_argument(*_args, **_kwargs) return self.add_sub_flow_args(workflow.get_sub_runs())