Example #1
def help2md(filepath, output='README.md', name='code'):
    """help2md
    Converts python help to a .md file.
    params:
        - filepath - The full path of the input file
        - output - The full path of the output file ( defaults to README.md )
        - name - The name of the file. It puts this at the top of the document.
    """
    document = '# ' + name + '\n'
    c = imp.load_source(name, filepath)
    doc = inspect.getdoc(c)
    if doc:
        document += doc + '\n'
    else:
        document += '\n'
    
    main_file = getattr(c, '__file__')
    modules = []
    items = dir(c)
    for i in items:
        item = getattr(c, i)

        if inspect.isfunction(item):
            doc = inspect.getdoc(item)
            if doc is None:
                doc = 'No documentation'
            params = inspect.formatargspec(*inspect.getfullargspec(item))
            document += ('\n\n## ' + i + '\n```python\ndef ' + i + params +
                ':\n\t"""' + '\n\t'.join(doc.split('\n'))
                + '"""\n```')
        
        if inspect.isclass(item):
            doc = inspect.getdoc(item)
            if doc is None:
                doc = 'No documentation'
            document += ('\n\n## ' + i + '\n```python\nclass ' + i +
                '():\n\t"""' + '\n\t'.join(doc.split('\n'))
                + '"""\n```')
            methods = dir(item)
            
            for m in methods:
                mitem = getattr(item, m)
                if inspect.isfunction(mitem):
                    params = inspect.formatargspec(
                        *inspect.getfullargspec(mitem))
                    doc = inspect.getdoc(mitem)
                    if doc is None:
                        doc = 'No documentation'
                    document += ('\n\n### ' + m + '\n```python\n\tdef ' + m
                    + params + ':\n\t\t"""' + '\n\t\t'.join(
                        doc.split('\n')) + '"""\n```')
        
        if inspect.ismodule(item):
            modules.append(i)
    
    document += '\n\n# Dependencies\n- ' + '\n- '.join(modules)
    document += '\n\n***Made with help2md***'
    with open(output, 'w') as ofile:
        ofile.write(document)
    return None
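
The snippet above relies on imp.load_source and inspect.formatargspec, both of which were later removed from the standard library (imp in Python 3.12, formatargspec in Python 3.11). A minimal sketch of stdlib replacements, assuming Python 3 only:

import importlib.util
import inspect

def load_source(name, filepath):
    # importlib-based replacement for imp.load_source
    spec = importlib.util.spec_from_file_location(name, filepath)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

def format_params(func):
    # replacement for inspect.formatargspec(*inspect.getfullargspec(func))
    return str(inspect.signature(func))  # e.g. "(a, b=1, *args, **kwargs)"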
Example #2
File: reports.py Project: tomjelinek/pcs
def build_message_from_report(code_builder_map, report_item, force_text=""):
    if report_item.code not in code_builder_map:
        return build_default_message_from_report(report_item, force_text)

    message = code_builder_map[report_item.code]
    # Sometimes the report item info is not needed for message building.
    # In that case the message is a plain string; otherwise it is a callable.
    if not callable(message):
        return message + force_text

    try:
        # A functools.partial object cannot be introspected directly because it
        # is not a regular Python function; inspect the original function instead.
        if isinstance(message, partial):
            keywords = message.keywords if message.keywords is not None else {}
            args = inspect.getfullargspec(message.func).args
            del args[:len(message.args)]
            args = [arg for arg in args if arg not in keywords]
        else:
            args = inspect.getfullargspec(message).args
        if "force_text" in args:
            return message(report_item.info, force_text)
        return message(report_item.info) + force_text
    except (TypeError, KeyError):
        return build_default_message_from_report(report_item, force_text)
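
A minimal sketch of the partial-unwrapping logic above (build is a hypothetical message builder, not from pcs): the unbound parameter names are recovered from message.func by dropping the positionally bound arguments and the bound keywords.

import inspect
from functools import partial

def build(severity, info, force_text=""):
    return "%s: %s%s" % (severity, info, force_text)

msg = partial(build, "ERROR")                 # binds 'severity' positionally
args = inspect.getfullargspec(msg.func).args  # ['severity', 'info', 'force_text']
del args[:len(msg.args)]                      # drop the positionally bound names
args = [arg for arg in args if arg not in (msg.keywords or {})]
assert args == ['info', 'force_text']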
Example #3
    def __sig_from_func(self, func):
        """Extract function signature, default arguments, keyword-only
        arguments, and whether or not variable positional or keyword
        arguments are allowed.  This also supports calling unbound instance
        methods by passing an object instance as the first argument;
        however, unbound classmethod and staticmethod objects are not
        callable, so we do not attempt to support them here."""

        if isinstance(func, types.MethodType):
            # A bound instance or class method.
            argspec = getfullargspec(func.__func__)
            self.pargl = argspec[0][1:]
        else:
            # A regular function, an unbound instance method, or a
            # bound static method.
            argspec = getfullargspec(func)
            self.pargl = argspec[0][:]

        if argspec[3] is not None:
            def_offset = len(self.pargl) - len(argspec[3])
            self.def_argv = dict((self.pargl[def_offset + i], argspec[3][i])
                                 for i in range(len(argspec[3])))
        else:
            self.def_argv = {}

        self.var_pargs = argspec[1] is not None
        self.var_kargs = argspec[2] is not None
        self.kargl     = argspec[4]

        # We need keyword-only arguments' default values too.
        if argspec[5] is not None:
            self.def_argv.update(argspec[5])
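
The positional indices used above (argspec[0], argspec[3], argspec[5], ...) map onto FullArgSpec's named fields; a small sketch with an illustrative function:

from inspect import getfullargspec

def demo(a, b=1, *args, c, d=2, **kwargs): pass

spec = getfullargspec(demo)
assert spec[0] == spec.args == ['a', 'b']
assert spec[1] == spec.varargs == 'args'
assert spec[2] == spec.varkw == 'kwargs'
assert spec[3] == spec.defaults == (1,)
assert spec[4] == spec.kwonlyargs == ['c', 'd']
assert spec[5] == spec.kwonlydefaults == {'d': 2}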
Example #4
    def _check_methods(self, cls, interface, error_mode):
        """ Checks that a class implements the methods on an interface.
        """
        cls_methods = self._get_public_methods(cls)
        interface_methods = self._get_public_methods(interface)

        for name in interface_methods:
            if name not in cls_methods:
                return self._handle_error(
                    MISSING_METHOD
                    % (
                        self._class_name(cls),
                        name,
                        self._class_name(interface),
                    ),
                    error_mode,
                )

            # Check that the method signatures are the same:
            cls_argspec = getfullargspec(cls_methods[name])
            interface_argspec = getfullargspec(interface_methods[name])

            if cls_argspec != interface_argspec:
                return self._handle_error(
                    BAD_SIGNATURE
                    % (
                        self._class_name(cls),
                        name,
                        self._class_name(interface),
                    ),
                    error_mode,
                )

        return True
Example #5
def _function_signatures_match(func1: types.FunctionType,
                               func2: types.FunctionType):
    """Return True if all args, kwargs, kw-only args, and defaults match"""
    spec1 = inspect.getfullargspec(func1)
    spec2 = inspect.getfullargspec(func2)
    return all(getattr(spec1, attr) == getattr(spec2, attr)
               for attr in spec_attrs_to_check)
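
The snippet references a module-level spec_attrs_to_check that is not shown; a plausible definition (an assumption, not taken from the original source):

# assumed constant: the FullArgSpec fields the docstring says must match
spec_attrs_to_check = ('args', 'varargs', 'varkw', 'defaults',
                       'kwonlyargs', 'kwonlydefaults')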
Example #6
def decorator(caller, _func=None):
    """decorator(caller) converts a caller function into a decorator"""
    if _func is not None:  # return a decorated function
        # this is obsolete behavior; you should use decorate instead
        return decorate(_func, caller)
    # else return a decorator function
    if inspect.isclass(caller):
        name = caller.__name__.lower()
        callerfunc = get_init(caller)
        doc = 'decorator(%s) converts functions/generators into ' \
            'factories of %s objects' % (caller.__name__, caller.__name__)
        fun = getfullargspec(callerfunc).args[1]  # second arg
    elif inspect.isfunction(caller):
        if caller.__name__ == '<lambda>':
            name = '_lambda_'
        else:
            name = caller.__name__
        callerfunc = caller
        doc = caller.__doc__
        fun = getfullargspec(callerfunc).args[0]  # first arg
    else:  # assume caller is an object with a __call__ method
        name = caller.__class__.__name__.lower()
        callerfunc = caller.__call__.__func__
        doc = caller.__call__.__doc__
        fun = getfullargspec(callerfunc).args[1]  # second arg
    evaldict = callerfunc.__globals__.copy()
    evaldict['_call_'] = caller
    evaldict['_decorate_'] = decorate
    return FunctionMaker.create(
        '%s(%s)' % (name, fun),
        'return _decorate_(%s, _call_)' % fun,
        evaldict, call=caller, doc=doc, module=caller.__module__,
        __wrapped__=caller)
Example #7
    def __init__(self, cids1=None, cids2=None, forwards=None, backwards=None, labels1=None, labels2=None, **kwargs):

        # NOTE: we explicitly specify ``cids1`` and ``cids2`` as the first two
        # arguments for backwards-compatibility with callers that use positional
        # arguments.

        if forwards is None and backwards is None:
            raise TypeError("Must supply either forwards or backwards")

        self.forwards = forwards
        self.backwards = backwards

        # NOTE: the getattr(forwards, 'func', forwards) in the following code
        # is to make sure that things work properly if the functions are
        # PartialResult objects.

        if labels1 is None:
            if forwards is not None:
                if isinstance(forwards, types.MethodType):
                    labels1 = getfullargspec(getattr(forwards, 'func', forwards))[0][1:]
                else:
                    labels1 = getfullargspec(getattr(forwards, 'func', forwards))[0]
            else:
                raise ValueError("labels1 needs to be specified if forwards isn't")

        if labels2 is None:
            if backwards is not None:
                if isinstance(backwards, types.MethodType):
                    labels2 = getfullargspec(getattr(backwards, 'func', backwards))[0][1:]
                else:
                    labels2 = getfullargspec(getattr(backwards, 'func', backwards))[0]
            else:
                raise ValueError("labels2 needs to be specified if backwards isn't")

        super(MultiLink, self).__init__(cids1=cids1, cids2=cids2, **kwargs)
Example #8
File: command.py Project: gonicus/gosa
    def serve(self):
        """
        Start serving the command registry to the outside world. Send
        hello and register event callbacks.
        """

        for clazz in PluginRegistry.modules.values():
            for mname, method in getmembers(clazz):
                if ismethod(method) and hasattr(method, "isCommand"):
                    func = mname

                    # Adjust documentation
                    if not method.__help__:
                        raise CommandInvalid(C.make_error("COMMAND_WITHOUT_DOCS", method=func))

                    doc = re.sub(r"(\s|\n)+", " ", method.__help__).strip()

                    self.log.debug("registering %s" % func)
                    info = {
                        'name': func,
                        'path': "%s.%s" % (clazz.__class__.__name__, mname),
                        'sig': getfullargspec(method).args or [],
                        'target': clazz.get_target(),
                        'type': getattr(method, "type", NORMAL),
                        'doc': doc,
                        }

                    if 'self' in info['sig']:
                        info['sig'].remove('self')

                    self.commands[func] = info
Example #9
    def parameters(self):
        """
            Returns list of parameters required for func
        """
        if hasattr(self.func, "_original_"):
            return inspect.getfullargspec(self.func._original_)

        return inspect.getfullargspec(self.func)
Example #10
    def decorator(func):
        # If the function is decorated with @functools.wraps then use the
        # __wrapped__ attribute to look at the original function signature.
        #
        # This allows us to see past the generic *args, **kwargs seen on most decorators.
        if hasattr(func, '__wrapped__'):
            args = inspect.getfullargspec(func.__wrapped__)[0]
        else:
            args = inspect.getfullargspec(func)[0]
        args.pop(0)
        if async_callbacks:
            if type(async_callbacks) != tuple:
                raise TypeError('async_callbacks must be a tuple of (keyword for return callback, keyword for error callback)')
            if len(async_callbacks) != 2:
                raise ValueError('async_callbacks must be a tuple of (keyword for return callback, keyword for error callback)')
            args.remove(async_callbacks[0])
            args.remove(async_callbacks[1])

        if sender_keyword:
            args.remove(sender_keyword)
        if rel_path_keyword:
            args.remove(rel_path_keyword)
        if path_keyword:
            args.remove(path_keyword)
        if destination_keyword:
            args.remove(destination_keyword)
        if message_keyword:
            args.remove(message_keyword)
        if connection_keyword:
            args.remove(connection_keyword)

        if in_signature:
            in_sig = tuple(Signature(in_signature))

            if len(in_sig) > len(args):
                raise ValueError('input signature is longer than the number of arguments taken')
            elif len(in_sig) < len(args):
                raise ValueError('input signature is shorter than the number of arguments taken')

        func._dbus_is_method = True
        func._dbus_async_callbacks = async_callbacks
        func._dbus_interface = dbus_interface
        func._dbus_in_signature = in_signature
        func._dbus_out_signature = out_signature
        func._dbus_sender_keyword = sender_keyword
        func._dbus_path_keyword = path_keyword
        func._dbus_rel_path_keyword = rel_path_keyword
        func._dbus_destination_keyword = destination_keyword
        func._dbus_message_keyword = message_keyword
        func._dbus_connection_keyword = connection_keyword
        func._dbus_args = args
        func._dbus_get_args_options = dict(byte_arrays=byte_arrays)
        if is_py2:
            func._dbus_get_args_options['utf8_strings'] = kwargs.get(
                'utf8_strings', False)
        elif 'utf8_strings' in kwargs:
            raise TypeError("unexpected keyword argument 'utf8_strings'")
        return func
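
A small sketch of why the decorator checks __wrapped__ (noisy and greet are illustrative names): getfullargspec does not follow the __wrapped__ chain, so it reports the wrapper's generic *args/**kwargs signature unless the original function is inspected explicitly.

import functools
import inspect

def noisy(func):
    @functools.wraps(func)            # sets wrapper.__wrapped__ = func
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper

@noisy
def greet(self, name): pass

assert inspect.getfullargspec(greet).varargs == 'args'  # wrapper's own spec
assert inspect.getfullargspec(greet.__wrapped__).args == ['self', 'name']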
Example #11
File: core.py Project: swayf/pyclap
 def get_arg_spec(self, obj):
     if inspect.isfunction(obj):
         arg_spec = getfullargspec(obj)
     elif inspect.ismethod(obj):
         arg_spec = getfullargspec(obj)
         del arg_spec.args[0] # remove first argument
     else:
         raise ValueError('Argument is neither a function nor a method')
     return arg_spec
Example #12
 def create(balancer_type, **kwargs):
     try:
         balancer = BALANCER_TYPES[balancer_type.lower()]
     except KeyError:
         # unknown balancer type; without this guard 'balancer' would be
         # unbound in the handler below, turning the KeyError into a NameError
         logs.hmlog.error(format_exc())
         return None
     try:
         args = inspect.getfullargspec(balancer.__init__).args
         params = {k: v for k, v in kwargs.items() if k in args}
         return balancer(**params)
     except Exception:
         logs.hmlog.debug(inspect.getfullargspec(balancer.__init__).args)
         logs.hmlog.error(format_exc())
         return None
Example #13
File: __init__.py Project: AmesianX/angr
 def __new__(cls, name, bases, attrs):
     import inspect
     if name != 'ExplorationTechniqueCompat':
         if 'step' in attrs and not inspect.getfullargspec(attrs['step']).defaults:
             attrs['step'] = cls._step_factory(attrs['step'])
         if 'filter' in attrs and inspect.getfullargspec(attrs['filter']).args[1] != 'simgr':
             attrs['filter'] = cls._filter_factory(attrs['filter'])
         if 'step_state' in attrs and inspect.getfullargspec(attrs['step_state']).args[1] != 'simgr':
             attrs['step_state'] = cls._step_state_factory(attrs['step_state'])
     return type.__new__(cls, name, bases, attrs)
Example #14
    def test_getfullargspec(self):
        # Test preservation of function argument specification. It
        # actually needs to match that of the adapter function the
        # prototype of which was supplied via the dummy function.

        def _adapter(arg1, arg2, *, arg3=None, **kwargs): pass

        function1a_argspec = inspect.getfullargspec(_adapter)
        function1d_argspec = inspect.getfullargspec(function1d)
        self.assertEqual(function1a_argspec, function1d_argspec)
Example #15
def _get_argspec(f):
    if sys.version_info[:2] >= (3, 0):
        # getargspec has been deprecated since Python 3.0 (and was removed in
        # 3.11), so always use getfullargspec on Python 3
        argspec = inspect.getfullargspec(f)
    else:
        # Python 2 has no getfullargspec; getargspec is the only option
        argspec = inspect.getargspec(f)
    return argspec
Example #16
    def test_spec_inspect_signature(self):

        def myfunc(x, y):
            pass

        mock = create_autospec(myfunc)
        mock(1, 2)
        mock(x=1, y=2)

        self.assertEqual(inspect.getfullargspec(mock), inspect.getfullargspec(myfunc))
        self.assertEqual(mock.mock_calls, [call(1, 2), call(x=1, y=2)])
        self.assertRaises(TypeError, mock, 1)
Example #17
def _filter_baseanalysis_kwargs(function, kwargs):
    """
    create two dictionaries with kwargs separated for function and AnalysisBase

    Parameters
    ----------
    function : callable
        function to be called
    kwargs : dict
        keyword argument dictionary

    Returns
    -------
    base_args : dict
        dictionary of AnalysisBase kwargs
    kwargs : dict
        kwargs without AnalysisBase kwargs

    Raises
    ------
    ValueError : if ``function`` has the same kwargs as ``BaseAnalysis``
    """
    try:
        # pylint: disable=deprecated-method
        base_argspec = inspect.getfullargspec(AnalysisBase.__init__)
    except AttributeError:
        # pylint: disable=deprecated-method
        base_argspec = inspect.getargspec(AnalysisBase.__init__)

    n_base_defaults = len(base_argspec.defaults)
    base_kwargs = {name: val
                   for name, val in zip(base_argspec.args[-n_base_defaults:],
                                        base_argspec.defaults)}

    try:
        # pylint: disable=deprecated-method
        argspec = inspect.getfullargspec(function)
    except AttributeError:
        # pylint: disable=deprecated-method
        argspec = inspect.getargspec(function)

    for base_kw in six.iterkeys(base_kwargs):
        if base_kw in argspec.args:
            raise ValueError(
                "argument name '{}' clashes with AnalysisBase argument. "
                "Not allowed are: {}".format(base_kw, base_kwargs.keys()))

    base_args = {}
    for argname, default in six.iteritems(base_kwargs):
        base_args[argname] = kwargs.pop(argname, default)

    return base_args, kwargs
Example #18
    def test_spec_inspect_signature_annotations(self):

        def foo(a: int, b: int=10, *, c:int) -> int:
            return a + b + c

        mock = create_autospec(foo)
        mock(1, 2, c=3)
        mock(1, c=3)

        self.assertEqual(inspect.getfullargspec(mock), inspect.getfullargspec(foo))
        self.assertEqual(mock.mock_calls, [call(1, 2, c=3), call(1, c=3)])
        self.assertRaises(TypeError, mock, 1)
        self.assertRaises(TypeError, mock, 1, 2, 3, c=4)
Example #19
File: core.py Project: swayf/pyclap
 def get_arg_spec(self, obj):
     if inspect.isclass(obj):
         if obj.__init__ is object.__init__: # to avoid an error
             arg_spec = getfullargspec(lambda self: None)
         else:
             arg_spec = getfullargspec(obj.__init__)
         del arg_spec.args[0] # remove first argument
     elif hasattr(obj, '__call__'):
         arg_spec = getfullargspec(obj.__call__)
         del arg_spec.args[0] # remove first argument
     else:
         raise TypeError('Could not determine the signature of ' + str(obj))
     return arg_spec
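
The in-place del arg_spec.args[0] works because FullArgSpec is an immutable namedtuple whose args field is a plain, mutable list; a quick sketch:

from inspect import getfullargspec

class C:
    def __call__(self, x):
        pass

spec = getfullargspec(C.__call__)
assert spec.args == ['self', 'x']
del spec.args[0]                  # mutates the list inside the namedtuple
assert spec.args == ['x']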
Example #20
File: test_magic.py Project: nengo/nengo
def test_classmethod():
    """Test that @decorator works on classmethods."""
    @decorator
    def test_decorator(wrapped, instance, args, kwargs):
        global state
        state = 'run'
        valid_names = {'TestBeforeStaticmethod', 'TestAfterStaticmethod'}
        assert (instance is None and args[0].__name__ in valid_names
                or instance.__name__ in valid_names)
        return wrapped(*args, **kwargs)

    # --- Decorator before classmethod
    class TestBeforeStaticmethod:
        @test_decorator
        @classmethod
        def f(cls, a, b):
            """Return 1."""
            return 1

    _test_decorated(TestBeforeStaticmethod.f)
    inst = TestBeforeStaticmethod()
    _test_decorated(inst.f)

    # Make sure introspection works
    assert inspect.getfullargspec(inst.f).args == ['cls', 'a', 'b']
    assert inspect.getsource(inst.f) == ('        @test_decorator\n'
                                         '        @classmethod\n'
                                         '        def f(cls, a, b):\n'
                                         '            """Return 1."""\n'
                                         '            return 1\n')

    # --- Decorator after classmethod
    class TestAfterStaticmethod:
        @classmethod
        @test_decorator
        def f(cls, a, b):
            """Return 1."""
            return 1

    _test_decorated(TestAfterStaticmethod.f)
    inst = TestAfterStaticmethod()
    _test_decorated(inst.f)

    # Make sure introspection works
    assert inspect.getfullargspec(inst.f).args == ['cls', 'a', 'b']
    assert inspect.getsource(inst.f) == ('        @classmethod\n'
                                         '        @test_decorator\n'
                                         '        def f(cls, a, b):\n'
                                         '            """Return 1."""\n'
                                         '            return 1\n')
Example #21
 def test_signature(self):
     try:
         from inspect import getfullargspec, getdoc
         self.assertEqual(getfullargspec(self.cached.__call__), getfullargspec(self.O.__call__))
         self.assertEqual(getdoc(self.cached.__call__), getdoc(self.O.__call__))
     except ImportError:
         try:
             from inspect import signature, getdoc
             print(signature(self.cached.__call__), signature(self.O.__call__))
             self.assertEqual(signature(self.cached.__call__), signature(self.O.__call__))
             self.assertEqual(getdoc(self.cached.__call__), getdoc(self.O.__call__))
         except ImportError:
             from inspect import getargspec, getdoc
             self.assertEqual(getargspec(self.cached.__call__), getargspec(self.O.__call__))
             self.assertEqual(getdoc(self.cached.__call__), getdoc(self.O.__call__))
Example #22
File: test_magic.py Project: nengo/nengo
def test_staticmethod():
    """Test that @decorator works on staticmethods."""
    @decorator
    def test_decorator(wrapped, instance, args, kwargs):
        global state
        state = 'run'
        assert instance is None
        return wrapped(*args, **kwargs)

    # --- Decorator before staticmethod
    class TestBeforeStaticmethod:
        @test_decorator
        @staticmethod
        def f(a, b):
            """Return 1."""
            return 1

    _test_decorated(TestBeforeStaticmethod.f)
    inst = TestBeforeStaticmethod()
    _test_decorated(inst.f)

    # Make sure introspection works
    assert inspect.getfullargspec(inst.f).args == ['a', 'b']
    assert inspect.getsource(inst.f) == ('        @test_decorator\n'
                                         '        @staticmethod\n'
                                         '        def f(a, b):\n'
                                         '            """Return 1."""\n'
                                         '            return 1\n')

    # --- Decorator after staticmethod
    class TestAfterStaticmethod:
        @staticmethod
        @test_decorator
        def f(a, b):
            """Return 1."""
            return 1

    _test_decorated(TestAfterStaticmethod.f)
    inst = TestAfterStaticmethod()
    _test_decorated(inst.f)

    # Make sure introspection works
    assert inspect.getfullargspec(inst.f).args == ['a', 'b']
    assert inspect.getsource(inst.f) == ('        @staticmethod\n'
                                         '        @test_decorator\n'
                                         '        def f(a, b):\n'
                                         '            """Return 1."""\n'
                                         '            return 1\n')
Example #23
    def _get_setters_and_targets(self):
        """
        Get the attribute strings and a full path to where the setter
        is defined for all setters in an object.
        """

        setters = []
        for name in dir(self.o):
            if not name.startswith('set_'):
                continue
            o = getattr(self.o, name)
            if not six.callable(o):
                continue
            if six.PY2:
                nargs = len(inspect.getargspec(o)[0])
            else:
                nargs = len(inspect.getfullargspec(o)[0])
            if nargs < 2:
                continue
            func = o
            if self.is_alias(func):
                continue
            source_class = self.o.__module__ + "." + self.o.__name__
            for cls in self.o.mro():
                if name in cls.__dict__:
                    source_class = cls.__module__ + "." + cls.__name__
                    break
            setters.append((name[4:], source_class + "." + name))
        return setters
Example #24
File: _core.py Project: FRC4186/pyfrc
def IsEnabled():
    # backwards compatibility until 2015
    argspec = inspect.getfullargspec(internal.on_IsEnabled)
    if len(argspec.args) == 0 or (len(argspec.args) == 1 and argspec.args[0] == 'self'):
        return internal.on_IsEnabled()
    else:
        return internal.on_IsEnabled(GetClock())
Example #25
def test_wraps():
    """
    Tests the compatibility replacement for functools.wraps which supports
    argument preservation across all supported Python versions.
    """

    def foo(a, b, c=1, d=2, e=3, **kwargs):
        """A test function."""

        return a, b, c, d, e, kwargs

    @wraps(foo)
    def bar(*args, **kwargs):
        return ('test',) + foo(*args, **kwargs)

    expected = ('test', 1, 2, 3, 4, 5, {'f': 6, 'g': 7})
    assert bar(1, 2, 3, 4, 5, f=6, g=7) == expected
    assert bar.__name__ == 'foo'

    if foo.__doc__ is not None:
        # __doc__ may be None when running under -OO optimization
        assert bar.__doc__ == "A test function."

    if hasattr(foo, '__qualname__'):
        assert bar.__qualname__ == foo.__qualname__

    if six.PY2:
        argspec = inspect.getargspec(bar)
        assert argspec.keywords == 'kwargs'
    else:
        argspec = inspect.getfullargspec(bar)
        assert argspec.varkw == 'kwargs'

    assert argspec.args == ['a', 'b', 'c', 'd', 'e']
    assert argspec.defaults == (1, 2, 3)
Example #26
File: classing.py Project: ioflo/ioflo
        def wrapper(*args, **kwargs):
            """
            When called returns instance of AttributiveGenerator instead of generator.
            """
            def __iter__(self):
                return self

            def send(self):
                raise NotImplementedError

            def throw(self):
                raise NotImplementedError

            tdict = {'__iter__': __iter__, 'send': send, 'throw': throw}
            # use type to create a dynamic subclass of Generator
            AG = type("AttributiveGenerator", (Generator,), tdict)
            ag = AG()  # create an instance so we can inject it into genfunc

            fargs = inspect.getfullargspec(genfunc).args
            if fargs and fargs[0] == 'self':
                gen = genfunc(args[0], ag, *args[1:], **kwargs)
            else:
                gen = genfunc(ag, *args, **kwargs)  # create generator insert ag ref

            # now add to class references to gen attributes "duckify"
            for attr in ('__next__', 'close', 'send', 'throw',
                         'gi_code', 'gi_frame', 'gi_running', 'gi_yieldfrom'):
                setattr(AG, attr, getattr(gen, attr))

            functools.update_wrapper(wrapper=ag, wrapped=gen)
            return ag
Example #27
def getinfo(func):
    """
    Returns an info dictionary containing:
    - name (the name of the function : str)
    - argnames (the names of the arguments : list)
    - defaults (the values of the default arguments : tuple)
    - signature (the signature : str)
    - doc (the docstring : str)
    - module (the module name : str)
    - dict (the function __dict__ : str)

    >>> def f(self, x=1, y=2, *args, **kw): pass

    >>> info = getinfo(f)

    >>> info["name"]
    'f'
    >>> info["argnames"]
    ['self', 'x', 'y', 'args', 'kw']

    >>> info["defaults"]
    (1, 2)

    >>> info["signature"]
    'self, x, y, *args, **kw'
    """
    assert inspect.ismethod(func) or inspect.isfunction(func)
    if sys.version_info[0] >= 3:
        argspec = inspect.getfullargspec(func)
    else:
        argspec = inspect.getargspec(func)
    regargs, varargs, varkwargs, defaults = argspec[:4]
    argnames = list(regargs)
    if varargs:
        argnames.append(varargs)
    if varkwargs:
        argnames.append(varkwargs)
    signature = inspect.formatargspec(
        regargs, varargs, varkwargs, defaults, formatvalue=lambda value: ""
    )[1:-1]

    # pypy compatibility
    if hasattr(func, '__closure__'):
        _closure = func.__closure__
        _globals = func.__globals__
    else:
        _closure = func.func_closure
        _globals = func.func_globals

    return dict(
        name=func.__name__,
        argnames=argnames,
        signature=signature,
        defaults=func.__defaults__,
        doc=func.__doc__,
        module=func.__module__,
        dict=func.__dict__,
        globals=_globals,
        closure=_closure,
    )
Example #28
    def menu_decorator(f):
        """Decorator of a view function that should be included in the menu."""
        if isinstance(app, Blueprint):
            endpoint = app.name + '.' + f.__name__
            before_first_request = app.before_app_first_request
        else:
            endpoint = f.__name__
            before_first_request = app.before_first_request

        expected = inspect.getfullargspec(f).args if PY3 else \
            inspect.getargspec(f).args

        @before_first_request
        def _register_menu_item():
            # str(path) allows path to be a string-convertible object
            # that may be useful for delayed evaluation of path
            item = current_menu.submenu(str(path))
            item.register(
                endpoint,
                text,
                order,
                endpoint_arguments_constructor=endpoint_arguments_constructor,
                dynamic_list_constructor=dynamic_list_constructor,
                active_when=active_when,
                visible_when=visible_when,
                expected_args=expected,
                **kwargs)
        return f
Example #29
def test_wraps_exclude_names():
    """
    Test the optional ``exclude_names`` argument to the wraps decorator.
    """

    # This particular test demonstrates wrapping an instance method
    # as a function and excluding the "self" argument:

    class TestClass(object):
        def method(self, a, b, c=1, d=2, **kwargs):
            return (a, b, c, d, kwargs)

    test = TestClass()

    @wraps(test.method, exclude_args=('self',))
    def func(*args, **kwargs):
        return test.method(*args, **kwargs)

    if six.PY2:
        argspec = inspect.getargspec(func)
    else:
        argspec = inspect.getfullargspec(func)
    assert argspec.args == ['a', 'b', 'c', 'd']

    assert func('a', 'b', e=3) == ('a', 'b', 1, 2, {'e': 3})
Example #30
def getargspec(func, f):
    # Check if it's a real bound method or if it's implicitly calling __init__
    # (i.e. FooClass(...) and not FooClass.__init__(...) -- the former would
    # not take 'self', the latter would:
    try:
        func_name = getattr(f, '__name__', None)
    except TypeError:
        return None

    try:
        is_bound_method = ((inspect.ismethod(f) and f.im_self is not None)
                           or (func_name == '__init__'
                               and not func.endswith('.__init__')))
    except:
        # if f is a method from a xmlrpclib.Server instance, func_name ==
        # '__init__' throws xmlrpclib.Fault (see #202)
        return None
    try:
        if PY3:
            argspec = inspect.getfullargspec(f)
        else:
            argspec = inspect.getargspec(f)

        argspec = list(argspec)
        fixlongargs(f, argspec)
        argspec = [func, argspec, is_bound_method]
    except (TypeError, KeyError):
        with AttrCleaner(f):
            argspec = getpydocspec(f, func)
        if argspec is None:
            return None
        if inspect.ismethoddescriptor(f):
            argspec[1][0].insert(0, 'obj')
        argspec.append(is_bound_method)
    return argspec
Example #31
__author__ = "Davide Micieli"
__all__ = [
    'read_tiff', 'read_fits', 'read_fits_stack', 'read_tiff_stack',
    'get_rect_coordinates_from_roi', 'read_image', 'read_stack_from_list',
    'read_dataset', 'read_image_stack', 'write_fits', 'write_tiff',
    'write_tiff_stack', 'write_fits_stack', 'get_filename_pattern'
]

#logging.basicConfig(level=logging.WARNING)
logs = logging.getLogger(__name__)

known_ext = ['.tif', '.tiff', '.fits']

# For compatibility across astropy versions: the 'clobber' option was deprecated and later replaced by 'overwrite'
if 'clobber' in inspect.getfullargspec(astropy.io.fits.writeto).args:
    arg_overwrite = 'clobber'
else:
    arg_overwrite = 'overwrite'
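
# Hedged usage sketch (not in the original module): forwarding the detected
# keyword lets the same call support both old and new astropy releases:
#   astropy.io.fits.writeto(fname, data, **{arg_overwrite: True})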


def get_file_extension(fname):
    """
	This function returns the extension of a given file name or file path
	"""
    return os.path.splitext(fname)[1]


def get_rect_coordinates_from_roi(fname):
    """
		This function returns the coordinates from a rectangular region of interest
    def transcribe_params(self, params=None, **kwargs):
        if params is None:
            params = self.params  # reference
        params = params.copy()  # don't contaminate DAI params, since we know we use lgbm-xgb as base

        has_eval_set = self.have_eval_set(kwargs)  # only needs (and does) operate at fit-time
        from catboost import CatBoostClassifier, CatBoostRegressor, EFstrType
        fullspec_regression = inspect.getfullargspec(CatBoostRegressor)
        # defaults align with the tail of args ('self' has no default), so
        # slice args from the right before zipping
        kwargs_regression = {k: v for k, v in zip(fullspec_regression.args[-len(fullspec_regression.defaults):],
                                                  fullspec_regression.defaults)}
        fullspec_classification = inspect.getfullargspec(CatBoostClassifier)
        kwargs_classification = {k: v for k, v in zip(fullspec_classification.args[-len(fullspec_classification.defaults):],
                                                      fullspec_classification.defaults)}

        if self.num_classes == 1:
            allowed_params = kwargs_regression
        else:
            allowed_params = kwargs_classification

        params_copy = copy.deepcopy(params)
        for k, v in params_copy.items():
            if k not in allowed_params:
                del params[k]

        # now transcribe
        k = 'boosting_type'
        if k in params:
            params[k] = 'Plain'

        k = 'grow_policy'
        if k in params:
            params[k] = 'Depthwise' if params[k] == 'depthwise' else 'Lossguide'

        k = 'eval_metric'
        if k in params and params[k].upper() == 'AUC':
            params[k] = 'AUC'

        metric_map = {'regression': 'RMSE', 'mse': 'RMSE', 'mae': 'MAE', "mape": 'MAPE', "huber": 'Huber',
                      "fair": 'FairLoss',
                      "rmse": "RMSE",
                      "gamma": "RMSE",  # unsupported by catboost
                      "tweedie": "Tweedie", "poisson": "Poisson", "quantile": "Quantile",
                      'binary': 'Logloss',
                      'auc': 'AUC', "xentropy": 'CrossEntropy',
                      'multiclass': 'MultiClass'}

        k = 'objective'
        if k in params and params[k] in metric_map:
            params[k] = metric_map[params[k]]

        k = 'eval_metric'
        if k in params and params[k] in metric_map:
            params[k] = metric_map[params[k]]

        if 'objective' in params:
            # don't randomly choose these since then model not stable GA -> final
            # but backup shouldn't really be used AFAIK
            if params['objective'] == 'Huber':
                backup = float(config.huber_alpha_list[0])
                params['delta'] = params.pop('alpha', backup)
            if params['objective'] == 'Quantile':
                backup = float(config.quantile_alpha[0])
                params['delta'] = params.pop('alpha', backup)
            if params['objective'] == 'Tweedie':
                backup = float(config.tweedie_variance_power_list[0])
                params['tweedie_variance_power'] = params.pop('tweedie_variance_power', backup)
            if params['objective'] == 'FairLoss':
                backup = float(config.fair_c_list[0])
                params['smoothness'] = params.pop('fair_c', backup)

        params.pop('verbose', None)
        params.pop('verbose_eval', None)
        params.pop('logging_level', None)

        if 'grow_policy' in params:
            if params['grow_policy'] == 'Lossguide':
                params.pop('max_depth', None)
            if params['grow_policy'] == 'Depthwise':
                params.pop('num_leaves', None)
        else:
            params['grow_policy'] = 'SymmetricTree'

        uses_gpus, n_gpus = self.get_uses_gpus(params)

        if params['task_type'] == 'CPU':
            params.pop('grow_policy', None)
            params.pop('num_leaves', None)
            params.pop('max_leaves', None)
            params.pop('min_data_in_leaf', None)
            params.pop('min_child_samples', None)

        if params['task_type'] == 'GPU':
            params.pop('colsample_bylevel', None)  # : 0.35

        if 'grow_policy' in params and params['grow_policy'] in ['Depthwise', 'SymmetricTree']:
            if 'max_depth' in params and params['max_depth'] in [0, -1]:
                params['max_depth'] = max(2, int(np.log(params.get('num_leaves', 2 ** 6))))
        else:
            params.pop('max_depth', None)
            params.pop('depth', None)
        if 'grow_policy' in params and params['grow_policy'] == 'Lossguide':
            # if 'num_leaves' in params and params['num_leaves'] == -1:
            #    params['num_leaves'] = 2 ** params.get('max_depth', 6)
            if 'max_leaves' in params and params['max_leaves'] in [0, -1]:
                params['max_leaves'] = 2 ** params.get('max_depth', 6)
        else:
            params.pop('max_leaves', None)
        if 'num_leaves' in params and 'max_leaves' in params:
            params.pop('num_leaves', None)
        # apply limits
        if 'max_leaves' in params:
            params['max_leaves'] = min(params['max_leaves'], 65536)
        if 'max_depth' in params:
            params['max_depth'] = min(params['max_depth'], 16)

        params.update({'train_dir': user_dir(),
                       'allow_writing_files': False,
                       'thread_count': self.params_base.get('n_jobs', 4)})

        if 'reg_lambda' in params and params['reg_lambda'] <= 0.0:
            params['reg_lambda'] = 3.0  # assume meant unset

        if self._can_handle_categorical:
            if 'max_cat_to_onehot' in params:
                params['one_hot_max_size'] = params['max_cat_to_onehot']
                params.pop('max_cat_to_onehot', None)
            if uses_gpus:
                params['one_hot_max_size'] = min(params.get('one_hot_max_size', 255), 255)
            else:
                params['one_hot_max_size'] = min(params.get('one_hot_max_size', 65535), 65535)

        if 'one_hot_max_size' in params:
            params['one_hot_max_size'] = max(self._min_one_hot_max_size, params['one_hot_max_size'])

        params['max_bin'] = params.get('max_bin', 254)
        if params['task_type'] == 'CPU':
            params['max_bin'] = min(params['max_bin'], 254)  # https://github.com/catboost/catboost/issues/1010
        if params['task_type'] == 'GPU':
            params['max_bin'] = min(params['max_bin'], 127)  # https://github.com/catboost/catboost/issues/1010

        if uses_gpus:
            # https://catboost.ai/docs/features/training-on-gpu.html
            params['devices'] = "%d-%d" % (
                self.params_base.get('gpu_id', 0), self.params_base.get('gpu_id', 0) + n_gpus - 1)
            params['gpu_ram_part'] = 0.3  # per-GPU, assumes GPU locking or no other experiments running

        if self.num_classes > 2:
            params.pop("eval_metric", None)

        params['train_dir'] = self.context.experiment_tmp_dir
        params['allow_writing_files'] = False

        # assume during fit self.params_base could have been updated
        assert 'n_estimators' in params
        assert 'learning_rate' in params
        params['n_estimators'] = self.params_base.get('n_estimators', 100)
        params['learning_rate'] = self.params_base.get('learning_rate', config.min_learning_rate)
        params['learning_rate'] = min(params['learning_rate'], 0.5)  # 1.0 leads to illegal access on GPUs
        params['learning_rate'] = max(config.min_learning_rate,
                                      max(self._min_learning_rate_catboost, params['learning_rate']))
        if 'early_stopping_rounds' not in params and has_eval_set:
            params['early_stopping_rounds'] = 150  # temp fix
            # assert 'early_stopping_rounds' in params

        if uses_gpus:
            params.pop('sampling_frequency', None)

        if not uses_gpus and params.get('bootstrap_type') == 'Poisson':
            params['bootstrap_type'] = 'Bayesian'  # revert to default
        if uses_gpus and params.get('bootstrap_type') == 'MVS':
            params['bootstrap_type'] = 'Bayesian'  # revert to default

        if params.get('bootstrap_type') not in ['Poisson', 'Bernoulli']:
            params.pop('subsample', None)  # only allowed for those 2 bootstrap_type settings

        if params.get('bootstrap_type') not in ['Bayesian']:
            params.pop('bagging_temperature', None)

        if not (self.num_classes == 2 and params.get('objective') == 'Logloss'):
            params.pop('scale_pos_weight', None)

        # go back to some default eval_metric
        if self.num_classes == 1:
            if 'eval_metric' not in params or params['eval_metric'] not in ['MAE', 'MAPE', 'Poisson', 'Quantile',
                                                                            'RMSE', 'LogLinQuantile', 'Lq',
                                                                            'Huber', 'Expectile', 'FairLoss',
                                                                            'NumErrors', 'SMAPE', 'R2', 'MSLE',
                                                                            'MedianAbsoluteError']:
                params['eval_metric'] = 'RMSE'
        elif self.num_classes == 2:
            if 'eval_metric' not in params or params['eval_metric'] not in ['Logloss', 'CrossEntropy', 'Precision',
                                                                            'Recall', 'F1', 'BalancedAccuracy',
                                                                            'BalancedErrorRate', 'MCC', 'Accuracy',
                                                                            'CtrFactor', 'AUC',
                                                                            'NormalizedGini', 'BrierScore', 'HingeLoss',
                                                                            'HammingLoss', 'ZeroOneLoss',
                                                                            'Kappa', 'WKappa',
                                                                            'LogLikelihoodOfPrediction']:
                params['eval_metric'] = 'Logloss'
        else:
            if 'eval_metric' not in params or params['eval_metric'] not in ['MultiClass', 'MultiClassOneVsAll',
                                                                            'Precision', 'Recall', 'F1', 'TotalF1',
                                                                            'MCC', 'Accuracy', 'HingeLoss',
                                                                            'HammingLoss', 'ZeroOneLoss', 'Kappa',
                                                                            'WKappa', 'AUC']:
                params['eval_metric'] = 'MultiClass'

        # set system stuff here
        params['silent'] = self.params_base.get('silent', True)
        if config.debug_daimodel_level >= 1:
            params['silent'] = False  # Can enable for tracking improvement in console/dai.log if have access
        params['random_state'] = self.params_base.get('random_state', 1234)
        params['thread_count'] = self.params_base.get('n_jobs', max(1, physical_cores_count))  # -1 is not supported

        return params
Example #33
def ask_questions():
    model_name_question = {
        'type': 'input',
        'name': 'value',
        'message': 'your model name ():',
        'input_type': 'name_identifier'
    }
    model_name = prompt(model_name_question)

    task_type_question = {
        'type': 'rawlist',
        'name': 'value',
        'message': 'choose task type',
        'choices': task_type_choices
    }
    task_type = prompt(task_type_question)

    network_name_question = {
        'type': 'rawlist',
        'name': 'value',
        'message': 'choose network',
        'choices': network_name_choices(task_type)
    }
    network_name = prompt(network_name_question)

    dataset_format_question = {
        'type': 'rawlist',
        'name': 'value',
        'message': 'choose dataset format',
        'choices': dataset_format_choices(task_type)
    }
    dataset_format = prompt(dataset_format_question)

    enable_data_augmentation = {
        'type': 'confirm',
        'name': 'value',
        'message': 'enable data augmentation?',
        'default': True
    }

    train_dataset_path_question = {
        'type': 'input',
        'name': 'value',
        'message': 'training dataset path:',
    }
    train_path = prompt(train_dataset_path_question)

    enable_test_dataset_path_question = {
        'type': 'rawlist',
        'name': 'value',
        'message': 'set validation dataset? \
(if answer no, the dataset will be separated for training and validation by 9:1 ratio.)',
        'choices': ['yes', 'no']
    }
    enable_test_dataset_path = prompt(enable_test_dataset_path_question)

    test_dataset_path_question = {
        'type': 'input',
        'name': 'value',
        'message': 'validation dataset path:',
    }
    if enable_test_dataset_path == 'yes':
        test_path = prompt(test_dataset_path_question)
    else:
        test_path = ''

    batch_size_question = {
        'type': 'input',
        'name': 'value',
        'message': 'batch size (integer):',
        'input_type': 'integer',
        'default': default_batch_size(task_type),
    }
    batch_size = prompt(batch_size_question)

    image_size_question = {
        'type': 'input',
        'name': 'value',
        'message': 'image size (integer x integer):',
        'default': '128x128',
        "filter": image_size_filter,
        "validate": generate_image_size_validate(network_name),
    }
    image_size = prompt(image_size_question)

    training_epochs_question = {
        'type': 'input',
        'name': 'value',
        'message': 'how many epochs do you run training (integer):',
        'input_type': 'integer',
        'default': '100'
    }
    training_epochs = prompt(training_epochs_question)

    training_optimizer_question = {
        'type': 'rawlist',
        'name': 'value',
        'message': 'select optimizer:',
        'choices': ['Momentum', 'Adam'],
        'default': 'Momentum'
    }
    training_optimizer = prompt(training_optimizer_question)

    initial_learning_rate_value_question = {
        'type': 'input',
        'name': 'value',
        'message': 'initial learning rate:',
        'default': '0.001'
    }
    initial_learning_rate_value = prompt(initial_learning_rate_value_question)

    # learning rate schedule
    learning_rate_schedule_question = {
        'type': 'rawlist',
        'name': 'value',
        'message': 'choose learning rate schedule \
({epochs} is the number of training epochs you entered before):',
        'choices': list(learning_rate_schedule_map.values()),
        'default': learning_rate_schedule_map["constant"],
    }
    _tmp_learning_rate_schedule = prompt(learning_rate_schedule_question)
    for key, value in learning_rate_schedule_map.items():
        if value == _tmp_learning_rate_schedule:
            learning_rate_schedule = key

    if prompt(enable_data_augmentation):
        all_augmentor = {}
        checkboxes = []
        for name, obj in inspect.getmembers(augmentor):
            if inspect.isclass(obj) and issubclass(obj, Processor):
                argspec = inspect.getfullargspec(obj)
                # ignore self
                args = argspec.args[1:]
                defaults = argspec.defaults or ()  # may be None when no defaults exist
                if len(args) == len(defaults):
                    default_val = [(arg, default)
                                   for arg, default in zip(args, defaults)]
                    default_str = " (default: {})".format(", ".join(
                        ["{}={}".format(a, d) for a, d in default_val]))
                else:
                    defaults = ("# Please fill a value.", ) * (
                        len(args) - len(defaults)) + defaults
                    default_val = [(arg, default)
                                   for arg, default in zip(args, defaults)]
                    default_str = " (**caution**: No default value is provided, \
please modify manually after config exported.)"

                all_augmentor[name + default_str] = {
                    "name": name,
                    "defaults": default_val
                }
                checkboxes.append({"name": name + default_str, "value": name})
        data_augmentation_question = {
            'type': 'checkbox',
            'name': 'value',
            'message': 'Please choose augmentors:',
            'choices': checkboxes
        }
        data_augmentation_res = prompt(data_augmentation_question)
        data_augmentation = {}
        if data_augmentation_res:
            for v in data_augmentation_res:
                data_augmentation[all_augmentor[v]["name"]] = all_augmentor[v]["defaults"]

    quantize_first_convolution_question = {
        'type': 'rawlist',
        'name': 'value',
        'message': 'apply quantization at the first layer?',
        'choices': ['yes', 'no']
    }
    quantize_first_convolution = prompt(quantize_first_convolution_question)

    r = {}
    for k, v in locals().items():
        if k != 'r' and not k.endswith("question"):
            r[k] = v
    return r
Example #34
    def build_tower(self, network_def, x_image, y_ref, void_label, n_classes,
                    tower_setup):
        use_dropout = not tower_setup.is_training
        gpu_str = "/gpu:" + str(tower_setup.gpu)
        if tower_setup.is_main_train_tower:
            print("inputs:", [x_image.get_shape().as_list()])
        with tf.device(gpu_str), \
                tf.name_scope("tower_gpu_" + str(tower_setup.gpu)):
            output_layer = None
            layers = {}
            for name, layer_def in network_def.items():
                layer_def = layer_def.copy()
                layer_class = layer_def["class"]
                if layer_class == "GraphSection":
                    if self.use_partialflow:
                        if self.current_graph_section is not None:
                            self.current_graph_section.__exit__(None, None, None)
                        self.current_graph_section = \
                            self.graph_section_manager.new_section()
                        self.graph_sections.append(self.current_graph_section)
                        self.current_graph_section.__enter__()
                    # else:
                    #  print >> log.v1, "warning, GraphSection defined, but use_partialflow is False. Ignoring sections"
                    continue
                del layer_def["class"]
                class_ = get_layer_class(layer_class)
                spec = inspect.getfullargspec(class_.__init__)
                args = spec[0]

                if "from" in layer_def:
                    inputs = sum(
                        [layers[x].outputs for x in layer_def["from"]], [])
                    del layer_def["from"]
                else:
                    inputs = [x_image]
                if "concat" in layer_def:
                    concat = sum(
                        [layers[x].outputs for x in layer_def["concat"]], [])
                    layer_def["concat"] = concat
                if "alternative_labels" in layer_def:
                    layer_def["targets"] = sum([
                        layers[x].out_labels
                        for x in layer_def["alternative_labels"]
                    ])
                    layer_def["n_classes"] = 2
                    del layer_def["alternative_labels"]
                elif class_.output_layer:
                    layer_def["targets"] = y_ref
                    layer_def["n_classes"] = n_classes
                    if "void_label" in args:
                        layer_def["void_label"] = void_label
                elif "targets" in args:
                    layer_def["targets"] = y_ref
                layer_def["name"] = name
                layer_def["inputs"] = inputs
                if "dropout" in args and not use_dropout:
                    layer_def["dropout"] = 0.0
                if "tower_setup" in args:
                    layer_def["tower_setup"] = tower_setup

                # check if all args are specified
                defaults = spec[3]
                if defaults is None:
                    defaults = []
                n_non_default_args = len(args) - len(defaults)
                non_default_args = args[1:n_non_default_args]  # without self
                for arg in non_default_args:
                    assert arg in layer_def, (name, arg)

                layer = class_(**layer_def)

                if tower_setup.is_main_train_tower:
                    print(name, "shape:",
                          [l.get_shape().as_list() for l in layer.outputs])
                layers[name] = layer
                if class_.output_layer:
                    assert output_layer is None, "Currently only 1 output layer is supported"
                    output_layer = layer
            assert output_layer is not None, "No output layer in network"

            n = tf.shape(y_ref)[0]
            assert len(output_layer.outputs) == 1, len(output_layer.outputs)
            loss, measures, y_softmax = (output_layer.loss, output_layer.measures,
                                         output_layer.outputs[0])
            regularizers_tower = []
            update_ops_tower = []
            for l in layers.values():
                self.summaries += l.summaries
                regularizers_tower += l.regularizers
                update_ops_tower += l.update_ops
            n_params = sum([l.n_params for l in layers.values()])
            return loss, measures, y_softmax, n, n_params, regularizers_tower, update_ops_tower, layers
Example #35
 def test_overridden_by_assumptions_empty(self, **kwargs):
     func = decorators.overridden_by_assumptions()(self.func)
     assert func.overridden_by_assumptions == ()
     assert func.__name__ == self.func_name
     assert getfullargspec(func) == self.func_argspec
Example #36
 def _function_argc(function):
     args, _, _, _, _, _, _ = inspect.getfullargspec(function)
     return len(args)
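
An equivalent form using FullArgSpec's named field rather than unpacking all seven fields:

import inspect

def _function_argc(function):
    # FullArgSpec's seven fields are (args, varargs, varkw, defaults,
    # kwonlyargs, kwonlydefaults, annotations); only args is needed here
    return len(inspect.getfullargspec(function).args)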
Example #37
    def _create_workflow(
        self,
        pipeline_func: Callable,
        pipeline_name: Text = None,
        pipeline_description: Text = None,
        params_list: List[dsl.PipelineParam] = None,
        pipeline_conf: dsl.PipelineConf = None,
    ) -> Dict[Text, Any]:
        """ Internal implementation of create_workflow."""
        params_list = params_list or []
        argspec = inspect.getfullargspec(pipeline_func)

        # Create the arg list with no default values and call pipeline function.
        # Assign type information to the PipelineParam
        pipeline_meta = _extract_pipeline_metadata(pipeline_func)
        pipeline_meta.name = pipeline_name or pipeline_meta.name
        pipeline_meta.description = pipeline_description or pipeline_meta.description
        pipeline_name = sanitize_k8s_name(pipeline_meta.name)

        # Need to first clear the default value of dsl.PipelineParams. Otherwise, it
        # will be resolved immediately in place when being passed to each component.
        default_param_values = {}
        for param in params_list:
            default_param_values[param.name] = param.value
            param.value = None

        # Currently only allow specifying pipeline params at one place.
        if params_list and pipeline_meta.inputs:
            raise ValueError(
                'Either specify pipeline params in the pipeline function, or in "params_list", but not both.'
            )

        args_list = []
        for arg_name in argspec.args:
            arg_type = None
            for input_spec in pipeline_meta.inputs or []:
                if arg_name == input_spec.name:
                    arg_type = input_spec.type
                    break
            args_list.append(
                dsl.PipelineParam(sanitize_k8s_name(arg_name, True),
                                  param_type=arg_type))

        with dsl.Pipeline(pipeline_name) as dsl_pipeline:
            pipeline_func(*args_list)

        # Configuration passed to the compiler overrides the pipeline's own conf.
        # Unfortunately, it's not trivial to detect whether dsl_pipeline.conf was
        # ever modified.
        pipeline_conf = pipeline_conf or dsl_pipeline.conf

        self._validate_exit_handler(dsl_pipeline)
        self._sanitize_and_inject_artifact(dsl_pipeline, pipeline_conf)

        # Fill in the default values.
        args_list_with_defaults = []
        if pipeline_meta.inputs:
            args_list_with_defaults = [
                dsl.PipelineParam(sanitize_k8s_name(arg_name, True))
                for arg_name in argspec.args
            ]
            if argspec.defaults:
                for arg, default in zip(reversed(args_list_with_defaults),
                                        reversed(argspec.defaults)):
                    arg.value = default.value if isinstance(
                        default, dsl.PipelineParam) else default
        elif params_list:
            # Or, if args are provided by params_list, fill in pipeline_meta.
            for param in params_list:
                param.value = default_param_values[param.name]

            args_list_with_defaults = params_list
            pipeline_meta.inputs = [
                InputSpec(name=param.name,
                          type=param.param_type,
                          default=param.value) for param in params_list
            ]

        op_transformers = [add_pod_env]
        op_transformers.extend(pipeline_conf.op_transformers)

        workflow = self._create_pipeline_workflow(
            args_list_with_defaults,
            dsl_pipeline,
            op_transformers,
            pipeline_conf,
        )

        from ._data_passing_rewriter import fix_big_data_passing
        workflow = fix_big_data_passing(workflow)

        import json
        workflow.setdefault('metadata', {}).setdefault(
            'annotations',
            {})['pipelines.kubeflow.org/pipeline_spec'] = json.dumps(
                pipeline_meta.to_dict(), sort_keys=True)

        return workflow
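The reversed zip near the end of _create_workflow is the standard way to pair trailing defaults with their parameters, since argspec.defaults aligns with the last positional args. A standalone sketch of just that step:

import inspect

def arg_defaults(func):
    # A sketch: map each parameter that has a default to its default value.
    spec = inspect.getfullargspec(func)
    defaults = spec.defaults or ()
    return dict(zip(reversed(spec.args), reversed(defaults)))

def pipeline(a, b, c=3, d=4):
    pass

print(arg_defaults(pipeline))  # {'d': 4, 'c': 3}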
Example #38
 def test_gen_palindromic(self):
     assert gen_palindromic
     assert 'n' in getfullargspec(gen_palindromic).args
Example #39
 def test_represent(self):
     assert represent
     assert 'n' in getfullargspec(represent).args
Example #40
File: apply.py Project: qinxuye/mars
def df_apply(df,
             func,
             axis=0,
             raw=False,
             result_type=None,
             args=(),
             dtypes=None,
             dtype=None,
             name=None,
             output_type=None,
             index=None,
             elementwise=None,
             **kwds):
    """
    Apply a function along an axis of the DataFrame.

    Objects passed to the function are Series objects whose index is
    either the DataFrame's index (``axis=0``) or the DataFrame's columns
    (``axis=1``). By default (``result_type=None``), the final return type
    is inferred from the return type of the applied function. Otherwise,
    it depends on the `result_type` argument.

    Parameters
    ----------
    func : function
        Function to apply to each column or row.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Axis along which the function is applied:

        * 0 or 'index': apply function to each column.
        * 1 or 'columns': apply function to each row.

    raw : bool, default False
        Determines if row or column is passed as a Series or ndarray object:

        * ``False`` : passes each row or column as a Series to the
          function.
        * ``True`` : the passed function will receive ndarray objects
          instead.
          If you are just applying a NumPy reduction function this will
          achieve much better performance.

    result_type : {'expand', 'reduce', 'broadcast', None}, default None
        These only act when ``axis=1`` (columns):

        * 'expand' : list-like results will be turned into columns.
        * 'reduce' : returns a Series if possible rather than expanding
          list-like results. This is the opposite of 'expand'.
        * 'broadcast' : results will be broadcast to the original shape
          of the DataFrame, the original index and columns will be
          retained.

        The default behaviour (None) depends on the return value of the
        applied function: list-like results will be returned as a Series
        of those. However if the apply function returns a Series these
        are expanded to columns.

    output_type : {'dataframe', 'series'}, default None
        Specify type of returned object. See `Notes` for more details.

    dtypes : Series, default None
        Specify dtypes of returned DataFrames. See `Notes` for more details.

    dtype : numpy.dtype, default None
        Specify dtype of returned Series. See `Notes` for more details.

    name : str, default None
        Specify name of returned Series. See `Notes` for more details.

    index : Index, default None
        Specify index of returned object. See `Notes` for more details.

    elementwise : bool, default None
        Specify whether ``func`` is an elementwise function:

        * ``False`` : The function is not elementwise. Mars will try
          concatenating chunks in rows (when ``axis=0``) or in columns
          (when ``axis=1``) and then apply ``func`` onto the concatenated
          chunk. The concatenation step can cause extra latency.
        * ``True`` : The function is elementwise. Mars will apply
          ``func`` to original chunks. This will not introduce extra
          concatenation step and reduce overhead.

    args : tuple
        Positional arguments to pass to `func` in addition to the
        array/series.

    **kwds
        Additional keyword arguments to pass as keywords arguments to
        `func`.

    Returns
    -------
    Series or DataFrame
        Result of applying ``func`` along the given axis of the
        DataFrame.

    See Also
    --------
    DataFrame.applymap: For elementwise operations.
    DataFrame.aggregate: Only perform aggregating type operations.
    DataFrame.transform: Only perform transforming type operations.

    Notes
    -----
    When deciding output dtypes and shape of the return value, Mars will
    try applying ``func`` onto a mock DataFrame, and the apply call may
    fail. When this happens, you need to specify the type of apply call
    (DataFrame or Series) in output_type.

    * For DataFrame output, you need to specify a list or a pandas Series
      as ``dtypes`` of output DataFrame. ``index`` of output can also be
      specified.
    * For Series output, you need to specify ``dtype`` and ``name`` of
      output Series.

    Examples
    --------
    >>> import numpy as np
    >>> import mars.tensor as mt
    >>> import mars.dataframe as md
    >>> df = md.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
    >>> df.execute()
       A  B
    0  4  9
    1  4  9
    2  4  9

    Using a reducing function on either axis

    >>> df.apply(np.sum, axis=0).execute()
    A    12
    B    27
    dtype: int64

    >>> df.apply(np.sum, axis=1).execute()
    0    13
    1    13
    2    13
    dtype: int64

    Returning a list-like will result in a Series

    >>> df.apply(lambda x: [1, 2], axis=1).execute()
    0    [1, 2]
    1    [1, 2]
    2    [1, 2]
    dtype: object

    Passing ``result_type='expand'`` will expand list-like results
    to columns of a Dataframe

    >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand').execute()
       0  1
    0  1  2
    1  1  2
    2  1  2

    Returning a Series inside the function is similar to passing
    ``result_type='expand'``. The resulting column names
    will be the Series index.

    >>> df.apply(lambda x: md.Series([1, 2], index=['foo', 'bar']), axis=1).execute()
       foo  bar
    0    1    2
    1    1    2
    2    1    2

    Passing ``result_type='broadcast'`` will ensure the same shape
    result, whether list-like or scalar is returned by the function,
    and broadcast it along the axis. The resulting column names will
    be the originals.

    >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast').execute()
       A  B
    0  1  2
    1  1  2
    2  1  2
    """
    if isinstance(func, (list, dict)):
        return df.aggregate(func)

    output_types = kwds.pop('output_types', None)
    object_type = kwds.pop('object_type', None)
    output_types = validate_output_types(output_type=output_type,
                                         output_types=output_types,
                                         object_type=object_type)
    output_type = output_types[0] if output_types else None

    # calling member function
    if isinstance(func, str):
        func = getattr(df, func)
        sig = inspect.getfullargspec(func)
        if "axis" in sig.args:
            kwds["axis"] = axis
        return func(*args, **kwds)

    op = ApplyOperand(func=func,
                      axis=axis,
                      raw=raw,
                      result_type=result_type,
                      args=args,
                      kwds=kwds,
                      output_type=output_type,
                      elementwise=elementwise)
    return op(df, dtypes=dtypes, dtype=dtype, name=name, index=index)
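The string-dispatch branch above (resolve `func` to a member, forward `axis` only when the member's argspec accepts it) is a reusable pattern; a minimal sketch with hypothetical names:

import inspect

def call_member(obj, method_name, *args, axis=0, **kwds):
    # A sketch: pass axis through only if the resolved method accepts it.
    func = getattr(obj, method_name)
    sig = inspect.getfullargspec(func)
    if "axis" in sig.args:
        kwds["axis"] = axis
    return func(*args, **kwds)

class Frame:
    def total(self, axis):
        return f"sum over axis {axis}"

print(call_member(Frame(), "total", axis=1))  # sum over axis 1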
Example #41
 def wrapper(*args):
     if PY3:
         arg_spec = inspect.getfullargspec(func)
     else:
         arg_spec = inspect.getargspec(func)
     return func(*args[0:len(arg_spec.args)])
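A quick demonstration of the trimming behaviour: positional arguments beyond the wrapped function's arity are silently dropped. A Python-3-only sketch:

import inspect

def trim_args(func):
    def wrapper(*args):
        arg_spec = inspect.getfullargspec(func)
        return func(*args[:len(arg_spec.args)])
    return wrapper

@trim_args
def add(a, b):
    return a + b

print(add(1, 2, 99, 100))  # 3 -- the trailing arguments are discarded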
Example #42
    def news_river_api_request(self,
                               bool_operator='OR',
                               language='EN',
                               sortBy='_score',
                               sortOrder='DESC',
                               limit=100):

        valid_operators = ['AND', 'OR', 'NOT']
        valid_country_codes = []
        request_url = 'https://api.newsriver.io/v2/search'
        response = None
        """
		if bool_operator not in valid_operators:
			pass
			# raise some exception
		else:
			if bool_operator is not 'AND':
				self.newsriver_keywords.replace('AND',
					bool_operator.center(len(bool_operator)+2*len(' '), ' '))
 
		
		
		if countryCode not in valid_country_codes:
			pass
			# raise some exception
		else:
			if bool_operator is not 'AND':
				self.newsriver_keywords.replace('AND', bool_operator)

		"""

        payload = {}
        payload['query'] = '{} AND language:{}'.format(
            self.newsriver_keywords, language)

        arg_spec = inspect.getfullargspec(self.news_river_api_request)
        for kwarg, val in zip(arg_spec.args[2:], arg_spec.defaults[1:]):
            payload[kwarg] = val

        params = urllib.parse.urlencode(payload, quote_via=urllib.parse.quote)

        headers = {}
        headers["Authorization"] = self.newsriver_token

        response = requests.get(request_url, headers=headers, params=params)
        # response.raise_for_status()
        custom_log(response.request.body)
        """
		try:
			response = requests.get(request_url, headers=headers, params=params, timeout = (15, 30))
			response.raise_for_status()
			custom_log(response)
		
		except ex.HTTPError:
			pass
			# handle exception
		except ex.Timeout:
			pass
			# handle exception
		"""
        custom_log(response.text)
        loaded = json.loads(response.text)
        na = news_agg(response)
        compiled = na.json_decodecompile()
        custom_log(compiled)
Example #43
 def test_word_count(self):
     assert word_count
     assert 'file' in getfullargspec(word_count).args
Example #44
    def callInvoke(self, obj, methodname, vals, location):

        methods = inspect.getmembers(obj, callable)

        if not methods:
            return (
                False,
                PLambdaException(f'Object not invokable: {obj} {location}'))

        method = None

        if not isString(methodname):
            return (False,
                    PLambdaException(
                        f'Method name not a string: {methodname} {location}'))

        for (name, value) in methods:
            if name == methodname:
                method = value
                break

        if method is None:
            return (
                False,
                PLambdaException(f'No such method: {methodname} {location}'))

        # Ugliness under python's hood:
        # Cannot get the argspec of a builtin
        # http://stackoverflow.com/questions/3276635/how-to-get-the-number-of-args-of-a-built-in-function-in-python
        # http://stackoverflow.com/questions/990016/how-to-find-out-the-arity-of-a-method-in-python
        #
        # Can avoid using inspect for simple cases:
        #        def arity(obj, method):
        #          return getattr(obj.__class__, method).func_code.co_argcount - 1 # remove self
        #
        if not isinstance(method, types.BuiltinFunctionType):

            # 05/08/20 flipped inspect.getargspec(method) to
            # inspect.getfullargspec(method) without having a clue what I was doing.
            # but the doc said: "Deprecated since version 3.0: Use
            # getfullargspec() for an updated API that is usually a drop-in replacement,
            # but also correctly handles function annotations and keyword-only parameters."

            argspec = inspect.getfullargspec(method)
            # string2error(f'argspec({method}) =  {argspec}')
            # if it is an object we have to *not* count 'self',
            # but if it is a class we need to pass all the args!
            offset = 0
            # if the thing has varargs (e.g. decorators), then we had better just try to apply
            if (not inspect.ismodule(obj)) and (
                    not inspect.isclass(obj)) and argspec.varargs is None:
                offset = 1
                ndefaults = len(argspec.defaults) if argspec.defaults else 0
                nargs = len(argspec.args) - offset
                nvals = len(vals)
                # my guess is that we will need to revisit this. what do we
                # do when some of the defaults are used but not others?
                if (nvals < nargs - ndefaults) or (nargs < nvals):
                    msg = f'Arity of {methodname} args {vals} does not match the argspec: {argspec.args[offset:]}. defaults: {argspec.defaults} varargs: {argspec.varargs}'
                    return (False, PLambdaException(msg))

        retval = None

        try:
            retval = method(*vals)
            return (True, retval)
        except Exception as e:
            return (False,
                    PLambdaException(f'invoke {location} threw {str(e)}'))
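The arity test buried in callInvoke can be isolated. A sketch (hypothetical helper) mirroring its accept/reject rule: builtins and varargs functions are always attempted, otherwise the value count must fall between the required and total argument counts:

import inspect
import types

def arity_matches(method, nvals, is_bound=True):
    # A sketch of callInvoke's check; True means "go ahead and call it".
    if isinstance(method, types.BuiltinFunctionType):
        return True  # builtins expose no argspec, so just try the call
    spec = inspect.getfullargspec(method)
    if spec.varargs is not None:
        return True
    offset = 1 if is_bound else 0  # don't count 'self' on bound methods
    ndefaults = len(spec.defaults) if spec.defaults else 0
    nargs = len(spec.args) - offset
    return nargs - ndefaults <= nvals <= nargs

print(arity_matches("abc".startswith, 1))  # True: builtin, no argspec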
Example #45
readme_table_post = '**POST requests**\n\n'
readme_table_post += '| Resource | Argument | Flags | Function |\n| ------ | ------ | ------ | ------ |\n'
readme_table_post += '| `admin` | `password_hash` | | Sent a SHA256 hash of the admin password to obtain an admin session |\n'

readme_table_get = '**GET requests**\n\n'
readme_table_get += '| Resource | Flags | Function |\n| ------ | ------ | ------ |\n'

for attr_name in dir(io_functions.InputHandler):
    attr = getattr(io_functions.InputHandler, attr_name)
    if hasattr(attr, 'is_api_method'):
        method_data_for_json = {
            'name': attr_name,
            'resource': attr_name.replace('_', '/'),
            'description': attr.__doc__
        }
        argspec_args = getfullargspec(attr).args
        is_post = len(argspec_args) > 1

        readme_tmp = '| `' + attr_name.replace('_', '/') + '` '

        if is_post:
            method_arg = argspec_args[1]
            readme_tmp += '| `' + method_arg + '` |'
            method_data_for_json['argument'] = method_arg
        else:
            method_arg = None
            readme_tmp += '|'
            method_data_for_json['argument'] = None

        api_list.append(method_data_for_json)

        for api_flag in api_flags:
Example #46
 def test_is_palindromic(self):
     assert is_palindromic
     assert 'n' in getfullargspec(is_palindromic).args
Example #47
    def register_handler(
            self, call: Callable[[Any, Message], Optional[Response]]) -> None:
        """Register a handler call.

        The message type handled by the call is determined by its
        type annotation.
        """
        # TODO: can use types.GenericAlias in 3.9.
        from typing import _GenericAlias  # type: ignore
        from typing import Union, get_type_hints, get_args

        sig = inspect.getfullargspec(call)

        # The provided callable should be a method taking one 'msg' arg.
        expectedsig = ['self', 'msg']
        if sig.args != expectedsig:
            raise ValueError(f'Expected callable signature of {expectedsig};'
                             f' got {sig.args}')

        # Check annotation types to determine what message types we handle.
        # Return-type annotation can be a Union, but we probably don't
        # have it available at runtime. Explicitly pull it in.
        anns = get_type_hints(call, localns={'Union': Union})
        msgtype = anns.get('msg')
        if not isinstance(msgtype, type):
            raise TypeError(
                f'expected a type for "msg" annotation; got {type(msgtype)}.')
        assert issubclass(msgtype, Message)

        ret = anns.get('return')
        responsetypes: Tuple[Union[Type[Any], Type[None]], ...]

        # Return types can be a single type or a union of types.
        if isinstance(ret, _GenericAlias):
            targs = get_args(ret)
            if not all(isinstance(a, type) for a in targs):
                raise TypeError(f'expected only types for "return" annotation;'
                                f' got {targs}.')
            responsetypes = targs
        else:
            if not isinstance(ret, type):
                raise TypeError(f'expected one or more types for'
                                f' "return" annotation; got a {type(ret)}.')
            responsetypes = (ret, )

        # Return type of None translates to EmptyResponse.
        responsetypes = tuple(EmptyResponse if r is type(None) else r
                              for r in responsetypes)

        # Make sure our protocol has this message type registered and our
        # return types exactly match. (Technically we could return a subset
        # of the supported types; can allow this in the future if it makes
        # sense).
        registered_types = self._protocol.message_ids_by_type.keys()

        if msgtype not in registered_types:
            raise TypeError(f'Message type {msgtype} is not registered'
                            f' in this Protocol.')

        if msgtype in self._handlers:
            raise TypeError(f'Message type {msgtype} already has a registered'
                            f' handler.')

        # Make sure the responses exactly matches what the message expects.
        if set(responsetypes) != set(msgtype.get_response_types()):
            raise TypeError(
                f'Provided response types {responsetypes} do not'
                f' match the set expected by message type {msgtype}: '
                f'({msgtype.get_response_types()})')

        # Ok; we're good!
        self._handlers[msgtype] = call
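The key introspection step above -- deriving the handled message type from the 'msg' annotation after validating the argspec -- can be seen in isolation; a minimal sketch with hypothetical message classes:

import inspect
from typing import get_type_hints

class Message: pass
class PingMessage(Message): pass

class Handler:
    def on_ping(self, msg: PingMessage) -> None:
        pass

call = Handler.on_ping
assert inspect.getfullargspec(call).args == ['self', 'msg']
print(get_type_hints(call)['msg'])  # <class '__main__.PingMessage'>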
Example #48
File: validator.py Project: sgill2/yank
def generate_signature_schema(func, update_keys=None, exclude_keys=frozenset()):
    """Generate a dictionary to test function signatures with Cerberus' Schema.

    Parameters
    ----------
    func : function
        The function used to build the schema.
    update_keys : dict
        Keys in here have priority over automatic generation. It can be
        used to make an argument mandatory, or to use a specific validator.
    exclude_keys : list-like
        Keys in here are ignored and not included in the schema.

    Returns
    -------
    func_schema : dict
        The dictionary to be used as Cerberus Validator schema. Contains all keyword
        variables in the function signature as optional argument with
        the default type as validator. Unit bearing strings are converted.
        Argument with default None are always accepted. Camel case
        parameters in the function are converted to underscore style.

    Examples
    --------
    >>> from cerberus import Validator
    >>> def f(a, b, camelCase=True, none=None, quantity=3.0*unit.angstroms):
    ...     pass
    >>> f_dict = generate_signature_schema(f, exclude_keys=['quantity'])
    >>> print(isinstance(f_dict, dict))
    True
    >>> f_validator = Validator(generate_signature_schema(f))
    >>> f_validator.validated({'quantity': '1.0*nanometer'})
    {'quantity': Quantity(value=1.0, unit=nanometer)}

    """
    if update_keys is None:
        update_keys = {}

    func_schema = {}
    arg_spec = inspect.getfullargspec(unwrap_py2(func))
    args = arg_spec.args
    defaults = arg_spec.defaults or ()  # defaults is None when the function has none

    # Check keys that must be excluded from first pass
    exclude_keys = set(exclude_keys)
    exclude_keys.update(update_keys)

    # Transform camelCase to underscore
    args = [utils.camelcase_to_underscore(arg) for arg in args]

    # Build schema
    optional_validator = {'required': False}  # Keys are always optional for this type
    for arg, default_value in zip(args[-len(defaults):], defaults):
        if arg not in exclude_keys:  # User defined keys are added later
            if default_value is None:  # None defaults are always accepted, and considered nullable
                validator = {'nullable': True}
            elif isinstance(default_value, unit.Quantity):  # Convert unit strings
                validator = {'coerce': to_unit_coercer(default_value.unit)}
            else:
                validator = type_to_cerberus_map(type(default_value))
            # Add the argument to the existing schema as a keyword
            # To the new keyword, add the optional flag and the "validator" flag
            # of either 'validator' or 'type' depending on how it was processed
            func_schema = {**func_schema, **{arg: {**optional_validator, **validator}}}

    # Add special user keys
    func_schema.update(update_keys)

    return func_schema
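The introspection at the heart of generate_signature_schema -- pairing the trailing defaults with their argument names -- works on its own; a sketch without the Cerberus and unit machinery:

import inspect

def kwarg_defaults(func):
    # A sketch: {keyword arg -> default}, guarding against defaults being None.
    spec = inspect.getfullargspec(func)
    defaults = spec.defaults or ()
    if not defaults:
        return {}
    return dict(zip(spec.args[-len(defaults):], defaults))

def f(a, b, camelCase=True, none=None):
    pass

print(kwarg_defaults(f))  # {'camelCase': True, 'none': None}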
Example #49
 def test_overridden_by_assumptions_triple(self, **kwargs):
     func = decorators.overridden_by_assumptions(
         'a1', 'a2', 'a3')(self.func)
     assert func.overridden_by_assumptions == ('a1', 'a2', 'a3',)
     assert func.__name__ == self.func_name
     assert getfullargspec(func) == self.func_argspec
Example #50
 def get_plugin_init_parameters(self, pluginname):
     required_parameters = inspect.getfullargspec(
         util.plugin(pluginname).__init__)
     return required_parameters
Example #51
 def test_assumes_single(self, **kwargs):
     func = decorators.assumes('a1')(self.func)
     assert func.assumptions == ('a1',)
     assert func.__name__ == self.func_name
     assert getfullargspec(func) == self.func_argspec
Example #52
 def test_divisors (self):
     assert divisors
     assert 'n' in getfullargspec(divisors).args
Example #53
    def run_test_as_given(test):
        if inspect.isclass(test):
            # Provide a meaningful error to users, instead of exceptions from
            # internals that assume we're dealing with a function.
            raise InvalidArgument("@given cannot be applied to a class.")
        given_arguments = tuple(_given_arguments)
        given_kwargs = dict(_given_kwargs)

        original_argspec = getfullargspec(test)

        check_invalid = is_invalid_test(test.__name__, original_argspec,
                                        given_arguments, given_kwargs)

        # If the argument check found problems, return a dummy test function
        # that will raise an error if it is actually called.
        if check_invalid is not None:
            return check_invalid

        # Because the argument check succeeded, we can convert @given's
        # positional arguments into keyword arguments for simplicity.
        if given_arguments:
            assert not given_kwargs
            for name, strategy in zip(reversed(original_argspec.args),
                                      reversed(given_arguments)):
                given_kwargs[name] = strategy
        # These have been converted, so delete them to prevent accidental use.
        del given_arguments

        argspec = new_given_argspec(original_argspec, given_kwargs)

        @impersonate(test)
        @define_function_signature(test.__name__, test.__doc__, argspec)
        def wrapped_test(*arguments, **kwargs):
            # Tell pytest to omit the body of this function from tracebacks
            __tracebackhide__ = True

            test = wrapped_test.hypothesis.inner_test

            if getattr(test, "is_hypothesis_test", False):
                raise InvalidArgument((
                    "You have applied @given to the test %s more than once, which "
                    "wraps the test several times and is extremely slow. A "
                    "similar effect can be gained by combining the arguments "
                    "of the two calls to given. For example, instead of "
                    "@given(booleans()) @given(integers()), you could write "
                    "@given(booleans(), integers())") % (test.__name__, ))

            settings = wrapped_test._hypothesis_internal_use_settings

            random = get_random_for_wrapped_test(test, wrapped_test)

            # Use type information to convert "infer" arguments into appropriate
            # strategies.
            if infer in given_kwargs.values():
                hints = get_type_hints(test)
            for name in [
                    name for name, value in given_kwargs.items()
                    if value is infer
            ]:
                if name not in hints:
                    raise InvalidArgument(
                        "passed %s=infer for %s, but %s has no type annotation"
                        % (name, test.__name__, name))
                given_kwargs[name] = st.from_type(hints[name])

            processed_args = process_arguments_to_given(
                wrapped_test,
                arguments,
                kwargs,
                given_kwargs,
                argspec,
                test,
                settings,
            )
            arguments, kwargs, test_runner, search_strategy = processed_args

            runner = getattr(search_strategy, "runner", None)
            if isinstance(runner, TestCase) and test.__name__ in dir(TestCase):
                msg = ("You have applied @given to the method %s, which is "
                       "used by the unittest runner but is not itself a test."
                       "  This is not useful in any way." % test.__name__)
                fail_health_check(settings, msg, HealthCheck.not_a_test_method)
            if bad_django_TestCase(runner):  # pragma: no cover
                # Covered by the Django tests, but not the pytest coverage task
                raise InvalidArgument(
                    "You have applied @given to a method on %s, but this "
                    "class does not inherit from the supported versions in "
                    "`hypothesis.extra.django`.  Use the Hypothesis variants "
                    "to ensure that each example is run in a separate "
                    "database transaction." % qualname(type(runner)))

            state = StateForActualGivenExecution(
                test_runner,
                search_strategy,
                test,
                settings,
                random,
                wrapped_test,
            )

            reproduce_failure = wrapped_test._hypothesis_internal_use_reproduce_failure

            # If there was a @reproduce_failure decorator, use it to reproduce
            # the error (or complain that we couldn't). Either way, this will
            # always raise some kind of error.
            if reproduce_failure is not None:
                expected_version, failure = reproduce_failure
                if expected_version != __version__:
                    raise InvalidArgument(
                        ("Attempting to reproduce a failure from a different "
                         "version of Hypothesis. This failure is from %s, but "
                         "you are currently running %r. Please change your "
                         "Hypothesis version to a matching one.") %
                        (expected_version, __version__))
                try:
                    state.execute_once(
                        ConjectureData.for_buffer(decode_failure(failure)),
                        print_example=True,
                        is_final=True,
                    )
                    raise DidNotReproduce(
                        "Expected the test to raise an error, but it "
                        "completed successfully.")
                except StopTest:
                    raise DidNotReproduce(
                        "The shape of the test data has changed in some way "
                        "from where this blob was defined. Are you sure "
                        "you're running the same test?")
                except UnsatisfiedAssumption:
                    raise DidNotReproduce(
                        "The test data failed to satisfy an assumption in the "
                        "test. Have you added it since this blob was "
                        "generated?")

            # There was no @reproduce_failure, so start by running any explicit
            # examples from @example decorators.

            execute_explicit_examples(state, wrapped_test, arguments, kwargs)

            # If there were any explicit examples, they all ran successfully.
            # The next step is to use the Conjecture engine to run the test on
            # many different inputs.

            if not (Phase.reuse in settings.phases
                    or Phase.generate in settings.phases):
                return

            try:
                if isinstance(runner, TestCase) and hasattr(runner, "subTest"):
                    subTest = runner.subTest
                    try:
                        runner.subTest = fake_subTest
                        state.run_engine()
                    finally:
                        runner.subTest = subTest
                else:
                    state.run_engine()
            except BaseException as e:
                # The exception caught here should either be an actual test
                # failure (or MultipleFailures), or some kind of fatal error
                # that caused the engine to stop.

                generated_seed = wrapped_test._hypothesis_internal_use_generated_seed
                with local_settings(settings):
                    if not (state.failed_normally or generated_seed is None):
                        if running_under_pytest:
                            report(
                                "You can add @seed(%(seed)d) to this test or "
                                "run pytest with --hypothesis-seed=%(seed)d "
                                "to reproduce this failure." %
                                {"seed": generated_seed})
                        else:
                            report("You can add @seed(%d) to this test to "
                                   "reproduce this failure." %
                                   (generated_seed, ))
                    # The dance here is to avoid showing users long tracebacks
                    # full of Hypothesis internals they don't care about.
                    # We have to do this inline, to avoid adding another
                    # internal stack frame just when we've removed the rest.
                    #
                    # Using a variable for our trimmed error ensures that the line
                    # which will actually appear in tracebacks is as clear as
                    # possible - "raise the_error_hypothesis_found".
                    the_error_hypothesis_found = e.with_traceback(
                        get_trimmed_traceback())
                    raise the_error_hypothesis_found

        # After having created the decorated test function, we need to copy
        # over some attributes to make the switch as seamless as possible.

        for attrib in dir(test):
            if not (attrib.startswith("_") or hasattr(wrapped_test, attrib)):
                setattr(wrapped_test, attrib, getattr(test, attrib))
        wrapped_test.is_hypothesis_test = True
        if hasattr(test, "_hypothesis_internal_settings_applied"):
            # Used to check if @settings is applied twice.
            wrapped_test._hypothesis_internal_settings_applied = True
        wrapped_test._hypothesis_internal_use_seed = getattr(
            test, "_hypothesis_internal_use_seed", None)
        wrapped_test._hypothesis_internal_use_settings = (getattr(
            test, "_hypothesis_internal_use_settings", None)
                                                          or Settings.default)
        wrapped_test._hypothesis_internal_use_reproduce_failure = getattr(
            test, "_hypothesis_internal_use_reproduce_failure", None)
        wrapped_test.hypothesis = HypothesisHandle(test)
        return wrapped_test
Example #54
 def test_leap(self):
     assert leap
     assert 'year' in getfullargspec(leap).args
Example #55
def _parse_signature(func):
    """Return a signature object for the function.

    .. deprecated:: 2.0
        Will be removed in 2.1 along with utils.bind/validate_arguments
    """
    # if we have a cached validator for this function, return it
    parse = _signature_cache.get(func)
    if parse is not None:
        return parse

    # inspect the function signature and collect all the information
    tup = inspect.getfullargspec(func)
    positional, vararg_var, kwarg_var, defaults = tup[:4]
    defaults = defaults or ()
    arg_count = len(positional)
    arguments = []
    for idx, name in enumerate(positional):
        if isinstance(name, list):
            raise TypeError(
                "cannot parse functions that unpack tuples in the function signature"
            )
        try:
            default = defaults[idx - arg_count]
        except IndexError:
            param = (name, False, None)
        else:
            param = (name, True, default)
        arguments.append(param)
    arguments = tuple(arguments)

    def parse(args, kwargs):
        new_args = []
        missing = []
        extra = {}

        # consume as many arguments as positional as possible
        for idx, (name, has_default, default) in enumerate(arguments):
            try:
                new_args.append(args[idx])
            except IndexError:
                try:
                    new_args.append(kwargs.pop(name))
                except KeyError:
                    if has_default:
                        new_args.append(default)
                    else:
                        missing.append(name)
            else:
                if name in kwargs:
                    extra[name] = kwargs.pop(name)

        # handle extra arguments
        extra_positional = args[arg_count:]
        if vararg_var is not None:
            new_args.extend(extra_positional)
            extra_positional = ()
        if kwargs and kwarg_var is None:
            extra.update(kwargs)
            kwargs = {}

        return (
            new_args,
            kwargs,
            missing,
            extra,
            extra_positional,
            arguments,
            vararg_var,
            kwarg_var,
        )

    _signature_cache[func] = parse
    return parse
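A quick usage sketch of the parser returned above (assuming the module-level _signature_cache dict, which this snippet references but does not show):

_signature_cache = {}  # assumed module-level cache

def greet(name, greeting="hello"):
    return f"{greeting}, {name}"

parse = _parse_signature(greet)
new_args, kwargs, missing, extra, extra_positional, *_ = parse(("world",), {})
print(new_args)  # ['world', 'hello'] -- the default was filled in
print(missing)   # []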
Example #56
        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final):

                        # Generate all arguments to the test function.
                        args, kwargs = data.draw(self.search_strategy)
                        if expected_failure is not None:
                            text_repr[0] = arg_string(test, args, kwargs)

                        if print_example or current_verbosity(
                        ) >= Verbosity.verbose:
                            output = StringIO()

                            printer = RepresentationPrinter(output)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")

                            if self.print_given_args:
                                printer.text(" ")
                                printer.text(test.__name__)
                                with printer.group(indent=4,
                                                   open="(",
                                                   close=""):
                                    printer.break_()
                                    for v in args:
                                        printer.pretty(v)
                                        # We add a comma unconditionally because
                                        # generated arguments will always be
                                        # kwargs, so there will always be more
                                        # to come.
                                        printer.text(",")
                                        printer.breakable()

                                    # We need to make sure to print these in the argument order for
                                    # Python 2 and older versions of Python 3.5. In modern versions
                                    # this isn't an issue because kwargs is ordered.
                                    arg_order = {
                                        v: i
                                        for i, v in enumerate(
                                            getfullargspec(self.test).args)
                                    }
                                    for i, (k, v) in enumerate(
                                            sorted(
                                                kwargs.items(),
                                                key=lambda t: (
                                                    arg_order.get(
                                                        t[0], float("inf")),
                                                    t[0],
                                                ),
                                            )):
                                        printer.text(k)
                                        printer.text("=")
                                        printer.pretty(v)
                                        printer.text(",")
                                        if i + 1 < len(kwargs):
                                            printer.breakable()
                                printer.break_()
                                printer.text(")")
                            printer.flush()
                            report(output.getvalue())
                        return test(*args, **kwargs)
Example #57
def export_longformer(model, onnx_model_path, export_padding):
    input_ids, attention_mask, global_attention_mask = get_dummy_inputs(
        model.config, export_padding, device=torch.device('cpu'))

    example_outputs = model(input_ids,
                            attention_mask=attention_mask,
                            global_attention_mask=global_attention_mask)

    if version.parse(transformers.__version__) < version.parse("4.0.0"):
        raise RuntimeError("This tool requires transformers 4.0.0 or later.")

    # Here we replace LongformerSelfAttention.forward with our implementation for exporting the ONNX model
    from transformers import LongformerSelfAttention
    import inspect
    key = ' '.join(
        inspect.getfullargspec(LongformerSelfAttention.forward).args)
    args_to_func = {
        'self hidden_states attention_mask layer_head_mask is_index_masked is_index_global_attn is_global_attn output_attentions':
        my_longformer_self_attention_forward_4_3_2,
        'self hidden_states attention_mask is_index_masked is_index_global_attn is_global_attn output_attentions':
        my_longformer_self_attention_forward_4_3,
        'self hidden_states attention_mask is_index_masked is_index_global_attn is_global_attn':
        my_longformer_self_attention_forward_4,
    }

    if key not in args_to_func:
        print("Current arguments",
              inspect.getfullargspec(LongformerSelfAttention.forward).args)
        raise RuntimeError(
            "LongformerSelfAttention.forward arguments are different. Please install supported version (like transformers 4.3.0)."
        )

    # Store for restoring later
    original_forward = LongformerSelfAttention.forward

    LongformerSelfAttention.forward = args_to_func[key]

    example_inputs = (input_ids, attention_mask, global_attention_mask)

    Path(onnx_model_path).parent.mkdir(parents=True, exist_ok=True)

    torch.onnx.export(
        model,
        example_inputs,
        onnx_model_path,
        opset_version=11,
        example_outputs=example_outputs,
        input_names=["input_ids", "attention_mask", "global_attention_mask"],
        output_names=["last_state", "pooler"],
        dynamic_axes={
            'input_ids': {
                0: 'batch_size',
                1: 'sequence_length'
            },
            'attention_mask': {
                0: 'batch_size',
                1: 'sequence_length'
            },
            'global_attention_mask': {
                0: 'batch_size',
                1: 'sequence_length'
            },
            'last_state': {
                0: 'batch_size',
                1: 'sequence_length'
            },
            'pooler': {
                0: 'batch_size',
                1: 'sequence_length'
            }
        },
        custom_opsets={"com.microsoft": 1})
    print(f"ONNX model exported to {onnx_model_path}")

    # Restore the original implementation:
    LongformerSelfAttention.forward = original_forward
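The dispatch trick above -- keying a table on the space-joined argspec of the installed forward method -- is a compact way to detect a library version by signature; a minimal sketch with hypothetical functions:

import inspect

def forward_v1(self, hidden_states, attention_mask):
    pass

def forward_v2(self, hidden_states, attention_mask, layer_head_mask):
    pass

def pick_impl(current_forward, table):
    # A sketch: select a replacement implementation by exact signature match.
    key = ' '.join(inspect.getfullargspec(current_forward).args)
    try:
        return table[key]
    except KeyError:
        raise RuntimeError(f"Unsupported forward signature: {key}")

table = {
    'self hidden_states attention_mask': forward_v1,
    'self hidden_states attention_mask layer_head_mask': forward_v2,
}
print(pick_impl(forward_v2, table).__name__)  # forward_v2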
Example #58
 def test_args(self):
     arg = getfullargspec(q07_randomsearch_predict).args
     self.assertEqual(len(arg), 1,
                      "Expected argument(s) %d, Given %d" % (1, len(arg)))
Example #59
 def test_nwords(self):
     assert nwords
     assert 's' in getfullargspec(nwords).args
Example #60
def livepatch(old,
              new,
              modname=None,
              visit_stack=(),
              cache=None,
              assume_type=None,
              heed_hook=True):
    """
    Livepatch C{old} with contents of C{new}.

    If C{old} can't be livepatched, then return C{new}.

    @param old:
      The object to be updated
    @param new:
      The object used as the source for the update.
    @type modname:
      C{str}
    @param modname:
      Only livepatch C{old} if it was defined in the given fully-qualified
      module name.  If C{None}, then update regardless of module.
    @param assume_type:
      Update as if both C{old} and C{new} were of type C{assume_type}.  If
      C{None}, then C{old} and C{new} must have the same type.
      For internal use.
    @param cache:
      Cache of already-updated objects.  Map from (id(old), id(new)) to result.
    @param visit_stack:
      Ids of objects that are currently being updated.
      Used to deal with reference cycles.
      For internal use.
    @param heed_hook:
      If C{True}, heed the C{__livepatch__} hook on C{new}, if any.
      If C{False}, ignore any C{__livepatch__} hook on C{new}.
    @return:
      Either live-patched C{old}, or C{new}.
    """
    if old is new:
        return new
    # If we're already visiting this object (due to a reference cycle), then
    # don't recurse again.
    if id(old) in visit_stack:
        return old
    if cache is None:
        cache = {}
    cachekey = (id(old), id(new))
    try:
        return cache[cachekey]
    except KeyError:
        pass
    visit_stack += (id(old), )

    def do_livepatch():
        new_modname = _get_definition_module(new)
        if modname and new_modname and new_modname != modname:
            # Ignore objects that have been imported from another module.
            # Just update their references.
            return new
        if assume_type is not None:
            use_type = assume_type
        else:
            oldtype = type(old)
            newtype = type(new)
            if oldtype is newtype:
                # Easy, common case: Type didn't change.
                use_type = oldtype
            elif (oldtype.__name__ == newtype.__name__
                  and oldtype.__module__ == newtype.__module__ == modname
                  and getattr(sys.modules[modname], newtype.__name__, None) is
                  newtype and oldtype is livepatch(oldtype,
                                                   newtype,
                                                   modname=modname,
                                                   visit_stack=visit_stack,
                                                   cache=cache)):
                # Type of this object was defined in this module.  This
                # includes metaclasses defined in the same module.
                use_type = oldtype
            else:
                # If the type changed, then give up.
                return new
        try:
            mro = type.mro(use_type)
        except TypeError:
            mro = [use_type, object]  # old-style class
        # Dispatch on type.  Include parent classes (in C3 linearized
        # method resolution order), in particular so that this works on
        # classes with custom metaclasses that subclass C{type}.
        for t in mro:
            try:
                update = _LIVEPATCH_DISPATCH_TABLE[t]
                break
            except KeyError:
                pass
        else:
            # We should have found at least C{object}
            raise AssertionError("unreachable")
        # Dispatch.
        return update(old,
                      new,
                      modname=modname,
                      cache=cache,
                      visit_stack=visit_stack)

    if heed_hook:
        hook = (getattr(new, "__livepatch__", None)
                or getattr(new, "__reload_update__", None))
        # XXX if unbound method or a descriptor, then we should ignore it.
        # XXX test for that.
    else:
        hook = None
    if hook is None:
        # No hook is defined or the caller instructed us to ignore it.
        # Do the standard livepatch.
        result = do_livepatch()
    else:
        # Call a hook for updating.
        # Build dict of optional kwargs.
        avail_kwargs = dict(old=old,
                            new=new,
                            do_livepatch=do_livepatch,
                            modname=modname,
                            cache=cache,
                            visit_stack=visit_stack)
        # Find out which optional kwargs the hook wants.
        if PY2:
            argspec = inspect.getargspec(hook)
        else:
            argspec = inspect.getfullargspec(hook)
        has_varkw = argspec.keywords if PY2 else argspec.varkw
        argnames = argspec.args
        if hasattr(hook, "__func__"):
            # Skip 'self' arg.
            argnames = argnames[1:]
        # Pick kwargs that are wanted and available.
        args = []
        kwargs = {}
        for n in argnames:
            try:
                kwargs[n] = avail_kwargs[n]
                if has_varkw:
                    break
            except KeyError:
                # For compatibility, allow first argument to be 'old' with any
                # name, as long as there's no other arg 'old'.
                # We intentionally allow this even if the user specified
                # **kwargs.
                if not args and not kwargs and 'old' not in argnames:
                    args.append(old)
                else:
                    # Rely on default being set.  If a default isn't set, the
                    # user will get a TypeError.
                    pass
        if has_varkw:
            # Use all available kwargs.
            kwargs = avail_kwargs
        # Call hook.
        result = hook(*args, **kwargs)
    cache[cachekey] = result
    return result
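The hook-calling logic at the end of livepatch -- pass a hook only the keyword arguments its signature declares, or everything if it takes **kwargs -- generalizes into a small utility; a Python-3-only sketch:

import inspect

def call_with_wanted_kwargs(hook, available):
    # A sketch: invoke hook with the subset of `available` it asks for.
    spec = inspect.getfullargspec(hook)
    if spec.varkw is not None:  # hook accepts **kwargs: give it everything
        return hook(**available)
    wanted = {n: available[n] for n in spec.args if n in available}
    return hook(**wanted)

def my_hook(old, do_livepatch):
    return ("hook saw", old)

print(call_with_wanted_kwargs(my_hook, {"old": 1, "new": 2, "do_livepatch": None}))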