Пример #1
0
def parse(lines, filename, headings = None):
    """ headings: Specify the headings explicitly. Otherwise they are read from the first line in the file. """
    reader = csv.reader(lines, dialect = ParadoxDialect)

    # Use the first row as headings unless the caller supplied them.
    if headings is None:
        headings = next(reader)

    result = pyradox.Tree()

    for row_index, row_tokens in enumerate(reader):
        # Blank lines carry no data.
        if not row_tokens:
            continue

        if len(row_tokens) != len(headings):
            warnings.warn_explicit('Row length (%d) should be same as headings length (%d).' % (len(row_tokens), len(headings)), ParseWarning, filename, row_index + 2)

        # The leading cell of each row acts as the key for the whole row.
        key = pyradox.token.make_primitive(row_tokens[0], default_token_type = 'str')
        tree_row = pyradox.Tree()
        result.append(key, tree_row)

        # zip() truncates to the shorter of headings/row_tokens, matching the
        # original min()-bounded column loop.
        for heading, row_token in zip(headings, row_tokens):
            value = pyradox.token.make_primitive(row_token, default_token_type = 'str')
            tree_row.append(heading, value)

    return result
Пример #2
0
 def error_checked_function(self, *args, **kwargs):
     """Call the wrapped *function* and convert API-reported errors to exceptions.

     Warns on unexpected keys in a dict response; when the response carries a
     non-empty ``errors`` list, raises the mapped exception (or an
     ExceptionList when there are several) — unless the response also contains
     a submission URL, which signals success despite a captcha error.
     """
     return_value = function(self, *args, **kwargs)
     # Keys the API is expected to return; anything else triggers a warning.
     allowed = ('captcha', 'data', 'errors', 'kind', 'names', 'next',
                'prev', 'ratelimit', 'users')
     if isinstance(return_value, dict):
         for key in return_value:
             if key not in allowed:
                 warnings.warn_explicit('Unknown return key: %s' % key,
                                        UserWarning, '', 0)
         if 'errors' in return_value and return_value['errors']:
             # Hack for now with successful submission and captcha error
             if 'data' in return_value and 'url' in return_value['data']:
                 return return_value
             error_list = []
             for error_type, msg, value in return_value['errors']:
                 if error_type in errors.ERROR_MAPPING:
                     if error_type == 'RATELIMIT':
                         # Evict args[0] (presumably the request URL) from the
                         # request cache — TODO confirm against _request.
                         _request.evict(args[0])
                     error_class = errors.ERROR_MAPPING[error_type]
                 else:
                     # Unknown error types fall back to the generic exception.
                     error_class = errors.APIException
                 error_list.append(error_class(error_type, msg, value,
                                               return_value))
             if len(error_list) == 1:
                 raise error_list[0]
             else:
                 raise errors.ExceptionList(error_list)
     return return_value
Пример #3
0
 def wrapper_function(*args, **kwargs):
     """Emit a DeprecationWarning at the wrapped function's definition site, then delegate."""
     code = deprecated_function.__code__
     warnings.warn_explicit(
         "Use of deprecated function '%s'." % deprecated_function.__name__,
         category=DeprecationWarning,
         filename=code.co_filename,
         lineno=code.co_firstlineno + 1)
     return deprecated_function(*args, **kwargs)
Пример #4
0
 def error(self,msg,err,node):
     """Emit msg as a SyntaxWarning at the node's source position when err is falsy."""
     if not err:
         try:
             # warn_explicit points the warning at the original source
             # location (file name + node line) instead of this frame.
             warnings.warn_explicit(msg,SyntaxWarning,self.getFilename(),node.beginLine)
             return
         except Exception,e:
             # A warnings filter may turn the warning into a raised
             # exception; swallow only the SyntaxWarning itself.
             if not isinstance(e,SyntaxWarning): raise e
Пример #5
0
 def new_func(*args, **kwargs):
     """Warn at the deprecated function's definition site, then invoke it."""
     # __code__ replaces the Python-2-only func_code attribute; it has been
     # an alias since Python 2.6 and is the only spelling on Python 3.
     warnings.warn_explicit('Call to deprecated function: {0}'.format(deprecated_function.__name__),
                            category=DeprecationWarning,
                            filename=deprecated_function.__code__.co_filename,
                            lineno=deprecated_function.__code__.co_firstlineno + 1
                            )
     return deprecated_function(*args, **kwargs)
Пример #6
0
def warnAboutFunction(offender, warningString):
    """
    Issue a warning string, identifying C{offender} as the responsible code.

    This function is used to deprecate some behavior of a function.  It differs
    from L{warnings.warn} in that it is not limited to deprecating the behavior
    of a function currently on the call stack.

    @param offender: The function that is being deprecated.

    @param warningString: The string that should be emitted by this warning.
    @type warningString: C{str}

    @since: 11.0
    """
    # inspect.getmodule() is attractive, but somewhat
    # broken in Python < 2.6.  See Python bug 4845.
    offenderModule = sys.modules[offender.__module__]
    filename = inspect.getabsfile(offenderModule)
    # __code__/__globals__ are the portable spellings of the Python-2-only
    # func_code/func_globals (aliases since 2.6, the only forms on Python 3).
    lineStarts = list(findlinestarts(offender.__code__))
    # Attribute the warning to the last line of the offending function.
    lastLineNo = lineStarts[-1][1]
    # Renamed from "globals" to avoid shadowing the builtin.
    offenderGlobals = offender.__globals__

    kwargs = dict(
        category=DeprecationWarning,
        filename=filename,
        lineno=lastLineNo,
        module=offenderModule.__name__,
        registry=offenderGlobals.setdefault("__warningregistry__", {}),
        module_globals=None)

    if sys.version_info[:2] < (2, 5):
        # warn_explicit() did not accept module_globals before Python 2.5.
        kwargs.pop('module_globals')

    warn_explicit(warningString, **kwargs)
Пример #7
0
 def date(self):
     """Image observation time"""
     # 'now' is the fallback string handed to parse_time when the
     # 'date-obs' metadata key is absent.
     time = parse_time(self.meta.get('date-obs', 'now'))
     if time is None:
         # Attribute the warning to the caller's line, not this property.
         warnings.warn_explicit("Missing metadata for observation time. Using current time.",
                                    Warning, __file__, inspect.currentframe().f_back.f_lineno)
     # NOTE(review): `time` is already the result of parse_time; it is parsed
     # a second time here — confirm parse_time is idempotent and accepts None.
     return parse_time(time)
Пример #8
0
def _warn_for_function(warning, function):
    warnings.warn_explicit(
        warning,
        type(warning),
        lineno=function.__code__.co_firstlineno,
        filename=function.__code__.co_filename,
    )
Пример #9
0
    def warn(self, warning):
        """Issue a warning for this item.

        Warnings will be displayed after the test session, unless explicitly suppressed

        :param Warning warning: the warning instance to issue. Must be a subclass of PytestWarning.

        :raise ValueError: if ``warning`` instance is not a subclass of PytestWarning.

        Example usage::

        .. code-block:: python

            node.warn(PytestWarning("some message"))
        """
        from _pytest.warning_types import PytestWarning

        if not isinstance(warning, PytestWarning):
            raise ValueError(
                "warning must be an instance of PytestWarning or subclass, got {!r}".format(
                    warning
                )
            )
        # Attribute the warning to this item's source location. Internal line
        # numbers are 0-based; warn_explicit expects 1-based.
        path, lineno = get_fslocation_from_item(self)
        effective_lineno = None if lineno is None else lineno + 1
        warnings.warn_explicit(
            warning,
            category=None,
            filename=str(path),
            lineno=effective_lineno,
        )
Пример #10
0
    def _unjelly_instance(self, rest):
        """
        (internal) Unjelly an instance.

        Called to handle the deprecated I{instance} token.

        @param rest: The s-expression representing the instance.

        @return: The unjellied instance.
        """
        warnings.warn_explicit(
            "Unjelly support for the instance atom is deprecated since "
            "Twisted 15.0.0.  Upgrade peer for modern instance support.",
            category=DeprecationWarning, filename="", lineno=0)

        # rest[0] is the class, rest[1] its state (see uses below).
        clz = self.unjelly(rest[0])
        # Python 2 old-style classes only (types.ClassType does not exist on
        # Python 3); refuse anything else for security.
        if type(clz) is not types.ClassType:
            raise InsecureJelly("Instance found with non-class class.")
        if hasattr(clz, "__setstate__"):
            # Class controls its own state restoration.
            inst = _newInstance(clz, {})
            state = self.unjelly(rest[1])
            inst.__setstate__(state)
        else:
            # Default: state dict becomes the instance __dict__.
            state = self.unjelly(rest[1])
            inst = _newInstance(clz, state)
        if hasattr(clz, 'postUnjelly'):
            # Deferred so it runs after the whole structure is unjellied.
            self.postCallbacks.append(inst.postUnjelly)
        return inst
Пример #11
0
    def set_referred_object(self, referred_object):
        """
        Sets the object the ResourceIdentifier refers to.

        If it already a weak reference it will be used, otherwise one will be
        created. If the object is None, None will be set.

        Will also append self again to the global class level reference list so
        everything stays consistent.
        """
        # If it does not yet exists simply set it.
        if self.id not in ResourceIdentifier.__resource_id_weak_dict:
            ResourceIdentifier.__resource_id_weak_dict[self.id] = \
                referred_object
            return
        # Otherwise check if the existing element the same as the new one. If
        # it is do nothing, otherwise raise a warning and set the new object as
        # the referred object.
        if ResourceIdentifier.__resource_id_weak_dict[self.id] == \
                referred_object:
            return
        # NOTE(review): the concatenated message lacks a space before "It will
        # now point", so the emitted text runs two sentences together.
        msg = "The resource identifier '%s' already exists and points to " + \
              "another object: '%s'." +\
              "It will now point to the object referred to by the new " + \
              "resource identifier."
        msg = msg % (
            self.id,
            repr(ResourceIdentifier.__resource_id_weak_dict[self.id]))
        # Always raise the warning!
        # Attribute the warning to the caller's line via the parent frame.
        warnings.warn_explicit(msg, UserWarning, __file__,
                               inspect.currentframe().f_back.f_lineno)
        ResourceIdentifier.__resource_id_weak_dict[self.id] = \
            referred_object
Пример #12
0
 def get_info(reddit_session, url, comments_only=False):
     """Fetch a submission (or only its comments) from *url* via the session.

     A configured comment_limit above the account-class maximum is clamped
     (with a warning); a negative value requests the maximum.
     """
     url_data = {}
     comment_limit = reddit_session.config.comment_limit
     comment_sort = reddit_session.config.comment_sort
     if comment_limit:
         # Gold accounts are granted a higher per-request comment cap.
         limit_max = 1500 if (reddit_session.user and
                              reddit_session.user.is_gold) else 500
         if comment_limit > limit_max:
             warnings.warn_explicit('comment_limit %d is too high (max: %d)'
                                    % (comment_limit, limit_max),
                                    UserWarning, '', 0)
             url_data['limit'] = limit_max
         elif comment_limit < 0:
             url_data['limit'] = limit_max
         else:
             url_data['limit'] = comment_limit
     if comment_sort:
         url_data['sort'] = comment_sort
     s_info, c_info = reddit_session.request_json(url, url_data=url_data)
     if comments_only:
         return c_info['data']['children']
     submission = s_info['data']['children'][0]
     submission.comments = c_info['data']['children']
     return submission
Пример #13
0
def p_colon_deprication(p):
    ''' colon_opt : ':'
    '''
    # NOTE: the docstring above is a PLY grammar rule read at runtime; it is
    # part of the program's behavior, not documentation.
    # Warn at the token's own source location (file + line of the ':').
    warnings.warn_explicit("use of ':' deprecated after rule names",
                           DeprecationWarning,
                           scanner.lexer.filename, p.lineno(1))
    # The optional colon contributes no semantic value.
    p[0] = None
Пример #14
0
    def get_info(reddit_session, url, comments_only=False):
        """Retrieve a submission (optionally just its comments) from *url*.

        The configured comment limit is clamped to the maximum for the user's
        account class; -1 requests that maximum, 0 the server default.
        """
        params = {}
        limit = reddit_session.config.comment_limit
        sort = reddit_session.config.comment_sort

        # The per-request cap depends on the account class.
        gold = reddit_session.user and reddit_session.user.is_gold
        cap = (reddit_session.config.gold_comments_max if gold
               else reddit_session.config.regular_comments_max)

        if limit == -1:
            limit = cap  # "as many as this user class allows"
        elif limit > 0 and limit > cap:
            warnings.warn_explicit('comment_limit %d is too high (max: %d)'
                                   % (limit, cap),
                                   UserWarning, '', 0)
            limit = cap
        elif limit == 0:
            limit = None  # let the server apply its default

        if limit:
            params['limit'] = limit
        if sort:
            params['sort'] = sort
        s_info, c_info = reddit_session.request_json(url, url_data=params)
        if comments_only:
            return c_info['data']['children']
        submission = s_info['data']['children'][0]
        submission.comments = c_info['data']['children']
        return submission
Пример #15
0
        def check(r, state, tracer, visited):
            """Detect overlapping first sets among the symbols selectable from
            *state* of rule *r*; record *r* as needing backtracking on conflict,
            then recurse into every unvisited successor state."""
            R   = set()   # union of token sets seen so far for this state
            C   = {}      # symbol -> its token set, for pinpointing the clash
            msg = ""
            must_trace = False
            states     = tracer.select(state[0])
            selection  = list(set(s[0] for s in states if s and s[0]!=FIN))
            for i,s in enumerate(selection):
                if is_token(s):
                    # A terminal's "first set" is just itself.
                    S = set([s])
                else:
                    S = nfamodule.reachables[s]
                if R&S:
                    # Overlap found: identify which earlier symbol collides.
                    for u in C:
                        if C[u]&S:
                            if is_token(s):
                                msg = "%s : %s -> FirstSet(%s) /\\ {%s} = %s\n"%(r, state, u, s, C[u]&S)
                            elif is_token(u):
                                msg = "%s : %s -> {%s} /\\ FirstSet(%s) = %s\n"%(r, state, u, s, C[u]&S)
                            else:
                                msg = "%s : %s -> FirstSet(%s) /\\ FirstSet(%s) = %s\n"%(r, state, u, s, C[u]&S)
                            lineno = sys._getframe(0).f_lineno +1
                            if print_warning:
                                warnings.warn_explicit(msg, NeedsMoreExpansionWarning, "nfadatagen.py", lineno)
                            backtracking.add(r)
                    break
                else:
                    R.update(S)
                    C[s] = S

            # Depth-first over successor states, each with its own tracer clone.
            for state in states:
                if state[0] is not FIN and state not in visited:
                    visited.add(state)
                    subtracer = tracer.clone()
                    check(r, state, subtracer, visited)
Пример #16
0
def reportDeprecatedWorkerNameUsage(message, stacklevel=None, filename=None,
                                    lineno=None):
    """Hook that is ran when old API name is used.

    :param stacklevel: stack level relative to the caller's frame.
    Defaults to caller of the caller of this function.
    """
    if filename is not None:
        # Explicit location given: stacklevel must not also be supplied.
        assert stacklevel is None
        warnings.warn_explicit(
            DeprecatedWorkerNameWarning(message),
            DeprecatedWorkerNameWarning,
            filename, 0 if lineno is None else lineno)
    else:
        # Stack-relative location: add 2 to skip this helper's own frames
        # (default 3 points at the caller of our caller).
        stacklevel = 3 if stacklevel is None else stacklevel + 2
        warnings.warn(DeprecatedWorkerNameWarning(message), None, stacklevel)
Пример #17
0
 def __getattribute__(self, name):
     """Deprecated log facade: warn at the caller and delegate the attribute
     lookup to a real logging.Logger (plugin-specific when possible)."""
     import warnings
     logger_name = 'deluge'
     stack = inspect.stack()
     stack.pop(0)                # The logging call from this module
     module_stack = stack.pop(0) # The module that called the log function
     caller_module = inspect.getmodule(module_stack[0])
     # In some weird cases caller_module might be None, try to continue
     caller_module_name = getattr(caller_module, '__name__', '')
     # module_stack[1]/[2] are the caller's filename and line number.
     warnings.warn_explicit(DEPRECATION_WARNING, DeprecationWarning,
                            module_stack[1], module_stack[2],
                            caller_module_name)
     if caller_module:
         # Walk up the stack looking for the plugin framework to decide
         # whether this call originates from a plugin.
         for member in stack:
             module = inspect.getmodule(member[0])
             if not module:
                 continue
             if module.__name__ in ('deluge.plugins.pluginbase',
                                    'deluge.plugins.init'):
                 logger_name += '.plugin.%s' % caller_module_name
                 # Monkey Patch The Plugin Module
                 caller_module.log = logging.getLogger(logger_name)
                 break
     else:
         logging.getLogger(logger_name).warning(
             "Unable to monkey-patch the calling module's `log` attribute! "
             "You should really update and rebuild your plugins..."
         )
     return getattr(logging.getLogger(logger_name), name)
Пример #18
0
    def __parse_section(self, section_text, method, filename=None, section_start=0, strip_comments=True):
        """Split *section_text* into lines, parse each non-empty line with
        *method*, and re-emit any warnings it raised with the real file name
        and (1-based, section-relative) line number attached.

        Yields (result, line_index) for every parsed line.
        """
        nl = re.compile("\n\r|\r\n|\n")
        lines = nl.split(section_text)
        comment = False
        for i, line in enumerate(lines):
            if strip_comments:
                line, multiline_comment = self.strip_comments(line, comment)
            line = line.strip()

            # TODO: Return None to have errors with line information attached
            if not len(line):
                continue

            saved_warnings = []
            # Capture warnings raised by the line parser so they can be
            # re-issued below with proper location information.
            with warnings.catch_warnings(record=True) as ws:
                warnings.simplefilter("always")
                result = method(line)

                for w in ws:
                    w_outer = warnings.WarningMessage(message=w.message, category=BiooptParseWarning, filename=filename, lineno=section_start+i+1, line=line)
                    saved_warnings.append(w_outer)

            # Re-emit outside the catch_warnings block so they propagate.
            for w in saved_warnings:
                warnings.warn_explicit(message=w.message, category=w.category, filename=w.filename, lineno=w.lineno)

            yield result, i
Пример #19
0
    def stage_objectmode_backend(self):
        """
        Lowering for object mode
        """
        lowerfn = self.backend_object_mode
        self._backend(lowerfn, objectmode=True)

        # Warn if compiled function in object mode and force_pyobject not set
        if not self.flags.force_pyobject:
            if len(self.lifted) > 0:
                warn_msg = ('Function "%s" was compiled in object mode without'
                            ' forceobj=True, but has lifted loops.' %
                            (self.func_id.func_name,))
            else:
                warn_msg = ('Function "%s" was compiled in object mode without'
                            ' forceobj=True.' % (self.func_id.func_name,))
            # Attribute the warning to the compiled function's own source.
            warnings.warn_explicit(warn_msg, errors.NumbaWarning,
                                   self.func_id.filename,
                                   self.func_id.firstlineno)
            if self.flags.release_gil:
                # nogil=True is pointless in object mode; say so explicitly.
                warn_msg = ("Code running in object mode won't allow parallel"
                            " execution despite nogil=True.")
                warnings.warn_explicit(warn_msg, errors.NumbaWarning,
                                       self.func_id.filename,
                                       self.func_id.firstlineno)
Пример #20
0
def _compile_restricted_mode(
        source,
        filename='<string>',
        mode="exec",
        flags=0,
        dont_inherit=False,
        policy=RestrictingNodeTransformer):
    """Compile *source* under the given restriction *policy*.

    Returns a CompileResult of (byte_code, errors, warnings, used_names).
    ``policy=None`` compiles unrestricted; otherwise *policy* must be a
    RestrictingNodeTransformer subclass that rewrites the AST before the
    final compile. Raises TypeError for disallowed source or policy types.
    """
    if not IS_CPYTHON:
        # The restrictions depend on CPython implementation details.
        warnings.warn_explicit(
            NOT_CPYTHON_WARNING, RuntimeWarning, 'RestrictedPython', 0)

    byte_code = None
    collected_errors = []
    collected_warnings = []
    used_names = {}
    if policy is None:
        # Unrestricted Source Checks
        byte_code = compile(source, filename, mode=mode, flags=flags,
                            dont_inherit=dont_inherit)
    elif issubclass(policy, RestrictingNodeTransformer):
        c_ast = None
        allowed_source_types = [str, ast.Module]
        if IS_PY2:
            allowed_source_types.append(unicode)  # NOQA: F821,E501  # PY2 only statement, in Python 2 only module
        if not issubclass(type(source), tuple(allowed_source_types)):
            raise TypeError('Not allowed source type: '
                            '"{0.__class__.__name__}".'.format(source))
        c_ast = None
        # workaround for pypy issue https://bitbucket.org/pypy/pypy/issues/2552
        if isinstance(source, ast.Module):
            c_ast = source
        else:
            # Parse failures are collected, not raised, so the caller gets
            # them in the CompileResult.
            try:
                c_ast = ast.parse(source, filename, mode)
            except (TypeError, ValueError) as e:
                collected_errors.append(str(e))
            except SyntaxError as v:
                collected_errors.append(syntax_error_template.format(
                    lineno=v.lineno,
                    type=v.__class__.__name__,
                    msg=v.msg,
                    statement=v.text.strip() if v.text else None
                ))
        if c_ast:
            # The policy visitor rewrites the tree and fills the collectors.
            policy_instance = policy(
                collected_errors, collected_warnings, used_names)
            policy_instance.visit(c_ast)
            if not collected_errors:
                byte_code = compile(c_ast, filename, mode=mode  # ,
                                    # flags=flags,
                                    # dont_inherit=dont_inherit
                                    )
    else:
        raise TypeError('Unallowed policy provided for RestrictedPython')
    return CompileResult(
        byte_code,
        tuple(collected_errors),
        collected_warnings,
        used_names)
Пример #21
0
 def Wrapper(*args, **kwargs):
   """Warn at the wrapped *func*'s definition site, then delegate the call."""
   # __code__ replaces the Python-2-only func_code attribute; it has been an
   # alias since Python 2.6 and is the only spelling on Python 3.
   warnings.warn_explicit(
       '%s() is deprecated: %s' % (func.__name__, message),
       category=DeprecationWarning,
       filename=func.__code__.co_filename,
       lineno=func.__code__.co_firstlineno + 1)
   return func(*args, **kwargs)
Пример #22
0
    def stage_parfor_pass(self):
        """
        Convert data-parallel computations into Parfor nodes
        """
        # Ensure we have an IR and type information.
        assert self.func_ir
        parfor_pass = ParforPass(self.func_ir, self.type_annotation.typemap,
            self.type_annotation.calltypes, self.return_type, self.typingctx,
            self.flags.auto_parallel, self.flags)
        parfor_pass.run()

        if config.WARNINGS:
            # check the parfor pass worked and warn if it didn't
            has_parfor = False
            for blk in self.func_ir.blocks.values():
                for stmnt in blk.body:
                    if isinstance(stmnt, Parfor):
                        has_parfor = True
                        break
                else:
                    # Inner loop finished without break: keep scanning blocks.
                    continue
                break

            if not has_parfor:
                # parfor calls the compiler chain again with a string
                if not self.func_ir.loc.filename == '<string>':
                    msg = ("parallel=True was specified but no transformation"
                           " for parallel execution was possible.")
                    # Point the warning at the user's function definition.
                    warnings.warn_explicit(
                        msg,
                        errors.NumbaWarning,
                        self.func_id.filename,
                        self.func_id.firstlineno
                        )
Пример #23
0
 def process_key(self):
     """Consume one token while expecting a key; set self.next to the parser
     state to run afterwards (None ends a non-top-level scope)."""
     token_type, token_string, token_line_number = self.consume()

     if pyradox.token.is_primitive_key_token_type(token_type):
         # Valid key: remember it and expect an operator next.
         self.key_string = token_string
         self.key = pyradox.token.make_primitive(token_string, token_type)
         self.next = self.process_operator
     elif token_type == 'comment':
         if token_line_number == self.get_previous_line_number():
             # Comment following a previous value.
             self.append_line_comment(token_string[1:])
         else:
             # Stand-alone comment: hold it until the next value is made.
             self.pending_comments.append(token_string[1:])
         self.next = self.process_key
     elif token_type == 'end':
         if self.is_top_level:
             # top level cannot be ended, warn
             warnings.warn_explicit('Unmatched closing bracket at outer level of file. Skipping token.', ParseWarning, self.filename, token_line_number + 1)
             self.next = self.process_key
         else:
             # Closing bracket ends this nested scope.
             self.next = None
     else:
         #invalid key
         warnings.warn_explicit('Token "%s" is not valid key. Skipping token.' % token_string, ParseWarning, self.filename, token_line_number + 1)
         self.next = self.process_key
Пример #24
0
def _handle_menu(_context, menu, title, for_, name, permission,
                 layer=IDefaultBrowserLayer):
    """Register a browser-menu item for a page directive.

    ``menu`` and ``title`` must be given together; returns the actions
    produced by ``menuItemDirective``, or an empty list when no menu applies
    (or when zope.browsermenu is unavailable).
    """
    if menu or title:
        if not (menu and title):
            raise ConfigurationError(
                "If either menu or title are specified, they must "
                "both be specified.")
        if len(for_) != 1:
            # A menu entry needs exactly one target interface.
            raise ConfigurationError(
                "Menus can be specified only for single-view, not for "
                "multi-views.")

        if menuItemDirective is None:
            # zope.browsermenu is an optional dependency; without it the menu
            # request is ignored, warning at the ZCML directive's location.
            import warnings
            warnings.warn_explicit(
                'Page directive used with "menu" argument, while "zope.browsermenu" '
                'package is not installed. Doing nothing.',
                UserWarning,
                _context.info.file, _context.info.line)
            return []

        return menuItemDirective(
            _context, menu, for_[0], '@@' + name, title,
            permission=permission, layer=layer)

    return []
Пример #25
0
def pytest_pycollect_makeitem(collector, name, obj):
    """Hookwrapper: collect classes and test functions that no other hook
    claimed, warning about uncollectable non-function objects."""
    outcome = yield
    res = outcome.get_result()
    if res is not None:
        # Another hook already produced a collection result.
        return
    # nothing was collected elsewhere, let's do it here
    if safe_isclass(obj):
        if collector.istestclass(obj, name):
            outcome.force_result(Class(name, parent=collector))
    elif collector.istestfunction(obj, name):
        # mock seems to store unbound methods (issue473), normalize it
        obj = getattr(obj, "__func__", obj)
        # We need to try and unwrap the function if it's a functools.partial
        # or a functools.wrapped.
        # We mustn't if it's been wrapped with mock.patch (python 2 only)
        if not (isfunction(obj) or isfunction(get_real_func(obj))):
            filename, lineno = getfslineno(obj)
            warnings.warn_explicit(
                message=PytestWarning(
                    "cannot collect %r because it is not a function." % name
                ),
                category=None,
                filename=str(filename),
                lineno=lineno + 1,
            )
        elif getattr(obj, "__test__", True):
            if is_generator(obj):
                # Yield tests are deprecated: collect but mark as xfail.
                res = Function(name, parent=collector)
                reason = deprecated.YIELD_TESTS.format(name=name)
                res.add_marker(MARK_GEN.xfail(run=False, reason=reason))
                res.warn(PytestWarning(reason))
            else:
                res = list(collector._genfunctions(name, obj))
            outcome.force_result(res)
 def __getitem__(self, key):
     """dict lookup that first emits a DeprecationWarning for deprecated keys,
     attributed to the caller's file and line (works under execfile())."""
     if key in self.deprecation_messages:
         import warnings
         import linecache
         # DeprecationWarnings are ignored by default. Clear the filter so
         # they are not:
         previous_warning_filters = warnings.filters[:]
         try:
             warnings.resetwarnings()
             # Hacky stuff to get it to work from within execfile() with
             # correct line data:
             linecache.clearcache()
             caller = sys._getframe(1)
             globals = caller.f_globals
             lineno = caller.f_lineno
             module = globals['__name__']
             filename = globals.get('__file__')
             fnl = filename.lower()
             if fnl.endswith((".pyc", ".pyo")):
                 # Point at the .py source rather than the compiled file.
                 filename = filename[:-1]
             message = self.deprecation_messages[key]
             warnings.warn_explicit(message, DeprecationWarning, filename, lineno, module)
         finally:
             # Restore the warnings filter:
             warnings.filters[:] = previous_warning_filters
     return dict.__getitem__(self, key)
Пример #27
0
def deprecated(f, *args, **kw):
    """Warn that *f* is deprecated, then invoke it and return its result.

    Intended as a wrapper body (e.g. for the ``decorator`` package): emits a
    DeprecationWarning pointing at *f*'s definition, then delegates the call.
    """
    warnings.warn_explicit(
        "Call to deprecated function {}.".format(f.__name__),
        category=DeprecationWarning,
        # __code__ replaces the Python-2-only func_code attribute.
        filename=f.__code__.co_filename,
        lineno=f.__code__.co_firstlineno + 1
    )
    # The original wrapper dropped the call entirely, so decorated functions
    # always returned None; delegate so the wrapped function actually runs.
    return f(*args, **kw)
Пример #28
0
    def warn(self, code, message, fslocation=None, nodeid=None):
        """
        .. deprecated:: 3.8

            Use :py:func:`warnings.warn` or :py:func:`warnings.warn_explicit` directly instead.

        Generate a warning for this test session.
        """
        from _pytest.warning_types import RemovedInPytest4Warning

        # A (filename, lineno) pair has length exactly 2; the previous
        # ``> 2`` comparison wrongly rejected it and fell back to
        # "unknown file" even though fslocation[:2] unpacks a 2-tuple fine.
        if isinstance(fslocation, (tuple, list)) and len(fslocation) >= 2:
            filename, lineno = fslocation[:2]
        else:
            filename = "unknown file"
            lineno = 0
        msg = "config.warn has been deprecated, use warnings.warn instead"
        if nodeid:
            msg = "{}: {}".format(nodeid, msg)
        warnings.warn_explicit(
            RemovedInPytest4Warning(msg),
            category=None,
            filename=filename,
            lineno=lineno,
        )
        # Keep feeding the historic logwarning hook for older plugins.
        self.hook.pytest_logwarning.call_historic(
            kwargs=dict(
                code=code, message=message, fslocation=fslocation, nodeid=nodeid
            )
        )
def resource(_context, name, layer=IDefaultBrowserLayer,
             permission='zope.Public', factory=None,
             file=None, image=None, template=None):
    """Register a browser resource from exactly one content source."""
    # Public resources use the shared public checker object.
    if permission == 'zope.Public':
        permission = CheckerPublic

    checker = NamesChecker(allowed_names, permission)

    # Count how many mutually exclusive content sources were supplied.
    provided = sum(1 for source in (factory, file, image, template) if source)
    if provided > 1:
        raise ConfigurationError(
            "Must use exactly one of factory or file or image or template"
            " attributes for resource directives"
        )

    if image or template:
        import warnings
        warnings.warn_explicit(
            'The "template" and "image" attributes of resource '
            'directive are deprecated in favor of pluggable '
            'file resource factories based on file extensions. '
            'Use the "file" attribute instead.',
            DeprecationWarning,
            _context.info.file, _context.info.line)
        # Fold the deprecated attribute into ``file`` (image wins if both set,
        # matching the original branch order).
        file = image or template

    _context.action(
        discriminator=('resource', name, IBrowserRequest, layer),
        callable=resourceHandler,
        args=(name, layer, checker, factory, file, _context.info),
        )
Пример #30
0
    def _importconftest(self, conftestpath):
        """Import the conftest module at ``conftestpath``, caching the result.

        Returns the (possibly cached) module object. Raises
        ConftestImportFailure if the import itself fails.
        """
        try:
            # Fast path: this conftest was imported before.
            return self._conftestpath2mod[conftestpath]
        except KeyError:
            pkgpath = conftestpath.pypkgpath()
            if pkgpath is None:
                # Not inside a package: drop any stale same-named module from
                # sys.modules so the import below picks up this file.
                _ensure_removed_sysmodule(conftestpath.purebasename)
            try:
                mod = conftestpath.pyimport()
                if hasattr(mod, "pytest_plugins") and self._configured:
                    from _pytest.deprecated import (
                        PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
                    )

                    # Declaring pytest_plugins in a non-top-level conftest is
                    # deprecated; attribute the warning to the conftest file.
                    warnings.warn_explicit(
                        PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST,
                        category=None,
                        filename=str(conftestpath),
                        lineno=0,
                    )
            except Exception:
                # Wrap any import-time failure with the offending path.
                raise ConftestImportFailure(conftestpath, sys.exc_info())

            self._conftest_plugins.add(mod)
            self._conftestpath2mod[conftestpath] = mod
            dirpath = conftestpath.dirpath()
            if dirpath in self._path2confmods:
                # Register the new module with every known path at or below
                # the conftest's directory.
                for path, mods in self._path2confmods.items():
                    if path and path.relto(dirpath) or path == dirpath:
                        assert mod not in mods
                        mods.append(mod)
            self.trace("loaded conftestmodule %r" % (mod))
            self.consider_conftest(mod)
            return mod
Пример #31
0
 def __init__(self, _context, class_):
     """Deprecated 'content' alias: warn, then delegate to the real directive."""
     deprecation_message = (
         "The 'content' alias for the 'class' directive has been "
         "deprecated and will be removed in Zope 2.12.\n")
     # Attribute the warning to the ZCML location that used the alias.
     warnings.warn_explicit(
         deprecation_message,
         DeprecationWarning,
         _context.info.file,
         _context.info.line)
     super(ContentDirective, self).__init__(_context, class_)
Пример #32
0
    def visit_Assert(self, assert_):
        """Return the AST statements to replace the ast.Assert instance.

        This rewrites the test of an assertion to provide
        intermediate values and replace it with an if statement which
        raises an assertion error with a detailed explanation in case
        the expression is false.

        """
        # ``assert (x, y)`` is always true; warn rather than silently pass.
        if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1:
            from _pytest.warning_types import PytestWarning
            import warnings

            warnings.warn_explicit(
                PytestWarning(
                    "assertion is always true, perhaps remove parentheses?"),
                category=None,
                filename=str(self.module_path),
                lineno=assert_.lineno,
            )

        # Per-assert rewriting state; reset for every Assert node visited.
        self.statements = []
        self.variables = []
        self.variable_counter = itertools.count()
        self.stack = []
        self.on_failure = []
        self.push_format_context()
        # Rewrite assert into a bunch of statements.
        top_condition, explanation = self.visit(assert_.test)
        # If in a test module, check if directly asserting None, in order to warn [Issue #3191]
        if self.module_path is not None:
            self.statements.append(
                self.warn_about_none_ast(top_condition,
                                         module_path=self.module_path,
                                         lineno=assert_.lineno))
        # Create failure message.
        body = self.on_failure
        negation = ast.UnaryOp(ast.Not(), top_condition)
        self.statements.append(ast.If(negation, body, []))
        if assert_.msg:
            assertmsg = self.helper("format_assertmsg", assert_.msg)
            explanation = "\n>assert " + explanation
        else:
            assertmsg = ast.Str("")
            explanation = "assert " + explanation
        template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
        msg = self.pop_format_context(template)
        fmt = self.helper("format_explanation", msg)
        err_name = ast.Name("AssertionError", ast.Load())
        exc = ast_Call(err_name, [fmt], [])
        # ast.Raise takes a different number of arguments on Python 2 vs 3.
        if sys.version_info[0] >= 3:
            raise_ = ast.Raise(exc, None)
        else:
            raise_ = ast.Raise(exc, None, None)
        body.append(raise_)
        # Clear temporary variables by setting them to None.
        if self.variables:
            variables = [
                ast.Name(name, ast.Store()) for name in self.variables
            ]
            clear = ast.Assign(variables, _NameConstant(None))
            self.statements.append(clear)
        # Fix line numbers.
        for stmt in self.statements:
            set_location(stmt, assert_.lineno, assert_.col_offset)
        return self.statements
Пример #33
0
 def wrapper(*args, **kwargs):
     """Forward to ``func`` after pointing a DeprecationWarning at its definition."""
     # ``__code__`` works on Python 2.6+ and 3.x; the Python-2-only
     # ``func_code`` attribute raises AttributeError on Python 3.
     code = func.__code__
     warn_explicit(
         'Call to deprecated function %(funcname)s.' % {'funcname': func.__name__},
         category=DeprecationWarning,
         filename=code.co_filename,
         lineno=code.co_firstlineno + 1,
     )
     return func(*args, **kwargs)
    def ProcessShotStep1(self):
        """
        Method that runs the first step of the reconstruction, which consists of getting statistics from the XTCAV trace. This method is called automatically and should not be called by the user unless he has a knowledge of the operation done by this class internally. 

        Returns: True if it was successful, False otherwise
        """
        # NOTE(review): this method is Python 2 code (print statement below);
        # it will not parse on Python 3 without conversion.

        # Bail out early when no event data has been loaded yet.
        if not self._currenteventavailable:
            return False

        #It is important that this is open first so the experiment name is set properly (important for loading references)   
        if not self._envinfo:
            self._experiment=self._env.experiment()
            epicsstore=self._env.epicsStore();
            self._globalCalibration,ok1=xtup.GetGlobalXTCAVCalibration(epicsstore)
            self._saturationValue = xtup.GetCameraSaturationValue(epicsstore)
            self._roixtcav,ok2=xtup.GetXTCAVImageROI(epicsstore) 
            if ok1 and ok2: #If the information is not good, we try next event
                self._envinfo=True
            else:
                return False

        #It is important that the lasing off reference is open first, because it may reset the lasing off reference that needs to be loaded        
        if not self._loadedlasingoffreference:
            self.LoadLasingOffReference()

        if not self._loadeddarkreference:
            self.LoadDarkReference()

        # Warn (but do not abort) when the camera image is saturated.
        if np.max(self._rawimage)>=self._saturationValue : #Detection if the image is saturated, we skip if it is
            warnings.warn_explicit('Saturated Image',UserWarning,'XTCAV',0)

        #Subtract the dark background, taking into account properly possible different ROIs
        #Only if the reference is present
        if self._loadeddarkreference:        
            img,ROI=xtu.SubtractBackground(self._rawimage,self._roixtcav,self._darkreference.image,self._darkreference.ROI)  
        else:
            ROI=self._roixtcav
            img=self._rawimage

        img,ok=xtu.DenoiseImage(img,self._medianfilter,self._snrfilter)                    #Remove noise from the image and normalize it
        if not ok:                                        #If there is nothing in the image we skip the event  
            return False

        img,ROI=xtu.FindROI(img,ROI,self._roiwaistthres,self._roiexpand)                  #Crop the image, the ROI struct is changed. It also add an extra dimension to the image so the array can store multiple images corresponding to different bunches
        if ROI['xN']<3 or ROI['yN']<3:
            print 'ROI too small',ROI['xN'],ROI['yN']
            return False
        img=xtu.SplitImage(img,self._nb, self._islandsplitmethod,self._islandsplitpar1,self._islandsplitpar2)



        imageStats=xtu.ProcessXTCAVImage(img,ROI)          #Obtain the different properties and profiles from the trace        

        #Save the results of the step 1

        self._eventresultsstep1={
            'processedImage':img,
            'NB':img.shape[0],
            'ROI':ROI,
            'imageStats':imageStats,
            }

        self._currenteventprocessedstep1=True        
        return True
Пример #35
0
def main(args=None, *, _wrap_timer=None):
    """Main program, used when run as a script.

    The optional 'args' argument specifies the command line to be parsed,
    defaulting to sys.argv[1:].

    The return value is an exit code to be passed to sys.exit(); it
    may be None to indicate success.

    When an exception happens during timing, a traceback is printed to
    stderr and the return value is 1.  Exceptions at other times
    (including the template compilation) are not caught.

    '_wrap_timer' is an internal interface used for unit testing.  If it
    is not None, it must be a callable that accepts a timer function
    and returns another timer function (used for unit testing).
    """
    if args is None:
        args = sys.argv[1:]
    import getopt
    try:
        opts, args = getopt.getopt(args, "n:u:s:r:tcpvh", [
            "number=", "setup=", "repeat=", "time", "clock", "process",
            "verbose", "unit=", "help"
        ])
    except getopt.error as err:
        print(err)
        print("use -h/--help for command line help")
        return 2
    timer = default_timer
    stmt = "\n".join(args) or "pass"
    number = 0  # auto-determine
    setup = []
    repeat = default_repeat
    verbose = 0
    time_unit = None
    units = {"usec": 1, "msec": 1e3, "sec": 1e6}
    precision = 3
    for o, a in opts:
        # The option letters are mutually exclusive, so use elif instead of
        # the original chain of independent ifs (which tested every branch
        # for every option).
        if o in ("-n", "--number"):
            number = int(a)
        elif o in ("-s", "--setup"):
            setup.append(a)
        elif o in ("-u", "--unit"):
            if a in units:
                time_unit = a
            else:
                print("Unrecognized unit. Please select usec, msec, or sec.",
                      file=sys.stderr)
                return 2
        elif o in ("-r", "--repeat"):
            repeat = int(a)
            if repeat <= 0:
                repeat = 1
        elif o in ("-t", "--time"):
            timer = time.time
        elif o in ("-c", "--clock"):
            # time.clock was removed in Python 3.8; fall back to its
            # documented replacement so -c doesn't raise AttributeError.
            timer = getattr(time, "clock", time.perf_counter)
        elif o in ("-p", "--process"):
            timer = time.process_time
        elif o in ("-v", "--verbose"):
            if verbose:
                precision += 1
            verbose += 1
        elif o in ("-h", "--help"):
            print(__doc__, end=' ')
            return 0
    setup = "\n".join(setup) or "pass"
    # Include the current directory, so that local imports work (sys.path
    # contains the directory of this script, rather than the current
    # directory)
    import os
    sys.path.insert(0, os.curdir)
    if _wrap_timer is not None:
        timer = _wrap_timer(timer)
    t = Timer(stmt, setup, timer)
    if number == 0:
        # determine number so that 0.2 <= total time < 2.0
        callback = None
        if verbose:

            def callback(number, time_taken):
                msg = "{num} loops -> {secs:.{prec}g} secs"
                print(msg.format(num=number, secs=time_taken, prec=precision))

        try:
            number, _ = t.autorange(callback)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit propagate instead of being reported as timing errors.
            t.print_exc()
            return 1
    try:
        r = t.repeat(repeat, number)
    except Exception:
        t.print_exc()
        return 1
    best = min(r)
    if verbose:
        print("raw times:", " ".join(["%.*g" % (precision, x) for x in r]))
    print("%d loops," % number, end=' ')
    usec = best * 1e6 / number
    if time_unit is not None:
        scale = units[time_unit]
    else:
        # Pick the largest unit that keeps the reported value >= 1.
        scales = [(scale, unit) for unit, scale in units.items()]
        scales.sort(reverse=True)
        for scale, time_unit in scales:
            if usec >= scale:
                break
    print("best of %d: %.*g %s per loop" %
          (repeat, precision, usec / scale, time_unit))
    # (The original recomputed best and usec here; both are unchanged since
    # the computation above, so the duplicates were dropped.)
    worst = max(r)
    if worst >= best * 4:
        # Timings vary wildly; warn and report the worst time instead.
        usec = worst * 1e6 / number
        import warnings
        warnings.warn_explicit(
            "The test results are likely unreliable. The worst\n"
            "time (%.*g %s) was more than four times slower than the best time."
            % (precision, usec / scale, time_unit), UserWarning, '', 0)
    return None
Пример #36
0
    def prompt_choices(self, name, choices, default=None):
        """Deprecated shim; delegate to the module-level prompt_choices()."""
        # The original passed only a message to warnings.warn_explicit(),
        # which also requires category, filename and lineno — so every call
        # raised TypeError. warnings.warn() is the right API here. The
        # message is also aligned with the sibling shims ("Command.<name>").
        warnings.warn(
            "Command.prompt_choices is deprecated, use prompt_choices() function instead",
            DeprecationWarning,
            stacklevel=2,
        )
        # Return the delegate's result instead of discarding it.
        return prompt_choices(name, choices, default)
Пример #37
0
    def prompt_bool(self, name, default=False):
        """Deprecated shim; delegate to the module-level prompt_bool()."""
        # The original passed only a message to warnings.warn_explicit(),
        # which also requires category, filename and lineno — so every call
        # raised TypeError. warnings.warn() is the right API here.
        warnings.warn(
            "Command.prompt_bool is deprecated, use prompt_bool() function instead",
            DeprecationWarning,
            stacklevel=2,
        )
        # Return the delegate's result instead of discarding it.
        return prompt_bool(name, default)
Пример #38
0
    def prompt(self, name, default=None):
        """Deprecated shim; delegate to the module-level prompt()."""
        # The original passed only a message to warnings.warn_explicit(),
        # which also requires category, filename and lineno — so every call
        # raised TypeError. warnings.warn() is the right API here.
        warnings.warn(
            "Command.prompt is deprecated, use prompt() function instead",
            DeprecationWarning,
            stacklevel=2,
        )
        # Return the delegate's result instead of discarding it.
        return prompt(name, default)
Пример #39
0
def _compile_restricted_mode(source,
                             filename='<string>',
                             mode="exec",
                             flags=0,
                             dont_inherit=False,
                             policy=RestrictingNodeTransformer):
    """Compile *source* under the given restriction *policy*.

    source may be a string or a pre-parsed ast.Module (plus unicode on
    Python 2). With ``policy=None`` the source is compiled unrestricted;
    with a RestrictingNodeTransformer subclass the AST is visited first and
    errors/warnings are collected instead of raised.

    Returns a CompileResult(byte_code, errors, warnings, used_names) where
    byte_code is None if any error was collected.

    Raises TypeError for an unsupported source type or policy.
    """
    # Restricted compilation is only validated against CPython semantics.
    if not IS_CPYTHON:
        warnings.warn_explicit(NOT_CPYTHON_WARNING, RuntimeWarning,
                               'RestrictedPython', 0)

    byte_code = None
    collected_errors = []
    collected_warnings = []
    used_names = {}
    if policy is None:
        # Unrestricted Source Checks
        byte_code = compile(source,
                            filename,
                            mode=mode,
                            flags=flags,
                            dont_inherit=dont_inherit)
    elif issubclass(policy, RestrictingNodeTransformer):
        allowed_source_types = [str, ast.Module]
        if IS_PY2:
            allowed_source_types.append(
                unicode
            )  # NOQA: F821,E501  # PY2 only statement, in Python 2 only module
        if not issubclass(type(source), tuple(allowed_source_types)):
            raise TypeError('Not allowed source type: '
                            '"{0.__class__.__name__}".'.format(source))
        # (The original initialized ``c_ast = None`` twice; once suffices.)
        c_ast = None
        # workaround for pypy issue https://bitbucket.org/pypy/pypy/issues/2552
        if isinstance(source, ast.Module):
            c_ast = source
        else:
            try:
                c_ast = ast.parse(source, filename, mode)
            except (TypeError, ValueError) as e:
                collected_errors.append(str(e))
            except SyntaxError as v:
                collected_errors.append(
                    syntax_error_template.format(lineno=v.lineno,
                                                 type=v.__class__.__name__,
                                                 msg=v.msg,
                                                 statement=v.text.strip()))
        if c_ast:
            # Let the policy rewrite/validate the tree, accumulating
            # errors and warnings into the shared lists.
            policy_instance = policy(collected_errors, collected_warnings,
                                     used_names)
            policy_instance.visit(c_ast)
            if not collected_errors:
                byte_code = compile(
                    c_ast,
                    filename,
                    mode=mode  # ,
                    # flags=flags,
                    # dont_inherit=dont_inherit
                )
    else:
        raise TypeError('Unallowed policy provided for RestrictedPython')
    return CompileResult(byte_code, tuple(collected_errors),
                         collected_warnings, used_names)
Пример #40
0
 def autolog2(disable=False, silent=False):
     # Test fixture: emit a warning attributed to the mlflow package file,
     # log a marker event, then install the patch for "integration2".
     # NOTE(review): ``disable`` and ``silent`` are unused in this body —
     # presumably required by the autologging API signature; confirm with
     # the caller before relying on them.
     warnings.warn_explicit(
         "warn_autolog2", category=Warning, filename=mlflow.__file__, lineno=5
     )
     logger.info("event_autolog2")
     safe_patch("integration2", patch_destination, "fn2", patch_impl2)
Пример #41
0
    def agg_list_like(self) -> DataFrame | Series:
        """
        Compute aggregation in the case of a list-like argument.

        Returns
        -------
        Result of aggregation.
        """
        from pandas.core.reshape.concat import concat

        obj = self.obj
        arg = cast(List[AggFuncTypeBase], self.f)

        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")

        # Pick the object whose columns/values the aggregations run over.
        if not isinstance(obj, SelectionMixin):
            # i.e. obj is Series or DataFrame
            selected_obj = obj
        elif obj._selected_obj.ndim == 1:
            # For SeriesGroupBy this matches _obj_with_exclusions
            selected_obj = obj._selected_obj
        else:
            selected_obj = obj._obj_with_exclusions

        results = []
        keys = []
        failed_names = []

        depr_nuisance_columns_msg = (
            "{} did not aggregate successfully. If any error is "
            "raised this will raise in a future version of pandas. "
            "Drop these columns/ops to avoid this warning.")

        # degenerate case
        if selected_obj.ndim == 1:
            for a in arg:
                colg = obj._gotitem(selected_obj.name,
                                    ndim=1,
                                    subset=selected_obj)
                try:
                    new_res = colg.aggregate(a)

                except TypeError:
                    failed_names.append(com.get_callable_name(a) or a)
                else:
                    results.append(new_res)

                    # make sure we find a good name
                    name = com.get_callable_name(a) or a
                    keys.append(name)

        # multiples
        else:
            indices = []
            for index, col in enumerate(selected_obj):
                colg = obj._gotitem(col,
                                    ndim=1,
                                    subset=selected_obj.iloc[:, index])
                try:
                    # Capture and suppress any warnings emitted by us in the call
                    # to agg below, but pass through any warnings that were
                    # generated otherwise.
                    # This is necessary because of https://bugs.python.org/issue29672
                    # See GH #43741 for more details
                    with warnings.catch_warnings(record=True) as record:
                        new_res = colg.aggregate(arg)
                    if len(record) > 0:
                        match = re.compile(
                            depr_nuisance_columns_msg.format(".*"))
                        for warning in record:
                            if re.match(match, str(warning.message)):
                                failed_names.append(col)
                            else:
                                # Re-emit third-party warnings with their
                                # original location so they aren't swallowed.
                                warnings.warn_explicit(
                                    message=warning.message,
                                    category=warning.category,
                                    filename=warning.filename,
                                    lineno=warning.lineno,
                                )

                except (TypeError, DataError):
                    failed_names.append(col)
                except ValueError as err:
                    # cannot aggregate
                    if "Must produce aggregated value" in str(err):
                        # raised directly in _aggregate_named
                        failed_names.append(col)
                    elif "no results" in str(err):
                        # reached in test_frame_apply.test_nuiscance_columns
                        #  where the colg.aggregate(arg) ends up going through
                        #  the selected_obj.ndim == 1 branch above with arg == ["sum"]
                        #  on a datetime64[ns] column
                        failed_names.append(col)
                    else:
                        raise
                else:
                    results.append(new_res)
                    indices.append(index)

            keys = selected_obj.columns.take(indices)

        # if we are empty
        if not len(results):
            raise ValueError("no results")

        if len(failed_names) > 0:
            # Surface the columns/ops that failed as a single FutureWarning.
            warnings.warn(
                depr_nuisance_columns_msg.format(failed_names),
                FutureWarning,
                stacklevel=find_stack_level(),
            )

        try:
            concatenated = concat(results, keys=keys, axis=1, sort=False)
        except TypeError as err:
            # we are concatting non-NDFrame objects,
            # e.g. a list of scalars
            from pandas import Series

            result = Series(results, index=keys, name=obj.name)
            if is_nested_object(result):
                raise ValueError(
                    "cannot combine transform and aggregation operations"
                ) from err
            return result
        else:
            # Concat uses the first index to determine the final indexing order.
            # The union of a shorter first index with the other indices causes
            # the index sorting to be different from the order of the aggregating
            # functions. Reindex if this is the case.
            index_size = concatenated.index.size
            full_ordered_index = next(result.index for result in results
                                      if result.index.size == index_size)
            return concatenated.reindex(full_ordered_index, copy=False)
Пример #42
0
 def dep_explicit(self, i):
     """Emit a DeprecationWarning attributed to "hello":3, but only for i == 0."""
     if i != 0:
         return
     warnings.warn_explicit(
         "dep_explicit",
         category=DeprecationWarning,
         filename="hello",
         lineno=3,
     )
Пример #43
0
def _show_warning(message: str, result: Any) -> Any:
    for i, line in enumerate(message.splitlines(keepends=False)):
        warn_explicit(line.lstrip("*WARNING*"), UserWarning, "Skill response",
                      i)

    return result
Пример #44
0
 def wrapper(*args, **kwargs):
     """Invoke ``func_`` after warning that it is deprecated."""
     # Fixes two attribute bugs: ``func_.code`` never existed on function
     # objects, and the Python-2-only ``func_code`` is gone in Python 3 —
     # ``__code__`` works on 2.6+ and 3.x.
     code = func_.__code__
     warnings.warn_explicit("Call to deprecated function " + func_.__name__,
                            category=DeprecationWarning,
                            filename=code.co_filename,
                            lineno=code.co_firstlineno + 1)
     return func_(*args, **kwargs)
Пример #45
0
    def run_PyBDSF(self,
                   pybdsf_params=dict(),
                   ncores=8,
                   write=True,
                   redo=False):
        """
        Perform source finding on image using PyBDSF, producing just a component catalogue by default.

        Keyword arguments:
        ------------------
        params : string
            Any extra parameters to pass into PyBDSF (apart from cores, noise, background and table).
        ncores : int
            The number of cores to use (per node) when running BANE and Aegean.
        write : bool
            Write the fitted model and residual images.
        redo : bool
            Perform source finding, even if output catalogue(s) exist."""
        # NOTE(review): this method is Python 2 code (print statements below)
        # and uses a mutable default for pybdsf_params (shared across calls).

        # bdsf is an optional dependency; fail soft with an error code.
        try:
            import bdsf
        except:
            logging.error("Can not import bdsf module")
            return 1

        if redo:
            print "Re-doing source finding. Overwriting all PyBDSF files."

        # Only run source finding if the catalogue is missing or redo is set.
        if not os.path.exists(self.cat_comp) or redo:

            print "--------------------------------"
            print "| Running PyBDSF for catalogue |"
            print "--------------------------------"

            #Run PyBDSF source finder to produce catalogue of image
            # if self.SNR is not None:
            # pybdsf_params.update({'thresh':'hard', 'thresh_pix':self.SNR})

            img = bdsf.process_image(self.filepath,
                                     quiet=True,
                                     ncores=ncores,
                                     **pybdsf_params)
            # Export each diagnostic image type to its own FITS file.
            plot_type_list = [
                'rms', 'mean', 'gaus_model', 'gaus_resid', 'island_mask'
            ]
            fits_names = [
                "../{}_{}.fits".format(self.basename, _)
                for _ in plot_type_list
            ]

            # number of plots
            n_plots = len(plot_type_list)

            for k in range(n_plots):
                img.export_image(outfile=fits_names[k],
                                 clobber=True,
                                 img_type=plot_type_list[k])

            img.write_catalog(outfile=self.cat_comp,
                              format="csv",
                              clobber=True,
                              catalog_type="srl")

            #Print error message when no sources are found and catalogue not created.
            if not os.path.exists(self.cat_comp):
                warnings.warn_explicit(
                    'Catalogue not created. Check output from PyBDSF.\n',
                    UserWarning, WARN, cf.f_lineno)
        else:
            print "'{0}' already exists. Skipping PyBDSF.".format(
                self.cat_comp)
Пример #46
0
 def wrapper(*args, **kw):
     """Invoke ``f`` after warning that it is deprecated."""
     # ``__code__`` replaces the Python-2-only ``func_code`` attribute, which
     # raises AttributeError on Python 3; ``__code__`` works on 2.6+ and 3.x.
     code = f.__code__
     warnings.warn_explicit("calling deprecated function %s" % f.__name__,
                            category=DeprecationWarning,
                            filename=code.co_filename,
                            lineno=code.co_firstlineno + 1)
     return f(*args, **kw)
Пример #47
0
    def _resolve_hierarchy_conflicts(self, hierarchy=("top", ), mode="warn"):
        """Detect signals/memories driven or used from multiple subfragments
        and flatten the offending subfragments into this fragment.

        hierarchy: tuple of names locating this fragment in the design tree
            (used only for diagnostics).
        mode: "silent" (just flatten), "warn" (default: warn, then flatten)
            or "error" (raise DriverConflict).

        Returns (driven signals as a SignalSet, used memory IDs as a set).
        """
        assert mode in ("silent", "warn", "error")

        driver_subfrags = SignalDict()
        memory_subfrags = OrderedDict()

        # Record in ``registry`` that ``entry`` drives/uses ``entity``.
        def add_subfrag(registry, entity, entry):
            if entity not in registry:
                registry[entity] = set()
            registry[entity].add(entry)

        # For each signal driven by this fragment and/or its subfragments, determine which
        # subfragments also drive it.
        for domain, signal in self.iter_drivers():
            add_subfrag(driver_subfrags, signal, (None, hierarchy))

        flatten_subfrags = set()
        for i, (subfrag, name) in enumerate(self.subfragments):
            if name is None:
                name = "<unnamed #{}>".format(i)
            subfrag_hierarchy = hierarchy + (name, )

            if subfrag.flatten:
                # Always flatten subfragments that explicitly request it.
                flatten_subfrags.add((subfrag, subfrag_hierarchy))

            if isinstance(subfrag, Instance):
                # For memories (which are subfragments, but semantically a part of superfragment),
                # record that this fragment is driving it.
                if subfrag.type in ("$memrd", "$memwr"):
                    memory = subfrag.parameters["MEMID"]
                    add_subfrag(memory_subfrags, memory, (None, hierarchy))

                # Never flatten instances.
                continue

            # First, recurse into subfragments and let them detect driver conflicts as well.
            subfrag_drivers, subfrag_memories = \
                subfrag._resolve_hierarchy_conflicts(subfrag_hierarchy, mode)

            # Second, classify subfragments by signals they drive and memories they use.
            for signal in subfrag_drivers:
                add_subfrag(driver_subfrags, signal,
                            (subfrag, subfrag_hierarchy))
            for memory in subfrag_memories:
                add_subfrag(memory_subfrags, memory,
                            (subfrag, subfrag_hierarchy))

        # Find out the set of subfragments that needs to be flattened into this fragment
        # to resolve driver-driver conflicts.
        # Returns the dotted hierarchy names of the conflicting subfragments,
        # or [] when there is no conflict (a single driver/user).
        def flatten_subfrags_if_needed(subfrags):
            if len(subfrags) == 1:
                return []
            flatten_subfrags.update(
                (f, h) for f, h in subfrags if f is not None)
            return list(sorted(".".join(h) for f, h in subfrags))

        for signal, subfrags in driver_subfrags.items():
            subfrag_names = flatten_subfrags_if_needed(subfrags)
            if not subfrag_names:
                continue

            # While we're at it, show a message.
            message = (
                "Signal '{}' is driven from multiple fragments: {}".format(
                    signal, ", ".join(subfrag_names)))
            if mode == "error":
                raise DriverConflict(message)
            elif mode == "warn":
                message += "; hierarchy will be flattened"
                warnings.warn_explicit(message, DriverConflict,
                                       *signal.src_loc)

        for memory, subfrags in memory_subfrags.items():
            subfrag_names = flatten_subfrags_if_needed(subfrags)
            if not subfrag_names:
                continue

            # While we're at it, show a message.
            message = (
                "Memory '{}' is accessed from multiple fragments: {}".format(
                    memory.name, ", ".join(subfrag_names)))
            if mode == "error":
                raise DriverConflict(message)
            elif mode == "warn":
                message += "; hierarchy will be flattened"
                warnings.warn_explicit(message, DriverConflict,
                                       *memory.src_loc)

        # Flatten hierarchy.
        for subfrag, subfrag_hierarchy in sorted(flatten_subfrags,
                                                 key=lambda x: x[1]):
            self._merge_subfragment(subfrag)

        # If we flattened anything, we might be in a situation where we have a driver conflict
        # again, e.g. if we had a tree of fragments like A --- B --- C where only fragments
        # A and C were driving a signal S. In that case, since B is not driving S itself,
        # processing B will not result in any flattening, but since B is transitively driving S,
        # processing A will flatten B into it. Afterwards, we have a tree like AB --- C, which
        # has another conflict.
        if any(flatten_subfrags):
            # Try flattening again.
            return self._resolve_hierarchy_conflicts(hierarchy, mode)

        # Nothing was flattened, we're done!
        return (SignalSet(driver_subfrags.keys()), set(memory_subfrags.keys()))
Пример #48
0
    def visit_Assert(self, assert_: ast.Assert) -> List[ast.stmt]:
        """Return the AST statements to replace the ast.Assert instance.

        This rewrites the tests of an assertion to provide
        intermediate values and replace it with an if statement which
        raises an assertion error with a detailed explanation in case
        the expression is false.
        """
        # `assert (x, "msg")` is always true because a non-empty tuple is
        # truthy; warn, since the author almost certainly meant `assert x, "msg"`.
        if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1:
            from _pytest.warning_types import PytestAssertRewriteWarning
            import warnings

            # TODO: This assert should not be needed.
            assert self.module_path is not None
            warnings.warn_explicit(
                PytestAssertRewriteWarning(
                    "assertion is always true, perhaps remove parentheses?"),
                category=None,
                filename=os.fspath(self.module_path),
                lineno=assert_.lineno,
            )

        # Per-assert rewrite state: statements generated so far, names of the
        # temporary variables introduced, and a counter keeping them unique.
        self.statements: List[ast.stmt] = []
        self.variables: List[str] = []
        self.variable_counter = itertools.count()

        if self.enable_assertion_pass_hook:
            self.format_variables: List[str] = []

        # Explanation-building state used by the visit() calls below.
        self.stack: List[Dict[str, ast.expr]] = []
        self.expl_stmts: List[ast.stmt] = []
        self.push_format_context()
        # Rewrite assert into a bunch of statements.
        top_condition, explanation = self.visit(assert_.test)

        # The rewritten form raises when `not top_condition` holds.
        negation = ast.UnaryOp(ast.Not(), top_condition)

        if self.enable_assertion_pass_hook:  # Experimental pytest_assertion_pass hook
            msg = self.pop_format_context(ast.Str(explanation))

            # Failed
            if assert_.msg:
                assertmsg = self.helper("_format_assertmsg", assert_.msg)
                gluestr = "\n>assert "
            else:
                assertmsg = ast.Str("")
                gluestr = "assert "
            err_explanation = ast.BinOp(ast.Str(gluestr), ast.Add(), msg)
            err_msg = ast.BinOp(assertmsg, ast.Add(), err_explanation)
            err_name = ast.Name("AssertionError", ast.Load())
            fmt = self.helper("_format_explanation", err_msg)
            exc = ast.Call(err_name, [fmt], [])
            raise_ = ast.Raise(exc, None)
            statements_fail = []
            statements_fail.extend(self.expl_stmts)
            statements_fail.append(raise_)

            # Passed
            fmt_pass = self.helper("_format_explanation", msg)
            orig = self._assert_expr_to_lineno()[assert_.lineno]
            hook_call_pass = ast.Expr(
                self.helper(
                    "_call_assertion_pass",
                    ast.Num(assert_.lineno),
                    ast.Str(orig),
                    fmt_pass,
                ))
            # If any hooks implement assert_pass hook
            hook_impl_test = ast.If(
                self.helper("_check_if_assertion_pass_impl"),
                self.expl_stmts + [hook_call_pass],
                [],
            )
            statements_pass = [hook_impl_test]

            # Test for assertion condition
            main_test = ast.If(negation, statements_fail, statements_pass)
            self.statements.append(main_test)
            if self.format_variables:
                variables = [
                    ast.Name(name, ast.Store())
                    for name in self.format_variables
                ]
                clear_format = ast.Assign(variables, ast.NameConstant(None))
                self.statements.append(clear_format)

        else:  # Original assertion rewriting
            # Create failure message.
            body = self.expl_stmts
            self.statements.append(ast.If(negation, body, []))
            if assert_.msg:
                assertmsg = self.helper("_format_assertmsg", assert_.msg)
                explanation = "\n>assert " + explanation
            else:
                assertmsg = ast.Str("")
                explanation = "assert " + explanation
            template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
            msg = self.pop_format_context(template)
            fmt = self.helper("_format_explanation", msg)
            err_name = ast.Name("AssertionError", ast.Load())
            exc = ast.Call(err_name, [fmt], [])
            raise_ = ast.Raise(exc, None)

            body.append(raise_)

        # Clear temporary variables by setting them to None.
        if self.variables:
            variables = [
                ast.Name(name, ast.Store()) for name in self.variables
            ]
            clear = ast.Assign(variables, ast.NameConstant(None))
            self.statements.append(clear)
        # Fix line numbers.
        for stmt in self.statements:
            set_location(stmt, assert_.lineno, assert_.col_offset)
        return self.statements
Пример #49
0
def _inlineCallbacks(result, g, deferred):
    """
    See L{inlineCallbacks}.
    """
    # This function is complicated by the need to prevent unbounded recursion
    # arising from repeatedly yielding immediately ready deferreds.  This while
    # loop and the waiting variable solve that by manually unfolding the
    # recursion.

    waiting = [
        True,  # waiting for result?
        None
    ]  # result

    while 1:
        try:
            # Send the last result back as the result of the yield expression.
            isFailure = isinstance(result, failure.Failure)
            if isFailure:
                result = result.throwExceptionIntoGenerator(g)
            else:
                result = g.send(result)
        except StopIteration:
            # fell off the end, or "return" statement
            deferred.callback(None)
            return deferred
        except _DefGen_Return, e:
            # returnValue() was called; time to give a result to the original
            # Deferred.  First though, let's try to identify the potentially
            # confusing situation which results when returnValue() is
            # accidentally invoked from a different function, one that wasn't
            # decorated with @inlineCallbacks.

            # The traceback starts in this frame (the one for
            # _inlineCallbacks); the next one down should be the application
            # code.
            appCodeTrace = exc_info()[2].tb_next
            if isFailure:
                # If we invoked this generator frame by throwing an exception
                # into it, then throwExceptionIntoGenerator will consume an
                # additional stack frame itself, so we need to skip that too.
                appCodeTrace = appCodeTrace.tb_next
            # Now that we've identified the frame being exited by the
            # exception, let's figure out if returnValue was called from it
            # directly.  returnValue itself consumes a stack frame, so the
            # application code will have a tb_next, but it will *not* have a
            # second tb_next.
            if appCodeTrace.tb_next.tb_next:
                # If returnValue was invoked non-local to the frame which it is
                # exiting, identify the frame that ultimately invoked
                # returnValue so that we can warn the user, as this behavior is
                # confusing.
                ultimateTrace = appCodeTrace
                while ultimateTrace.tb_next.tb_next:
                    ultimateTrace = ultimateTrace.tb_next
                filename = ultimateTrace.tb_frame.f_code.co_filename
                lineno = ultimateTrace.tb_lineno
                warnings.warn_explicit(
                    "returnValue() in %r causing %r to exit: "
                    "returnValue should only be invoked by functions decorated "
                    "with inlineCallbacks" %
                    (ultimateTrace.tb_frame.f_code.co_name,
                     appCodeTrace.tb_frame.f_code.co_name), DeprecationWarning,
                    filename, lineno)
            deferred.callback(e.value)
            return deferred
        except:
Пример #50
0
    def run_Aegean(self, params='', ncores=8, write=True, redo=False):
        """Perform source finding on image using Aegean, producing just a component catalogue by default.

        NOTE: Python 2 code (print statements). Shells out to the `aegean`
        and `AeRes` command-line tools via os.system.

        Keyword arguments:
        ------------------
        params : string
            Any extra parameters to pass into Aegean (apart from cores, noise, background and table).
        ncores : int
            The number of cores to use (per node) when running BANE and Aegean.
        write : bool
            Write the fitted model and residual images.
        redo : bool
            Perform source finding, even if output catalogue(s) exist."""

        if redo:
            print "Re-doing source finding. Overwriting all Aegean and AeRes files."

        # Only run Aegean when the component catalogue is missing or redo is forced.
        if not os.path.exists(self.cat_comp) or redo:

            print "--------------------------------"
            print "| Running Aegean for catalogue |"
            print "--------------------------------"

            #Run Aegean source finder to produce catalogue of image
            command = 'aegean --cores={0} --table={1}'.\
                       format(ncores, self.cat_name)

            #Also write ds9 region file and island fits file when user wants verbose output
            if self.verbose:
                command += ',{0}.reg'.format(remove_extn(self.cat_name))

            #Add any parameters used has input and file name
            command += " {0} {1}".format(params, self.filepath)
            print "Running Aegean with following command:"
            print command
            os.system(command)

            #Print error message when no sources are found and catalogue not created.
            if not os.path.exists(self.cat_comp):
                # NOTE(review): WARN and cf are not defined in this method --
                # presumably module-level warning-location helpers (filename
                # and current frame); confirm they exist at module scope.
                warnings.warn_explicit(
                    'Aegean catalogue not created. Check output from Aegean.\n',
                    UserWarning, WARN, cf.f_lineno)
        else:
            print "'{0}' already exists. Skipping Aegean.".format(
                self.cat_comp)

        #Run AeRes when Aegean catalogue exists to produce fitted model and residual
        if write:
            if (not os.path.exists(self.residual)
                    and os.path.exists(self.cat_comp)) or redo:
                print "----------------------------------------"
                print "| Running AeRes for model and residual |"
                print "----------------------------------------"

                command = 'AeRes -f {0} -c {1} -r {2} -m {3}'.format(
                    self.filepath, self.cat_comp, self.residual, self.model)
                print "Running AeRes for residual and model images with following command:"
                print command
                os.system(command)
            else:
                print "'{0}' already exists. Skipping AeRes.".format(
                    self.residual)
Пример #51
0
def warn_or_error(
    removal_version: str,
    deprecated_entity_description: str,
    hint: Optional[str] = None,
    deprecation_start_version: Optional[str] = None,
    stacklevel: int = 3,
    frame_info: Optional[inspect.FrameInfo] = None,
    ensure_stderr: bool = False,
    print_warning: bool = True,
) -> None:
    """Warn about, or fail on, a deprecated entity.

    Compares ``removal_version`` against the running pants version: while the
    removal version is still in the future a DeprecationWarning is issued
    (optionally printed directly to stderr, or suppressed entirely via
    ``print_warning``); once the current version reaches the removal version a
    CodeRemovedError is raised instead.

    :param removal_version: Version at which the entity will be/was removed.
    :param deprecated_entity_description: Short description embedded in the message.
    :param hint: Optional migration advice appended to the message.
    :param deprecation_start_version: If set, no warning is shown before this
                                      version; must be less than removal_version.
    :param stacklevel: Passed to frame lookup to attribute a file/line to the warning.
    :param frame_info: Pre-computed frame info, overriding the stacklevel lookup.
    :param ensure_stderr: Print via warnings.formatwarning to stderr, bypassing filters.
    :param print_warning: If False, suppress the pre-removal warning (errors still raise).
    :raises DeprecationApplicationError: if removal_version is invalid.
    :raises CodeRemovedError: if the current version is at/past removal_version.
    """
    removal_semver = validate_deprecation_semver(removal_version,
                                                 "removal version")

    # An optional start version delays the warning; it must precede removal.
    if deprecation_start_version:
        start_semver = validate_deprecation_semver(
            deprecation_start_version, "deprecation start version")
        if start_semver >= removal_semver:
            raise InvalidSemanticVersionOrderingError(
                "The deprecation start version {} must be less than the end version {}."
                .format(deprecation_start_version, removal_version))
        if PANTS_SEMVER < start_semver:
            # Too early to start warning about this deprecation.
            return

    msg = "DEPRECATED: {} {} removed in version {}.".format(
        deprecated_entity_description, get_deprecated_tense(removal_version),
        removal_version)
    if hint:
        msg += "\n  {}".format(hint)

    # Both warnings.formatwarning and warnings.warn_explicit need a filename
    # and line number; take them from the supplied frame info, or look one up.
    info = frame_info if frame_info is not None else _get_frame_info(stacklevel)
    filename, line_number = info[1], info[2]

    if removal_semver <= PANTS_SEMVER:
        # Past the removal version: this is an error, not a warning.
        raise CodeRemovedError(msg)

    if ensure_stderr:
        # No warning filters can stop us from printing this message directly to stderr.
        print(
            warnings.formatwarning(msg, DeprecationWarning, filename,
                                   line_number),
            file=sys.stderr,
        )
    elif print_warning:
        # This output is filtered by warning filters.
        warnings.warn_explicit(
            message=msg,
            category=DeprecationWarning,
            filename=filename,
            lineno=line_number,
        )
Пример #52
0
 def emit(self, record):
     """Re-emit a logbook record as a Python warning at its original location."""
     details = {
         "message": record.message,
         "category": LogbookWarning,
         "filename": record.filename,
         "lineno": record.lineno,
         "module": record.module,
     }
     warnings.warn_explicit(**details)
Пример #53
0
from .widget_output import Output
from .widget_selection import RadioButtons, ToggleButtons, Dropdown, Select, SelectMultiple
from .widget_selectioncontainer import Tab, Accordion
from .widget_string import HTML, Latex, Text, Textarea
from .interaction import interact, interactive, fixed, interact_manual
from .widget_link import jslink, jsdlink

# Deprecated classes
from .widget_bool import CheckboxWidget, ToggleButtonWidget
from .widget_button import ButtonWidget
from .widget_box import ContainerWidget
from .widget_float import FloatTextWidget, BoundedFloatTextWidget, FloatSliderWidget, FloatProgressWidget
from .widget_image import ImageWidget
from .widget_int import IntTextWidget, BoundedIntTextWidget, IntSliderWidget, IntProgressWidget
from .widget_selection import RadioButtonsWidget, ToggleButtonsWidget, DropdownWidget, SelectWidget
from .widget_selectioncontainer import TabWidget, AccordionWidget
from .widget_string import HTMLWidget, LatexWidget, TextWidget, TextareaWidget

# We use warn_explicit so we have very brief messages without file or line numbers.
# The concern is that file or line numbers will confuse the interactive user.
# To ignore this warning, do:
#
#     from warnings import filterwarnings
#     filterwarnings('ignore', module='ipython_widgets')

from warnings import warn_explicit
__warningregistry__ = {}
warn_explicit("IPython widgets are experimental and may change in the future.",
              FutureWarning, '', 0, module = 'ipython_widgets',
              registry = __warningregistry__, module_globals = globals)
Пример #54
0
    def run_pass(self, state):
        """
        Lowering for object mode

        Compiles state.func_ir with the object-mode (pure Python) backend,
        stores the CompileResult on state.cr, and emits warnings when object
        mode was reached by fallback rather than explicit request.
        """

        # Lazily create the codegen library the first time this pass runs.
        if state.library is None:
            codegen = state.targetctx.codegen()
            state.library = codegen.create_library(state.func_id.func_qualname)
            # Enable object caching upfront, so that the library can
            # be later serialized.
            state.library.enable_object_caching()

        def backend_object_mode():
            """
            Object mode compilation
            """
            if len(state.args) != state.nargs:
                # append missing
                # BUG?: What's going on with nargs here?
                # check state.nargs vs self.nargs on original code
                state.args = (tuple(state.args) + (types.pyobject,) *
                              (state.nargs - len(state.args)))

            return self._py_lowering_stage(state.targetctx,
                                           state.library,
                                           state.func_ir,
                                           state.flags)

        lowered = backend_object_mode()
        signature = typing.signature(state.return_type, *state.args)
        # Deferred import: avoids a circular dependency with the compiler module.
        from numba.core.compiler import compile_result
        state.cr = compile_result(
            typing_context=state.typingctx,
            target_context=state.targetctx,
            entry_point=lowered.cfunc,
            typing_error=state.status.fail_reason,
            type_annotation=state.type_annotation,
            library=state.library,
            call_helper=lowered.call_helper,
            signature=signature,
            objectmode=True,
            lifted=state.lifted,
            fndesc=lowered.fndesc,
            environment=lowered.env,
            metadata=state.metadata,
            reload_init=state.reload_init,
        )

        # Warn, deprecated behaviour, code compiled in objmode without
        # force_pyobject indicates fallback from nopython mode
        if not state.flags.force_pyobject:
            # first warn about object mode and yes/no to lifted loops
            if len(state.lifted) > 0:
                warn_msg = ('Function "%s" was compiled in object mode without'
                            ' forceobj=True, but has lifted loops.' %
                            (state.func_id.func_name,))
            else:
                warn_msg = ('Function "%s" was compiled in object mode without'
                            ' forceobj=True.' % (state.func_id.func_name,))
            warnings.warn(errors.NumbaWarning(warn_msg,
                                              state.func_ir.loc))

            url = ("https://numba.pydata.org/numba-doc/latest/reference/"
                   "deprecation.html#deprecation-of-object-mode-fall-"
                   "back-behaviour-when-using-jit")
            msg = ("\nFall-back from the nopython compilation path to the "
                   "object mode compilation path has been detected, this is "
                   "deprecated behaviour.\n\nFor more information visit %s" %
                   url)
            warnings.warn(errors.NumbaDeprecationWarning(msg,
                                                         state.func_ir.loc))
            # warn_explicit pins the warning to the compiled function's own
            # source location rather than this compiler-internal frame.
            if state.flags.release_gil:
                warn_msg = ("Code running in object mode won't allow parallel"
                            " execution despite nogil=True.")
                warnings.warn_explicit(warn_msg, errors.NumbaWarning,
                                       state.func_id.filename,
                                       state.func_id.firstlineno)
        return True
Пример #55
0
def processLasingSingleShot(image_profile, nolasing_averaged_profiles):
    """
    Process a single shot profiles, using the no lasing references to retrieve the x-ray pulse(s)
    Arguments:
      image_profile: profile for xtcav image
      nolasing_averaged_profiles: no lasing reference profiles
    Output
      pulsecharacterization: retrieved pulse

    Per bunch, the lasing electron-current/energy profiles are interpolated
    onto the reference master time axis, matched (by current-profile
    correlation) against the closest no-lasing reference group, and the x-ray
    power is reconstructed with two independent methods (energy center of
    mass, ECOM, and energy dispersion, ERMS) that are then cross-checked.
    """

    image_stats = image_profile.image_stats
    physical_units = image_profile.physical_units
    shot_to_shot = image_profile.shot_to_shot

    num_bunches = len(image_stats)              #Number of bunches
    
    # Mismatched bunch counts are only warned about, not fatal -- the loops
    # below index the reference by bunch, so results past the overlap are undefined.
    if (num_bunches != nolasing_averaged_profiles.num_bunches):
        warnings.warn_explicit('Different number of bunches in the reference',UserWarning,'XTCAV',0)
    
    t = nolasing_averaged_profiles.t   #Master time obtained from the no lasing references
    dt = old_div((t[-1]-t[0]),(t.size-1))
    
             #Electron charge in coulombs
    Nelectrons = old_div(shot_to_shot.dumpecharge,Constants.E_CHARGE)   #Total number of electrons in the bunch    
    
    #Create the the arrays for the outputs, first index is always bunch number
    bunchdelay=np.zeros(num_bunches, dtype=np.float64);                       #Delay from each bunch with respect to the first one in fs
    bunchdelaychange=np.zeros(num_bunches, dtype=np.float64);                 #Difference between the delay from each bunch with respect to the first one in fs and the same form the non lasing reference
    bunchenergydiff=np.zeros(num_bunches, dtype=np.float64);                  #Distance in energy for each bunch with respect to the first one in MeV
    bunchenergydiffchange=np.zeros(num_bunches, dtype=np.float64);            #Comparison of that distance with respect to the no lasing
    eBunchCOM=np.zeros(num_bunches, dtype=np.float64);                   #Energy of the XRays generated from each bunch for the center of mass approach in J
    eBunchRMS=np.zeros(num_bunches, dtype=np.float64);                   #Energy of the XRays generated from each bunch for the dispersion of mass approach in J
    powerAgreement=np.zeros(num_bunches, dtype=np.float64);              #Agreement factor between the two methods
    lasingECurrent=np.zeros((num_bunches,t.size), dtype=np.float64);     #Electron current for the lasing trace (In #electrons/s)
    nolasingECurrent=np.zeros((num_bunches,t.size), dtype=np.float64);   #Electron current for the no lasing trace (In #electrons/s)
    lasingECOM=np.zeros((num_bunches,t.size), dtype=np.float64);         #Lasing energy center of masses for each time in MeV
    nolasingECOM=np.zeros((num_bunches,t.size), dtype=np.float64);       #No lasing energy center of masses for each time in MeV
    lasingERMS=np.zeros((num_bunches,t.size), dtype=np.float64);         #Lasing energy dispersion for each time in MeV
    nolasingERMS=np.zeros((num_bunches,t.size), dtype=np.float64);       #No lasing energy dispersion for each time in MeV
    powerECOM=np.zeros((num_bunches,t.size), dtype=np.float64);      #Retrieved power in GW based on ECOM
    powerERMS=np.zeros((num_bunches,t.size), dtype=np.float64);      #Retrieved power in GW based on ERMS

    powerrawECOM=np.zeros((num_bunches,t.size), dtype=np.float64);              #Retrieved power in GW based on ECOM without gas detector normalization
    powerrawERMS=np.zeros((num_bunches,t.size), dtype=np.float64);              #Retrieved power in arbitrary units based on ERMS without gas detector normalization
    groupnum=np.zeros(num_bunches, dtype=np.int32);                  #group number of lasing off shot
             
    
    #We treat each bunch separately
    for j in range(num_bunches):
        distT=(image_stats[j].xCOM-image_stats[0].xCOM)*physical_units.xfsPerPix  #Distance in time converted form pixels to fs
        distE=(image_stats[j].yCOM-image_stats[0].yCOM)*physical_units.yMeVPerPix #Distance in time converted form pixels to MeV
        
        bunchdelay[j]=distT  #The delay for each bunch is the distance in time
        bunchenergydiff[j]=distE #Same for energy
        
        dt_old=physical_units.xfs[1]-physical_units.xfs[0] # dt before interpolation 
        
        eCurrent=old_div(image_stats[j].xProfile,(dt_old*Constants.FS_TO_S))*Nelectrons                        #Electron current in number of electrons per second, the original xProfile already was normalized to have a total sum of one for the all the bunches together
        
        eCOMslice=(image_stats[j].yCOMslice-image_stats[j].yCOM)*physical_units.yMeVPerPix       #Center of mass in energy for each t converted to the right units        
        eRMSslice=image_stats[j].yRMSslice*physical_units.yMeVPerPix                               #Energy dispersion for each t converted to the right units

        # The -distT shift aligns this bunch's time axis to the first bunch
        # before resampling onto the reference master time axis t.
        interp=scipy.interpolate.interp1d(physical_units.xfs-distT,eCurrent,kind='linear',fill_value=0,bounds_error=False,assume_sorted=True)  #Interpolation to master time
        eCurrent=interp(t)    
                                                   
        interp=scipy.interpolate.interp1d(physical_units.xfs-distT,eCOMslice,kind='linear',fill_value=0,bounds_error=False,assume_sorted=True)  #Interpolation to master time
        eCOMslice=interp(t)
            
        interp=scipy.interpolate.interp1d(physical_units.xfs-distT,eRMSslice,kind='linear',fill_value=0,bounds_error=False,assume_sorted=True)  #Interpolation to master time
        eRMSslice=interp(t)        
        
        #Find best no lasing match
        num_groups = nolasing_averaged_profiles.eCurrent[j].shape[0]
        corr = np.apply_along_axis(lambda x: np.corrcoef(eCurrent, x)[0,1], 1, nolasing_averaged_profiles.eCurrent[j])
        
        #The index of the most similar is that with a highest correlation, i.e. the last in the array after sorting it
        groupnum[j]=np.argmax(corr)
        #groupnum[j] = np.random.randint(0, num_groups-1) if num_groups > 1 else 0
        
        #The change in the delay and in energy with respect to the same bunch for the no lasing reference
        bunchdelaychange[j]=distT-nolasing_averaged_profiles.distT[j][groupnum[j]]
        bunchenergydiffchange[j]=distE-nolasing_averaged_profiles.distE[j][groupnum[j]]
                                       
        #We do proper assignations
        lasingECurrent[j,:]=eCurrent
        nolasingECurrent[j,:]=nolasing_averaged_profiles.eCurrent[j][groupnum[j],:]

        #We threshold the ECOM and ERMS based on electron current
        threslevel=0.1
        threslasing=np.amax(lasingECurrent[j,:])*threslevel
        thresnolasing=np.amax(nolasingECurrent[j,:])*threslevel      
        indiceslasing=np.where(lasingECurrent[j,:]>threslasing)
        indicesnolasing=np.where(nolasingECurrent[j,:]>thresnolasing)      
        ind1=np.amax([indiceslasing[0][0],indicesnolasing[0][0]])
        ind2=np.amin([indiceslasing[0][-1],indicesnolasing[0][-1]])        
        if ind1>ind2:
            ind1=ind2
        
        #And do the rest of the assignations taking into account the thresholding
        lasingECOM[j,ind1:ind2]=eCOMslice[ind1:ind2]
        nolasingECOM[j,ind1:ind2]=nolasing_averaged_profiles.eCOMslice[j][groupnum[j],ind1:ind2]
        lasingERMS[j,ind1:ind2]=eRMSslice[ind1:ind2]
        nolasingERMS[j,ind1:ind2]=nolasing_averaged_profiles.eRMSslice[j][groupnum[j],ind1:ind2]
        
        #First calculation of the power based on center of masses and dispersion for each bunch
        powerECOM[j,:]=((nolasingECOM[j]-lasingECOM[j])*Constants.E_CHARGE*1e6)*eCurrent    #In J/s
        powerERMS[j,:]=(lasingERMS[j]**2-nolasingERMS[j]**2)*(eCurrent**(2.0/3.0)) 

    powerrawECOM=powerECOM*1e-9 
    powerrawERMS=powerERMS.copy()
    #Calculate the normalization constants to have a total energy compatible with the energy detected in the gas detector
    eoffsetfactor=old_div((shot_to_shot.xrayenergy-(np.sum(powerECOM[powerECOM > 0])*dt*Constants.FS_TO_S)),Nelectrons)   #In J                           
    escalefactor=np.sum(powerERMS[powerERMS > 0])*dt*Constants.FS_TO_S                 #in J

    #Apply the corrections to each bunch and calculate the final energy distribution and power agreement
    for j in range(num_bunches):                 
        powerECOM[j,:]=((nolasingECOM[j,:]-lasingECOM[j,:])*Constants.E_CHARGE*1e6+eoffsetfactor)*lasingECurrent[j,:]*1e-9   #In GJ/s (GW)
        powerERMS[j,:]=old_div(shot_to_shot.xrayenergy*powerERMS[j,:],escalefactor)*1e-9   #In GJ/s (GW) 
        #Set all negative power to 0
        powerECOM[j,:][powerECOM[j,:] < 0] = 0
        powerERMS[j,:][powerERMS[j,:] < 0] = 0       
        # Agreement metric in (-inf, 1]: 1 means the two reconstructions are identical.
        powerAgreement[j]=1-old_div(np.sum((powerECOM[j,:]-powerERMS[j,:])**2),(np.sum((powerECOM[j,:]-np.mean(powerECOM[j,:]))**2)+np.sum((powerERMS[j,:]-np.mean(powerERMS[j,:]))**2)))
        eBunchCOM[j]=np.sum(powerECOM[j,:])*dt*Constants.FS_TO_S*1e9
        eBunchRMS[j]=np.sum(powerERMS[j,:])*dt*Constants.FS_TO_S*1e9
                    
    return PulseCharacterization(t, powerrawECOM, powerrawERMS, powerECOM, 
        powerERMS, powerAgreement, bunchdelay, bunchdelaychange, shot_to_shot.xrayenergy, 
        eBunchCOM, eBunchRMS, bunchenergydiff, bunchenergydiffchange, lasingECurrent,
        nolasingECurrent, lasingECOM, nolasingECOM, lasingERMS, nolasingERMS, num_bunches, 
        groupnum)
Пример #56
0
 def wrapper(*args, **kwargs):
     """Emit a DeprecationWarning attributed to the wrapped function, then call it."""
     code = getattr(f, _func_code)
     warn_explicit(
         "Call to deprecated function {0:s}".format(f.__name__),
         category=DeprecationWarning,
         filename=code.co_filename,
         lineno=code.co_firstlineno + 1,
     )
     return f(*args, **kwargs)
Пример #57
0
    def export_data(
            self,
            file_path_on_target_machine,
            digital_channels=None,
            analog_channels=None,
            analog_format="voltage",
            time_span=None,  # 'None-->all_time, [x.x, y.y]-->time_span'
            format="csv",  # 'csv, bin, vcd, matlab'
            csv_column_headers=True,
            csv_delimeter='comma',  # 'comma' or 'tab'
            csv_timestamp='time_stamp',  # 'time_stamp, sample_number'
            csv_combined=True,  # 'combined' else 'separate'
            csv_row_per_change=True,  # 'row_per_change' else 'row_per_sample'
            csv_number_format='hex',  # dec, hex, bin, ascii
            bin_per_change=True,  # 'on_change' else 'each_sample'
            bin_word_size='8'  # 8, 16, 32, 64
    ):
        # export_data, C:\temp_file, digital_channels, 0, 1, analog_channels, 1, voltage, all_time, adc, csv, headers, comma, time_stamp, separate, row_per_change, Dec
        # export_data, C:\temp_file, all_channels, time_span, 0.2, 0.4, vcd
        # export_data, C:\temp_file, analog_channels, 0, 1, 2, adc, all_time, matlab

        frame = inspect.currentframe().f_back
        warnings.warn_explicit(
            'export_data is deprecated, use export_data2',
            category=UserWarning,  # DeprecationWarning suppressed by default
            filename=inspect.getfile(frame.f_code),
            lineno=frame.f_lineno)

        while not self.is_processing_complete():
            time.sleep(1)

        # The path needs to be absolute. This is hard to check reliably since we
        # don't know the OS on the target machine, but we can do a basic check
        # for something that will definitely fail
        if file_path_on_target_machine[0] in ('~', '.'):
            raise NotImplementedError('File path must be absolute')
        # Fix windows path if needed
        file_path_on_target_machine.replace('\\', '/')
        self._build('EXPORT_DATA')
        self._build(file_path_on_target_machine)
        if (digital_channels is None) and (analog_channels is None):
            self._build('all_channels')
            analog_channels = self.get_active_channels()[1]
        else:
            if digital_channels is not None and len(digital_channels):
                self._build('digital_channels')
                for ch in digital_channels:
                    self._build(str(ch))
            if analog_channels is not None and len(analog_channels):
                self._build('analog_channels')
                for ch in analog_channels:
                    self._build(str(ch))
        if analog_channels is not None and len(analog_channels):
            if analog_format not in ('voltage', 'adc'):
                raise NotImplementedError("bad analog_format")
            self._build(analog_format)

        if time_span is None:
            self._build('all_time')
        elif len(time_span) == 2:
            self._build('time_span')
            self._build(str(time_span[0]))
            self._build(str(time_span[1]))
        else:
            raise NotImplementedError('invalid time format')

        if format == 'csv':
            self._build(format)

            if csv_column_headers:
                self._build('headers')
            else:
                self._build('no_headers')

            if csv_delimeter not in ('comma', 'tab'):
                raise NotImplementedError('bad csv delimeter')
            self._build(csv_delimeter)

            if csv_timestamp not in ('time_stamp', 'sample_number'):
                raise NotImplementedError('bad csv timestamp')
            self._build(csv_timestamp)

            if csv_combined:
                self._build('combined')
            else:
                self._build('separate')

            if csv_row_per_change:
                self._build('row_per_change')
            else:
                self._build('row_per_sample')

            if csv_number_format not in ('dec', 'hex', 'bin', 'ascii'):
                raise NotImplementedError('bad csv number format')
            self._build(csv_number_format)
        elif format == 'bin':
            self._build(format)

            if bin_per_change:
                self._build('on_change')
            else:
                self._build('each_sample')

            if bin_word_size not in ('8', '16', '32', '64'):
                raise NotImplementedError('bad bin word size')
            self._build(bin_word_size)

        elif format in ('vcd', 'matlab'):
            # No options for these
            self._build(format)
        else:
            raise NotImplementedError('unknown format')

        self._finish()
Пример #58
0
"""
# path_input_pdbs_dir = '/switchlab/group/shazib/SnpEffect/output_data/analyse_complex'
# path_input_pdbs_dir = Paths.OUTPUT_AC
# path_pdbfiles = sorted(glob.glob(path_input_pdbs_dir + '/**/*.pdb', recursive=True))
# path_pdbfiles = []
# pdbnames = ['RepairPDB_1', 'RepairPDB_3', 'RepairPDB_4', 'RepairPDB_5', 'RepairPDB_6', 'RepairPDB_7', 'RepairPDB_8',
#             'RepairPDB_9', 'RepairPDB_10', 'RepairPDB_11']
# Names of the repaired pdb structures to process in this run.
pdbnames = ['RepairPDB_1']
# Resolve each name to its full path under the input pdb directory.
path_pdbfiles = [
    os.path.join(Paths.INPUT_PDBS, pdbname + Str.PDBEXT.value)
    for pdbname in pdbnames
]
if not path_pdbfiles:
    # An empty list is allowed — surface a visible no-op notice rather
    # than silently doing nothing.
    warnings.warn_explicit(
        message=
        "No pdb files to process. (No problem if that is what you expected).",
        category=RuntimeWarning,
        filename="KickOff",
        lineno=68)
"""
5. Select specific mutants if you are only interested in these.
MAKE SURE TO SET THIS LIST TO EMPTY if you don't want any of the subsequent actions below to be for these mutants only.
"""
# specific_fxmutants = ['CA498L']
specific_fxmutants = []
# Fail fast: verify every requested mutant is valid for every pdb before
# any downstream action runs.
for path_pdbfile in path_pdbfiles:
    for specific_fxmutant in specific_fxmutants:
        if GUM.is_valid_fxmutant_for_pdb(path_pdbfile, specific_fxmutant):
            continue
        raise ValueError('The specified mutant ' + specific_fxmutant +
                         ' is not valid for this pdb: ' +
                         os.path.basename(path_pdbfile))
"""
Пример #59
0
    def __parse(self, text, filename=None):
        """Build a CBModel from the text of a BiOpt file.

        Comments are stripped first (``%...%`` long comments, ``#...``
        short comments), then the four known sections are located and
        parsed: reactions, constraints, external metabolites and
        objective. A missing section produces a warning, not an error.

        :param text: raw file contents (any line-ending convention).
        :param filename: source path; used for the model id and warning
                         locations. May be None.
        :return: the populated CBModel.
        """
        # Normalise Windows/Mac line endings so the comment regexes and
        # per-section line numbering behave consistently.
        text = text.replace("\r\n", "\n")
        text = text.replace("\r", "\n")

        # %...% delimits (possibly multi-line) comments.
        re_long = re.compile("%.*?%", re.DOTALL | re.MULTILINE)
        text = re_long.sub("\n", text)

        # #... comments run to end of line.
        re_short = re.compile("#.*?\n")
        text = re_short.sub("\n", text)

        sections = self.find_sections(text)

        model = CBModel(basename(filename))
        # Section headers are matched loosely: case-insensitive substring.
        react_name, react_text, react_line = self.__find_section(
            text, sections, lambda x: re.search(r"reac", x, re.I))
        const_name, const_text, const_line = self.__find_section(
            text, sections, lambda x: re.search(r"cons", x, re.I))
        ext_m_name, ext_m_text, ext_m_line = self.__find_section(
            text, sections, lambda x: re.search(r"ext", x, re.I))
        obj_name, obj_text, obj_line = self.__find_section(
            text, sections,
            lambda x: re.search(r"obj", x, re.I) and not re.search(
                "des", x, re.I))

        # BUGFIX: initialise so the objective section below cannot hit a
        # NameError when the reactions section is absent from the file.
        reactions = []

        if react_text:
            reactions = self.__parse_reactions_section(
                react_text,
                filename=filename,
                section_start=react_line,
                strip_comments=False)

            for reaction, r_i in reactions:
                # Register any metabolite not seen before.
                for m_id in reaction.stoichiometry:
                    if m_id not in model.metabolites:
                        model.add_metabolite(Metabolite(elem_id=m_id,
                                                        compartment=None),
                                             clear_tmp=True)

                model.add_reaction(reaction, clear_tmp=True)
        else:
            warnings.warn("Could not find '-REACTIONS' section",
                          BiooptParseWarning)

        if const_text:
            for (r_id, lb, ub), i in self.__parse_constraints_section(
                    const_text,
                    filename=filename,
                    section_start=const_line,
                    strip_comments=False):
                # A negative (or absent) lower bound on an irreversible
                # reaction contradicts its declared direction — warn.
                if r_id in model.reactions and not model.reactions[
                        r_id].reversible and (lb is None or lb < 0):
                    warnings.warn_explicit(
                        "Reaction '{0}' from '{1}' has effective bounds not compatible with reaction direction in '{2}' section ({3} : [{4}, {5}])"
                        .format(r_id, const_name, react_name,
                                # BUGFIX: was `reactions[r_id].reversibl` —
                                # `reactions` is a list of (reaction, i)
                                # pairs, not keyed by id, and the attribute
                                # was misspelled; use the model lookup that
                                # the guard above already validated.
                                "<->" if model.reactions[r_id].reversible
                                else "->",
                                lb, ub),
                        BiooptParseWarning,
                        filename=filename,
                        lineno=const_line + i + 1)

                if r_id in model.reactions:
                    model.reactions[r_id].lb = lb
                    model.reactions[r_id].ub = ub
                elif react_text:
                    warnings.warn_explicit(
                        "Reaction '{0}' from '{1}' section is not present in '{2}' section"
                        .format(r_id, const_name, react_name),
                        BiooptParseWarning,
                        filename=filename,
                        lineno=const_line + i + 1)
        else:
            warnings.warn("Could not find '-CONSTRAINS' section",
                          BiooptParseWarning)

        if ext_m_text:
            met2rxn = model.metabolite_reaction_lookup()
            for m_external, i in self.__parse_external_metabolites_section(
                    ext_m_text,
                    filename=filename,
                    section_start=ext_m_line,
                    strip_comments=False):
                if m_external.id in model.metabolites:
                    # Mark the metabolite as boundary and turn every
                    # reaction touching it into an exchange reaction,
                    # dropping the metabolite from its stoichiometry.
                    model.metabolites[m_external.id].boundary = True
                    for external_r_id, coef in met2rxn[m_external.id].items():
                        model.reactions[
                            external_r_id].reaction_type = ReactionType.EXCHANGE
                        del model.reactions[external_r_id].stoichiometry[
                            m_external.id]
                elif react_text:
                    warnings.warn_explicit(
                        "Metabolite '{0}' from '{1}' section is not present in any reaction from '{2}' section"
                        .format(m_external.id, ext_m_name, react_name),
                        BiooptParseWarning,
                        filename=filename,
                        lineno=ext_m_line + i + 1)
        else:
            warnings.warn("Could not find '-EXTERNAL METABOLITES' section",
                          BiooptParseWarning)

        if obj_text:
            r_id, coef = self.parse_objective_section(
                obj_text,
                section_name=obj_name,
                reactions_section_name=react_name,
                filename=filename,
                section_start=obj_line,
                reactions=reactions,
                strip_comments=False)
            model.reactions[r_id].objective = coef
        else:
            warnings.warn("Could not find '-OBJECTIVE' section",
                          BiooptParseWarning)

        return model
Пример #60
0
def warn_or_error(
    removal_version: str,
    deprecated_entity_description: str,
    hint: Optional[str] = None,
    deprecation_start_version: Optional[str] = None,
    stacklevel: int = 3,
    frame_info: Optional[inspect.FrameInfo] = None,
    context: int = 1,
    ensure_stderr: bool = False,
    print_warning: bool = True,
) -> None:
    """Warn about a deprecated entity, or error once it is past removal.

    Compares ``removal_version`` against the current pants version: if the
    removal version is still in the future a warning is issued; otherwise
    ``CodeRemovedError`` is raised.

    :param removal_version: pantsbuild.pants version at which the
        deprecated entity will be/was removed.
    :param deprecated_entity_description: short description embedded in
        the warning/error message.
    :param hint: optional migration advice appended to the message.
    :param deprecation_start_version: version at which warnings begin; must
        be less than ``removal_version``. When unset, warnings always show.
    :param stacklevel: stacklevel used to locate the calling frame.
    :param frame_info: pre-resolved frame info, overriding ``stacklevel``.
    :param context: lines of surrounding source to show in the warning.
    :param ensure_stderr: print directly to stderr (bypassing warning
        filters) instead of going through ``warnings.warn_explicit``.
    :param print_warning: whether to emit the pre-removal warning at all;
        errors for removed entities are raised regardless.
    :raises DeprecationApplicationError: if ``removal_version`` is invalid.
    :raises CodeRemovedError: if the current version is at or past removal.
    """
    removal_semver = validate_deprecation_semver(removal_version,
                                                 'removal version')
    if deprecation_start_version:
        start_semver = validate_deprecation_semver(
            deprecation_start_version, 'deprecation start version')
        if start_semver >= removal_semver:
            raise InvalidSemanticVersionOrderingError(
                'The deprecation start version {} must be less than the end version {}.'
                .format(deprecation_start_version, removal_version))
        if PANTS_SEMVER < start_semver:
            # The deprecation window has not opened yet: stay silent.
            return

    msg = 'DEPRECATED: {} {} removed in version {}.'.format(
        deprecated_entity_description, get_deprecated_tense(removal_version),
        removal_version)
    if hint:
        msg += '\n  {}'.format(hint)

    # warnings.formatwarning needs a filename and line number, so a frame is
    # resolved even when the caller did not supply one; reconstructing the
    # stacklevel logic ourselves also buys multiple lines of code context.
    if frame_info is None:
        frame_info = _get_frame_info(stacklevel, context=context)
    _, filename, line_number, _, code_context, _ = frame_info
    context_lines = (''.join(code_context)
                     if code_context else '<no code context available>')

    if removal_semver <= PANTS_SEMVER:
        # At or past the removal version: this is now a hard error.
        raise CodeRemovedError(msg)

    if ensure_stderr:
        # Write straight to stderr so no warning filter can suppress it.
        warning_text = warnings.formatwarning(msg,
                                              DeprecationWarning,
                                              filename,
                                              line_number,
                                              line=context_lines)
        print(warning_text, file=sys.stderr)
    elif print_warning:
        # Goes through the normal warning-filter machinery.
        with _greater_warnings_context(context_lines):
            warnings.warn_explicit(message=msg,
                                   category=DeprecationWarning,
                                   filename=filename,
                                   lineno=line_number)