Example #1
def register(conf, conf_admin, **options):
    """
    Register a new admin section.

    :param conf: A subclass of ``djconfig.admin.Config``
    :param conf_admin: A subclass of ``djconfig.admin.ConfigAdmin``
    :param options: Extra options passed to ``django.contrib.admin.site.register``
    """
    assert issubclass(conf_admin, ConfigAdmin), (
        'conf_admin is not a ConfigAdmin subclass')
    assert issubclass(
        getattr(conf_admin, 'change_list_form', None),
        ConfigForm), 'No change_list_form set'
    assert issubclass(conf, Config), (
        'conf is not a Config subclass')
    assert conf.app_label, 'No app_label set'
    assert conf.verbose_name_plural, 'No verbose_name_plural set'
    assert not conf.name or re.match(r"^[a-zA-Z_]+$", conf.name), (
        'Not a valid name. Valid chars are [a-zA-Z_]')
    config_class = type("Config", (), {})
    config_class._meta = type("Meta", (_ConfigMeta,), {
        'app_label': conf.app_label,
        'verbose_name_plural': conf.verbose_name_plural,
        'object_name': 'Config',
        'model_name': conf.name,
        'module_name': conf.name})
    admin.site.register([config_class], conf_admin, **options)
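
A hypothetical registration, derived from the assertions above. Class names are invented, and the import locations (``register`` in ``djconfig.admin``, ``ConfigForm`` in ``djconfig.forms``) are assumptions based on the docstring:

from djconfig.admin import Config, ConfigAdmin, register   # assumed locations
from djconfig.forms import ConfigForm

class BlogConfigForm(ConfigForm):
    pass  # fields for the config values would go here

class BlogConfig(Config):
    app_label = 'blog'
    verbose_name_plural = 'blog configuration'
    name = 'blog_config'            # must match [a-zA-Z_]

class BlogConfigAdmin(ConfigAdmin):
    change_list_form = BlogConfigForm

register(BlogConfig, BlogConfigAdmin)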
Example #2
    def _main_loop_handleException(self, dump_info):
        """
        Helper method with one argument only so that it can be registered
        with GLib.idle_add() to run on idle or called from a handler.

        :type dump_info: an instance of the meh.DumpInfo class

        """

        ty = dump_info.exc_info.type
        value = dump_info.exc_info.value

        if (issubclass(ty, blivet.errors.StorageError) and value.hardware_fault) or (
            issubclass(ty, OSError) and value.errno == errno.EIO
        ):
            # hardware fault or '[Errno 5] Input/Output error'
            hw_error_msg = _(
                "The installation was stopped due to what "
                "seems to be a problem with your hardware. "
                "The exact error message is:\n\n%s.\n\n "
                "The installer will now terminate."
            ) % str(value)
            self.intf.messageWindow(_("Hardware error occurred"), hw_error_msg)
            sys.exit(0)
        elif isinstance(value, blivet.errors.UnusableConfigurationError):
            sys.exit(0)
        else:
            super(AnacondaExceptionHandler, self).handleException(dump_info)
            return False
Example #3
def __unit_test_onset_function(metric):
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # First, test for a warning on empty onsets
        metric(np.array([]), np.arange(10))
        assert len(w) == 1
        assert issubclass(w[-1].category, UserWarning)
        assert str(w[-1].message) == "Reference onsets are empty."
        metric(np.arange(10), np.array([]))
        assert len(w) == 2
        assert issubclass(w[-1].category, UserWarning)
        assert str(w[-1].message) == "Estimated onsets are empty."
        # And that the metric is 0
        assert np.allclose(metric(np.array([]), np.array([])), 0)

    # Now test validation function - onsets must be 1d ndarray
    onsets = np.array([[1., 2.]])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)
    # onsets must be in seconds (so not huge)
    onsets = np.array([1e10, 1e11])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)
    # onsets must be sorted
    onsets = np.array([2., 1.])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)

    # Valid onsets which are the same produce a score of 1 for all metrics
    onsets = np.arange(10, dtype=float)  # np.float was removed in NumPy 1.20
    assert np.allclose(metric(onsets, onsets), 1)
Example #4
def issubdtype(arg1, arg2):
    """
    Returns True if first argument is a typecode lower/equal in type hierarchy.

    Parameters
    ----------
    arg1, arg2 : dtype_like
        dtype or string representing a typecode.

    Returns
    -------
    out : bool

    See Also
    --------
    issubsctype, issubclass_
    numpy.core.numerictypes : Overview of numpy type hierarchy.

    Examples
    --------
    >>> np.issubdtype('S1', str)
    True
    >>> np.issubdtype(np.float64, np.float32)
    False

    """
    if issubclass_(arg2, generic):
        return issubclass(dtype(arg1).type, arg2)
    mro = dtype(arg2).type.mro()
    if len(mro) > 1:
        val = mro[1]
    else:
        val = mro[0]
    return issubclass(dtype(arg1).type, val)
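
The ``issubclass_`` branch is what lets comparisons against numpy's abstract types (``np.floating``, ``np.number``, ...) work, while concrete-vs-concrete comparisons fall through to the mro lookup. A short illustration:

import numpy as np

print(np.issubdtype(np.float64, np.floating))  # True: concrete vs. abstract
print(np.issubdtype(np.float32, np.float64))   # False: distinct concrete types
print(np.issubdtype(np.int32, np.number))      # True: ints sit under number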
Example #5
  def all_acceptable_memory_accesses(self, state, possible_type):
    (gadget_type, inputs, outputs, params, clobber) = possible_type

    # Always allow the LoadMem gadget for loading IP from the Stack
    if gadget_type == LoadMem and outputs[0] == self.ip and inputs[0] == self.sp:
      return True

    for mem_address, mem_value in state.in_mem.items():
      good_mem_access = False
      if not (
          # Allow the LoadMem's read
          (gadget_type == LoadMem and mem_address == state.in_regs[inputs[0]] + params[0] and state.out_regs[outputs[0]] == mem_value)

          # Allow the ArithmeticLoad's read
          or (issubclass(gadget_type, ArithmeticLoad) and mem_address == state.in_regs[inputs[0]] + params[0])

          # Allow the ArithmeticStore's read
          or (issubclass(gadget_type, ArithmeticStore) and mem_address == state.in_regs[inputs[0]] + params[0])

          # Allow loads from the SP register (i.e. pop)
          or (self.sp in state.in_regs and abs(mem_address - state.in_regs[self.sp]) < 0x1000)
      ):
        return False

    for mem_address, mem_value in state.out_mem.items():
      if not (
        # Allow the StoreMem's write
        (gadget_type == StoreMem and mem_address == state.in_regs[inputs[0]] + params[0] and mem_value == state.in_regs[inputs[1]])

        # Allow the ArithmeticStore's write
        or (issubclass(gadget_type, ArithmeticStore) and mem_address == state.in_regs[inputs[0]] + params[0])
      ):
        return False

    return True
Example #6
 def on_last_change(self):
     '''call last change for all the containing items'''
     for item in self.items:
         if issubclass(type(item), Base):
             item.on_value_changed(True)
         elif issubclass(type(item), Group):
             item.on_last_change()
Example #7
 def setup_slaves(self):
     field_type = self.detail.get_parameter_type()
     if issubclass(field_type, Image):
         self._setup_image_slave()
     elif issubclass(field_type, Domain):
         self._setup_comboboxentry_slave()
     elif self.detail.editor == 'file-chooser':
         self._setup_entry_with_filechooser_button_slave()
     elif self.detail.editor == 'directory-chooser':
         self._setup_entry_with_filechooser_button_slave(dir_only=True)
     elif issubclass(field_type, bool):
         self._setup_radio_slave()
     elif issubclass(field_type, (int, float, Decimal)):
         if self.detail.options:
             self._setup_options_combo_slave()
         elif self.detail.range:
             self._setup_spin_entry_slave()
         else:
             self._setup_entry_slave()
     elif issubclass(field_type, basestring):
         if self.detail.multiline:
             self._setup_text_view_slave()
         elif self.detail.combo_data:
             self._setup_comboboxentry_slave(data=self.detail.combo_data())
         else:
             self._setup_entry_slave()
     else:
         raise TypeError("ParameterData for `%s' has an invalid "
                         "type: %r" % (self.model.field_name,
                                       field_type))
Example #8
    def listReplicas(self, request):
        """
        Queries the DLI for the list of SURLs for the LFN/GUID/Dataset specified
        in the request. The request can be built with the
        dliClient_types.new_listReplicasRequest method.
        
        @param request: a listReplicaRequest object containing the input data and its type

        @return: a listReplicasResponseWrapper object, containing the list of SURLs 
        """

        # Check correct type is passed
        if not isinstance(request, listReplicasRequest) and\
            not issubclass(listReplicasRequest, request.__class__):
            raise TypeError, "%s incorrect request type" %(request.__class__)
            
        # Query    
        kw = {}
        response = self.binding.Send(None, None, request, soapaction="", **kw)
        response = self.binding.Receive(listReplicasResponseWrapper())

        # Check correct reply was received
        if not isinstance(response, listReplicasResponse) and\
            not issubclass(listReplicasResponse, response.__class__):
            raise TypeError, "%s incorrect response type" %(response.__class__)

        # Return received value
        return response
Example #9
    def _init_hook1(cls, cls_name, bases, dct):

        # cls is the class to be
        # cls.__dict__ is its current dict, which may contain inherited items
        # dct is the dict represented by exactly this class (no inheritance)

        # Get CSS from the class now
        CSS = dct.get('CSS', '')

        # Create corresponding class for JS
        if issubclass(cls, LocalComponent):
            cls._make_js_proxy_class(cls_name, bases, dct)
        elif issubclass(cls, ProxyComponent):
            cls._make_js_local_class(cls_name, bases, dct)
        else:  # pragma: no cover
            raise TypeError('Expected class to inherit from '
                            'LocalComponent or ProxyComponent.')

        # Write __jsmodule__; an optimization for our module/asset system
        cls.__jsmodule__ = get_mod_name(sys.modules[cls.__module__])
        cls.JS.__jsmodule__ = cls.__jsmodule__  # need it in JS too
        cls.JS.__module__ = cls.__module__

        # Set CSS
        cls.CSS = CSS
        try:
            delattr(cls.JS, 'CSS')
        except AttributeError:
            pass
Example #10
    def __new__(cls, dtype):
        try:
            dtype = numeric.dtype(dtype)
        except TypeError:
            # In case a float instance was given
            dtype = numeric.dtype(type(dtype))

        obj = cls._finfo_cache.get(dtype, None)
        if obj is not None:
            return obj
        dtypes = [dtype]
        newdtype = numeric.obj2sctype(dtype)
        if newdtype is not dtype:
            dtypes.append(newdtype)
            dtype = newdtype
        if not issubclass(dtype, numeric.inexact):
            raise ValueError("data type %r not inexact" % (dtype))
        obj = cls._finfo_cache.get(dtype, None)
        if obj is not None:
            return obj
        if not issubclass(dtype, numeric.floating):
            newdtype = _convert_to_float[dtype]
            if newdtype is not dtype:
                dtypes.append(newdtype)
                dtype = newdtype
        obj = cls._finfo_cache.get(dtype, None)
        if obj is not None:
            return obj
        obj = object.__new__(cls)._init(dtype)
        for dt in dtypes:
            cls._finfo_cache[dt] = obj
        return obj
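
The repeated cache lookups exist because the requested dtype may be normalized twice (via ``obj2sctype`` and ``_convert_to_float``) before ``_init`` runs, and every alias along the way is cached to the same singleton. Typical use through the public ``np.finfo`` name:

import numpy as np

fi = np.finfo(np.float64)
print(fi.eps, fi.max)              # machine epsilon and largest finite value
print(np.finfo('float64') is fi)   # True: dtype aliases share one cached object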
Example #11
    def get_form(self, step=None, data=None, files=None):
        """
        Constructs the form for a given `step`. If no `step` is defined, the
        current step will be determined automatically.

        The form will be initialized using the `data` argument to prefill the
        new form. If needed, instance or queryset (for `ModelForm` or
        `ModelFormSet`) will be added too.
        """
        if step is None:
            step = self.steps.current
        # prepare the kwargs for the form instance.
        kwargs = self.get_form_kwargs(step)
        kwargs.update({
            'data': data,
            'files': files,
            'prefix': self.get_form_prefix(step, self.form_list[step]),
            'initial': self.get_form_initial(step),
        })
        if issubclass(self.form_list[step], forms.ModelForm):
            # If the form is based on ModelForm, add instance if available.
            kwargs.update({'instance': self.get_form_instance(step)})
        elif issubclass(self.form_list[step], forms.models.BaseModelFormSet):
            # If the form is based on ModelFormSet, add queryset if available.
            kwargs.update({'queryset': self.get_form_instance(step)})
        return self.form_list[step](**kwargs)
Example #12
    def _check_inlines_item(self, obj, model, inline, label):
        """ Check one inline model admin. """
        inline_label = '.'.join([inline.__module__, inline.__name__])

        from django.contrib.admin.options import BaseModelAdmin

        if not issubclass(inline, BaseModelAdmin):
            return [
                checks.Error(
                    "'%s' must inherit from 'BaseModelAdmin'." % inline_label,
                    hint=None,
                    obj=obj.__class__,
                    id='admin.E104',
                )
            ]
        elif not inline.model:
            return [
                checks.Error(
                    "'%s' must have a 'model' attribute." % inline_label,
                    hint=None,
                    obj=obj.__class__,
                    id='admin.E105',
                )
            ]
        elif not issubclass(inline.model, models.Model):
            return must_be('a Model', option='%s.model' % inline_label,
                           obj=obj, id='admin.E106')
        else:
            return inline(model, obj.admin_site).check()
Example #13
 def test_subclass(self):
     self.assertTrue(issubclass(dict, aiozmq.rpc.AbstractHandler))
     self.assertIsInstance({}, aiozmq.rpc.AbstractHandler)
     self.assertFalse(issubclass(object, aiozmq.rpc.AbstractHandler))
     self.assertNotIsInstance(object(), aiozmq.rpc.AbstractHandler)
     self.assertNotIsInstance('string', aiozmq.rpc.AbstractHandler)
     self.assertNotIsInstance(b'bytes', aiozmq.rpc.AbstractHandler)
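
``dict`` passes these checks without inheriting from ``AbstractHandler`` because the ABC defines ``__subclasshook__``; judging by the assertions, aiozmq recognizes mapping-like classes while excluding ``str`` and ``bytes``. A minimal, self-contained sketch of the same mechanism with an invented ``Lookupable`` ABC (not aiozmq's actual hook):

import abc

class Lookupable(abc.ABC):
    @classmethod
    def __subclasshook__(cls, C):
        if cls is Lookupable:
            if issubclass(C, (str, bytes)):   # exclude sequence-of-char types
                return False
            return any('__getitem__' in B.__dict__ for B in C.__mro__)
        return NotImplemented

print(issubclass(dict, Lookupable))    # True, without any inheritance
print(issubclass(object, Lookupable))  # False
print(isinstance('text', Lookupable))  # False: str is excluded explicitly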
Example #14
    def log_wal_fetch_failures_on_error(exc_tup, exc_processor_cxt):
        def standard_detail_message(prefix=''):
            return (prefix + '  There have been {n} attempts to fetch wal '
                    'file {url} so far.'.format(n=exc_processor_cxt, url=url))
        typ, value, tb = exc_tup
        del exc_tup

        # Screen for certain kinds of known-errors to retry from
        if issubclass(typ, socket.error):
            socketmsg = value[1] if isinstance(value, tuple) else value

            logger.info(
                msg='Retrying fetch because of a socket error',
                detail=standard_detail_message(
                    "The socket error's message is '{0}'."
                    .format(socketmsg)))
        elif (issubclass(typ, boto.exception.S3ResponseError) and
              value.error_code == 'RequestTimeTooSkewed'):
            logger.info(msg='Retrying fetch because of a Request Skew time',
                        detail=standard_detail_message())
        else:
            # For all otherwise untreated exceptions, report them as a
            # warning and retry anyway -- all exceptions that can be
            # justified should be treated and have error messages
            # listed.
            logger.warning(
                msg='retrying WAL file fetch from unexpected exception',
                detail=standard_detail_message(
                    'The exception type is {etype} and its value is '
                    '{evalue} and its traceback is {etraceback}'
                    .format(etype=typ, evalue=value,
                            etraceback=''.join(traceback.format_tb(tb)))))

        # Help Python GC by resolving possible cycles
        del tb
Example #15
    def test_get_url_shortener(self):
        us_settings.URL_SHORTENER_BACKEND = 'mymodule.myclass'
        try:
            with warnings.catch_warnings(record=True) as w:
                self.assertEquals(get_url_shortener(), default_backend)
                self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
                self.assertEquals(
                    str(w[-1].message),
                    'mymodule.myclass backend cannot be imported')
        except AttributeError:
            # Fails under Python 2.5 because of 'warnings.catch_warnings'
            pass

        us_settings.URL_SHORTENER_BACKEND = 'gstudio.tests.custom_url_shortener'
        try:
            with warnings.catch_warnings(record=True) as w:
                self.assertEquals(get_url_shortener(), default_backend)
                self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
                self.assertEquals(
                    str(w[-1].message),
                    'This backend only exists for testing')
        except AttributeError:
            # Fails under Python 2.5 because of 'warnings.catch_warnings'
            pass

        us_settings.URL_SHORTENER_BACKEND = 'gstudio.url_shortener'\
                                            '.backends.default'
        self.assertEquals(get_url_shortener(), default_backend)
Example #16
        def _innerWrapper(*args, **kw):
            # Supports independent function views
            if isinstance(args[0], HttpRequest):
                request = args[0]

            # Supports ModelAdmin method views
            elif isinstance(args[0], ModelAdmin):
                model_admin = args[0]
                request = args[1]

            context_dict = decorator_args.copy()
            g = fn(*args, **kw)
            if issubclass(type(g), HttpResponse): 
                return g
            if not hasattr(g, 'next'):  #Is this a generator?  Otherwise make it a tuple!
                g = (g,)
            for i in g:
                if issubclass(type(i), HttpResponse):
                    return i
                if type(i) == type(()):
                    context_dict[i[0]] = i[1]
                else:
                    context_dict.update(i)
            template_name = context_dict.get("template", template)
            context_instance = context_dict.get("context", context)
            if not context_instance:
                context_instance = RequestContext(request, context_dict)
            return render_to_response(template_name, context_dict, context_instance)
Example #17
    def __call__(self, name, *args, **kwargs):
        if "observed" in kwargs:
            raise ValueError(
                "Observed Bound distributions are not supported. "
                "If you want to model truncated data "
                "you can use a pm.Potential in combination "
                "with the cumulative probability function. See "
                "pymc3/examples/censored_data.py for an example."
            )

        transform = kwargs.pop("transform", "infer")
        if issubclass(self.distribution, Continuous):
            return _ContinuousBounded(
                name,
                self.distribution,
                self.lower,
                self.upper,
                transform,
                *args,
                **kwargs
            )
        elif issubclass(self.distribution, Discrete):
            return _DiscreteBounded(
                name,
                self.distribution,
                self.lower,
                self.upper,
                transform,
                *args,
                **kwargs
            )
        else:
            raise ValueError("Distribution is neither continuous nor discrete.")
Example #18
 def __call__(self, model, owner='', includeReferences=True):
     if issubclass(model, orb.Table):
         return self._createTable(model, owner, includeReferences)
     elif issubclass(model, orb.View):
         return self._createView(model, owner, includeReferences)
     else:
         raise orb.errors.OrbError('Cannot create model for type: {0}'.format(type(model)))
Example #19
    def get_real_instance_class(self):
        """
        Normally not needed.
        If a non-polymorphic manager (like base_objects) has been used to
        retrieve objects, then the real class/type of these objects may be
        determined using this method.
        """
        # The following line would be the easiest way to do this, but it
        # produces sql queries:
        #     return self.polymorphic_ctype.model_class()
        # so we use the following version, which uses the ContentType manager
        # cache.
        # Note that model_class() can return None for stale content types; when
        # the content type record still exists but no longer refers to an
        # existing model.
        try:
            model = ContentType.objects.get_for_id(self.polymorphic_ctype_id) \
                .model_class()
        except AttributeError:
            # Django <1.6 workaround
            return None

        # Protect against bad imports (dumpdata without --natural) or other
        # issues messing with the ContentType models.
        if model is not None \
           and not issubclass(model, self.__class__) \
           and not issubclass(model, self.__class__._meta.proxy_for_model):
            raise RuntimeError(
                "ContentType {0} for {1} #{2} does not point to a subclass!"
                .format(self.polymorphic_ctype_id, model, self.pk,)
            )
        return model
Example #20
 def to(cls, other):
     cls.genre_check(other)
     pf = cls.prefix_factor(other)
     if issubclass(other, Second):
         return pf
     elif issubclass(other, AtomicUnitOfTime):
         return pf / 2.418884326502e-17
     elif issubclass(other, Minute):
         return pf / 60.0
     elif issubclass(other, Hour):
         return pf / 3600.0
     elif issubclass(other, Day):
         return pf / 86400.0
     elif issubclass(other, Week):
         return pf / 604800.0
     elif issubclass(other, Year):
         return pf / 31556925.445
     elif issubclass(other, Decade):
         return pf / (31556925.445 * 10)
     elif issubclass(other, Century):
         return pf / (31556925.445 * 100)
     elif issubclass(other, Millennium):
         return pf / (31556925.445 * 1000)
     else: # pragma: no cover
         raise NotImplementedError("Conversion from units " + classname(cls) + " to units " + classname(other) + " is not implemented.")
Example #21
def printExtensions():
  '''
  Echoes all entities in our extension module.
  Useful to create documentation.
  '''
  print("  Types:")
  for name, o in inspect.getmembers(frepple):
    if not inspect.isclass(o) or issubclass(o,Exception) or hasattr(o,"__iter__"): continue
    print("    %s: %s" % (o.__name__, inspect.getdoc(o)))
  print("  Methods:")
  for name, o in inspect.getmembers(frepple):
    if not inspect.isroutine(o): continue
    print("    %s: %s" % (o.__name__, inspect.getdoc(o)))
  print("  Exceptions:")
  for name, o in inspect.getmembers(frepple):
    if not inspect.isclass(o) or not issubclass(o,Exception): continue
    print("    %s" % (o.__name__))
  print("  Iterators:")
  for name, o in inspect.getmembers(frepple):
    if not inspect.isclass(o) or not hasattr(o,"__iter__"): continue
    print("    %s: %s" % (o.__name__, inspect.getdoc(o)))
  print("  Other:")
  for name, o in inspect.getmembers(frepple):
    # Negating the exact same filters as in the previous blocks
    if not(not inspect.isclass(o) or issubclass(o,Exception) or hasattr(o,"__iter__")): continue
    if inspect.isroutine(o): continue
    if not(not inspect.isclass(o) or not issubclass(o,Exception)): continue
    if not(not inspect.isclass(o) or not hasattr(o,"__iter__")): continue
    print("    %s: %s" % (name, o))
Example #22
def does_match_definition(given, main, secondary):
    """implementation details"""
    assert isinstance(secondary, tuple)
    assert len(secondary) == 2  # general solution could be provided
    types = decompose_type(given)

    if isinstance(types[0], main):
        return True

    if len(types) >= 2:
        cond1 = isinstance(types[0], main)
        cond2 = isinstance(types[1], secondary)
        cond3 = isinstance(types[1], main)
        cond4 = isinstance(types[0], secondary)
        if (cond1 and cond2) or (cond3 and cond4):
            return True

        if len(types) >= 3:
            classes = set([tp.__class__ for tp in types[:3]])
            desired = set([main] + list(secondary))
            diff = classes.symmetric_difference(desired)
            if not diff:
                return True
            if len(diff) == 2:
                items = list(diff)
                return (
                    issubclass(
                        items[0], items[1]) or issubclass(items[1], items[0]))
            return False
    else:
        return False
Example #23
    def create(self, parameter_type):
        if issubclass(parameter_type, EnumParameter):
            class TestParameterEnum(parameter_type):
                def __init__(self):
                    parameter_type.__init__(self)
                    self.value = parameter_type.default_value(self)

            return TestParameterEnum()

        elif issubclass(parameter_type, BooleanParameter):
            class TestParameterBoolean(parameter_type):
                def __init__(self):
                    parameter_type.__init__(self)
                    self.value = parameter_type.default_value(self)

            return TestParameterBoolean()

        elif issubclass(parameter_type, Parameter):
            class TestParameter(parameter_type):
                def __init__(self):
                    parameter_type.__init__(self)

            return TestParameter()

        else:
            raise TypeError('Parameter type %s unknown!' %
                            str(parameter_type))
Example #24
def create_views(sender, **kwargs):
    cursor = connection.cursor()
    app = kwargs['app']
    if type(app) == str:
        import sys
        app = getattr(sys.modules[app], 'models')
    for model in models.get_models(app):
        if issubclass(model, View):
            # Check if view exists
            sql = model.create_check()
            cursor.execute(*sql)
            result = cursor.fetchone()
            if not result[0]:
                # Create View
                sql = model.create_sql()
                cursor.execute(*sql)
            if issubclass(model, MatView):
                sql = MatView.storedproc_check()
                expected_resp = sql[2]
                sql = sql[:2]
                cursor.execute(*sql)
                res = cursor.fetchall()
                if res[0][0] != expected_resp:
                    for sql in MatView.storedproc_sql():
                        cursor.execute(sql, ())
                func = model.create_matview()
                try:
                    cursor.execute(*func)
                    transaction.commit()
                except DatabaseError as e:
                    if e.message.startswith('MatView') and e.message.find('already exists') != -1:
                        transaction.rollback()
                    else:
                        raise
Example #25
    def create(self, command_type, inferior_repository, platform_factory,
               inferior_factory, thread_factory, config, terminal):
        if issubclass(command_type, DataCommand):
            class TestDataCommand(command_type):
                __doc__ = command_type.__doc__

                def __init__(self):
                    command_type.__init__(self)

                def invoke(self, argument, _):
                    inferior = TestInferior()
                    thread = TestThread(inferior.id(), 0)
                    command_type.execute(self, terminal, thread, argument)

            return TestDataCommand()

        if issubclass(command_type, Command):
            class TestCommand(command_type):
                __doc__ = command_type.__doc__

                def __init__(self):
                    command_type.__init__(self)

                def invoke(self, argument, _):
                    command_type.execute(self, terminal, argument)

            return TestCommand()

        else:
            raise TypeError('Command type %s unknown!' %
                            str(command_type))
Example #26
def _PrintTestList(tests):
  if not tests:
    print >> sys.stderr, 'No tests found!'
    return

  # Align the test names to the longest one.
  format_string = '  %%-%ds %%s' % max(len(t.Name()) for t in tests)

  filtered_tests = [test_class for test_class in tests
                    if issubclass(test_class, benchmark.Benchmark)]
  if filtered_tests:
    print >> sys.stderr, 'Available tests are:'
    for test_class in sorted(filtered_tests, key=lambda t: t.Name()):
      print >> sys.stderr, format_string % (
          test_class.Name(), test_class.Description())
    print >> sys.stderr

  filtered_tests = [test_class for test_class in tests
                    if issubclass(test_class, page_test.PageTest)]
  if filtered_tests:
    print >> sys.stderr, 'Available page tests are:'
    for test_class in sorted(filtered_tests, key=lambda t: t.Name()):
      print >> sys.stderr, format_string % (
          test_class.Name(), test_class.Description())
    print >> sys.stderr
Example #27
    def register(self, provider_class):
        """
        Registers a provider with the site.
        """
        if not issubclass(provider_class, BaseProvider):
            raise TypeError('%s is not a subclass of BaseProvider' % provider_class.__name__)

        if provider_class in self._registered_providers:
            raise AlreadyRegistered('%s is already registered' % provider_class.__name__)

        if issubclass(provider_class, DjangoProvider):
            # set up signal handler for cache invalidation
            signals.post_save.connect(
                self.invalidate_stored_oembeds,
                sender=provider_class._meta.model
            )

        # don't build the regex yet - if not all urlconfs have been loaded
        # and processed at this point, the DjangoProvider instances will fail
        # when attempting to reverse urlpatterns that haven't been created.
        # Rather, the regex-list will be populated once, on-demand.
        self._registered_providers.append(provider_class)

        # flag for re-population
        self.invalidate_providers()
Example #28
def describe(value):
  """Describe any value as a descriptor.

  Helper function for describing any object with an appropriate descriptor
  object.

  Args:
    value: Value to describe as a descriptor.

  Returns:
    Descriptor message class if object is describable as a descriptor, else
    None.
  """
  from . import remote
  if isinstance(value, types.ModuleType):
    return describe_file(value)
  elif callable(value) and hasattr(value, 'remote'):
    return describe_method(value)
  elif isinstance(value, messages.Field):
    return describe_field(value)
  elif isinstance(value, messages.Enum):
    return describe_enum_value(value)
  elif isinstance(value, type):
    if issubclass(value, messages.Message):
      return describe_message(value)
    elif issubclass(value, messages.Enum):
      return describe_enum(value)
    elif issubclass(value, remote.Service):
      return describe_service(value)
  return None
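
Note the ``isinstance(value, type)`` guard before the subclass checks: ``issubclass`` raises ``TypeError`` when its first argument is not a class, so instances must be screened out before dispatching. A minimal sketch of the same pattern (invented names):

class Message:
    pass

def describe_kind(value):
    if isinstance(value, type):          # only classes reach issubclass()
        if issubclass(value, Message):
            return 'message class'
        return 'other class'
    return 'instance'

print(describe_kind(Message))    # 'message class'
print(describe_kind(Message()))  # 'instance'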
Example #29
def get_build_from_file (platform, file_name, name):
    gub_name = file_name.replace (os.getcwd () + '/', '')
    logging.verbose ('reading spec: %(gub_name)s\n' % locals ())
    # Ugh, FIXME
    # This loads gub/specs/darwin/python.py in PYTHON. namespace,
    # overwriting the PYTHON. namespace from gub/specs/python.py
    # Current workaround: always/also use __darwin etc. postfixing
    # of class names, also in specs/darwin/ etc.
    warnings.filterwarnings ('ignore', '''Parent module 'python-2' ''')
    module = misc.load_module (file_name, name)
    # cross/gcc.py:Gcc will be called: cross/Gcc.py,
    # to distinguish from specs/gcc.py:Gcc.py
    base = os.path.basename (name)
    class_name = ((base[0].upper () + base[1:])
                  .replace ('-', '_')
                  .replace ('.', '_')
                  .replace ('++', '_xx_')
                  .replace ('+', '_x_')
                  + ('-' + platform).replace ('-', '__'))
    logging.debug ('LOOKING FOR: %(class_name)s\n' % locals ())
    cls = misc.most_significant_in_dict (module.__dict__, class_name, '__')
    if (platform == 'tools32'
        and (not cls or issubclass (cls, target.AutoBuild))):
        cls = misc.most_significant_in_dict (module.__dict__, class_name.replace ('tools32', 'tools'), '__')
    if ((platform == 'tools' or platform == 'tools32')
        and cls is not None
        and (issubclass (cls, target.AutoBuild)
             and not issubclass (cls, tools.AutoBuild)
             and not issubclass (cls, tools32.AutoBuild))):
        cls = None
    return cls
Example #30
    def __new__(mcs, name, bases, attrs):
        options_data = mcs.collect_options(bases, attrs)
        options = mcs.create_options(options_data)

        if options.inheritance_mode == INLINE:
            fields = mcs.collect_fields(bases, attrs)
            parent_documents = set()
            for base in bases:
                if issubclass(base, Document) and base is not Document:
                    parent_documents.update(base._parent_documents)
        else:
            fields = mcs.collect_fields([], attrs)
            parent_documents = [base for base in bases
                                if issubclass(base, Document) and base is not Document]

        attrs['_fields'] = fields
        attrs['_parent_documents'] = sorted(parent_documents, key=lambda d: d.get_definition_id())
        attrs['_options'] = options
        attrs['_backend'] = DocumentBackend(
            properties=fields,
            pattern_properties=options.pattern_properties,
            additional_properties=options.additional_properties,
            min_properties=options.min_properties,
            max_properties=options.max_properties,
            title=options.title,
            description=options.description,
            enum=options.enum,
            default=options.default,
            id=options.id,
        )

        klass = type.__new__(mcs, name, bases, attrs)
        registry.put_document(klass.__name__, klass, module=klass.__module__)
        _set_owner_to_document_fields(klass)
        return klass
Example #31
 def test_subclass(self):
     """Test to inheritance"""
     self.assertTrue(issubclass(self.s1.__class__, BaseModel), True)
Example #32
 def test_abc(self):
     self.assertIsInstance(OrderedDict(), MutableMapping)
     self.assertTrue(issubclass(OrderedDict, MutableMapping))
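
``OrderedDict`` never lists ``MutableMapping`` as a base class; the check succeeds because ``collections.abc`` registers the concrete mapping types as virtual subclasses:

from collections import OrderedDict
from collections.abc import MutableMapping

print(issubclass(OrderedDict, MutableMapping))  # True via ABC registration
print(MutableMapping in OrderedDict.__mro__)    # False: not an actual base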
Example #33
    def get_properties(self, cls):
        """Returns two lists with the dlite dimensions and properties
        corresponding to owl class `cls`."""
        dims = []
        props = []
        dimindices = {}
        propnames = set()
        types = dict(Integer='int', Real='double', String='string')

        def get_dim(r, name, descr=None):
            """Returns dimension index corresponding to dimension name `name`
            for property `r.value`."""
            t = owlready2.class_construct._restriction_type_2_label[r.type]
            if (t in ('some', 'only', 'min') or
                    (t in ('max', 'exactly') and r.cardinality > 1)):
                if name not in dimindices:
                    dimindices[name] = len(dims)
                    dims.append(Dimension(name, descr))
                return [dimindices[name]]
            else:
                return []

        for c in cls.mro():
            if not isinstance(c, owlready2.ThingClass):
                continue
            for r in c.is_a:
                # Note that EMMO currently does not define an inverse for
                # hasProperty.  If we reintroduce that, we should replace
                #
                #     not isinstance(r.property, Inverse) and
                #     issubclass(r.property, self.onto.hasProperty)
                #
                # with
                #
                #     ((isinstance(r.property, Inverse) and
                #       issubclass(Inverse(r.property), onto.isPropertyFor)) or
                #      issubclass(r.property, self.onto.hasProperty))
                #
                if (isinstance(r, owlready2.Restriction) and
                        not isinstance(r.property, owlready2.Inverse) and
                        issubclass(r.property, self.onto.hasProperty) and
                        isinstance(r.value, owlready2.ThingClass) and
                        isinstance(r.value, self.onto.Property)):
                    name = self.get_label(r.value)
                    if name in propnames:
                        continue
                    propnames.add(name)

                    # Default type, ndims and unit
                    if isinstance(r.value, (self.onto.DescriptiveProperty,
                                            self.onto.QualitativeProperty,
                                            self.onto.SubjectiveProperty)):
                        ptype = 'string'
                    else:
                        ptype = 'double'
                    d = []
                    d.extend(get_dim(r, 'n_%ss' % name, 'Number of %s.' %
                                     name))
                    unit = None

                    # Update type, ndims and unit from relations
                    for r2 in [r] + r.value.is_a:
                        if isinstance(r2, owlready2.Restriction):
                            if issubclass(r2.property, self.onto.hasType):
                                typelabel = self.get_label(r2.value)
                                ptype = types[typelabel]
                                d.extend(get_dim(r2, '%s_length' % name,
                                                 'Length of %s' % name))
                            elif issubclass(r2.property, self.onto.hasUnit):
                                unit = self.get_label(r2.value)

                    descr = self.get_description(r.value)
                    props.append(Property(name, type=ptype, dims=d,
                                          unit=unit, description=descr))
        return dims, props
Example #34
def all_estimators(include_meta_estimators=False,
                   include_other=False,
                   type_filter=None,
                   include_dont_test=False):
    """Get a list of all estimators from imblearn.

    This function crawls the module and gets all classes that inherit
    from BaseEstimator. Classes that are defined in test-modules are not
    included.
    By default meta_estimators are also not included.
    This function is adapted from sklearn.

    Parameters
    ----------
    include_meta_estimators : boolean, default=False
        Whether to include meta-estimators that can be constructed using
        an estimator as their first argument. These are currently none.

    include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
        not be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion.

    include_dont_test : boolean, default=False
        Whether to include "special" label estimator or test processors.

    type_filter : string, list of string, or None, default=None
        Which kind of estimators should be returned. If None, no
        filter is applied and all estimators are returned.  Possible
        values are 'sampler' to get estimators only of these specific
        types, or a list of these to get the estimators that fit at
        least one of the types.

    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.

    """
    def is_abstract(c):
        if not (hasattr(c, '__abstractmethods__')):
            return False
        if not len(c.__abstractmethods__):
            return False
        return True

    all_classes = []
    # get parent folder
    path = imblearn.__path__
    for importer, modname, ispkg in pkgutil.walk_packages(
            path=path, prefix='imblearn.', onerror=lambda x: None):
        if (".tests." in modname):
            continue
        module = __import__(modname, fromlist="dummy")
        classes = inspect.getmembers(module, inspect.isclass)
        all_classes.extend(classes)

    all_classes = set(all_classes)

    estimators = [
        c for c in all_classes
        if (issubclass(c[1], BaseEstimator) and c[0] != 'BaseEstimator')
    ]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]

    # get rid of sklearn estimators which have been imported in some classes
    estimators = [c for c in estimators if "sklearn" not in c[1].__module__]

    if not include_dont_test:
        estimators = [c for c in estimators if not c[0] in DONT_TEST]

    if not include_other:
        estimators = [c for c in estimators if not c[0] in OTHER]
    # possibly get rid of meta estimators
    if not include_meta_estimators:
        estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
    if type_filter is not None:
        if not isinstance(type_filter, list):
            type_filter = [type_filter]
        else:
            type_filter = list(type_filter)  # copy
        filtered_estimators = []
        filters = {'sampler': SamplerMixin}
        for name, mixin in filters.items():
            if name in type_filter:
                type_filter.remove(name)
                filtered_estimators.extend(
                    [est for est in estimators if issubclass(est[1], mixin)])
        estimators = filtered_estimators
        if type_filter:
            raise ValueError("Parameter type_filter must be 'sampler' or "
                             "None, got"
                             " %s." % repr(type_filter))

    # drop duplicates, sort for reproducibility
    # itemgetter is used to ensure the sort does not extend to the 2nd item of
    # the tuple
    return sorted(set(estimators), key=itemgetter(0))
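
Per the docstring, the result is a sorted list of ``(name, class)`` pairs. A hypothetical call filtering for samplers (assumes imblearn is installed):

for name, Est in all_estimators(type_filter='sampler'):
    print(name, Est.__module__)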
Example #35
    def test_postgres_uses_JSONField(self) -> None:
        """ Tests that postgres uses the JSONField implementation """

        self.assertTrue(SmartJSONField.using_postgres)
        self.assertTrue(issubclass(SmartJSONField, JSONField))
Example #36
    def test_sqlite_uses_DumbJSON(self) -> None:
        """ Tests that sqlite uses the DumbJSONField implementation """

        self.assertFalse(SmartJSONField.using_postgres)
        self.assertTrue(issubclass(SmartJSONField, DumbJSONField))
Example #37
    def _format_exception_only(self, etype, value):
        """Format the exception part of a traceback.

        The arguments are the exception type and value such as given by
        sys.exc_info()[:2]. The return value is a list of strings, each ending
        in a newline.  Normally, the list contains a single string; however,
        for SyntaxError exceptions, it contains several lines that (when
        printed) display detailed information about where the syntax error
        occurred.  The message indicating which exception occurred is always
        the last string in the list.

        Also lifted nearly verbatim from traceback.py
        """
        have_filedata = False
        Colors = self.Colors
        list = []
        stype = py3compat.cast_unicode(Colors.excName + etype.__name__ +
                                       Colors.Normal)
        if value is None:
            # Not sure if this can still happen in Python 2.6 and above
            list.append(stype + '\n')
        else:
            if issubclass(etype, SyntaxError):
                have_filedata = True
                if not value.filename: value.filename = "<string>"
                if value.lineno:
                    lineno = value.lineno
                    textline = linecache.getline(value.filename, value.lineno)
                else:
                    lineno = 'unknown'
                    textline = ''
                list.append('%s  File %s"%s"%s, line %s%s%s\n' % \
                            (Colors.normalEm,
                             Colors.filenameEm, py3compat.cast_unicode(value.filename), Colors.normalEm,
                             Colors.linenoEm, lineno, Colors.Normal  ))
                if textline == '':
                    textline = py3compat.cast_unicode(value.text, "utf-8")

                if textline is not None:
                    i = 0
                    while i < len(textline) and textline[i].isspace():
                        i += 1
                    list.append('%s    %s%s\n' %
                                (Colors.line, textline.strip(), Colors.Normal))
                    if value.offset is not None:
                        s = '    '
                        for c in textline[i:value.offset - 1]:
                            if c.isspace():
                                s += c
                            else:
                                s += ' '
                        list.append('%s%s^%s\n' %
                                    (Colors.caret, s, Colors.Normal))

            try:
                s = value.msg
            except Exception:
                s = self._some_str(value)
            if s:
                list.append('%s%s:%s %s\n' %
                            (stype, Colors.excName, Colors.Normal, s))
            else:
                list.append('%s\n' % stype)

        # sync with user hooks
        if have_filedata:
            ipinst = get_ipython()
            if ipinst is not None:
                ipinst.hooks.synchronize_with_editor(value.filename,
                                                     value.lineno, 0)

        return list
Example #38
def isenumclass(x: Any) -> bool:
    """Check if the object is subclass of enum."""
    return inspect.isclass(x) and issubclass(x, enum.Enum)
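
Both conditions matter: the ``inspect.isclass`` guard keeps enum *members* (instances) from reaching ``issubclass``, which would raise ``TypeError``. Usage of the helper above:

import enum

class Color(enum.Enum):
    RED = 1

print(isenumclass(Color))      # True: a class deriving from enum.Enum
print(isenumclass(Color.RED))  # False: a member, not a class
print(isenumclass(int))        # False: a class, but not an Enum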
Example #39
    def __from_dict(self, values: dict):
        for prop in self.properties():
            if getattr(type(self), prop).fset is None:
                continue

            prop_value = values.get(
                prop,
                values.get(
                    inflection.camelize(prop, uppercase_first_letter=False)))

            if prop_value is not None:
                if isinstance(prop_value, np.generic):
                    prop_value = prop_value.item()

                additional_types = []
                prop_type = self.prop_type(prop, additional=additional_types)

                if prop_type is None:
                    # This shouldn't happen
                    setattr(self, prop, prop_value)
                elif issubclass(prop_type, dt.datetime):
                    if isinstance(prop_value, int):
                        setattr(
                            self, prop,
                            dt.datetime.fromtimestamp(prop_value /
                                                      1000).isoformat())
                    else:
                        import re
                        matcher = re.search('\\.([0-9]*)Z$', prop_value)
                        if matcher:
                            sub_seconds = matcher.group(1)
                            if len(sub_seconds) > 6:
                                prop_value = re.sub(
                                    matcher.re, '.{}Z'.format(sub_seconds[:6]),
                                    prop_value)

                        try:
                            setattr(self, prop, isoparse(prop_value))
                        except ValueError:
                            if str in additional_types:
                                setattr(self, prop, prop_value)
                elif issubclass(prop_type,
                                dt.date) and type(prop_value) is not dt.date:
                    date_value = None

                    if isinstance(prop_value, float):
                        # Assume it's an Excel date
                        if prop_value > 59:
                            prop_value -= 1  # Excel leap year bug, 1900 is not a leap year!
                        date_value = (dt.datetime(1899, 12, 31) + dt.timedelta(
                            days=prop_value)).date()
                    elif isinstance(prop_value, str):
                        for format in _valid_date_formats:
                            try:
                                date_value = dt.datetime.strptime(
                                    prop_value, format).date()
                                break
                            except ValueError:
                                pass

                    setattr(self, prop, date_value or prop_value)
                elif issubclass(prop_type, float) and isinstance(
                        prop_value, str):
                    if prop_value.endswith('%'):
                        setattr(self, prop, float(prop_value[:-1]) / 100)
                    else:
                        setattr(self, prop, float(prop_value))
                elif issubclass(prop_type, EnumBase):
                    setattr(self, prop, get_enum_value(prop_type, prop_value))
                elif issubclass(prop_type, Base):
                    if isinstance(prop_value, Base):
                        setattr(self, prop, prop_value)
                    else:
                        setattr(self, prop, prop_type.from_dict(prop_value))
                elif issubclass(prop_type, (list, tuple)):
                    item_type = self.prop_item_type(prop)
                    if issubclass(item_type, Base):
                        item_values = tuple(
                            v if isinstance(v, (
                                Base, EnumBase)) else item_type.from_dict(v)
                            for v in prop_value)
                    elif issubclass(item_type, EnumBase):
                        item_values = tuple(
                            get_enum_value(item_type, v) for v in prop_value)
                    else:
                        item_values = tuple(prop_value)
                    setattr(self, prop, item_values)
                else:
                    setattr(self, prop, prop_value)
Example #40
 def one_of(self, *classes):
     return issubclass(self.klass, classes)
Example #41
    def device_template_values(cls, api, show_args):
        def item_matches(item_name, item_id):
            if show_args.id is not None:
                return item_id == show_args.id
            if show_args.name is not None:
                return item_name == show_args.name
            return regex_search(show_args.regex, item_name)

        def template_values(ext_name, template_name, template_id):
            if show_args.workdir is None:
                # Load from vManage via API
                devices_attached = DeviceTemplateAttached.get(api, template_id)
                if devices_attached is None:
                    cls.log_error('Failed to retrieve %s attached devices',
                                  template_name)
                    return None

                try:
                    uuid_list = [uuid for uuid, _ in devices_attached]
                    values = DeviceTemplateValues(
                        api.post(
                            DeviceTemplateValues.api_params(
                                template_id, uuid_list),
                            DeviceTemplateValues.api_path.post))
                except RestAPIException:
                    cls.log_error('Failed to retrieve %s values',
                                  template_name)
                    return None
            else:
                # Load from local backup
                values = DeviceTemplateValues.load(show_args.workdir, ext_name,
                                                   template_name, template_id)
                if values is None:
                    cls.log_debug('Skipped %s. No template values file found.',
                                  template_name)

            return values

        print_buffer = []
        backend = show_args.workdir if show_args.workdir is not None else api
        matched_item_iter = ((index.need_extended_name, item_name, item_id,
                              tag, info)
                             for tag, info, index, item_cls in cls.index_iter(
                                 backend, catalog_iter('template_device'))
                             for item_id, item_name in index
                             if item_matches(item_name, item_id)
                             and issubclass(item_cls, DeviceTemplate))
        for use_ext_name, item_name, item_id, tag, info in matched_item_iter:
            attached_values = template_values(use_ext_name, item_name, item_id)
            if attached_values is None:
                continue

            cls.log_info('Inspecting %s %s values', info, item_name)
            var_names = attached_values.title_dict()
            for csv_id, csv_name, entry in attached_values:
                print_grp = [
                    'Template {name}, device {device}:'.format(name=item_name,
                                                               device=csv_name
                                                               or csv_id)
                ]
                results = Table('Name', 'Value', 'Variable')
                results.extend((var_names.get(var, '<not found>'), value, var)
                               for var, value in entry.items())
                if len(results) > 0:
                    if show_args.csv is not None:
                        filename = 'template_values_{name}_{id}.csv'.format(
                            name=filename_safe(item_name, lower=True),
                            id=csv_name or csv_id)
                        results.save(Path(show_args.csv, filename))
                    print_grp.extend(results.pretty_iter())
                print_buffer.append('\n'.join(print_grp))

        if len(print_buffer) > 0:
            if show_args.csv is not None:
                cls.log_info('Files saved under directory %s', show_args.csv)
            else:
                print('\n\n'.join(print_buffer))
        else:
            match_type = 'ID' if show_args.id is not None else 'name' if show_args.name is not None else 'regex'
            cls.log_warning('No items found with the %s provided', match_type)
Example #42
 def __call__(self, parser, namespace, value, option_string=None):
     namespace.forced_checks.extend(
         name for name, cls in objects.CHECKS.items() if issubclass(cls, GitCheck))
     setattr(namespace, self.dest, value)
Example #43
def pytest_pycollect_makeitem(collector: Any, name: str, obj: Any) -> Any:
    if not isinstance(obj, type) or not issubclass(obj, DataSuite):
        return None
    return MypyDataSuite(name, parent=collector)
Example #44
def make_batched_features_dataset_v2(file_pattern,
                                     batch_size,
                                     features,
                                     reader=core_readers.TFRecordDataset,
                                     label_key=None,
                                     reader_args=None,
                                     num_epochs=None,
                                     shuffle=True,
                                     shuffle_buffer_size=10000,
                                     shuffle_seed=None,
                                     prefetch_buffer_size=dataset_ops.AUTOTUNE,
                                     reader_num_threads=1,
                                     parser_num_threads=2,
                                     sloppy_ordering=False,
                                     drop_final_batch=False):
    """Returns a `Dataset` of feature dictionaries from `Example` protos.

  If the label_key argument is provided, returns a `Dataset` of tuples
  comprising feature dictionaries and a label.

  Example:

  ```
  serialized_examples = [
    features {
      feature { key: "age" value { int64_list { value: [ 0 ] } } }
      feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
      feature { key: "kws" value { bytes_list { value: [ "code", "art" ] } } }
    },
    features {
      feature { key: "age" value { int64_list { value: [] } } }
      feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
      feature { key: "kws" value { bytes_list { value: [ "sports" ] } } }
    }
  ]
  ```

  We can use arguments:

  ```
  features: {
    "age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
    "gender": FixedLenFeature([], dtype=tf.string),
    "kws": VarLenFeature(dtype=tf.string),
  }
  ```

  And the expected output is:

  ```python
  {
    "age": [[0], [-1]],
    "gender": [["f"], ["f"]],
    "kws": SparseTensor(
      indices=[[0, 0], [0, 1], [1, 0]],
      values=["code", "art", "sports"]
      dense_shape=[2, 2]),
  }
  ```

  Args:
    file_pattern: List of files or patterns of file paths containing
      `Example` records. See `tf.io.gfile.glob` for pattern rules.
    batch_size: An int representing the number of records to combine
      in a single batch.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values. See `tf.io.parse_example`.
    reader: A function or class that can be
      called with a `filenames` tensor and (optional) `reader_args` and returns
      a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`.
    label_key: (Optional) A string corresponding to the key under which labels
      are stored in `tf.Examples`. If provided, it must be one of the
      `features` keys; otherwise a `ValueError` is raised.
    reader_args: Additional arguments to pass to the reader class.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If None, cycles through the dataset forever. Defaults to `None`.
    shuffle: A boolean, indicates whether the input should be shuffled. Defaults
      to `True`.
    shuffle_buffer_size: Buffer size of the ShuffleDataset. A large capacity
      ensures better shuffling but would increase memory usage and startup time.
    shuffle_seed: Randomization seed to use for shuffling.
    prefetch_buffer_size: Number of feature batches to prefetch in order to
      improve performance. Recommended value is the number of batches consumed
      per training step. Defaults to auto-tune.
    reader_num_threads: Number of threads used to read `Example` records. If >1,
      the results will be interleaved.
    parser_num_threads: Number of threads to use for parsing `Example` tensors
      into a dictionary of `Feature` tensors.
    sloppy_ordering: If `True`, reading performance will be improved at
      the cost of non-deterministic ordering. If `False`, the order of elements
      produced is deterministic prior to shuffling (elements are still
      randomized if `shuffle=True`; note that if the seed is set, the order
      of elements after shuffling is deterministic). Defaults to `False`.
    drop_final_batch: If `True`, and the batch size does not evenly divide the
      input dataset size, the final smaller batch will be dropped. Defaults to
      `False`.

  Returns:
    A dataset of `dict` elements (or a tuple of `dict` elements and a label).
    Each `dict` maps feature keys to `Tensor` or `SparseTensor` objects.

  Raises:
    TypeError: If `reader` is a `tf.compat.v1.ReaderBase` subclass.
    ValueError: If `label_key` is not one of the `features` keys.
  """
    # Create dataset of all matching filenames
    dataset = dataset_ops.Dataset.list_files(file_pattern,
                                             shuffle=shuffle,
                                             seed=shuffle_seed)

    if isinstance(reader, type) and issubclass(reader, io_ops.ReaderBase):
        raise TypeError(
            "The `reader` argument must return a `Dataset` object. "
            "`tf.ReaderBase` subclasses are not supported. For "
            "example, pass `tf.data.TFRecordDataset` instead of "
            "`tf.TFRecordReader`.")

    # Read `Example` records from files as tensor objects.
    if reader_args is None:
        reader_args = []

    # Read files sequentially (if reader_num_threads=1) or in parallel
    dataset = dataset.apply(
        interleave_ops.parallel_interleave(
            lambda filename: reader(filename, *reader_args),
            cycle_length=reader_num_threads,
            sloppy=sloppy_ordering))

    # Extract values if the `Example` tensors are stored as key-value tuples.
    if dataset_ops.get_legacy_output_types(dataset) == (dtypes.string,
                                                        dtypes.string):
        dataset = dataset_ops.MapDataset(dataset,
                                         lambda _, v: v,
                                         use_inter_op_parallelism=False)

    # Apply dataset repeat and shuffle transformations.
    dataset = _maybe_shuffle_and_repeat(dataset, num_epochs, shuffle,
                                        shuffle_buffer_size, shuffle_seed)

    # NOTE(mrry): We set `drop_remainder=True` when `num_epochs is None` to
    # improve the shape inference, because it makes the batch dimension static.
    # It is safe to do this because in that case we are repeating the input
    # indefinitely, and all batches will be full-sized.
    dataset = dataset.batch(batch_size,
                            drop_remainder=drop_final_batch
                            or num_epochs is None)

    # Parse `Example` tensors to a dictionary of `Feature` tensors.
    dataset = dataset.apply(
        parsing_ops.parse_example_dataset(
            features, num_parallel_calls=parser_num_threads))

    if label_key:
        if label_key not in features:
            raise ValueError(
                "The `label_key` provided (%r) must be one of the `features` keys."
                % label_key)
        dataset = dataset.map(lambda x: (x, x.pop(label_key)))

    dataset = dataset.prefetch(prefetch_buffer_size)
    return dataset
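For orientation, a minimal usage sketch, assuming this function is importable and that matching TFRecord files exist; the file pattern and feature spec are hypothetical:

import tensorflow as tf

dataset = make_batched_features_dataset_v2(
    file_pattern='data/train-*.tfrecord',   # hypothetical files
    batch_size=32,
    features={
        'age': tf.io.FixedLenFeature([], dtype=tf.int64, default_value=-1),
        'gender': tf.io.FixedLenFeature([], dtype=tf.string),
        'kws': tf.io.VarLenFeature(dtype=tf.string),
    },
    label_key='age',        # must be one of the `features` keys
    num_epochs=1,
    shuffle=False)

for features_batch, labels in dataset.take(1):
    print(labels.shape)     # (32,), unless the final batch is short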
Exemple #45
0
def all_estimators(type_filter=None):
    """Get a list of all estimators from sklearn.

    This function crawls the module and gets all classes that inherit
    from BaseEstimator. Classes that are defined in test-modules are not
    included.
    By default meta_estimators such as GridSearchCV are also not included.

    Parameters
    ----------
    type_filter : string, list of string, or None, default=None
        Which kind of estimators should be returned. If None, no filter is
        applied and all estimators are returned.  Possible values are
        'classifier', 'regressor', 'cluster' and 'transformer' to get
        estimators only of these specific types, or a list of these to
        get the estimators that fit at least one of the types.

    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
    """
    # lazy import to avoid circular imports from sklearn.base
    from ._testing import ignore_warnings
    from ..base import (BaseEstimator, ClassifierMixin, RegressorMixin,
                        TransformerMixin, ClusterMixin)

    def is_abstract(c):
        if not hasattr(c, '__abstractmethods__'):
            return False
        if not len(c.__abstractmethods__):
            return False
        return True

    all_classes = []
    modules_to_ignore = {"tests", "externals", "setup", "conftest"}
    root = str(Path(__file__).parent.parent)  # sklearn package
    # Ignore deprecation warnings triggered at import time and from walking
    # packages
    with ignore_warnings(category=FutureWarning):
        for importer, modname, ispkg in pkgutil.walk_packages(
                path=[root], prefix='sklearn.'):
            mod_parts = modname.split(".")
            if (any(part in modules_to_ignore for part in mod_parts)
                    or '._' in modname):
                continue
            module = import_module(modname)
            classes = inspect.getmembers(module, inspect.isclass)
            classes = [(name, est_cls) for name, est_cls in classes
                       if not name.startswith("_")]

            # TODO: Remove when FeatureHasher is implemented in PYPY
            # Skips FeatureHasher for PYPY
            if IS_PYPY and 'feature_extraction' in modname:
                classes = [(name, est_cls) for name, est_cls in classes
                           if name != "FeatureHasher"]

            all_classes.extend(classes)

    all_classes = set(all_classes)

    estimators = [c for c in all_classes
                  if (issubclass(c[1], BaseEstimator) and
                      c[0] != 'BaseEstimator')]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]

    if type_filter is not None:
        if not isinstance(type_filter, list):
            type_filter = [type_filter]
        else:
            type_filter = list(type_filter)  # copy
        filtered_estimators = []
        filters = {'classifier': ClassifierMixin,
                   'regressor': RegressorMixin,
                   'transformer': TransformerMixin,
                   'cluster': ClusterMixin}
        for name, mixin in filters.items():
            if name in type_filter:
                type_filter.remove(name)
                filtered_estimators.extend([est for est in estimators
                                            if issubclass(est[1], mixin)])
        estimators = filtered_estimators
        if type_filter:
            raise ValueError("Parameter type_filter must be 'classifier', "
                             "'regressor', 'transformer', 'cluster' or "
                             "None, got"
                             " %s." % repr(type_filter))

    # drop duplicates, sort for reproducibility
    # itemgetter is used to ensure the sort does not extend to the 2nd item of
    # the tuple
    return sorted(set(estimators), key=itemgetter(0))
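A quick usage sketch; the import path is an assumption (in recent scikit-learn releases the public entry point is sklearn.utils.all_estimators):

from sklearn.utils import all_estimators  # assumed public location

for name, Est in all_estimators(type_filter='classifier')[:5]:
    print(name, Est.__module__)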
Exemple #46
0
 def _validate(self, data):
     label_type = data._get_label_field_type(self.label_field)
     if not issubclass(label_type, self.label_cls):
         raise ValueError(f"Expected field '{self.label_field}' to have type {self.label_cls}; found {label_type}")
Exemple #47
0
 def __lt__(self, other: object) -> bool:
     return issubclass(type(self), type(other))
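Taken on its own, this defines an ordering in which more-derived types sort before their bases. A small self-contained sketch with illustrative class names:

class Node:
    def __lt__(self, other: object) -> bool:
        return issubclass(type(self), type(other))

class Leaf(Node):
    pass

items = sorted([Node(), Leaf()])
print(type(items[0]).__name__)  # Leaf -- the more specific type sorts first

Note that this is not a total order (two unrelated types each compare False against the other), so it is only safe for the narrow kind of use shown here.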
Exemple #48
0
        Grain('week', _('week'), "TRUNC({col}, 'WW')"),
        Grain('month', _('month'), "TRUNC({col}, 'MONTH')"),
        Grain('quarter', _('quarter'), "TRUNC({col}, 'Q')"),
        Grain('year', _('year'), "TRUNC({col}, 'YYYY')"),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        tt = target_type.upper()
        if tt == 'DATE':
            return "{}'".format(dttm.strftime('%Y-%m-%d'))
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))

    @classmethod
    def get_schema_names(cls, inspector):
        schemas = [row[0] for row in inspector.engine.execute('SHOW SCHEMAS')
                   if not row[0].startswith('_')]
        return schemas


class DruidEngineSpec(BaseEngineSpec):
    """Engine spec for Druid.io"""
    engine = 'druid'
    limit_method = LimitMethod.FETCH_MANY
    inner_joins = False


engines = {
    o.engine: o for o in globals().values()
    if inspect.isclass(o) and issubclass(o, BaseEngineSpec)}
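The final dictionary comprehension is a compact registry pattern: every BaseEngineSpec subclass defined at module level registers itself under its engine name. A self-contained sketch with stand-in engine specs:

import inspect

class BaseEngineSpec:
    engine = 'base'

class OracleEngineSpec(BaseEngineSpec):
    engine = 'oracle'

class DruidEngineSpec(BaseEngineSpec):
    engine = 'druid'

# Collect every BaseEngineSpec subclass in this module into a registry
engines = {
    o.engine: o for o in dict(globals()).values()
    if inspect.isclass(o) and issubclass(o, BaseEngineSpec)}

print(sorted(engines))  # ['base', 'druid', 'oracle']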
Exemple #49
0
__author__ = "Jérôme Kieffer"
__contact__ = "*****@*****.**"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "21/03/2022"
__status__ = "stable"

from ._common import *
from ._adsc import *
from ._dectris import *
from ._imxpad import *
from ._rayonix import *
from ._esrf import *
from ._xspectrum import *
from ._psi import *
from ._non_flat import *
from ._others import *
from ._hexagonal import *

ALL_DETECTORS = Detector.registry
detector_factory = Detector.factory
load = NexusDetector.sload

# Expose all the classes, otherwise they are not part of the documentation
import inspect
_detector_class_names = [
    i[0] for i in locals().items()
    if inspect.isclass(i[1]) and issubclass(i[1], Detector)
]
__all__ = _detector_class_names + ["ALL_DETECTORS", "detector_factory", "load"]
Exemple #50
0
def construct_models(nip_model, patch_size=128, distribution_jpeg=50, distribution_down='pool', loss_metric='L2', jpeg_approx='sin'):
    """
    Set up the TF model of the entire acquisition and distribution workflow.
    :param nip_model: name of the NIP class
    :param patch_size: patch size for manipulation training (raw patch - rgb patches will be 4 times as big)
    :param distribution_jpeg: JPEG quality level in the distribution channel
    :param distribution_down: Sub-sampling method in the distribution channel ('pool' or 'bilin')
    :param loss_metric: NIP loss metric: L2, L1 or SSIM
    :param jpeg_approx: rounding approximation used by the differentiable JPEG model ('sin' by default)
    """
    # Sanitize inputs
    if patch_size < 32 or patch_size > 512:
        raise ValueError('The patch size ({}) looks incorrect, typical values should be >= 32 and <= 512'.format(patch_size))

    if distribution_jpeg < 1 or distribution_jpeg > 100:
        raise ValueError('Invalid JPEG quality level ({})'.format(distribution_jpeg))

    nip_class = getattr(pipelines, nip_model, None)
    if not (isinstance(nip_class, type) and issubclass(nip_class, pipelines.NIPModel)):
        supported_nips = [x for x in dir(pipelines) if x != 'NIPModel' and type(getattr(pipelines, x)) is type and issubclass(getattr(pipelines, x), pipelines.NIPModel)]
        raise ValueError('Invalid NIP model ({})! Available NIPs: ({})'.format(nip_model, supported_nips))
        
    if loss_metric not in ['L2', 'L1', 'SSIM']:
        raise ValueError('Invalid loss metric ({})!'.format(loss_metric))
    
    tf.reset_default_graph()
    sess = tf.Session()

    # The pipeline -----------------------------------------------------------------------------------------------------

    model = getattr(pipelines, nip_model)(sess, tf.get_default_graph(), patch_size=patch_size, loss_metric=loss_metric)
    print('NIP network: {}'.format(model.summary()))

    # Several paths for post-processing --------------------------------------------------------------------------------
    with tf.name_scope('distribution'):

        # Sharpen    
        im_shr = tf_helpers.tf_sharpen(model.y, 0, hsv=True)

        # Bilinear resampling
        im_res = tf.image.resize_images(model.y, [tf.shape(model.y)[1] // 2, tf.shape(model.y)[1] // 2])
        im_res = tf.image.resize_images(im_res, [tf.shape(model.y)[1], tf.shape(model.y)[1]])

        # Gaussian filter
        im_gauss = tf_helpers.tf_gaussian(model.y, 5, 4)

        # Mild JPEG
        tf_jpg = DJPG(sess, tf.get_default_graph(), model.y, None, quality=80, rounding_approximation=jpeg_approx)
        im_jpg = tf_jpg.y

        # Setup operations for detection
        operations = (model.y, im_shr, im_gauss, im_jpg, im_res)
        forensics_classes = ['native', 'sharpen', 'gaussian', 'jpg', 'resample']

        n_classes = len(operations)

        # Concatenate outputs from multiple post-processing paths ------------------------------------------------------
        y_concat = tf.concat(operations, axis=0)

        # Add sub-sampling and JPEG compression in the channel ---------------------------------------------------------
        if distribution_down == 'pool':
            imb_down = tf.nn.avg_pool(y_concat, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME', name='post_downsample')
        elif distribution_down == 'bilin':
            imb_down = tf.image.resize_images(y_concat, [tf.shape(y_concat)[1] // 2, tf.shape(y_concat)[1] // 2])
        else:
            raise ValueError('Unsupported channel down-sampling {}'.format(distribution_down))
            
        jpg = DJPG(sess, tf.get_default_graph(), imb_down, model.x, quality=distribution_jpeg, rounding_approximation=jpeg_approx)
        imb_out = jpg.y

    # Add manipulation detection
    fan = FAN(sess, tf.get_default_graph(), n_classes=n_classes, x=imb_out, nip_input=model.x, n_convolutions=4)
    print('Forensics network parameters: {:,}'.format(fan.count_parameters()))

    # Setup a combined loss and training op
    with tf.name_scope('combined_optimization') as scope:
        nip_fw = tf.placeholder(tf.float32, name='nip_weight')
        lr = tf.placeholder(tf.float32, name='learning_rate')
        loss = fan.loss + nip_fw * model.loss
        adam = tf.train.AdamOptimizer(learning_rate=lr, name='adam')
        opt = adam.minimize(loss, name='opt_combined')
    
    # Initialize all variables
    sess.run(tf.global_variables_initializer())
    
    tf_ops = {
        'sess': sess,
        'nip': model,
        'fan': fan,
        'loss': loss,
        'opt': opt,
        'lr': lr,
        'lambda': nip_fw,
        'operations': operations,
    }
        
    distribution = {    
        'forensics_classes': forensics_classes,
        'channel_jpeg_quality': distribution_jpeg,
        'channel_downsampling': distribution_down,
        'jpeg_approximation': jpeg_approx
    }
    
    return tf_ops, distribution
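The name-to-class resolution guard at the top of this function is a reusable idiom. A standalone sketch with hypothetical stand-in classes in place of the real pipelines module:

import types

# Hypothetical stand-in for the `pipelines` module
pipelines = types.SimpleNamespace()

class NIPModel:
    pass

class UNet(NIPModel):
    pass

pipelines.NIPModel = NIPModel
pipelines.UNet = UNet

def resolve_nip(name):
    cls = getattr(pipelines, name, None)
    if not (isinstance(cls, type) and issubclass(cls, pipelines.NIPModel)):
        supported = [x for x in vars(pipelines)
                     if isinstance(getattr(pipelines, x), type)
                     and issubclass(getattr(pipelines, x), pipelines.NIPModel)
                     and x != 'NIPModel']
        raise ValueError('Invalid NIP model ({})! Available NIPs: {}'.format(name, supported))
    return cls

print(resolve_nip('UNet').__name__)  # UNet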
Exemple #51
0
    def exports(self):
        context_reportable = IReportable(self.context)
        if self.class_context and hasattr(context_reportable,
                                          'set_class_context'):
            context_reportable.set_class_context(self.class_context)

        yield context_reportable

        if hasattr(context_reportable, 'export_as_bases'):
            # The idea here is to give the ability to export something both as
            # itself and as a more generic type (one of its base classes).
            # For example, an OpenStack Endpoint is both an openstack endpoint
            # and a Device.  Therefore I would like it to end up in both
            # dim_openstack_endpoint and dim_device.

            for class_ in context_reportable.export_as_bases:
                if class_ == self.class_context:
                    # no need to re-export as ourself..
                    continue

                reportable_factory_class = adapter_for_class(
                    class_, IReportableFactory)
                reportable_class = adapter_for_class(class_, IReportable)

                # The problem is that normally, a Reportable or ReportableFactory
                # does not know what class it is adapting.  It therefore tends
                # to rely on the model object to tell it what to export, and
                # most of the reportables export all properties and relationships
                # of the supplied object.
                #
                # In this situation, though, we want to export, say, an Endpoint
                # as if it was a Device, and therefore to only export the
                # properties and relationships defined in the Device class.
                #
                # The only way to make this work is to introduce the idea of
                # class-context to Reportable (and ReportableFactory).
                #
                # A class-context-aware Reportable or ReportableFactory has
                # an additional method, set_class_context(), which is passed
                # a class object.
                #
                # The default behavior is still the same- if set_class_context
                # has not been used, the reportable should behave as it does
                # today.
                #
                # However, in this specific situation (export_as_bases), if a
                # class-context-aware ReportableFactory is available, I will
                # use it (and expect it to pass that class context on to the
                # reportables it generates).
                #
                # Otherwise, I will create the single reportable directly, not
                # using any reportable factory, because I can't trust an
                # existing factory that doesn't realize that it's dealing
                # with a base class, not the actual object class, to not
                # duplicate all the exports I have already done.
                factory = reportable_factory_class(self.context)

                if hasattr(reportable_factory_class, 'set_class_context'):
                    factory.set_class_context(class_)
                    for export in factory.exports():
                        yield export
                else:
                    yield reportable_class(self.context)

        relations = getattr(self.context, '_relations', tuple())
        for relName, relation in relations:
            if isinstance(relation, ToMany) and \
               issubclass(relation.remoteType, ToMany):

                # For a many-many relationship, we need to implement a
                # reportable to represent the relationship, if we're
                # on the proper end of it.  Really, either end will work,
                # but we need something deterministic, so just go with
                # whichever end has the alphabetically earliest relname.
                if min(relation.remoteName, relName) == relName:
                    related = getattr(self.context, relName, None)
                    # guard: a missing relationship would otherwise make the
                    # loop below iterate over None
                    related = related() if related else []

                    entity_class_name = "%s_to_%s" % (
                        IReportable(self.context).entity_class_name,
                        un_camel(
                            importClass(relation.remoteClass, None).meta_type))

                    for remoteObject in related:
                        yield BaseManyToManyReportable(
                            fromObject=self.context,
                            toObject=remoteObject,
                            entity_class_name=entity_class_name)
        if hasattr(self.context, 'os') \
                and hasattr(self.context.os, 'software') \
                and isinstance(self.context, Device):
            for sw in chain((self.context.os, ), self.context.os.software()):
                yield IReportable(sw)
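A stripped-down sketch of the class-context idea described in the comments above, with hypothetical classes; the real adapters come from the surrounding framework:

class Reportable:
    def __init__(self, obj):
        self.obj = obj
        self.class_context = type(obj)

    def set_class_context(self, cls):
        # export only the fields defined on `cls`, not on type(self.obj)
        self.class_context = cls

class Device: pass
class Endpoint(Device): pass

r = Reportable(Endpoint())
if hasattr(r, 'set_class_context'):
    r.set_class_context(Device)  # export the Endpoint as if it were a Device
print(r.class_context.__name__)  # Device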
Exemple #52
0

def test_generator_enqueuer_threadsafe():
    enqueuer = GeneratorEnqueuer(create_generator_from_sequence_pcs(
        DummySequence([3, 10, 10, 3])),
                                 use_multiprocessing=False)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    with pytest.raises(RuntimeError) as e:
        [next(gen_output) for _ in range(10)]
    assert 'thread-safe' in str(e.value)
    enqueuer.stop()


# TODO: resolve flakiness issue. Tracked with #11587
@flaky(rerun_filter=lambda err, *args: issubclass(err[0], StopIteration))
def test_generator_enqueuer_fail_threads():
    enqueuer = GeneratorEnqueuer(create_generator_from_sequence_threads(
        FaultSequence()),
                                 use_multiprocessing=False)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    with pytest.raises(IndexError):
        next(gen_output)


@skip_generators
def test_generator_enqueuer_fail_processes():
    enqueuer = GeneratorEnqueuer(create_generator_from_sequence_pcs(
        FaultSequence()),
                                 use_multiprocessing=True)
Exemple #53
0
 def _check_for_existing_members(class_name, bases):
     for chain in bases:
         for base in chain.__mro__:
             if issubclass(base, Enum) and base._member_names_:
                 raise TypeError("%s: cannot extend enumeration %r" %
                                 (class_name, base.__name__))
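What this guard enforces is visible from ordinary enum usage; the exact message text varies between Python versions:

from enum import Enum

class Color(Enum):
    RED = 1

try:
    class MoreColor(Color):  # subclassing an Enum that already has members
        BLUE = 2
except TypeError as exc:
    print(exc)  # e.g. "MoreColor: cannot extend enumeration 'Color'"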
Exemple #54
0
"""
    These functions are provided as helper functions for running
    feature extraction on a single neuron_reconstruction.
    run_feature_extraction is used by the main feature_extractor function,
    and must be in a separate .py file due to how Windows
    handles multiprocessing
"""



# this is a little hack to get a look-up table for the built-in marks
well_known_marks: Dict[str, Type[Mark]] = {}
for item_name in dir(_mark):
    item = getattr(_mark, item_name)
    if inspect.isclass(item) and issubclass(item, Mark) and item is not Mark:
        well_known_marks[item_name] = item
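The same hack in generic form, for collecting the concrete subclasses of a base class exported by a module (names here are illustrative):

import inspect

def collect_subclasses(module, base):
    # map class name -> class for every strict subclass of `base` in `module`
    return {name: obj for name, obj in vars(module).items()
            if inspect.isclass(obj) and issubclass(obj, base) and obj is not base}

# e.g. well_known_marks = collect_subclasses(_mark, Mark)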


known_feature_sets = {
    "aibs_default": default_features
}


def resolve_reference_layer_depths(key=None, names=None, boundaries=None):
    """ Given either the name of a well known depths set or a set of names and 
    corresponding boundaries, produce a ReferenceLayerDepths

    Parameters
    ----------
    key : of a well known reference layer
Exemple #55
0
 def __set_name__(self, enum_class, member_name):
     """
     convert each quasi-member into an instance of the new enum class
     """
     # first step: remove ourself from enum_class
     delattr(enum_class, member_name)
     # second step: create member based on enum_class
     value = self.value
     if not isinstance(value, tuple):
         args = (value, )
     else:
         args = value
     if enum_class._member_type_ is tuple:  # special case for tuple enums
         args = (args, )  # wrap it one more time
     if not enum_class._use_args_:
         enum_member = enum_class._new_member_(enum_class)
         if not hasattr(enum_member, '_value_'):
             enum_member._value_ = value
     else:
         enum_member = enum_class._new_member_(enum_class, *args)
         if not hasattr(enum_member, '_value_'):
             if enum_class._member_type_ is object:
                 enum_member._value_ = value
             else:
                 try:
                     enum_member._value_ = enum_class._member_type_(*args)
                 except Exception as exc:
                     raise TypeError(
                         '_value_ not set in __new__, unable to create it'
                     ) from None
     value = enum_member._value_
     enum_member._name_ = member_name
     enum_member.__objclass__ = enum_class
     enum_member.__init__(*args)
     enum_member._sort_order_ = len(enum_class._member_names_)
     # If another member with the same value was already defined, the
     # new member becomes an alias to the existing one.
     for name, canonical_member in enum_class._member_map_.items():
         if canonical_member._value_ == enum_member._value_:
             enum_member = canonical_member
             break
     else:
         # this could still be an alias if the value is multi-bit and the
         # class is a flag class
         if (Flag is None or not issubclass(enum_class, Flag)):
             # no other instances found, record this member in _member_names_
             enum_class._member_names_.append(member_name)
         elif (Flag is not None and issubclass(enum_class, Flag)
               and _is_single_bit(value)):
             # no other instances found, record this member in _member_names_
             enum_class._member_names_.append(member_name)
     # get redirect in place before adding to _member_map_
     # but check for other instances in parent classes first
     need_override = False
     descriptor = None
     for base in enum_class.__mro__[1:]:
         descriptor = base.__dict__.get(member_name)
         if descriptor is not None:
             if isinstance(descriptor, (property, DynamicClassAttribute)):
                 break
             else:
                 need_override = True
                 # keep looking for an enum.property
     if descriptor and not need_override:
         # previous enum.property found, no further action needed
         pass
     else:
         redirect = property()
         redirect.__set_name__(enum_class, member_name)
         if descriptor and need_override:
             # previous enum.property found, but some other inherited attribute
             # is in the way; copy fget, fset, fdel to this one
             redirect.fget = descriptor.fget
             redirect.fset = descriptor.fset
             redirect.fdel = descriptor.fdel
         setattr(enum_class, member_name, redirect)
     # now add to _member_map_ (even aliases)
     enum_class._member_map_[member_name] = enum_member
     try:
         # This may fail if value is not hashable. We can't add the value
         # to the map, and by-value lookups for this value will be
         # linear.
         enum_class._value2member_map_.setdefault(value, enum_member)
     except TypeError:
         pass
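The alias branch near the middle of this method (a new member with an already-seen value becomes an alias of the canonical member) is observable from ordinary Enum definitions:

from enum import Enum

class Status(Enum):
    OK = 0
    GOOD = 0  # same value: becomes an alias of OK, not a new member

print(Status.GOOD is Status.OK)  # True
print([m.name for m in Status])  # ['OK'] -- aliases are skipped when iterating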
Exemple #56
0
    def read_ds(self, name, *args):
        """
        Function to read and prepare a datasets.

        Calls read_ts of the dataset.

        Takes either 1 (gpi) or 2 (lon, lat) arguments.

        Parameters
        ----------
        name : string
            Name of the other dataset.
        gpi : int
            Grid point index
        lon : float
            Longitude of point
        lat : float
            Latitude of point

        Returns
        -------
        data_df : pandas.DataFrame or None
            Data DataFrame.

        """
        ds = self.datasets[name]
        args = list(args)
        args.extend(ds['args'])

        try:
            func = getattr(ds['class'], self.read_ts_names[name])
            data_df = func(*args, **ds['kwargs'])
            if isinstance(data_df, TS):
                data_df = data_df.data
        except IOError:
            warnings.warn(
                "IOError while reading dataset {} with args {:}".format(
                    name, args))
            return None
        except RuntimeError as e:
            if e.args[0] == "No such file or directory":
                warnings.warn(
                    "IOError while reading dataset {} with args {:}".format(
                        name, args))
                return None
            else:
                raise

        if len(data_df) == 0:
            warnings.warn("No data for dataset {}".format(name))
            return None

        if not isinstance(data_df, pd.DataFrame):
            warnings.warn("Data is not a DataFrame {:}".format(args))
            return None

        if self.period is not None:
            # here we use the isoformat since pandas slice behavior is
            # different when using datetime objects.
            data_df = data_df[self.period[0].isoformat():self.period[1].
                              isoformat()]

        if len(data_df) == 0:
            warnings.warn("No data for dataset {} with arguments {:}".format(
                name, args))
            return None

        else:
            return data_df
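The period filter above relies on pandas' inclusive string-based slicing of a DatetimeIndex; a small illustration (the data and dates are made up):

import pandas as pd
from datetime import datetime

df = pd.DataFrame({'sm': range(5)},
                  index=pd.date_range('2020-01-01', periods=5, freq='D'))
start, end = datetime(2020, 1, 2), datetime(2020, 1, 4)

# String slicing on a DatetimeIndex is inclusive on both endpoints
print(df[start.isoformat():end.isoformat()])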
Exemple #57
0
 def validate(self, key, cls, inst):
     # validate raw input
     if issubclass(cls, Unicode) and not isinstance(inst, six.string_types):
         raise ValidationError((key, inst))
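A minimal sketch of the same check with stand-in types; the real code uses spyne's Unicode model and ValidationError, and six.string_types rather than str:

class Unicode:
    pass

class ValidationError(ValueError):
    pass

def validate(key, cls, inst):
    # raw input for a Unicode-derived type must be an actual string
    if issubclass(cls, Unicode) and not isinstance(inst, str):
        raise ValidationError((key, inst))

validate('name', Unicode, 'ok')   # passes silently
# validate('name', Unicode, 42)   # would raise ValidationError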
Exemple #58
0
 def __new__(metacls, cls, bases, classdict, boundary=None, **kwds):
     # an Enum class is final once enumeration items have been defined; it
     # cannot be mixed with other types (int, float, etc.) if it has an
     # inherited __new__ unless a new __new__ is defined (or the resulting
     # class will fail).
     #
     # remove any keys listed in _ignore_
     classdict.setdefault('_ignore_', []).append('_ignore_')
     ignore = classdict['_ignore_']
     for key in ignore:
         classdict.pop(key, None)
     #
     # grab member names
     member_names = classdict._member_names
     #
     # check for illegal enum names (any others?)
     invalid_names = set(member_names) & {'mro', ''}
     if invalid_names:
         raise ValueError('Invalid enum member name: {0}'.format(
             ','.join(invalid_names)))
     #
     # adjust the sunders
     _order_ = classdict.pop('_order_', None)
     # convert to normal dict
     classdict = dict(classdict.items())
     #
     # data type of member and the controlling Enum class
     member_type, first_enum = metacls._get_mixins_(cls, bases)
     __new__, save_new, use_args = metacls._find_new_(
         classdict,
         member_type,
         first_enum,
     )
     classdict['_new_member_'] = __new__
     classdict['_use_args_'] = use_args
     #
     # convert future enum members into temporary _proto_members
     # and record integer values in case this will be a Flag
     flag_mask = 0
     for name in member_names:
         value = classdict[name]
         if isinstance(value, int):
             flag_mask |= value
         classdict[name] = _proto_member(value)
     #
     # house-keeping structures
     classdict['_member_names_'] = []
     classdict['_member_map_'] = {}
     classdict['_value2member_map_'] = {}
     classdict['_member_type_'] = member_type
     #
      # Flag structures (will be removed if the final class is not a Flag)
     classdict['_boundary_'] = (boundary
                                or getattr(first_enum, '_boundary_', None))
     classdict['_flag_mask_'] = flag_mask
     classdict['_all_bits_'] = 2**((flag_mask).bit_length()) - 1
     classdict['_inverted_'] = None
     #
     # If a custom type is mixed into the Enum, and it does not know how
     # to pickle itself, pickle.dumps will succeed but pickle.loads will
     # fail.  Rather than have the error show up later and possibly far
     # from the source, sabotage the pickle protocol for this class so
     # that pickle.dumps also fails.
     #
     # However, if the new class implements its own __reduce_ex__, do not
     # sabotage -- it's on them to make sure it works correctly.  We use
     # __reduce_ex__ instead of any of the others as it is preferred by
     # pickle over __reduce__, and it handles all pickle protocols.
     if '__reduce_ex__' not in classdict:
         if member_type is not object:
             methods = ('__getnewargs_ex__', '__getnewargs__',
                        '__reduce_ex__', '__reduce__')
             if not any(m in member_type.__dict__ for m in methods):
                 _make_class_unpicklable(classdict)
     #
     # create a default docstring if one has not been provided
     if '__doc__' not in classdict:
         classdict['__doc__'] = 'An enumeration.'
     try:
         exc = None
         enum_class = super().__new__(metacls, cls, bases, classdict,
                                      **kwds)
     except RuntimeError as e:
         # any exceptions raised by member.__new__ will get converted to a
         # RuntimeError, so get that original exception back and raise it instead
         exc = e.__cause__ or e
     if exc is not None:
         raise exc
     #
     # double check that repr and friends are not the mixin's or various
     # things break (such as pickle)
     # however, if the method is defined in the Enum itself, don't replace
     # it
     for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
         if name in classdict:
             continue
         class_method = getattr(enum_class, name)
         obj_method = getattr(member_type, name, None)
         enum_method = getattr(first_enum, name, None)
         if obj_method is not None and obj_method is class_method:
             setattr(enum_class, name, enum_method)
     #
     # replace any other __new__ with our own (as long as Enum is not None,
     # anyway) -- again, this is to support pickle
     if Enum is not None:
         # if the user defined their own __new__, save it before it gets
         # clobbered in case they subclass later
         if save_new:
             enum_class.__new_member__ = __new__
         enum_class.__new__ = Enum.__new__
     #
     # py3 support for definition order (helps keep py2/py3 code in sync)
     #
     # _order_ checking is spread out into three/four steps
     # - if enum_class is a Flag:
     #   - remove any non-single-bit flags from _order_
     # - remove any aliases from _order_
     # - check that _order_ and _member_names_ match
     #
     # step 1: ensure we have a list
     if _order_ is not None:
         if isinstance(_order_, str):
             _order_ = _order_.replace(',', ' ').split()
     #
     # remove Flag structures if final class is not a Flag
     if (Flag is None and cls != 'Flag'
             or Flag is not None and not issubclass(enum_class, Flag)):
         delattr(enum_class, '_boundary_')
         delattr(enum_class, '_flag_mask_')
         delattr(enum_class, '_all_bits_')
         delattr(enum_class, '_inverted_')
     elif Flag is not None and issubclass(enum_class, Flag):
         # ensure _all_bits_ is correct and there are no missing flags
         single_bit_total = 0
         multi_bit_total = 0
         for flag in enum_class._member_map_.values():
             flag_value = flag._value_
             if _is_single_bit(flag_value):
                 single_bit_total |= flag_value
             else:
                 # multi-bit flags are considered aliases
                 multi_bit_total |= flag_value
         if enum_class._boundary_ is not KEEP:
             missed = list(
                 _iter_bits_lsb(multi_bit_total & ~single_bit_total))
             if missed:
                 raise TypeError('invalid Flag %r -- missing values: %s' %
                                 (cls, ', '.join((str(i) for i in missed))))
         enum_class._flag_mask_ = single_bit_total
         #
         # set correct __iter__
         member_list = [m._value_ for m in enum_class]
         if member_list != sorted(member_list):
             enum_class._iter_member_ = enum_class._iter_member_by_def_
         if _order_:
             # _order_ step 2: remove any items from _order_ that are not single-bit
             _order_ = [
                 o for o in _order_ if o not in enum_class._member_map_
                 or _is_single_bit(enum_class[o]._value_)
             ]
     #
     if _order_:
         # _order_ step 3: remove aliases from _order_
         _order_ = [
             o for o in _order_ if (o not in enum_class._member_map_ or (
                 o in enum_class._member_map_
                 and o in enum_class._member_names_))
         ]
         # _order_ step 4: verify that _order_ and _member_names_ match
         if _order_ != enum_class._member_names_:
             raise TypeError(
                 'member order does not match _order_:\n%r\n%r' %
                 (enum_class._member_names_, _order_))
     #
     return enum_class
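The _order_ verification at the end of this method can be exercised directly from user code:

from enum import Enum

class Weekday(Enum):
    _order_ = 'MON TUE'  # must list members in definition order
    MON = 1
    TUE = 2

# Swapping the declarations (TUE before MON) while keeping the same _order_
# string raises: TypeError: member order does not match _order_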
Exemple #59
0
    def object_to_simple_dict(self, inst_cls, value, retval=None,
                   prefix=None, subvalue_eater=lambda prot, v, t: v, tags=None):
        """Converts a native python object to a flat dict.

        See :func:`spyne.model.complex.ComplexModelBase.get_flat_type_info`.
        """

        if retval is None:
            retval = {}

        if prefix is None:
            prefix = []

        if value is None and inst_cls.Attributes.min_occurs == 0:
            return retval

        if tags is None:
            tags = set([id(value)])
        else:
            if id(value) in tags:
                return retval

        if issubclass(inst_cls, ComplexModelBase):
            fti = inst_cls.get_flat_type_info(inst_cls)

            for k, v in fti.items():
                new_prefix = list(prefix)
                new_prefix.append(k)
                subvalue = getattr(value, k, None)

                if (issubclass(v, Array) or v.Attributes.max_occurs > 1) and \
                                                           subvalue is not None:
                    if issubclass(v, Array):
                        subtype, = v._type_info.values()
                    else:
                        subtype = v

                    if issubclass(subtype, SimpleModel):
                        key = self.hier_delim.join(new_prefix)
                        l = []
                        for ssv in subvalue:
                            l.append(subvalue_eater(self, ssv, subtype))
                        retval[key] = l

                    else:
                        last_prefix = new_prefix[-1]
                        for i, ssv in enumerate(subvalue):
                            new_prefix[-1] = '%s[%d]' % (last_prefix, i)
                            self.object_to_simple_dict(subtype, ssv,
                                   retval, new_prefix,
                                   subvalue_eater=subvalue_eater, tags=tags)

                else:
                    self.object_to_simple_dict(v, subvalue, retval, new_prefix,
                                       subvalue_eater=subvalue_eater, tags=tags)

        else:
            key = self.hier_delim.join(prefix)

            if key in retval:
                raise ValueError("%r.%s conflicts with previous value %r" %
                                                    (inst_cls, key, retval[key]))

            retval[key] = subvalue_eater(self, value, inst_cls)

        return retval
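A standalone sketch of the flattening idea, not spyne's actual API: nested containers collapse into delimiter-joined keys, with list items indexed in brackets, as in the method above.

def flatten(obj, prefix=(), out=None, delim='.'):
    if out is None:
        out = {}
    if isinstance(obj, dict):
        for k, v in obj.items():
            flatten(v, prefix + (k,), out, delim)
    elif isinstance(obj, list):
        for i, v in enumerate(obj):
            # index list items into the last prefix segment: key[0], key[1], ...
            flatten(v, prefix[:-1] + ('%s[%d]' % (prefix[-1], i),), out, delim)
    else:
        out[delim.join(prefix)] = obj
    return out

print(flatten({'a': {'b': [1, 2]}}))  # {'a.b[0]': 1, 'a.b[1]': 2}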
Exemple #60
0
    def _doc_to_object(self, cls, doc, validator=None):
        if doc is None:
            return []

        if issubclass(cls, Array):
            retval = []
            (serializer,) = cls._type_info.values()

            for i, child in enumerate(doc):
                retval.append(self._from_dict_value(i, serializer, child,
                                                                    validator))

            return retval

        if not self.ignore_wrappers:
            if not isinstance(doc, dict):
                raise ValidationError("Wrapper documents must be dicts")
            if len(doc) == 0:
                return None
            if len(doc) > 1:
                raise ValidationError(doc, "There can be only one entry in a "
                                                                 "wrapper dict")

            subclasses = cls.get_subclasses()
            (class_name, doc), = doc.items()
            if cls.get_type_name() != class_name and subclasses is not None \
                                                        and len(subclasses) > 0:
                for subcls in subclasses:
                    if subcls.get_type_name() == class_name:
                        break

                if not self.issubclass(subcls, cls):
                    raise ValidationError(class_name,
                             "Class name %%r is not a subclass of %r" %
                                                            cls.get_type_name())
                cls = subcls

        inst = cls.get_deserialization_instance()

        # get all class attributes, including the ones coming from parent classes.
        flat_type_info = cls.get_flat_type_info(cls)

        # this is for validating cls.Attributes.{min,max}_occurs
        frequencies = defaultdict(int)

        try:
            items = doc.items()
        except AttributeError:
            # Input is not a dict, so we assume it's a sequence that we can
            # pair up with the field names, in declaration order.
            # TODO: cache this
            items = zip([k for k,v in flat_type_info.items()
                         if not get_cls_attrs(self, v).exc], doc)

        # parse input to set incoming data to related attributes.
        for k, v in items:
            member = flat_type_info.get(k, None)
            if member is None:
                member, k = flat_type_info.alt.get(k, (None, k))
                if member is None:
                    continue

            attr = get_cls_attrs(self, member)

            mo = attr.max_occurs
            if mo > 1:
                subinst = getattr(inst, k, None)
                if subinst is None:
                    subinst = []

                for a in v:
                    subinst.append(self._from_dict_value(k, member, a, validator))

            else:
                subinst = self._from_dict_value(k, member, v, validator)

            inst._safe_set(k, subinst, member)

            frequencies[k] += 1

        if validator is self.SOFT_VALIDATION and cls.Attributes.validate_freq:
            _check_freq_dict(cls, frequencies, flat_type_info)

        return inst
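A minimal sketch of the wrapper-dict dispatch used above: pick the subclass whose type name matches the single key of the wrapper document (stand-in classes, not spyne's API):

class Base: pass
class Child(Base): pass

def resolve(doc, cls):
    (class_name, body), = doc.items()
    for subcls in cls.__subclasses__():
        if subcls.__name__ == class_name:
            return subcls, body
    return cls, body

print(resolve({'Child': {'x': 1}}, Base))  # (<class 'Child'>, {'x': 1})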