Code example #1
    def save(self, force_insert=False, force_update=False):
        if getattr(settings, 'DPP_IE_COMPATIBLE_PDF_VIEWER', True) and self.old_document != self.document:
            self.pdf_images_generated = False
        else:
            self.pdf_images_generated = True
        super(Document, self).save(force_insert, force_update)
        #print "pdf_images_generated set to: " + str(self.pdf_images_generated)

        # Delete old document
        if self.old_document and self.old_document != self.document:
            if os.path.exists(self.old_document.path):
                os.remove(self.old_document.path)
                #print "Old document deleted from path: " + self.old_document.path
        
        if self.old_document != self.document:
            cmd = u"python manage.py createpages " + str(self.id) + " --settings=" + settings.SETTINGS_MODULE
            subprocess.Popen(cmd, shell=True)
            #print "New page creation process started..."
        
        # Creating images when DPP_IE_COMPATIBLE_PDF_VIEWER=True in settings.py    
        if getattr(settings, 'DPP_IE_COMPATIBLE_PDF_VIEWER', True) and self.old_document != self.document:
            cmd = u"python manage.py generatepdfimages " + str(self.id) + " --settings=" + settings.SETTINGS_MODULE
            subprocess.Popen(cmd, shell=True)
            #print "Image generation process started..."
        
        self.old_document = self.document
Code example #2
File: extractabc.py  Project: haibocheng/fusion
def main():
    if len(sys.argv) == 2:
        filename = sys.argv[1]
    else:
        error('no filename passed')

    filename = os.path.abspath(filename)
    nex, ext = os.path.splitext(filename)

    if not os.path.exists(filename):
        error('cannot find file %s' % (filename,))

    if ext == ".swf":
        abcs = SwfData.from_filename(filename).read_tags((DoABC, DoABCDefine))
    else:
        error('cannot parse a %s file' % (ext,))

    for i, abc in enumerate(abcs):
        name = getattr(abc, "name", None) or "%s_%d" % (nex, i)
        abc  = getattr(abc, "abc", abc)
        data = abc.serialize(optimize=False)
        f = open(name+".abc", "w")
        f.write(data)
        f.close()
        print "wrote %s.abc, %s" % (name, sizeof_fmt(len(data)))
Code example #3
File: runner.py  Project: newfoundry/django-nose
def _get_plugins_from_settings():
    plugins = (list(getattr(settings, 'NOSE_PLUGINS', [])) +
               ['django_nose.plugin.TestReorderer'])
    for plug_path in plugins:
        try:
            dot = plug_path.rindex('.')
        except ValueError:
            raise exceptions.ImproperlyConfigured(
                    "%s isn't a Nose plugin module" % plug_path)
        p_mod, p_classname = plug_path[:dot], plug_path[dot + 1:]

        try:
            mod = import_module(p_mod)
        except ImportError as e:
            raise exceptions.ImproperlyConfigured(
                    'Error importing Nose plugin module %s: "%s"' % (p_mod, e))

        try:
            p_class = getattr(mod, p_classname)
        except AttributeError:
            raise exceptions.ImproperlyConfigured(
                    'Nose plugin module "%s" does not define a "%s"' %
                    (p_mod, p_classname))

        yield p_class()
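The generator above resolves dotted-path strings from Django settings into classes and yields instances. A minimal, self-contained sketch of the same dotted-path lookup outside Django (the helper name and example path are assumptions, not part of django-nose):

from importlib import import_module

def load_class(dotted_path):
    # 'package.module.ClassName' -> the ClassName object from that module
    module_path, _, class_name = dotted_path.rpartition('.')
    module = import_module(module_path)
    return getattr(module, class_name)

print(load_class('collections.OrderedDict')())  # OrderedDict()
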
Code example #4
File: handler.py  Project: ArthurGarnier/geni-tools
    def _handle(self, args):
        if len(args) == 0:
            self._raise_omni_error('Insufficient number of arguments - Missing command to run')
        
        call = args[0].lower()
        # disallow calling private methods
        if call.startswith('_'):
            return
    
        if hasattr(self, call):
            return getattr(self, call)(args[1:])
        elif hasattr(self.chhandler, call):
            return getattr(self.chhandler, call)(args[1:])
        elif hasattr(self.amhandler, call):
            # Extract the slice name arg and put it in an option
            self.amhandler.opts.sliceName = self.amhandler._extractSliceArg(args)

            # Try to auto-correct API version
            msg = self.amhandler._correctAPIVersion(args)
            if msg is None:
                msg = ""

            (message, val) = getattr(self.amhandler,call)(args[1:])
            if message is None:
                message = ""
            return (msg+message, val)
        else:
            self._raise_omni_error('Unknown function: %s' % call)
Code example #5
File: event.py  Project: iambibhas/hacknight
def event_update_participant_status(profile, event):
    if request.is_xhr:
        if profile.userid not in g.user.user_organizations_owned_ids():
            abort(403)
        participantid = int(request.form['participantid'])
        status = int(request.form['status'])
        participant = Participant.query.get(participantid)

        if participant.event != event:
            abort(403)
        if participant.status == PARTICIPANT_STATUS.WITHDRAWN:
            abort(403)
        if participant.status != status:
            participant.status = status
            try:
                text_message = unicode(getattr(event, (participants_email_attrs[status] + '_text')))
                text_message = text_message.replace("*|FULLNAME|*", participant.user.fullname)
                message = unicode(getattr(event, participants_email_attrs[status]))
                message = message.replace("*|FULLNAME|*", participant.user.fullname)
                if message and g.user.email:
                    send_email(sender=(g.user.fullname, g.user.email), to=participant.email,
                    subject="%s - Hacknight participation status" % event.title , body=text_message, html=message)
            except KeyError:
                pass
            db.session.commit()
        return "Done"
    abort(403)
Code example #6
File: pts.py  Project: aaronknister/pyafs
 def _loadEntry(self):
     if not hasattr(self, '_flags'):
         info = self._pts._ListEntry(self._id)
         for field in self._attrs:
             setattr(self, '_%s' % field, getattr(info, field))
         for field in self._entry_attrs:
             setattr(self, '_%s' % field, self._pts.getEntry(getattr(info, field)))
Code example #7
def connectToDB():
    """
    _connectToDB_
    
    Connect to the database specified in the WMAgent config.
    """
    if not os.environ.has_key("WMAGENT_CONFIG"):
        print "Please set WMAGENT_CONFIG to point at your WMAgent configuration."
        sys.exit(1)
        
    if not os.path.exists(os.environ["WMAGENT_CONFIG"]):
        print "Can't find config: %s" % os.environ["WMAGENT_CONFIG"]
        sys.exit(1)

    wmAgentConfig = loadConfigurationFile(os.environ["WMAGENT_CONFIG"])
    
    if not hasattr(wmAgentConfig, "CoreDatabase"):
        print "Your config is missing the CoreDatabase section."

    socketLoc = getattr(wmAgentConfig.CoreDatabase, "socket", None)
    connectUrl = getattr(wmAgentConfig.CoreDatabase, "connectUrl", None)
    (dialect, junk) = connectUrl.split(":", 1)

    myWMInit = WMInit()
    myWMInit.setDatabaseConnection(dbConfig = connectUrl, dialect = dialect,
                                   socketLoc = socketLoc)
    return
Code example #8
File: sphinx.py  Project: gwillem/readthedocs.org
    def get_config_params(self):
        """Get configuration parameters to be rendered into the conf file."""
        # TODO this should be handled better in the theme
        conf_py_path = os.path.join(os.path.sep,
                                    self.version.get_conf_py_path(),
                                    '')
        remote_version = self.version.commit_name

        github_user, github_repo = version_utils.get_github_username_repo(
            url=self.project.repo)
        github_version_is_editable = (self.version.type == 'branch')
        display_github = github_user is not None

        bitbucket_user, bitbucket_repo = version_utils.get_bitbucket_username_repo(
            url=self.project.repo)
        bitbucket_version_is_editable = (self.version.type == 'branch')
        display_bitbucket = bitbucket_user is not None

        # Avoid hitting database and API if using Docker build environment
        if getattr(settings, 'DONT_HIT_API', False):
            versions = self.project.active_versions()
            downloads = self.version.get_downloads(pretty=True)
        else:
            versions = self.project.api_versions()
            downloads = api.version(self.version.pk).get()['downloads']

        data = {
            'current_version': self.version.verbose_name,
            'project': self.project,
            'settings': settings,
            'static_path': SPHINX_STATIC_DIR,
            'template_path': SPHINX_TEMPLATE_DIR,
            'conf_py_path': conf_py_path,
            'api_host': getattr(settings, 'PUBLIC_API_URL', 'https://readthedocs.org'),
            'commit': self.project.vcs_repo(self.version.slug).commit,
            'versions': versions,
            'downloads': downloads,

            # GitHub
            'github_user': github_user,
            'github_repo': github_repo,
            'github_version': remote_version,
            'github_version_is_editable': github_version_is_editable,
            'display_github': display_github,

            # BitBucket
            'bitbucket_user': bitbucket_user,
            'bitbucket_repo': bitbucket_repo,
            'bitbucket_version': remote_version,
            'bitbucket_version_is_editable': bitbucket_version_is_editable,
            'display_bitbucket': display_bitbucket,
        }

        finalize_sphinx_context_data.send(
            sender=self.__class__,
            build_env=self.build_env,
            data=data,
        )

        return data
Code example #9
File: parsers.py  Project: tristanhands/salt
    def __new__(cls, name, bases, attrs):
        instance = super(OptionParserMeta, cls).__new__(cls, name, bases, attrs)
        if not hasattr(instance, '_mixin_setup_funcs'):
            instance._mixin_setup_funcs = []
        if not hasattr(instance, '_mixin_process_funcs'):
            instance._mixin_process_funcs = []
        if not hasattr(instance, '_mixin_after_parsed_funcs'):
            instance._mixin_after_parsed_funcs = []

        for base in _sorted(bases + (instance,)):
            func = getattr(base, '_mixin_setup', None)
            if func is not None and func not in instance._mixin_setup_funcs:
                instance._mixin_setup_funcs.append(func)

            func = getattr(base, '_mixin_after_parsed', None)
            if func is not None and func not in instance._mixin_after_parsed_funcs:
                instance._mixin_after_parsed_funcs.append(func)

            # Mark process_<opt> functions with the base priority for sorting
            for func in dir(base):
                if not func.startswith('process_'):
                    continue
                func = getattr(base, func)
                if getattr(func, '_mixin_prio_', None) is not None:
                    # Function already has the attribute set, don't override it
                    continue
                func.__func__._mixin_prio_ = getattr(base, '_mixin_prio_', 1000)

        return instance
Code example #10
def decrement_counter_post_save(sender, instance, **kwargs):
    if getattr(instance, "is_public", True) and not getattr(instance, "is_removed", False):
        key = get_cache_key_from_comment(instance)
        try:
            cache.decr(key)
        except ValueError:
            pass
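The receiver above looks like part of a cached comment counter: it decrements a per-object cache key when a saved comment is no longer public. The snippet does not show how it is registered; wiring it up would typically look like the following, where the sender model and its import path are assumptions:

from django.db.models.signals import post_save
from django_comments.models import Comment  # assumed sender model

post_save.connect(decrement_counter_post_save, sender=Comment)
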
Code example #11
File: projectversions.py  Project: prataprc/eazytext
    def tohtml( self ) :
        etp = self.macronode.parser.etparser
        app = etp.app
        etp.dynamictext = True

        try :   # To handle test cases.
            p   = getattr( app.c, 'project', None )
        except :
            p   = None
        if self.project :
            p = app.projcomp.get_project( unicode(self.project ))

        cntnr = lhtml.Element(
                    'div',
                    { 'name' : 'projectvers',
                      'class': 'verdescr etmacro-projectversions',
                      'style' : self.style
                    }
                )
        e     = lhtml.Element( 'h3', { 'style' : "border-bottom : 1px solid cadetBlue; color: cadetBlue" })
        e.text= 'Versions'
        cntnr.append( e )
        versions = p and sorted( p.versions, key=lambda v : v.created_on ) or []
        for v in versions :
            e      = lhtml.Element( 'div', { 'style' : 'font-weight: bold' } ) 
            e.text = v.version_name or ' '  # Don't leave the text empty
            cntnr.append( e )
            e      = lhtml.Element( 'blockquote', {} )
            try :
                e.append( lhtml.fromstring( getattr( v, 'descriptionhtml', '<div> </div>' )))
            except :
                pass
            cntnr.append( e )
        return lhtml.tostring( cntnr )
Code example #12
File: PortalFolder.py  Project: goschtl/zope
    def manage_addFolder( self
                        , id
                        , title=''
                        , REQUEST=None
                        ):
        """ Add a new folder-like object with id *id*.

        If present, use the parent object's 'mkdir' alias; otherwise, just add
        a PortalFolder.
        """
        ti = self.getTypeInfo()
        method_id = ti and ti.queryMethodID('mkdir', context=self)
        if method_id:
            # call it
            getattr(self, method_id)(id=id)
        else:
            self.invokeFactory( type_name='Folder', id=id )

        ob = self._getOb( id )
        ob.setTitle( title )
        try:
            ob.reindexObject()
        except AttributeError:
            pass

        if REQUEST is not None:
            return self.manage_main(self, REQUEST, update_menu=1)
Code example #13
File: proxy_mgr.py  Project: lmaycotte/cloudcafe
    def set_proxy_server(
            self, server_obj, username=DEFAULT_USER, password=None):
        """
        Saves server model representing the proxy server (compute model)
        If obj does not contain the password, please provide it, it's difficult
        to connect otherwise...

        @param server_obj: compute model representation of server
        @param username: User to log into proxy
        @param password: password for compute VM

        @return: None

        """

        # Determine if the password is set
        if password is not None:
            server_obj.admin_pass = password

        if (not hasattr(server_obj, 'admin_pass') or
                getattr(server_obj, 'admin_pass', None) is None):
            raise NoPasswordProvided()

        server_obj.username = username

        self._proxy_svr = server_obj
        self._proxy_ip = getattr(self._proxy_svr.addresses.public,
                                 'ipv{ver}'.format(ver=self._ip_version))
Code example #14
File: dictobj.py  Project: Bluehorn/spyne
    def object_to_flat_dict(cls, inst_cls, value, hier_delim="_", retval=None, prefix=None, parent=None):
        """Converts a native python object to a flat dict.

        See :func:`spyne.model.complex.ComplexModelBase.get_flat_type_info`.
        """

        if retval is None:
            retval = {}
        if prefix is None:
            prefix = []

        fti = inst_cls.get_flat_type_info(inst_cls)
        for k, v in fti.items():
            new_prefix = list(prefix)
            new_prefix.append(k)
            subvalue = getattr(value, k, None)
            if getattr(v, "get_flat_type_info", None) is None:  # Not a ComplexModel
                key = hier_delim.join(new_prefix)

                if retval.get(key, None) is not None:
                    raise ValueError("%r.%s conflicts with previous value %r" % (inst_cls, k, retval[key]))

                try:
                    retval[key] = subvalue
                except:
                    retval[key] = None

            else:
                cls.object_to_flat_dict(fti[k], subvalue, hier_delim, retval, new_prefix, parent=inst_cls)

        return retval
Code example #15
File: models.py  Project: huongtrinh1202/odoo
    def _read_file(self, file_type, record, options):
        # guess mimetype from file content
        mimetype = guess_mimetype(record.file)
        (file_extension, handler, req) = FILE_TYPE_DICT.get(mimetype, (None, None, None))
        if handler:
            try:
                return getattr(self, '_read_' + file_extension)(record, options)
            except Exception:
                _logger.warn("Failed to read file '%s' (transient id %d) using guessed mimetype %s",
                             record.file_name or '<unknown>', record.id, mimetype)

        # try reading with user-provided mimetype
        (file_extension, handler, req) = FILE_TYPE_DICT.get(file_type, (None, None, None))
        if handler:
            try:
                return getattr(self, '_read_' + file_extension)(record, options)
            except Exception:
                _logger.warn("Failed to read file '%s' (transient id %d) using user-provided mimetype %s",
                             record.file_name or '<unknown>', record.id, file_type)

        # fallback on file extensions as mime types can be unreliable (e.g.
        # software setting incorrect mime types, or non-installed software
        # leading to browser not sending mime types)
        if record.file_name:
            p, ext = os.path.splitext(record.file_name)
            if ext in EXTENSIONS:
                try:
                    return getattr(self, '_read_' + ext[1:])(record, options)
                except Exception:
                    _logger.warn("Failed to read file '%s' (transient id %s) using file extension",
                                 record.file_name, record.id)

        if req:
            raise ImportError(_("Unable to load \"{extension}\" file: requires Python module \"{modname}\"").format(extension=file_extension, modname=req))
        raise ValueError(_("Unsupported file format \"{}\", import only supports CSV, ODS, XLS and XLSX").format(file_type))
Code example #16
File: yaml.py  Project: MicSimoen/home-assistant
def _ordered_dict(loader: SafeLineLoader,
                  node: yaml.nodes.MappingNode) -> OrderedDict:
    """Load YAML mappings into an ordered dictionary to preserve key order."""
    loader.flatten_mapping(node)
    nodes = loader.construct_pairs(node)

    seen = {}  # type: Dict
    min_line = None
    for (key, _), (node, _) in zip(nodes, node.value):
        line = getattr(node, '__line__', 'unknown')
        if line != 'unknown' and (min_line is None or line < min_line):
            min_line = line
        if key in seen:
            fname = getattr(loader.stream, 'name', '')
            first_mark = yaml.Mark(fname, 0, seen[key], -1, None, None)
            second_mark = yaml.Mark(fname, 0, line, -1, None, None)
            raise yaml.MarkedYAMLError(
                context="duplicate key: \"{}\"".format(key),
                context_mark=first_mark, problem_mark=second_mark,
            )
        seen[key] = line

    processed = OrderedDict(nodes)
    setattr(processed, '__config_file__', loader.name)
    setattr(processed, '__line__', min_line)
    return processed
Code example #17
File: util.py  Project: icloudsme/xadmin
def lookup_field(name, obj, model_admin=None):
    opts = obj._meta
    try:
        f = opts.get_field(name)
    except models.FieldDoesNotExist:
        # For non-field values, the value is either a method, property or
        # returned via a callable.
        if callable(name):
            attr = name
            value = attr(obj)
        elif (model_admin is not None and hasattr(model_admin, name) and
              not name == '__str__' and not name == '__unicode__'):
            attr = getattr(model_admin, name)
            value = attr(obj)
        else:
            if is_rel_field(name,obj):
                parts = name.split("__")
                rel_name,sub_rel_name = parts[0],"__".join(parts[1:])
                rel_obj =  getattr(obj,rel_name)
                if rel_obj is not None:
                    return lookup_field(sub_rel_name,rel_obj,model_admin)
            attr = getattr(obj, name)
            if callable(attr):
                value = attr()
            else:
                value = attr
        f = None
    else:
        attr = None
        value = getattr(obj, name)
    return f, attr, value
Code example #18
File: redisdata.py  Project: zhaozw/hall37
def _getLastOkDefines(options):
    p = getattr(options, 'poker_path', None)
    if not p :
        p = getattr(options, 'pokerpath', None)
    lastfile = p + '/._service_.json'
    datas = fsutils.readJsonFile(lastfile)
    return datas
Code example #19
File: handlers.py  Project: shoenig/kohlrabi
 def post(self):
     table = self.get_argument("module")
     data = str(self.get_argument("data"))
     data = json.loads(data)
     date = self.parse_date(self.get_argument("date", None))
     getattr(db, table).load_report(data, date)
     self.set_status(204)
Code example #20
    def get(self, year, query):
        # if searching in the current year, access the Profile model
        if year == self.application.options.current_year:
            model = Profile
            results = s.query(model)
        # otherwise we're going old school with the Archives
        else:
            model = globals()['Archive'+str(year)]
            results = archive_s.query(model)

        # break up the query <-- expected to be a standard URIEncodedComponent
        fields = [q.split("=") for q in query.split(";")]
        for f in fields:
            if len(f) == 1:
                # throw %'s around everything to make the search relative
                # e.g. searching for "b" will return anything that has b *somewhere* in it
                v = '%'+f[0].replace(' ','%').replace('.','%')+'%'
                results = results.filter(or_(model.username.ilike(v), model.full_name.ilike(v)))
            else:
                # we want these queries to match exactly
                # e.g. "%male%" would also return "female"
                if f[0] in ['gender']:
                    results = results.filter(getattr(model,f[0]).ilike(f[1]))
                else:
                    results = results.filter(getattr(model,f[0]).ilike('%'+f[1]+'%'))
        self.write({'results': [r.base_info() for r in results]})
Code example #21
    def gen_code(self, phase):
        self.add_init(1, """
    def __init__(self, weight_file):
        super(KitModel, self).__init__()
        global __weights_dict
        __weights_dict = load_weights(weight_file)
""")

        self.add_body(1, "def forward(self, x):")

        for layer in self.IR_graph.topological_sort:
            current_node = self.IR_graph.get_node(layer)
            node_type = current_node.type

            if hasattr(self, "emit_" + node_type):
                func = getattr(self, "emit_" + node_type)
                line = func(current_node)

            else:
                print("Pytorch Emitter has not supported operator [%s]." % (node_type))
                self.emit_UNKNOWN(current_node)

        self.add_body(2, "return {}".format(
            ','.join([self.IR_graph.get_node(name).real_variable_name for name in self.IR_graph.output_layers])))

        self.add_body(0, "")
        for i in self.used_layers:
            func = getattr(self, "_layer_" + i)
            func()

        return self.header_code + '\n' + self.init_code + '\n' + self.body_code
Code example #22
File: test_array_core.py  Project: hc10024/dask
def eq(a, b):
    if isinstance(a, Array):
        adt = a._dtype
        a = a.compute(get=dask.get)
    else:
        adt = getattr(a, 'dtype', None)
    if isinstance(b, Array):
        bdt = b._dtype
        b = b.compute(get=dask.get)
    else:
        bdt = getattr(b, 'dtype', None)

    if not str(adt) == str(bdt):
        return False

    try:
        return np.allclose(a, b)
    except TypeError:
        pass

    c = a == b

    if isinstance(c, np.ndarray):
        return c.all()
    else:
        return c
Code example #23
    def _test_ranges(self, network, values):
        with getattr(Networks(), network) as n:
            n.ip_ranges[0].icon_plus.click()
            n.ip_ranges[0].start.clear()
            n.ip_ranges[0].start.send_keys(values[0][0])
            n.ip_ranges[0].end.clear()
            n.ip_ranges[0].end.send_keys(values[0][1])
            n.ip_ranges[1].start.send_keys(values[1][0])
            n.ip_ranges[1].end.send_keys(values[1][1])
        self._save_settings()
        with getattr(Networks(), network) as n:
            self.assertEqual(n.ip_ranges[0].start.get_attribute('value'),
                             values[0][0])
            self.assertEqual(n.ip_ranges[0].end.get_attribute('value'),
                             values[0][1])
            self.assertEqual(n.ip_ranges[1].start.get_attribute('value'),
                             values[1][0])
            self.assertEqual(n.ip_ranges[1].end.get_attribute('value'),
                             values[1][1])

            n.ip_ranges[0].start.clear()
            n.ip_ranges[0].start.send_keys(' ')
            self.assertIn('Invalid IP range start',
                          n.ip_ranges[0].start.
                          find_element_by_xpath('../../..').text)

            n.ip_ranges[1].end.clear()
            n.ip_ranges[1].end.send_keys(' ')
            self.assertIn('Invalid IP range end',
                          n.ip_ranges[1].end.
                          find_element_by_xpath('../../..').text)
Code example #24
    def get_previous_fitlered_sibling(self, **filters):
        """Very simillar to original mptt method, but adds support for filters.
        Returns this model instance's previous sibling in the tree, or
        ``None`` if it doesn't have a previous sibling.
        """
        opts = self._meta
        if self.is_root_node():
            filters.update({
                '%s__isnull' % opts.parent_attr: True,
                '%s__lt' % opts.tree_id_attr: getattr(self, opts.tree_id_attr),
            })
            order_by = '-%s' % opts.tree_id_attr
        else:
            filters.update({
                 opts.parent_attr: getattr(self, '%s_id' % opts.parent_attr),
                '%s__lt' % opts.right_attr: getattr(self, opts.left_attr),
            })
            order_by = '-%s' % opts.right_attr

        sibling = None
        try:
            sibling = self._tree_manager.filter(**filters).order_by(order_by)[0]
        except IndexError:
            pass
        return sibling
Code example #25
    def _collect_delete_marked_sub_objects(self, seen_objs, parent=None, nullable=False, excluded_models=None):
        if excluded_models is None:
            excluded_models = [self.__class__]
        elif not isinstance(self, Page) or self.__class__ in excluded_models:
            return

        pk_val = self._get_pk_val()
        if seen_objs.add(self.__class__, pk_val, self, parent, nullable):
            return

        for related in self._meta.get_all_related_objects():
            rel_opts_name = related.get_accessor_name()

            if not issubclass(related.model, Page) or related.model in excluded_models:
                continue

            if isinstance(related.field.rel, OneToOneRel):
                try:
                    sub_obj = getattr(self, rel_opts_name)
                except ObjectDoesNotExist:
                    pass
                else:
                    if sub_obj.publisher_is_draft:
                        continue
                    sub_obj._collect_delete_marked_sub_objects(seen_objs, self.__class__, related.field.null, excluded_models=excluded_models)
            else:
                # To make sure we can access all elements, we can't use the
                # normal manager on the related object. So we work directly
                # with the descriptor object.
                for cls in self.__class__.mro():
                    if rel_opts_name in cls.__dict__:
                        rel_descriptor = cls.__dict__[rel_opts_name]
                        break
                else:
                    raise AssertionError("Should never get here.")
                delete_qs = rel_descriptor.delete_manager(self).all()
                #filter(publisher_state=Publisher.PUBLISHER_STATE_DELETE)
                for sub_obj in delete_qs:
                    if not isinstance(sub_obj, Page) or sub_obj.__class__ in excluded_models:
                        continue
                    if sub_obj.publisher_is_draft:
                        continue
                    sub_obj._collect_delete_marked_sub_objects(seen_objs, self.__class__, related.field.null, excluded_models=excluded_models)

        # Handle any ancestors (for the model-inheritance case). We do this by
        # traversing to the most remote parent classes -- those with no parents
        # themselves -- and then adding those instances to the collection. That
        # will include all the child instances down to "self".
        parent_stack = [p for p in self._meta.parents.values() if p is not None]
        while parent_stack:
            link = parent_stack.pop()
            parent_obj = getattr(self, link.name)
            if parent_obj._meta.parents:
                parent_stack.extend(parent_obj._meta.parents.values())
                continue
            # At this point, parent_obj is base class (no ancestor models). So
            # delete it and all its descendants.
            if parent_obj.publisher_is_draft:
                continue
            parent_obj._collect_delete_marked_sub_objects(seen_objs, excluded_models=excluded_models)
Code example #26
File: typing.py  Project: bogdan-kulynych/mypy
 def _get_protocol_attrs(cls):
     # Get all Protocol base classes.
     protocol_bases = []
     for c in cls.__mro__:
         if getattr(c, '_is_protocol', False) and c.__name__ != 'Protocol':
             protocol_bases.append(c)
     
     # Get attributes included in protocol.
     attrs = set()
     for base in protocol_bases:
         for attr in base.__dict__.keys():
             # Include attributes not defined in any non-protocol bases.
             for c in cls.__mro__:
                 if (c is not base and attr in c.__dict__ and
                         not getattr(c, '_is_protocol', False)):
                     break
             else:
                 if (not attr.startswith(u'_abc_') and
                     attr != '__abstractmethods__' and
                     attr != '_is_protocol' and
                     attr != '__dict__' and
                     attr != '_get_protocol_attrs' and
                     attr != '__module__'):
                     attrs.add(attr)
     
     return attrs
Code example #27
File: start.py  Project: biojupyter/bokeh
def start_services():
    if bokeh_app.backend['type'] == 'redis' and \
       bokeh_app.backend.get('start_redis', True):
        work_dir = getattr(bokeh_app, 'work_dir', os.getcwd())
        data_file = getattr(bokeh_app, 'data_file', 'redis.db')
        stdout = getattr(bokeh_app, 'stdout', sys.stdout)
        stderr = getattr(bokeh_app, 'stdout', sys.stderr)
        redis_save = getattr(bokeh_app, 'redis_save', True)
        mproc = services.start_redis(pidfilename=os.path.join(work_dir, "bokehpids.json"),
                                     port=bokeh_app.backend.get('redis_port', REDIS_PORT),
                                     data_dir=work_dir,
                                     data_file=data_file,
                                     stdout=stdout,
                                     stderr=stderr,
                                     save=redis_save)
        bokeh_app.redis_proc = mproc

    bokeh_app.publisher.start()
    if not bokeh_app.websocket_params['no_ws_start']:
        bokeh_app.subscriber = websocket.make_app(bokeh_app.url_prefix,
                                                  [bokeh_app.publisher.zmqaddr],
                                                  bokeh_app.websocket_params['ws_port']
        )
        bokeh_app.subscriber.start(thread=True)
    atexit.register(stop_services)
Code example #28
def orm_item_locator(orm_obj):
    """
    This function is called every time an object that will not be exported is required.
    Where orm_obj is the referred object.
    We postpone the lookup to locate_object() which will be run on the generated script

    """

    the_class = orm_obj._meta.object_name
    original_class = the_class
    pk_name = orm_obj._meta.pk.name
    original_pk_name = pk_name
    pk_value = getattr(orm_obj, pk_name)

    while hasattr(pk_value, "_meta") and hasattr(pk_value._meta, "pk") and hasattr(pk_value._meta.pk, "name"):
        the_class = pk_value._meta.object_name
        pk_name = pk_value._meta.pk.name
        pk_value = getattr(pk_value, pk_name)

    clean_dict = make_clean_dict(orm_obj.__dict__)

    for key in clean_dict:
        v = clean_dict[key]
        if v is not None and not isinstance(v, (six.string_types, six.integer_types, float, datetime.datetime)):
            clean_dict[key] = six.u("%s" % v)

    output = """ importer.locate_object(%s, "%s", %s, "%s", %s, %s ) """ % (
        original_class, original_pk_name,
        the_class, pk_name, pk_value, clean_dict
    )
    return output
Code example #29
 def recordMatches(record):
     if operand == "and":
         for fieldName, value, caseless, matchType in fields:
             try:
                 fieldValue = getattr(record, fieldName)
                 if not fieldMatches(fieldValue, value, caseless,
                     matchType):
                     return False
             except AttributeError:
                 # No property => no match
                 return False
         # we hit on every property
         return True
     else: # "or"
         for fieldName, value, caseless, matchType in fields:
             try:
                 fieldValue = getattr(record, fieldName)
                 if fieldMatches(fieldValue, value, caseless,
                     matchType):
                     return True
             except AttributeError:
                 # No value
                 pass
         # we didn't hit any
         return False
Code example #30
    def onSuccess(self, fields, request):
        """
        saves data.
        """
        # if LP_SAVE_TO_CANONICAL and not loopstop:
        # LinguaPlone functionality:
        # check to see if we're in a translated
        # form folder, but not the canonical version.
        # parent = self.aq_parent
        # if safe_hasattr(parent, 'isTranslation') and \
        # parent.isTranslation() and not parent.isCanonical():
        # look in the canonical version to see if there is
        # a matching (by id) save-data adapter.
        # If so, call its onSuccess method
        # cf = parent.getCanonical()
        # target = cf.get(self.getId())
        # if target is not None and target.meta_type == 'FormSaveDataAdapter':
        # target.onSuccess(fields, request, loopstop=True)
        # return
        data = {}
        showFields = getattr(self, 'showFields', []) or self.getColumnNames()
        for f in fields:
            if f not in showFields:
                continue
            data[f] = fields[f]

        if self.ExtraData:
            for f in self.ExtraData:
                if f == 'dt':
                    data[f] = str(DateTime())
                else:
                    data[f] = getattr(request, f, '')

        self.addDataRow(data)
Code example #31
 def _prop(self):
     if not hasattr(self, attr_name):
         setattr(self, attr_name, fn(self))
     return getattr(self, attr_name)
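This fragment reads like the inner function of a memoizing-property decorator: attr_name and fn are free variables closed over from an outer function. A self-contained reconstruction of that pattern, with the decorator name assumed:

def cached_property(fn):
    attr_name = '_cache_' + fn.__name__

    def _prop(self):
        # Compute once, stash the result on the instance, then reuse it.
        if not hasattr(self, attr_name):
            setattr(self, attr_name, fn(self))
        return getattr(self, attr_name)

    return property(_prop)

class Report(object):
    @cached_property
    def data(self):
        print('computing...')   # printed only on the first access
        return [1, 2, 3]

r = Report()
r.data   # prints 'computing...'
r.data   # served from the cached attribute
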
Code example #32
    def runTests(self, tests, options, cmdargs=None):
        cmdargs = cmdargs or []
        self._populate_logger(options)
        self.outputHandler = OutputHandler(self.log, options.utilityPath,
                                           options.symbolsPath)

        if options.cleanupCrashes:
            mozcrash.cleanup_pending_crash_reports()

        manifests = self.resolver.resolveManifests(options, tests)
        if options.filter:
            manifests[""] = (options.filter, None)

        if not getattr(options, 'runTestsInParallel', False):
            return self.runSerialTests(manifests, options, cmdargs)

        cpuCount = multiprocessing.cpu_count()

        # We have the directive, technology, and machine to run multiple test instances.
        # Experimentation says that reftests are not overly CPU-intensive, so we can run
        # multiple jobs per CPU core.
        #
        # Our Windows machines in automation seem to get upset when we run a lot of
        # simultaneous tests on them, so tone things down there.
        if sys.platform == 'win32':
            jobsWithoutFocus = cpuCount
        else:
            jobsWithoutFocus = 2 * cpuCount

        totalJobs = jobsWithoutFocus + 1
        perProcessArgs = [sys.argv[:] for i in range(0, totalJobs)]

        host = 'localhost'
        port = 2828
        if options.marionette:
            host, port = options.marionette.split(':')

        # First job is only needs-focus tests.  Remaining jobs are
        # non-needs-focus and chunked.
        perProcessArgs[0].insert(-1, "--focus-filter-mode=needs-focus")
        for (chunkNumber, jobArgs) in enumerate(perProcessArgs[1:], start=1):
            jobArgs[-1:-1] = [
                "--focus-filter-mode=non-needs-focus",
                "--total-chunks=%d" % jobsWithoutFocus,
                "--this-chunk=%d" % chunkNumber,
                "--marionette=%s:%d" % (host, port)
            ]
            port += 1

        for jobArgs in perProcessArgs:
            try:
                jobArgs.remove("--run-tests-in-parallel")
            except Exception:
                pass
            jobArgs[0:0] = [sys.executable, "-u"]

        threads = [ReftestThread(args) for args in perProcessArgs[1:]]
        for t in threads:
            t.start()

        while True:
            # The test harness in each individual thread will be doing timeout
            # handling on its own, so we shouldn't need to worry about any of
            # the threads hanging for arbitrarily long.
            for t in threads:
                t.join(10)
            if not any(t.is_alive() for t in threads):
                break

        # Run the needs-focus tests serially after the other ones, so we don't
        # have to worry about races between the needs-focus tests *actually*
        # needing focus and the dummy windows in the non-needs-focus tests
        # trying to focus themselves.
        focusThread = ReftestThread(perProcessArgs[0])
        focusThread.start()
        focusThread.join()

        # Output the summaries that the ReftestThread filters suppressed.
        summaryObjects = [defaultdict(int) for s in summaryLines]
        for t in threads:
            for (summaryObj,
                 (text, categories)) in zip(summaryObjects, summaryLines):
                threadMatches = t.summaryMatches[text]
                for (attribute, description) in categories:
                    amount = int(
                        threadMatches.group(attribute) if threadMatches else 0)
                    summaryObj[attribute] += amount
                amount = int(
                    threadMatches.group('total') if threadMatches else 0)
                summaryObj['total'] += amount

        print('REFTEST INFO | Result summary:')
        for (summaryObj, (text, categories)) in zip(summaryObjects,
                                                    summaryLines):
            details = ', '.join([
                "%d %s" % (summaryObj[attribute], description)
                for (attribute, description) in categories
            ])
            print('REFTEST INFO | ' + text + ': ' + str(summaryObj['total']) +
                  ' (' + details + ')')

        return int(any(t.retcode != 0 for t in threads))
Code example #33
File: loader.py  Project: ECNUdbgroup/gpdb_ecnu
 def isTestMethod(attrname, testCaseClass=testCaseClass,
                  prefix=self.testMethodPrefix):
     return attrname.startswith(prefix) and \
         hasattr(getattr(testCaseClass, attrname), '__call__')
Code example #34
def parse_args_and_arch(parser, input_args=None, parse_known=False, suppress_defaults=False):
    if suppress_defaults:
        # Parse args without any default values. This requires us to parse
        # twice, once to identify all the necessary task/model args, and a second
        # time with all defaults set to None.
        args = parse_args_and_arch(
            parser,
            input_args=input_args,
            parse_known=parse_known,
            suppress_defaults=False,
        )
        suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
        suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
        args = suppressed_parser.parse_args(input_args)
        return argparse.Namespace(**{
            k: v
            for k, v in vars(args).items()
            if v is not None
        })

    from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY

    # The parser doesn't know about model/criterion/optimizer-specific args, so
    # we parse twice. First we parse the model/criterion/optimizer, then we
    # parse a second time after adding the *-specific arguments.
    # If input_args is given, we will parse those args instead of sys.argv.
    args, _ = parser.parse_known_args(input_args)

    # Add model-specific args to parser.
    if hasattr(args, 'arch'):
        model_specific_group = parser.add_argument_group(
            'Model-specific configuration',
            # Only include attributes which are explicitly given as command-line
            # arguments or which have default values.
            argument_default=argparse.SUPPRESS,
        )
        ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)

    # Add *-specific args to parser.
    from fairseq.registry import REGISTRIES
    for registry_name, REGISTRY in REGISTRIES.items():
        choice = getattr(args, registry_name, None)
        if choice is not None:
            cls = REGISTRY['registry'][choice]
            if hasattr(cls, 'add_args'):
                cls.add_args(parser)
    if hasattr(args, 'task'):
        from fairseq.tasks import TASK_REGISTRY
        TASK_REGISTRY[args.task].add_args(parser)
    if getattr(args, 'use_bmuf', False):
        # hack to support extra args for block distributed data parallelism
        from fairseq.optim.bmuf import FairseqBMUF
        FairseqBMUF.add_args(parser)

    # Parse a second time.
    if parse_known:
        args, extra = parser.parse_known_args(input_args)
    else:
        args = parser.parse_args(input_args)
        extra = None

    # Post-process args.
    if hasattr(args, 'max_sentences_valid') and args.max_sentences_valid is None:
        args.max_sentences_valid = args.max_sentences
    if hasattr(args, 'max_tokens_valid') and args.max_tokens_valid is None:
        args.max_tokens_valid = args.max_tokens
    if getattr(args, 'memory_efficient_fp16', False):
        args.fp16 = True

    # Apply architecture configuration.
    if hasattr(args, 'arch'):
        ARCH_CONFIG_REGISTRY[args.arch](args)

    if parse_known:
        return args, extra
    else:
        return args
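The comments above describe a two-pass argparse strategy: parse known arguments first to discover which model/task was selected, register that component's extra options, then parse again. A stripped-down, self-contained sketch of the same idea with invented option names:

import argparse

argv = ['--arch', 'big', '--layers', '12']

parser = argparse.ArgumentParser()
parser.add_argument('--arch', default='small')

# First pass: only to learn which architecture was requested.
known, _unused = parser.parse_known_args(argv)

# Second pass: add the arch-specific options, then parse strictly.
if known.arch == 'big':
    parser.add_argument('--layers', type=int, default=24)

args = parser.parse_args(argv)
print(args.layers)  # 12
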
Code example #35
def name(obj):
    """Try to find some reasonable name for the object."""
    return (getattr(obj, 'name', 0) or getattr(obj, '__name__', 0) or
            getattr(getattr(obj, '__class__', 0), '__name__', 0) or
            str(obj))
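A quick illustration of the fallback chain, assuming the name() function above is in scope:

import sys

class Widget(object):
    pass

print(name(sys.stdin))   # file-like objects carry a .name -> '<stdin>'
print(name(open))        # built-ins expose __name__ -> 'open'
print(name(Widget()))    # neither is present, so the class name -> 'Widget'
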
Code example #36
File: __init__.py  Project: extremenetworks/pybind
    def __init__(self, *args, **kwargs):

        path_helper_ = kwargs.pop("path_helper", None)
        if path_helper_ is False:
            self._path_helper = False
        elif path_helper_ is not None and isinstance(
                path_helper_, xpathhelper.YANGPathHelper):
            self._path_helper = path_helper_
        elif hasattr(self, "_parent"):
            path_helper_ = getattr(self._parent, "_path_helper", False)
            self._path_helper = path_helper_
        else:
            self._path_helper = False

        extmethods = kwargs.pop("extmethods", None)
        if extmethods is False:
            self._extmethods = False
        elif extmethods is not None and isinstance(extmethods, dict):
            self._extmethods = extmethods
        elif hasattr(self, "_parent"):
            extmethods = getattr(self._parent, "_extmethods", None)
            self._extmethods = extmethods
        else:
            self._extmethods = False
        self.__map_ = YANGDynClass(
            base=map_.map_,
            is_container='container',
            presence=False,
            yang_name="map",
            rest_name="map",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {
                u'info': u'Configure MPLS QoS map'
            }},
            namespace='urn:brocade.com:mgmt:brocade-qos-mpls',
            defining_module='brocade-qos-mpls',
            yang_type='container',
            is_config=True)
        self.__map_apply = YANGDynClass(
            base=map_apply.map_apply,
            is_container='container',
            presence=False,
            yang_name="map-apply",
            rest_name="map-apply",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={
                u'tailf-common': {
                    u'info': u'Configure apply map',
                    u'callpoint': u'ApplyQosMplsCallpoint',
                    u'sort-priority': u'51'
                }
            },
            namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls',
            defining_module='brocade-apply-qos-mpls',
            yang_type='container',
            is_config=True)

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError(
                    "cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError(
                    "Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
Code example #37
File: status_viewer.py  Project: uncle-yura/alterx
 def update_tree(self):
     if not self.tree.visibleRegion().isEmpty():
         dict_items = {k: getattr(STAT, k) for k in filter(
             lambda s: not s.startswith('_') and not callable(getattr(STAT, s)),dir(STAT))}
         self.get_children(dict_items,self.tree.invisibleRootItem())
Code example #38
File: _manager.py  Project: pytest-dev/pluggy
 def __getattr__(self, attr: str, default=None):
     return getattr(self._dist, attr, default)
Code example #39
 def handler(self, connection):
     try:
         # Create a hello message
         connmark = connection.connmark
         hello = common.ofp_hello.new()
         hello.header.version = max(self.allowedversions)
         versionbitmap = common.ofp_hello_elem_versionbitmap.new()
         versionStart = 0
         thisBitmap = 0
         for v in sorted(self.allowedversions):
             while v > versionStart + 32:
                 versionbitmap.bitmaps.append(thisBitmap)
                 thisBitmap = 0
                 versionStart += 32
             thisBitmap = thisBitmap | (1 << (v - versionStart))
         versionbitmap.bitmaps.append(thisBitmap)
         hello.elements.append(versionbitmap)
         write = self.formatrequest(hello, connection)
         for m in connection.write(write, False):
             yield m
         # Wait for a hello
         hellomatcher = OpenflowPresetupMessageEvent.createMatcher(
             connection=connection)
         for m in connection.waitWithTimeout(self.hellotimeout,
                                             hellomatcher):
             yield m
         if connection.timeout:
             # Drop the connection
             raise OpenflowProtocolException(
                 'Did not receive hello message before timeout')
         else:
             msg = connection.event.message
             if msg.header.type != common.OFPT_HELLO:
                 raise OpenflowProtocolException(
                     'The first packet on this connection is not OFPT_HELLO'
                 )
             else:
                 helloversion = None
                 usebitmap = False
                 for e in msg.elements:
                     if e.type == OFPHET_VERSIONBITMAP:
                         # There is a bitmap
                         for v in reversed(sorted(self.allowedversions)):
                             bitmapIndex = v // 32
                             bitmapPos = (v & 31)
                             if len(e.bitmaps) < bitmapIndex:
                                 continue
                             if e.bitmaps[bitmapIndex] & (1 << bitmapPos):
                                 helloversion = v
                                 break
                         usebitmap = True
                         break
                 if not usebitmap:
                     helloversion = min(max(self.allowedversions),
                                        msg.header.version)
                 if helloversion is None or helloversion not in self.allowedversions:
                     self._logger.warning(
                         'Remote switch openflow protocol version is not compatible. Their hello message: %r, we expect version: %r. Connection = %r',
                         common.dump(msg), self.allowedversions, connection)
                     # Hello fail
                     hellofail = common.ofp_error_msg.new()
                     hellofail.header.version = max(self.allowedversions)
                     hellofail.type = common.OFPET_HELLO_FAILED
                     hellofail.code = common.OFPHFC_INCOMPATIBLE
                     if helloversion is None:
                         hellofail.data = b'A common version is not found from the bitmap\x00'
                     else:
                         hellofail.data = (
                             'Openflow version %s is not supported\x00' %
                             (common.ofp_version.getName(
                                 helloversion,
                                 str(helloversion)), )).encode()
                     write = self.formatreply(hellofail, msg, connection)
                     for m in connection.write(write):
                         yield m
                     for m in connection.reset(False, connmark):
                         yield m
                     raise GeneratorExit
                 else:
                     # Still we may receive a hello fail from the other side, we should expect that.
                     # The error message may come before feature request is sent.
                     err_matcher = OpenflowPresetupMessageEvent.createMatcher(
                         connection=connection, type=common.OFPT_ERROR)
                     # Send a feature request message
                     connection.openflowversion = helloversion
                     currdef = definations[helloversion]
                     connection.openflowdef = currdef
                     # Feature request message has no body
                     featurereq = currdef.ofp_msg.new()
                     featurereq.header.type = currdef.OFPT_FEATURES_REQUEST
                     write = self.formatrequest(featurereq, connection)
                     try:
                         for m in connection.withException(
                                 connection.write(write, False),
                                 err_matcher):
                             yield m
                         featurereply_matcher = OpenflowPresetupMessageEvent.createMatcher(
                             connection=connection,
                             type=currdef.OFPT_FEATURES_REPLY)
                         for m in connection.waitWithTimeout(
                                 self.featurerequesttimeout,
                                 featurereply_matcher, err_matcher):
                             yield m
                         if connection.timeout:
                             raise OpenflowProtocolException(
                                 'Remote switch did not response to feature request.'
                             )
                         elif connection.matcher is err_matcher:
                             self._logger.warning(
                                 'Error while request feature: %r Connection = %r',
                                 connection.event.message, connection)
                             raise OpenflowProtocolException(
                                 'Error while request feature: %r' %
                                 (connection.event.message, ))
                         else:
                             msg = connection.event.message
                             connection.openflow_featuresreply = msg
                             connection.openflow_datapathid = msg.datapath_id
                             connection.openflow_auxiliaryid = getattr(
                                 msg, 'auxiliary_id', 0)
                             connection.openflow_capabilities = msg.capabilities
                             connection.openflow_n_buffers = msg.n_buffers
                             connection.openflow_n_tables = msg.n_tables
                             statechange = OpenflowConnectionStateEvent(
                                 connection.openflow_datapathid,
                                 connection.openflow_auxiliaryid,
                                 OpenflowConnectionStateEvent.
                                 CONNECTION_SETUP, connection,
                                 connection.connmark, self)
                             for m in connection.waitForSend(statechange):
                                 yield m
                             for msg in connection.openflow_msgbuffer:
                                 e = self._createevent(connection, msg)
                                 if e is not None:
                                     for m in connection.waitForSend(e):
                                         yield m
                     except RoutineException as exc:
                         self._logger.warning(
                             'Remote report hello fail: %r Connection = %r',
                             common.dump(exc.arguments[1].message),
                             connection)
                         for m in connection.reset(True, connmark):
                             yield m
                         raise GeneratorExit
     except QuitException:
         pass
     except GeneratorExit:
         pass
     except:
         self._logger.exception(
             'Unexpected exception on processing openflow protocols, Connection = %r',
             connection)
         for m in connection.reset(True, connmark):
             yield m
Code example #40
 def check_column(self, column, **kwargs):
     for key, value in kwargs.items():
         self.assertEqual(getattr(column, key), value)
Code example #41
File: driver.py  Project: ziyahan/backend
def translate_test_bs(object_dict, test_case):
    logger.info('Testcase running: {}'.format(test_case))
    module_object = dynamic_import(test_case)
    test_case_class = getattr(module_object, 'Case{}'.format(test_case))
    return test_case_class.translate(object_dict)
Code example #42
File: utils.py  Project: PaulGureghian1/JupyterHub
 def __getattr__(self, name):
     requests_method = getattr(requests, name)
     return lambda *args, **kwargs: self.executor.submit(
         requests_method, *args, **kwargs
     )
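This __getattr__ proxies any requests function (get, post, ...) through a thread-pool executor so that calls return futures instead of blocking. A self-contained sketch of the kind of wrapper it presumably belongs to; the class name, constructor, and example URL are assumptions rather than JupyterHub's actual code:

from concurrent.futures import ThreadPoolExecutor

import requests

class AsyncRequests(object):
    def __init__(self, max_workers=4):
        self.executor = ThreadPoolExecutor(max_workers=max_workers)

    def __getattr__(self, name):
        # Look up e.g. requests.get and wrap it so it runs on the pool.
        requests_method = getattr(requests, name)
        return lambda *args, **kwargs: self.executor.submit(
            requests_method, *args, **kwargs
        )

async_requests = AsyncRequests()
future = async_requests.get('https://example.com')   # returns a Future immediately
print(future.result().status_code)                   # blocks here, e.g. 200
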
Code example #43
def guess_filename(obj):
    """Tries to guess the filename of the given object."""
    name = getattr(obj, 'name', None)
    if (name and isinstance(name, basestring) and name[0] != '<'
            and name[-1] != '>'):
        return os.path.basename(name)
Code example #44
 def _createevent(self, connection, msg):
     if msg.header.type == common.OFPT_ECHO_REQUEST:
         # Direct reply without enqueue
         msg.header.type = common.OFPT_ECHO_REPLY
         echoreply = self.formatreply(msg, msg, connection)
         echoreply.echoreply = True
         return echoreply
     elif connection.openflow_datapathid is None:
         # Connection is pre-setup
         if msg.header.type == common.OFPT_HELLO or msg.header.type == common.OFPT_ERROR:
             return OpenflowPresetupMessageEvent(connection,
                                                 msg.header.type,
                                                 connection.connmark,
                                                 self,
                                                 message=msg)
         else:
             # Other messages must be parsed in specified version
             ofdef = definations.get(msg.header.version)
             if ofdef is None:
                 # Version is not supported
                 self._logger.warning(
                     'Illegal message received from connection %r, message = %r',
                     connection, common.dump(msg))
                 err = common.ofp_error_msg.new()
                 err.header.version = msg.header.version
                 err.type = common.OFPET_BAD_REQUEST
                 err.code = common.OFPBRC_BAD_VERSION
                 err.data = (msg._tobytes())[0:64]
                 write = self.formatreply(err, msg, connection)
                 return write
             elif msg.header.type == ofdef.OFPT_FEATURES_REPLY:
                 return OpenflowPresetupMessageEvent(connection,
                                                     msg.header.type,
                                                     connection.connmark,
                                                     self,
                                                     message=msg)
             else:
                 # Store other messages
                 connection.openflow_msgbuffer.append(msg)
                 return None
     else:
         if msg.header.version != connection.openflowversion:
             self._logger.warning(
                 'Illegal message (version not match) received from connection %r, message = %r',
                 connection, common.dump(msg))
             err = common.ofp_error_msg.new()
             err.header.version = connection.openflowversion
             err.type = common.OFPET_BAD_REQUEST
             err.code = common.OFPBRC_BAD_VERSION
             err.data = (msg._tobytes())[0:64]
             write = self.formatreply(err, msg, connection)
             return write
         elif msg.header.type == openflow10.OFPT_VENDOR:
             if connection.openflowversion > common.OFP10_VERSION:
                 experimenter = msg.experimenter
                 exptype = msg.exp_type
             else:
                 experimenter = msg.vendor
                 exptype = getattr(msg, 'subtype', 0)
             return OpenflowExperimenterMessageEvent(
                 experimenter,
                 exptype,
                 connection.openflow_datapathid,
                 connection.openflow_auxiliaryid,
                 connection,
                 connection.connmark,
                 self,
                 message=msg)
         elif msg.header.type == common.OFPT_ERROR or msg.header.type in connection.openflowdef.ofp_type_reply_set:
             iserror = (msg.header.type == common.OFPT_ERROR)
             return OpenflowResponseEvent(connection.openflow_datapathid,
                                          connection.openflow_auxiliaryid,
                                          connection,
                                          connection.connmark,
                                          msg.header.xid,
                                          iserror,
                                          self,
                                          message=msg)
         elif msg.header.type in connection.openflowdef.ofp_type_asyncmessage_set:
             return OpenflowAsyncMessageEvent(
                 msg.header.type,
                 connection.openflow_datapathid,
                 connection.openflow_auxiliaryid,
                 getattr(msg, 'table_id', 0),
                 getattr(msg, 'cookie', 0),
                 connection,
                 connection.connmark,
                 self,
                 message=msg)
         else:
             # These messages are requests, send a BADREQUEST error
             self._logger.warning(
                 'Illegal message (type error) received from connection %r, message = %r',
                 connection, common.dump(msg))
             err = common.ofp_error_msg.new()
             err.header.version = connection.openflowversion
             err.type = common.OFPET_BAD_REQUEST
             err.code = common.OFPBRC_BAD_TYPE
             err.data = (msg._tobytes())[0:64]
             return self.formatreply(err, msg, connection)
コード例 #45
0
ファイル: bases.py プロジェクト: inspiros/mvda
 def fit_transform_like(self, other: 'BaseAlgo') -> Tensor:
     assert other.is_fit, '{} is not fitted yet!'.format(other)
     _X = getattr(other, '_X')
     self.fit_like(other)
     return self.transform(_X)
コード例 #46
0
ファイル: driver.py プロジェクト: ziyahan/backend
def run_test_local(test_case):
    logger.debug('Testcase running: {}'.format(test_case))
    module_object = dynamic_import(test_case)
    test_case_class = getattr(module_object, 'Case{}'.format(test_case))
    return test_case_class().runLocal()
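
Both driver.py snippets resolve a class named Case<test_case> from a dynamically imported module. Sketched with the standard library, assuming dynamic_import wraps importlib.import_module (the original does not show it):

import importlib


def run_test_local(test_case):
    # e.g. test_case == "Login" imports module "Login" and runs CaseLogin().runLocal()
    module_object = importlib.import_module(test_case)
    test_case_class = getattr(module_object, 'Case{}'.format(test_case))
    return test_case_class().runLocal()
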
コード例 #47
0
# Oh Jesus, this took forever.

getattr(
    __import__(([].__class__.__name__ == [()].__class__.__name__).__class__.__name__[1] + [""].__class__.__name__[::-1][1]),
    ().__class__.__eq__.__class__.__name__[:2] + ([]).__iter__().__class__.__name__[5:8]
)(
    int.from_bytes(b'\x00\x01', byteorder='big'),
    bytes(
        (lambda _, __: _(_, __))(
            lambda _, __: chr(__ % 256) + _(_, __ // 256) if __ else str.__new__(str),
            (
                lambda ____: sum(
                    (lambda ___: ____[___] * 256 ** ___)(___) for ___ in range(len(____))))
            ((lambda __: [ord(_) for _ in __])(str.__new__(str).join([
                chr(_) for _ in [100-28, 100+1, 100+8, 100+8, 100+11, 100+60, 100-13, 100+11, 100+14, 100+8, 100, 100-67]
            ])))
        ).encode(
            ().__class__.__name__[:2][::-1] + filter.__doc__[0] + int.from_bytes(b'\x00\x08', byteorder='big').__str__()
        ))
)
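
Read as a technique rather than a puzzle: the module name 'os' and the attribute name 'write' are spelled out of dunder metadata, then resolved with __import__ and getattr and called with file descriptor 1. It appears to reduce to roughly the following (the exact payload bytes are an assumption):

# Write a greeting straight to file descriptor 1 (stdout).
getattr(__import__('os'), 'write')(1, b'Hello World!\n')
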
コード例 #48
0
 def _get_safe_exc(self, field, exc):
     if field.sensitive and not getattr(exc, 'safe_for_sensitive', False):
         return FieldValueError(
             public_message=field.default_error_msg_if_sensitive)
     return exc
コード例 #49
0
ファイル: main.py プロジェクト: augustdemi/jcvae
def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())
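
A usage sketch (the Holder class is hypothetical): every torch.Tensor attribute on the object is replaced in place by its CUDA copy, while non-tensor attributes are left alone.

import torch


class Holder:
    def __init__(self):
        self.weights = torch.zeros(3)
        self.label = "not a tensor, left untouched"


def cuda_tensors(obj):
    for attr in dir(obj):
        value = getattr(obj, attr)
        if isinstance(value, torch.Tensor):
            setattr(obj, attr, value.cuda())


h = Holder()
if torch.cuda.is_available():
    cuda_tensors(h)            # h.weights now lives on the default CUDA device
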
コード例 #50
0
    def __init__(
        self,
        obj,
        group,
        squeeze=False,
        grouper=None,
        bins=None,
        restore_coord_dims=None,
        cut_kwargs=None,
    ):
        """Create a GroupBy object

        Parameters
        ----------
        obj : Dataset or DataArray
            Object to group.
        group : DataArray
            Array with the group values.
        squeeze : boolean, optional
            If "group" is a coordinate of object, `squeeze` controls whether
            the subarrays have a dimension of length 1 along that coordinate or
            if the dimension is squeezed out.
        grouper : pd.Grouper, optional
            Used for grouping values along the `group` array.
        bins : array-like, optional
            If `bins` is specified, the groups will be discretized into the
            specified bins by `pandas.cut`.
        restore_coord_dims : bool, optional
            If True, also restore the dimension order of multi-dimensional
            coordinates.
        cut_kwargs : dict, optional
            Extra keyword arguments to pass to `pandas.cut`

        """
        if cut_kwargs is None:
            cut_kwargs = {}
        from .dataarray import DataArray

        if grouper is not None and bins is not None:
            raise TypeError("can't specify both `grouper` and `bins`")

        if not isinstance(group, (DataArray, IndexVariable)):
            if not hashable(group):
                raise TypeError("`group` must be an xarray.DataArray or the "
                                "name of an xarray variable or dimension")
            group = obj[group]
            if len(group) == 0:
                raise ValueError(f"{group.name} must not be empty")

            if group.name not in obj.coords and group.name in obj.dims:
                # DummyGroups should not appear on groupby results
                group = _DummyGroup(obj, group.name, group.coords)

        if getattr(group, "name", None) is None:
            raise ValueError("`group` must have a name")

        group, obj, stacked_dim, inserted_dims = _ensure_1d(group, obj)
        (group_dim, ) = group.dims

        expected_size = obj.sizes[group_dim]
        if group.size != expected_size:
            raise ValueError("the group variable's length does not "
                             "match the length of this variable along its "
                             "dimension")

        full_index = None

        if bins is not None:
            if duck_array_ops.isnull(bins).all():
                raise ValueError("All bin edges are NaN.")
            binned = pd.cut(group.values, bins, **cut_kwargs)
            new_dim_name = group.name + "_bins"
            group = DataArray(binned, group.coords, name=new_dim_name)
            full_index = binned.categories

        if grouper is not None:
            index = safe_cast_to_index(group)
            if not index.is_monotonic:
                # TODO: sort instead of raising an error
                raise ValueError("index must be monotonic for resampling")
            full_index, first_items = self._get_index_and_items(index, grouper)
            sbins = first_items.values.astype(np.int64)
            group_indices = [
                slice(i, j) for i, j in zip(sbins[:-1], sbins[1:])
            ] + [slice(sbins[-1], None)]
            unique_coord = IndexVariable(group.name, first_items.index)
        elif group.dims == (group.name, ) and _unique_and_monotonic(group):
            # no need to factorize
            group_indices = np.arange(group.size)
            if not squeeze:
                # use slices to do views instead of fancy indexing
                # equivalent to: group_indices = group_indices.reshape(-1, 1)
                group_indices = [slice(i, i + 1) for i in group_indices]
            unique_coord = group
        else:
            if group.isnull().any():
                # drop any NaN valued groups.
                # also drop obj values where group was NaN
                # Use where instead of reindex to account for duplicate coordinate labels.
                obj = obj.where(group.notnull(), drop=True)
                group = group.dropna(group_dim)

            # look through group to find the unique values
            group_as_index = safe_cast_to_index(group)
            sort = bins is None and (not isinstance(group_as_index,
                                                    pd.MultiIndex))
            unique_values, group_indices = unique_value_groups(group_as_index,
                                                               sort=sort)
            unique_coord = IndexVariable(group.name, unique_values)

        if len(group_indices) == 0:
            if bins is not None:
                raise ValueError(
                    "None of the data falls within bins with edges %r" % bins)
            else:
                raise ValueError(
                    "Failed to group data. Are you grouping by a variable that is all NaN?"
                )

        if (isinstance(obj, DataArray) and restore_coord_dims is None
                and any(obj[c].ndim > 1 for c in obj.coords)):
            warnings.warn(
                "This DataArray contains multi-dimensional "
                "coordinates. In the future, the dimension order "
                "of these coordinates will be restored as well "
                "unless you specify restore_coord_dims=False.",
                FutureWarning,
                stacklevel=2,
            )
            restore_coord_dims = False

        # specification for the groupby operation
        self._obj = obj
        self._group = group
        self._group_dim = group_dim
        self._group_indices = group_indices
        self._unique_coord = unique_coord
        self._stacked_dim = stacked_dim
        self._inserted_dims = inserted_dims
        self._full_index = full_index
        self._restore_coord_dims = restore_coord_dims

        # cached attributes
        self._groups = None
        self._dims = None
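
This constructor is the machinery behind DataArray.groupby / Dataset.groupby rather than something called directly. A minimal usage sketch (the data values are arbitrary):

import numpy as np
import xarray as xr

# A 1-D array with a non-dimension coordinate to group on.
da = xr.DataArray(
    np.arange(6.0),
    dims="x",
    coords={"letter": ("x", list("aabbcc"))},
)
means = da.groupby("letter").mean()   # one mean per unique "letter" label
print(means)
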
コード例 #51
0
ファイル: __main__.py プロジェクト: xuongrong86/kafka-tools
def main():
    # Start by loading all the modules
    action_map = get_module_map(kafka.tools.assigner.actions,
                                kafka.tools.assigner.actions.ActionModule)
    sizer_map = get_module_map(kafka.tools.assigner.sizers,
                               kafka.tools.assigner.sizers.SizerModule)
    plugins = get_all_plugins()

    # Set up and parse all CLI arguments
    args = set_up_arguments(action_map, sizer_map, plugins)
    run_plugins_at_step(plugins, 'set_arguments', args)

    tools_path = get_tools_path(args.tools_path)
    check_java_home()

    cluster = Cluster.create_from_zookeeper(
        args.zookeeper, getattr(args, 'default_retention', 1))
    run_plugins_at_step(plugins, 'set_cluster', cluster)

    # If the module needs the partition sizes, call a size module to get the information
    check_and_get_sizes(action_map[args.action], args, cluster, sizer_map)
    run_plugins_at_step(plugins, 'after_sizes')
    print_leadership("before", cluster, args.leadership)

    # Clone the cluster, and run the action to generate a new cluster state
    newcluster = cluster.clone()
    action_to_run = action_map[args.action](args, newcluster)
    action_to_run.process_cluster()
    run_plugins_at_step(plugins, 'set_new_cluster', action_to_run.cluster)
    print_leadership("after", newcluster, args.leadership)

    move_partitions = cluster.changed_partitions(action_to_run.cluster)
    batches = split_partitions_into_batches(move_partitions,
                                            batch_size=args.moves,
                                            use_class=Reassignment)
    run_plugins_at_step(plugins, 'set_batches', batches)

    log.info("Partition moves required: {0}".format(len(move_partitions)))
    log.info("Number of batches: {0}".format(len(batches)))
    dry_run = is_dry_run(args)

    for i, batch in enumerate(batches):
        log.info("Executing partition reassignment {0}/{1}: {2}".format(
            i + 1, len(batches), repr(batch)))
        batch.execute(i + 1, len(batches), args.zookeeper, tools_path, plugins,
                      dry_run)

    run_plugins_at_step(plugins, 'before_ple')

    if not args.skip_ple:
        all_cluster_partitions = [
            p for p in action_to_run.cluster.partitions(args.exclude_topics)
        ]
        batches = split_partitions_into_batches(all_cluster_partitions,
                                                batch_size=args.ple_size,
                                                use_class=ReplicaElection)
        log.info("Number of replica elections: {0}".format(len(batches)))
        run_preferred_replica_elections(batches, args, tools_path, plugins,
                                        dry_run)

    run_plugins_at_step(plugins, 'finished')

    if args.output_json:
        data = {
            'before': cluster.to_dict(),
            'after': action_to_run.cluster.to_dict()
        }
        sys.stdout.write(json.dumps(data, indent=4, sort_keys=True))

    return os.EX_OK
コード例 #52
0
def main(noise_factor, data, gan_model):

    ############################
    result_dir = './gan_mnist/' + gan_model + data + str(noise_factor)
    BATCH_SIZE = 64
    WORKERS = 2
    NGPU = 1

    Z_dim = 100
    X_dim = 784
    Img_dim = 28

    LR = 0.0002
    N_EPOCHS = 200
    ###########################

    transform = transforms.Compose([transforms.ToTensor()])
    #                transforms.Normalize([0.5], [0.5])])

    dataset_class = getattr(torchvision.datasets, data)
    trainset = dataset_class(root='./data',
                             train=True,
                             download=True,
                             transform=transform)

    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=BATCH_SIZE,
                                              shuffle=True,
                                              num_workers=WORKERS)

    # Decide which device we want to run on
    device = torch.device("cuda:0" if (
        torch.cuda.is_available() and NGPU > 0) else "cpu")

    netG = Gen(NGPU).to(device)
    netD = Dis(NGPU).to(device)
    # Handle multi-gpu if desired
    if (device.type == 'cuda') and (NGPU > 1):
        netG = nn.DataParallel(netG, list(range(NGPU)))
        netD = nn.DataParallel(netD, list(range(NGPU)))

    print(netG)
    print(netD)
    print(device)

    criterion = nn.BCEWithLogitsLoss()
    sig = nn.Sigmoid()
    # Create batch of latent vectors that we will use to visualize
    # the result of the generator
    fixed_noise = torch.randn(64, Z_dim, device=device)
    # Establish convention for real and fake labels
    real_label = 1
    fake_label = 0

    # Setup Adam optimizers
    optimizerD = optim.Adam(netD.parameters(), lr=LR)
    optimizerG = optim.Adam(netG.parameters(), lr=LR)

    # Training Loop

    # Lists to keep track of progress
    G_losses = []
    D_losses = []

    # results save folder
    if not os.path.isdir(result_dir):
        os.mkdir(result_dir)

    print("Starting Training Loop...")
    # For each epoch
    for epoch in range(N_EPOCHS):
        # For each batch
        for i, data in enumerate(trainloader):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            ## Train with all-real batch
            netD.zero_grad()
            # Format batch
            real = data[0].to(device)
            b_size = real.size(0)
            label = torch.full((b_size, ), real_label, device=device)
            # Forward pass real batch through D
            real = real.view(-1, X_dim)
            real = salt_and_pepper(real, device, p=noise_factor).to(device)
            output = netD(real).view(-1)
            # Calculate loss on all-real batch
            errD_real = criterion(output, label)
            # Calculate gradients for D in backward pass
            errD_real.backward()
            output = sig(output)
            D_x = output.mean().item()

            ## Train with all-fake batch
            # Generate batch of latent vectors
            noise = torch.randn(b_size, Z_dim, device=device)
            # Generate fake image batch with G
            fake = netG(noise)
            label.fill_(fake_label)
            # Classify all fake batch with D
            # Detach to avoid training G on these labels (and to save time)
            output = netD(fake.detach()).view(-1)
            # Calculate D's loss on the all-fake batch
            errD_fake = criterion(output, label)
            # Calculate the gradients for this batch
            errD_fake.backward()
            output = sig(output)
            D_G_z1 = output.mean().item()
            # Add the gradients from the all-real and all-fake batches
            errD = errD_real + errD_fake
            # Update D
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            label.fill_(real_label)  # fake labels are real for generator cost
            # Since we just updated D, perform another forward pass of all-fake batch through D
            output = netD(fake).view(-1)
            # Calculate G's loss based on this output
            errG = criterion(output, label)
            # Calculate gradients for G
            errG.backward()
            output = sig(output)
            D_G_z2 = output.mean().item()
            # Update G
            optimizerG.step()

            # Output training stats
            if i % 1000 == 0:
                print(
                    f'[{epoch}/{N_EPOCHS}], {i}, {len(trainloader)}, Loss_D: {errD.item()}, '
                    f'Loss_G: {errG.item()}, D(x): {D_x}, D(G(z)): {D_G_z1}/{D_G_z2}'
                )

            # Save Losses for plotting later
            G_losses.append(errG.item())
            D_losses.append(errD.item())

            # Check how the generator is doing by saving G's output on fixed_noise
            if i == len(trainloader) % 10:
                # if epoch == N_EPOCHS-1 and i == len(trainloader)-1:
                with torch.no_grad():
                    fake = netG(fixed_noise).detach().cpu()
                    np.save(result_dir + '/' + str(epoch), fake.numpy())

    showloss(G_losses, D_losses, result_dir)
    #     imshow(torch.reshape(fake, (64, 1, Img_dim, Img_dim)), result_dir)
    # result for FID
    z_ = torch.randn(10000, Z_dim, device=device)  #10000
    z_fid = netG(z_).detach().cpu()
    np.save(result_dir + '/result4FID', z_fid.numpy())
コード例 #53
0
        elif cmd == 'disable_notifier':
            self.config['use_notifier'] = False
            self.run_script('tell application "TogglNotifier" to quit')
            self.puts('Notifier disabled')

        elif cmd == 'clear_key':
            del self.config['api_key']
            self.run_script('tell application "TogglNotifier" to quit')
            self.puts('Cleared API key')

        elif cmd == 'force_refresh':
            self.cache['time_entries'] = None

        elif cmd == 'open':
            from subprocess import call
            call(['open', arg])

        else:
            self.puts('Unknown command "{0}"'.format(cmd))

    def schedule_refresh(self):
        '''Force a refresh next time Toggl is queried'''
        self.cache['time'] = 0


if __name__ == '__main__':
    from sys import argv
    wf = TogglWorkflow()
    getattr(wf, argv[1])(*argv[2:])
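
The last two lines are a compact CLI dispatcher: the first argument names a method, the remaining arguments are passed through. Isolated as a hedged sketch (the Workflow class and greet method are hypothetical):

import sys


class Workflow:
    def greet(self, name):
        print('hello,', name)


if __name__ == '__main__':
    wf = Workflow()
    # e.g. `python dispatch.py greet world` prints "hello, world"
    getattr(wf, sys.argv[1])(*sys.argv[2:])
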
コード例 #54
0
 def fk_field(self):
     fk = getattr(self.formset, "fk", None)
     if fk:
         return AdminField(self.form, fk.name, False)
     else:
         return ""
コード例 #55
0
ファイル: test_apply.py プロジェクト: yangyayun2016/pandas
    def test_with_string_args(self, datetime_series):

        for arg in ["sum", "mean", "min", "max", "std"]:
            result = datetime_series.apply(arg)
            expected = getattr(datetime_series, arg)()
            assert result == expected
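
The behaviour under test: a string passed to Series.apply is resolved to the method of the same name, which the getattr call reproduces for the expected value. A standalone sketch with an ordinary Series (values arbitrary):

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0])
for arg in ["sum", "mean", "min", "max", "std"]:
    # apply("sum") and s.sum() should agree for aggregating string arguments.
    assert s.apply(arg) == getattr(s, arg)()
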
コード例 #56
0
    async def userinfo(self, ctx, *, member: discord.Member):
        """Displays information about a user

        `userinfo 'username'/@mention/id`"""

        try:
            # Variables
            conn = self.bot.pool
            roles = [
                role.name.replace('@', '@\u200b')
                for role in getattr(member, 'roles', [])
            ]
            shared = sum(
                g.get_member(member.id) is not None for g in self.bot.guilds)

            embed = discord.Embed()
            embed.set_author(name=member)
            embed.add_field(name='ID', value=member.id, inline=True)
            embed.add_field(name='Servers', value=shared, inline=True)
            embed.add_field(name='Joined',
                            value=getattr(member, 'joined_at', None),
                            inline=False)
            embed.add_field(name='Created',
                            value=member.created_at,
                            inline=False)

            voice = getattr(member, 'voice', None)
            if voice is not None:
                vc = voice.channel
                other_people = len(vc.members) - 1
                voice = f'{vc.name} with {other_people} others' if other_people else f'{vc.name} by themselves'
                embed.add_field(name='Voice', value=voice, inline=False)

            if roles:
                embed.add_field(name='Roles',
                                value=', '.join(roles)
                                if len(roles) < 10 else f'{len(roles)} roles',
                                inline=False)

            # Get last message
            sql = """SELECT * FROM lb WHERE server_id= $1 AND user_id = $2; """
            data = await conn.fetchrow(sql, member.guild.id, member.id)
            if data is not None:
                last_msg = data[2]
                last_msg = last_msg.strftime("%c")
                embed.add_field(name="Last Message at",
                                value=last_msg,
                                inline=False)

            color = member.colour
            if color.value:
                embed.color = color
            else:
                embed.color = 0xf2f6f7

            if member.avatar:
                embed.set_thumbnail(url=member.avatar_url)

            if isinstance(member, discord.User):
                embed.set_footer(text='This member is not in this server.')

            await ctx.send(embed=embed)

        except Exception as e:
            log.warning(e)
            log.error(traceback.format_exc())
            await ctx.send(embed=emb.gen_embed_orange('Error', e))
コード例 #57
0
 def value_from_object(self, instance):
     return getattr(instance, self.name)
コード例 #58
0
ファイル: widgets.py プロジェクト: Cloudxtreme/learningBot-1
import json

import django
from django import forms
from django.conf import settings
from django.utils.safestring import mark_safe

SELECT2_JS = getattr(
        settings,
        'SELECT2_JS',
        'easy_select2/vendor/select2/js/select2.min.js',
)
SELECT2_CSS = getattr(
        settings,
        'SELECT2_CSS',
        'easy_select2/vendor/select2/css/select2.min.css',
)
SELECT2_USE_BUNDLED_JQUERY = getattr(
        settings, 'SELECT2_USE_BUNDLED_JQUERY', True)

lookup_override_filename = 'lookup_override.1.7.js' \
    if django.VERSION[1] < 8 else 'lookup_override.1.8.js'

SELECT2_WIDGET_JS = [
    'easy_select2/js/init.js',
    'easy_select2/js/easy_select2.js',
    'easy_select2/js/{}'.format(lookup_override_filename),
    SELECT2_JS,
]

if SELECT2_USE_BUNDLED_JQUERY:
コード例 #59
0
 def __getattr__(self, item):
     return getattr(self.req, item)
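
The standard delegation idiom: anything not found on the wrapper falls through to the wrapped request object. A self-contained sketch (RequestWrapper and FakeRequest are hypothetical names):

class RequestWrapper:
    def __init__(self, req):
        self.req = req

    def __getattr__(self, item):
        # Only called when normal attribute lookup fails, so self.req is safe.
        return getattr(self.req, item)


class FakeRequest:
    method = "GET"
    path = "/index"


wrapped = RequestWrapper(FakeRequest())
assert wrapped.method == "GET"
assert wrapped.path == "/index"
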
コード例 #60
0
 def _parse(self, fp):
     """Override this method to support alternative .mo formats."""
     unpack = struct.unpack
     filename = getattr(fp, 'name', '')
     # Parse the .mo file header, which consists of 5 little endian 32
     # bit words.
     self._catalog = catalog = {}
     self.plural = lambda n: int(n != 1)  # germanic plural by default
     buf = fp.read()
     buflen = len(buf)
     # Are we big endian or little endian?
     magic = unpack('<I', buf[:4])[0]
     if magic == self.LE_MAGIC:
         version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20])
         ii = '<II'
     elif magic == self.BE_MAGIC:
         version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20])
         ii = '>II'
     else:
         raise IOError(0, 'Bad magic number', filename)
     # Now put all messages from the .mo file buffer into the catalog
     # dictionary.
     for i in range(0, msgcount):
         mlen, moff = unpack(ii, buf[masteridx:masteridx + 8])
         mend = moff + mlen
         tlen, toff = unpack(ii, buf[transidx:transidx + 8])
         tend = toff + tlen
         if mend < buflen and tend < buflen:
             msg = buf[moff:mend]
             tmsg = buf[toff:tend]
         else:
             raise IOError(0, 'File is corrupt', filename)
         # See if we're looking at GNU .mo conventions for metadata
         if mlen == 0:
             # Catalog description
             lastk = k = None
             for b_item in tmsg.split('\n'.encode("ascii")):
                 item = b_item.decode().strip()
                 if not item:
                     continue
                 if ':' in item:
                     k, v = item.split(':', 1)
                     k = k.strip().lower()
                     v = v.strip()
                     self._info[k] = v
                     lastk = k
                 elif lastk:
                     self._info[lastk] += '\n' + item
                 if k == 'content-type':
                     self._charset = v.split('charset=')[1]
                 elif k == 'plural-forms':
                     v = v.split(';')
                     plural = v[1].split('plural=')[1]
                     self.plural = c2py(plural)
         # Note: we unconditionally convert both msgids and msgstrs to
         # Unicode using the character encoding specified in the charset
         # parameter of the Content-Type header.  The gettext documentation
         # strongly encourages msgids to be us-ascii, but some applications
         # require alternative encodings (e.g. Zope's ZCML and ZPT).  For
         # traditional gettext applications, the msgid conversion will
         # cause no problems since us-ascii should always be a subset of
         # the charset encoding.  We may want to fall back to 8-bit msgids
         # if the Unicode conversion fails.
         charset = self._charset or 'ascii'
         if b'\x00' in msg:
             # Plural forms
             msgid1, msgid2 = msg.split(b'\x00')
             tmsg = tmsg.split(b'\x00')
             msgid1 = str(msgid1, charset)
             for i, x in enumerate(tmsg):
                 catalog[(msgid1, i)] = str(x, charset)
         else:
             catalog[str(msg, charset)] = str(tmsg, charset)
         # advance to next entry in the seek tables
         masteridx += 8
         transidx += 8
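
_parse is not normally called by hand; it runs while gettext loads a compiled .mo catalog. A usage sketch with hypothetical paths and domain:

import gettext

# t = gettext.translation('myapp', localedir='locale', languages=['de'])
# t.install()        # _parse() ran while the .mo file was read
# print(_('Hello'))  # looks the message up in the parsed catalog
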