def serialize(self, ctx, message):
    assert message in (self.REQUEST, self.RESPONSE)

    self.event_manager.fire_event("before_serialize", ctx)

    if ctx.out_error is not None:
        ctx.out_document = [ctx.out_error.to_dict(ctx.out_error)]

    else:
        # get the result message
        if message is self.REQUEST:
            out_type = ctx.descriptor.in_message
        elif message is self.RESPONSE:
            out_type = ctx.descriptor.out_message
        if out_type is None:
            return

        out_type_info = out_type._type_info

        # instantiate the result message
        out_instance = out_type()

        # assign raw result to its wrapper, result_message
        for i, attr_name in enumerate(out_type_info.keys()):
            setattr(out_instance, attr_name, ctx.out_object[i])

        ctx.out_document = self._object_to_doc(out_type, out_instance,
                                               skip_depth=self.skip_depth)

    self.event_manager.fire_event("after_serialize", ctx)
def make_cache_level(ncaches, prototypes, level, next_cache):
    global next_subsys_index, proto_l1, testerspec, proto_tester

    index = next_subsys_index[level]
    next_subsys_index[level] += 1

    # Create a subsystem to contain the crossbar and caches, and
    # any testers
    subsys = SubSystem()
    setattr(system, "l%dsubsys%d" % (level, index), subsys)

    # The levels are indexing backwards through the list
    ntesters = testerspec[len(cachespec) - level]

    # Scale the progress threshold as testers higher up in the tree
    # (smaller level) get a smaller portion of the overall bandwidth,
    # and also make the interval of packet injection longer for the
    # testers closer to the memory (larger level) to prevent them
    # hogging all the bandwidth
    limit = (len(cachespec) - level + 1) * 100000000
    testers = [proto_tester(interval=10 * (level * level + 1),
                            progress_check=limit) for i in xrange(ntesters)]
    if ntesters:
        subsys.tester = testers

    if level != 0:
        # Create a crossbar and add it to the subsystem, note that
        # we do this even with a single element on this level
        xbar = L2XBar()
        subsys.xbar = xbar
        if next_cache:
            xbar.master = next_cache.cpu_side

        # Create and connect the caches, both the ones fanning out
        # to create the tree, and the ones used to connect testers
        # on this level
        tree_caches = [prototypes[0]() for i in xrange(ncaches[0])]
        tester_caches = [proto_l1() for i in xrange(ntesters)]

        subsys.cache = tester_caches + tree_caches
        for cache in tree_caches:
            cache.mem_side = xbar.slave
            make_cache_level(ncaches[1:], prototypes[1:], level - 1, cache)
        for tester, cache in zip(testers, tester_caches):
            tester.port = cache.cpu_side
            cache.mem_side = xbar.slave
    else:
        if not next_cache:
            print "Error: No next-level cache at top level"
            sys.exit(1)

        if ntesters > 1:
            # Create a crossbar and add it to the subsystem
            xbar = L2XBar()
            subsys.xbar = xbar
            xbar.master = next_cache.cpu_side
            for tester in testers:
                tester.port = xbar.slave
        else:
            # Single tester
            testers[0].port = next_cache.cpu_side
def _addNamedParams(self, alias, **d):
    self.namedparams.update(d)
    if not alias:
        # create the reverse mapping: from parameter index to name.
        self.paramnames.update(dict((v, k) for k, v in d.items()))

    # Create a property for each named parameter, unless the class
    # already exposes an attribute of that name.
    for n, i in self.namedparams.items():
        if hasattr(self.__class__, n):
            continue

        # Bind the parameter *name* into the accessor closures via a
        # factory function, so each property captures its own name.
        def makeNamedGetter(nm):
            return lambda x: x._getNamedThing(nm)

        def makeNamedSetter(nm):
            return lambda x, v: x._setNamedThing(nm, v)

        getter = makeNamedGetter(n)
        setter = makeNamedSetter(n)
        prop = property(getter, setter, None, 'named param %s' % n)
        setattr(self.__class__, n, prop)
def __init__(self, conn, dumps, loads):
    self._conn = conn
    self._dumps = dumps
    self._loads = loads
    # Delegate the low-level connection API directly to the wrapped
    # connection object.
    for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
        obj = getattr(conn, attr)
        setattr(self, attr, obj)
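
# A hedged, self-contained sketch of the pattern above: the wrapper keeps
# a custom serializer pair while delegating the byte-level Connection API
# unchanged. The class name ConnectionWrapper and the send/recv methods
# built on _dumps/_loads are assumptions for illustration, not part of
# the original snippet.
from multiprocessing import Pipe
import json

class ConnectionWrapper(object):
    def __init__(self, conn, dumps, loads):
        self._conn = conn
        self._dumps = dumps
        self._loads = loads
        for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
            setattr(self, attr, getattr(conn, attr))

    def send(self, obj):
        # Serialize with the injected dumps, then use the delegated API.
        self.send_bytes(self._dumps(obj))

    def recv(self):
        return self._loads(self.recv_bytes())

a, b = Pipe()
wa = ConnectionWrapper(a, lambda o: json.dumps(o).encode(),
                       lambda raw: json.loads(raw.decode()))
wb = ConnectionWrapper(b, lambda o: json.dumps(o).encode(),
                       lambda raw: json.loads(raw.decode()))
wa.send({'x': 1})
print(wb.recv())   # {'x': 1}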
def update_aggregate(self, ag_model, location):
    agg = ag_model.objects.filter(location=location)
    if agg:
        agg = agg[0]
    else:
        agg = ag_model(location=location)

    trees = Tree.objects.filter(plot__geometry__within=location.geometry)
    plots = Plot.objects.filter(geometry__within=location.geometry)

    agg.total_trees = trees.count()
    agg.total_plots = plots.count()
    trees = trees.exclude(Q(dbh=None) | Q(dbh=0.0)).exclude(species=None)

    # TODO: figure out how to summarize diff stratum stuff
    field_names = [x.name for x in ResourceSummaryModel._meta.fields
                   if not x.name == 'id']

    if agg.total_trees == 0:
        for f in field_names:
            setattr(agg, f, 0.0)
    else:
        # TODO: speed this up
        for f in field_names:
            fn = 'treeresource__' + f
            s = trees.aggregate(Sum(fn))[fn + '__sum'] or 0.0
            setattr(agg, f, s)
    agg.save()
def _loadEntry(self):
    if not hasattr(self, '_flags'):
        info = self._pts._ListEntry(self._id)
        for field in self._attrs:
            setattr(self, '_%s' % field, getattr(info, field))
        for field in self._entry_attrs:
            setattr(self, '_%s' % field, self._pts.getEntry(getattr(info, field)))
def applyMethod(stmts, methodName):
    @_transaction
    def method(self, _=None, **kwargs):
        # Received an un-named parameter; it should be an iterable
        if _ != None:
            # Parameters are given as a dictionary,
            # put them in the correct place (bad guy...)
            if isinstance(_, dict):
                kwargs = _
            # Iterable of parameters, execute as normal
            else:
                for kwargs in _:
                    for stmt in stmts:
                        self.cursor.execute(stmt, kwargs)
                return

        self.cursor.execute(stmts[0], kwargs)
        rowid = self.cursor.lastrowid
        for stmt in stmts[1:]:
            self.cursor.execute(stmt, kwargs)
        return rowid

    setattr(self.__class__, methodName, method)
def applyMethod(stmts, methodName):
    @_transaction
    def method(self, **kwargs):
        for stmt in stmts:
            self.cursor.execute(stmt, kwargs)

    setattr(self.__class__, methodName, method)
def __getattr__(self, name):
    if name in self.ok_names:
        attr = getattr(self.real, name)
        setattr(self, name, attr)
        return attr
    else:
        raise AttributeError(name)  # Attribute not allowed
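
# Hypothetical usage sketch: the __getattr__ above implements a
# whitelisting, caching proxy. The class and attribute names here
# (RestrictedProxy, real, ok_names) are assumptions for illustration.
class RestrictedProxy(object):
    ok_names = ('upper', 'lower')

    def __init__(self, real):
        self.real = real

    def __getattr__(self, name):
        if name in self.ok_names:
            attr = getattr(self.real, name)
            setattr(self, name, attr)   # cache: __getattr__ fires once per name
            return attr
        raise AttributeError(name)

p = RestrictedProxy("Hello")
print(p.upper())          # "HELLO" -- fetched from the wrapped str and cached
try:
    p.strip
except AttributeError:
    print("strip is not whitelisted")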
def delete_objects(seen_objs):
    """
    Iterate through a list of seen classes, and remove any instances that are
    referred to.
    """
    try:
        ordered_classes = seen_objs.keys()
    except CyclicDependency:
        # If there is a cyclic dependency, we cannot in general delete the
        # objects. However, if an appropriate transaction is set up, or if the
        # database is lax enough, it will succeed. So for now, we go ahead and
        # try anyway.
        ordered_classes = seen_objs.unordered_keys()

    obj_pairs = {}
    for cls in ordered_classes:
        items = seen_objs[cls].items()
        items.sort()
        obj_pairs[cls] = items

        # Pre-notify all instances to be deleted.
        for pk_val, instance in items:
            signals.pre_delete.send(sender=cls, instance=instance)

        pk_list = [pk for pk, instance in items]
        del_query = sql.DeleteQuery(cls, connection)
        del_query.delete_batch_related(pk_list)

        update_query = sql.UpdateQuery(cls, connection)
        for field, model in cls._meta.get_fields_with_model():
            if (field.rel and field.null and field.rel.to in seen_objs and
                    filter(lambda f: f.column == field.column,
                           field.rel.to._meta.fields)):
                if model:
                    sql.UpdateQuery(model, connection).clear_related(field,
                                                                     pk_list)
                else:
                    update_query.clear_related(field, pk_list)

    # Now delete the actual data.
    for cls in ordered_classes:
        items = obj_pairs[cls]
        items.reverse()

        pk_list = [pk for pk, instance in items]
        del_query = sql.DeleteQuery(cls, connection)
        del_query.delete_batch(pk_list)

        # Last cleanup; set NULLs where there once was a reference to the
        # object, NULL the primary key of the found objects, and perform
        # post-notification.
        for pk_val, instance in items:
            for field in cls._meta.fields:
                if field.rel and field.null and field.rel.to in seen_objs:
                    setattr(instance, field.attname, None)

            signals.post_delete.send(sender=cls, instance=instance)
            setattr(instance, cls._meta.pk.attname, None)

    transaction.commit_unless_managed()
def get_cached_row(klass, row, index_start, max_depth=0, cur_depth=0,
                   requested=None):
    """
    Helper function that recursively returns an object with the specified
    related attributes already populated.
    """
    if max_depth and requested is None and cur_depth > max_depth:
        # We've recursed deeply enough; stop now.
        return None

    restricted = requested is not None
    index_end = index_start + len(klass._meta.fields)
    fields = row[index_start:index_end]
    if not [x for x in fields if x is not None]:
        # If we only have a list of Nones, there was no related object.
        obj = None
    else:
        obj = klass(*fields)
    for f in klass._meta.fields:
        if not select_related_descend(f, restricted, requested):
            continue
        if restricted:
            next = requested[f.name]
        else:
            next = None
        cached_row = get_cached_row(f.rel.to, row, index_end, max_depth,
                                    cur_depth + 1, next)
        if cached_row:
            rel_obj, index_end = cached_row
            if obj is not None:
                setattr(obj, f.get_cache_name(), rel_obj)
    return obj, index_end
def __call__(self, parser, namespace, fname, option_string=None):
    ext = os.path.splitext(fname)[1][1:]
    if ext not in allowed:
        option_string = '({})'.format(option_string) if option_string else ''
        parser.error("file extension is not one of {}{}".format(
            allowed, option_string))
    else:
        setattr(namespace, self.dest, fname)
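
# A minimal, self-contained sketch of how the __call__ above might be
# wired into argparse; the CheckExt class name and the `allowed` set are
# assumptions for illustration, not part of the original snippet.
import argparse
import os

allowed = ("csv", "json")

class CheckExt(argparse.Action):
    def __call__(self, parser, namespace, fname, option_string=None):
        ext = os.path.splitext(fname)[1][1:]
        if ext not in allowed:
            option_string = '({})'.format(option_string) if option_string else ''
            parser.error("file extension is not one of {}{}".format(
                allowed, option_string))
        else:
            setattr(namespace, self.dest, fname)

parser = argparse.ArgumentParser()
parser.add_argument("--input", action=CheckExt)
args = parser.parse_args(["--input", "data.csv"])   # "data.txt" would exit with an error
print(args.input)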
def register(self, mod):
    # Derive the name of the kernel from the module
    name = mod[mod.rfind('.') + 1:]

    # See if a kernel has already been registered under this name
    if hasattr(self, name):
        # Same name, different module
        if getattr(self, name)._mod != mod:
            raise RuntimeError('Attempt to re-register "{}" with a '
                               'different module'.format(name))
        # Otherwise (since we're already registered) return
        else:
            return

    # Generate the kernel providing method
    def kernel_meth(self, tplargs, dims, **kwargs):
        # Render the source of the kernel
        src, ndim, argn, argt = self._render_kernel(name, mod, tplargs)

        # Compile the kernel
        fun = self._build_kernel(name, src, list(it.chain(*argt)))

        # Process the argument list
        argb = self._build_arglst(dims, argn, argt, kwargs)

        # Return a ComputeKernel subclass instance
        return self._instantiate_kernel(dims, fun, argb)

    # Attach the module to the method as an attribute
    kernel_meth._mod = mod

    # Bind
    setattr(self, name, types.MethodType(kernel_meth, self))
def _make_scalar_compound_controller(self, fcurves, keyframes, bez_chans,
                                     default_xform):
    ctrl = plCompoundController()
    subctrls = ("X", "Y", "Z")
    for i in subctrls:
        setattr(ctrl, i, plLeafController())
    exported_frames = ([], [], [])
    ctrl_fcurves = {i.array_index: i for i in fcurves}

    for keyframe in keyframes:
        for i, subctrl in enumerate(subctrls):
            fval = keyframe.values.get(i, None)
            if fval is not None:
                keyframe_type = (hsKeyFrame.kBezScalarKeyFrame
                                 if i in bez_chans
                                 else hsKeyFrame.kScalarKeyFrame)
                exported = hsScalarKey()
                exported.frame = keyframe.frame_num
                exported.frameTime = keyframe.frame_time
                exported.inTan = keyframe.in_tans[i]
                exported.outTan = keyframe.out_tans[i]
                exported.type = keyframe_type
                exported.value = fval
                exported_frames[i].append(exported)

    for i, subctrl in enumerate(subctrls):
        my_keyframes = exported_frames[i]

        # ensure this controller has at least ONE keyframe
        if not my_keyframes:
            hack_frame = hsScalarKey()
            hack_frame.frame = 0
            hack_frame.frameTime = 0.0
            hack_frame.type = hsKeyFrame.kScalarKeyFrame
            hack_frame.value = default_xform[i]
            my_keyframes.append(hack_frame)

        getattr(ctrl, subctrl).keys = (my_keyframes, my_keyframes[0].type)
    return ctrl
def _set(self, value):
    # `key` and `what` are free variables from the enclosing scope; the
    # interpolated string below is a generated doc note (an expression
    # statement), not a real docstring.
    '''set value for attribute %s.
    value -- initialize value, immutable type
    ''' % str(key)
    if not hasattr(self, what.attrs_aname):
        setattr(self, what.attrs_aname, {})
    getattr(self, what.attrs_aname)[key] = value
def _ordered_dict(loader: SafeLineLoader,
                  node: yaml.nodes.MappingNode) -> OrderedDict:
    """Load YAML mappings into an ordered dictionary to preserve key order."""
    loader.flatten_mapping(node)
    nodes = loader.construct_pairs(node)

    seen = {}  # type: Dict
    min_line = None
    for (key, _), (node, _) in zip(nodes, node.value):
        line = getattr(node, '__line__', 'unknown')
        if line != 'unknown' and (min_line is None or line < min_line):
            min_line = line
        if key in seen:
            fname = getattr(loader.stream, 'name', '')
            first_mark = yaml.Mark(fname, 0, seen[key], -1, None, None)
            second_mark = yaml.Mark(fname, 0, line, -1, None, None)
            raise yaml.MarkedYAMLError(
                context="duplicate key: \"{}\"".format(key),
                context_mark=first_mark, problem_mark=second_mark,
            )
        seen[key] = line

    processed = OrderedDict(nodes)
    setattr(processed, '__config_file__', loader.name)
    setattr(processed, '__line__', min_line)
    return processed
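
# Hedged usage sketch: a constructor like _ordered_dict is typically
# registered for the default mapping tag, so every YAML mapping loaded
# through SafeLineLoader comes back as an OrderedDict carrying line
# metadata. SafeLineLoader is assumed to be a yaml.SafeLoader subclass
# that stamps nodes with __line__ during composition.
SafeLineLoader.add_constructor(
    yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _ordered_dict)

with open("configuration.yaml") as fh:
    config = yaml.load(fh, Loader=SafeLineLoader)   # mappings are OrderedDicts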
def __init__(self, *args, **kwargs):
    """Constructor to resolve values for all Parameters.

    For example, the Task::

        class MyTask(luigi.Task):
            count = luigi.IntParameter()

    can be instantiated as ``MyTask(count=10)``.
    """
    params = self.get_params()
    param_values = self.get_param_values(params, args, kwargs)

    # Set all values on class instance
    for key, value in param_values:
        setattr(self, key, value)

    # Register args and kwargs as an attribute on the class. Might be useful
    self.param_args = tuple(value for key, value in param_values)
    self.param_kwargs = dict(param_values)

    # Build up the task id
    task_id_parts = []
    param_objs = dict(params)
    for param_name, param_value in param_values:
        if param_objs[param_name].significant:
            task_id_parts.append('%s=%s' % (
                param_name, param_objs[param_name].serialize(param_value)))

    self.task_id = '%s(%s)' % (self.task_family, ', '.join(task_id_parts))
    self.__hash = hash(self.task_id)
def read(self, read_path=None):
    """Read the metadata from the associated file.

    If read_path is specified, read metadata from that file instead.
    Raises a `ReadError` if the file could not be read.
    """
    if read_path is None:
        read_path = self.path
    else:
        read_path = normpath(read_path)
    try:
        f = MediaFile(syspath(read_path))
    except (OSError, IOError) as exc:
        raise ReadError(read_path, exc)

    for key in ITEM_KEYS_META:
        value = getattr(f, key)
        if isinstance(value, (int, long)):
            # Filter values wider than 64 bits (in signed
            # representation). SQLite cannot store them.
            # py26: Post transition, we can use:
            # value.bit_length() > 63
            if abs(value) >= 2 ** 63:
                value = 0
        setattr(self, key, value)

    # Database's mtime should now reflect the on-disk value.
    if read_path == self.path:
        self.mtime = self.current_mtime()

    self.path = read_path
def _lun_type(self, xml_root):
    lun_type = constants.PRODUCT_LUN_TYPE.get(self.conf.san_product, 'Thick')

    def _verify_conf_lun_type(lun_type):
        if lun_type not in constants.LUN_TYPE_MAP:
            msg = _("Invalid lun type %s is configured.") % lun_type
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)

        if self.conf.san_product in constants.PRODUCT_LUN_TYPE:
            product_lun_type = constants.PRODUCT_LUN_TYPE[
                self.conf.san_product]
            if lun_type != product_lun_type:
                msg = _("%(array)s array requires %(valid)s lun type, "
                        "but %(conf)s is specified.") % {
                    'array': self.conf.san_product,
                    'valid': product_lun_type,
                    'conf': lun_type}
                LOG.error(msg)
                raise exception.InvalidInput(reason=msg)

    text = xml_root.findtext('LUN/LUNType')
    if text:
        lun_type = text.strip()
        _verify_conf_lun_type(lun_type)

    lun_type = constants.LUN_TYPE_MAP[lun_type]
    setattr(self.conf, 'lun_type', lun_type)
def write(self, path=None):
    """Write the item's metadata to a media file.

    ``path`` defaults to the item's path property.

    Can raise either a `ReadError` or a `WriteError`.
    """
    if path is None:
        path = self.path
    else:
        path = normpath(path)

    try:
        f = MediaFile(syspath(path))
    except (OSError, IOError) as exc:
        raise ReadError(self.path, exc)

    plugins.send('write', item=self, path=path)

    for key in ITEM_KEYS_WRITABLE:
        setattr(f, key, self[key])
    try:
        f.save(id3v23=beets.config['id3v23'].get(bool))
    except (OSError, IOError, MutagenError) as exc:
        raise WriteError(self.path, exc)

    # The file has a new mtime.
    self.mtime = self.current_mtime()
    plugins.send('after_write', item=self)
def applyMethod(sql, methodName):
    @_transaction
    def method(self, **kwargs):
        self.cursor.execute(sql, kwargs)
        return self.cursor.lastrowid

    setattr(self.__class__, methodName, method)
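
# Hypothetical usage sketch for the two applyMethod factories above: they
# read `self` from their enclosing scope, so they are assumed to be called
# from inside a method of a DB-wrapper class during setup, e.g.:
#
#     applyMethod("INSERT INTO users(name) VALUES(:name)", "addUser")
#
# after which every instance of that class gains the generated helper:
#
#     user_id = db.addUser(name="alice")   # returns cursor.lastrowid
#
# _transaction is assumed to be a decorator that wraps the call in
# commit/rollback handling.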
def option_none(option, opt, value, parser):
    """Ensure a flag-style option is not given an argument."""
    if parser.rargs and not parser.rargs[0].startswith('-'):
        print "Option arg error"
        print opt, " option should be empty"
        sys.exit(2)
    setattr(parser.values, option.dest, True)
def populatePanel(self, contentPanel, headerPanel):
    contentSizer = contentPanel.GetSizer()
    self.panel = contentPanel
    self.headerPanel = headerPanel

    gridPrice = wx.GridSizer(1, 3)
    contentSizer.Add(gridPrice, 0, wx.EXPAND | wx.ALL, 0)

    for type in ("ship", "fittings", "total"):
        image = "%sPrice_big" % type if type != "ship" else "ship_big"
        box = wx.BoxSizer(wx.HORIZONTAL)
        gridPrice.Add(box, 0, wx.ALIGN_TOP)

        box.Add(bitmapLoader.getStaticBitmap(image, contentPanel, "icons"),
                0, wx.ALIGN_CENTER)

        vbox = wx.BoxSizer(wx.VERTICAL)
        box.Add(vbox, 1, wx.EXPAND)

        vbox.Add(wx.StaticText(contentPanel, wx.ID_ANY, type.capitalize()),
                 0, wx.ALIGN_LEFT)

        hbox = wx.BoxSizer(wx.HORIZONTAL)
        vbox.Add(hbox)

        lbl = wx.StaticText(contentPanel, wx.ID_ANY, "0.00 ISK")
        setattr(self, "labelPrice%s" % type.capitalize(), lbl)
        hbox.Add(lbl, 0, wx.ALIGN_LEFT)

    self.labelEMStatus = wx.StaticText(contentPanel, wx.ID_ANY, "")
    contentSizer.Add(self.labelEMStatus, 0)
def port_cassavotes():
    from r2.models import Vote, Account, Link, Comment
    from r2.models.vote import CassandraVote, CassandraLinkVote, \
        CassandraCommentVote
    from r2.lib.db.tdb_cassandra import CL
    from r2.lib.utils import fetch_things2, to36, progress

    ts = [(Vote.rel(Account, Link), CassandraLinkVote),
          (Vote.rel(Account, Comment), CassandraCommentVote)]

    dataattrs = set(['valid_user', 'valid_thing', 'ip', 'organic'])

    for prel, crel in ts:
        vq = prel._query(sort=desc('_date'),
                         data=True,
                         eager_load=False)
        vq = fetch_things2(vq)
        vq = progress(vq, persec=True)
        for v in vq:
            t1 = to36(v._thing1_id)
            t2 = to36(v._thing2_id)
            cv = crel(thing1_id=t1, thing2_id=t2, date=v._date, name=v._name)
            for dkey, dval in v._t.iteritems():
                if dkey in dataattrs:
                    setattr(cv, dkey, dval)

            cv._commit(write_consistency_level=CL.ONE)
def __init__(self, *args, **kwargs):
    xpcshell.XPCShellTestThread.__init__(self, *args, **kwargs)

    # embed the mobile params from the harness into the TestThread
    mobileArgs = kwargs.get('mobileArgs')
    for key in mobileArgs:
        setattr(self, key, mobileArgs[key])
def init(self, XMLRoot=None):
    """Initialisation is called before populating with XML data.

    The basic implementation simply dynamically assigns variable names
    and their values, converted to the most sane type found.
    Reimplement as necessary."""
    if XMLRoot is not None:
        for prop in XMLRoot:
            setattr(self, prop.tag, Utility.convert(prop.text))
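
# Hedged usage sketch: given an ElementTree node, init() above turns each
# child tag into an attribute. `obj` stands for an assumed instance of the
# enclosing class, and Utility.convert is assumed to coerce the element
# text to int/float/bool where it can.
import xml.etree.ElementTree as ET

root = ET.fromstring("<obj><width>10</width><name>box</name></obj>")
obj.init(root)   # afterwards obj.width == 10 and obj.name == "box"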
def load_cascade_file(self, module_path, cascade_file_path):
    # Cache the OpenCV cascade on the class so it is only loaded once.
    if not hasattr(self.__class__, "cascade"):
        if isabs(cascade_file_path):
            cascade_file = cascade_file_path
        else:
            cascade_file = join(abspath(dirname(module_path)),
                                cascade_file_path)
        setattr(self.__class__, "cascade", cv.Load(cascade_file))
def setValues(self, **kargs):
    for k in kargs:
        if hasattr(self, k):
            setattr(self, k, float(kargs[k]))
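
# Hypothetical usage sketch for setValues above: only attributes that
# already exist on the object are touched, accepted values are coerced to
# float, and unknown keys are silently ignored. The Rect class is an
# assumption for illustration.
class Rect:
    width = 0.0
    setValues = setValues   # bind the module-level function as a method

r = Rect()
r.setValues(width="3.5", bogus=1)
print(r.width)              # 3.5
print(hasattr(r, "bogus"))  # False -- "bogus" was ignored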
def from_element(cls, elem):
    new_note = Note()
    for child_el in elem:
        if not child_el.tag:
            continue
        setattr(new_note, child_el.tag, child_el.text)
    return new_note
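
# Hedged usage sketch: from_element above copies each child element's tag
# and text onto a fresh Note; it is assumed to be a classmethod on a Note
# class that tolerates arbitrary attributes.
import xml.etree.ElementTree as ET

elem = ET.fromstring("<note><title>Hi</title><body>Text</body></note>")
note = Note.from_element(elem)   # note.title == "Hi", note.body == "Text"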
def create_or_update_by_guid(self, guid, **kwargs):
    """
    Look up a FeedItem by GUID, updating it if it exists, and creating
    it if it doesn't.

    We don't limit it by feed because an item could be in another feed if
    some feeds are themselves aggregators. That's also why we don't update
    the feed field if the feed item already exists.

    Returns the item.
    """
    try:
        item = self.get(guid=guid)
    except self.model.DoesNotExist:
        # Create a new item
        log.debug('Creating entry: %s', guid)
        kwargs['guid'] = guid
        item = self.create(**kwargs)
    else:
        log.debug('Updating entry: %s', guid)

        # Update an existing one.
        kwargs.pop('feed', None)

        # Don't update the date since most feeds get this wrong.
        kwargs.pop('date_modified')

        for k, v in kwargs.items():
            setattr(item, k, v)
        item.save()

    return item
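
# Hypothetical usage sketch, assuming this method lives on a Django
# manager exposed as FeedItem.objects: a second call with the same GUID
# updates the stored fields instead of creating a duplicate. Note that
# date_modified must be present in kwargs, since the method pops it
# unconditionally.
item = FeedItem.objects.create_or_update_by_guid(
    "urn:example:item-1",
    feed=feed, title="Hello", date_modified=now)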