def omni_thread_stop(self):
    try:
        delattr(self.target, "__omni_thread")
        del self.target._Thread__stop
    except AttributeError:
        pass
    self.target_stop()

def cleanup():
    for name in names:
        # TODO(dstanek): remove this 'if' statement once
        # load_backend in test_backend_ldap is only called once
        # per test
        if hasattr(self, name):
            delattr(self, name)

def _init_hook1(cls, cls_name, bases, dct):
    # cls is the class being created
    # cls.__dict__ is its current dict, which may contain inherited items
    # dct is the dict represented by exactly this class (no inheritance)

    # Get CSS from the class now
    CSS = dct.get('CSS', '')

    # Create corresponding class for JS
    if issubclass(cls, LocalComponent):
        cls._make_js_proxy_class(cls_name, bases, dct)
    elif issubclass(cls, ProxyComponent):
        cls._make_js_local_class(cls_name, bases, dct)
    else:  # pragma: no cover
        raise TypeError('Expected class to inherit from '
                        'LocalComponent or ProxyComponent.')

    # Write __jsmodule__; an optimization for our module/asset system
    cls.__jsmodule__ = get_mod_name(sys.modules[cls.__module__])
    cls.JS.__jsmodule__ = cls.__jsmodule__  # need it in JS too
    cls.JS.__module__ = cls.__module__

    # Set CSS
    cls.CSS = CSS
    try:
        delattr(cls.JS, 'CSS')
    except AttributeError:
        pass

def test_check_for_setup_error(self):
    mox = self.mox
    drv = self._driver
    drv._client = api.NaServer("127.0.0.1")
    drv._client.set_api_version(1, 9)
    required_flags = [
        'netapp_transport_type',
        'netapp_login',
        'netapp_password',
        'netapp_server_hostname',
        'netapp_server_port']

    # set required flags
    for flag in required_flags:
        setattr(drv.configuration, flag, None)
    # check exception raises when flags are not set
    self.assertRaises(exception.CinderException,
                      drv.check_for_setup_error)
    # set required flags
    for flag in required_flags:
        setattr(drv.configuration, flag, 'val')

    mox.ReplayAll()
    drv.check_for_setup_error()
    mox.VerifyAll()

    # restore initial FLAGS
    for flag in required_flags:
        delattr(drv.configuration, flag)

@contextlib.contextmanager
def patch(namespace, **values):
    """Patches `namespace`.`name` with `value` for (name, value) in values"""
    originals = {}

    if isinstance(namespace, LazyObject):
        if namespace._wrapped is None:
            namespace._setup()
        namespace = namespace._wrapped

    for (name, value) in values.iteritems():
        try:
            originals[name] = getattr(namespace, name)
        except AttributeError:
            originals[name] = NotImplemented
        if value is NotImplemented:
            if originals[name] is not NotImplemented:
                delattr(namespace, name)
        else:
            setattr(namespace, name, value)

    try:
        yield
    finally:
        for (name, original_value) in originals.iteritems():
            if original_value is NotImplemented:
                if values[name] is not NotImplemented:
                    delattr(namespace, name)
            else:
                setattr(namespace, name, original_value)

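# Hedged usage sketch of patch() above (illustrative, not from the source;
# assumes the module context of the snippet, e.g. its LazyObject import, and
# Python 2 semantics matching its iteritems calls): attributes are restored
# on exit, and attributes that did not exist before are deleted via delattr.
class _Settings(object):
    DEBUG = False

with patch(_Settings, DEBUG=True, EXTRA='x'):
    assert _Settings.DEBUG is True
    assert _Settings.EXTRA == 'x'
assert _Settings.DEBUG is False
assert not hasattr(_Settings, 'EXTRA')
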
def update(self, image_id, remove_props=None, **kwargs):
    """
    Update attributes of an image.

    :param image_id: ID of the image to modify.
    :param remove_props: List of property names to remove
    :param **kwargs: Image attribute names and their new values.
    """
    image = self.get(image_id)
    for (key, value) in kwargs.items():
        try:
            setattr(image, key, value)
        except warlock.InvalidOperation as e:
            raise TypeError(utils.exception_to_str(e))

    if remove_props is not None:
        cur_props = image.keys()
        new_props = kwargs.keys()
        # NOTE(esheffield): Only remove props that currently exist on the
        # image and are NOT in the properties being updated / added
        props_to_remove = set(cur_props).intersection(
            set(remove_props).difference(new_props))

        for key in props_to_remove:
            delattr(image, key)

    url = '/v2/images/%s' % image_id
    hdrs = {'Content-Type': 'application/openstack-images-v2.1-json-patch'}
    self.http_client.patch(url, headers=hdrs, data=image.patch)

    # NOTE(bcwaldon): calling image.patch doesn't clear the changes, so
    # we need to fetch the image again to get a clean history. This is
    # an obvious optimization for warlock
    return self.get(image_id)

def disconnect_handlers(self, obj):
    HANDLER_IDS = self.HANDLER_IDS
    if hasattr(obj, HANDLER_IDS):
        for l_id in getattr(obj, HANDLER_IDS):
            obj.disconnect(l_id)
        delattr(obj, HANDLER_IDS)

def f(cases):
    if isinstance(cases, _ParameterizedTestCaseBundle):
        # The input is a parameterized test case.
        cases = cases.cases
    else:
        # Input is a bare test case, i.e. not one generated from another
        # parameterize.
        cases = [_TestCaseTuple(cases, None, None)]

    generated_cases = []
    for klass, mod_name, cls_name in cases:
        if mod_name is not None:
            # The input is a parameterized test case.
            # Remove it from its module.
            delattr(sys.modules[mod_name], cls_name)
        else:
            # The input is a bare test case
            mod_name = klass.__module__

        # Generate parameterized test cases out of the input test case.
        l = _generate_test_cases(mod_name, klass, test_case_generator)
        generated_cases += l

    # Return the bundle of generated cases to allow repeated application of
    # parameterize decorators.
    return _ParameterizedTestCaseBundle(generated_cases)

def __init__(self, *pos, **kw):
    _orig_socket.__init__(self, *pos, **kw)
    self._savedmethods = dict()
    for name in self._savenames:
        self._savedmethods[name] = getattr(self, name)
        delattr(self, name)  # Allows normal overriding mechanism to work

def run(self):
    "Substitutes variables in a .in file"

    code = self.inputs[0].read()

    # replace all % by %% to prevent errors by % signs
    code = code.replace('%', '%%')

    # extract the vars foo into lst and replace @foo@ by %(foo)s
    lst = []

    def repl(match):
        g = match.group
        if g(1):
            lst.append(g(1))
            return "%%(%s)s" % g(1)
        return ''

    code = re_m4.sub(repl, code)

    try:
        d = self.generator.dct
    except AttributeError:
        d = {}
        for x in lst:
            tmp = getattr(self.generator, x, '') or \
                self.env.get_flat(x) or self.env.get_flat(x.upper())
            d[x] = str(tmp)

    self.outputs[0].write(code % d)
    self.generator.bld.raw_deps[self.uid()] = self.dep_vars = lst

    # make sure the signature is updated
    try:
        delattr(self, 'cache_sig')
    except AttributeError:
        pass

def test_get_available_capacity_with_df(self):
    """_get_available_capacity should calculate correct value."""
    mox = self._mox
    drv = self._driver

    df_avail = 1490560
    df_head = "Filesystem 1K-blocks Used Available Use% Mounted on\n"
    df_data = "glusterfs-host:/export 2620544 996864 %d 41%% /mnt" % df_avail
    df_output = df_head + df_data

    setattr(glusterfs.FLAGS, "glusterfs_disk_util", "df")

    mox.StubOutWithMock(drv, "_get_mount_point_for_share")
    drv._get_mount_point_for_share(self.TEST_EXPORT1).\
        AndReturn(self.TEST_MNT_POINT)

    mox.StubOutWithMock(drv, "_execute")
    drv._execute("df", "--portability", "--block-size", "1",
                 self.TEST_MNT_POINT,
                 run_as_root=True).AndReturn((df_output, None))

    mox.ReplayAll()

    self.assertEquals(df_avail,
                      drv._get_available_capacity(self.TEST_EXPORT1))

    mox.VerifyAll()

    delattr(glusterfs.FLAGS, "glusterfs_disk_util")

def wrapper(self, *args, **kw):
    counter_name = "__%s_wrapper_counter" % func.func_name
    bases = list(inspect.getmro(self.__class__))
    if lock:
        lock.acquire()
    try:
        counter = getattr(self, counter_name, 0) + 1
        setattr(self, counter_name, counter)
        if counter == 1 and pre_name:
            for base in bases:
                try:
                    # make sure we use the hook defined in base
                    base.__dict__[pre_name](self, *args, **kw)
                except KeyError:
                    pass
        output = func(self, *args, **kw)
        counter = getattr(self, counter_name)
        setattr(self, counter_name, counter - 1)
        if counter == 1:
            delattr(self, counter_name)
            if post_name:
                for base in bases:
                    try:
                        base.__dict__[post_name](self, *args, **kw)
                    except KeyError:
                        pass
    finally:
        if lock:
            lock.release()
    return output

def select_group(self, group):
    if self.__group == group:
        return

    if group:
        groups = [group] + [g for g in self.groups if g != group]
    else:
        groups = self.groups

    # clear dict and only keep some values we want unchanged
    if not self.__base_dict:
        self.__base_dict = self.__dict__.copy()
    else:
        self.__dict__ = self.__base_dict.copy()

    # updating
    for group_ in groups:
        group_.select_group(None)
        if group_.handlers:
            merge(self.handlers, group_.handlers)
        self.__inherits(self.__dict__, group_.__dict__)

    # some values that we must reset to their original state
    for key in ('synctrex', 'group', 'groups', 'children'):
        if key in self.__base_dict:
            setattr(self, key, self.__base_dict[key])
        elif hasattr(self, key):
            delattr(self, key)

    self.__group = group

def assign_bond_order1(self, atoms, bonds):
    """ """
    hyb_val = [0, 3, 2, 1]
    converter = TypeConverter("HYB")
    for a in atoms:
        hyb = converter.convert(a.babel_type, 'dummy')
        a._redo = hyb_val[int(hyb)]
        #print a.full_name(), a.babel_type, hyb, a._redo
    for b in bonds:
        # initialize bondOrder attribute
        if b.bondOrder is None:
            b.bondOrder = 1
        sum_code = b.atom1._redo + b.atom2._redo
        #print b, sum_code
        if sum_code == 6:
            b.bondOrder = 3
        elif sum_code == 4:
            b.bondOrder = 2
        else:
            b.bondOrder = 1
        if self.is_carboxyl(b):
            b.bondOrder = 2
        if b.bondOrder < 1 or b.bondOrder > 3:
            print "Bond %s is weird - Bond order is %d\n" % \
                  (b, b.bondOrder)
    self.check_for_conjugation(atoms)
    # cleanup
    for a in atoms:
        delattr(a, '_redo')

def remove_item(module, attr):
    NONE = object()
    olditem = getattr(module, attr, NONE)
    if olditem is NONE:
        return
    saved.setdefault(module.__name__, {}).setdefault(attr, olditem)
    delattr(module, attr)

def finish_init(self):
    import GeoIP
    if hasattr(self, '_t'):
        self._t.join()
        delattr(self, '_t')
    parent = self._parent
    geo_db_path = self.get_geodb_path()
    Geo = GeoIP.open(geo_db_path, GeoIP.GEOIP_STANDARD)
    self.geo = Geo
    self.set_has_tooltip(True)
    self._max_points = 200
    self._lasttime = 0.0
    self.context = None
    self.mapcontext = None
    self._mappixbuf = None
    self._selected = []
    self._current_text = ["", 0.0]
    self._stats = [0, 0, 0, 0, 0, 0, 0]
    self.width = self._pixbuf.get_width()
    self.height = self._pixbuf.get_height()
    self._min_x = 0
    self._max_x = self.width
    self._drawn_points = []
    self._lines = []
    self._frozenlines = []
    # self.set_size_request(self.width, self.height)
    self._points = []
    self._crosses = []
    self.connect("expose_event", self.expose)
    self.connect("query-tooltip", self.on_query_tooltip)
    if self.window:
        self.window.invalidate_rect(self.allocation, True)
    if not self._onattack:
        self.add_test_points()

def __init__(self, id, title='', file='', content_type='', precondition='',
             subject=(), description='', contributors=(), effective_date=None,
             expiration_date=None, format=None, language='en-US', rights=''):
    OFS.Image.File.__init__(self, id, title, file,
                            content_type, precondition)
    self._setId(id)
    delattr(self, '__name__')

    # If no file format has been passed in, rely on what OFS.Image.File
    # detected.
    if format is None:
        format = self.content_type

    DefaultDublinCoreImpl.__init__(self, title, subject, description,
                                   contributors, effective_date,
                                   expiration_date, format, language, rights)

def test_get_available_capacity_with_df(self):
    """_get_available_capacity should calculate correct value."""
    mox = self._mox
    drv = self._driver

    df_total_size = 2620544
    df_avail = 1490560
    df_head = 'Filesystem 1K-blocks Used Available Use% Mounted on\n'
    df_data = 'glusterfs-host:/export %d 996864 %d 41%% /mnt' % \
              (df_total_size, df_avail)
    df_output = df_head + df_data

    setattr(glusterfs.FLAGS, 'glusterfs_disk_util', 'df')

    mox.StubOutWithMock(drv, '_get_mount_point_for_share')
    drv._get_mount_point_for_share(self.TEST_EXPORT1).\
        AndReturn(self.TEST_MNT_POINT)

    mox.StubOutWithMock(drv, '_execute')
    drv._execute('df', '--portability', '--block-size', '1',
                 self.TEST_MNT_POINT,
                 run_as_root=True).AndReturn((df_output, None))

    mox.ReplayAll()

    self.assertEquals((df_avail, df_total_size),
                      drv._get_available_capacity(self.TEST_EXPORT1))

    mox.VerifyAll()

    delattr(glusterfs.FLAGS, 'glusterfs_disk_util')

@contextlib.contextmanager
def using_config(name, value, config=config):
    """using_config(name, value, config=chainer.config)

    Context manager to temporarily change the thread-local configuration.

    Args:
        name (str): Name of the configuration to change.
        value: Temporary value of the configuration entry.
        config (~chainer.configuration.LocalConfig): Configuration object.
            Chainer's thread-local configuration is used by default.

    .. seealso::
        :ref:`configuration`

    """
    if hasattr(config._local, name):
        old_value = getattr(config, name)
        setattr(config, name, value)
        try:
            yield
        finally:
            setattr(config, name, old_value)
    else:
        setattr(config, name, value)
        try:
            yield
        finally:
            delattr(config, name)

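# Hedged usage sketch (standard chainer idiom; the `chainer` import is
# assumed and not part of the snippet above): temporarily flip the
# thread-local 'train' flag, e.g. while evaluating a model.
import chainer

assert chainer.config.train          # True by default
with chainer.using_config('train', False):
    assert not chainer.config.train  # flipped inside the block only
assert chainer.config.train          # restored on exit
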
def prefetch_object(self, this, f, objid, *args, **kwds):
    import tornado.web

    # validate objid format
    # objid = kwds.get(OBJID_REGEX_GROUP_NAME)
    objid = str(objid)[:40]

    o = self.registry.get(objid)
    if o is None:
        raise tornado.web.HTTPError(404, 'No such Object')
    else:
        setattr(self.get_context(), OBJECT_ATTR, o)
        val = f(this, objid, *args, **kwds)
        # release ref to object, to allow weakrefs
        # to be reaped
        setattr(self.get_context(), OBJECT_ATTR, None)

        # should have been put there by ExhibitionitRequestHandler
        # if Init was called
        from exhibitionist.log import getLogger
        if not hasattr(this, SUPER_CANARY):
            if not self.in_test:
                getLogger(__name__).error(
                    "RequestHandler did not call super().__init__ ")
        else:
            delattr(this, SUPER_CANARY)

        return val

def restore_template_loaders():
    """
    Restores the original template loaders after
    :meth:`setup_test_template_loader` has been run.
    """
    loader.template_source_loaders = getattr(loader, RESTORE_LOADERS_ATTR)
    delattr(loader, RESTORE_LOADERS_ATTR)

def undefine(self, f):
    if isinstance(f, basestring):
        name = f
    else:
        name = f.__name__
    del self._impls[name]
    delattr(self, name)

def make_collation_func(name, locale, numeric=True,
                        template='_sort_key_template', func='strcmp'):
    c = icu._icu.Collator(locale)
    cname = '%s_test_collator%s' % (name, template)
    setattr(icu, cname, c)
    c.numeric = numeric
    yield icu._make_func(getattr(icu, template), name, collator=cname,
                         collator_func='not_used_xxx', func=func)
    delattr(icu, cname)

def afterRetrieveModifier(self, obj, repo_clone, preserve=()):
    """If we find any LargeFilePlaceHolders, replace them with the values
    from the current working copy. If the values are missing from the
    working copy, remove them from the retrieved object."""
    # Search for fields stored via AnnotationStorage
    annotations = getattr(obj, '__annotations__', None)
    orig_annotations = getattr(repo_clone, '__annotations__', None)
    for storage, name, orig_val in self._getFieldValues(repo_clone):
        if isinstance(orig_val, LargeFilePlaceHolder):
            if storage == 'annotation':
                val = _empty_marker
                if annotations is not None:
                    val = annotations.get(name, _empty_marker)
                if val is not _empty_marker:
                    orig_annotations[name] = val
                else:
                    # remove the annotation if not present on the
                    # working copy, or if annotations are missing
                    # entirely
                    del orig_annotations[name]
            else:
                # attribute storage
                val = getattr(obj, name, _empty_marker)
                if val is not _empty_marker:
                    setattr(repo_clone, name, val)
                else:
                    delattr(repo_clone, name)
    return [], [], {}

def rewrite_for_all_browsers(test_class, browser_list, times=1, retry_count=1):
    """
    Magically make test methods for all desired browsers.

    Note that this method cannot contain the word 'test' or nose will
    decide it is worth running.
    """
    for name in [n for n in dir(test_class) if n.startswith('test_')]:
        test_method = getattr(test_class, name)
        for count in range(1, times + 1):
            for browser in browser_list:
                new_name = "{name}_{browser}".format(
                    name=name, browser=browser.safe_name())
                if times > 1:
                    new_name += "-{}".format(count)
                if retry_count <= 1:
                    new_function = lambda instance, browser_to_use=browser: \
                        test_method(instance, browser_to_use)
                else:
                    def auto_retry(instance, test_method, browser_to_use):
                        failure_type, failure_value, failure_traceback = \
                            None, None, None
                        for _ in xrange(retry_count):
                            if failure_type:
                                sys.stderr.write(
                                    "ignoring failure {} for {}\n".format(
                                        failure_type, test_method))
                            try:
                                test_method(instance, browser_to_use)
                                return  # test success means we return doing nothing
                            except:
                                failure_type, failure_value, failure_traceback = \
                                    sys.exc_info()
                                instance.tearDown()
                                instance.setUp()
                        # reaching here means repeated failure, so let's
                        # raise the last failure
                        raise failure_type, failure_value, failure_traceback

                    new_function = lambda instance, browser_to_use=browser: \
                        auto_retry(instance, test_method, browser_to_use)
                new_function.__name__ = new_name
                setattr(test_class, new_name, new_function)
        delattr(test_class, name)

def _clear_setup(self):
    if os.path.exists(CONF.working_directory):
        os.chmod(CONF.working_directory, PERM_ALL)
        shutil.rmtree(CONF.working_directory)
    if hasattr(mock.DummySMTP, 'exception'):
        delattr(mock.DummySMTP, 'exception')

def test_mixin_field_access():
    field_data = DictFieldData({
        'field_a': 5,
        'field_x': [1, 2, 3],
    })
    runtime = TestRuntime(Mock(), mixins=[TestSimpleMixin],
                          services={'field-data': field_data})

    field_tester = runtime.construct_xblock_from_class(FieldTester, Mock())

    assert_equals(5, field_tester.field_a)
    assert_equals(10, field_tester.field_b)
    assert_equals(42, field_tester.field_c)
    assert_equals([1, 2, 3], field_tester.field_x)
    assert_equals('default_value', field_tester.field_y)

    field_tester.field_x = ['a', 'b']
    field_tester.save()
    assert_equals(['a', 'b'],
                  field_tester._field_data.get(field_tester, 'field_x'))

    del field_tester.field_x
    assert_equals([], field_tester.field_x)
    assert_equals([1, 2, 3], field_tester.field_x_with_default)

    with assert_raises(AttributeError):
        getattr(field_tester, 'field_z')
    with assert_raises(AttributeError):
        delattr(field_tester, 'field_z')

    field_tester.field_z = 'foo'
    assert_equals('foo', field_tester.field_z)
    assert_false(field_tester._field_data.has(field_tester, 'field_z'))

def main(aeta_url, testname_prefix='', email=None, passin=False,
         save_auth=True):
    """Main function invoked if module is run from commandline.

    Args:
      aeta_url: URL where an aeta instance is available.
      testname_prefix: Optional name prefix for tests to be created.
      email: The email address to use for authentication, or None for the
          user to enter it when necessary.
      passin: Whether to read the password from stdin rather than echo-free
          input.
      save_auth: Whether to store authentication cookies in a file.
    """
    try:
        start_time = time.time()
        this_module = inspect.getmodule(main)
        testcases = create_test_cases(aeta_url, unittest.TestCase,
                                      testname_prefix, email=email,
                                      passin=passin, save_auth=save_auth)
        add_test_cases_to_module(testcases, this_module)
        suite = unittest.TestLoader().loadTestsFromModule(this_module)
        if not suite.countTestCases():
            error_msg = 'No tests '
            if testname_prefix:
                error_msg += 'with the prefix "%s" ' % testname_prefix
            error_msg += 'found at "%s"' % aeta_url
            print >> sys.stderr, error_msg
            sys.exit(1)
        _print_test_output(start_time, suite)
        for testcase in testcases:
            delattr(this_module, testcase.__name__)
    except AuthError, e:
        print >> sys.stderr, str(e)

def test_check_for_setup_error(self):
    mox = self.mox
    drv = self._driver
    required_flags = [
        'netapp_transport_type',
        'netapp_login',
        'netapp_password',
        'netapp_server_hostname',
        'netapp_server_port']

    # set required flags
    for flag in required_flags:
        setattr(drv.configuration, flag, None)
    # check exception raises when flags are not set
    self.assertRaises(exception.CinderException,
                      drv.check_for_setup_error)
    # set required flags
    for flag in required_flags:
        setattr(drv.configuration, flag, 'val')
    setattr(drv, 'ssc_enabled', False)

    mox.StubOutWithMock(netapp_nfs.NetAppDirectNfsDriver, '_check_flags')
    netapp_nfs.NetAppDirectNfsDriver._check_flags()

    mox.ReplayAll()
    drv.check_for_setup_error()
    mox.VerifyAll()

    # restore initial FLAGS
    for flag in required_flags:
        delattr(drv.configuration, flag)

def _validate_arguments(args, **_):
    for validator in getattr(args, '_validators', []):
        validator(args)
    try:
        delattr(args, '_validators')
    except AttributeError:
        pass

def load_entities(entities, *sources, **kwargs):
    """ Load every entity class of the given type found in the given source
        folders.

    :param sources:       paths (either with ~, relative or absolute) to
                          folders containing entity subclasses
    :param include_base:  include the base entities provided with the package
    :param select:        selected modules in the source folder
    :param exclude:       list of entity identifiers (in custom format, or
                          simply the entity class) to be excluded (useful when
                          including the base but not every entity is required)
    :param backref:       list of attrs to get entity's class to be bound to
    :param docstr_parser: user-defined docstring parser for populating metadata
    """
    global ENTITIES
    ENTITIES = [e.__name__ for e in entities]
    sources = list(sources)
    if kwargs.get("include_base", True):
        # this allows to use sploitkit.base for starting a project with a
        #  baseline of entities
        for n in ENTITIES:
            n = n.lower()
            for m in kwargs.get("select", {}).get(n, [""]):
                m = "../base/{}s/".format(n) + m + [".py", ""][m == ""]
                p = Path(__file__).parent.joinpath(m).resolve()
                if p.exists():
                    sources.insert(0, p)
    # load every single source (folder of modules or single module)
    for s in sources:
        if not s.exists():
            logger.debug("Source does not exist: %s" % s)
            continue
        # bind the source to the entity main class
        for e in entities:
            e._source = str(s)
        # now, it loads every Python module from the list of source folders ;
        #  when loading entity subclasses, these are registered to entity's
        #  registry for further use (i.e. from the console)
        logger.debug("Loading Python source: %s" % s)
        # important note: since version 1.23.17 of Tinyscript, support for
        #  cached compiled Python files has been added, for the PythonPath
        #  class, therefore influencing the location path of loaded entities
        #  (that is, adding __pycache__)
        PythonPath(s)
    for e in entities:
        tbr = []
        # clean up the temporary attribute
        if hasattr(e, "_source"):
            delattr(e, "_source")
        # remove proxy classes
        n = e.__name__.lower()
        for c in e.subclasses[:]:
            if len(c.__subclasses__()) > 0:
                getattr(e, "unregister_%s" % n, Entity.unregister_subclass)(c)
        # handle specific entities or sets of entities exclusions ; this will
        #  remove them from Entity's registries
        excludes = kwargs.get("exclude", {}).get(n)
        if excludes is not None:
            getattr(e, "unregister_%ss" % n,
                    Entity.unregister_subclasses)(*excludes)
        # handle conditional entities ; this will remove entities having a
        #  "condition" method returning False
        for c in e.subclasses[:]:
            # convention: conditional entities are unregistered and removed
            if hasattr(c, "condition") and not c().condition():
                getattr(e, "unregister_%s" % n, Entity.unregister_subclass)(c)
        # now populate metadata for each class
        for c in e.subclasses:
            set_metadata(c, kwargs.get("docstr_parser", lambda s: {}))
        # bind entity's subclasses to the given attributes for back-reference
        backrefs = kwargs.get("backref", {}).get(n)
        if backrefs is not None:
            for c in e.subclasses:
                for br in backrefs:
                    try:
                        a, bn = br  # [a]ttribute, [b]ackref [n]ame
                    except ValueError:
                        a, bn = None, br[0] if isinstance(br, tuple) else br
                    bc = list(filter(lambda _: _.__name__.lower() == bn,
                                     entities))[0]  # [b]ackref [c]lass
                    if a and getattr(c, a, None):
                        c = getattr(c, a)
                    setattr(c, bn, lambda: bc._instance)
    # then trigger garbage collection (for removed classes)
    gc.collect()

def __delete__(self, instance):
    """Descriptor protocol: deleter"""
    delattr(instance._configuration, self._backing_name)

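# Hedged, self-contained sketch of the pattern above (names are illustrative,
# not from the source): a descriptor whose __delete__ forwards deletion to a
# backing object, the same shape as the _configuration/_backing_name pair.
class Forwarded(object):
    def __init__(self, backing_name):
        self._backing_name = backing_name

    def __get__(self, instance, owner):
        if instance is None:
            return self
        return getattr(instance._configuration, self._backing_name)

    def __set__(self, instance, value):
        setattr(instance._configuration, self._backing_name, value)

    def __delete__(self, instance):
        delattr(instance._configuration, self._backing_name)

class _Config(object):
    pass

class Owner(object):
    timeout = Forwarded('timeout')

    def __init__(self):
        self._configuration = _Config()

o = Owner()
o.timeout = 30
assert o._configuration.timeout == 30
del o.timeout  # removes the backing attribute via the descriptor
assert not hasattr(o._configuration, 'timeout')
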
def fit(self, X, y, sample_weight=None):
    """Fit the calibrated model

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training data.

    y : array-like, shape (n_samples,)
        Target values.

    sample_weight : array-like, shape = [n_samples] or None
        Sample weights. If None, then samples are equally weighted.

    Returns
    -------
    self : object
        Returns an instance of self.
    """
    X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
                     force_all_finite=False)
    X, y = indexable(X, y)
    le = LabelBinarizer().fit(y)
    self.classes_ = le.classes_

    # Check that each cross-validation fold can have at least one
    # example per class
    n_folds = self.cv if isinstance(self.cv, int) \
        else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
    if n_folds and \
            np.any([np.sum(y == class_) < n_folds
                    for class_ in self.classes_]):
        raise ValueError("Requesting %d-fold cross-validation but provided"
                         " less than %d examples for at least one class."
                         % (n_folds, n_folds))

    self.calibrated_classifiers_ = []
    if self.base_estimator is None:
        # we want all classifiers that don't expose a random_state
        # to be deterministic (and we don't want to expose this one).
        base_estimator = LinearSVC(random_state=0)
    else:
        base_estimator = self.base_estimator

    if self.cv == "prefit":
        calibrated_classifier = _CalibratedClassifier(base_estimator,
                                                      method=self.method)
        if sample_weight is not None:
            calibrated_classifier.fit(X, y, sample_weight)
        else:
            calibrated_classifier.fit(X, y)
        self.calibrated_classifiers_.append(calibrated_classifier)
    else:
        cv = check_cv(self.cv, y, classifier=True)
        fit_parameters = signature(base_estimator.fit).parameters
        estimator_name = type(base_estimator).__name__
        if (sample_weight is not None
                and "sample_weight" not in fit_parameters):
            warnings.warn("%s does not support sample_weight. Samples"
                          " weights are only used for the calibration"
                          " itself." % estimator_name)
            base_estimator_sample_weight = None
        else:
            if sample_weight is not None:
                sample_weight = check_array(sample_weight, ensure_2d=False)
                check_consistent_length(y, sample_weight)
            base_estimator_sample_weight = sample_weight
        oob_decision_function_ = []
        for train, test in cv.split(X, y):
            oob = np.empty(shape=(len(y), len(self.classes_))) * np.nan
            this_estimator = clone(base_estimator)
            if base_estimator_sample_weight is not None:
                this_estimator.fit(
                    X[train], y[train],
                    sample_weight=base_estimator_sample_weight[train])
            else:
                this_estimator.fit(X[train], y[train])

            calibrated_classifier = _CalibratedClassifier(
                this_estimator, method=self.method, classes=self.classes_)
            if sample_weight is not None:
                calibrated_classifier.fit(X[test], y[test],
                                          sample_weight[test])
            else:
                calibrated_classifier.fit(X[test], y[test])
            # out-of-bag (oob) requires special treatment
            if hasattr(calibrated_classifier, 'oob_decision_function_'):
                oob[train] = calibrated_classifier.oob_decision_function_
                oob_decision_function_.append(oob)
                # save memory: remove the oob from the estimator
                delattr(calibrated_classifier, 'oob_decision_function_')
            self.calibrated_classifiers_.append(calibrated_classifier)

        if hasattr(self.base_estimator, 'oob_score') and \
                self.base_estimator.oob_score:
            # average over all oob for each sample
            setattr(self, 'oob_decision_function_',
                    np.nanmean(np.array(oob_decision_function_), axis=0))

    return self

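# Hedged usage sketch of the public scikit-learn API this fit() belongs to
# (CalibratedClassifierCV; the first positional argument was named
# `base_estimator` in the era of this snippet, so treat the exact keyword as
# version-dependent):
from sklearn.calibration import CalibratedClassifierCV
from sklearn.naive_bayes import GaussianNB
from sklearn.datasets import make_classification

X_demo, y_demo = make_classification(n_samples=200, random_state=0)
clf = CalibratedClassifierCV(GaussianNB(), method='sigmoid', cv=3)
clf.fit(X_demo, y_demo)
probs = clf.predict_proba(X_demo)  # calibrated class probabilities
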
def clean_url_prefixes():
    """Purge prefix cache."""
    if hasattr(_local, 'prefix'):
        delattr(_local, 'prefix')

def clear_settings(self):
    try:
        return delattr(settings, 'PAGINATION_LIMIT')
    except AttributeError:
        pass

def fdel(self: Any) -> None:
    delattr(self.fp, name)

def __init__(self, opt):
    super(Att2all2Model, self).__init__(opt)
    self.core = Att2all2Core(opt)
    delattr(self, 'fc_embed')
    self.fc_embed = lambda x: x

def __delitem__(self, name: str) -> None:
    delattr(self, name)

def parse(self, print_opt=True):
    ''' use update_fn() to do additional modifications on args
        before printing '''
    # initialize parser with basic options
    if not self.initialized:
        self.initialize()

    # parse options
    opt = self.parser.parse_args()

    # get arguments specified in config file
    if opt.config_file:
        data = yaml.load(opt.config_file, Loader=yaml.FullLoader)
        data = self._flatten_to_toplevel(data)
    else:
        data = {}

    # determine which options were specified
    # explicitly with command line args
    option_strings = {}
    for action_group in self.parser._action_groups:
        for action in action_group._group_actions:
            for option in action.option_strings:
                option_strings[option] = action.dest
    specified_options = set(
        [option_strings[x] for x in sys.argv if x in option_strings])

    # make hierarchical namespace wrt groups
    # positional and optional arguments in toplevel
    args = {}
    for group in self.parser._action_groups:
        # by default, take the result from argparse
        # unless was specified in config file and not in command line
        group_dict = {
            a.dest: data[a.dest]
            if a.dest in data and a.dest not in specified_options
            else getattr(opt, a.dest, None)
            for a in group._group_actions
        }
        if group.title == 'positional arguments' or \
                group.title == 'optional arguments':
            args.update(group_dict)
        else:
            args[group.title] = argparse.Namespace(**group_dict)

    opt = argparse.Namespace(**args)
    delattr(opt, 'config_file')

    # output directory
    if opt.name:
        output_dir = opt.name
    else:
        output_dir = '_'.join([opt.model, opt.transform, opt.walk_type,
                               'lr' + str(opt.learning_rate), opt.loss])
        if opt.model == 'biggan':
            subopt = opt.biggan
            if subopt.category:
                output_dir += '_cat{}'.format(subopt.category)
        elif opt.model == 'stylegan':
            subopt = opt.stylegan
            output_dir += '_{}'.format(subopt.dataset)
            output_dir += '_{}'.format(subopt.latent)
        elif opt.model == 'pgan':
            subopt = opt.pgan
            output_dir += '_{}'.format(subopt.dset)
        if opt.walk_type.startswith('NN'):
            subopt = opt.nn
            if subopt.eps:
                output_dir += '_eps{}'.format(subopt.eps)
            if subopt.num_steps:
                output_dir += '_nsteps{}'.format(subopt.num_steps)
        if opt.transform.startswith('color') and opt.color.channel is not None:
            output_dir += '_chn{}'.format(opt.color.channel)
        if opt.suffix:
            output_dir += opt.suffix
        if opt.prefix:
            output_dir = opt.prefix + output_dir

    opt.output_dir = os.path.join(opt.models_dir, output_dir)

    # write the configurations to disk
    if print_opt:
        self.print_options(opt)

    self.opt = opt
    return opt

def __delitem__(self, key):
    try:
        delattr(self, key)
    except AttributeError:
        raise KeyError(key)

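# Hedged, self-contained sketch (illustrative, not from the source) of the
# idiom above: a mapping-style object backed by instance attributes,
# translating AttributeError into the KeyError that dict users expect.
class AttrDict(object):
    def __setitem__(self, key, value):
        setattr(self, key, value)

    def __getitem__(self, key):
        try:
            return getattr(self, key)
        except AttributeError:
            raise KeyError(key)

    def __delitem__(self, key):
        try:
            delattr(self, key)
        except AttributeError:
            raise KeyError(key)

d = AttrDict()
d['answer'] = 42
del d['answer']
# deleting 'answer' again would now raise KeyError, not AttributeError
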
def __exit__(self, type, value, traceback):
    self.fh.close()
    # clean-up so that it does not exist
    delattr(self, 'fh')
    self._line = 0
    return False

def netcdf_file_update(instance, nc_res_file, txt_res_file, user):
    log = logging.getLogger()
    # check the instance type
    file_type = isinstance(instance, NetCDFLogicalFile)

    # get the file from irods to temp dir
    temp_nc_file = utils.get_file_from_irods(nc_res_file)
    nc_dataset = netCDF4.Dataset(temp_nc_file, 'a')

    try:
        # update title
        title = instance.dataset_name if file_type \
            else instance.metadata.title.value

        if title.lower() != 'untitled resource':
            if hasattr(nc_dataset, 'title'):
                delattr(nc_dataset, 'title')
            nc_dataset.title = title

        # update keywords
        keywords = instance.metadata.keywords if file_type \
            else [item.value for item in instance.metadata.subjects.all()]

        if hasattr(nc_dataset, 'keywords'):
            delattr(nc_dataset, 'keywords')

        if keywords:
            nc_dataset.keywords = ', '.join(keywords)

        # update key/value metadata
        extra_metadata_dict = instance.metadata.extra_metadata if file_type \
            else instance.extra_metadata

        if hasattr(nc_dataset, 'hs_extra_metadata'):
            delattr(nc_dataset, 'hs_extra_metadata')

        if extra_metadata_dict:
            extra_metadata = []
            for k, v in list(extra_metadata_dict.items()):
                extra_metadata.append("{}:{}".format(k, v))
            nc_dataset.hs_extra_metadata = ', '.join(extra_metadata)

        # update temporal coverage
        temporal_coverage = instance.metadata.temporal_coverage if file_type \
            else instance.metadata.coverages.all().filter(type='period').first()

        for attr_name in ['time_coverage_start', 'time_coverage_end']:
            if hasattr(nc_dataset, attr_name):
                delattr(nc_dataset, attr_name)

        if temporal_coverage:
            nc_dataset.time_coverage_start = temporal_coverage.value['start']
            nc_dataset.time_coverage_end = temporal_coverage.value['end']

        # update spatial coverage
        spatial_coverage = instance.metadata.spatial_coverage if file_type \
            else instance.metadata.coverages.all().filter(type='box').first()

        for attr_name in ['geospatial_lat_min', 'geospatial_lat_max',
                          'geospatial_lon_min', 'geospatial_lon_max']:
            if hasattr(nc_dataset, attr_name):
                delattr(nc_dataset, attr_name)

        if spatial_coverage:
            nc_dataset.geospatial_lat_min = spatial_coverage.value['southlimit']
            nc_dataset.geospatial_lat_max = spatial_coverage.value['northlimit']
            nc_dataset.geospatial_lon_min = spatial_coverage.value['westlimit']
            nc_dataset.geospatial_lon_max = spatial_coverage.value['eastlimit']

        # update variables
        if instance.metadata.variables.all():
            dataset_variables = nc_dataset.variables
            for variable in instance.metadata.variables.all():
                if variable.name in list(dataset_variables.keys()):
                    dataset_variable = dataset_variables[variable.name]

                    # update units
                    if hasattr(dataset_variable, 'units'):
                        delattr(dataset_variable, 'units')
                    if variable.unit != 'Unknown':
                        dataset_variable.setncattr('units', variable.unit)

                    # update long_name
                    if hasattr(dataset_variable, 'long_name'):
                        delattr(dataset_variable, 'long_name')
                    if variable.descriptive_name:
                        dataset_variable.setncattr('long_name',
                                                   variable.descriptive_name)

                    # update method
                    if hasattr(dataset_variable, 'comment'):
                        delattr(dataset_variable, 'comment')
                    if variable.method:
                        dataset_variable.setncattr('comment', variable.method)

                    # update missing value
                    if variable.missing_value:
                        if hasattr(dataset_variable, 'missing_value'):
                            missing_value = dataset_variable.missing_value
                            delattr(dataset_variable, 'missing_value')
                        else:
                            missing_value = ''
                        try:
                            dt = np.dtype(dataset_variable.datatype.name)
                            missing_value = np.fromstring(
                                variable.missing_value + ' ',
                                dtype=dt.type, sep=" ")
                        except:
                            pass

                        if missing_value:
                            dataset_variable.setncattr('missing_value',
                                                       missing_value)

        # Update metadata elements that only apply to a netCDF resource
        if not file_type:
            # update summary
            if hasattr(nc_dataset, 'summary'):
                delattr(nc_dataset, 'summary')
            if instance.metadata.description:
                nc_dataset.summary = instance.metadata.description.abstract

            # update contributor
            if hasattr(nc_dataset, 'contributor_name'):
                delattr(nc_dataset, 'contributor_name')

            contributor_list = instance.metadata.contributors.all()
            if contributor_list:
                res_contri_name = []
                for contributor in contributor_list:
                    res_contri_name.append(contributor.name)
                nc_dataset.contributor_name = ', '.join(res_contri_name)

            # update creator
            for attr_name in ['creator_name', 'creator_email', 'creator_url']:
                if hasattr(nc_dataset, attr_name):
                    delattr(nc_dataset, attr_name)

            creator = instance.metadata.creators.all().filter(order=1).first()
            if creator:
                nc_dataset.creator_name = creator.name if creator.name \
                    else creator.organization
                if creator.email:
                    nc_dataset.creator_email = creator.email
                if creator.description or creator.homepage:
                    nc_dataset.creator_url = creator.homepage \
                        if creator.homepage \
                        else 'https://www.hydroshare.org' + creator.description

            # update license
            if hasattr(nc_dataset, 'license'):
                delattr(nc_dataset, 'license')
            if instance.metadata.rights:
                nc_dataset.license = "{0} {1}".format(
                    instance.metadata.rights.statement,
                    instance.metadata.rights.url)

            # update reference
            if hasattr(nc_dataset, 'references'):
                delattr(nc_dataset, 'references')

            reference_list = instance.metadata.relations.all().filter(
                type='cites')
            if reference_list:
                res_meta_ref = []
                for reference in reference_list:
                    res_meta_ref.append(reference.value)
                nc_dataset.references = ' \n'.join(res_meta_ref)

            # update source
            if hasattr(nc_dataset, 'source'):
                delattr(nc_dataset, 'source')

            source_list = instance.metadata.sources.all()
            if source_list:
                res_meta_source = []
                for source in source_list:
                    res_meta_source.append(source.derived_from)
                nc_dataset.source = ' \n'.join(res_meta_source)

        # close nc dataset
        nc_dataset.close()

    except Exception as ex:
        log.exception(str(ex))
        if os.path.exists(temp_nc_file):
            shutil.rmtree(os.path.dirname(temp_nc_file))
        raise ex

    # create the ncdump text file
    nc_file_name = os.path.basename(temp_nc_file).split(".")[0]
    temp_text_file = create_header_info_txt_file(temp_nc_file, nc_file_name)

    # push the updated nc file and the txt file to iRODS
    utils.replace_resource_file_on_irods(temp_nc_file, nc_res_file, user)
    utils.replace_resource_file_on_irods(temp_text_file, txt_res_file, user)

    metadata = instance.metadata
    if file_type:
        instance.create_aggregation_xml_documents(create_map_xml=False)
    metadata.is_dirty = False
    metadata.save()

    # cleanup the temp dir
    if os.path.exists(temp_nc_file):
        shutil.rmtree(os.path.dirname(temp_nc_file))

def unMix(self, cla):
    for m in cla._mixed_:  # _mixed_ must exist, or there was no mixin
        delattr(cla, m)
    del cla._mixed_

def ddt(cls):
    """
    Class decorator for subclasses of ``unittest.TestCase``.

    Apply this decorator to the test case class, and then decorate test
    methods with ``@data``.

    For each method decorated with ``@data``, this will effectively create
    as many methods as data items are passed as parameters to ``@data``.

    The names of the test methods follow the pattern
    ``original_test_name_{ordinal}_{data}``. ``ordinal`` is the position of
    the data argument, starting with 1.

    For data we use a string representation of the data value converted into
    a valid python identifier. If ``data.__name__`` exists, we use that
    instead.

    For each method decorated with ``@file_data('test_data.json')``, the
    decorator will try to load the test_data.json file located relative to
    the python file containing the method that is decorated. It will, for
    each ``test_name`` key create as many methods in the list of values from
    the ``data`` key.
    """
    for name, func in list(cls.__dict__.items()):
        if hasattr(func, DATA_ATTR):
            for i, v in enumerate(getattr(func, DATA_ATTR)):
                test_name = mk_test_name(name, getattr(v, "__name__", v), i)
                try:
                    # when v is a dict, use the summary field ('概要') from
                    # the test-case table as the docstring
                    if type(v) is dict:
                        if '概要' in v.keys():
                            test_data_docstring = v['概要']
                        elif '任务' in v.keys():
                            # db test: fall back to the task field ('任务')
                            test_data_docstring = v['任务']
                        else:
                            test_data_docstring = \
                                _get_test_data_docstring(func, v)
                    else:
                        test_data_docstring = _get_test_data_docstring(func, v)
                except Exception:
                    test_data_docstring = _get_test_data_docstring(func, v)
                if hasattr(func, UNPACK_ATTR):
                    if isinstance(v, tuple) or isinstance(v, list):
                        add_test(cls, test_name, test_data_docstring,
                                 func, *v)
                    else:
                        # unpack dictionary
                        add_test(cls, test_name, test_data_docstring,
                                 func, **v)
                else:
                    add_test(cls, test_name, test_data_docstring, func, v)
            delattr(cls, name)
        elif hasattr(func, FILE_ATTR):
            file_attr = getattr(func, FILE_ATTR)
            process_file_data(cls, name, func, file_attr)
            delattr(cls, name)
    return cls

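# Hedged usage sketch of the stock ddt API this customized decorator extends
# (@ddt/@data/@unpack are real names from the ddt package; the docstring
# handling above is the local addition). The decorator removes test_add and
# generates one method per data item, named per mk_test_name.
import unittest
from ddt import ddt, data, unpack

@ddt
class AddTests(unittest.TestCase):
    @data((1, 2, 3), (2, 2, 4))
    @unpack
    def test_add(self, a, b, expected):
        self.assertEqual(a + b, expected)
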
class ProxyClient(object):
    """
    A proxy which represents the current client at all times.
    """
    # introspection support:
    __members__ = property(lambda x: x.__dir__())

    # Need to pretend to be the wrapped class, for the sake of objects that
    # care about this (especially in equality tests)
    __class__ = property(lambda x: get_client().__class__)
    __dict__ = property(lambda o: get_client().__dict__)
    __repr__ = lambda x: repr(get_client())
    __getattr__ = lambda x, o: getattr(get_client(), o)
    __setattr__ = lambda x, o, v: setattr(get_client(), o, v)
    __delattr__ = lambda x, o: delattr(get_client(), o)
    __lt__ = lambda x, o: get_client() < o
    __le__ = lambda x, o: get_client() <= o
    __eq__ = lambda x, o: get_client() == o
    __ne__ = lambda x, o: get_client() != o
    __gt__ = lambda x, o: get_client() > o
    __ge__ = lambda x, o: get_client() >= o
    if compat.PY2:
        __cmp__ = lambda x, o: cmp(get_client(), o)  # noqa F821
    __hash__ = lambda x: hash(get_client())
    # attributes are currently not callable
    # __call__ = lambda x, *a, **kw: get_client()(*a, **kw)
    __nonzero__ = lambda x: bool(get_client())
    __len__ = lambda x: len(get_client())
    __getitem__ = lambda x, i: get_client()[i]
    __iter__ = lambda x: iter(get_client())
    __contains__ = lambda x, i: i in get_client()
    __getslice__ = lambda x, i, j: get_client()[i:j]
    __add__ = lambda x, o: get_client() + o
    __sub__ = lambda x, o: get_client() - o
    __mul__ = lambda x, o: get_client() * o
    __floordiv__ = lambda x, o: get_client() // o
    __mod__ = lambda x, o: get_client() % o
    __divmod__ = lambda x, o: get_client().__divmod__(o)
    __pow__ = lambda x, o: get_client() ** o
    __lshift__ = lambda x, o: get_client() << o
    __rshift__ = lambda x, o: get_client() >> o
    __and__ = lambda x, o: get_client() & o
    __xor__ = lambda x, o: get_client() ^ o
    __or__ = lambda x, o: get_client() | o
    __div__ = lambda x, o: get_client().__div__(o)
    __truediv__ = lambda x, o: get_client().__truediv__(o)
    __neg__ = lambda x: -(get_client())
    __pos__ = lambda x: +(get_client())
    __abs__ = lambda x: abs(get_client())
    __invert__ = lambda x: ~(get_client())
    __complex__ = lambda x: complex(get_client())
    __int__ = lambda x: int(get_client())
    if compat.PY2:
        __long__ = lambda x: long(get_client())  # noqa F821
    __float__ = lambda x: float(get_client())
    __str__ = lambda x: str(get_client())
    __unicode__ = lambda x: compat.text_type(get_client())
    __oct__ = lambda x: oct(get_client())
    __hex__ = lambda x: hex(get_client())
    __index__ = lambda x: get_client().__index__()
    __coerce__ = lambda x, o: x.__coerce__(x, o)
    __enter__ = lambda x: x.__enter__()
    __exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)

def solve(self):
    assert not hasattr(self, "_is_solving")
    self._is_solving = True
    project(self.f, self.V, function=self._solution)
    delattr(self, "_is_solving")
    return self._solution

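# Hedged, self-contained sketch (illustrative, not from the source) of the
# guard idiom in solve() above: a transient attribute marks "currently
# executing" and delattr clears it, so the assert catches accidental
# re-entry. A try/finally is added here so the flag is also cleared on error,
# a hardening the original does not do.
class Solver(object):
    def solve(self):
        assert not hasattr(self, '_is_solving'), 're-entrant call'
        self._is_solving = True
        try:
            return self._do_work()
        finally:
            delattr(self, '_is_solving')

    def _do_work(self):
        return 42
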
def deleter(self):
    delattr(func(self), member_name)

def __new__(cls, name, bases, attrs):
    attrs = SortedDict(attrs)
    if 'Meta' in attrs and hasattr(attrs['Meta'], 'translate'):
        fields = attrs['Meta'].translate
        delattr(attrs['Meta'], 'translate')
    else:
        new_class = super(TransMeta, cls).__new__(cls, name, bases, attrs)
        # we inherit possible translatable_fields from superclasses
        abstract_model_bases = [base for base in bases
                                if hasattr(base, '_meta')
                                and base._meta.abstract]
        translatable_fields = []
        for base in abstract_model_bases:
            if hasattr(base._meta, 'translatable_fields'):
                translatable_fields.extend(
                    list(base._meta.translatable_fields))
        new_class._meta.translatable_fields = tuple(translatable_fields)
        return new_class

    if not isinstance(fields, tuple):
        raise ImproperlyConfigured(
            "Meta's translate attribute must be a tuple")

    default_language = fallback_language()

    for field in fields:
        if not field in attrs or \
                not isinstance(attrs[field], models.fields.Field):
            raise ImproperlyConfigured(
                "There is no field %(field)s in model %(name)s, "
                "as specified in Meta's translate attribute" %
                dict(field=field, name=name))
        original_attr = attrs[field]
        for lang in get_languages():
            lang_code = lang[LANGUAGE_CODE]
            lang_name = lang[LANGUAGE_NAME]
            lang_attr = copy.copy(original_attr)
            if type(original_attr) == models.ForeignKey:
                # Set the attributes now so we can use the column name later.
                original_attr.set_attributes_from_name(field)
                blank = (original_attr.blank
                         if lang_code == default_language else False)
                null = (original_attr.null
                        if lang_code == default_language else False)
                kwargs = {
                    'verbose_name': lang_attr.verbose_name,
                    'related_name': '%s_set_%s' % (name.lower(), lang_code),
                    'limit_choices_to': lang_attr.rel.limit_choices_to,
                    'parent_link': lang_attr.rel.parent_link,
                    'blank': blank,
                    'null': null,
                }
                lang_attr.__init__(lang_attr.rel.to,
                                   to_field=lang_attr.rel.field_name,
                                   **kwargs)
            lang_attr.original_fieldname = field
            lang_attr_name = get_real_fieldname(field, lang_code)
            if lang_code != default_language:
                # only will be required for default language
                if not lang_attr.null and lang_attr.default is NOT_PROVIDED:
                    lang_attr.null = True
                if not lang_attr.blank:
                    lang_attr.blank = True
            if hasattr(lang_attr, 'verbose_name'):
                lang_attr.verbose_name = LazyString(lang_attr.verbose_name,
                                                    _(lang_name))
            attrs[lang_attr_name] = lang_attr
        del attrs[field]
        # Set the column to __doc__ so we can access it later.
        attrs[field] = property(default_value(field))
        # (originally: property(default_value(field), doc=original_attr.column))

    new_class = super(TransMeta, cls).__new__(cls, name, bases, attrs)
    if hasattr(new_class, '_meta'):
        new_class._meta.translatable_fields = fields
    return new_class

def failed_init(self):
    if hasattr(self, '_t'):
        self._t.join()
        delattr(self, '_t')

def createTeacher(self):
    print("createTeacher")

def createStu():
    print("createStu")

manager = Manager("safly", "male", 123456, 123456)

print("----set an instance attribute------")
setattr(manager, "age", 20)
print(manager.age)

print("----delete an instance attribute------")
delattr(manager, "age")
# 'Manager' object has no attribute 'age'
# print(manager.age)

print("---an instance cannot delete a class attribute---")
setattr(Manager, "country", "china")
print(Manager.country)
# delattr(manager, "country")
# print(Manager.country)

print("----attach a method to the instance------")
def create_course(self):
    print('created a course')

setattr(manager, 'create_course', create_course)
manager.create_course(manager)

def getSolgemaBandeaux(self):
    bandeaux = self.getItems()
    bandeauxList = []
    base_ajax_content_load = self.request.get('ajax_content_load')
    base_ajax_load = self.request.get('ajax_load')
    setattr(self.request, 'ajax_content_load', 1)
    setattr(self.request, 'ajax_load', 1)
    for bandeau in bandeaux:
        bdict = {}
        if getattr(bandeau, 'image_sizes', None):
            height = str(bandeau.image_sizes['base'][1])
            url = str(bandeau.getPath() + '/image')
            try:
                title = str(bandeau.Description)
            except:
                try:
                    title = str(bandeau.Description.encode('utf-8'))
                except:
                    try:
                        title = str(bandeau.Description.decode('utf-8'))
                    except:
                        title = safe_unicode(bandeau.Description)
            if getattr(bandeau, 'bannerImageLink', ''):
                link = str(self.context.absolute_url() + '/resolveuid/'
                           + bandeau.bannerImageLink)
            else:
                link = None
            repeat = getattr(bandeau, 'backgroundRepeat', None)
            if not repeat:
                repeat = 'no-repeat'
            repeat = str(repeat)
            align = getattr(bandeau, 'backgroundAlign', None)
            if not align:
                align = 'left'
            align = str(align)
            cssClass = 'bandeau_image'
            cssStyle = 'position:relative;'
            if getattr(bandeau, 'backgroundExtend', False):
                cssClass += ' backgroundExtend'
            if getattr(bandeau, 'backgroundFixed', False):
                cssClass += ' backgroundFixed'
            if len(bandeauxList) == 0:
                cssClass += ' ' + bandeau.id.replace('.', '_') + \
                    ' carousel-banner-content selected'
                cssStyle += ' display:block;'
            else:
                cssClass += ' ' + bandeau.id.replace('.', '_') + \
                    ' carousel-banner-content'
                cssStyle += ' display:none;'
            if link:
                backgrounddiv = '<a style="display:block; height:%spx; width:100%%; background:transparent url(%s) %s %s top;" title="%s" class="%s" href="%s"></a>' % (
                    height, url, repeat, align, title, cssClass, link)
            else:
                backgrounddiv = '<div style="height:%spx; width:100%%; background:transparent url(%s) %s %s top;" title="%s" class="%s"></div>' % (
                    height, url, repeat, align, title, cssClass)
            # bandeauxList.append({'id': bandeau.id,
            #     'content': bandeau.tag(title=bandeau.Description())})
            bdict = {'id': bandeau.id,
                     'content': backgrounddiv,
                     'cssClass': cssClass,
                     'cssStyle': cssStyle,
                     'url': url,
                     'link': link,
                     'align': align,
                     'repeat': repeat}
        else:
            bandeau = bandeau.getObject()
            if (has_dx and IImage.providedBy(bandeau)) or \
                    hasattr(bandeau, 'tag'):
                if hasattr(bandeau, 'getHeight'):
                    height = bandeau.getHeight()
                else:
                    height = ImageScaling(bandeau, self.request).scale().height
                if has_dx and IImage.providedBy(bandeau):
                    url = bandeau.absolute_url()
                else:
                    url = str(bandeau.absolute_url() + '/image')
                title = bandeau.title
                bkg = IBackgroundContent(bandeau, None)
                repeat = getattr(bkg, 'backgroundRepeat', None)
                if not repeat:
                    repeat = 'no-repeat'
                align = getattr(bkg, 'backgroundAlign', None)
                if not align:
                    align = 'left'
                align = str(align)
                cssClass = 'bandeau_image'
                cssStyle = 'position:relative;'
                if getattr(bkg, 'backgroundExtend', False):
                    cssClass += ' backgroundExtend'
                if getattr(bkg, 'backgroundFixed', False):
                    cssClass += ' backgroundFixed'
                if len(bandeauxList) == 0:
                    cssClass += ' ' + bandeau.id.replace('.', '_') + \
                        ' carousel-banner-content selected'
                    cssStyle += ' display:block;'
                else:
                    cssClass += ' ' + bandeau.id.replace('.', '_') + \
                        ' carousel-banner-content'
                    cssStyle += ' display:none;'
                if getattr(bkg, 'bannerImageLink', ''):
                    link = str(self.context.absolute_url() + '/resolveuid/'
                               + bkg.bannerImageLink)
                else:
                    link = None
                if link:
                    backgrounddiv = '<a style="display:block; height:%spx; width:100%%; background:transparent url(%s) %s %s top;" title="%s" class="%s" href="%s"></a>' % (
                        height, url, repeat, align, title, cssClass, link)
                else:
                    backgrounddiv = '<div style="height:%spx; width:100%%; background:transparent url(%s) %s %s top;" title="%s" class="%s"></div>' % (
                        height, url, repeat, align, title, cssClass)
                bdict = {'id': bandeau.id,
                         'content': backgrounddiv,
                         'cssClass': cssClass,
                         'cssStyle': cssStyle,
                         'url': url,
                         'link': link,
                         'align': align,
                         'repeat': repeat}
            elif has_dx and IDocument.providedBy(bandeau):
                bdict['id'] = bandeau.id
                bdict['content'] = bandeau.text and bandeau.text.raw or ''
            elif hasattr(bandeau, 'getText') and \
                    not bandeau.portal_type in ['Topic', 'Collection']:
                try:
                    bdict['id'] = bandeau.id
                    bdict['content'] = bandeau.getText()
                except:
                    raise ValueError('error with: ' + str(bandeau))
            elif bandeau.portal_type == 'Collage':
                bdict['id'] = bandeau.id
                bdict['content'] = ObjectView(bandeau, self.request,
                                              'collage_renderer.pt')
            elif bandeau.portal_type == 'FlashMovie':
                bdict['id'] = bandeau.id
                bdict['content'] = ObjectView(
                    bandeau, self.request, 'flashmovie_macro_flashobject.pt')
            elif bandeau.portal_type == 'Folder':
                bdict['id'] = bandeau.id
                bdict['content'] = ObjectView(bandeau, self.request,
                                              'folder_renderer.pt')
            else:
                bdict['id'] = bandeau.id
                bdict['content'] = bandeau()
        bandeauxList.append(bdict)
    if not base_ajax_content_load:
        delattr(self.request, 'ajax_content_load')
    if not base_ajax_load:
        delattr(self.request, 'ajax_load')
    return bandeauxList

def execute_tileables(self, tileables, fetch=True, n_parallel=None,
                      n_thread=None, print_progress=False, mock=False,
                      compose=True, name=None):
    # shallow copy chunk_result, prevent from any chunk key decref
    chunk_result = self._chunk_result.copy()

    tileables = [tileable.data if hasattr(tileable, 'data') else tileable
                 for tileable in tileables]
    tileable_keys = [t.key for t in tileables]
    tileable_keys_set = set(tileable_keys)

    result_keys = []
    to_release_keys = set()
    tileable_data_to_concat_keys = weakref.WeakKeyDictionary()
    tileable_data_to_chunks = weakref.WeakKeyDictionary()
    node_to_fetch = weakref.WeakKeyDictionary()

    def _generate_fetch_tileable(node):
        # Attach chunks to fetch tileables to skip tile.
        if isinstance(node.op, Fetch) and node.key in self.stored_tileables:
            tiled = self.stored_tileables[node.key][0]
            node._chunks = tiled.chunks
            node.nsplits = tiled.nsplits
            for param, v in tiled.params.items():
                setattr(node, '_' + param, v)
        return node

    def _generate_fetch_if_executed(nd):
        # node processor that if the node is executed
        # replace it with a fetch node
        _to_fetch = node_to_fetch  # noqa: F821
        if nd.key not in chunk_result:
            return nd
        if nd in _to_fetch:
            return _to_fetch[nd]
        fn = build_fetch(nd).data
        _to_fetch[nd] = fn
        return fn

    def _on_tile_success(before_tile_data, after_tile_data):
        if before_tile_data.key not in tileable_keys_set:
            return after_tile_data

        tile_chunk_keys = [c.key for c in after_tile_data.chunks]
        result_keys.extend(tile_chunk_keys)
        tileable_data_to_chunks[before_tile_data] = \
            [build_fetch(c) for c in after_tile_data.chunks]
        if not fetch:
            pass
        elif len(after_tile_data.chunks) > 1:
            # need to fetch data and chunks more than 1,
            # we concatenate them into 1
            after_tile_data = after_tile_data.op.concat_tileable_chunks(
                after_tile_data)
            chunk = after_tile_data.chunks[0]
            result_keys.append(chunk.key)
            tileable_data_to_concat_keys[before_tile_data] = chunk.key
            # after return the data to user, we release the reference
            to_release_keys.add(chunk.key)
        else:
            tileable_data_to_concat_keys[before_tile_data] = \
                after_tile_data.chunks[0].key
        return after_tile_data

    def _get_tileable_graph_builder(**kwargs):
        if options.optimize_tileable_graph:
            return OptimizeIntegratedTileableGraphBuilder(**kwargs)
        else:
            return TileableGraphBuilder(**kwargs)

    # As the chunk_result is copied, we cannot use the original context any
    # more, and if `chunk_result` is a LocalContext, it's copied into a
    # LocalContext as well, thus here just to make sure the new context is
    # entered
    with self._gen_local_context(chunk_result):
        # build tileable graph
        tileable_graph_builder = _get_tileable_graph_builder(
            node_processor=_generate_fetch_tileable)
        tileable_graph = tileable_graph_builder.build(tileables)
        chunk_graph_builder = IterativeChunkGraphBuilder(
            graph_cls=DAG, node_processor=_generate_fetch_if_executed,
            compose=False, on_tile_success=_on_tile_success)
        intermediate_result_keys = set()
        while True:
            # build chunk graph, tile will be done during building
            chunk_graph = chunk_graph_builder.build(
                tileables, tileable_graph=tileable_graph)
            tileable_graph = chunk_graph_builder.prev_tileable_graph
            temp_result_keys = set(result_keys)
            if not chunk_graph_builder.done:
                # add temporary chunks keys into result keys
                for interrupted_op in chunk_graph_builder.interrupted_ops:
                    for inp in interrupted_op.inputs:
                        if inp.op not in chunk_graph_builder.interrupted_ops:
                            for n in get_tiled(inp).chunks:
                                temp_result_keys.add(n.key)
            else:
                # if done, prune chunk graph
                prune_chunk_graph(chunk_graph, temp_result_keys)
            # compose
            if compose:
                chunk_graph.compose(list(temp_result_keys))
            # execute chunk graph
            self.execute_graph(chunk_graph, list(temp_result_keys),
                               n_parallel=n_parallel or n_thread,
                               print_progress=print_progress, mock=mock,
                               chunk_result=chunk_result)

            # update shape of tileable and its chunks whatever it's
            # successful or not
            self._update_chunk_shape(chunk_graph, chunk_result)
            self._update_tileable_and_chunk_shape(
                tileable_graph, chunk_result,
                chunk_graph_builder.interrupted_ops)

            if chunk_graph_builder.done:
                if len(intermediate_result_keys) > 0:
                    # failed before
                    intermediate_to_release_keys = \
                        {k for k in intermediate_result_keys
                         if k not in result_keys and k in chunk_result}
                    to_release_keys.update(intermediate_to_release_keys)
                delattr(chunk_graph_builder, '_prev_tileable_graph')
                break
            else:
                intermediate_result_keys.update(temp_result_keys)
                # add the node that failed
                to_run_tileables = list(itertools.chain(
                    *(op.outputs
                      for op in chunk_graph_builder.interrupted_ops)))
                to_run_tileables_set = set(to_run_tileables)
                for op in chunk_graph_builder.interrupted_ops:
                    for inp in op.inputs:
                        if inp not in to_run_tileables_set:
                            to_run_tileables_set.add(inp)
                tileable_graph_builder = _get_tileable_graph_builder(
                    inputs_selector=lambda inps: [
                        inp for inp in inps if inp in to_run_tileables_set])
                tileable_graph = tileable_graph_builder.build(
                    to_run_tileables_set)

        if name is not None:
            if not isinstance(name, (list, tuple)):
                name = [name]
            self._tileable_names.update(zip(name, tileables))

        for tileable in tileables:
            fetch_tileable = build_fetch(
                get_tiled(tileable, mapping=tileable_optimized))
            fetch_tileable._key = tileable.key
            fetch_tileable._id = tileable.id
            if tileable.key in self.stored_tileables:
                if tileable.id not in [
                        t.id for t in self.stored_tileables[tileable.key]]:
                    self.stored_tileables[tileable.key].append(fetch_tileable)
            else:
                self.stored_tileables[tileable.key] = [fetch_tileable]

        try:
            if fetch:
                concat_keys = [
                    tileable_data_to_concat_keys[tileable_optimized.get(t, t)]
                    for t in tileables]
                return [chunk_result[k] for k in concat_keys]
            else:
                return
        finally:
            for to_release_key in to_release_keys:
                del chunk_result[to_release_key]
            self._chunk_result.update(
                {k: chunk_result[k] for k in result_keys
                 if k in chunk_result})

def VLM(conditions, settings, geometry):
    """Uses the vortex lattice method to compute the lift, induced drag
    and moment coefficients.

    Assumptions:
    None

    Source:
    1. Aerodynamics for Engineers, Sixth Edition, by John Bertin & Russell
       Cummings, pgs. 379-397 (literature)
    2. Low-Speed Aerodynamics, Second Edition, by Joseph Katz & Allen
       Plotkin, pgs. 331-338 (literature), 579-586 (Fortran code
       implementation)
    3. Yahyaoui, M., "Generalized Vortex Lattice Method for Predicting
       Characteristics of Wings with Flap and Aileron Deflection", World
       Academy of Science, Engineering and Technology, International Journal
       of Mechanical, Aerospace, Industrial and Mechatronics Engineering,
       Vol. 8, No. 10, 2014

    Inputs:
    geometry.
       wing.
         spans.projected                     [m]
         chords.root                         [m]
         chords.tip                          [m]
         sweeps.quarter_chord                [radians]
         taper                               [Unitless]
         twists.root                         [radians]
         twists.tip                          [radians]
         symmetric                           [Boolean]
         aspect_ratio                        [Unitless]
         areas.reference                     [m^2]
         vertical                            [Boolean]
         origin                              [m]
       fuselage.
         origin                              [m]
         width                               [m]
         heights.maximum                     [m]
         lengths.nose                        [m]
         lengths.tail                        [m]
         lengths.total                       [m]
         lengths.cabin                       [m]
         fineness.nose                       [Unitless]
         fineness.tail                       [Unitless]
    settings.number_panels_spanwise          [Unitless]
    settings.number_panels_chordwise         [Unitless]
    settings.use_surrogate                   [Boolean]
    settings.include_slipstream_effect       [Boolean]
    conditions.aerodynamics.angle_of_attack  [radians]
    conditions.freestream.mach_number        [Unitless]

    Outputs:
    CL        total lift coefficient              [Unitless]
    CDi       total induced drag coefficient      [Unitless]
    CM        moment coefficient                  [Unitless]
    CL_wing   lift coefficient per wing           [Unitless]
    CDi_wing  induced drag coefficient per wing   [Unitless]
    cl_y      spanwise lift coefficients          [Unitless]
    cdi_y     spanwise induced drag coefficients  [Unitless]
    CP        pressure coefficients               [Unitless]

    Properties Used:
    N/A
    """
    # unpack settings
    n_sw       = settings.number_panels_spanwise
    n_cw       = settings.number_panels_chordwise
    sur_flag   = settings.use_surrogate
    slipstream = settings.include_slipstream_effect
    Sref       = geometry.reference_area

    # define point about which moment coefficient is computed
    if 'main_wing' in geometry.wings:
        c_bar = geometry.wings['main_wing'].chords.mean_aerodynamic
        x_mac = (geometry.wings['main_wing'].aerodynamic_center[0]
                 + geometry.wings['main_wing'].origin[0][0])
    else:
        c_bar = 0.
        x_mac = 0.
        for wing in geometry.wings:
            if not wing.vertical:
                if c_bar <= wing.chords.mean_aerodynamic:
                    c_bar = wing.chords.mean_aerodynamic
                    x_mac = wing.aerodynamic_center[0] + wing.origin[0][0]

    x_cg = geometry.mass_properties.center_of_gravity[0][0]
    if x_cg is None:
        x_m = x_mac
    else:
        x_m = x_cg

    aoa  = conditions.aerodynamics.angle_of_attack  # angle of attack
    mach = conditions.freestream.mach_number        # mach number
    ones = np.atleast_2d(np.ones_like(aoa))

    # generate vortex distribution
    VD = compute_vortex_distribution(geometry, settings)

    # build induced velocity matrix, C_mn
    C_mn, DW_mn = compute_induced_velocity_matrix(VD, n_sw, n_cw, aoa, mach)
    MCM = VD.MCM

    # compute flow tangency conditions
    inv_root_beta           = np.zeros_like(mach)
    inv_root_beta[mach < 1] = 1 / np.sqrt(1 - mach[mach < 1]**2)
    inv_root_beta[mach > 1] = 1 / np.sqrt(mach[mach > 1]**2 - 1)
    if np.any(mach == 1):
        raise ValueError('Mach of 1 cannot be used in building '
                         'compressibility corrections.')
    inv_root_beta = np.atleast_2d(inv_root_beta)

    phi   = np.arctan((VD.ZBC - VD.ZAC) / (VD.YBC - VD.YAC)) * ones  # dihedral angle
    delta = np.arctan((VD.ZC - VD.ZCH)
                      / ((VD.XC - VD.XCH) * inv_root_beta))  # mean camber surface angle

    # build aerodynamic influence coefficient matrix
    # validated from book eqn 7.42
    A = np.multiply(C_mn[:, :, :, 0], np.atleast_3d(np.sin(delta) * np.cos(phi))) \
      + np.multiply(C_mn[:, :, :, 1], np.atleast_3d(np.cos(delta) * np.sin(phi))) \
      - np.multiply(C_mn[:, :, :, 2], np.atleast_3d(np.cos(phi) * np.cos(delta)))

    # validated from book eqn 7.42
    B = np.multiply(DW_mn[:, :, :, 0], np.atleast_3d(np.sin(delta) * np.cos(phi))) \
      + np.multiply(DW_mn[:, :, :, 1], np.atleast_3d(np.cos(delta) * np.sin(phi))) \
      - np.multiply(DW_mn[:, :, :, 2], np.atleast_3d(np.cos(phi) * np.cos(delta)))

    # build the RHS vector
    RHS = compute_RHS_matrix(n_sw, n_cw, delta, phi, conditions, geometry,
                             sur_flag, slipstream)

    # compute vortex strengths
    n_cp     = VD.n_cp
    gamma    = np.linalg.solve(A, RHS)
    gamma_3d = np.repeat(np.atleast_3d(gamma), n_cp, axis=2)
    u     = np.sum(C_mn[:, :, :, 0] * MCM[:, :, :, 0] * gamma_3d, axis=2)
    v     = np.sum(C_mn[:, :, :, 1] * MCM[:, :, :, 1] * gamma_3d, axis=2)
    w     = np.sum(C_mn[:, :, :, 2] * MCM[:, :, :, 2] * gamma_3d, axis=2)
    w_ind = -np.sum(B * MCM[:, :, :, 2] * gamma_3d, axis=2)

    # ---------------------------------------------------------------------
    # STEP 10: compute aerodynamic coefficients
    # ---------------------------------------------------------------------
    n_w        = VD.n_w
    CS         = VD.CS * ones
    wing_areas = np.array(VD.wing_areas)
    X_M        = np.ones(n_cp) * x_m * ones
    CL_wing    = np.zeros(n_w)
    CDi_wing   = np.zeros(n_w)
    Del_Y      = np.abs(VD.YB1 - VD.YA1) * ones

    # use split to divide u, w_ind, gamma and Del_Y into per-wing and
    # per-spanwise-strip arrays
    u_n_w        = np.array(np.array_split(u, n_w, axis=1))
    u_n_w_sw     = np.array(np.array_split(u, n_w * n_sw, axis=1))
    w_ind_n_w    = np.array(np.array_split(w_ind, n_w, axis=1))
    w_ind_n_w_sw = np.array(np.array_split(w_ind, n_w * n_sw, axis=1))
    gamma_n_w    = np.array(np.array_split(gamma, n_w, axis=1))
    gamma_n_w_sw = np.array(np.array_split(gamma, n_w * n_sw, axis=1))
    Del_Y_n_w    = np.array(np.array_split(Del_Y, n_w, axis=1))
    Del_Y_n_w_sw = np.array(np.array_split(Del_Y, n_w * n_sw, axis=1))

    # lift coefficient on each wing
    machw   = np.tile(mach, len(wing_areas))
    L_wing  = np.sum(np.multiply(u_n_w + 1, gamma_n_w * Del_Y_n_w), axis=2).T
    CL_wing = L_wing / (0.5 * wing_areas)
    CL_wing[machw > 1] = CL_wing[machw > 1] * 8  # supersonic lift off by a factor of 8

    # induced drag coefficient on each wing
    Di_wing  = np.sum(np.multiply(-w_ind_n_w, gamma_n_w * Del_Y_n_w), axis=2).T
    CDi_wing = Di_wing / (0.5 * wing_areas)
    CDi_wing[machw > 1] = CDi_wing[machw > 1] * 2  # supersonic drag off by a factor of 2

    # spanwise sets of cl and cdi
    cl_y  = np.sum(np.multiply(u_n_w_sw + 1, gamma_n_w_sw * Del_Y_n_w_sw), axis=2).T / CS
    cdi_y = np.sum(np.multiply(-w_ind_n_w_sw, gamma_n_w_sw * Del_Y_n_w_sw), axis=2).T / CS

    # total lift and lift coefficient
    L  = np.atleast_2d(np.sum(np.multiply(1 + u, gamma * Del_Y), axis=1)).T
    CL = L / (0.5 * Sref)  # validated from pages 402-404, Aerodynamics for Engineers
    CL[mach > 1] = CL[mach > 1] * 8  # supersonic lift off by a factor of 2^3

    # total induced drag and drag coefficient
    D   = -np.atleast_2d(np.sum(np.multiply(w_ind, gamma * Del_Y), axis=1)).T
    CDi = D / (0.5 * Sref)
    CDi[mach > 1] = CDi[mach > 1] * 2  # supersonic drag off by a factor of 2

    # pressure coefficient
    U_tot = np.sqrt((1 + u) * (1 + u) + v * v + w * w)
    CP    = 1 - U_tot * U_tot

    # moment coefficient
    CM = np.atleast_2d(np.sum(np.multiply(X_M - VD.XCH * ones, Del_Y * gamma),
                              axis=1) / (Sref * c_bar)).T

    # delete MCM from the VD data structure since it consumes memory
    delattr(VD, 'MCM')

    return CL, CDi, CM, CL_wing, CDi_wing, cl_y, cdi_y, CP
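# The closing delattr(VD, 'MCM') above is a memory-release idiom: dropping the
# attribute removes the reference so the large array can be garbage collected.
# A minimal sketch with a hypothetical container standing in for VD:
import numpy as np

class VortexDistribution(object):
    pass

VD = VortexDistribution()
VD.MCM = np.ones((8, 8, 8, 3))  # stand-in for the large induced-velocity factor
# ... use VD.MCM while solving ...
delattr(VD, 'MCM')              # release the reference once it is no longer needed
print(hasattr(VD, 'MCM'))       # False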
# -*- coding: utf-8 -*-

class A(object):
    pass

class B(A):
    pass

print(issubclass(A, A))  # True
print(issubclass(B, A))  # True

a = A()
print(isinstance(a, A))  # True
b = B()
print(isinstance(b, A))  # True

## object attribute helpers
print(hasattr(b, 'x'))                       # False
print(getattr(b, 'x', 'does not exist..'))   # falls back to the default
setattr(b, 'name', 'bnmyni')
print(getattr(b, 'name', 'does not exist..'))
delattr(b, 'name')
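# One behavior the snippet above does not show: deleting an attribute that is
# no longer present raises AttributeError. Continuing with the same `b`:
try:
    delattr(b, 'name')  # 'name' was already deleted above
except AttributeError as exc:
    print('second delete fails:', exc)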
def __delattr__(self, key):
    return delattr(self._getd(), key)
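# A minimal, hypothetical sketch of the delegation pattern above: the wrapper
# holds a target object and forwards attribute deletion to it, so `del` on
# the proxy mutates the wrapped object instead.
class AttrProxy(object):
    def __init__(self, target):
        self._target = target

    def _getd(self):
        return self._target

    def __delattr__(self, key):
        return delattr(self._getd(), key)

class Settings(object):
    pass

settings = Settings()
settings.debug = True
proxy = AttrProxy(settings)
del proxy.debug                    # forwarded to the wrapped Settings object
print(hasattr(settings, 'debug'))  # False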
def list_network_rules(client, resource_group_name, account_name):
    sa = client.get_properties(resource_group_name, account_name)
    rules = sa.network_rule_set
    # drop the scalar properties so only the rule collections are returned
    delattr(rules, 'bypass')
    delattr(rules, 'default_action')
    return rules
def unbind_schema(cls, schema):
    for field in schema.fields:
        delattr(field, 'id')
        if isinstance(field, StructType):
            cls.unbind_schema(field)
    return schema
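# Hypothetical usage sketch of the unbinding pattern above: recursively strip
# a per-field 'id' from a nested schema. Field/StructType/Schema here are
# stand-ins for the real types, not the original API.
class Field(object):
    def __init__(self, name):
        self.name = name
        self.id = object()

class StructType(Field):
    def __init__(self, name, fields):
        Field.__init__(self, name)
        self.fields = fields

class Schema(object):
    def __init__(self, fields):
        self.fields = fields

def unbind(schema):
    for field in schema.fields:
        delattr(field, 'id')
        if isinstance(field, StructType):
            unbind(field)  # a StructType also carries .fields
    return schema

s = unbind(Schema([Field('a'), StructType('b', [Field('c')])]))
print(hasattr(s.fields[0], 'id'))            # False
print(hasattr(s.fields[1].fields[0], 'id'))  # False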
def __init__(self, cursor, dictCursor):
    self.qList = {}
    dicts = self.__getQuestTables(cursor, dictCursor)

    with open("data/AreaTrigger.dbc.CSV", "r") as infile:
        a = infile.read()
    b = re.findall(
        "(.*?),(.*?),(.*?),(.*?),(.*?),(.*?),(.*?),(.*?),(.*?),(.*?),", a)
    areaTrigger = []
    for x in b:
        areaTrigger.append((int(x[0]), int(x[1]), float(x[2]), float(x[3]),
                            float(x[4]), float(x[5]), float(x[6]),
                            float(x[7]), float(x[8]), float(x[9])))

    print("Adding Quests...")
    count = len(dicts['quest_template'])
    for quest in dicts['quest_template']:
        self.__addQuest(quest, dicts, areaTrigger)
        if count % 500 == 0:
            print(str(count) + "...")
        count -= 1
    print("Done.")

    print("Sort quest chain information...")
    excluded = self.checkStartEnd()  # quests that have no start or end point
    for questId in self.qList:
        quest = self.qList[questId]
        if quest in excluded:
            continue
        if hasattr(quest, "ExclusiveGroup"):
            group = self.allQuests(ExclusiveGroup=quest.ExclusiveGroup)
            # filter instead of removing while iterating,
            # which would skip entries
            group = [q for q in group if q not in excluded]
            if quest.ExclusiveGroup > 0:
                for q in group:
                    if q.id != quest.id:
                        quest.addExclusive(q.id)
            else:  # quest.ExclusiveGroup < 0
                for q in group:
                    if q.id != quest.id:
                        quest.addGroup(q.id)
    for questId in self.qList:
        quest = self.qList[questId]
        if quest.ExclusiveTo == []:
            delattr(quest, "ExclusiveTo")
        if quest.InGroupWith == []:
            delattr(quest, "InGroupWith")
    for questId in self.qList:
        quest = self.qList[questId]
        if quest in excluded:
            continue
        if hasattr(quest, "PrevQuestId"):
            if quest.PrevQuestId > 0:
                # This should be the proper way to do it according to the
                # wiki, but because the core handles it differently the
                # following fragment is deactivated; left here in case I
                # want to debug the core/db later:
                """
                if hasattr(self.qList[quest.PrevQuestId], "InGroupWith"):
                    quest.addPreGroup(quest.PrevQuestId)
                else:  # has either ExclusiveTo or no ExclusiveGroup
                    quest.addPreSingle(quest.PrevQuestId)
                """
                # replacement matching how the core currently works:
                quest.addPreSingle(quest.PrevQuestId)
            else:  # quest.PrevQuestId < 0
                self.qList[abs(quest.PrevQuestId)].addSub(questId)
        if hasattr(quest, "NextQuestId"):
            if quest.NextQuestId > 0:
                postQuest = self.qList[quest.NextQuestId]
                if hasattr(quest, "InGroupWith"):
                    postQuest.addPreGroup(questId)
                    for questId2 in quest.InGroupWith:
                        postQuest.addPreGroup(questId2)
                else:
                    postQuest.addPreSingle(questId)
            else:  # quest.NextQuestId < 0
                quest.addSub(abs(quest.NextQuestId))
    for questId in self.qList:
        quest = self.qList[questId]
        if quest.PreQuestSingle == []:
            delattr(quest, "PreQuestSingle")
        if quest.PreQuestGroup == []:
            delattr(quest, "PreQuestGroup")
        if quest.SubQuests == []:
            delattr(quest, "SubQuests")
    print("Done.")
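# The pruning loops above follow an "empty means absent" convention: list
# attributes that stayed empty are deleted so later code can test presence
# with hasattr(). A minimal sketch of that convention (hypothetical Quest):
class Quest(object):
    def __init__(self):
        self.SubQuests = []

q = Quest()
if q.SubQuests == []:
    delattr(q, "SubQuests")     # absence now encodes "no sub-quests"
print(hasattr(q, "SubQuests"))  # False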
# FIXME: unfortunately, this doesn't work in PLY 3.4, so we need to duplicate
# the rule below.
STARTING_SYMBOL = 'Definitions'

# We ignore comments (and hence don't need 'Top') but the base parser
# preserves them.
# FIXME: Upstream: comments should be removed in the base parser.
REMOVED_RULES = ['Top',           # [0]
                 'Comments',      # [0.1]
                 'CommentsRest',  # [0.2]
                 ]

# Remove rules from the base class.
# FIXME: add a class method upstream: @classmethod IDLParser._RemoveRules
for rule in REMOVED_RULES:
    production_name = 'p_' + rule
    delattr(IDLParser, production_name)


class BlinkIDLParser(IDLParser):  # [1]
    # FIXME: Need to duplicate the rule for the starting symbol here, with a
    # line number *lower* than in the base parser (idl_parser.py).
    # This is a bug in PLY: it determines the starting symbol by lowest line
    # number. This can be overridden by the 'start' parameter, but as of
    # PLY 3.4 this doesn't work correctly.
    def p_Definitions(self, p):
        """Definitions : ExtendedAttributeList Definition Definitions
                       |"""
        if len(p) > 1:
            p[2].AddChildren(p[1])
            p[0] = ListFromConcat(p[2], p[3])
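# PLY discovers productions by scanning the parser class for methods named
# 'p_<Rule>', which is why delattr on the class is enough to drop a rule
# before the grammar tables are built. A minimal sketch of the same
# reflection pattern with a hypothetical Dispatcher (not real PLY):
class Dispatcher(object):
    def handlers(self):
        # handlers are found by name prefix, as PLY does with 'p_' methods
        return sorted(name for name in dir(self) if name.startswith('p_'))

    def p_Top(self, data):
        return ('Top', data)

    def p_Comments(self, data):
        return ('Comments', data)

# mirror the delattr(IDLParser, 'p_' + rule) loop above
delattr(Dispatcher, 'p_Comments')
print(Dispatcher().handlers())  # ['p_Top']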
from PythonTutorial.基础部分.classBasic import class_inheritence as cl

# check whether one class is a subclass of another
print(issubclass(cl.ElectricCar, cl.Car))

car_obj = cl.Car()
# check the type of an instance
print(isinstance(car_obj, cl.Car))

# check whether the object has an attribute
print(hasattr(car_obj, "color"))
# get an attribute
print(getattr(car_obj, "color"))
print(dir(car_obj))

# delete an attribute; this works when "color" is an instance attribute,
# and delattr returns None, so there is nothing useful to print
print(car_obj.condition)
delattr(car_obj, "color")
print(dir(car_obj))