def test_module_getter(self):
    """get_object returns module objects for plain and dotted module names."""
    self.assertEqual(get_object("unittest"), unittest)
    # Resolve before importing locally to show get_object does the import itself.
    imported = get_object("subprocess")
    import subprocess
    self.assertEqual(imported, subprocess)
    # A module reachable as an attribute of another module also resolves.
    self.assertEqual(get_object("tea.tests.test_utils.unittest"), unittest)
def get_commands(self, config):
    """Return the name -> descriptor mapping of commands, built lazily on first call.

    Each descriptor is a dict with an ``app`` (owning application name) and a
    ``klass`` (the command class). Built-in commands are registered first, then
    any modules listed in ``self._command_modules`` or the configuration are
    walked so their ``BaseCommand`` subclasses register themselves on import.
    """
    if self._commands is None:
        self._commands = {
            'alias': {'app': 'management', 'klass': AliasCommand},
            'config': {'app': 'management', 'klass': ConfigCommand},
        }
        # Add additional commands if available
        discovered = set(
            self._command_modules[:]
            + config.get('options.commands', [])
            + config.get('commands', [])
        )
        for dotted_path in discovered:
            package = get_object(dotted_path)
            # Importing every submodule makes its command classes visible
            # through BaseCommand.__subclasses__() below.
            for finder, submodule, _is_pkg in pkgutil.walk_packages(package.__path__):
                finder.find_module(submodule).load_module(submodule)
        for command in BaseCommand.__subclasses__():
            if command.__module__.startswith('tea.commander'):
                continue  # skip the framework's own base commands
            # NOTE(review): assumes command modules are exactly two segments
            # deep ("app.module"); deeper paths would raise ValueError — confirm.
            app, name = command.__module__.split('.')
            self._commands[name] = {'app': app, 'klass': command}
    return self._commands
def serve():
    """Serve models"""
    config = dg.Config()
    # A custom server class can be supplied via configuration; otherwise
    # fall back to the default dg.Server.
    klass_path = config.get('server.class', None)
    server = get_object(klass_path)() if klass_path else dg.Server()
    server.run()
def load_plugins(modules, cls=None, annotation=None, subclasses=False):
    """Discover plugin objects in the given modules.

    Args:
        modules: A module, dotted module path, or a list/tuple of either.
        cls: If given, plugins are the subclasses of this class.
        annotation: If given (and ``cls`` is not), plugins are the module-level
            objects whose ``_tea_ds_plugin`` attribute equals this value.
        subclasses: With ``cls``, also collect transitive subclasses.

    Returns:
        list: The discovered plugin classes/objects, or ``[]`` when neither
        ``cls`` nor ``annotation`` is provided (a warning is logged).
    """
    if cls is None and annotation is None:
        logger.warning(
            "Either the cls or the annotation has to be provided "
            "to the load_plugins function."
        )
        return []
    module_list = modules if isinstance(modules, (list, tuple)) else [modules]
    # Import every module (and all its submodules) so plugin definitions run
    # and register themselves as subclasses / annotated objects.
    loaded = []
    for entry in module_list:
        mod = entry if isinstance(entry, types.ModuleType) else get_object(entry)
        loaded.append(mod)
        for finder, sub_name, _is_pkg in pkgutil.walk_packages(mod.__path__):
            loaded.append(finder.find_module(sub_name).load_module(sub_name))
    # Class-based discovery takes precedence over annotation-based discovery.
    if cls is not None:
        if not subclasses:
            return cls.__subclasses__()
        found = []

        def _collect(base):
            # Depth-first, preorder walk of the subclass tree.
            for child in base.__subclasses__():
                found.append(child)
                _collect(child)

        _collect(cls)
        return found
    return [
        obj
        for mod in loaded
        for obj in mod.__dict__.values()
        if getattr(obj, "_tea_ds_plugin", None) == annotation
    ]
def load_plugins(modules, cls=None, annotation=None, subclasses=False):
    """Load plugins from modules, selecting by base class or annotation marker.

    ``modules`` may be a single module / dotted path or a list of them. With
    ``cls`` the plugins are its (optionally transitive) subclasses; with
    ``annotation`` they are objects carrying a matching ``_tea_ds_plugin``
    attribute. If neither selector is given, a warning is logged and an empty
    list is returned.
    """
    if cls is None and annotation is None:
        logger.warning('Either the cls or the annotation has to be provided '
                       'to the load_plugins function.')
        return []
    if not isinstance(modules, (list, tuple)):
        modules = [modules]
    # Import each module and all of its submodules so plugins register.
    loaded_modules = []
    for candidate in modules:
        module = (candidate if isinstance(candidate, types.ModuleType)
                  else get_object(candidate))
        loaded_modules.append(module)
        for finder, sub_name, _is_pkg in pkgutil.walk_packages(module.__path__):
            loaded_modules.append(
                finder.find_module(sub_name).load_module(sub_name))
    if cls is not None:
        if not subclasses:
            return cls.__subclasses__()
        # Iterative depth-first preorder traversal of the subclass tree.
        collected = []
        pending = list(cls.__subclasses__())
        while pending:
            klass = pending.pop(0)
            collected.append(klass)
            pending[0:0] = klass.__subclasses__()
        return collected
    items = []
    for module in loaded_modules:
        for obj in module.__dict__.values():
            if getattr(obj, '_tea_ds_plugin', None) == annotation:
                items.append(obj)
    return items
def test_module_object_getter(self):
    """get_object resolves dotted paths to attributes of modules and classes."""
    cases = [
        ('unittest.TestCase', unittest.TestCase),
        ('tea.utils.get_object', get_object),
        ('tea.tests.test_utils.Foo.foo', Foo.foo),
    ]
    for path, expected in cases:
        self.assertEqual(get_object(path), expected)
def test_module_getter(self):
    """Resolving a module name via get_object matches a plain import."""
    self.assertEqual(get_object('unittest'), unittest)
    resolved = get_object('subprocess')
    # Import locally afterwards: get_object must have imported it already.
    import subprocess
    self.assertEqual(resolved, subprocess)
    self.assertEqual(get_object('tea.tests.test_utils.unittest'), unittest)
def test_object_getter(self):
    """get_object with a root object resolves attribute chains relative to it."""
    self.assertEqual(get_object('foo', self), self.foo)
    self.assertEqual(get_object('foo.x', self), self.x)
    # The resolved attribute is a bound method; calling it yields self.x.
    bound = get_object('foo.foo', self)
    self.assertEqual(bound(), self.x)
def score(self, X, y, sample_weight=None, metrics=None):
    """Scores the model.

    If scoring function is defined in the configuration file, this
    function will use that scoring function, else:

    For classification:
        Returns the mean accuracy on the given test data and labels. In
        multi-label classification, this is the subset accuracy which is
        a harsh metric since you require for each sample that each label
        set be correctly predicted.

    For regression:
        Returns the coefficient of determination R^2 of the prediction.
        The coefficient R^2 is defined as (1 - u/v), where u is the
        residual sum of squares ((y_true - y_pred) ** 2).sum() and v is
        the total sum of squares ((y_true - y_true.mean()) ** 2).sum().
        The best possible score is 1.0 and it can be negative (because
        the model can be arbitrarily worse). A constant model that always
        predicts the expected value of y, disregarding the input
        features, would get a R^2 score of 0.0.

    Args:
        X (array-like, shape = (n_samples, n_features)): Test samples.
        y (array-like, shape = (n_samples) or (n_samples, n_outputs)):
            True labels for X.
        sample_weight (array-like, shape = [n_samples], optional):
            Sample weights.
        metrics: Optional dictionary of metrics functions to use instead
            of the selected ones for regression and classification.

    Returns:
        float: For classification: mean accuracy of self.predict(X) wrt. y.
            For regression: R^2 of self.predict(X) wrt. y.
    """
    # If metrics dictionary is passed in use it to calculate the metrics
    if metrics is not None:
        return {
            key: get_object(value)(y, self.predict(X))
            for key, value in metrics.items()
        }
    # If metrics are None try to get the scoring function from the
    # configuration file.
    # BUG FIX: was `self.conrig.get(...)` (typo), which raised
    # AttributeError on every call that reached this line.
    proba = self.config.get('metrics.proba', False)
    score = self.config.get('metrics.score', None)
    predict_func = self.predict_proba if proba else self.predict
    if score is not None:
        return get_object(score)(y, predict_func(X),
                                 sample_weight=sample_weight)
    # Finally try the default estimators for classification and regression
    estimator_type = getattr(self, '_estimator_type', None)
    if estimator_type == 'classifier':
        from sklearn.metrics import accuracy_score
        return accuracy_score(y, predict_func(X),
                              sample_weight=sample_weight)
    elif estimator_type == 'regressor':
        from sklearn.metrics import r2_score
        return r2_score(y, predict_func(X), sample_weight=sample_weight,
                        multioutput='variance_weighted')
    else:
        # Don't know how to score the model, just return 0
        return 0
def test_module_object_getter(self):
    """Dotted paths resolve through modules into classes, functions, methods."""
    for path, expected in (
        ("unittest.TestCase", unittest.TestCase),
        ("tea.utils.get_object", get_object),
        ("tea.tests.test_utils.Foo.foo", Foo.foo),
    ):
        self.assertEqual(get_object(path), expected)
def test_object_getter(self):
    """Attribute chains are resolved relative to an explicit root object."""
    root = self
    self.assertEqual(get_object("foo", root), self.foo)
    self.assertEqual(get_object("foo.x", root), self.x)
    # The final attribute here is callable; invoking it returns self.x.
    method = get_object("foo.foo", root)
    self.assertEqual(method(), self.x)