# Imports assumed for these tests (PyMVPA test-suite style; module path assumed):
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import ok_, assert_raises, assert_equal, assert_false
from mvpa.misc.attrmap import AttributeMap


def test_attrmap_conflicts():
    am_n = AttributeMap({'a': 1, 'b': 2, 'c': 1})
    am_t = AttributeMap({'a': 1, 'b': 2, 'c': 1}, collisions_resolution='tuple')
    am_l = AttributeMap({'a': 1, 'b': 2, 'c': 1}, collisions_resolution='lucky')
    q_f = ['a', 'b', 'a', 'c']
    # collisions should have no effect on forward mapping
    ok_(np.all(am_n.to_numeric(q_f) == am_t.to_numeric(q_f)))
    ok_(np.all(am_t.to_numeric(q_f) == am_l.to_numeric(q_f)))

    assert_raises(ValueError, am_n.to_literal, [2])
    r_t = am_t.to_literal([2, 1])
    r_l = am_l.to_literal([2, 1])
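# Illustrative sketch (not part of the test suite): how the three
# collisions_resolution modes behave when two literals ('a' and 'c' here)
# share the numeric value 1.  The exact return values are inferred from the
# assertions above, so treat them as assumptions.
am = AttributeMap({'a': 1, 'b': 2, 'c': 1})
# default mode: reverse mapping of a colliding value is ambiguous -> ValueError
# am.to_literal([1])                # raises ValueError
am = AttributeMap({'a': 1, 'b': 2, 'c': 1}, collisions_resolution='tuple')
print am.to_literal([1])            # presumably [('a', 'c')] -- literals grouped
am = AttributeMap({'a': 1, 'b': 2, 'c': 1}, collisions_resolution='lucky')
print am.to_literal([1])            # presumably ['a'] or ['c'] -- one picked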
    def _call(self, dataset):
        sens = super(self.__class__, self)._call(dataset)
        clf = self.clf
        targets_attr = clf.get_space()
        if targets_attr in sens.sa:
            # if labels are present -- transform them into meaningful tuples
            # (or not if just a single beast)
            am = AttributeMap(dict([(l, -1) for l in clf.neglabels] +
                                   [(l, +1) for l in clf.poslabels]))

            # XXX here we still can get a sensitivity per each label
            # (e.g. with SMLR as the slave clf), so I guess we should
            # tune up Multiclass...Analyzer to add an additional sa
            # And here we might need to check if asobjarray call is necessary
            # and should be actually done
            #asobjarray(
            sens.sa[targets_attr] = \
                am.to_literal(sens.sa[targets_attr].value, recurse=True)
        return sens
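# Hedged sketch of what the mapping above does (the 'cat'/'dog' labels are
# made up for illustration): each binary sub-classifier's negative labels map
# to -1 and positive labels to +1, and to_literal(..., recurse=True) walks
# nested containers, so a per-partition attribute of (-1, +1) tuples comes
# back as tuples of literal labels.
am = AttributeMap({'cat': -1, 'dog': +1})
print am.to_literal([(-1, +1), (+1, -1)], recurse=True)
# -> [('cat', 'dog'), ('dog', 'cat')]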
def test_attrmap():
    map_default = {'eins': 0, 'zwei': 2, 'sieben': 1}
    map_custom = {'eins': 11, 'zwei': 22, 'sieben': 33}
    literal = ['eins', 'zwei', 'sieben', 'eins', 'sieben', 'eins']
    literal_nonmatching = ['uno', 'dos', 'tres']
    num_default = [0, 2, 1, 0, 1, 0]
    num_custom = [11, 22, 33, 11, 33, 11]

    # no custom mapping given
    am = AttributeMap()
    assert_false(am)
    ok_(len(am) == 0)
    assert_array_equal(am.to_numeric(literal), num_default)
    assert_array_equal(am.to_literal(num_default), literal)
    ok_(am)
    ok_(len(am) == 3)

    #
    # Tests for recursive mapping + preserving datatype
    class myarray(np.ndarray):
        pass

    assert_raises(KeyError, am.to_literal, [(1, 2), 2, 0])
    literal_fancy = [(1, 2), 2, [0], np.array([0, 1]).view(myarray)]
    literal_fancy_tuple = tuple(literal_fancy)
    literal_fancy_array = np.array(literal_fancy, dtype=object)

    for l in (literal_fancy, literal_fancy_tuple, literal_fancy_array):
        res = am.to_literal(l, recurse=True)
        assert_equal(res[0], ('sieben', 'zwei'))
        assert_equal(res[1], 'zwei')
        assert_equal(res[2], ['eins'])
        assert_array_equal(res[3], ['eins', 'sieben'])

        # types of result and subsequences should be preserved
        ok_(isinstance(res, l.__class__))
        ok_(isinstance(res[0], tuple))
        ok_(isinstance(res[1], str))
        ok_(isinstance(res[2], list))
        ok_(isinstance(res[3], myarray))

    # yet another example
    a = np.empty(1, dtype=object)
    a[0] = (0, 1)
    res = am.to_literal(a, recurse=True)
    ok_(isinstance(res[0], tuple))

    #
    # with custom mapping
    am = AttributeMap(map=map_custom)
    assert_array_equal(am.to_numeric(literal), num_custom)
    assert_array_equal(am.to_literal(num_custom), literal)

    # if already numeric, nothing is mapped
    assert_array_equal(am.to_numeric(num_custom), num_custom)
    # even if the map doesn't fit
    assert_array_equal(am.to_numeric(num_default), num_default)

    # need to_numeric first
    am = AttributeMap()
    assert_raises(RuntimeError, am.to_literal, [1, 2, 3])
    # stupid args
    assert_raises(ValueError, AttributeMap, map=num_custom)

    # map mismatch
    am = AttributeMap(map=map_custom)
    if __debug__:
        # checked only in __debug__
        assert_raises(KeyError, am.to_numeric, literal_nonmatching)
    # needs reset and should work afterwards
    am.clear()
    assert_array_equal(am.to_numeric(literal_nonmatching), [2, 0, 1])
    # and now reverse
    am = AttributeMap(map=map_custom)
    assert_raises(KeyError, am.to_literal, num_default)

    # dict-like interface
    am = AttributeMap()
    ok_([(k, v) for k, v in am.iteritems()] == [])
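# Minimal usage sketch distilled from the test above.  The alphabetic
# assignment of numeric labels is inferred from map_default in the test
# ('eins'->0, 'sieben'->1, 'zwei'->2), so treat it as an assumption.
am = AttributeMap()                      # mapping is built lazily on first use
print am.to_numeric(['b', 'a', 'b'])     # -> [1, 0, 1] (alphabetic order)
print am.to_literal([0, 1])              # -> ['a', 'b']
am = AttributeMap(map={'a': 10, 'b': 20})
print am.to_numeric(['a', 'b'])          # -> [10, 20]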
class Classifier(ClassWithCollections):
    """Abstract classifier class to be inherited by all classifiers
    """

    # Kept separate from doc to not pollute help(clf), especially if
    # we include help for the parent class
    _DEV__doc__ = """
    Required behavior:

    For every classifier it has to be possible to be instantiated without
    having to specify the training pattern.

    Repeated calls to the train() method with different training data have to
    result in a valid classifier, trained for the particular dataset.

    It must be possible to specify all classifier parameters as keyword
    arguments to the constructor.

    Recommended behavior:

    Derived classifiers should provide access to *estimates* -- i.e. that
    information that is finally used to determine the predicted class label.

    Michael: Maybe it works well if each classifier provides an 'estimates'
    state member. This variable is a list as long as, and in the same order
    as, Dataset.uniquetargets (training data). Each item in the list
    corresponds to the likelihood of a sample to belong to the respective
    class. However, the semantics might differ between classifiers, e.g. kNN
    would probably store distances to class-neighbors, where PLR would store
    the raw function value of the logistic function. So in the case of kNN
    low is predictive and for PLR high is predictive. Don't know if there is
    the need to unify that.

    As the storage and/or computation of this information might be demanding
    its collection should be switchable and off by default.

    Nomenclature
     * predictions : result of the last call to .predict()
     * estimates : might be different from predictions if a classifier's
       predict() makes a decision based on some internal value such as
       probability or a distance.
    """
    # Dict that contains the parameters of a classifier.
    # This shall provide an interface to plug generic parameter optimizers
    # on all classifiers (e.g. grid- or line-search optimizer)
    # A dictionary is used because Michael thinks that access by name is nicer.
    # Additionally, Michael thinks ATM that additional information might be
    # necessary in some situations (e.g. reasonably predefined parameter range,
    # minimal iteration stepsize, ...), therefore the value to each key should
    # also be a dict or we should use mvpa.misc.param.Parameter...
    trained_targets = ConditionalAttribute(enabled=True,
        doc="Set of unique targets it has been trained on")

    trained_nsamples = ConditionalAttribute(enabled=True,
        doc="Number of samples it has been trained on")

    trained_dataset = ConditionalAttribute(enabled=False,
        doc="The dataset it has been trained on")

    training_confusion = ConditionalAttribute(enabled=False,
        doc="Confusion matrix of learning performance")

    predictions = ConditionalAttribute(enabled=True,
        doc="Most recent set of predictions")

    estimates = ConditionalAttribute(enabled=True,
        doc="Internal classifier estimates the most recent "
            "predictions are based on")

    training_time = ConditionalAttribute(enabled=True,
        doc="Time (in seconds) it took the classifier to train")

    predicting_time = ConditionalAttribute(enabled=True,
        doc="Time (in seconds) it took the classifier to predict")

    feature_ids = ConditionalAttribute(enabled=False,
        doc="Feature IDs which were used for the actual training.")

    __tags__ = []
    """Describes some specifics about the classifier -- e.g. whether it is
    doing regression..."""

    targets_attr = Parameter('targets', allowedtype='str',  # ro=True,
        doc="""Which samples attribute to use as targets.""",
        index=999)

    # TODO: make it available only for actually retrainable classifiers
    retrainable = Parameter(False, allowedtype='bool',
        doc="""Whether to enable retraining for a 'retrainable' classifier.""",
        index=1002)


    def __init__(self, **kwargs):
        ClassWithCollections.__init__(self, **kwargs)

        # XXX
        # the place to map literal to numerical labels (and back)
        # this needs to be in the base class, since some classifiers also
        # have this nasty 'regression' mode, and the code in this class
        # needs to deal with converting the regression output into discrete
        # labels
        # however, preferably the mapping should be kept in the respective
        # low-level implementations that need it
        self._attrmap = AttributeMap()

        self.__trainednfeatures = None
        """Stores number of features for which classifier was trained.
        If None -- it wasn't trained at all"""

        self._set_retrainable(self.params.retrainable, force=True)

        # deprecate
        #self.__trainedidhash = None
        #"""Stores id of the dataset on which it was trained to signal
        #in trained() if it was trained already on the same dataset"""


    @property
    def __summary_class__(self):
        if 'regression' in self.__tags__:
            return RegressionStatistics
        else:
            return ConfusionMatrix

    @property
    def __is_regression__(self):
        return 'regression' in self.__tags__

    def __str__(self):
        if __debug__ and 'CLF_' in debug.active:
            return "%s / %s" % (repr(self), super(Classifier, self).__str__())
        else:
            return repr(self)

    def __repr__(self, prefixes=[]):
        return super(Classifier, self).__repr__(prefixes=prefixes)


    def _pretrain(self, dataset):
        """Functionality prior to training
        """
        # So we reset all conditional attributes and maybe free up some
        # memory explicitly
        params = self.params
        if not params.retrainable:
            self.untrain()
        else:
            # just reset the ca, do not untrain
            self.ca.reset()
            if not self.__changedData_isset:
                self.__reset_changed_data()
                _changedData = self._changedData
                __idhashes = self.__idhashes
                __invalidatedChangedData = self.__invalidatedChangedData

                # if we don't know what was changed we need to figure
                # it out
                if __debug__:
                    debug('CLF_', "IDHashes are %s" % (__idhashes))

                # Look at the data if any was changed
                for key, data_ in (('traindata', dataset.samples),
                                   ('targets',
                                    dataset.sa[params.targets_attr].value)):
                    _changedData[key] = self.__was_data_changed(key, data_)
                    # if those idhashes were invalidated by retraining
                    # we need to adjust _changedData accordingly
                    if __invalidatedChangedData.get(key, False):
                        if __debug__ and not _changedData[key]:
                            debug('CLF_', 'Found that idhash for %s was '
                                  'invalidated by retraining' % key)
                        _changedData[key] = True

                # Look at the parameters
                for col in self._paramscols:
                    changedParams = self._collections[col].which_set()
                    if len(changedParams):
                        _changedData[col] = changedParams
                self.__invalidatedChangedData = {}  # reset it on training

                if __debug__:
                    debug('CLF_', "Obtained _changedData is %s"
                          % (self._changedData))


    def _posttrain(self, dataset):
        """Functionality post training

        For instance -- computing confusion matrix.

        Parameters
        ----------
        dataset : Dataset
          Data which was used for training
        """
        ca = self.ca
        if ca.is_enabled('trained_targets'):
            ca.trained_targets = dataset.sa[self.params.targets_attr].unique

        ca.trained_dataset = dataset
        ca.trained_nsamples = dataset.nsamples

        # needs to be assigned first since below we use predict
        self.__trainednfeatures = dataset.nfeatures

        if __debug__ and 'CHECK_TRAINED' in debug.active:
            self.__trainedidhash = dataset.idhash

        if self.ca.is_enabled('training_confusion') and \
               not self.ca.is_set('training_confusion'):
            # we should not store predictions for training data,
            # it is confusing imho (yoh)
            self.ca.change_temporarily(disable_ca=["predictions"])
            if self.params.retrainable:
                # we would need to recheck if data is the same,
                # XXX think if there is a way to make this all
                # efficient. For now, probably, retrainable
                # classifiers have no choice but to not use
                # training_confusion... sad
                self.__changedData_isset = False

            predictions = self.predict(dataset)
            self.ca.reset_changed_temporarily()
            self.ca.training_confusion = self.__summary_class__(
                targets=dataset.sa[self.params.targets_attr].value,
                predictions=predictions)

        if self.ca.is_enabled('feature_ids'):
            self.ca.feature_ids = self._get_feature_ids()


    ##REF: Name was automagically refactored
    def _get_feature_ids(self):
        """Virtual method to return feature_ids used while training

        Is not intended to be called anywhere but from _posttrain,
        thus the classifier is assumed to be trained at this point
        """
        # By default all features are used
        return range(self.__trainednfeatures)


    def summary(self):
        """Provide a summary over the classifier"""
        s = "Classifier %s" % self
        ca = self.ca
        ca_enabled = ca.enabled

        if self.trained:
            s += "\n trained"
            if ca.is_set('training_time'):
                s += ' in %.3g sec' % ca.training_time
            s += ' on data with'
            if ca.is_set('trained_targets'):
                s += ' targets:%s' % list(ca.trained_targets)

            nsamples, nchunks = None, None
            if ca.is_set('trained_nsamples'):
                nsamples = ca.trained_nsamples
            if ca.is_set('trained_dataset'):
                td = ca.trained_dataset
                nsamples, nchunks = td.nsamples, len(td.sa['chunks'].unique)
            if nsamples is not None:
                s += ' #samples:%d' % nsamples
            if nchunks is not None:
                s += ' #chunks:%d' % nchunks

            s += " #features:%d" % self.__trainednfeatures
            if ca.is_set('feature_ids'):
                s += ", used #features:%d" % len(ca.feature_ids)
            if ca.is_set('training_confusion'):
                s += ", training error:%.3g" % ca.training_confusion.error
        else:
            s += "\n not yet trained"

        if len(ca_enabled):
            s += "\n enabled ca:%s" % ', '.join([str(ca[x])
                                                 for x in ca_enabled])
        return s


    def clone(self):
        """Create a full copy of the classifier.

        It might require the classifier to be untrained first due to
        present SWIG bindings.
        TODO: think about proper re-implementation,
              without enrollment of deepcopy
        """
        if __debug__:
            debug("CLF", "Cloning %s#%s" % (self, id(self)))
        try:
            return deepcopy(self)
        except:
            self.untrain()
            return deepcopy(self)


    def _train(self, dataset):
        """Function to be actually overridden in derived classes
        """
        raise NotImplementedError


    def train(self, dataset):
        """Train classifier on a dataset

        Shouldn't be overridden in subclasses unless explicitly needed
        to do so
        """
        if dataset.nfeatures == 0 or dataset.nsamples == 0:
            raise DegenerateInputError, \
                  "Cannot train classifier on degenerate data %s" % dataset
        if __debug__:
            debug("CLF", "Training classifier %(clf)s on dataset %(dataset)s",
                  msgargs={'clf': self, 'dataset': dataset})

        self._pretrain(dataset)

        # remember the time when started training
        t0 = time.time()

        if dataset.nfeatures > 0:
            result = self._train(dataset)
        else:
            warning("Trying to train on dataset with no features present")
            if __debug__:
                debug("CLF",
                      "No features present for training, no actual training "
                      "is called")
            result = None

        self.ca.training_time = time.time() - t0
        self._posttrain(dataset)
        return result


    def _prepredict(self, dataset):
        """Functionality prior to prediction
        """
        if not ('notrain2predict' in self.__tags__):
            # check if classifier was trained if that is needed
            if not self.trained:
                raise ValueError, \
                      "Classifier %s wasn't yet trained, therefore can't " \
                      "predict" % self
            nfeatures = dataset.nfeatures  #data.shape[1]
            # check if number of features is the same as in the data
            # it was trained on
            if nfeatures != self.__trainednfeatures:
                raise ValueError, \
                      "Classifier %s was trained on data with %d features, " \
                      % (self, self.__trainednfeatures) + \
                      "thus can't predict for %d features" % nfeatures

        if self.params.retrainable:
            if not self.__changedData_isset:
                self.__reset_changed_data()
                _changedData = self._changedData
                data = np.asanyarray(dataset.samples)
                _changedData['testdata'] = \
                    self.__was_data_changed('testdata', data)
                if __debug__:
                    debug('CLF_', "prepredict: Obtained _changedData is %s"
                          % (_changedData))


    def _postpredict(self, dataset, result):
        """Functionality after prediction is computed
        """
        self.ca.predictions = result
        if self.params.retrainable:
            self.__changedData_isset = False


    def _predict(self, dataset):
        """Actual prediction
        """
        raise NotImplementedError


    @accepts_samples_as_dataset
    def predict(self, dataset):
        """Predict classifier on data

        Shouldn't be overridden in subclasses unless explicitly needed
        to do so. Also subclasses trying to call the super class's predict
        should call _predict from within _predict instead of predict(),
        since otherwise it would loop
        """
        ## ??? yoh: changed to asanyarray from asarray without exhaustive check
        data = np.asanyarray(dataset.samples)
        if __debug__:
            debug("CLF", "Predicting classifier %(clf)s on ds %(dataset)s",
                  msgargs={'clf': self, 'dataset': dataset})

        # remember the time when started computing predictions
        t0 = time.time()

        ca = self.ca
        # to assure that those are reset (could be set due to testing
        # post-training)
        ca.reset(['estimates', 'predictions'])

        self._prepredict(dataset)

        if self.__trainednfeatures > 0 \
               or 'notrain2predict' in self.__tags__:
            result = self._predict(dataset)
        else:
            warning("Trying to predict using classifier trained on no features")
            if __debug__:
                debug("CLF",
                      "No features were present for training, prediction is "
                      "bogus")
            result = [None] * data.shape[0]

        ca.predicting_time = time.time() - t0

        # with labels mapping in-place, we also need to go back to the
        # literal labels
        if self._attrmap:
            try:
                result = self._attrmap.to_literal(result)
            except KeyError, e:
                raise FailedToPredictError, \
                      "Failed to convert predictions from numeric into " \
                      "literals: %s" % e

        self._postpredict(dataset, result)
        return result
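# Illustrative sketch only: a minimal subclass wiring into the train/predict
# machinery above.  The MeanOfClassClassifier name and its trivial
# nearest-class-mean rule are made up for this example; only _train/_predict
# are overridden, as the base class requires, and ca.estimates is populated
# so the 'estimates' conditional attribute has something to report.
class MeanOfClassClassifier(Classifier):
    """Toy classifier: predicts the class whose training mean is nearest."""

    def _train(self, dataset):
        targets = dataset.sa[self.params.targets_attr].value
        # per-class mean of the training samples
        self._means = dict(
            [(t, dataset.samples[targets == t].mean(axis=0))
             for t in dataset.sa[self.params.targets_attr].unique])

    def _predict(self, dataset):
        predictions, estimates = [], []
        for sample in np.asanyarray(dataset.samples):
            # distance to each class mean; smaller is more predictive
            dists = [(np.linalg.norm(sample - m), t)
                     for t, m in self._means.iteritems()]
            dists.sort()
            estimates.append([d for d, t in dists])
            predictions.append(dists[0][1])
        self.ca.estimates = estimates
        return predictions

# Hypothetical usage, given some dataset `ds`:
#   clf = MeanOfClassClassifier()
#   clf.train(ds)
#   print clf.predict(ds)
#   print clf.summary()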
def plot_decision_boundary_2d(dataset, clf=None,
                              targets=None, regions=None, maps=None,
                              maps_res=50, vals=[-1, 0, 1],
                              data_callback=None):
    """Plot a scatter of a classifier's decision boundary and data points

    Assumes data is 2d (no way to visualize otherwise!!)

    Parameters
    ----------
    dataset : `Dataset`
      Data points to visualize (might be the data `clf` was trained on, or
      any novel data).
    clf : `Classifier`, optional
      Trained classifier.
    targets : string, optional
      What samples attribute to use for targets.  If None and `clf` is
      provided, then `clf.params.targets_attr` is used.
    regions : string, optional
      Plot regions (polygons) around groups of samples with the same
      attribute (and target attribute) values. E.g. chunks.
    maps : string in {'targets', 'estimates'}, optional
      Either plot underlying colored maps, such as clf predictions
      within the spanned regions, or estimates from the classifier
      (might not work for some).
    maps_res : int, optional
      Number of points in each direction to evaluate.
      Points are between axis limits, which are set automatically by
      matplotlib.  A higher number will yield smoother decision lines but
      at a quadratic (maps_res**2) cost in classification time/memory.
    vals : array of floats, optional
      Where to draw the contour lines if maps='estimates'.
    data_callback : callable, optional
      Callable object to preprocess the new data points.
      Classified points are of the form samples = data_callback(xysamples).
      I.e. this can be a function to normalize them, or to cache them
      before they are classified.
    """

    if False:
        ## from mvpa.misc.data_generators import *
        ## from mvpa.clfs.svm import *
        ## from mvpa.clfs.knn import *
        ## ds = dumb_feature_binary_dataset()
        dataset = normal_feature_dataset(nfeatures=2, nchunks=5,
                                         snr=10, nlabels=4,
                                         means=[[0, 1], [1, 0],
                                                [1, 1], [0, 0]])
        dataset.samples += dataset.sa.chunks[:, None] * 0.1  # slight shifts for chunks ;)
        #dataset = normal_feature_dataset(nfeatures=2, nlabels=3,
        #                                 means=[[0, 1], [1, 0], [1, 1]])
        #dataset = normal_feature_dataset(nfeatures=2, nlabels=2,
        #                                 means=[[0, 1], [1, 0]])
        #clf = LinearCSVMC(C=-1)
        clf = kNN(4)  #LinearCSVMC(C=-1)
        clf.train(dataset)
        #clf = None
        #plot_decision_boundary_2d(ds, clf)
        targets = 'targets'
        regions = 'chunks'
        #maps = 'estimates'
        maps = 'targets'
        #maps = None  #'targets'
        res = 50
        vals = [-1, 0, 1]
        data_callback = None
        pl.clf()

    if dataset.nfeatures != 2:
        raise ValueError('Can only plot a decision boundary in 2D')
    Pioff()
    a = pl.gca()  # f.add_subplot(1,1,1)

    attrmap = None
    if clf:
        estimates_were_enabled = clf.ca.is_enabled('estimates')
        clf.ca.enable('estimates')
        if targets is None:
            targets = clf.params.targets_attr
        # Lets reuse classifiers attrmap if it is good enough
        attrmap = clf._attrmap
        predictions = clf.predict(dataset)

    targets_sa_name = targets           # bad Yarik -- will rebind targets
                                        # to actual values
    targets_lit = dataset.sa[targets_sa_name].value
    utargets_lit = dataset.sa[targets_sa_name].unique

    if not (attrmap is not None
            and len(attrmap)
            and set(clf._attrmap.keys()).issuperset(utargets_lit)):
        # create our own
        attrmap = AttributeMap(mapnumeric=True)

    targets = attrmap.to_numeric(targets_lit)
    utargets = attrmap.to_numeric(utargets_lit)

    vmin = min(utargets)
    vmax = max(utargets)
    cmap = pl.cm.RdYlGn                 # argument

    # Scatter points
    if clf:
        all_hits = predictions == targets_lit
    else:
        all_hits = np.ones((len(targets),), dtype=bool)

    targets_colors = {}
    for l in utargets:
        targets_mask = targets == l
        s = dataset[targets_mask]
        targets_colors[l] = c = cmap((l - vmin) / float(vmax - vmin))

        # We want to plot hits and misses with different symbols
        hits = all_hits[targets_mask]
        misses = np.logical_not(hits)

        scatter_kwargs = dict(c=[c], zorder=10 + (l - vmin))
        if sum(hits):
            a.scatter(s.samples[hits, 0], s.samples[hits, 1], marker='o',
                      label='%s [%d]' % (attrmap.to_literal(l), sum(hits)),
                      **scatter_kwargs)
        if sum(misses):
            a.scatter(s.samples[misses, 0], s.samples[misses, 1], marker='x',
                      label='%s [%d] (miss)' % (attrmap.to_literal(l),
                                                sum(misses)),
                      edgecolor=[c], **scatter_kwargs)

    (xmin, xmax) = a.get_xlim()
    (ymin, ymax) = a.get_ylim()
    extent = (xmin, xmax, ymin, ymax)

    # Create grid to evaluate, predict it
    (x, y) = np.mgrid[xmin:xmax:np.complex(0, maps_res),
                      ymin:ymax:np.complex(0, maps_res)]
    news = np.vstack((x.ravel(), y.ravel())).T
    try:
        news = data_callback(news)
    except TypeError:                   # Not a callable object
        pass

    imshow_kwargs = dict(origin='lower',
                         zorder=1,
                         aspect='auto',
                         interpolation='bilinear', alpha=0.9, cmap=cmap,
                         vmin=vmin, vmax=vmax,
                         extent=extent)

    if maps is not None:
        if clf is None:
            raise ValueError, \
                  "Please provide classifier for plotting maps of %s" % maps
        predictions_new = clf.predict(news)

    if maps == 'estimates':
        # Contour and show predictions
        trained_targets = attrmap.to_numeric(clf.ca.trained_targets)

        if len(trained_targets) == 2:
            linestyles = []
            for v in vals:
                if v == 0:
                    linestyles.append('solid')
                else:
                    linestyles.append('dashed')
            vmin, vmax = -3, 3          # Gives a nice tonal range ;)
            map_ = 'estimates'          # should actually depend on estimates
        else:
            vals = (trained_targets[:-1] + trained_targets[1:]) / 2.
            linestyles = ['solid'] * len(vals)
            map_ = 'targets'

        try:
            map_values = clf.ca.estimates.reshape(x.shape)
            a.imshow(map_values.T, **imshow_kwargs)
            CS = a.contour(x, y, map_values, vals, zorder=6,
                           linestyles=linestyles, extent=extent, colors='k')
        except ValueError, e:
            print "Sorry - plotting of estimates isn't fully supported for " \
                  "%s. Got exception %s" % (clf, e)
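# Hedged usage sketch, adapted from the disabled demo block inside the
# function above (assumes pylab is importable as pl, and that
# normal_feature_dataset / LinearCSVMC are available from mvpa as there):
ds = normal_feature_dataset(nfeatures=2, nlabels=2, snr=10,
                            means=[[0, 1], [1, 0]])
clf = LinearCSVMC()
clf.train(ds)
# overlay the per-class scatter with the classifier's predicted-target map
plot_decision_boundary_2d(ds, clf, maps='targets')
pl.show()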