def test_attrmap_conflicts():
    am_n = AttributeMap({'a': 1, 'b': 2, 'c': 1})
    am_t = AttributeMap({'a': 1, 'b': 2, 'c': 1}, collisions_resolution='tuple')
    am_l = AttributeMap({'a': 1, 'b': 2, 'c': 1}, collisions_resolution='lucky')
    q_f = ['a', 'b', 'a', 'c']
    # should have no effect on forward mapping
    ok_(np.all(am_n.to_numeric(q_f) == am_t.to_numeric(q_f)))
    ok_(np.all(am_t.to_numeric(q_f) == am_l.to_numeric(q_f)))

    assert_raises(ValueError, am_n.to_literal, [2])
    r_t = am_t.to_literal([2, 1])
    r_l = am_l.to_literal([2, 1])
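
# A minimal sketch (not from the original suite) of the two collision
# policies exercised above: with 'a' and 'c' both mapped to 1, the reverse
# mapping is ambiguous, so 'tuple' is assumed to return every colliding
# literal while 'lucky' arbitrarily settles on one of them.
def _example_attrmap_collision_policies():
    am = AttributeMap({'a': 1, 'b': 2, 'c': 1}, collisions_resolution='tuple')
    ok_(set(am.to_literal([1])[0]) == set(['a', 'c']))  # all literals for 1
    ok_(am.to_literal([2])[0] == 'b')                   # unambiguous as usual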
def _test_gpr_model_selection(self):  # pragma: no cover
    """Smoke test for running model selection while getting GPRWeights

    TODO: DISABLED because setting of hyperparameters was not adopted
    for 0.6 (yet)
    """
    if not externals.exists('openopt'):
        return

    amap = AttributeMap()  # we would need to pass numbers into the GPR
    dataset = datasets['uni2small'].copy()  #data_generators.linear1d_gaussian_noise()
    dataset.targets = amap.to_numeric(dataset.targets).astype(float)

    k = GeneralizedLinearKernel()
    clf = GPR(k, enable_ca=['log_marginal_likelihood'])

    sa = clf.get_sensitivity_analyzer()  # should be regular weights
    sa_ms = clf.get_sensitivity_analyzer(flavor='model_select')  # with model selection

    def prints():
        print clf.ca.log_marginal_likelihood, clf.kernel.Sigma_p, \
              clf.kernel.sigma_0

    sa(dataset)
    lml = clf.ca.log_marginal_likelihood
    sa_ms(dataset)
    lml_ms = clf.ca.log_marginal_likelihood

    self.assertTrue(lml_ms > lml)
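
# A minimal GPR usage sketch (not part of the original suite; it mirrors the
# setup above minus the disabled model-selection flavor): GPR wants numeric
# targets, hence the AttributeMap round-trip.
def _example_gpr(ds):
    amap = AttributeMap()
    ds = ds.copy()
    ds.targets = amap.to_numeric(ds.targets).astype(float)
    clf = GPR(GeneralizedLinearKernel(), enable_ca=['log_marginal_likelihood'])
    clf.train(ds)
    return clf.ca.log_marginal_likelihood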
def to_lightsvm_format(dataset, out, targets_attr='targets',
                       domain=None, am=None):
    """Export dataset into LightSVM format

    Parameters
    ----------
    dataset : Dataset
    out
      Anything understanding .write(string), such as `File`
    targets_attr : string, optional
      Name of the samples attribute to be output
    domain : {None, 'regression', 'binary', 'multiclass'}, optional
      Domain the dataset belongs to.  If `None`, it is deduced from the
      datatype ('regression' if float, classification in case of int or
      string, with 'binary'/'multiclass' depending on the number of
      unique targets)
    am : `AttributeMap` or None, optional
      Which mapping to use for storing non-conformant targets.  If None,
      a new one is generated automatically depending on the
      given/deduced domain.

    Returns
    -------
    am

    LightSVM format is an ASCII representation with a single sample per
    line::

      output featureIndex:featureValue ... featureIndex:featureValue

    where ``output`` is specific for a given domain:

    regression
      float number
    binary
      integer labels from {-1, 1}
    multiclass
      integer labels from {1..ds.targets_attr.nunique}
    """
    targets_a = dataset.sa[targets_attr]
    targets = targets_a.value

    # XXX all of the below
    #  * might become cleaner
    #  * might be RF'ed to be more generic, to be used elsewhere as well
    if domain is None:
        if targets.dtype.kind in ['S', 'U', 'i']:
            if len(targets_a.unique) == 2:
                domain = 'binary'
            else:
                domain = 'multiclass'
        else:
            domain = 'regression'

    if domain in ['multiclass', 'binary']:
        # check if labels are appropriate and provide mapping if necessary
        utargets = targets_a.unique
        if domain == 'binary' and set(utargets) != set([-1, 1]):
            # need mapping
            if len(utargets) != 2:
                raise ValueError, \
                      "We need 2 unique targets in %s of %s. Got targets " \
                      "from set %s" % (targets_attr, dataset, utargets)
            if am is None:
                am = AttributeMap(dict(zip(utargets, [-1, 1])))
            elif set(am.keys()) != set([-1, 1]):
                raise ValueError, \
                      "Provided %s doesn't map into binary " \
                      "labels -1,+1" % (am,)
        elif domain == 'multiclass' \
                 and set(utargets) != set(range(1, len(utargets) + 1)):
            if am is None:
                am = AttributeMap(dict(zip(utargets,
                                           range(1, len(utargets) + 1))))
            # was compared against set([-1, 1]), which belongs to the binary
            # branch above -- check the multiclass range instead
            elif set(am.keys()) != set(range(1, len(utargets) + 1)):
                raise ValueError, \
                      "Provided %s doesn't map into multiclass " \
                      "range 1..N" % (am,)

    if am is not None:
        # map the targets
        targets = am.to_numeric(targets)

    for t, s in zip(targets, dataset.samples):
        out.write(('%g %s\n'
                   % (t, ' '.join('%i:%.8g' % (i, v)
                                  for i, v in zip(range(1, dataset.nfeatures + 1),
                                                  s)))).encode('ascii'))

    out.flush()  # push it out
    return am
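
# A minimal usage sketch (not part of the original module; the file names and
# `ds_train`/`ds_test` are hypothetical): export a training set, then reuse
# the returned AttributeMap so the test set gets identical numeric labels.
def _example_to_lightsvm_format(ds_train, ds_test):
    with open('train.lightsvm', 'wb') as out:
        am = to_lightsvm_format(ds_train, out)   # e.g. maps two literal targets to -1/+1
    with open('test.lightsvm', 'wb') as out:
        to_lightsvm_format(ds_test, out, am=am)  # same mapping for consistency
    return am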
class SVM(_SVM):
    """Support Vector Machine Classifier(s) based on Shogun

    This is a simple base interface
    """

    __default_kernel_class__ = _default_kernel_class_

    num_threads = Parameter(1,
                            min=1,
                            doc='Number of threads to utilize')

    _KNOWN_PARAMS = ['epsilon']

    __tags__ = _SVM.__tags__ + ['sg', 'retrainable']

    # Some words of wisdom from shogun author:
    # XXX remove after proper comments added to implementations
    """
    If you'd like to train linear SVMs use SGD or OCAS. These are (I am
    serious) the fastest linear SVM-solvers to date. (OCAS cannot do SVMs
    with standard additive bias, but will L2 regularize it - though it
    should not matter much in practice (although it will give slightly
    different solutions)). Note that SGD has no stopping criterion (you
    simply have to specify the number of iterations) and that OCAS has a
    different stopping condition than svmlight for example, which may be
    more tight or more loose depending on the problem - I suggest 1e-2 or
    1e-3 for epsilon.

    If you would like to train kernel SVMs use libsvm/gpdt/svmlight -
    depending on the problem one is faster than the other (hard to say
    when; I *think* when your dataset is very unbalanced, chunking methods
    like svmlight/gpdt are better), for smaller problems definitely libsvm.

    If you use string kernels then gpdt/svmlight have a special 'linadd'
    speedup for this (requires sg 0.6.2 - there was some inefficiency in
    the code for python-modular before that). This is effective for big
    datasets (I trained on 10 million strings based on this).

    And yes, currently we only implemented parallel training for svmlight,
    however all SVMs can be evaluated in parallel.
    """
    _KNOWN_SENSITIVITIES = {'linear': LinearSVMWeights,
                            }
    _KNOWN_IMPLEMENTATIONS = {}
    if externals.exists('shogun', raise_=True):
        _KNOWN_IMPLEMENTATIONS = {
            "libsvm" : (shogun.Classifier.LibSVM, ('C',),
                        ('multiclass', 'binary'),
                        "LIBSVM's C-SVM (L2 soft-margin SVM)"),
            "gmnp" : (shogun.Classifier.GMNPSVM, ('C',),
                      ('multiclass', 'binary'),
                      "Generalized Nearest Point Problem SVM"),
            # XXX should have been GPDT, shogun has it fixed since some version
            "gpbt" : (shogun.Classifier.GPBTSVM, ('C',), ('binary',),
                      "Gradient Projection Decomposition Technique for "
                      "large-scale SVM problems"),
            "gnpp" : (shogun.Classifier.GNPPSVM, ('C',), ('binary',),
                      "Generalized Nearest Point Problem SVM"),
            ## TODO: Needs sparse features...
            # "svmlin" : (shogun.Classifier.SVMLin, ''),
            # "liblinear" : (shogun.Classifier.LibLinear, ''),
            # "subgradient" : (shogun.Classifier.SubGradientSVM, ''),
            ## good 2-class linear SVMs
            # "ocas" : (shogun.Classifier.SVMOcas, ''),
            # "sgd" : (shogun.Classifier.SVMSGD, ''),

            # regressions
            "libsvr": (shogun.Regression.LibSVR, ('C', 'tube_epsilon',),
                       ('regression',),
                       "LIBSVM's epsilon-SVR"),
            }

    def __init__(self, **kwargs):
        """Interface class to Shogun's classifiers and regressions.

        Default implementation is 'libsvm'.
        """
        svm_impl = kwargs.get('svm_impl', 'libsvm').lower()
        kwargs['svm_impl'] = svm_impl

        # init base class
        _SVM.__init__(self, **kwargs)

        self.__svm = None
        """Holds the trained svm."""
        self.__svm_apply = None
        """Compatibility convenience to bind to the classify/apply method
        of __svm"""

        # Need to store original data...
        # TODO: keep 1 of them -- just __traindata or __traindataset
        # For now it is needed for computing sensitivities
        self.__traindataset = None

        # internal SG swig proxies
        self.__traindata = None
        self.__kernel = None
        self.__kernel_test = None
        self.__testdata = None

        # remove kernel-based for some
        # TODO RF: provide separate handling for non-kernel machines
        if svm_impl in ['svmocas']:
            if not (self.__kernel is None
                    or self.__kernel.__kernel_name__ == 'linear'):
                raise ValueError(
                    "%s is inherently linear, thus provided kernel %s "
                    "is of no effect" % (svm_impl, self.__kernel))
            self.__tags__.pop(self.__tags__.index('kernel-based'))
            self.__tags__.pop(self.__tags__.index('retrainable'))

    # TODO: integrate with kernel framework
    #def __condition_kernel(self, kernel):
        ## XXX I thought that it is needed only for retrainable classifier,
        ## but then krr gets confused, and svrlight needs it to provide
        ## meaningful results even without 'retraining'
        #if self._svm_impl in ['svrlight', 'lightsvm']:
            #try:
                #kernel.set_precompute_matrix(True, True)
            #except Exception, e:
                ## N/A in shogun 0.9.1... TODO: RF
                #if __debug__:
                    #debug('SG_', "Failed call to set_precompute_matrix "
                          #"for %s: %s" % (self, e))

    def _train(self, dataset):
        """Train SVM
        """
        # XXX watchout
        # self.untrain()
        newkernel, newsvm = False, False
        # local bindings for faster lookup
        params = self.params
        retrainable = self.params.retrainable
        targets_sa_name = self.get_space()        # name of targets sa
        targets_sa = dataset.sa[targets_sa_name]  # actual targets sa

        if retrainable:
            _changedData = self._changedData

        # LABELS
        ul = None
        self.__traindataset = dataset

        # OK -- we have to map labels since
        # binary ones expect -1/+1
        # Multiclass expect labels starting with 0, otherwise they puke
        # when ran from ipython... yikes
        if __debug__:
            debug("SG_", "Creating labels instance")

        if self.__is_regression__:
            labels_ = np.asarray(targets_sa.value, dtype='double')
        else:
            ul = targets_sa.unique
            # ul.sort()

            if len(ul) == 2:
                # assure that we have -1/+1
                _labels_dict = {ul[0]: -1.0, ul[1]: +1.0}
            elif len(ul) < 2:
                raise FailedToTrainError, \
                      "We do not have 1-class SVM brought into SG yet"
            else:
                # can't use plain enumerate since we need them swapped
                _labels_dict = dict([(ul[i], i) for i in range(len(ul))])

            # Create SG-customized attrmap to assure -1 / +1 if necessary
            self._attrmap = AttributeMap(_labels_dict, mapnumeric=True)
            if __debug__:
                debug("SG__", "Mapping labels using dict %s" % _labels_dict)
            labels_ = self._attrmap.to_numeric(targets_sa.value).astype(float)

        labels = shogun.Features.Labels(labels_)
        _setdebug(labels, 'Labels')

        # KERNEL
        # XXX cruel fix for now... whole retraining business needs to
        # be rethought
        if retrainable:
            _changedData['kernel_params'] = _changedData.get('kernel_params',
                                                             False)

        # TODO: big RF to move non-kernel classifiers away
        if 'kernel-based' in self.__tags__ \
               and (not retrainable
                    or _changedData['traindata']
                    or _changedData['kernel_params']):
            # If needed compute or just collect arguments for SVM and for
            # the kernel
            if retrainable and __debug__:
                if _changedData['traindata']:
                    debug("SG",
                          "Re-Creating kernel since training data has changed")
                if _changedData['kernel_params']:
                    debug("SG",
                          "Re-Creating kernel since params %s has changed"
                          % _changedData['kernel_params'])

            k = self.params.kernel
            k.compute(dataset)
            self.__kernel = kernel = k.as_raw_sg()

            newkernel = True
            self.kernel_params.reset()  # mark them as not-changed
            #_setdebug(kernel, 'Kernels')
            #self.__condition_kernel(kernel)
            if retrainable:
                if __debug__:
                    debug("SG_", "Resetting test kernel for retrainable SVM")
                self.__kernel_test = None

        # TODO -- handle _changedData['params'] correctly, ie without
        #         recreating the whole SVM
        Cs = None
        if not retrainable or self.__svm is None or _changedData['params']:
            # SVM
            if self.params.has_key('C'):
                Cs = self._get_cvec(dataset)

                # XXX do not jump over the head and leave it up to the user
                #     ie do not rescale automagically by the number of samples
                #if len(Cs) == 2 and not ('regression' in self.__tags__) \
                #       and len(ul) == 2:
                #    # we were given two Cs
                #    if np.max(C) < 0 and np.min(C) < 0:
                #        # and both are requested to be 'scaled' TODO :
                #        # provide proper 'features' to the parameters,
                #        # so we could specify explicitly whether to scale
                #        # them by the number of samples here
                #        nl = [np.sum(labels_ == _labels_dict[l]) for l in ul]
                #        ratio = np.sqrt(float(nl[1]) / nl[0])
                #        #ratio = (float(nl[1]) / nl[0])
                #        Cs[0] *= ratio
                #        Cs[1] /= ratio
                #        if __debug__:
                #            debug("SG_", "Rescaled Cs to %s to accommodate "
                #                  "the difference in number of training "
                #                  "samples" % Cs)

            # Choose appropriate implementation
            svm_impl_class = self.__get_implementation(ul)

            if __debug__:
                debug("SG", "Creating SVM instance of %s" % `svm_impl_class`)

            if self._svm_impl in ['libsvr', 'svrlight']:
                # for regressions the constructor is a bit different
                self.__svm = svm_impl_class(Cs[0],
                                            self.params.tube_epsilon,
                                            self.__kernel,
                                            labels)
                # we need to set epsilon explicitly
                self.__svm.set_epsilon(self.params.epsilon)
            elif self._svm_impl in ['krr']:
                self.__svm = svm_impl_class(self.params.tau,
                                            self.__kernel,
                                            labels)
            elif 'kernel-based' in self.__tags__:
                self.__svm = svm_impl_class(Cs[0],
                                            self.__kernel,
                                            labels)
                self.__svm.set_epsilon(self.params.epsilon)
            else:
                traindata_sg = _tosg(dataset.samples)
                self.__svm = svm_impl_class(Cs[0], traindata_sg, labels)
                self.__svm.set_epsilon(self.params.epsilon)

            # To stay compatible with versions across API changes in sg 1.0.0
            self.__svm_apply = externals.versions['shogun'] >= '1' \
                               and self.__svm.apply \
                               or self.__svm.classify  # the latter for old API

            # Set shrinking
            if 'shrinking' in params:
                shrinking = params.shrinking
                if __debug__:
                    debug("SG_", "Setting shrinking to %s" % shrinking)
                self.__svm.set_shrinking_enabled(shrinking)

            if Cs is not None and len(Cs) == 2:
                if __debug__:
                    debug("SG_",
                          "Since multiple Cs are provided: %s, assign them"
                          % Cs)
                self.__svm.set_C(Cs[0], Cs[1])

            self.params.reset()  # mark them as not-changed
            newsvm = True
            _setdebug(self.__svm, 'SVM')

            # Set optimization parameters
            if self.params.has_key('tube_epsilon') and \
                   hasattr(self.__svm, 'set_tube_epsilon'):
                self.__svm.set_tube_epsilon(self.params.tube_epsilon)
            self.__svm.parallel.set_num_threads(self.params.num_threads)
        else:
            if __debug__:
                debug("SG_", "SVM instance is not re-created")
            if _changedData['targets']:          # labels were changed
                if __debug__:
                    debug("SG__", "Assigning new labels")
                self.__svm.set_labels(labels)
            if newkernel:                        # kernel was replaced
                if __debug__:
                    debug("SG__", "Assigning new kernel")
                self.__svm.set_kernel(self.__kernel)
            assert(_changedData['params'] is False)  # we should never get here

        if retrainable:
            # we must assign it only if it is retrainable
            self.ca.retrained = not newsvm or not newkernel

        # Train
        if __debug__ and 'SG' in debug.active:
            if not self.__is_regression__:
                lstr = " with labels %s" % targets_sa.unique
            else:
                lstr = ""
            debug("SG", "%sTraining %s on data%s" %
                  (("", "Re-")[retrainable and self.ca.retrained],
                   self, lstr))

        self.__svm.train()

        if __debug__:
            debug("SG_", "Done training SG_SVM %s" % self)

        # Report on training
        if (__debug__ and 'SG__' in debug.active) or \
           self.ca.is_enabled('training_stats'):
            if __debug__:
                debug("SG_", "Assessing predictions on training data")
            trained_targets = self.__svm_apply().get_labels()
        else:
            trained_targets = None

        if __debug__ and "SG__" in debug.active:
            debug("SG__", "Original labels: %s, Trained labels: %s" %
                  (targets_sa.value, trained_targets))

        # Assign training confusion right away here since we are ready
        # to do so.
        # XXX TODO use some other conditional attribute like 'trained_targets'
        #     and use it within base Classifier._posttrain to assign
        #     predictions instead of duplicating code here
        # XXX For now it can be done only for regressions since labels need
        #     to be remapped, and that becomes even worse if we use
        #     regression as a classifier, so mapping happens upstairs
        if self.__is_regression__ and self.ca.is_enabled('training_stats'):
            self.ca.training_stats = self.__summary_class__(
                targets=targets_sa.value, predictions=trained_targets)

    # XXX actually this is the beast which started this evil conversion
    #     so -- make use of dataset here! ;)
    @accepts_samples_as_dataset
    def _predict(self, dataset):
        """Predict values for the data
        """
        retrainable = self.params.retrainable

        if retrainable:
            changed_testdata = self._changedData['testdata'] or \
                               self.__kernel_test is None

        if not retrainable:
            if __debug__:
                debug("SG__",
                      "Initializing SVMs kernel of %s with "
                      "training/testing samples" % self)
            self.params.kernel.compute(self.__traindataset, dataset)
            self.__kernel_test = self.params.kernel.as_sg()._k
            # We can just reuse kernel used for training
            #self.__condition_kernel(self.__kernel)
        else:
            if changed_testdata:
                #if __debug__:
                    #debug("SG__",
                          #"Re-creating testing kernel of %s giving "
                          #"arguments %s" %
                          #(`self._kernel_type`, self.__kernel_args))
                self.params.kernel.compute(self.__traindataset, dataset)
                #_setdebug(kernel_test, 'Kernels')
                #_setdebug(kernel_test_custom, 'Kernels')
                self.__kernel_test = self.params.kernel.as_raw_sg()
            elif __debug__:
                debug("SG__", "Re-using testing kernel")

        assert(self.__kernel_test is not None)

        if 'kernel-based' in self.__tags__:
            self.__svm.set_kernel(self.__kernel_test)
            # doesn't do any good imho although on unittests helps tiny bit... hm
            #self.__svm.init_kernel_optimization()
            values_ = self.__svm_apply()
        else:
            testdata_sg = _tosg(dataset.samples)
            self.__svm.set_features(testdata_sg)
            values_ = self.__svm_apply()

        if __debug__:
            debug("SG_", "Classifying testing data")

        if values_ is None:
            raise RuntimeError, "We got empty list of values from %s" % self

        values = values_.get_labels()

        if retrainable:
            # we must assign it only if it is retrainable
            self.ca.repredicted = repredicted = not changed_testdata
            if __debug__:
                debug("SG__",
                      "Re-assigning learning kernel. Repredicted is %s"
                      % repredicted)
            # return back original kernel
            if 'kernel-based' in self.__tags__:
                self.__svm.set_kernel(self.__kernel)

        if __debug__:
            debug("SG__", "Got values %s" % values)

        if self.__is_regression__:
            predictions = values
        else:
            if len(self._attrmap.keys()) == 2:
                predictions = np.sign(values)
                # since np.sign(0) == 0
                predictions[predictions == 0] = 1
            else:
                predictions = values

            # remap labels back adjusting their type
            # XXX YOH: This is done by topclass now (needs RF)
            #predictions = self._attrmap.to_literal(predictions)
            if __debug__:
                debug("SG__", "Tuned predictions %s" % predictions)

        # store conditional attribute
        # TODO: extract values properly for multiclass SVMs --
        #       ie 1 value per label or pairs for all 1-vs-1 classifications
        self.ca.estimates = values

        ## to avoid leaks with not yet properly fixed shogun
        if not retrainable:
            try:
                # was `testdata.free_features()` -- an undefined name; the
                # proxy only exists in the non-kernel branch above
                testdata_sg.free_features()
            except:
                pass

        return predictions

    def _untrain(self):
        super(SVM, self)._untrain()
        # untrain/clean the kernel -- we might not allow to drag SWIG
        # instance around BUT XXX -- make it work fine with CachedKernel --
        # we might not want to fully "untrain" in such case
        self.params.kernel.cleanup()  # XXX unify naming
        if not self.params.retrainable:
            if __debug__:
                debug("SG__", "Untraining %(clf)s and destroying sg's SVM",
                      msgargs={'clf': self})

            # to avoid leaks with not yet properly fixed shogun
            # XXX make it nice... now it is just stable ;-)
            if True:  # not self.__traindata is None:
                if True:  # try:
                    if self.__kernel is not None:
                        del self.__kernel
                        self.__kernel = None

                    if self.__kernel_test is not None:
                        del self.__kernel_test
                        self.__kernel_test = None

                    if self.__svm is not None:
                        del self.__svm
                        self.__svm = None
                        self.__svm_apply = None

                    if self.__traindata is not None:
                        # Left in for easy demonstration of the memory leak
                        # in shogun
                        #for i in xrange(10):
                        #    debug("SG__", "cachesize pre free features %s" %
                        #          (self.__svm.get_kernel().get_cache_size()))
                        self.__traindata.free_features()
                        del self.__traindata
                        self.__traindata = None

                    self.__traindataset = None
                #except:
                #    pass

            if __debug__:
                debug("SG__",
                      "Done untraining %(self)s and destroying sg's SVM",
                      msgargs=locals())
        elif __debug__:
            debug("SG__", "Not untraining %(self)s since it is retrainable",
                  msgargs=locals())

    def __get_implementation(self, ul):
        if self.__is_regression__ or len(ul) == 2:
            svm_impl_class = SVM._KNOWN_IMPLEMENTATIONS[self._svm_impl][0]
        else:
            if self._svm_impl == 'libsvm':
                svm_impl_class = shogun.Classifier.LibSVMMultiClass
            elif self._svm_impl == 'gmnp':
                svm_impl_class = shogun.Classifier.GMNPSVM
            else:
                raise RuntimeError, \
                      "Shogun: Implementation %s doesn't handle multiclass " \
                      "data. Got labels %s. Use some other classifier" % \
                      (self._svm_impl,
                       self.__traindataset.sa[self.get_space()].unique)
            if __debug__:
                debug("SG_", "Using %s for multiclass data of %s" %
                      (svm_impl_class, self._svm_impl))

        return svm_impl_class

    svm = property(fget=lambda self: self.__svm)
    """Access to the SVM model."""

    traindataset = property(fget=lambda self: self.__traindataset)
    """Dataset which was used for training"""
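
# A minimal usage sketch (not part of the original module; `ds` stands for
# any 2-class PyMVPA dataset, e.g. datasets['uni2small'] from the test suite,
# and shogun must be importable): train the default 'libsvm' implementation
# and predict on the training data.
def _example_sg_svm(ds):
    clf = SVM()    # svm_impl defaults to 'libsvm'
    clf.train(ds)  # literal targets are mapped to -1/+1 internally
    return clf.predict(ds)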
def plot_decision_boundary_2d(dataset, clf=None,
                              targets=None, regions=None, maps=None,
                              maps_res=50, vals=None,
                              data_callback=None):
    """Plot a scatter of a classifier's decision boundary and data points

    Assumes data is 2d (no way to visualize otherwise!!)

    Parameters
    ----------
    dataset : `Dataset`
      Data points to visualize (might be the data `clf` was trained on, or
      any novel data).
    clf : `Classifier`, optional
      Trained classifier
    targets : string, optional
      What samples attributes to use for targets.  If None and `clf` is
      provided, then `clf.params.targets_attr` is used.
    regions : string, optional
      Plot regions (polygons) around groups of samples with the same
      attribute (and target attribute) values. E.g. chunks.
    maps : string in {'targets', 'estimates'}, optional
      Either plot underlying colored maps, such as clf predictions
      within the spanned regions, or estimates from the classifier
      (might not work for some).
    maps_res : int, optional
      Number of points in each direction to evaluate.
      Points are between axis limits, which are set automatically by
      matplotlib.  A higher number will yield smoother decision lines but
      comes at the cost of O^2 classifying time/memory.
    vals : array of floats, optional
      Where to draw the contour lines if maps='estimates'
    data_callback : callable, optional
      Callable object to preprocess the new data points.
      Classified points of the form samples = data_callback(xysamples).
      I.e. this can be a function to normalize them, or cache them
      before they are classified.
    """
    if vals is None:
        vals = [-1, 0, 1]

    if False:
        ## from mvpa2.misc.data_generators import *
        ## from mvpa2.clfs.svm import *
        ## from mvpa2.clfs.knn import *
        ## ds = dumb_feature_binary_dataset()
        dataset = normal_feature_dataset(nfeatures=2, nchunks=5,
                                         snr=10, nlabels=4,
                                         means=[[0, 1], [1, 0], [1, 1], [0, 0]])
        dataset.samples += dataset.sa.chunks[:, None] * 0.1  # slight shifts for chunks ;)
        #dataset = normal_feature_dataset(nfeatures=2, nlabels=3,
        #                                 means=[[0, 1], [1, 0], [1, 1]])
        #dataset = normal_feature_dataset(nfeatures=2, nlabels=2,
        #                                 means=[[0, 1], [1, 0]])
        #clf = LinearCSVMC(C=-1)
        clf = kNN(4)  #LinearCSVMC(C=-1)
        clf.train(dataset)
        #clf = None
        #plot_decision_boundary_2d(ds, clf)
        targets = 'targets'
        regions = 'chunks'
        #maps = 'estimates'
        maps = 'targets'
        #maps = None  #'targets'
        res = 50
        vals = [-1, 0, 1]
        data_callback = None
        pl.clf()

    if dataset.nfeatures != 2:
        raise ValueError('Can only plot a decision boundary in 2D')

    Pioff()
    a = pl.gca()  # f.add_subplot(1,1,1)

    attrmap = None
    if clf:
        estimates_were_enabled = clf.ca.is_enabled('estimates')
        clf.ca.enable('estimates')

        if targets is None:
            targets = clf.get_space()
        # Lets reuse classifiers attrmap if it is good enough
        attrmap = clf._attrmap
        predictions = clf.predict(dataset)

    targets_sa_name = targets  # bad Yarik -- will rebind targets to actual values
    targets_lit = dataset.sa[targets_sa_name].value
    utargets_lit = dataset.sa[targets_sa_name].unique

    if not (attrmap is not None
            and len(attrmap)
            and set(clf._attrmap.keys()).issuperset(utargets_lit)):
        # create our own
        attrmap = AttributeMap(mapnumeric=True)

    targets = attrmap.to_numeric(targets_lit)
    utargets = attrmap.to_numeric(utargets_lit)

    vmin = min(utargets)
    vmax = max(utargets)
    cmap = pl.cm.RdYlGn  # argument

    # Scatter points
    if clf:
        all_hits = predictions == targets_lit
    else:
        all_hits = np.ones((len(targets),), dtype=bool)

    targets_colors = {}
    for l in utargets:
        targets_mask = targets == l
        s = dataset[targets_mask]
        targets_colors[l] = c \
            = cmap((l - vmin) / float(vmax - vmin))

        # We want to plot hits and misses with different symbols
        hits = all_hits[targets_mask]
        misses = np.logical_not(hits)

        scatter_kwargs = dict(c=[c], zorder=10 + (l - vmin))

        if sum(hits):
            a.scatter(s.samples[hits, 0], s.samples[hits, 1], marker='o',
                      label='%s [%d]' % (attrmap.to_literal(l), sum(hits)),
                      **scatter_kwargs)
        if sum(misses):
            a.scatter(s.samples[misses, 0], s.samples[misses, 1], marker='x',
                      label='%s [%d] (miss)'
                            % (attrmap.to_literal(l), sum(misses)),
                      edgecolor=[c], **scatter_kwargs)

    (xmin, xmax) = a.get_xlim()
    (ymin, ymax) = a.get_ylim()
    extent = (xmin, xmax, ymin, ymax)

    # Create grid to evaluate, predict it
    (x, y) = np.mgrid[xmin:xmax:np.complex(0, maps_res),
                      ymin:ymax:np.complex(0, maps_res)]
    news = np.vstack((x.ravel(), y.ravel())).T
    try:
        news = data_callback(news)
    except TypeError:  # Not a callable object
        pass

    imshow_kwargs = dict(origin='lower',
                         zorder=1,
                         aspect='auto',
                         interpolation='bilinear', alpha=0.9, cmap=cmap,
                         vmin=vmin, vmax=vmax,
                         extent=extent)

    if maps is not None:
        if clf is None:
            raise ValueError, \
                  "Please provide classifier for plotting maps of %s" % maps
        predictions_new = clf.predict(news)

    if maps == 'estimates':
        # Contour and show predictions
        trained_targets = attrmap.to_numeric(clf.ca.trained_targets)

        if len(trained_targets) == 2:
            linestyles = []
            for v in vals:
                if v == 0:
                    linestyles.append('solid')
                else:
                    linestyles.append('dashed')
            vmin, vmax = -3, 3  # Gives a nice tonal range ;)
            map_ = 'estimates'  # should actually depend on estimates
        else:
            vals = (trained_targets[:-1] + trained_targets[1:]) / 2.
            linestyles = ['solid'] * len(vals)
            map_ = 'targets'

        try:
            # was a bare `clf.ca.estimates.reshape(x.shape)` -- the result
            # needs to be assigned before it is plotted
            map_values = clf.ca.estimates.reshape(x.shape)
            a.imshow(map_values.T, **imshow_kwargs)
            CS = a.contour(x, y, map_values, vals, zorder=6,
                           linestyles=linestyles, extent=extent, colors='k')
        except ValueError, e:
            print "Sorry - plotting of estimates isn't fully supported for " \
                  "%s. Got exception %s" % (clf, e)
    elif maps == 'targets':
        map_values = attrmap.to_numeric(predictions_new).reshape(x.shape)
        a.imshow(map_values.T, **imshow_kwargs)
        #CS = a.contour(x, y, map_values, vals, zorder=6,
        #               linestyles=linestyles, extent=extent, colors='k')

    # Plot regions belonging to the same pair of attribute given
    # (e.g. chunks) and targets attribute
    if regions:
        chunks_sa = dataset.sa[regions]
        chunks_lit = chunks_sa.value
        uchunks_lit = chunks_sa.unique  # was chunks_sa.value -- would loop
                                        # over duplicated chunk values below
        chunks_attrmap = AttributeMap(mapnumeric=True)
        chunks = chunks_attrmap.to_numeric(chunks_lit)
        uchunks = chunks_attrmap.to_numeric(uchunks_lit)

        from matplotlib.delaunay.triangulate import Triangulation
        from matplotlib.patches import Polygon
        # Lets figure out convex hulls for each chunk/label pair
        for target in utargets:
            t_mask = targets == target
            for chunk in uchunks:
                tc_mask = np.logical_and(t_mask, chunk == chunks)
                tc_samples = dataset.samples[tc_mask]
                tr = Triangulation(tc_samples[:, 0], tc_samples[:, 1])
                poly = pl.fill(tc_samples[tr.hull, 0],
                               tc_samples[tr.hull, 1],
                               # remaining kwargs were truncated in the
                               # source; a plausible minimal completion:
                               facecolor=targets_colors[target],
                               alpha=0.01,
                               edgecolor='gray')
def test_attrmap():
    map_default = {'eins': 0, 'zwei': 2, 'sieben': 1}
    map_custom = {'eins': 11, 'zwei': 22, 'sieben': 33}
    literal = ['eins', 'zwei', 'sieben', 'eins', 'sieben', 'eins']
    literal_nonmatching = ['uno', 'dos', 'tres']
    num_default = [0, 2, 1, 0, 1, 0]
    num_custom = [11, 22, 33, 11, 33, 11]

    # no custom mapping given
    am = AttributeMap()
    assert_false(am)
    ok_(len(am) == 0)
    assert_array_equal(am.to_numeric(literal), num_default)
    assert_array_equal(am.to_literal(num_default), literal)
    ok_(am)
    ok_(len(am) == 3)

    #
    # Tests for recursive mapping + preserving datatype
    class myarray(np.ndarray):
        pass

    assert_raises(KeyError, am.to_literal, [(1, 2), 2, 0])
    literal_fancy = [(1, 2), 2, [0], np.array([0, 1]).view(myarray)]
    literal_fancy_tuple = tuple(literal_fancy)
    literal_fancy_array = np.array(literal_fancy, dtype=object)

    for l in (literal_fancy, literal_fancy_tuple,
              literal_fancy_array):
        res = am.to_literal(l, recurse=True)
        assert_equal(res[0], ('sieben', 'zwei'))
        assert_equal(res[1], 'zwei')
        assert_equal(res[2], ['eins'])
        assert_array_equal(res[3], ['eins', 'sieben'])

        # types of result and subsequences should be preserved
        ok_(isinstance(res, l.__class__))
        ok_(isinstance(res[0], tuple))
        ok_(isinstance(res[1], str))
        ok_(isinstance(res[2], list))
        ok_(isinstance(res[3], myarray))

    # yet another example
    a = np.empty(1, dtype=object)
    a[0] = (0, 1)
    res = am.to_literal(a, recurse=True)
    ok_(isinstance(res[0], tuple))

    #
    # with custom mapping
    am = AttributeMap(map=map_custom)
    assert_array_equal(am.to_numeric(literal), num_custom)
    assert_array_equal(am.to_literal(num_custom), literal)

    # if not numeric nothing is mapped
    assert_array_equal(am.to_numeric(num_custom), num_custom)
    # even if the map doesn't fit
    assert_array_equal(am.to_numeric(num_default), num_default)

    # need to_numeric first
    am = AttributeMap()
    assert_raises(RuntimeError, am.to_literal, [1, 2, 3])
    # stupid args
    assert_raises(ValueError, AttributeMap, map=num_custom)

    # map mismatch
    am = AttributeMap(map=map_custom)
    if __debug__:
        # checked only in __debug__
        assert_raises(KeyError, am.to_numeric, literal_nonmatching)
    # needs reset and should work afterwards
    am.clear()
    assert_array_equal(am.to_numeric(literal_nonmatching), [2, 0, 1])
    # and now reverse
    am = AttributeMap(map=map_custom)
    assert_raises(KeyError, am.to_literal, num_default)

    # dict-like interface
    am = AttributeMap()
    ok_([(k, v) for k, v in am.iteritems()] == [])
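
# A minimal round-trip sketch (not from the original suite) of the default
# AttributeMap behavior exercised above: literals are enumerated (in sorted
# order, judging from the 'eins'/'sieben'/'zwei' expectations) on the first
# to_numeric() call, and to_literal() inverts that mapping.
def _example_attrmap_roundtrip():
    am = AttributeMap()
    nums = am.to_numeric(['b', 'a', 'b'])  # builds {'a': 0, 'b': 1}
    assert_array_equal(nums, [1, 0, 1])
    assert_array_equal(am.to_literal(nums), ['b', 'a', 'b'])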