Example #1
    def test_null_dist_prob_any(self):
        """Test 'any' tail statistics estimation"""
        skip_if_no_external('scipy')

        # test 'any' mode
        from mvpa.measures.corrcoef import CorrCoef
        ds = datasets['uni2small']

        null = MCNullDist(permutations=10, tail='any')

        assert_raises(ValueError, null.fit, CorrCoef(), ds)
        # cheat and map to numeric for this test
        ds.sa.targets = AttributeMap().to_numeric(ds.targets)
        null.fit(CorrCoef(), ds)

        # 100 and -100 should both have zero probability on their respective
        # tails
        pm100 = null.p([-100, 0, 0, 0, 0, 0])
        p100 = null.p([100, 0, 0, 0, 0, 0])
        assert_array_almost_equal(pm100, p100)

        # With only 10 permutations it isn't easy to get reliable
        # non-parametric estimates, so we allow a somewhat low significance
        # ;-)
        self.failUnless(pm100[0] <= 0.1)
        self.failUnless(p100[0] <= 0.1)

        self.failUnless(np.all(pm100[1:] >= 0.1))
        self.failUnless(np.all(p100[1:] >= 0.1))
        # same test with just scalar measure/feature
        null.fit(CorrCoef(), ds[:, 0])
        p_100 = null.p(100)
        self.failUnlessAlmostEqual(null.p(-100), p_100)
        self.failUnlessAlmostEqual(p100[0], p_100)
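A minimal, self-contained sketch (not taken from the snippets above) of the AttributeMap round-trip that these examples rely on. The import path is an assumption for PyMVPA of this era; adjust it to your installed version.

# Sketch only: import path assumed, everything else mirrors usage in the examples.
from mvpa.misc.attrmap import AttributeMap

targets = ['rest', 'face', 'house', 'face', 'rest']

am = AttributeMap()               # builds the literal <-> numeric mapping on first use
numeric = am.to_numeric(targets)  # numeric codes, typically following sorted literals
literal = am.to_literal(numeric)  # maps the codes back to the original strings

# An explicit mapping, as in Examples #6 and #7, pins literals to -1/+1 for
# binary SVMs; mapnumeric=True asks the map to also remap values that are
# already numeric (inferred from how Examples #7 and #9 use it).
am2 = AttributeMap({'face': -1.0, 'house': +1.0}, mapnumeric=True)
assert list(am2.to_numeric(['face', 'house'])) == [-1.0, 1.0]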
Example #2
    def __init__(self, **kwargs):
        ClassWithCollections.__init__(self, **kwargs)

        # XXX
        # the place to map literal to numerical labels (and back)
        # this needs to be in the base class, since some classifiers also
        # have this nasty 'regression' mode, and the code in this class
        # needs to deal with converting the regression output into discrete
        # labels
        # however, preferably the mapping should be kept in the respective
        # low-level implementations that need it
        self._attrmap = AttributeMap()

        self.__trainednfeatures = None
        """Stores number of features for which classifier was trained.
        If None -- it wasn't trained at all"""

        self._set_retrainable(self.params.retrainable, force=True)
Example #3
    def test_regressions_classifiers(self, clf):
        """Simple tests on regressions being used as classifiers
        """
        # check if we get values set correctly
        clf.ca.change_temporarily(enable_ca=['estimates'])
        self.failUnlessRaises(UnknownStateError, clf.ca['estimates']._get)
        cv = CrossValidatedTransferError(
            TransferError(clf),
            NFoldSplitter(),
            enable_ca=['confusion', 'training_confusion'])
        ds = datasets['uni2small'].copy()
        # we want numeric labels to maintain the previous behavior, especially
        # since we deal with regressions here
        ds.sa.targets = AttributeMap().to_numeric(ds.targets)
        cverror = cv(ds)

        self.failUnless(len(clf.ca.estimates) == ds[ds.chunks == 1].nsamples)
        clf.ca.reset_changed_temporarily()
Example #4
    def test_sensitivity_based_feature_selection(self, clf):

        # sensitivity analyser and transfer error quantifier use the SAME clf!
        sens_ana = clf.get_sensitivity_analyzer(postproc=maxofabs_sample())

        # Number of features to remove
        Nremove = 2

        # because the clf is already trained when computing the sensitivity
        # map, prevent retraining for transfer error calculation
        # Use absolute of the svm weights as sensitivity
        fe = SensitivityBasedFeatureSelection(sens_ana,
                feature_selector=FixedNElementTailSelector(2),
                enable_ca=["sensitivity", "selected_ids"])

        wdata = self.get_data()
        tdata = self.get_data_t()
        # XXX for now convert to numeric labels, but should better be taken
        # care of during clf refactoring
        am = AttributeMap()
        wdata.targets = am.to_numeric(wdata.targets)
        tdata.targets = am.to_numeric(tdata.targets)

        wdata_nfeatures = wdata.nfeatures
        tdata_nfeatures = tdata.nfeatures

        sdata, stdata = fe(wdata, tdata)

        # fail if orig datasets are changed
        self.failUnless(wdata.nfeatures == wdata_nfeatures)
        self.failUnless(tdata.nfeatures == tdata_nfeatures)

        # silly check that the expected number of features was removed
        self.failUnlessEqual(wdata.nfeatures, sdata.nfeatures+Nremove,
            msg="We had to remove exactly %d features" % Nremove)

        self.failUnlessEqual(tdata.nfeatures, stdata.nfeatures+Nremove,
            msg="We had to remove exactly %d features in testing as well" % Nremove)

        self.failUnlessEqual(fe.ca.sensitivity.nfeatures, wdata_nfeatures,
            msg="Sensitivity has to have # of features equal to the original")

        self.failUnlessEqual(len(fe.ca.selected_ids), sdata.nfeatures,
            msg="# of selected features must equal the one in the result dataset")
Example #5
    def test_degenerate_usage(self, clf):
        """Test how clf handles degenerate cases
        """
        # Whenever we have only 1 feature with only 0s in it
        ds1 = datasets['uni2small'][:, [0]]
        # XXX this very line breaks LARS in many other unittests --
        # very interesting effect. but screw it -- for now it will be
        # this way
        ds1.samples[:] = 0.0  # all 0s
        # For regression we need numbers
        if clf.__is_regression__:
            ds1.targets = AttributeMap().to_numeric(ds1.targets)
        #ds2 = datasets['uni2small'][[0], :]
        #ds2.samples[:] = 0.0             # all 0s

        clf.ca.change_temporarily(
            enable_ca=['estimates', 'training_confusion'])

        # Good pukes are good ;-)
        # TODO XXX add
        #  - ", ds2):" to test degenerate ds with 1 sample
        #  - ds1 but without 0s -- just 1 feature... feature selections
        #    might lead to 'surprises' due to magic in combiners etc
        for ds in (ds1, ):
            try:
                try:
                    clf.train(ds)  # should not crash or stall
                except ValueError, e:
                    self.fail(
                        "Failed to train on degenerate data. Error was %r" % e)
                # could we still get those?
                _ = clf.summary()
                cm = clf.ca.training_confusion
                # If succeeded to train/predict (due to
                # training_confusion) without error -- results better be
                # at "chance"
                continue  # NB: the chance-level checks below are currently skipped
                if 'ACC' in cm.stats:
                    self.failUnlessEqual(cm.stats['ACC'], 0.5)
                else:
                    self.failUnless(np.isnan(cm.stats['CCe']))
            except tuple(_degenerate_allowed_exceptions):
                pass
Example #6
    def _call(self, dataset):
        sens = super(self.__class__, self)._call(dataset)
        clf = self.clf
        targets_attr = clf.params.targets_attr
        if targets_attr in sens.sa:
            # if labels are present -- transform them into meaningful tuples
            # (or not if just a single beast)
            am = AttributeMap(dict([(l, -1) for l in clf.neglabels] +
                                   [(l, +1) for l in clf.poslabels]))

            # XXX here we still can get a sensitivity per each label
            # (e.g. with SMLR as the slave clf), so I guess we should
            # tune up Multiclass...Analyzer to add an additional sa
            # And here we might need to check if asobjarray call is necessary
            # and should be actually done
            #asobjarray(
            sens.sa[targets_attr] = \
                am.to_literal(sens.sa[targets_attr].value, recurse=True)
        return sens
Example #7
    def _train(self, dataset):
        """Train SVM
        """

        # XXX watchout
        # self.untrain()
        newkernel, newsvm = False, False
        # local bindings for faster lookup
        params = self.params
        retrainable = self.params.retrainable

        targets_sa_name = params.targets_attr  # name of targets sa
        targets_sa = dataset.sa[targets_sa_name]  # actual targets sa

        if retrainable:
            _changedData = self._changedData

        # LABELS
        ul = None
        self.__traindataset = dataset

        # OK -- we have to map labels since
        #  binary ones expect -1/+1
        #  Multiclass expect labels starting with 0, otherwise they puke
        #   when run from ipython... yikes
        if __debug__:
            debug("SG_", "Creating labels instance")

        if self.__is_regression__:
            labels_ = np.asarray(targets_sa.value, dtype='double')
        else:
            ul = targets_sa.unique
            # ul.sort()

            if len(ul) == 2:
                # assure that we have -1/+1
                _labels_dict = {ul[0]: -1.0, ul[1]: +1.0}
            elif len(ul) < 2:
                raise FailedToTrainError, \
                      "We do not have 1-class SVM brought into SG yet"
            else:
                # can't use plain enumerate since we need them swapped
                _labels_dict = dict([(ul[i], i) for i in range(len(ul))])

            # Create SG-customized attrmap to assure -1 / +1 if necessary
            self._attrmap = AttributeMap(_labels_dict, mapnumeric=True)

            if __debug__:
                debug("SG__", "Mapping labels using dict %s" % _labels_dict)
            labels_ = self._attrmap.to_numeric(targets_sa.value).astype(float)

        labels = shogun.Features.Labels(labels_)
        _setdebug(labels, 'Labels')

        # KERNEL

        # XXX cruel fix for now... whole retraining business needs to
        # be rethought
        if retrainable:
            _changedData['kernel_params'] = _changedData.get(
                'kernel_params', False)

        if not retrainable \
               or _changedData['traindata'] or _changedData['kernel_params']:
            # If needed compute or just collect arguments for SVM and for
            # the kernel

            if retrainable and __debug__:
                if _changedData['traindata']:
                    debug(
                        "SG",
                        "Re-Creating kernel since training data has changed")

                if _changedData['kernel_params']:
                    debug(
                        "SG",
                        "Re-Creating kernel since params %s has changed" %
                        _changedData['kernel_params'])

            k = self.params.kernel
            k.compute(dataset)
            self.__kernel = kernel = k.as_raw_sg()

            newkernel = True
            self.kernel_params.reset()  # mark them as not-changed
            #_setdebug(kernel, 'Kernels')

            #self.__condition_kernel(kernel)
            if retrainable:
                if __debug__:
                    debug("SG_", "Resetting test kernel for retrainable SVM")
                self.__kernel_test = None

        # TODO -- handle _changedData['params'] correctly, ie without recreating
        # whole SVM
        Cs = None
        if not retrainable or self.__svm is None or _changedData['params']:
            # SVM
            if self.params.has_key('C'):
                Cs = self._get_cvec(dataset)

                # XXX do not jump over the head and leave it up to the user
                #     ie do not rescale automagically by the number of samples
                #if len(Cs) == 2 and not ('regression' in self.__tags__) and len(ul) == 2:
                #    # we were given two Cs
                #    if np.max(C) < 0 and np.min(C) < 0:
                #        # and both are requested to be 'scaled' TODO :
                #        # provide proper 'features' to the parameters,
                #        # so we could specify explicitely if to scale
                #        # them by the number of samples here
                #        nl = [np.sum(labels_ == _labels_dict[l]) for l in ul]
                #        ratio = np.sqrt(float(nl[1]) / nl[0])
                #        #ratio = (float(nl[1]) / nl[0])
                #        Cs[0] *= ratio
                #        Cs[1] /= ratio
                #        if __debug__:
                #            debug("SG_", "Rescaled Cs to %s to accomodate the "
                #                  "difference in number of training samples" %
                #                  Cs)

            # Choose appropriate implementation
            svm_impl_class = self.__get_implementation(ul)

            if __debug__:
                debug("SG", "Creating SVM instance of %s" % ` svm_impl_class `)

            if self._svm_impl in ['libsvr', 'svrlight']:
                # for regressions the constructor is a bit different
                self.__svm = svm_impl_class(Cs[0], self.params.tube_epsilon,
                                            self.__kernel, labels)
                # we need to set epsilon explicitly
                self.__svm.set_epsilon(self.params.epsilon)
            elif self._svm_impl in ['krr']:
                self.__svm = svm_impl_class(self.params.tau, self.__kernel,
                                            labels)
            else:
                self.__svm = svm_impl_class(Cs[0], self.__kernel, labels)
                self.__svm.set_epsilon(self.params.epsilon)

            # Set shrinking
            if 'shrinking' in params:
                shrinking = params.shrinking
                if __debug__:
                    debug("SG_", "Setting shrinking to %s" % shrinking)
                self.__svm.set_shrinking_enabled(shrinking)

            if Cs is not None and len(Cs) == 2:
                if __debug__:
                    debug(
                        "SG_",
                        "Since multiple Cs are provided: %s, assign them" % Cs)
                self.__svm.set_C(Cs[0], Cs[1])

            self.params.reset()  # mark them as not-changed
            newsvm = True
            _setdebug(self.__svm, 'SVM')
            # Set optimization parameters
            if self.params.has_key('tube_epsilon') and \
                   hasattr(self.__svm, 'set_tube_epsilon'):
                self.__svm.set_tube_epsilon(self.params.tube_epsilon)
            self.__svm.parallel.set_num_threads(self.params.num_threads)
        else:
            if __debug__:
                debug("SG_", "SVM instance is not re-created")
            if _changedData['targets']:  # labels were changed
                if __debug__: debug("SG__", "Assigning new labels")
                self.__svm.set_labels(labels)
            if newkernel:  # kernel was replaced
                if __debug__: debug("SG__", "Assigning new kernel")
                self.__svm.set_kernel(self.__kernel)
            assert _changedData['params'] is False  # we should never get here

        if retrainable:
            # we must assign it only if it is retrainable
            self.ca.retrained = not newsvm or not newkernel

        # Train
        if __debug__ and 'SG' in debug.active:
            if not self.__is_regression__:
                lstr = " with labels %s" % targets_sa.unique
            else:
                lstr = ""
            debug(
                "SG", "%sTraining %s on data%s" %
                (("", "Re-")[retrainable and self.ca.retrained], self, lstr))

        self.__svm.train()

        if __debug__:
            debug("SG_", "Done training SG_SVM %s" % self)

        # Report on training
        if (__debug__ and 'SG__' in debug.active) or \
           self.ca.is_enabled('training_confusion'):
            if __debug__:
                debug("SG_", "Assessing predictions on training data")
            trained_targets = self.__svm.classify().get_labels()

        else:
            trained_targets = None

        if __debug__ and "SG__" in debug.active:
            debug(
                "SG__", "Original labels: %s, Trained labels: %s" %
                (targets_sa.value, trained_targets))

        # Assign training confusion right away here since we are ready
        # to do so.
        # XXX TODO use some other conditional attribute like 'trained_targets' and
        #     use it within base Classifier._posttrain to assign predictions
        #     instead of duplicating code here
        # XXX For now it can be done only for regressions since labels need to
        #     be remapped and that becomes even worse if we use regression
        #     as a classifier so mapping happens upstairs
        if self.__is_regression__ and self.ca.is_enabled('training_confusion'):
            self.ca.training_confusion = self.__summary_class__(
                targets=targets_sa.value, predictions=trained_targets)
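The essential label-mapping step in `_train` above is easy to lose in the surrounding retraining logic. Below is a condensed sketch of just that step; the helper name `make_label_map` is illustrative (not part of PyMVPA) and the import path is the same assumption as in the sketch after Example #1: binary problems get literals pinned to -1/+1, multiclass problems get labels enumerated from 0, and the resulting dict seeds an `AttributeMap(mapnumeric=True)`.

# Sketch only: helper name and import path are illustrative assumptions.
import numpy as np
from mvpa.misc.attrmap import AttributeMap

def make_label_map(targets):
    """Condensed from the label handling in Example #7's _train()."""
    ul = np.unique(targets)
    if len(ul) == 2:
        # binary shogun SVMs expect -1/+1
        _labels_dict = {ul[0]: -1.0, ul[1]: +1.0}
    elif len(ul) < 2:
        raise ValueError("one-class problems are not supported here")
    else:
        # multiclass implementations expect labels starting at 0
        _labels_dict = dict([(ul[i], i) for i in range(len(ul))])
    return AttributeMap(_labels_dict, mapnumeric=True)

am = make_label_map(['face', 'house', 'face'])
labels_ = am.to_numeric(['face', 'house', 'face']).astype(float)  # -> [-1., 1., -1.]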
Example #8
    def test_regressions(self, regr):
        """Simple tests on regressions
        """
        ds = datasets['chirp_linear']
        # we want numeric labels to maintain the previous behavior, especially
        # since we deal with regressions here
        ds.sa.targets = AttributeMap().to_numeric(ds.targets)

        cve = CrossValidatedTransferError(
            TransferError(regr),
            splitter=NFoldSplitter(),
            postproc=mean_sample(),
            enable_ca=['training_confusion', 'confusion'])
        # check the default
        self.failUnless(isinstance(cve.transerror.errorfx, CorrErrorFx))
        corr = np.asscalar(cve(ds).samples)

        # Our CorrErrorFx should never return NaN
        self.failUnless(not np.isnan(corr))
        self.failUnless(corr == cve.ca.confusion.stats['CCe'])

        splitregr = SplitClassifier(
            regr,
            splitter=OddEvenSplitter(),
            enable_ca=['training_confusion', 'confusion'])
        splitregr.train(ds)
        split_corr = splitregr.ca.confusion.stats['CCe']
        split_corr_tr = splitregr.ca.training_confusion.stats['CCe']

        for confusion, error in (
            (cve.ca.confusion, corr),
            (splitregr.ca.confusion, split_corr),
            (splitregr.ca.training_confusion, split_corr_tr),
        ):
            #TODO: test confusion statistics
            # Part of it for now -- CCe
            for conf in confusion.summaries:
                stats = conf.stats
                if cfg.getboolean('tests', 'labile', default='yes'):
                    self.failUnless(stats['CCe'] < 0.5)
                self.failUnlessEqual(stats['CCe'], stats['Summary CCe'])

            s0 = confusion.as_string(short=True)
            s1 = confusion.as_string(short=False)

            for s in [s0, s1]:
                self.failUnless(len(s) > 10,
                                msg="We should get some string representation "
                                "of regression summary. Got %s" % s)
            if cfg.getboolean('tests', 'labile', default='yes'):
                self.failUnless(
                    error < 0.2,
                    msg="Regressions should perform well on a simple "
                    "dataset. Got correlation error of %s " % error)

            # Test access to summary statistics
            # YOH: lets start making testing more reliable.
            #      p-value for such accident to have is verrrry tiny,
            #      so if regression works -- it better has at least 0.5 ;)
            #      otherwise fix it! ;)
            # YOH: not now -- issues with libsvr in SG and linear kernel
            if cfg.getboolean('tests', 'labile', default='yes'):
                self.failUnless(confusion.stats['CCe'] < 0.5)

        # just to check if it works fine
        split_predictions = splitregr.predict(ds.samples)
Example #9
def plot_decision_boundary_2d(dataset,
                              clf=None,
                              targets=None,
                              regions=None,
                              maps=None,
                              maps_res=50,
                              vals=[-1, 0, 1],
                              data_callback=None):
    """Plot a scatter of a classifier's decision boundary and data points

    Assumes data is 2d (no way to visualize otherwise!!)

    Parameters
    ----------
    dataset : `Dataset`
      Data points to visualize (might be the data `clf` was trained on, or
      any novel data).
    clf : `Classifier`, optional
      Trained classifier
    targets : string, optional
      Which samples attribute to use for targets.  If None and clf is
      provided, then `clf.params.targets_attr` is used.
    regions : string, optional
      Plot regions (polygons) around groups of samples with the same
      attribute (and target attribute) values. E.g. chunks.
    maps : string in {'targets', 'estimates'}, optional
      Either plot underlying colored maps, such as clf predictions
      within the spanned regions, or estimates from the classifier
      (might not work for some).
    maps_res : int, optional
      Number of points in each direction to evaluate.
      Points are between axis limits, which are set automatically by
      matplotlib.  A higher number yields smoother decision lines, but at a
      quadratic (O(n^2)) cost in classification time/memory.
    vals : array of floats, optional
      Where to draw the contour lines if maps='estimates'
    data_callback : callable, optional
      Callable object to preprocess the new data points.  Points to be
      classified are obtained as samples = data_callback(xysamples), so this
      can be, e.g., a function that normalizes them or caches them before
      they are classified.
    """

    if False:
        ## from mvpa.misc.data_generators import *
        ## from mvpa.clfs.svm import *
        ## from mvpa.clfs.knn import *
        ## ds = dumb_feature_binary_dataset()
        dataset = normal_feature_dataset(nfeatures=2, nchunks=5, snr=10,
                                         nlabels=4,
                                         means=[[0, 1], [1, 0], [1, 1], [0, 0]])
        # slight shifts for chunks ;)
        dataset.samples += dataset.sa.chunks[:, None] * 0.1
        #dataset = normal_feature_dataset(nfeatures=2, nlabels=3, means=[ [0,1], [1,0], [1,1] ])
        #dataset = normal_feature_dataset(nfeatures=2, nlabels=2, means=[ [0,1], [1,0] ])
        #clf = LinearCSVMC(C=-1)
        clf = kNN(4)  #LinearCSVMC(C=-1)
        clf.train(dataset)
        #clf = None
        #plot_decision_boundary_2d(ds, clf)
        targets = 'targets'
        regions = 'chunks'
        #maps = 'estimates'
        maps = 'targets'
        #maps = None #'targets'
        res = 50
        vals = [-1, 0, 1]
        data_callback = None
        pl.clf()

    if dataset.nfeatures != 2:
        raise ValueError('Can only plot a decision boundary in 2D')

    Pioff()
    a = pl.gca()  # f.add_subplot(1,1,1)

    attrmap = None
    if clf:
        estimates_were_enabled = clf.ca.is_enabled('estimates')
        clf.ca.enable('estimates')

        if targets is None:
            targets = clf.params.targets_attr
        # Lets reuse classifiers attrmap if it is good enough
        attrmap = clf._attrmap
        predictions = clf.predict(dataset)

    targets_sa_name = targets  # bad Yarik -- will rebind targets to actual values
    targets_lit = dataset.sa[targets_sa_name].value
    utargets_lit = dataset.sa[targets_sa_name].unique

    if not (attrmap is not None and len(attrmap)
            and set(clf._attrmap.keys()).issuperset(utargets_lit)):
        # create our own
        attrmap = AttributeMap(mapnumeric=True)

    targets = attrmap.to_numeric(targets_lit)
    utargets = attrmap.to_numeric(utargets_lit)

    vmin = min(utargets)
    vmax = max(utargets)
    cmap = pl.cm.RdYlGn  # argument

    # Scatter points
    if clf:
        all_hits = predictions == targets_lit
    else:
        all_hits = np.ones((len(targets), ), dtype=bool)

    targets_colors = {}
    for l in utargets:
        targets_mask = targets == l
        s = dataset[targets_mask]
        targets_colors[l] = c = cmap((l - vmin) / float(vmax - vmin))

        # We want to plot hits and misses with different symbols
        hits = all_hits[targets_mask]
        misses = np.logical_not(hits)
        scatter_kwargs = dict(c=[c], zorder=10 + (l - vmin))

        if sum(hits):
            a.scatter(s.samples[hits, 0],
                      s.samples[hits, 1],
                      marker='o',
                      label='%s [%d]' % (attrmap.to_literal(l), sum(hits)),
                      **scatter_kwargs)
        if sum(misses):
            a.scatter(s.samples[misses, 0],
                      s.samples[misses, 1],
                      marker='x',
                      label='%s [%d] (miss)' %
                      (attrmap.to_literal(l), sum(misses)),
                      edgecolor=[c],
                      **scatter_kwargs)

    (xmin, xmax) = a.get_xlim()
    (ymin, ymax) = a.get_ylim()
    extent = (xmin, xmax, ymin, ymax)

    # Create grid to evaluate, predict it
    (x, y) = np.mgrid[xmin:xmax:np.complex(0, maps_res),
                      ymin:ymax:np.complex(0, maps_res)]
    news = np.vstack((x.ravel(), y.ravel())).T
    try:
        news = data_callback(news)
    except TypeError:  # Not a callable object
        pass

    imshow_kwargs = dict(origin='lower',
                         zorder=1,
                         aspect='auto',
                         interpolation='bilinear',
                         alpha=0.9,
                         cmap=cmap,
                         vmin=vmin,
                         vmax=vmax,
                         extent=extent)

    if maps is not None:
        if clf is None:
            raise ValueError, \
                  "Please provide classifier for plotting maps of %s" % maps
        predictions_new = clf.predict(news)

    if maps == 'estimates':
        # Contour and show predictions
        trained_targets = attrmap.to_numeric(clf.ca.trained_targets)

        if len(trained_targets) == 2:
            linestyles = []
            for v in vals:
                if v == 0:
                    linestyles.append('solid')
                else:
                    linestyles.append('dashed')
            vmin, vmax = -3, 3  # Gives a nice tonal range ;)
            map_ = 'estimates'  # should actually depend on estimates
        else:
            vals = (trained_targets[:-1] + trained_targets[1:]) / 2.
            linestyles = ['solid'] * len(vals)
            map_ = 'targets'

        try:
            map_values = clf.ca.estimates.reshape(x.shape)
            a.imshow(map_values.T, **imshow_kwargs)
            CS = a.contour(x,
                           y,
                           map_values,
                           vals,
                           zorder=6,
                           linestyles=linestyles,
                           extent=extent,
                           colors='k')
        except ValueError, e:
            print "Sorry - plotting of estimates isn't full supported for %s. " \
                  "Got exception %s" % (clf, e)
    elif maps == 'targets':
        map_values = attrmap.to_numeric(predictions_new).reshape(x.shape)
        a.imshow(map_values.T, **imshow_kwargs)
        #CS = a.contour(x, y, map_values, vals, zorder=6,
        #               linestyles=linestyles, extent=extent, colors='k')

    # Plot regions belonging to the same pair of the given attribute
    # (e.g. chunks) and the targets attribute
    if regions:
        chunks_sa = dataset.sa[regions]
        chunks_lit = chunks_sa.value
        uchunks_lit = chunks_sa.unique
        chunks_attrmap = AttributeMap(mapnumeric=True)
        chunks = chunks_attrmap.to_numeric(chunks_lit)
        uchunks = chunks_attrmap.to_numeric(uchunks_lit)

        from matplotlib.delaunay.triangulate import Triangulation
        from matplotlib.patches import Polygon
        # Let's figure out convex hulls for each chunk/label pair
        for target in utargets:
            t_mask = targets == target
            for chunk in uchunks:
                tc_mask = np.logical_and(t_mask, chunk == chunks)
                tc_samples = dataset.samples[tc_mask]
                tr = Triangulation(tc_samples[:, 0], tc_samples[:, 1])
                poly = pl.fill(
                    tc_samples[tr.hull, 0],
                    tc_samples[tr.hull, 1],