Example #1
 def train(self, sparseX, **kargv):
     print('Info: Training KNN')
     if 'algorithm' not in kargv:
         self.model = NearestNeighbors(kargv['topK'], metric=kargv['metric'])
     else:
         self.model = NearestNeighbors(kargv['topK'], metric=kargv['metric'], algorithm=kargv['algorithm'])
     self.model.fit(sparseX)
Example #2
def image_retrieval(constant_overwrites):
    img_shape, attr, x_train, x_test = load_faces_dataset()
    constants = merge_dict(get_constants(), constant_overwrites)
    constants['img_shape'] = img_shape
    encoder_filename = constants['encoder_filename']
    decoder_filename = constants['decoder_filename']
    reset_tf_session()
    autoencoder, encoder, decoder = model_builder(network_builder, constants)
    if os.path.exists(encoder_filename) and not constants['retrain']:
        encoder.load_weights(encoder_filename)
    else:
        data = {'X_train': x_train, 'X_test': x_test}
        train(autoencoder, data, constants)
        encoder.save_weights(encoder_filename)
        decoder.save_weights(decoder_filename)

    images = x_train
    codes = encoder.predict(images)
    assert len(codes) == len(images)
    nei_clf = NearestNeighbors(metric="euclidean")
    nei_clf.fit(codes)

    # Cherry-picked examples:

    # smiles
    show_similar(x_test[247], nei_clf, encoder, images)

    # ethnicity
    show_similar(x_test[56], nei_clf, encoder, images)

    # glasses
    show_similar(x_test[63], nei_clf, encoder, images)
Example #3
class FriendTrainer(Trainer):
    '''
    @summary: Find users similar to a given user, in preparation for the user-based collaborative filtering algorithm
    '''
    def __init__(self, num=5, provider=None):
        super().__init__(num, provider)
        self.num = num
        self.provider = provider if provider else UserInterestProvider()
        self.model = None

    def config(self, num=None, category=None):
        self.model = None
        self.num = num if num else self.num
        self.provider = ProviderFactory.getProvider(
            featureCategory=category) if category else self.provider

    def train(self, userId):
        if not self.model:
            self.model = NearestNeighbors(n_neighbors=self.num + 1).fit(
                self.provider.provideAll())
        distance, neighborList = self.model.kneighbors(
            [self.provider.provide(userId)])
        if distance[0][2] == 0:
            return []
        similarity = self.distanceToSimilarity(distance[0][1:])
        res = []
        for i in range(self.num):
            # kneighbors returns 2-D arrays; skip index 0 (the user itself)
            res.append((neighborList[0][i + 1], similarity[i]))
        return res

    def trainAll(self):
        if not self.model:
            self.model = NearestNeighbors(n_neighbors=self.num + 1,
                                          algorithm='auto').fit(
                                              self.provider.provideAll())
        res = []
        distances, friends = self.model.kneighbors(self.provider.provideAll())
        for count in range(len(friends)):
            friend = []
            if distances[count][2] == 0:
                res.append(friend)
                continue
            similarity = self.distanceToSimilarity(distances[count])[1:]
            neighborList = friends[count][1:]

            for i in range(self.num):
                friend.append((neighborList[i], similarity[i]))
            res.append(friend)
            print("User " + str(count) + " finded!")
        if self.isUpdate():
            # DBUtil.dumpFriends(res)
            CacheUtil.dumpUserFriends(res)
            DBUtil.dumpFriends(res)
        return res

    def clear(self):
        del self.model

    def distanceToSimilarity(self, distance):
        return list(map(lambda x: 1 - x, distance))
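A hedged usage sketch for the trainer above; UserInterestProvider, CacheUtil and DBUtil are project internals assumed to be wired as in the snippet:

trainer = FriendTrainer(num=5)
# (neighbor_index, similarity) pairs for the 5 users most similar to user 42
similar_users = trainer.train(userId=42)
# similarity lists for every user at once
all_friends = trainer.trainAll()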
Example #4
    def fit(self, X, y, sample_weight=None):
        """ Prepare different things for fast computation of metrics """
        X, y, sample_weight = check_xyw(X, y, sample_weight=sample_weight)
        self._label_mask = numpy.array(y == self.uniform_label)
        assert sum(self._label_mask) > 0, 'No events of uniform class!'
        # weights of events
        self._masked_weight = sample_weight[self._label_mask]

        X_part = numpy.array(take_features(
            X, self.uniform_features))[self._label_mask, :]
        # computing knn indices
        neighbours = NearestNeighbors(n_neighbors=self.n_neighbours,
                                      algorithm='kd_tree').fit(X_part)
        _, self._groups_indices = neighbours.kneighbors(X_part)
        self._group_matrix = ut.group_indices_to_groups_matrix(
            self._groups_indices, n_events=len(X_part))
        # self._group_weights = ut.compute_group_weights_by_indices(self._groups_indices,
        # sample_weight=self._masked_weight)
        self._group_weights = ut.compute_group_weights(
            self._group_matrix, sample_weight=self._masked_weight)
        # self._divided_weights = ut.compute_divided_weight_by_indices(self._groups_indices,
        #                                                              sample_weight=self._masked_weight)
        self._divided_weights = ut.compute_divided_weight(
            self._group_matrix, sample_weight=self._masked_weight)
        return self
Example #5
 def _get_kernel(self, X, y=None):
     if self.kernel == "rbf":
         if y is None:
             return rbf_kernel(X, X, gamma=self.gamma)
         else:
             return rbf_kernel(X, y, gamma=self.gamma)
     elif self.kernel == "knn":
         if self.nn_fit is None:
             t0 = time()
             self.nn_fit = NearestNeighbors(self.n_neighbors,
                                            n_jobs=self.n_jobs).fit(X)
             print("NearestNeighbors fit time cost:", time() - t0)
         if y is None:
             t0 = time()
             result = self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
                                                   self.n_neighbors,
                                                   mode='connectivity')
             print("construct kNN graph time cost:", time() - t0)
             return result
         else:
             return self.nn_fit.kneighbors(y, return_distance=False)
     elif callable(self.kernel):
         if y is None:
             return self.kernel(X, X)
         else:
             return self.kernel(X, y)
     else:
         raise ValueError("%s is not a valid kernel. Only rbf and knn"
                          " or an explicit function "
                          " are supported at this time." % self.kernel)
Example #6
 def fit(self, X, y):
     t = time()  # this fit() returns the propagated labels directly
     # build the graph; the result is the affinity matrix
     if self.kernel == 'dbscan' or self.kernel is None:
         affinity_matrix = self.dbscan(X, self.eps, self.minPts)
     # it is possible to use other kernels -> as parameter
     elif self.kernel == 'rbf':
         affinity_matrix = rbf_kernel(X, X, gamma=self.gamma)
     elif self.kernel == 'knn':
         affinity_matrix = NearestNeighbors(self.naighbors).fit(X).kneighbors_graph(X, self.naighbors).toarray()
     else:
         raise ValueError("%s is not a valid kernel" % self.kernel)
     print("graph(%s) time %2.3fms" % (self.kernel, (time() - t) * 1000))
     if affinity_matrix.max() == 0:
         print("no affinity matrix found")
         return y
     
     degree_matrix   = np.diag(affinity_matrix.sum(axis=0))
     affinity_matrix = np.matrix(affinity_matrix)

     try:
         inverse_degree_matrix = np.linalg.inv(degree_matrix)
     except np.linalg.LinAlgError as err:
         if 'Singular matrix' in err.args:
             # fall back to a pseudo-inverse when the degree matrix is singular
             inverse_degree_matrix = np.linalg.pinv(degree_matrix)
         else:
             raise

     matrix = inverse_degree_matrix * affinity_matrix
     # split labels in different vectors to calculate the propagation for the separate label
     labels = np.unique(y)
     labels = [x for x in labels if x != self.unlabeledValue]
     # init the yn1 and y0
     y0  = [[1 if (x == l) else 0 for x in y] for l in labels]
     yn1 = y0
     # resets the probability to 1 for samples that were labeled in the source
     toOrgLabels      = np.vectorize(lambda x, y : 1 if y == 1 else x , otypes=[np.intp])
     # resets the indices of samples that were labeled in the source
     toOrgLabelsIndex = np.vectorize(lambda x, y, z : z if y == 1 else x , otypes=[np.intp])
     lastLabels       = np.argmax(y0, axis=0)
     while True:
         yn1 = yn1 * matrix
         # first: map the score matrix back to label indices
         ynLabelsIndex = np.argmax(yn1, axis=0)
         # normalize by the global maximum
         yn1 /= yn1.max()
         yn1 = toOrgLabels(yn1, y0)
         for x in y0:
             ynLabelsIndex = toOrgLabelsIndex(ynLabelsIndex, x, y0.index(x))
         # second: clamp the source labels back into the result
         if np.array_equiv(ynLabelsIndex, lastLabels):
             break
         lastLabels = ynLabelsIndex
     # the result holds label indices -> cast them back to the given labels
     toLabels = np.vectorize(lambda x : labels[x])
     return np.array(toLabels(lastLabels))[0]
Example #7
 def train(self, sparseX, **kargv):
     print('Info: Training KNN')
     if 'algorithm' not in kargv:
         self.model = NearestNeighbors(kargv['topK'],
                                       metric=kargv['metric'])
     else:
         self.model = NearestNeighbors(kargv['topK'],
                                       metric=kargv['metric'],
                                       algorithm=kargv['algorithm'])
     self.model.fit(sparseX)
Example #8
class NearestNeighboor(object):
    def __init__(self):
        self.model = None

    #preprocessing, featuring, training, predicting, parsing, evaluation
    @staticmethod
    def sparseX(candPartPath, qUserPath, candToId, qPackageToId, matShape):
        row = []
        col = []
        val = []
        for file_ in os.listdir(qUserPath):
            f = gzip.open(qUserPath + os.sep + file_, mode='rt')  # text mode so split('|') works on str
            for line in f:
                spl = line.strip().split('|')
                username = spl[0]
                if username not in candToId:
                    continue
                for i in range(1, len(spl)):
                    sp = spl[i].split(':')
                    if sp[0] in qPackageToId:
                        row.append(candToId[username])
                        col.append(qPackageToId[sp[0]])
                        val.append(int(sp[1]))
            f.close()

        f = gzip.open(candPartPath, mode='rt')  # text mode so split('|') works on str
        for line in f:
            spl = line.strip().split('|')
            username = spl[0]
            for i in range(1, len(spl)):
                sp = spl[i].split(':')
                row.append(candToId[username])
                col.append(qPackageToId[sp[0]])
                val.append(int(sp[1]))
        f.close()

        sparseX = csc_matrix((val, (row, col)), shape=matShape)
        return sparseX

    def train(self, sparseX, **kargv):
        print('Info: Training KNN')
        if 'algorithm' not in kargv:
            self.model = NearestNeighbors(kargv['topK'],
                                          metric=kargv['metric'])
        else:
            self.model = NearestNeighbors(kargv['topK'],
                                          metric=kargv['metric'],
                                          algorithm=kargv['algorithm'])
        self.model.fit(sparseX)

    def predict(self, sparsePX):
        pred = self.model.kneighbors(sparsePX, return_distance=True)
        dist = pred[0]
        idx = pred[1]
        return (dist, idx)
Example #9
 def _entropy(data):
     if len(data) == 1:
         return 0.0
     euler_const = 0.5772156649
     nn = NearestNeighbors(n_neighbors=2).fit(data)
     entropy = 0.0
     for x in data:
         nearest_neighbor_distance = nn.kneighbors([x])[0][0].max()  # [x] makes a 2-D query array
         entropy += math.log(len(data) * nearest_neighbor_distance)
     entropy = entropy / len(data) + math.log(2) + euler_const
     return entropy
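A quick hedged check of _entropy (assumes math and NearestNeighbors are imported in the surrounding module; the loop above is a nearest-neighbour entropy estimate):

import numpy as np
data = np.random.RandomState(0).normal(size=(1000, 1))
# for a standard 1-D Gaussian the differential entropy is log(sqrt(2*pi*e)), roughly 1.42
print(_entropy(data))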
Example #10
    def preprocess_neighbors(self, rebuild=False, save=True):
        neighbors_model_path = os.path.join(self.selected_dir,
                                            "neighbors_model" + ".pkl")
        neighbors_path = os.path.join(self.selected_dir, "neighbors" + ".npy")
        neighbors_weight_path = os.path.join(self.selected_dir,
                                             "neighbors_weight" + ".npy")
        test_neighbors_path = os.path.join(self.selected_dir,
                                           "test_neighbors" + ".npy")
        test_neighbors_weight_path = os.path.join(
            self.selected_dir, "test_neighbors_weight" + ".npy")
        if os.path.exists(neighbors_model_path) and \
                os.path.exists(neighbors_path) and \
                os.path.exists(test_neighbors_path) and not rebuild:
            print("neighbors and neighbor_weight exist!!!")
            neighbors = np.load(neighbors_path)
            neighbors_weight = np.load(neighbors_weight_path)
            test_neighbors = np.load(test_neighbors_path)
            self.test_neighbors = test_neighbors
            return neighbors, neighbors_weight, test_neighbors
        print("neighbors and neighbor_weight  do not exist, preprocessing!")
        train_num = self.train_X.shape[0]
        train_y = np.array(self.train_y)
        test_num = self.test_X.shape[0]
        max_neighbors = min(len(train_y), 200)
        print("data shape: {}, labeled_num: {}".format(str(self.train_X.shape),
                                                       sum(train_y != -1)))
        nn_fit = NearestNeighbors(7, n_jobs=-4).fit(self.train_X)
        print("nn construction finished!")
        neighbor_result = nn_fit.kneighbors_graph(
            nn_fit._fit_X,
            max_neighbors,
            # 2,
            mode="distance")
        test_neighbors_result = nn_fit.kneighbors_graph(self.test_X,
                                                        max_neighbors,
                                                        mode="distance")
        print("neighbor_result got!")
        neighbors, neighbors_weight = csr_to_impact_matrix(
            neighbor_result, train_num, max_neighbors)
        test_neighbors, test_neighbors_weight = csr_to_impact_matrix(
            test_neighbors_result, test_num, max_neighbors)
        self.test_neighbors = test_neighbors

        print("preprocessed neighbors got!")

        # save neighbors information
        if save:
            pickle_save_data(neighbors_model_path, nn_fit)
            np.save(neighbors_path, neighbors)
            np.save(neighbors_weight_path, neighbors_weight)
            np.save(test_neighbors_path, test_neighbors)
            np.save(test_neighbors_weight_path, test_neighbors_weight)
        return neighbors, neighbors_weight, test_neighbors
Example #11
    def fit(self, X, y, sample_weight=None):
        """ Prepare different things for fast computation of metrics """
        X, y, sample_weight = check_xyw(X, y, sample_weight=sample_weight)
        self._mask = numpy.array(y == self.uniform_label)
        assert sum(self._mask) > 0, 'No events of uniform class!'
        self._masked_weight = sample_weight[self._mask]

        X_part = numpy.array(take_features(X, self.uniform_features))[self._mask, :]
        # computing knn indices
        neighbours = NearestNeighbors(n_neighbors=self.n_neighbours, algorithm='kd_tree').fit(X_part)
        _, self._groups_indices = neighbours.kneighbors(X_part)
        self._group_weights = ut.compute_group_weights(self._groups_indices, sample_weight=self._masked_weight)
Example #12
def distance_quality_matrix(X, y, n_neighbors=50):
    """On of the ways to measure the quality of knning: each element
    shows how frequently events of class A are met in knn of labels of class B"""
    labels = numpy.unique(y)
    nn = NearestNeighbors(n_neighbors=n_neighbors)
    nn.fit(X)
    knn_indices = nn.kneighbors(X, n_neighbors=n_neighbors, return_distance=False)
    confusion_matrix = numpy.zeros([len(labels), len(labels)], dtype=int)
    for label1, labels2 in zip(y, numpy.take(y, knn_indices)):
        for label2 in labels2:
            confusion_matrix[label1, label2] += 1
    return confusion_matrix
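A minimal usage sketch for distance_quality_matrix (assumption: integer class labels starting at 0, since the labels index the confusion matrix directly):

import numpy
rng = numpy.random.RandomState(0)
X = numpy.vstack([rng.normal(0, 1, size=(100, 2)),
                  rng.normal(5, 1, size=(100, 2))])
y = numpy.array([0] * 100 + [1] * 100)
# off-diagonal counts stay small when the classes are well separated
print(distance_quality_matrix(X, y, n_neighbors=10))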
Example #13
 def train(self, userId):
     if not self.model:
         self.model = NearestNeighbors(n_neighbors=self.num + 1).fit(
             self.provider.provideAll())
     distance, neighborList = self.model.kneighbors(
         [self.provider.provide(userId)])
     if distance[0][2] == 0:
         return []
     similarity = self.distanceToSimilarity(distance[0][1:])
     res = []
     for i in range(self.num):
         # kneighbors returns 2-D arrays; skip index 0 (the user itself)
         res.append((neighborList[0][i + 1], similarity[i]))
     return res
Example #14
def compute_knn_indices_of_signal(X, is_signal, n_neighbours=50):
    """For each event returns the knn closest signal(!) events. No matter of what class the event is.

    :type X: numpy.array, shape = [n_samples, n_features] the distance is measured over these variables
    :type is_signal: numpy.array, shape = [n_samples] with booleans
    :rtype numpy.array, shape [len(dataframe), knn], each row contains indices of closest signal events
    """
    assert len(X) == len(is_signal), "Different lengths"
    signal_indices = numpy.where(is_signal)[0]
    X_signal = numpy.array(X)[numpy.array(is_signal)]
    neighbours = NearestNeighbors(n_neighbors=n_neighbours, algorithm='kd_tree').fit(X_signal)
    _, knn_signal_indices = neighbours.kneighbors(X)
    return numpy.take(signal_indices, knn_signal_indices)
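A small self-check of the helper above on synthetic inputs (purely illustrative):

import numpy
X = numpy.random.RandomState(1).rand(20, 3)
is_signal = numpy.arange(20) % 2 == 0  # every second event is "signal"
knn = compute_knn_indices_of_signal(X, is_signal, n_neighbours=5)
assert knn.shape == (20, 5)
assert numpy.all(is_signal[knn])  # returned indices always point at signal events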
Example #15
class NearestNeighboor(object):
    def __init__(self):
        self.model = None
    #preprocessing, featuring, training, predicting, parsing, evaluation
    @staticmethod
    def sparseX(candPartPath, qUserPath, candToId, qPackageToId, matShape):
        row = []
        col = []
        val = []
        for file_ in os.listdir(qUserPath):
            f = gzip.open(qUserPath+os.sep+file_, mode='rt')  # text mode so split('|') works on str
            for line in f:
                spl = line.strip().split('|')
                username = spl[0]
                if username not in candToId:
                    continue
                for i in range(1, len(spl)):
                    sp = spl[i].split(':')
                    if sp[0] in qPackageToId:
                        row.append(candToId[username])
                        col.append(qPackageToId[sp[0]])
                        val.append(int(sp[1]))
            f.close()
        
        f = gzip.open(candPartPath, mode='rt')  # text mode so split('|') works on str
        for line in f:
            spl = line.strip().split('|')
            username = spl[0]
            for i in range(1, len(spl)):
                sp = spl[i].split(':')
                row.append(candToId[username])
                col.append(qPackageToId[sp[0]])
                val.append(int(sp[1]))
        f.close()
        
        sparseX = csc_matrix((val, (row, col)), shape=matShape)
        return sparseX
    
    def train(self, sparseX, **kargv):
        print('Info: Training KNN')
        if 'algorithm' not in kargv:
            self.model = NearestNeighbors(kargv['topK'], metric=kargv['metric'])
        else:
            self.model = NearestNeighbors(kargv['topK'], metric=kargv['metric'], algorithm=kargv['algorithm'])
        self.model.fit(sparseX)
    
    def predict(self, sparsePX):
        pred = self.model.kneighbors(sparsePX, return_distance = True)
        dist = pred[0]
        idx = pred[1]
        return (dist, idx)
Example #16
def compute_knn_indices_of_signal(X, is_signal, n_neighbours=50):
    """For each event returns the knn closest signal(!) events. No matter of what class the event is.

    :type X: numpy.array, shape = [n_samples, n_features] the distance is measured over these variables
    :type is_signal: numpy.array, shape = [n_samples] with booleans
    :rtype numpy.array, shape [len(dataframe), knn], each row contains indices of closest signal events
    """
    assert len(X) == len(is_signal), "Different lengths"
    signal_indices = numpy.where(is_signal)[0]
    X_signal = numpy.array(X)[numpy.array(is_signal)]
    neighbours = NearestNeighbors(n_neighbors=n_neighbours,
                                  algorithm='kd_tree').fit(X_signal)
    _, knn_signal_indices = neighbours.kneighbors(X)
    return numpy.take(signal_indices, knn_signal_indices)
Example #17
def rvalue(X, Y, n_neighbors=10, theta=1):
    
    neigh = NearestNeighbors(n_neighbors=n_neighbors).fit(X)

    count = 0  # events whose neighbourhood holds more than `theta` foreign labels

    for i in range(len(X)):
        _, [indices] = neigh.kneighbors([X[i]])

        diff = [Y[index] for index in indices if Y[index] != Y[i]]

        if len(diff) > theta:
            count += 1

    return count / len(X)
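A quick hedged sanity check of rvalue (well-separated synthetic classes should score near 0, heavily overlapping ones near 1):

import numpy as np
rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 0.1, size=(50, 2)),
               rng.normal(5, 0.1, size=(50, 2))])
Y = [0] * 50 + [1] * 50
print(rvalue(X, Y, n_neighbors=5, theta=1))  # ~0.0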
Example #18
def computeSignalKnnIndices(uniform_variables, dataframe, is_signal, n_neighbors=50):
    """For each event returns the knn closest signal(!) events. No matter of what class the event is.
    :type uniform_variables: list of names of variables, using which we want to compute the distance
    :type dataframe: pandas.DataFrame, should contain these variables
    :type is_signal: numpy.array, shape = [n_samples] with booleans
    :rtype numpy.array, shape [len(dataframe), knn], each row contains indices of closest signal events
    """
    assert len(dataframe) == len(is_signal), "Different lengths"
    signal_indices = numpy.where(is_signal)[0]
    for variable in uniform_variables:
        assert variable in dataframe.columns, "Dataframe is missing %s column" % variable
    uniforming_features_of_signal = numpy.array(dataframe.loc[is_signal, uniform_variables])
    neighbours = NearestNeighbors(n_neighbors=n_neighbors, algorithm='kd_tree').fit(uniforming_features_of_signal)
    _, knn_signal_indices = neighbours.kneighbors(dataframe[uniform_variables])
    return numpy.take(signal_indices, knn_signal_indices)
Example #19
 def _get_kernel(self, X, y=None):
     if self.kernel == "rbf":
         if y is None:
             return rbf_kernel(X, X, gamma=self.gamma)
         else:
             return rbf_kernel(X, y, gamma=self.gamma)
     elif self.kernel == "knn":
         if self.nn_fit is None:
             self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
         if y is None:
             # Nearest neighbors returns a directed matrix.
             dir_graph = self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
                                                      self.n_neighbors,
                                                      mode='connectivity')
             # Making the matrix symmetric
             un_graph = dir_graph + dir_graph.T
             # Since it is a connectivity matrix, all values should be
             # either 0 or 1
             un_graph[un_graph > 1.0] = 1.0
             return un_graph
         else:
             return self.nn_fit.kneighbors(y, return_distance=False)
     else:
         raise ValueError("%s is not a valid kernel. Only rbf and knn"
                          " are supported at this time" % self.kernel)
Example #20
 def predictAll(self, userIndex):
     if not self.default:
         self.coldStart()
     uf = self.getParam(userIndex)
     if sum(uf) == 0:
         return self.default
     # data,transfer=self.afProvider.filterClicked()
     if not self.model:
         self.model = NearestNeighbors(n_neighbors=self.maxNum,
                                       algorithm='auto').fit(
                                           self.afProvider.provideAll())
     distance, candidates = self.model.kneighbors([uf])
     res = []
     for i in range(self.maxNum):
         res.append((candidates[0][i], 1 - distance[0][i]))
     return res
Example #21
class SimPredictor(Predictor):
    '''
    @summary: Score a user's interest in an article via the cosine similarity between the user's and the article's topic vectors
    @addition: the candidate set is built by clustering: the user is assigned to a cluster, and all articles in that cluster become the candidates
    '''
    def __init__(self, ufProvider=None, afProvider=None):
        super().__init__()
        self.model = None
        self.maxNum = 10
        self.default = None
        # keep the providers; config() below assumes these attributes exist
        self.ufProvider = ufProvider
        self.afProvider = afProvider

    def coldStart(self):
        tmp = []
        for i in range(Article.objects.count()):
            tmp.append((i, len(CacheUtil.loadClickedForArticle(i))))
        tmp.sort(key=lambda x: x[1], reverse=True)
        total = sum(list(map(lambda x: x[1], tmp[0:self.maxNum])))
        self.default = list(
            map(lambda x: (x[0], x[1] / total), tmp[0:self.maxNum]))
        return self.default

    def config(self, maxNum=None, ufProvider=None, afProvider=None):
        self.ufProvider = ufProvider if ufProvider else self.ufProvider
        self.afProvider = afProvider if afProvider else self.afProvider
        self.maxNum = maxNum if maxNum else self.maxNum

    def predict(self, userId, articleId):
        af = np.array(self.getFeature(articleId))
        uf = np.array(self.getParam(userId))
        if sum(uf) == 0:
            if not self.default:
                self.coldStart()
            for pair in self.default:
                if pair[0] == articleId:
                    return pair[1]
            return 0
        return np.dot(af,
                      uf) / (np.sqrt(np.dot(uf, uf)) * np.sqrt(np.dot(af, af)))

    def predictAll(self, userIndex):
        if not self.default:
            self.coldStart()
        uf = self.getParam(userIndex)
        if sum(uf) == 0:
            return self.default
        # data,transfer=self.afProvider.filterClicked()
        if not self.model:
            self.model = NearestNeighbors(n_neighbors=self.maxNum,
                                          algorithm='auto').fit(
                                              self.afProvider.provideAll())
        distance, candidates = self.model.kneighbors([uf])
        res = []
        for i in range(self.maxNum):
            res.append((candidates[0][i], 1 - distance[0][i]))
        return res

    def clear(self):
        del self.model
Example #22
def kmeanspp(X, k, seed):
    # That we need to do this is a bug in _init_centroids
    x_squared_norms = row_norms(X, squared=True)
    # Use k-means++ to initialise the centroids
    centroids = _init_centroids(X, k, 'k-means++', random_state=seed, x_squared_norms=x_squared_norms)
    # OK, we should just short-circuit and get these from k-means++...
    # quick and dirty solution
    nns = NearestNeighbors()
    nns.fit(X)
    centroid_candidatess = nns.radius_neighbors(X=centroids, radius=0, return_distance=False)
    # Account for "degenerated" solutions: several voxels at distance 0, each becoming a centroid
    centroids = set()
    for centroid_candidates in centroid_candidatess:
        centroid_candidates = set(centroid_candidates) - centroids
        if len(centroid_candidates) == 0:
            raise Exception('Cannot get an unambiguous set of centers; '
                            'theoretically this cannot happen, so check for bugs')
        centroids.add(centroid_candidates.pop())
    return np.array(sorted(centroids))
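A hedged call sketch; note that _init_centroids is a private helper that exists only in older scikit-learn releases, so this runs only where the function above does:

import numpy as np
X = np.random.RandomState(0).rand(200, 3)
centroid_idx = kmeanspp(X, k=5, seed=0)  # indices of 5 distinct rows of X
print(X[centroid_idx])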
Example #23
def computeSignalKnnIndices(uniform_variables,
                            dataframe,
                            is_signal,
                            n_neighbors=50):
    """For each event returns the knn closest signal(!) events. No matter of what class the event is.
    :type uniform_variables: list of names of variables, using which we want to compute the distance
    :type dataframe: pandas.DataFrame, should contain these variables
    :type is_signal: numpy.array, shape = [n_samples] with booleans
    :rtype numpy.array, shape [len(dataframe), knn], each row contains indices of closest signal events
    """
    assert len(dataframe) == len(is_signal), "Different lengths"
    signal_indices = numpy.where(is_signal)[0]
    for variable in uniform_variables:
        assert variable in dataframe.columns, "Dataframe is missing %s column" % variable
    uniforming_features_of_signal = numpy.array(
        dataframe.loc[is_signal, uniform_variables])
    neighbours = NearestNeighbors(
        n_neighbors=n_neighbors,
        algorithm='kd_tree').fit(uniforming_features_of_signal)
    _, knn_signal_indices = neighbours.kneighbors(dataframe[uniform_variables])
    return numpy.take(signal_indices, knn_signal_indices)
Example #24
def recommend():
    #if request.method == 'POST':
    f = request.files['file']
    basepath = os.path.dirname(__file__)
    file_path = os.path.join(basepath, 'uploads', secure_filename(f.filename))
    f.save(file_path)

    # cluster for recommending
    filelist.sort()
    featurelist = []
    for i, imagepath in enumerate(filelist):
        print("    Status: %s / %s" % (i, len(filelist)), end="\r")
        img = image.load_img(imagepath, target_size=(224, 224))
        img_data = image.img_to_array(img)
        img_data = np.expand_dims(img_data, axis=0)
        img_data = preprocess_input(img_data)
        features = np.array(model.predict(img_data))
        featurelist.append(features.flatten())
    nei_clf = NearestNeighbors(metric="euclidean")
    nei_clf.fit(featurelist)
    distances, neighbors = get_similar(file_path, n_neighbors=3)
    return 'hello recommender'
Example #25
 def adaptive_evaluation_bkp(self):
     train_X = self.data.get_train_X()
     affinity_matrix = self.data.get_graph()
     affinity_matrix.setdiag(0)
     pred = self.pred_dist
     test_X = self.data.get_test_X()
     test_y = self.data.get_test_ground_truth()
     # nn_fit = self.data.get_neighbors_model()
     nn_fit = NearestNeighbors(n_jobs=-4).fit(train_X)
     logger.info("nn construction finished!")
     neighbor_result = nn_fit.kneighbors_graph(
         test_X, 100, mode="distance")
     logger.info("neighbor_result got!")
     estimate_k = 5
     s = 0
     rest_idxs = self.data.get_rest_idxs()
     # removed_idxs = self.remv
     labels = []
     for i in tqdm(range(test_X.shape[0])):
         start = neighbor_result.indptr[i]
         end = neighbor_result.indptr[i + 1]
         j_in_this_row = neighbor_result.indices[start:end]
         data_in_this_row = neighbor_result.data[start:end]
         sorted_idx = data_in_this_row.argsort()
         assert (len(sorted_idx) == 100)
         j_in_this_row = j_in_this_row[sorted_idx]
         estimated_idxs = j_in_this_row[:estimate_k]
         estimated_idxs = np.array([idx for idx in estimated_idxs if idx in rest_idxs])
         adaptive_k = affinity_matrix[estimated_idxs, :].sum() / estimate_k
         selected_idxs = j_in_this_row[:int(adaptive_k)]
         p = pred[selected_idxs].sum(axis=0)
         labels.append(p.argmax())
         s += adaptive_k
         # print(adaptive_k)
     acc = accuracy_score(test_y, labels)
     logger.info("exp accuracy: {}".format(acc))
     print(s/test_X.shape[0])
Example #26
    def trainAll(self):
        if not self.model:
            self.model = NearestNeighbors(n_neighbors=self.num + 1,
                                          algorithm='auto').fit(
                                              self.provider.provideAll())
        res = []
        distances, friends = self.model.kneighbors(self.provider.provideAll())
        for count in range(len(friends)):
            friend = []
            if distances[count][2] == 0:
                res.append(friend)
                continue
            similarity = self.distanceToSimilarity(distances[count])[1:]
            neighborList = friends[count][1:]

            for i in range(self.num):
                friend.append((neighborList[i], similarity[i]))
            res.append(friend)
            print("User " + str(count) + " finded!")
        if self.isUpdate():
            # DBUtil.dumpFriends(res)
            CacheUtil.dumpUserFriends(res)
            DBUtil.dumpFriends(res)
        return res
Example #27
    def compute_parameters(self, trainX, trainY):
        for variable in self.uniform_variables:
            if variable not in trainX.columns:
                raise ValueError("Dataframe is missing %s column" % variable)

        if self.knn is None:
            A = pairwise_distances(trainX[self.uniform_variables])
            A = self.distance_dependence(A)
            A *= (trainY[:, numpy.newaxis] == trainY[numpy.newaxis, :])
        else:
            is_signal = trainY > 0.5
            # computing knn indices of same type
            uniforming_features_of_signal = numpy.array(trainX.loc[is_signal, self.uniform_variables])
            neighbours = NearestNeighbors(n_neighbors=self.knn, algorithm='kd_tree').fit(uniforming_features_of_signal)
            signal_distances, knn_signal_indices = neighbours.kneighbors(uniforming_features_of_signal)
            knn_signal_indices = numpy.where(is_signal)[0].take(knn_signal_indices)

            uniforming_features_of_bg = numpy.array(trainX.loc[~is_signal, self.uniform_variables])
            neighbours = NearestNeighbors(n_neighbors=self.knn, algorithm='kd_tree').fit(uniforming_features_of_bg)
            bg_distances, knn_bg_indices = neighbours.kneighbors(uniforming_features_of_bg)
            knn_bg_indices = numpy.where(~is_signal)[0].take(knn_bg_indices)

            signal_distances = self.distance_dependence(signal_distances.flatten())
            bg_distances = self.distance_dependence(bg_distances.flatten())

            signal_ind_ptr = numpy.arange(0, sum(is_signal) * self.knn + 1, self.knn)
            bg_ind_ptr = numpy.arange(0, sum(~is_signal) * self.knn + 1, self.knn)
            signal_column_indices = knn_signal_indices.flatten()
            bg_column_indices = knn_bg_indices.flatten()

            A_sig = sparse.csr_matrix(sparse.csr_matrix((signal_distances, signal_column_indices, signal_ind_ptr),
                                                        shape=(sum(is_signal), len(trainX))))
            A_bg = sparse.csr_matrix(sparse.csr_matrix((bg_distances, bg_column_indices, bg_ind_ptr),
                                                       shape=(sum(~is_signal), len(trainX))))

            A = sparse.vstack((A_sig, A_bg), format='csr')

        if self.row_normalize:
            from sklearn.preprocessing import normalize

            A = normalize(A, norm='l1', axis=1)

        return A, numpy.ones(A.shape[0])
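For reference, a toy illustration (hypothetical numbers) of the (data, indices, indptr) CSR construction used for A_sig and A_bg above:

import numpy
from scipy import sparse
knn = 2
distances = numpy.array([[0.0, 1.0], [0.0, 2.0]])  # 2 events, knn=2 neighbours each
columns = numpy.array([[0, 1], [1, 0]])            # neighbour column indices per event
ind_ptr = numpy.arange(0, 2 * knn + 1, knn)        # row boundaries: [0, 2, 4]
A = sparse.csr_matrix((distances.flatten(), columns.flatten(), ind_ptr), shape=(2, 3))
print(A.toarray())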
Example #28
def upload():
    if request.method == 'POST':
        # Get the file from post request
        f = request.files['file']

        # Save the file to ./uploads
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(basepath, 'uploads',
                                 secure_filename(f.filename))
        f.save(file_path)

        # Make prediction
        output_class = [
            "batteries", "cloth", "e-waste", "glass", "light bulbs",
            "metallic", "organic", "paper", "plastic"
        ]

        preds = model_predict(file_path, model)
        print(preds)

        pred_class = output_class[np.argmax(preds)]
        pred_class_percent = round(np.max(preds) * 100, 2)

        result = 'It is ' + pred_class + ' waste'  # Convert to string
        pred_class = ' with ' + str(pred_class_percent) + '% confidence'

        #k-nn for recommending
        filelist.sort()
        featurelist = []
        for i, imagepath in enumerate(filelist):
            print("    Status: %s / %s" % (i, len(filelist)), end="\r")
            img = image.load_img(imagepath, target_size=(224, 224))
            img_data = image.img_to_array(img)
            img_data = np.expand_dims(img_data, axis=0)
            img_data = preprocess_input(img_data)
            features = np.array(model.predict(img_data))
            featurelist.append(features.flatten())
        nei_clf = NearestNeighbors(metric="euclidean")
        nei_clf.fit(featurelist)
        code = model_predict(file_path, model)
        (distances, ), (idx, ) = nei_clf.kneighbors(code, n_neighbors=3)

        #all images are loaded as np arrays
        images = []
        labels = []
        j = 1
        for i, image_path in enumerate(filelist):
            images.append(load_data(image_path))
        images = np.asarray(
            images
        )  # all of the images are converted to np array of (1360,224,224,3)

        print(distances, images[idx])
        print(images[idx].shape)

        final_result = result + pred_class
        image_save = Image.fromarray(
            (np.array(images[0]) * 255).astype(np.uint8))
        #image_save = Image.fromarray(images[idx], "RGB")
        image_save.save('out.jpg')
        image_output = os.path.join(basepath, 'out.jpg')
        immg = '<img src="' + image_output + '" style="height: 132px; width: 132px;">'
        #return render_template('index.html', filename=image_output)
        return final_result
    return None
Example #29
			'MLPRegressor':MLPRegressor(),
			'MaxAbsScaler':MaxAbsScaler(),
			'MeanShift':MeanShift(),
			'MinCovDet':MinCovDet(),
			'MinMaxScaler':MinMaxScaler(),
			'MiniBatchDictionaryLearning':MiniBatchDictionaryLearning(),
			'MiniBatchKMeans':MiniBatchKMeans(),
			'MiniBatchSparsePCA':MiniBatchSparsePCA(),
			'MultiTaskElasticNet':MultiTaskElasticNet(),
			'MultiTaskElasticNetCV':MultiTaskElasticNetCV(),
			'MultiTaskLasso':MultiTaskLasso(),
			'MultiTaskLassoCV':MultiTaskLassoCV(),
			'MultinomialNB':MultinomialNB(),
			'NMF':NMF(),
			'NearestCentroid':NearestCentroid(),
			'NearestNeighbors':NearestNeighbors(),
			'Normalizer':Normalizer(),
			'NuSVC':NuSVC(),
			'NuSVR':NuSVR(),
			'Nystroem':Nystroem(),
			'OAS':OAS(),
			'OneClassSVM':OneClassSVM(),
			'OrthogonalMatchingPursuit':OrthogonalMatchingPursuit(),
			'OrthogonalMatchingPursuitCV':OrthogonalMatchingPursuitCV(),
			'PCA':PCA(),
			'PLSCanonical':PLSCanonical(),
			'PLSRegression':PLSRegression(),
			'PLSSVD':PLSSVD(),
			'PassiveAggressiveClassifier':PassiveAggressiveClassifier(),
			'PassiveAggressiveRegressor':PassiveAggressiveRegressor(),
			'Perceptron':Perceptron(),
Example #30
# coding:utf-8
'''
Created on 2020-01-11

@author: root
'''
from sklearn.neighbors.unsupervised import NearestNeighbors
import numpy as np
from com.msb.knn.KNNDateOnHand import *
import operator  # used below for itemgetter; may also come in via the wildcard import

datingDataMat, datingLabels = file2matrix('../../../data/datingTestSet2.txt')
normMat, ranges, minVals = autoNorm(datingDataMat)

nbrs = NearestNeighbors(n_neighbors=10).fit(normMat)
input_man = [[50000, 8, 9.5]]
S = (input_man - minVals) / ranges
distances, indices = nbrs.kneighbors(S)
# classCount: key = class label, value = number of neighbours with that label

classCount = {}
for i in range(10):
    voteLabel = datingLabels[indices[0][i]]
    classCount[voteLabel] = classCount.get(voteLabel, 0) + 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
resultList = ['没感觉', '看起来还行', '极具魅力']  # "not interested", "looks OK", "very attractive"
print(resultList[sortedClassCount[0][0] - 1])
Example #31
<img src="https://github.com/hse-aml/intro-to-dl/blob/master/week4/images/similar_images.jpg?raw=1" style="width:60%">

To speed up the retrieval process, one should use Locality Sensitive Hashing on top of the encoded vectors. This [technique](https://erikbern.com/2015/07/04/benchmark-of-approximate-nearest-neighbor-libraries.html) can narrow down the potential nearest neighbours of our image in latent space (the encoder code). For simplicity, we will calculate nearest neighbours by brute force here; a hedged approximate-NN sketch follows this example.
"""

# restore trained encoder weights
s = reset_tf_session()
encoder, decoder = build_deep_autoencoder(IMG_SHAPE, code_size=32)
encoder.load_weights("encoder.h5")

images = X_train
codes = encoder.predict(images)  # encode all images
assert len(codes) == len(images)

from sklearn.neighbors.unsupervised import NearestNeighbors
nei_clf = NearestNeighbors(metric="euclidean")
nei_clf.fit(codes)

def get_similar(image, n_neighbors=5):
    assert image.ndim == 3, "image must be [height, width, 3]"

    code = encoder.predict(image[None])

    (distances,), (idx,) = nei_clf.kneighbors(code, n_neighbors=n_neighbors)

    return distances, images[idx]

def show_similar(image):
    
    distances, neighbors = get_similar(image, n_neighbors=3)
    
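A hedged sketch of the approximate alternative mentioned in the text above, using the Annoy library from the linked benchmark (assumes pip install annoy; codes, encoder, images and X_test come from the surrounding notebook):

from annoy import AnnoyIndex

index = AnnoyIndex(32, 'euclidean')  # dimensionality must match code_size
for i, code in enumerate(codes):
    index.add_item(i, code)
index.build(10)  # 10 trees; more trees give better recall at a larger index size
query_code = encoder.predict(X_test[:1])[0]
approx_idx = index.get_nns_by_vector(query_code, 5)
approx_neighbors = images[approx_idx]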
Example #32
class BaseLabelPropagation(BaseEstimator, ClassifierMixin, metaclass=ABCMeta):
    """Base class for label propagation module.
    Parameters
    ----------
    kernel : {'knn', 'rbf', callable}
        String identifier for kernel function to use or the kernel function
        itself. Only 'rbf' and 'knn' strings are valid inputs. The function
        passed should take two inputs, each of shape [n_samples, n_features],
        and return a [n_samples, n_samples] shaped weight matrix
    gamma : float
        Parameter for rbf kernel
    n_neighbors : integer > 0
        Parameter for knn kernel
    alpha : float
        Clamping factor
    max_iter : integer
        Change maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state
    n_jobs : int or None, optional (default=None)
        The number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    """
    def __init__(self,
                 kernel='rbf',
                 gamma=20,
                 n_neighbors=7,
                 alpha=1,
                 max_iter=30,
                 tol=1e-3,
                 n_jobs=None):

        self.max_iter = max_iter
        self.tol = tol

        # kernel parameters
        self.kernel = kernel
        self.gamma = gamma
        self.n_neighbors = n_neighbors

        # clamping factor
        self.alpha = alpha

        self.n_jobs = n_jobs

        self.graph_matrix = None
        # lazily-fitted nearest-neighbour model used by the 'knn' kernel
        self.nn_fit = None

    def _get_kernel(self, X, y=None):
        if self.kernel == "rbf":
            if y is None:
                return rbf_kernel(X, X, gamma=self.gamma)
            else:
                return rbf_kernel(X, y, gamma=self.gamma)
        elif self.kernel == "knn":
            if self.nn_fit is None:
                t0 = time()
                self.nn_fit = NearestNeighbors(self.n_neighbors,
                                               n_jobs=self.n_jobs).fit(X)
                print("NearestNeighbors fit time cost:", time() - t0)
            if y is None:
                t0 = time()
                result = self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
                                                      self.n_neighbors,
                                                      mode='connectivity')
                print("construct kNN graph time cost:", time() - t0)
                return result
            else:
                return self.nn_fit.kneighbors(y, return_distance=False)
        elif callable(self.kernel):
            if y is None:
                return self.kernel(X, X)
            else:
                return self.kernel(X, y)
        else:
            raise ValueError("%s is not a valid kernel. Only rbf and knn"
                             " or an explicit function "
                             " are supported at this time." % self.kernel)

    @abstractmethod
    def _build_graph(self):
        raise NotImplementedError("Graph construction must be implemented"
                                  " to fit a label propagation model.")

    def predict(self, X):
        """Performs inductive inference across the model.
        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]
        Returns
        -------
        y : array_like, shape = [n_samples]
            Predictions for input data
        """
        probas = self.predict_proba(X)
        return self.classes_[np.argmax(probas, axis=1)].ravel()

    def predict_proba(self, X):
        """Predict probability for each possible outcome.
        Compute the probability estimates for each single sample in X
        and each possible outcome seen during training (categorical
        distribution).
        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]
        Returns
        -------
        probabilities : array, shape = [n_samples, n_classes]
            Normalized probability distributions across
            class labels
        """
        check_is_fitted(self, 'X_')

        X_2d = check_array(
            X, accept_sparse=['csc', 'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'])
        weight_matrices = self._get_kernel(self.X_, X_2d)
        if self.kernel == 'knn':
            probabilities = np.array([
                np.sum(self.label_distributions_[weight_matrix], axis=0)
                for weight_matrix in weight_matrices
            ])
        else:
            weight_matrices = weight_matrices.T
            probabilities = np.dot(weight_matrices, self.label_distributions_)
        normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
        probabilities /= normalizer
        return probabilities

    def get_graph(self, X, y):
        X, y = check_X_y(X, y)
        self.X_ = X
        check_classification_targets(y)
        graph_matrix = self._build_graph()
        return graph_matrix

    def fit(self, X, y):
        """Fit a semi-supervised label propagation model based
        All the input data is provided matrix X (labeled and unlabeled)
        and corresponding label matrix y with a dedicated marker value for
        unlabeled samples.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            A {n_samples by n_samples} size matrix will be created from this
        y : array_like, shape = [n_samples]
            n_labeled_samples (unlabeled points are marked as -1)
            All unlabeled samples will be transductively assigned labels
        Returns
        -------
        self : returns an instance of self.
        """
        t0 = time()
        X, y = check_X_y(X, y)
        self.X_ = X
        check_classification_targets(y)

        # actual graph construction (implementations should override this)
        graph_matrix = self._build_graph()
        t1 = time()

        # label construction
        # construct a categorical distribution for classification only
        classes = np.unique(y)
        classes = (classes[classes != -1])
        self.classes_ = classes

        n_samples, n_classes = len(y), len(classes)

        alpha = self.alpha
        if self._variant == 'spreading' and \
                (alpha is None or alpha <= 0.0 or alpha >= 1.0):
            raise ValueError('alpha=%s is invalid: it must be inside '
                             'the open interval (0, 1)' % alpha)
        y = np.asarray(y)
        unlabeled = y == -1

        # initialize distributions
        self.label_distributions_ = np.zeros((n_samples, n_classes))
        for label in classes:
            self.label_distributions_[y == label, classes == label] = 1

        y_static = np.copy(self.label_distributions_)
        if self._variant == 'propagation':
            # LabelPropagation
            y_static[unlabeled] = 0
        else:
            # LabelSpreading
            y_static *= 1 - alpha

        l_previous = np.zeros((self.X_.shape[0], n_classes))

        unlabeled = unlabeled[:, np.newaxis]
        if sparse.isspmatrix(graph_matrix):
            graph_matrix = graph_matrix.tocsr()

        for self.n_iter_ in range(self.max_iter):
            if np.abs(self.label_distributions_ - l_previous).sum() < self.tol:
                break

            l_previous = self.label_distributions_
            self.label_distributions_ = safe_sparse_dot(
                graph_matrix, self.label_distributions_)

            if self._variant == 'propagation':
                normalizer = np.sum(self.label_distributions_,
                                    axis=1)[:, np.newaxis]
                self.label_distributions_ /= normalizer
                self.label_distributions_ = np.where(unlabeled,
                                                     self.label_distributions_,
                                                     y_static)
            else:
                # clamp
                self.label_distributions_ = np.multiply(
                    alpha, self.label_distributions_) + y_static
        else:
            warnings.warn('max_iter=%d was reached without convergence.' %
                          self.max_iter,
                          category=ConvergenceWarning)
            self.n_iter_ += 1

        normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
        self.label_distributions_ /= normalizer

        # set the transduction item
        transduction = self.classes_[np.argmax(self.label_distributions_,
                                               axis=1)]
        self.transduction_ = transduction.ravel()

        t2 = time()
        print("building graph time cost: {}, spreading time cost: {}".format(
            t1 - t0, t2 - t1))

        return self
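A minimal concrete subclass sketch (my own assumption, closely following scikit-learn's LabelPropagation; dense kernels only, with each row of the affinity matrix normalised to sum to one):

class MinimalLabelPropagation(BaseLabelPropagation):
    _variant = 'propagation'

    def _build_graph(self):
        # transition matrix: W_ij / sum_j W_ij, so every row is a distribution
        affinity_matrix = self._get_kernel(self.X_)
        normalizer = affinity_matrix.sum(axis=1)[:, np.newaxis]
        return affinity_matrix / normalizer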
Example #33
class DBPlot(BaseEstimator):
    """
    Heuristic approach to estimate and visualize high-dimensional decision
    boundaries for trained binary classifiers by using black-box optimization
    to find regions in which the classifier is maximally uncertain (0.5 prediction
    probability). The total number of keypoints representing the decision boundary
    will depend on n_connecting_keypoints and n_interpolated_keypoints. Reduce
    either or both to reduce runtime.

    Parameters
    ----------
    estimator : BaseEstimator instance, optional (default=`KNeighborsClassifier(n_neighbors=10)`).
        Classifier for which the decision boundary should be plotted. Can be trained
        or untrained (in which case the fit method will train it). Must have
        probability estimates enabled (i.e. `estimator.predict_proba` must work).
        Make sure it is possible for probability estimates to get close to 0.5
        (more specifically, as close as specified by acceptance_threshold) - this usually
        requires setting an even number of neighbors, estimators etc.

    dimensionality_reduction : BaseEstimator instance, optional (default=`PCA(n_components=2)`).
        Dimensionality reduction method to help plot the decision boundary in 2D. Can be trained
        or untrained (in which case the fit method will train it). Must have n_components=2.
        Must be able to project new points into the 2D space after fitting
        (i.e. `dimensionality_reduction.transform` must work).

    acceptance_threshold : float, optional (default=0.03)
        Maximum allowed deviation from decision boundary (defined as the region
        with 0.5 prediction probability) when accepting decision boundary keypoints

    n_decision_boundary_keypoints : int, optional (default=60)
        Total number of decision boundary keypoints added, including both connecting
        and interpolated keypoints.

    n_connecting_keypoints : int, optional (default=None)
        Number of decision boundary keypoints estimated along lines connecting
        instances from two different classes (each such line must cross the
        decision boundary at least once). If None (default), it is set to 1/3
        of n_decision_boundary_keypoints

    n_interpolated_keypoints : int, optional (default=None)
        Number of decision boundary keypoints interpolated between connecting
        keypoints to increase keypoint density. If None (default), it is set to
        2/3 of n_decision_boundary_keypoints

    n_generated_testpoints_per_keypoint : int, optional (default=15)
        Number of demo points generated around decision boundary keypoints, and
        labeled according to the specified classifier, in order to enrich and
        validate the decision boundary plot

    linear_iteration_budget : int, optional (default=100)
        Maximum number of iterations the optimizer is allowed to run for each
        keypoint estimation while looking along linear trajectories

    hypersphere_iteration_budget : int, optional (default=300)
        Maximum number of iterations the optimizer is allowed to run for each
        keypoint estimation while looking along hypersphere surfaces

    verbose: bool, optional (default=True)
        Verbose output
    """

    def __init__(self, estimator=KNeighborsClassifier(n_neighbors=10),
                 dimensionality_reduction=PCA(n_components=2),
                 acceptance_threshold=0.03, n_decision_boundary_keypoints=60,
                 n_connecting_keypoints=None, n_interpolated_keypoints=None,
                 n_generated_testpoints_per_keypoint=15,
                 linear_iteration_budget=100, hypersphere_iteration_budget=300,
                 verbose=True):
        if acceptance_threshold == 0:
            raise Warning(
                "A nonzero acceptance threshold is strongly recommended so the optimizer can finish in finite time")
        if linear_iteration_budget < 2 or hypersphere_iteration_budget < 2:
            raise Exception("Invalid iteration budget")

        self.classifier = estimator
        self.dimensionality_reduction = dimensionality_reduction
        self.acceptance_threshold = acceptance_threshold

        if (n_decision_boundary_keypoints and n_connecting_keypoints and n_interpolated_keypoints
                and n_connecting_keypoints + n_interpolated_keypoints != n_decision_boundary_keypoints):
            raise Exception(
                "n_connecting_keypoints and n_interpolated_keypoints must sum to n_decision_boundary_keypoints (set them to None to use calculated suggestions)")

        self.n_connecting_keypoints = n_connecting_keypoints if n_connecting_keypoints is not None else n_decision_boundary_keypoints / 3
        self.n_interpolated_keypoints = n_interpolated_keypoints if n_interpolated_keypoints is not None else n_decision_boundary_keypoints * 2 / 3

        self.linear_iteration_budget = linear_iteration_budget
        self.n_generated_testpoints_per_keypoint = n_generated_testpoints_per_keypoint
        self.hypersphere_iteration_budget = hypersphere_iteration_budget
        self.verbose = verbose

        self.decision_boundary_points = []
        self.decision_boundary_points_2d = []
        self.X_testpoints = []
        self.y_testpoints = []
        self.background = []
        self.steps = 3

        self.hypersphere_max_retry_budget = 20
        self.penalties_enabled = True
        self.random_gap_selection = False

    def setclassifier(self, estimator=KNeighborsClassifier(n_neighbors=10)):
        """Assign classifier for which decision boundary should be plotted.

        Parameters
        ----------
        estimator : BaseEstimator instance, optional (default=KNeighborsClassifier(n_neighbors=10)).
            Classifier for which the decision boundary should be plotted. Must have
            probability estimates enabled (i.e. estimator.predict_proba must work).
            Make sure it is possible for probability estimates to get close to 0.5
            (more specifically, as close as specified by acceptance_threshold).
        """
        self.classifier = estimator

    def fit(self, X, y, training_indices=None):
        """Specify data to be plotted, and fit classifier only if required (the
        specified clasifier is only trained if it has not been trained yet).

        All the input data is provided in the matrix X, and corresponding
        binary labels (values taking 0 or 1) in the vector y

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            A {n_samples by n_samples} size matrix containing data

        y : array-like, shape = [n_samples]
            Labels

        training_indices : array-like or float, optional (default=None)
            Indices on which the classifier has been trained / should be trained.
            If float, it is converted to a random sample with the specified proportion
            of the full dataset.

        Returns
        -------
        self : returns an instance of self.
        """
        if set(np.array(y, dtype=int).tolist()) != set([0, 1]):
            raise Exception(
                "Currently only implemented for binary classification. Make sure you pass in two classes (0 and 1)")

        if training_indices is None:
            train_idx = range(len(y))
        elif isinstance(training_indices, float):
            train_idx, test_idx = train_test_split(range(len(y)), test_size=0.5)
        else:
            train_idx = training_indices

        self.X = X
        self.y = y
        self.train_idx = train_idx
        #self.test_idx = np.setdiff1d(np.arange(len(y)), self.train_idx, assume_unique=False)
        self.test_idx = list(set(range(len(y))).difference(set(self.train_idx)))

        # fit classifier if necessary
        try:
            self.classifier.predict([X[0]])
        except:
            self.classifier.fit(X[train_idx, :], y[train_idx])

        self.y_pred = self.classifier.predict(self.X)

        # fit DR method if necessary
        try:
            self.dimensionality_reduction.transform([X[0]])
        except:
            self.dimensionality_reduction.fit(X, y)

        try:
            self.dimensionality_reduction.transform([X[0]])
        except:
            raise Exception(
                "Please make sure your dimensionality reduction method has an exposed transform() method! If in doubt, use PCA or Isomap")

        # transform data
        self.X2d = self.dimensionality_reduction.transform(self.X)
        self.mean_2d_dist = np.mean(pdist(self.X2d))
        self.X2d_xmin, self.X2d_xmax = np.min(self.X2d[:, 0]), np.max(self.X2d[:, 0])
        self.X2d_ymin, self.X2d_ymax = np.min(self.X2d[:, 1]), np.max(self.X2d[:, 1])

        self.majorityclass = 0 if list(y).count(0) > list(y).count(1) else 1
        self.minorityclass = 1 - self.majorityclass
        minority_idx, majority_idx = np.where(y == self.minorityclass)[
            0], np.where(y == self.majorityclass)[0]
        self.Xminor, self.Xmajor = X[minority_idx], X[majority_idx]
        self.Xminor2d, self.Xmajor2d = self.X2d[minority_idx], self.X2d[majority_idx]

        # set up efficient nearest neighbor models for later use
        self.nn_model_2d_majorityclass = NearestNeighbors(n_neighbors=2)
        self.nn_model_2d_majorityclass.fit(self.X2d[majority_idx, :])

        self.nn_model_2d_minorityclass = NearestNeighbors(n_neighbors=2)
        self.nn_model_2d_minorityclass.fit(self.X2d[minority_idx, :])

        # step 1. look for decision boundary points between corners of majority &
        # minority class distribution
        minority_corner_idx, majority_corner_idx = [], []
        for extremum1 in [np.min, np.max]:
            for extremum2 in [np.min, np.max]:
                _, idx = self.nn_model_2d_minorityclass.kneighbors(
                    [[extremum1(self.Xminor2d[:, 0]), extremum2(self.Xminor2d[:, 1])]])
                minority_corner_idx.append(idx[0][0])
                _, idx = self.nn_model_2d_majorityclass.kneighbors(
                    [[extremum1(self.Xmajor2d[:, 0]), extremum2(self.Xmajor2d[:, 1])]])
                majority_corner_idx.append(idx[0][0])

        # optimize to find new db keypoints between corners
        self._linear_decision_boundary_optimization(
            minority_corner_idx, majority_corner_idx, all_combinations=True, step=1)

        # step 2. look for decision boundary points on lines connecting randomly
        # sampled points of majority & minority class
        n_samples = int(self.n_connecting_keypoints)
        from_idx = list(random.sample(list(np.arange(len(self.Xminor))), n_samples))
        to_idx = list(random.sample(list(np.arange(len(self.Xmajor))), n_samples))

        # optimize to find new db keypoints between minority and majority class
        self._linear_decision_boundary_optimization(
            from_idx, to_idx, all_combinations=False, step=2)

        if len(self.decision_boundary_points_2d) < 2:
            print("Failed to find initial decision boundary. Retrying... If this keeps happening, increasing the acceptance threshold might help. Also, make sure the classifier is able to find a point with 0.5 prediction probability (usually requires an even number of estimators/neighbors/etc).")
            return self.fit(X, y, training_indices)

        # step 3. look for decision boundary points between already known db
        # points that are too distant (search on connecting line first, then on
        # surrounding hypersphere surfaces)
        edges, gap_distances, gap_probability_scores = self._get_sorted_db_keypoint_distances()  # find gaps
        self.nn_model_decision_boundary_points = NearestNeighbors(n_neighbors=2)
        self.nn_model_decision_boundary_points.fit(self.decision_boundary_points)

        i = 0
        retries = 0
        while i < self.n_interpolated_keypoints:
            if self.verbose:
                print("Step 3/{}:{}/".format(self.steps, i, self.n_interpolated_keypoints))
            if self.random_gap_selection:
                # randomly sample from sorted DB keypoint gaps?
                gap_idx = np.random.choice(len(gap_probability_scores),
                                           1, p=gap_probability_scores)[0]
            else:
                # get largest gap
                gap_idx = 0
            from_point = self.decision_boundary_points[edges[gap_idx][0]]
            to_point = self.decision_boundary_points[edges[gap_idx][1]]

            # optimize to find new db keypoint along line connecting two db keypoints
            # with large gap
            db_point = self._find_decision_boundary_along_line(
                from_point, to_point, penalize_tangent_distance=self.penalties_enabled)

            if self.decision_boundary_distance(db_point) > self.acceptance_threshold:
                if self.verbose:
                    print("No good solution along straight line - trying to find decision boundary on hypersphere surface around known decision boundary point")

                # hypersphere radius half the distance between from and to db keypoints
                R = euclidean(from_point, to_point) / 2.0
                # search around either source or target keypoint, with 0.5 probability,
                # hoping to find decision boundary in between
                if random.random() > 0.5:
                    from_point = to_point

                # optimize to find new db keypoint on hypersphere surface around known keypoint
                db_point = self._find_decision_boundary_on_hypersphere(from_point, R)
                if self.decision_boundary_distance(db_point) <= self.acceptance_threshold:
                    db_point2d = self.dimensionality_reduction.transform([db_point])[0]
                    self.decision_boundary_points.append(db_point)
                    self.decision_boundary_points_2d.append(db_point2d)
                    i += 1
                    retries = 0
                else:
                    retries += 1
                    if retries > self.hypersphere_max_retry_budget:
                        i += 1
                        dist = self.decision_boundary_distance(db_point)
                        msg = "Found point is too distant from decision boundary ({}), but retry budget exceeded ({})"
                        print(msg.format(dist, self.hypersphere_max_retry_budget))
                    elif self.verbose:
                        dist = self.decision_boundary_distance(db_point)
                        print("Found point is too distant from decision boundary ({}) retrying...".format(dist))

            else:
                db_point2d = self.dimensionality_reduction.transform([db_point])[0]
                self.decision_boundary_points.append(db_point)
                self.decision_boundary_points_2d.append(db_point2d)
                i += 1
                retries = 0

            edges, gap_distances, gap_probability_scores = self._get_sorted_db_keypoint_distances()  # reload gaps

        self.decision_boundary_points = np.array(self.decision_boundary_points)
        self.decision_boundary_points_2d = np.array(self.decision_boundary_points_2d)

        if self.verbose:
            print("Done fitting! Found {} decision boundary keypoints.".format(
                len(self.decision_boundary_points)))

        return self

    def plot(self, plt=None, generate_testpoints=True, generate_background=True, tune_background_model=False, background_resolution=100, scatter_size_scale=1.0, legend=True):
        """Plots the dataset and the identified decision boundary in 2D.
        (If you wish to create custom plots, get the data using generate_plot() and plot it manually)

        Parameters
        ----------
        plt : matplotlib.pyplot or axis object (default=matplotlib.pyplot)
            Object to be plotted on

        generate_testpoints : boolean, optional (default=True)
            Whether to generate demo points around the estimated decision boundary
            as a sanity check

        generate_background : boolean, optional (default=True)
            Whether to generate faint background plot (using prediction probabilities
            of a fitted support vector machine, trained on generated demo points)
            to aid visualization

        tune_background_model : boolean, optional (default=False)
            Whether to tune the parameters of the support vector machine generating
            the background

        background_resolution : int, optional (default=100)
            Desired resolution (height and width) of background to be generated

        scatter_size_scale : float, optional (default=1.0)
            Scaling factor for scatter plot marker size

        legend : boolean, optional (default=True)
            Whether to display a legend

        Returns
        -------
        plt : The matplotlib.pyplot or axis object which has been passed in, after
        plotting the data and decision boundary on it. (plt.show() is NOT called
        and will be required)
        """
        if plt is None:
            plt = mplt

        if len(self.X_testpoints) == 0:
            self.generate_plot(generate_testpoints=generate_testpoints, generate_background=generate_background,
                               tune_background_model=tune_background_model, background_resolution=background_resolution)

        if generate_background and generate_testpoints:
            try:
                plt.imshow(np.flipud(self.background), extent=[
                           self.X2d_xmin, self.X2d_xmax, self.X2d_ymin, self.X2d_ymax], cmap="GnBu", alpha=0.33)
            except Exception:
                print("Failed to render image background")

        # decision boundary
        plt.scatter(self.decision_boundary_points_2d[:, 0], self.decision_boundary_points_2d[
                    :, 1], 600 * scatter_size_scale, c='c', marker='p')
        # generated demo points
        if generate_testpoints:
            plt.scatter(self.X_testpoints_2d[:, 0], self.X_testpoints_2d[
                        :, 1], 20 * scatter_size_scale, c=['g' if i else 'b' for i in self.y_testpoints], alpha=0.6)

        # training data
        plt.scatter(self.X2d[self.train_idx, 0], self.X2d[self.train_idx, 1], 150 * scatter_size_scale,
                    facecolor=['g' if i else 'b' for i in self.y[self.train_idx]],
                    edgecolor=['g' if self.y_pred[self.train_idx[i]] == self.y[self.train_idx[i]] == 1
                               else ('b' if self.y_pred[self.train_idx[i]] == self.y[self.train_idx[i]] == 0 else 'r')
                               for i in range(len(self.train_idx))], linewidths=5 * scatter_size_scale)
        # testing data
        plt.scatter(self.X2d[self.test_idx, 0], self.X2d[self.test_idx, 1], 150 * scatter_size_scale,
                    facecolor=['g' if i else 'b' for i in self.y[self.test_idx]],
                    edgecolor=['g' if self.y_pred[self.test_idx[i]] == self.y[self.test_idx[i]] == 1
                               else ('b' if self.y_pred[self.test_idx[i]] == self.y[self.test_idx[i]] == 0 else 'r')
                               for i in range(len(self.test_idx))], linewidths=5 * scatter_size_scale, marker='s')

        # label data points with their indices
        for i in range(len(self.X2d)):
            plt.text(self.X2d[i, 0] + (self.X2d_xmax - self.X2d_xmin) * 0.5e-2,
                     self.X2d[i, 1] + (self.X2d_ymax - self.X2d_ymin) * 0.5e-2, str(i), size=8)

        if legend:
            plt.legend(["Estimated decision boundary keypoints", "Generated demo data around decision boundary",
                        "Actual data (training set)", "Actual data (demo set)"], loc="lower right", prop={'size': 9})

        # decision boundary keypoints, in case not visible in background
        plt.scatter(self.decision_boundary_points_2d[:, 0], self.decision_boundary_points_2d[:, 1],
                    600 * scatter_size_scale, c='c', marker='p', alpha=0.1)
        plt.scatter(self.decision_boundary_points_2d[:, 0], self.decision_boundary_points_2d[:, 1],
                    30 * scatter_size_scale, c='c', marker='p', edgecolor='c', alpha=0.8)

        # minimum spanning tree through decision boundary keypoints
        D = pdist(self.decision_boundary_points_2d)
        edges = minimum_spanning_tree(squareform(D))
        for e in edges:
            plt.plot([self.decision_boundary_points_2d[e[0], 0], self.decision_boundary_points_2d[e[1], 0]],
                     [self.decision_boundary_points_2d[e[0], 1],
                         self.decision_boundary_points_2d[e[1], 1]],
                     '--c', linewidth=4 * scatter_size_scale)
            plt.plot([self.decision_boundary_points_2d[e[0], 0], self.decision_boundary_points_2d[e[1], 0]],
                     [self.decision_boundary_points_2d[e[0], 1],
                         self.decision_boundary_points_2d[e[1], 1]],
                     '--k', linewidth=1)

        if len(self.test_idx) == 0:
            print("No demo performance calculated, as no testing data was specified")
        else:
            freq = itemfreq(self.y[self.test_idx]).astype(float)
            imbalance = np.round(np.max((freq[0, 1], freq[1, 1])) / len(self.test_idx), 3)
            acc_score = np.round(accuracy_score(
                self.y[self.test_idx], self.y_pred[self.test_idx]), 3)
            f1 = np.round(f1_score(self.y[self.test_idx], self.y_pred[self.test_idx]), 3)
            plt.title("Test accuracy: " + str(acc_score) + ", F1 score: " +
                      str(f1) + ". Imbalance (max chance accuracy): " + str(imbalance))

        if self.verbose:
            print("Plot successfully generated! Don't forget to call the show() method to display it")

        return plt
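
A hedged end-to-end usage sketch of the workflow implemented above; `DBPlot` stands in for whatever name this class carries in the surrounding project, and the default constructor arguments are assumed to be sufficient:

import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from sklearn.ensemble import RandomForestClassifier

X, y = make_moons(n_samples=200, noise=0.3, random_state=0)
db_plot = DBPlot()  # hypothetical name for the class defined above
db_plot.setclassifier(RandomForestClassifier(n_estimators=100, random_state=0))
db_plot.fit(X, y, training_indices=0.5)
db_plot.plot(plt).show()  # plot() returns plt; show() must be called explicitly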

    def generate_plot(self, generate_testpoints=True, generate_background=True, tune_background_model=False, background_resolution=100):
        """Generates and returns arrays for visualizing the dataset and the
        identified decision boundary in 2D.

        Parameters
        ----------
        generate_testpoints : boolean, optional (default=True)
            Whether to generate demo points around the estimated decision boundary
            as a sanity check

        generate_background : boolean, optional (default=True)
            Whether to generate faint background plot (using prediction probabilities
            of a fitted support vector machine, trained on generated demo points)
            to aid visualization

        tune_background_model : boolean, optional (default=False)
            Whether to tune the parameters of the support vector machine generating
            the background

        background_resolution : int, optional (default=100)
            Desired resolution (height and width) of background to be generated

        Returns
        -------
        decision_boundary_points_2d : array
            Array containing points in the dimensionality-reduced 2D space which
            are very close to the true decision boundary

        X_testpoints_2d : array
            Array containing generated demo points in the dimensionality-reduced
            2D space which surround the decision boundary and can be used for
            visual feedback to estimate which area would be assigned which class

        y_testpoints : array
            Classifier predictions for each of the generated demo points

        background: array
            Generated background image showing prediction probabilities of the
            classifier in each region (only returned if generate_background is set
            to True!)

        """
        if len(self.decision_boundary_points) == 0:
            raise Exception("Please call the fit method first!")

        if not generate_testpoints and generate_background:
            print("Warning: cannot generate a background without testpoints")

        if len(self.X_testpoints) == 0 and generate_testpoints:
            if self.verbose:
                print("Generating demo points around decision boundary...")
            self._generate_testpoints()

            if generate_background and generate_testpoints:
                if tune_background_model:
                    params = {'C': np.power(10, np.linspace(0, 2, 2)),
                              'gamma': np.power(10, np.linspace(-2, 0, 2))}
                    grid = GridSearchCV(SVC(), params, n_jobs=-1 if os.name != 'nt' else 1)
                    grid.fit(np.vstack((self.X2d[self.train_idx], self.X_testpoints_2d)), np.hstack(
                        (self.y[self.train_idx], self.y_testpoints)))
                    bestparams = grid.best_params_
                else:
                    bestparams = {'C': 1, 'gamma': 1}

                self.background_model = SVC(probability=True, C=bestparams['C'], gamma=bestparams['gamma']).fit(np.vstack(
                    (self.X2d[self.train_idx], self.X_testpoints_2d)), np.hstack((self.y[self.train_idx], self.y_testpoints)))
                xx, yy = np.meshgrid(np.linspace(self.X2d_xmin, self.X2d_xmax, background_resolution), np.linspace(
                    self.X2d_ymin, self.X2d_ymax, background_resolution))
                Z = self.background_model.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 0]
                Z = Z.reshape((background_resolution, background_resolution))
                self.background = Z

        if generate_background and generate_testpoints:
            return self.decision_boundary_points_2d, self.X_testpoints_2d, self.y_testpoints, self.background
        elif generate_testpoints:
            return self.decision_boundary_points_2d, self.X_testpoints_2d, self.y_testpoints
        else:
            return self.decision_boundary_points_2d

    def _generate_testpoints(self, tries=100):
        """Generate random demo points around decision boundary keypoints
        """
        nn_model = NearestNeighbors(n_neighbors=3)
        nn_model.fit(self.decision_boundary_points)

        nn_model_2d = NearestNeighbors(n_neighbors=2)
        nn_model_2d.fit(self.decision_boundary_points_2d)
        #max_radius = 2*np.max([nn_model_2d.kneighbors([self.decision_boundary_points_2d[i]])[0][0][1] for i in range(len(self.decision_boundary_points_2d))])

        self.X_testpoints = np.zeros((0, self.X.shape[1]))
        self.y_testpoints = []
        for i in range(len(self.decision_boundary_points)):
            if self.verbose:
                msg = "Generating testpoint for plotting {}/{}"
                print(msg.format(i, len(self.decision_boundary_points)))
            testpoints = np.zeros((0, self.X.shape[1]))
            # generate Np points in Gaussian around decision_boundary_points[i] with
            # radius depending on the distance to the next point
            d, idx = nn_model.kneighbors([self.decision_boundary_points[i]])
            radius = d[0][1] if d[0][1] != 0 else d[0][2]
            if radius == 0:
                radius = np.mean(pdist(self.decision_boundary_points_2d))
            max_radius = radius * 2
            radius /= 5.0

            # add demo points, keeping some balance
            max_imbalance = 5.0
            y_testpoints = []
            for j in range(self.n_generated_testpoints_per_keypoint - 2):
                c_radius = radius
                freq = itemfreq(y_testpoints).astype(float)
                imbalanced = freq.shape[0] == 1  # only one class present so far
                if freq.shape[0] == 2 and (freq[0, 1] / freq[1, 1] < 1.0 / max_imbalance or freq[0, 1] / freq[1, 1] > max_imbalance):
                    imbalanced = True

                for try_i in range(tries):
                    testpoint = np.random.normal(self.decision_boundary_points[
                                                 i], radius, (1, self.X.shape[1]))
                    try:
                        testpoint2d = self.dimensionality_reduction.transform(testpoint)[0]
                    except:  # DR can fail e.g. if NMF gets negative values
                        testpoint = []
                        continue
                    # demo point needs to be close to current key point
                    if euclidean(testpoint2d, self.decision_boundary_points_2d[i]) <= max_radius:
                        if not imbalanced:  # needs to be not imbalanced
                            break
                        y_pred = self.classifier.predict(testpoint)[0]
                        # imbalanced but this would actually improve things
                        if freq.shape[0] == 2 and freq[y_pred, 1] < freq[1 - y_pred, 1]:
                            break
                    c_radius /= 2.0
                if len(testpoint) != 0:
                    testpoints = np.vstack((testpoints, testpoint))
                    y_testpoints.append(self.classifier.predict(testpoint)[0])

            self.X_testpoints = np.vstack((self.X_testpoints, testpoints))
            self.y_testpoints = np.hstack((self.y_testpoints, y_testpoints))
            self.X_testpoints_2d = self.dimensionality_reduction.transform(self.X_testpoints)

        idx_within_bounds = np.where((self.X_testpoints_2d[:, 0] >= self.X2d_xmin) & (self.X_testpoints_2d[:, 0] <= self.X2d_xmax)
                                     & (self.X_testpoints_2d[:, 1] >= self.X2d_ymin) & (self.X_testpoints_2d[:, 1] <= self.X2d_ymax))[0]
        self.X_testpoints = self.X_testpoints[idx_within_bounds]
        self.y_testpoints = self.y_testpoints[idx_within_bounds]
        self.X_testpoints_2d = self.X_testpoints_2d[idx_within_bounds]

    def decision_boundary_distance(self, x, grad=0):
        """Returns the distance of the given point from the decision boundary,
        i.e. the distance from the region with maximal uncertainty (0.5
        prediction probability)"""
        return np.abs(0.5 - self.classifier.predict_proba([x])[0][1])

    def get_decision_boundary_keypoints(self):
        """Returns the arrays of located decision boundary keypoints (both in the
        original feature space, and in the dimensionality-reduced 2D space)

        Returns
        -------
        decision_boundary_points : array
            Array containing points in the original feature space which are very
            close to the true decision boundary (closer than acceptance_threshold)

        decision_boundary_points_2d : array
            Array containing points in the dimensionality-reduced 2D space which
            are very close to the true decision boundary
        """
        if len(self.decision_boundary_points) == 0:
            raise Exception("Please call the fit method first!")
        return self.decision_boundary_points, self.decision_boundary_points_2d

    def _get_sorted_db_keypoint_distances(self, N=None):
        """Use a minimum spanning tree heuristic to find the N largest gaps in the
        line constituted by the current decision boundary keypoints.
        """
        if N is None:
            N = self.n_interpolated_keypoints
        edges = minimum_spanning_tree(squareform(pdist(self.decision_boundary_points_2d)))
        edged = np.array([euclidean(self.decision_boundary_points_2d[u],
                                    self.decision_boundary_points_2d[v]) for u, v in edges])
        gap_edge_idx = np.argsort(edged)[::-1][:N]
        edges = edges[gap_edge_idx]
        gap_distances = np.square(edged[gap_edge_idx])
        gap_probability_scores = gap_distances / np.sum(gap_distances)
        return edges, gap_distances, gap_probability_scores
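
The snippet above relies on an edge-list style minimum_spanning_tree helper; a self-contained sketch of the same gap heuristic using SciPy's minimum_spanning_tree (which returns a sparse matrix instead) looks roughly like this:

import numpy as np
from scipy.spatial.distance import pdist, squareform
from scipy.sparse.csgraph import minimum_spanning_tree as scipy_mst

points_2d = np.random.RandomState(0).rand(10, 2)  # illustrative keypoints
mst = scipy_mst(squareform(pdist(points_2d)))
rows, cols = mst.nonzero()
lengths = np.asarray(mst[rows, cols]).ravel()
order = np.argsort(lengths)[::-1]                 # largest gaps first
gap_distances = lengths[order] ** 2               # squared, as above
gap_probability_scores = gap_distances / gap_distances.sum()
print(list(zip(rows[order], cols[order]))[:3], gap_probability_scores[:3])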

    def _linear_decision_boundary_optimization(self, from_idx, to_idx, all_combinations=True, retry_neighbor_if_failed=True, step=None, suppress_output=False):
        """Use global optimization to locate the decision boundary along lines
        defined by instances from_idx and to_idx in the dataset (from_idx and to_idx
        have to contain indices from distinct classes to guarantee the existence of
        a decision boundary between them!)
        """
        step_str = ("Step " + str(step) + "/" + str(self.steps) + ":") if step is not None else ""

        retries = 4 if retry_neighbor_if_failed else 1
        for i in range(len(from_idx)):
            n = len(to_idx) if all_combinations else 1
            for j in range(n):
                from_i = from_idx[i]
                to_i = to_idx[j] if all_combinations else to_idx[i]
                for k in range(retries):
                    if k == 0:
                        from_point = self.Xminor[from_i]
                        to_point = self.Xmajor[to_i]
                    else:
                        # first attempt failed, try nearest neighbors of source and destination
                        # point instead
                        _, idx = self.nn_model_2d_minorityclass.kneighbors([self.Xminor2d[from_i]])
                        from_point = self.Xminor[idx[0][k // 2]]
                        _, idx = self.nn_model_2d_majorityclass.kneighbors([self.Xmajor2d[to_i]])
                        to_point = self.Xmajor[idx[0][k % 2]]

                    if euclidean(from_point, to_point) == 0:
                        break  # no decision boundary between equivalent points

                    db_point = self._find_decision_boundary_along_line(
                        from_point, to_point, penalize_tangent_distance=self.penalties_enabled, penalize_extremes=self.penalties_enabled)

                    if self.decision_boundary_distance(db_point) <= self.acceptance_threshold:
                        db_point2d = self.dimensionality_reduction.transform([db_point])[0]
                        if db_point2d[0] >= self.X2d_xmin and db_point2d[0] <= self.X2d_xmax and db_point2d[1] >= self.X2d_ymin and db_point2d[1] <= self.X2d_ymax:
                            self.decision_boundary_points.append(db_point)
                            self.decision_boundary_points_2d.append(db_point2d)
                            if self.verbose and not suppress_output:
                                # , ": New decision boundary keypoint found using linear optimization!"
                                print("{} {}/{}".format(step_str, i * n + j, len(from_idx) * n))
                            break
                        else:
                            if self.verbose and not suppress_output:
                                msg = "{} {}/{}: Rejected decision boundary keypoint (outside of plot area)"
                                print(msg.format(step_str, i * n + j, len(from_idx) * n))

    def _find_decision_boundary_along_line(self, from_point, to_point, penalize_extremes=False, penalize_tangent_distance=False):
        def objective(l, grad=0):
            # interpolate between source and destination; calculate distance from
            # decision boundary
            X = from_point + l[0] * (to_point - from_point)
            error = self.decision_boundary_distance(X)

            if penalize_tangent_distance:
                # distance from tangent between class1 and class0 point in 2d space
                x0, y0 = self.dimensionality_reduction.transform([X])[0]
                x1, y1 = self.dimensionality_reduction.transform([from_point])[0]
                x2, y2 = self.dimensionality_reduction.transform([to_point])[0]
                error += 1e-12 * np.abs((y2 - y1) * x0 - (x2 - x1) * y0 +
                                        x2 * y1 - y2 * x1) / np.sqrt((y2 - y1)**2 + (x2 - x1)**2)

            if penalize_extremes:
                error += 1e-8 * np.abs(0.5 - l[0])

            return error

        optimizer = self._get_optimizer()
        optimizer.set_min_objective(objective)
        cl = optimizer.optimize([random.random()])
        db_point = from_point + cl[0] * (to_point - from_point)
        return db_point

    def _find_decision_boundary_on_hypersphere(self, centroid, R, penalize_known=False):
        def objective(phi, grad=0):
            # search on hypersphere surface in polar coordinates - map back to cartesian
            cx = centroid + polar_to_cartesian(phi, R)
            try:
                cx2d = self.dimensionality_reduction.transform([cx])[0]
                error = self.decision_boundary_distance(cx)
                if penalize_known:
                    # slight penalty for being too close to already known decision boundary
                    # keypoints
                    db_distances = [euclidean(cx2d, self.decision_boundary_points_2d[k])
                                    for k in range(len(self.decision_boundary_points_2d))]
                    error += 1e-8 * ((self.mean_2d_dist - np.min(db_distances)) /
                                     self.mean_2d_dist)**2
                return error
            except Exception as ex:
                print("Error in objective function:", ex)
                return np.inf

        optimizer = self._get_optimizer(
            D=self.X.shape[1] - 1, upper_bound=2 * np.pi, iteration_budget=self.hypersphere_iteration_budget)
        optimizer.set_min_objective(objective)
        db_phi = optimizer.optimize([random.random() * 2 * np.pi for k in range(self.X.shape[1] - 1)])
        db_point = centroid + polar_to_cartesian(db_phi, R)
        return db_point
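
The polar_to_cartesian helper used above is not shown in this snippet; a plausible reconstruction (an assumption, using the standard hyperspherical-coordinate formulas) maps len(phi) angles and a radius R to a point on the surface of a (len(phi)+1)-dimensional sphere:

import numpy as np

def polar_to_cartesian(phi, R):
    phi = np.asarray(phi, dtype=float)
    n = len(phi) + 1
    x = np.full(n, float(R))
    x[:-1] *= np.cos(phi)             # coordinate i < n-1 gets cos(phi_i)
    x[1:] *= np.cumprod(np.sin(phi))  # and the running product of sines
    return x                          # np.linalg.norm(x) equals R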

    def _get_optimizer(self, D=1, upper_bound=1, iteration_budget=None):
        """Utility function creating an NLOPT optimizer with default
        parameters depending on this objects parameters
        """
        if iteration_budget == None:
            iteration_budget = self.linear_iteration_budget

        opt = nlopt.opt(nlopt.GN_DIRECT_L_RAND, D)
        # opt.set_stopval(self.acceptance_threshold/10.0)
        opt.set_ftol_rel(1e-5)
        opt.set_maxeval(iteration_budget)
        opt.set_lower_bounds(0)
        opt.set_upper_bounds(upper_bound)

        return opt
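
A minimal sketch of the NLOPT pattern that _get_optimizer sets up (derivative-free global search over a box), minimizing a toy one-dimensional objective; nothing here is specific to the class above:

import nlopt
import numpy as np

def toy_objective(x, grad):
    return float((x[0] - 0.3) ** 2)

opt = nlopt.opt(nlopt.GN_DIRECT_L_RAND, 1)
opt.set_min_objective(toy_objective)
opt.set_lower_bounds(0)
opt.set_upper_bounds(1)
opt.set_ftol_rel(1e-5)
opt.set_maxeval(200)
print(opt.optimize([np.random.random()]))  # converges to ~[0.3]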
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
                                              ClassifierMixin)):
    """Base class for label propagation module.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.

    gamma : float
        Parameter for rbf kernel

    alpha : float
        Clamping factor

    max_iter : float
        Change maximum number of iterations allowed

    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state

    n_neighbors : integer > 0
        Parameter for knn kernel

    """

    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
                 alpha=1, max_iter=30, tol=1e-3):

        self.max_iter = max_iter
        self.tol = tol

        # kernel parameters
        self.kernel = kernel
        self.gamma = gamma
        self.n_neighbors = n_neighbors

        # clamping factor
        self.alpha = alpha

    def _get_kernel(self, X, y=None):
        if self.kernel == "rbf":
            if y is None:
                return rbf_kernel(X, X, gamma=self.gamma)
            else:
                return rbf_kernel(X, y, gamma=self.gamma)
        elif self.kernel == "knn":
            if self.nn_fit is None:
                self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
            if y is None:
                # Nearest neighbors returns a directed matrix.
                dir_graph = self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
                                                         self.n_neighbors,
                                                         mode='connectivity')
                # Making the matrix symmetric
                un_graph = dir_graph + dir_graph.T
                # Since it is a connectivity matrix, all values should be
                # either 0 or 1
                un_graph[un_graph > 1.0] = 1.0
                return un_graph
            else:
                return self.nn_fit.kneighbors(y, return_distance=False)
        else:
            raise ValueError("%s is not a valid kernel. Only rbf and knn"
                             " are supported at this time" % self.kernel)

    @abstractmethod
    def _build_graph(self):
        raise NotImplementedError("Graph construction must be implemented"
                                  " to fit a label propagation model.")

    def predict(self, X):
        """Performs inductive inference across the model.

        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]

        Returns
        -------
        y : array_like, shape = [n_samples]
            Predictions for input data
        """
        probas = self.predict_proba(X)
        return self.classes_[np.argmax(probas, axis=1)].ravel()

    def predict_proba(self, X):
        """Predict probability for each possible outcome.

        Compute the probability estimates for each single sample in X
        and each possible outcome seen during training (categorical
        distribution).

        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]

        Returns
        -------
        probabilities : array, shape = [n_samples, n_classes]
            Normalized probability distributions across
            class labels
        """
        check_is_fitted(self, 'X_')

        X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
                                             'bsr', 'lil', 'dia'])
        weight_matrices = self._get_kernel(self.X_, X_2d)
        if self.kernel == 'knn':
            probabilities = []
            for weight_matrix in weight_matrices:
                ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
                probabilities.append(ine)
            probabilities = np.array(probabilities)
        else:
            weight_matrices = weight_matrices.T
            probabilities = np.dot(weight_matrices, self.label_distributions_)
        normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
        probabilities /= normalizer
        return probabilities

    def fit(self, X, y):
        """Fit a semi-supervised label propagation model based

        All the input data is provided matrix X (labeled and unlabeled)
        and corresponding label matrix y with a dedicated marker value for
        unlabeled samples.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            A {n_samples by n_samples} size matrix will be created from this

        y : array_like, shape = [n_samples]
            n_labeled_samples (unlabeled points are marked as -1)
            All unlabeled samples will be transductively assigned labels

        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y)
        self.X_ = X
        check_classification_targets(y)

        # actual graph construction (implementations should override this)
        graph_matrix = self._build_graph()

        # label construction
        # construct a categorical distribution for classification only
        classes = np.unique(y)
        classes = (classes[classes != -1])
        self.classes_ = classes

        n_samples, n_classes = len(y), len(classes)

        y = np.asarray(y)
        unlabeled = y == -1
        clamp_weights = np.ones((n_samples, 1))
        clamp_weights[~unlabeled, 0] = 1 - self.alpha

        # initialize distributions
        self.label_distributions_ = np.zeros((n_samples, n_classes))
        for label in classes:
            self.label_distributions_[y == label, classes == label] = 1

        y_static = np.copy(self.label_distributions_)
        if self.alpha > 0.:
            y_static *= self.alpha
        y_static[unlabeled] = 0

        l_previous = np.zeros((self.X_.shape[0], n_classes))

        remaining_iter = self.max_iter
        if sparse.isspmatrix(graph_matrix):
            graph_matrix = graph_matrix.tocsr()
        while (_not_converged(self.label_distributions_, l_previous, self.tol)
                and remaining_iter > 1):
            l_previous = self.label_distributions_
            self.label_distributions_ = safe_sparse_dot(
                graph_matrix, self.label_distributions_)
            # clamp
            self.label_distributions_ = np.multiply(
                clamp_weights, self.label_distributions_) + y_static

            remaining_iter -= 1

        normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
        self.label_distributions_ /= normalizer

        if remaining_iter <= 1:
            warnings.warn('max_iter was reached without convergence.',
                          category=ConvergenceWarning)

        # set the transduction item
        transduction = self.classes_[np.argmax(self.label_distributions_,
                                               axis=1)]
        self.transduction_ = transduction.ravel()
        self.n_iter_ = self.max_iter - remaining_iter
        return self
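
A hedged usage sketch of the algorithm this base class implements, via scikit-learn's public LabelPropagation (a concrete subclass of the same pattern); unlabeled samples are marked with -1 exactly as the fit docstring above describes:

import numpy as np
from sklearn.semi_supervised import LabelPropagation
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=100, random_state=0)
y_partial = np.copy(y)
y_partial[50:] = -1                     # hide half the labels
model = LabelPropagation(kernel='knn', n_neighbors=7).fit(X, y_partial)
# transduction_ holds the labels inferred for every sample
print((model.transduction_[50:] == y[50:]).mean())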
示例#36
0
    def _preprocess_neighbors(self, rebuild=False, save=True):
        neighbors_model_path = os.path.join(
            self.selected_dir,
            "neighbors_model-step" + str(self.model.step) + ".pkl")
        neighbors_path = os.path.join(
            self.selected_dir,
            "neighbors-step" + str(self.model.step) + ".npy")
        neighbors_weight_path = os.path.join(
            self.selected_dir,
            "neighbors_weight-step" + str(self.model.step) + ".npy")
        test_neighbors_path = os.path.join(
            self.selected_dir,
            "test_neighbors-step" + str(self.model.step) + ".npy")
        test_neighbors_weight_path = os.path.join(
            self.selected_dir,
            "test_neighbors_weight-step" + str(self.model.step) + ".npy")
        if os.path.exists(neighbors_model_path) and \
                os.path.exists(neighbors_path) and \
                os.path.exists(test_neighbors_path) and not rebuild and not DEBUG:
            logger.info("neighbors and neighbor_weight exist!!!")
            self.neighbors = np.load(neighbors_path)
            self.neighbors_weight = np.load(neighbors_weight_path)
            self.test_neighbors = np.load(test_neighbors_path)
            return
        logger.info("neighbors and neighbor_weight "
                    "do not exist, preprocessing!")
        train_X = self.get_full_train_X()
        train_num = train_X.shape[0]
        train_y = self.get_full_train_label()
        train_y = np.array(train_y)
        test_X = self.get_test_X()
        test_num = test_X.shape[0]
        self.max_neighbors = min(len(train_y), self.max_neighbors)
        logger.info("data shape: {}, labeled_num: {}".format(
            str(train_X.shape), sum(train_y != -1)))
        nn_fit = NearestNeighbors(7, n_jobs=-4).fit(train_X)
        logger.info("nn construction finished!")
        neighbor_result = nn_fit.kneighbors_graph(
            nn_fit._fit_X,
            self.max_neighbors,
            # 2,
            mode="distance")
        test_neighbors_result = nn_fit.kneighbors_graph(test_X,
                                                        self.max_neighbors,
                                                        mode="distance")
        logger.info("neighbor_result got!")
        self.neighbors, self.neighbors_weight = self.csr_to_impact_matrix(
            neighbor_result, train_num, self.max_neighbors)
        self.test_neighbors, test_neighbors_weight = self.csr_to_impact_matrix(
            test_neighbors_result, test_num, self.max_neighbors)

        logger.info("preprocessed neighbors got!")

        # save neighbors information
        if save:
            pickle_save_data(neighbors_model_path, nn_fit)
            np.save(neighbors_path, self.neighbors)
            np.save(neighbors_weight_path, self.neighbors_weight)
            np.save(test_neighbors_path, self.test_neighbors)
            np.save(test_neighbors_weight_path, test_neighbors_weight)
        return self.neighbors, self.test_neighbors
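
csr_to_impact_matrix is project code not shown here; under the assumption that it unpacks the CSR distance graph into dense per-row neighbor-index and weight arrays, a minimal sketch of that step looks like this:

import numpy as np
from sklearn.neighbors import NearestNeighbors

X_demo = np.random.RandomState(0).rand(20, 5)
k = 3
nn_fit = NearestNeighbors(n_neighbors=k).fit(X_demo)
graph = nn_fit.kneighbors_graph(X_demo, k, mode="distance")  # sparse CSR

# each row of the CSR graph stores exactly k entries, so the index and
# data arrays can be viewed as [n_samples, k] matrices
neighbors = graph.indices.reshape(-1, k)
neighbors_weight = graph.data.reshape(-1, k)
print(neighbors.shape, neighbors_weight.shape)  # (20, 3) (20, 3)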
示例#37
0
def joint_information(x, y, n_neighbors=3, random_noise=0.3):
    n_samples = x.size

    if random_noise:
        x = with_added_white_noise(x, random_noise)
        y = with_added_white_noise(y, random_noise)

    x = x.reshape((-1, 1))
    y = y.reshape((-1, 1))
    xy = np.hstack((x, y))

    # Here we rely on NearestNeighbors to select the fastest algorithm.
    nn = NearestNeighbors(metric='chebyshev', n_neighbors=n_neighbors)

    nn.fit(xy)
    radius = nn.kneighbors()[0]
    radius = np.nextafter(radius[:, -1], 0)

    # Algorithm is selected explicitly to allow passing an array as radius
    # later (not all algorithms support this).
    nn.set_params(algorithm='kd_tree')

    nn.fit(x)
    ind = nn.radius_neighbors(radius=radius, return_distance=False)
    nx = np.array([i.size for i in ind])

    nn.fit(y)
    ind = nn.radius_neighbors(radius=radius, return_distance=False)
    ny = np.array([i.size for i in ind])

    mi = (digamma(n_samples) + digamma(n_neighbors) -
          np.mean(digamma(nx + 1)) - np.mean(digamma(ny + 1)))

    return max(0, mi)
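
This is the k-nearest-neighbor mutual information construction of Kraskov et al. (the same estimator family behind scikit-learn's mutual_info_regression). A usage sketch on synthetic data, with random_noise=0 so that the with_added_white_noise helper (defined elsewhere in this project) is not needed:

import numpy as np

rng = np.random.RandomState(0)
x = rng.normal(size=1000)
y_dep = x + 0.5 * rng.normal(size=1000)   # strongly dependent on x
z_ind = rng.normal(size=1000)             # independent of x

print(joint_information(x, y_dep, n_neighbors=3, random_noise=0))  # clearly > 0
print(joint_information(x, z_ind, n_neighbors=3, random_noise=0))  # close to 0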
示例#38
0
#coding:utf-8 
'''
Created on 2018-01-23

@author: root
'''
from sklearn.neighbors import NearestNeighbors
import numpy as np
import operator
from KNNDateOnHand import *

datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')
normMat,ranges,minVals = autoNorm(datingDataMat)

nbrs = NearestNeighbors(n_neighbors=3).fit(normMat)  
input_man = [30000, 5, 0.5]
S = (input_man - minVals) / ranges
distances, indices = nbrs.kneighbors([S])  # kneighbors expects a 2D array
print(indices)
print(distances)
# classCount maps each class label (key) to the number of votes it received from the k neighbors (value)
classCount = {}
for i in range(3):
    voteLabel = datingLabels[indices[0][i]]
    classCount[voteLabel] = classCount.get(voteLabel,0) + 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
resultList = ['not interested', 'looks OK', 'very attractive']
print(resultList[sortedClassCount[0][0]-1])
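
The tallying loop above can be written more compactly with collections.Counter; a hedged, behavior-equivalent alternative (assuming integer labels starting at 1, as in the dataset used here):

from collections import Counter

votes = Counter(datingLabels[j] for j in indices[0])
top_label = votes.most_common(1)[0][0]
print(resultList[top_label - 1])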

示例#40
0
class BaseLabelPropagation(BaseEstimator, ClassifierMixin, metaclass=ABCMeta):
    def __init__(self,
                 kernel='rbf',
                 gamma=20,
                 n_neighbors=7,
                 alpha=1,
                 max_iter=30,
                 tol=1e-3,
                 n_jobs=None):

        self.max_iter = max_iter
        self.tol = tol

        # kernel parameters
        self.kernel = kernel
        self.gamma = gamma
        self.n_neighbors = n_neighbors

        # clamping factor
        self.alpha = alpha

        self.n_jobs = n_jobs

    def _get_kernel(self, X, y=None):
        if self.kernel == "rbf":
            if y is None:
                return rbf_kernel(X, X, gamma=self.gamma)
            else:
                return rbf_kernel(X, y, gamma=self.gamma)
        elif self.kernel == "knn":
            if self.nn_fit is None:
                self.nn_fit = NearestNeighbors(self.n_neighbors,
                                               n_jobs=self.n_jobs).fit(X)
            if y is None:
                return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
                                                    self.n_neighbors,
                                                    mode='connectivity')
            else:
                return self.nn_fit.kneighbors(y, return_distance=False)
        elif callable(self.kernel):
            if y is None:
                return self.kernel(X, X)
            else:
                return self.kernel(X, y)
        else:
            raise ValueError("%s is not a valid kernel. Only rbf and knn"
                             " or an explicit function "
                             " are supported at this time." % self.kernel)

    def fit(self, X, y):
        """
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            input data matrix

        y : array-like, shape = [n_samples]
            n_labeled_samples (unlabeled = -1)

        Returns
        ----------
        self : returns an instance of self.
        """
        # initialize X_
        self.X_ = X

        # actual graph construction
        graph_matrix = self._build_graph()

        # initialize classes
        classes = np.unique(y)
        classes = (classes[classes != -1])  ## self indexing array
        self.classes_ = classes

        # set n size
        n_samples, n_classes = len(y), len(classes)

        # set unlabeled to -1
        y = np.asarray(y)
        unlabeled = y == -1

        # initialize distributions
        self.label_distributions_ = np.zeros((n_samples, n_classes))
        for label in classes:
            self.label_distributions_[y == label, classes == label] = 1

        y_static = np.copy(self.label_distributions_)
        if self._variant == 'propagation':
            y_static[unlabeled] = 0

        # initialize l_previous
        l_previous = np.zeros((self.X_.shape[0], n_classes))

        # add a dimension to unlabeled
        unlabeled = unlabeled[:, np.newaxis]

        for self.n_iter_ in range(self.max_iter):
            if np.abs(self.label_distributions_ - l_previous).sum() < self.tol:
                break

            l_previous = self.label_distributions_
            self.label_distributions_ = safe_sparse_dot(
                graph_matrix, self.label_distributions_)  ## BLAS dot

            if self._variant == 'propagation':
                normalizer = np.sum(self.label_distributions_,
                                    axis=1)[:, np.newaxis]
                self.label_distributions_ /= normalizer
                self.label_distributions_ = np.where(unlabeled,
                                                     self.label_distributions_,
                                                     y_static)
        else:
            self.n_iter_ += 1

        normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
        self.label_distributions_ /= normalizer

        # set the transduction item
        transduction = self.classes_[np.argmax(self.label_distributions_,
                                               axis=1)]
        self.transduction_ = transduction.ravel()
        return self

    def _build_graph(self):
        """Matrix representing a fully connected graph between each sample

        This basic implementation creates a non-stochastic affinity matrix, so
        class distributions will exceed 1 (normalization may be desired).
        """
        if self.kernel == 'knn':
            self.nn_fit = None
        affinity_matrix = self._get_kernel(self.X_)
        normalizer = affinity_matrix.sum(axis=0)
        if sparse.isspmatrix(affinity_matrix):
            affinity_matrix.data /= np.diag(np.array(normalizer))
        else:
            affinity_matrix /= normalizer[:, np.newaxis]
        return affinity_matrix
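
A small demonstration of what _build_graph produces in the dense rbf case: for a symmetric kernel, dividing by the column sums makes each row sum to 1, i.e. a random-walk transition matrix over the samples (illustrative data only):

import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

X_demo = np.random.RandomState(0).rand(5, 2)
A = rbf_kernel(X_demo, X_demo, gamma=20)
A /= A.sum(axis=0)[:, np.newaxis]   # same normalization as _build_graph
print(A.sum(axis=1))                # -> approximately [1. 1. 1. 1. 1.]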