Example #1
 def train(self, sparseX, **kargv):
     print('Info: Training KNN')
     if 'algorithm' not in kargv:
         self.model = NearestNeighbors(n_neighbors=kargv['topK'],
                                       metric=kargv['metric'])
     else:
         self.model = NearestNeighbors(n_neighbors=kargv['topK'],
                                       metric=kargv['metric'],
                                       algorithm=kargv['algorithm'])
     self.model.fit(sparseX)
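
A hedged usage sketch for the method above; `knn_model`, `sparseX`, and the keyword values are hypothetical, chosen only to show which keys the method reads:

# knn_model is assumed to be an instance of the class defining train();
# sparseX is any matrix accepted by NearestNeighbors.fit().
knn_model.train(sparseX, topK=10, metric='cosine', algorithm='brute')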
Example #2
    def fit(self, X, y, sample_weight=None):
        """ Prepare different things for fast computation of metrics """
        X, y, sample_weight = check_xyw(X, y, sample_weight=sample_weight)
        self._label_mask = numpy.array(y == self.uniform_label)
        assert sum(self._label_mask) > 0, 'No events of uniform class!'
        # weights of events
        self._masked_weight = sample_weight[self._label_mask]

        X_part = numpy.array(take_features(
            X, self.uniform_features))[self._label_mask, :]
        # computing knn indices
        neighbours = NearestNeighbors(n_neighbors=self.n_neighbours,
                                      algorithm='kd_tree').fit(X_part)
        _, self._groups_indices = neighbours.kneighbors(X_part)
        self._group_matrix = ut.group_indices_to_groups_matrix(
            self._groups_indices, n_events=len(X_part))
        # self._group_weights = ut.compute_group_weights_by_indices(self._groups_indices,
        # sample_weight=self._masked_weight)
        self._group_weights = ut.compute_group_weights(
            self._group_matrix, sample_weight=self._masked_weight)
        # self._divided_weights = ut.compute_divided_weight_by_indices(self._groups_indices,
        #                                                              sample_weight=self._masked_weight)
        self._divided_weights = ut.compute_divided_weight(
            self._group_matrix, sample_weight=self._masked_weight)
        return self
Example #3
 def _get_kernel(self, X, y=None):
     if self.kernel == "rbf":
         if y is None:
             return rbf_kernel(X, X, gamma=self.gamma)
         else:
             return rbf_kernel(X, y, gamma=self.gamma)
     elif self.kernel == "knn":
         if self.nn_fit is None:
             t0 = time()
             self.nn_fit = NearestNeighbors(n_neighbors=self.n_neighbors,
                                            n_jobs=self.n_jobs).fit(X)
             print("NearestNeighbors fit time cost:", time() - t0)
         if y is None:
             t0 = time()
             result = self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
                                                   self.n_neighbors,
                                                   mode='connectivity')
             print("construct kNN graph time cost:", time() - t0)
             return result
         else:
             return self.nn_fit.kneighbors(y, return_distance=False)
     elif callable(self.kernel):
         if y is None:
             return self.kernel(X, X)
         else:
             return self.kernel(X, y)
     else:
         raise ValueError("%s is not a valid kernel. Only rbf and knn"
                          " or an explicit function "
                          " are supported at this time." % self.kernel)
Example #4
def joint_information(x, y, n_neighbors=3, random_noise=0.3):
    n_samples = x.size

    if random_noise:
        x = with_added_white_noise(x, random_noise)
        y = with_added_white_noise(y, random_noise)

    x = x.reshape((-1, 1))
    y = y.reshape((-1, 1))
    xy = np.hstack((x, y))

    # Here we rely on NearestNeighbors to select the fastest algorithm.
    nn = NearestNeighbors(metric='chebyshev', n_neighbors=n_neighbors)

    nn.fit(xy)
    radius = nn.kneighbors()[0]
    radius = np.nextafter(radius[:, -1], 0)

    # Algorithm is selected explicitly to allow passing an array as radius
    # later (not all algorithms support this).
    nn.set_params(algorithm='kd_tree')

    nn.fit(x)
    ind = nn.radius_neighbors(radius=radius, return_distance=False)
    nx = np.array([i.size for i in ind])

    nn.fit(y)
    ind = nn.radius_neighbors(radius=radius, return_distance=False)
    ny = np.array([i.size for i in ind])

    mi = (digamma(n_samples) + digamma(n_neighbors) -
          np.mean(digamma(nx + 1)) - np.mean(digamma(ny + 1)))

    return max(0, mi)
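
A minimal usage sketch, assuming `numpy as np` and `scipy.special.digamma` are imported as the snippet requires, and a scikit-learn version whose radius_neighbors accepts a per-sample radius array (the comment in the function relies on this). Passing random_noise=0 avoids the external with_added_white_noise helper:

rng = np.random.RandomState(0)
x = rng.normal(size=1000)
# dependent samples -> clearly positive estimate
print(joint_information(x, x + 0.1 * rng.normal(size=1000), random_noise=0))
# independent samples -> estimate near zero
print(joint_information(x, rng.normal(size=1000), random_noise=0))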
Example #5
def image_retrieval(constant_overwrites):
    img_shape, attr, x_train, x_test = load_faces_dataset()
    constants = merge_dict(get_constants(), constant_overwrites)
    constants['img_shape'] = img_shape
    encoder_filename = constants['encoder_filename']
    decoder_filename = constants['decoder_filename']
    reset_tf_session()
    autoencoder, encoder, decoder = model_builder(network_builder, constants)
    if os.path.exists(encoder_filename) and not constants['retrain']:
        encoder.load_weights(encoder_filename)
    else:
        data = {'X_train': x_train, 'X_test': x_test}
        train(autoencoder, data, constants)
        encoder.save_weights(encoder_filename)
        decoder.save_weights(decoder_filename)

    images = x_train
    codes = encoder.predict(images)
    assert len(codes) == len(images)
    nei_clf = NearestNeighbors(metric="euclidean")
    nei_clf.fit(codes)

    # Cherry-picked examples:

    # smiles
    show_similar(x_test[247], nei_clf, encoder, images)

    # ethnicity
    show_similar(x_test[56], nei_clf, encoder, images)

    # glasses
    show_similar(x_test[63], nei_clf, encoder, images)
Example #6
 def fit(self, X, y):
     t = time()  # get labels for test data
     # build the graph result is the affinity matrix
     if self.kernel == 'dbscan' or self.kernel is None:
         affinity_matrix = self.dbscan(X, self.eps, self.minPts)
     # it is possible to use other kernels -> as parameter
     elif self.kernel == 'rbf':
         affinity_matrix = rbf_kernel(X, X, gamma=self.gamma)
     elif self.kernel == 'knn':
         affinity_matrix = NearestNeighbors(n_neighbors=self.naighbors).fit(X).kneighbors_graph(X, self.naighbors).toarray()
     else:
         raise ValueError("%s is not a valid kernel" % self.kernel)
     print("graph(%s) time %2.3fms" % (self.kernel, (time() - t) * 1000))
     if affinity_matrix.max() == 0:
         print("no affinity matrix found")
         return y
     
     degree_matrix = np.diag(affinity_matrix.sum(axis=0))
     affinity_matrix = np.matrix(affinity_matrix)

     try:
         inverse_degree_matrix = np.linalg.inv(degree_matrix)
     except np.linalg.LinAlgError as err:
         if 'Singular matrix' in err.args:
             # use the pseudo-inverse if the degree matrix cannot be inverted directly
             inverse_degree_matrix = np.linalg.pinv(degree_matrix)
         else:
             raise

     matrix = inverse_degree_matrix * affinity_matrix
     # split labels in different vectors to calculate the propagation for the separate label
     labels = np.unique(y)
     labels = [x for x in labels if x != self.unlabeledValue]
     # init the yn1 and y0
     y0  = [[1 if (x == l) else 0 for x in y] for l in labels]
     yn1 = y0
     # function to set the probability to 1 if it was labeled in the source
     toOrgLabels = np.vectorize(lambda x, y: 1 if y == 1 else x, otypes=[np.intp])
     # function to set the indices of the source-labeled events
     toOrgLabelsIndex = np.vectorize(lambda x, y, z: z if y == 1 else x, otypes=[np.intp])
     lastLabels       = np.argmax(y0, axis=0)
     while True:
         yn1 = yn1 * matrix
         # first: map the matrix result to labels
         ynLabelsIndex = np.argmax(yn1, axis=0)
         # normalize
         yn1 /= yn1.max()
         yn1 = toOrgLabels(yn1, y0)
         for x in y0:
             ynLabelsIndex = toOrgLabelsIndex(ynLabelsIndex, x, y0.index(x))
         # second: pin the original labels onto the result
         if np.array_equiv(ynLabelsIndex, lastLabels):
             break
         lastLabels = ynLabelsIndex
     # result is the index of the labels -> cast index to the given labels
     toLabels = np.vectorize(lambda x: labels[x])
     return np.array(toLabels(lastLabels))[0]
Example #7
    def compute_parameters(self, trainX, trainY):
        for variable in self.uniform_variables:
            if variable not in trainX.columns:
                raise ValueError("Dataframe is missing %s column" % variable)

        if self.knn is None:
            A = pairwise_distances(trainX[self.uniform_variables])
            A = self.distance_dependence(A)
            A *= (trainY[:, numpy.newaxis] == trainY[numpy.newaxis, :])
        else:
            is_signal = trainY > 0.5
            # computing knn indices of same type
            uniforming_features_of_signal = numpy.array(trainX.loc[is_signal, self.uniform_variables])
            neighbours = NearestNeighbors(n_neighbors=self.knn, algorithm='kd_tree').fit(uniforming_features_of_signal)
            signal_distances, knn_signal_indices = neighbours.kneighbors(uniforming_features_of_signal)
            knn_signal_indices = numpy.where(is_signal)[0].take(knn_signal_indices)

            uniforming_features_of_bg = numpy.array(trainX.loc[~is_signal, self.uniform_variables])
            neighbours = NearestNeighbors(n_neighbors=self.knn, algorithm='kd_tree').fit(uniforming_features_of_bg)
            bg_distances, knn_bg_indices = neighbours.kneighbors(uniforming_features_of_bg)
            knn_bg_indices = numpy.where(~is_signal)[0].take(knn_bg_indices)

            signal_distances = self.distance_dependence(signal_distances.flatten())
            bg_distances = self.distance_dependence(bg_distances.flatten())

            signal_ind_ptr = numpy.arange(0, sum(is_signal) * self.knn + 1, self.knn)
            bg_ind_ptr = numpy.arange(0, sum(~is_signal) * self.knn + 1, self.knn)
            signal_column_indices = knn_signal_indices.flatten()
            bg_column_indices = knn_bg_indices.flatten()

            A_sig = sparse.csr_matrix((signal_distances, signal_column_indices, signal_ind_ptr),
                                      shape=(sum(is_signal), len(trainX)))
            A_bg = sparse.csr_matrix((bg_distances, bg_column_indices, bg_ind_ptr),
                                     shape=(sum(~is_signal), len(trainX)))

            A = sparse.vstack((A_sig, A_bg), format='csr')

        if self.row_normalize:
            from sklearn.preprocessing import normalize

            A = normalize(A, norm='l1', axis=1)

        return A, numpy.ones(A.shape[0])
Example #8
    def preprocess_neighbors(self, rebuild=False, save=True):
        neighbors_model_path = os.path.join(self.selected_dir,
                                            "neighbors_model" + ".pkl")
        neighbors_path = os.path.join(self.selected_dir, "neighbors" + ".npy")
        neighbors_weight_path = os.path.join(self.selected_dir,
                                             "neighbors_weight" + ".npy")
        test_neighbors_path = os.path.join(self.selected_dir,
                                           "test_neighbors" + ".npy")
        test_neighbors_weight_path = os.path.join(
            self.selected_dir, "test_neighbors_weight" + ".npy")
        if os.path.exists(neighbors_model_path) and \
                os.path.exists(neighbors_path) and \
                os.path.exists(test_neighbors_path) and not rebuild:
            print("neighbors and neighbor_weight exist!!!")
            neighbors = np.load(neighbors_path)
            neighbors_weight = np.load(neighbors_weight_path)
            test_neighbors = np.load(test_neighbors_path)
            self.test_neighbors = test_neighbors
            return neighbors, neighbors_weight, test_neighbors
        print("neighbors and neighbor_weight  do not exist, preprocessing!")
        train_num = self.train_X.shape[0]
        train_y = np.array(self.train_y)
        test_num = self.test_X.shape[0]
        max_neighbors = min(len(train_y), 200)
        print("data shape: {}, labeled_num: {}".format(str(self.train_X.shape),
                                                       sum(train_y != -1)))
        nn_fit = NearestNeighbors(n_neighbors=7, n_jobs=-4).fit(self.train_X)
        print("nn construction finished!")
        neighbor_result = nn_fit.kneighbors_graph(
            nn_fit._fit_X,
            max_neighbors,
            # 2,
            mode="distance")
        test_neighbors_result = nn_fit.kneighbors_graph(self.test_X,
                                                        max_neighbors,
                                                        mode="distance")
        print("neighbor_result got!")
        neighbors, neighbors_weight = csr_to_impact_matrix(
            neighbor_result, train_num, max_neighbors)
        test_neighbors, test_neighbors_weight = csr_to_impact_matrix(
            test_neighbors_result, test_num, max_neighbors)
        self.test_neighbors = test_neighbors

        print("preprocessed neighbors got!")

        # save neighbors information
        if save:
            pickle_save_data(neighbors_model_path, nn_fit)
            np.save(neighbors_path, neighbors)
            np.save(neighbors_weight_path, neighbors_weight)
            np.save(test_neighbors_path, test_neighbors)
            np.save(test_neighbors_weight_path, test_neighbors_weight)
        return neighbors, neighbors_weight, test_neighbors
Example #9
 def train(self, userId):
     if not self.model:
         self.model = NearestNeighbors(n_neighbors=self.num + 1).fit(
             self.provider.provideAll())
     distance, neighborList = self.model.kneighbors(
         [self.provider.provide(userId)])
     if distance[0][2] == 0:
         return []
     similarity = self.distanceToSimilarity(distance[0][1:])
     res = []
     for i in range(self.num):
         res.append((neighborList[0][i + 1], similarity[i]))
     return res
Example #10
def compute_knn_indices_of_signal(X, is_signal, n_neighbours=50):
    """For each event returns the knn closest signal(!) events. No matter of what class the event is.

    :type X: numpy.array, shape = [n_samples, n_features] the distance is measured over these variables
    :type is_signal: numpy.array, shape = [n_samples] with booleans
    :rtype: numpy.array, shape = [len(X), knn], each row contains indices of the closest signal events
    """
    assert len(X) == len(is_signal), "Different lengths"
    signal_indices = numpy.where(is_signal)[0]
    X_signal = numpy.array(X)[numpy.array(is_signal)]
    neighbours = NearestNeighbors(n_neighbors=n_neighbours,
                                  algorithm='kd_tree').fit(X_signal)
    _, knn_signal_indices = neighbours.kneighbors(X)
    return numpy.take(signal_indices, knn_signal_indices)
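
A minimal usage sketch on hypothetical data (numpy imported as in the snippet); it checks that every returned index really points at a signal event:

rng = numpy.random.RandomState(0)
X = rng.normal(size=(100, 2))
is_signal = numpy.arange(100) % 2 == 0
knn = compute_knn_indices_of_signal(X, is_signal, n_neighbours=5)
assert knn.shape == (100, 5)
assert is_signal[knn].all()  # only signal events are ever returned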
Example #11
def rvalue(X, Y, n_neighbors=10, theta=1):
    
    neigh = NearestNeighbors(n_neighbors=n_neighbors).fit(X)
    
    count = 0
    
    for i in range(len(X)):
        _, [indices] = neigh.kneighbors([X[i]])
        
        diff = [Y[index] for index in indices if Y[index] != Y[i]]
        
        if len(diff) > theta:
            count += 1
    
    return count / len(X)
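
A tiny hypothetical check: with two well-separated clusters, no point's neighbourhood contains more than theta points of another label, so the score is 0:

X = [[0.0, 0.0], [0.0, 1.0], [10.0, 10.0], [10.0, 11.0]]
Y = [0, 0, 1, 1]
print(rvalue(X, Y, n_neighbors=2, theta=0))  # 0.0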
Example #12
 def predictAll(self, userIndex):
     if not self.default:
         self.coldStart()
     uf = self.getParam(userIndex)
     if sum(uf) == 0:
         return self.default
     # data,transfer=self.afProvider.filterClicked()
     if not self.model:
         self.model = NearestNeighbors(n_neighbors=self.maxNum,
                                       algorithm='auto').fit(
                                           self.afProvider.provideAll())
     distance, candidates = self.model.kneighbors([uf])
     res = []
     for i in range(self.maxNum):
         res.append((candidates[0][i], 1 - distance[0][i]))
     return res
Example #13
def computeSignalKnnIndices(uniform_variables,
                            dataframe,
                            is_signal,
                            n_neighbors=50):
    """For each event returns the knn closest signal(!) events. No matter of what class the event is.
    :type uniform_variables: list of names of variables, using which we want to compute the distance
    :type dataframe: pandas.DataFrame, should contain these variables
    :type is_signal: numpy.array, shape = [n_samples] with booleans
    :rtype numpy.array, shape [len(dataframe), knn], each row contains indices of closest signal events
    """
    assert len(dataframe) == len(is_signal), "Different lengths"
    signal_indices = numpy.where(is_signal)[0]
    for variable in uniform_variables:
        assert variable in dataframe.columns, "Dataframe is missing %s column" % variable
    uniforming_features_of_signal = numpy.array(
        dataframe.loc[is_signal, uniform_variables])
    neighbours = NearestNeighbors(
        n_neighbors=n_neighbors,
        algorithm='kd_tree').fit(uniforming_features_of_signal)
    _, knn_signal_indices = neighbours.kneighbors(dataframe[uniform_variables])
    return numpy.take(signal_indices, knn_signal_indices)
Example #14
def recommend():
    #if request.method == 'POST':
    f = request.files['file']
    basepath = os.path.dirname(__file__)
    file_path = os.path.join(basepath, 'uploads', secure_filename(f.filename))
    f.save(file_path)

    # cluster features for recommending
    filelist.sort()
    featurelist = []
    for i, imagepath in enumerate(filelist):
        print("    Status: %s / %s" % (i, len(filelist)), end="\r")
        img = image.load_img(imagepath, target_size=(224, 224))
        img_data = image.img_to_array(img)
        img_data = np.expand_dims(img_data, axis=0)
        img_data = preprocess_input(img_data)
        features = np.array(model.predict(img_data))
        featurelist.append(features.flatten())
    nei_clf = NearestNeighbors(metric="euclidean")
    nei_clf.fit(featurelist)
    distances, neighbors = get_similar(file_path, n_neighbors=3)
    return 'hello recommender'
Example #15
 def adaptive_evaluation_bkp(self):
     train_X = self.data.get_train_X()
     affinity_matrix = self.data.get_graph()
     affinity_matrix.setdiag(0)
     pred = self.pred_dist
     test_X = self.data.get_test_X()
     test_y = self.data.get_test_ground_truth()
     # nn_fit = self.data.get_neighbors_model()
     nn_fit = NearestNeighbors(n_jobs=-4).fit(train_X)
     logger.info("nn construction finished!")
     neighbor_result = nn_fit.kneighbors_graph(test_X,
                                               100,
                                               mode="distance")
     logger.info("neighbor_result got!")
     estimate_k = 5
     s = 0
     rest_idxs = self.data.get_rest_idxs()
     # removed_idxs = self.remv
     labels = []
     for i in tqdm(range(test_X.shape[0])):
         start = neighbor_result.indptr[i]
         end = neighbor_result.indptr[i + 1]
         j_in_this_row = neighbor_result.indices[start:end]
         data_in_this_row = neighbor_result.data[start:end]
         sorted_idx = data_in_this_row.argsort()
         assert (len(sorted_idx) == 100)
         j_in_this_row = j_in_this_row[sorted_idx]
         estimated_idxs = j_in_this_row[:estimate_k]
         estimated_idxs = np.array([i for i in estimated_idxs if i in rest_idxs])
         adaptive_k = affinity_matrix[estimated_idxs, :].sum() / estimate_k
         selected_idxs = j_in_this_row[:int(adaptive_k)]
         p = pred[selected_idxs].sum(axis=0)
         labels.append(p.argmax())
         s += adaptive_k
         # print(adaptive_k)
     acc = accuracy_score(test_y, labels)
     logger.info("exp accuracy: {}".format(acc))
     print(s/test_X.shape[0])
Example #16
    def trainAll(self):
        if not self.model:
            self.model = NearestNeighbors(n_neighbors=self.num + 1,
                                          algorithm='auto').fit(
                                              self.provider.provideAll())
        res = []
        distances, friends = self.model.kneighbors(self.provider.provideAll())
        for count in range(len(friends)):
            friend = []
            if distances[count][2] == 0:
                res.append(friend)
                continue
            similarity = self.distanceToSimilarity(distances[count])[1:]
            neighborList = friends[count][1:]

            for i in range(self.num):
                friend.append((neighborList[i], similarity[i]))
            res.append(friend)
            print("User " + str(count) + " finded!")
        if self.isUpdate():
            CacheUtil.dumpUserFriends(res)
            DBUtil.dumpFriends(res)
        return res
Example #17
def upload():
    if request.method == 'POST':
        # Get the file from post request
        f = request.files['file']

        # Save the file to ./uploads
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(basepath, 'uploads',
                                 secure_filename(f.filename))
        f.save(file_path)

        # Make prediction
        output_class = [
            "batteries", "cloth", "e-waste", "glass", "light bulbs",
            "metallic", "organic", "paper", "plastic"
        ]

        preds = model_predict(file_path, model)
        print(preds)

        pred_class = output_class[np.argmax(preds)]
        pred_class_percent = round(np.max(preds) * 100, 2)

        result = 'It is ' + pred_class + ' waste'  # Convert to string
        pred_class = ' with ' + str(pred_class_percent) + '% confidence'

        # k-NN for recommending
        filelist.sort()
        featurelist = []
        for i, imagepath in enumerate(filelist):
            print("    Status: %s / %s" % (i, len(filelist)), end="\r")
            img = image.load_img(imagepath, target_size=(224, 224))
            img_data = image.img_to_array(img)
            img_data = np.expand_dims(img_data, axis=0)
            img_data = preprocess_input(img_data)
            features = np.array(model.predict(img_data))
            featurelist.append(features.flatten())
        nei_clf = NearestNeighbors(metric="euclidean")
        nei_clf.fit(featurelist)
        code = model_predict(file_path, model)
        (distances, ), (idx, ) = nei_clf.kneighbors(code, n_neighbors=3)

        #all images are loaded as np arrays
        images = []
        labels = []
        j = 1
        for i, image_path in enumerate(filelist):
            images.append(load_data(image_path))
        images = np.asarray(images)  # all of the images are converted to an np array of (1360, 224, 224, 3)

        print(distances, images[idx])
        print(images[idx].shape)

        final_result = result + pred_class
        image_save = Image.fromarray(
            (np.array(images[0]) * 255).astype(np.uint8))
        #image_save = Image.fromarray(images[idx], "RGB")
        image_save.save('out.jpg')
        image_output = os.path.join(basepath, 'out.jpg')
        immg = '<img src="' + image_output + '" style="height: 132px; width: 132px;">'
        #return render_template('index.html', filename=image_output)
        return final_result
    return None
Example #18
<img src="https://github.com/hse-aml/intro-to-dl/blob/master/week4/images/similar_images.jpg?raw=1" style="width:60%">

To speed up the retrieval process, one should use Locality Sensitive Hashing on top of the encoded vectors. This [technique](https://erikbern.com/2015/07/04/benchmark-of-approximate-nearest-neighbor-libraries.html) can narrow down the potential nearest neighbours of our image in latent space (the encoder code); a hashing sketch follows at the end of this example. We will calculate nearest neighbours in a brute-force way for simplicity.
"""

# restore trained encoder weights
s = reset_tf_session()
encoder, decoder = build_deep_autoencoder(IMG_SHAPE, code_size=32)
encoder.load_weights("encoder.h5")

images = X_train
codes = encoder.predict(images)  # encode all images (same usage as in Example #5 above)
assert len(codes) == len(images)

from sklearn.neighbors import NearestNeighbors
nei_clf = NearestNeighbors(metric="euclidean")
nei_clf.fit(codes)

def get_similar(image, n_neighbors=5):
    assert image.ndim == 3, "image must be [height, width, 3]"

    code = encoder.predict(image[None])
    
    (distances,),(idx,) = nei_clf.kneighbors(code,n_neighbors=n_neighbors)
    
    return distances,images[idx]

def show_similar(image):
    
    distances,neighbors = get_similar(image,n_neighbors=3)
    
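As mentioned above, Locality Sensitive Hashing can prune the candidate set before the exact search. A minimal random-hyperplane LSH sketch follows; this is an illustrative assumption, not the notebook's code, and it reuses the codes and encoder defined above:

import numpy as np

n_bits = 16  # hash length; more bits -> smaller, purer buckets but fewer candidates
rng = np.random.RandomState(0)
hyperplanes = rng.normal(size=(codes.shape[1], n_bits))

def lsh_key(vectors):
    # sign pattern of projections onto random hyperplanes -> integer bucket id
    bits = np.asarray(vectors) @ hyperplanes > 0
    return bits.dot(1 << np.arange(n_bits))

buckets = {}
for i, key in enumerate(lsh_key(codes)):
    buckets.setdefault(int(key), []).append(i)

def lsh_candidates(image):
    # only images hashed into the same bucket need an exact distance check
    code = encoder.predict(image[None])
    return buckets.get(int(lsh_key(code)[0]), [])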
Example #19
    def fit(self, X, y, training_indices=None):
        """Specify data to be plotted, and fit classifier only if required (the
        specified clasifier is only trained if it has not been trained yet).

        All the input data is provided in the matrix X, and corresponding
        binary labels (values taking 0 or 1) in the vector y

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            An {n_samples} by {n_features} matrix containing the data

        y : array-like, shape = [n_samples]
            Labels

        training_indices : array-like or float, optional (default=None)
            Indices on which the classifier has been trained / should be trained.
            If float, it is converted to a random sample with the specified proportion
            of the full dataset.

        Returns
        -------
        self : returns an instance of self.
        """
        if set(np.array(y, dtype=int).tolist()) != set([0, 1]):
            raise Exception(
                "Currently only implemented for binary classification. Make sure you pass in two classes (0 and 1)")

        if training_indices is None:
            train_idx = range(len(y))
        elif isinstance(training_indices, float):
            train_idx, test_idx = train_test_split(range(len(y)), train_size=training_indices)
        else:
            train_idx = training_indices

        self.X = X
        self.y = y
        self.train_idx = train_idx
        #self.test_idx = np.setdiff1d(np.arange(len(y)), self.train_idx, assume_unique=False)
        self.test_idx = list(set(range(len(y))).difference(set(self.train_idx)))

        # fit classifier if necessary
        try:
            self.classifier.predict([X[0]])
        except:
            self.classifier.fit(X[train_idx, :], y[train_idx])

        self.y_pred = self.classifier.predict(self.X)

        # fit DR method if necessary
        try:
            self.dimensionality_reduction.transform([X[0]])
        except:
            self.dimensionality_reduction.fit(X, y)

        try:
            self.dimensionality_reduction.transform([X[0]])
        except:
            raise Exception(
                "Please make sure your dimensionality reduction method has an exposed transform() method! If in doubt, use PCA or Isomap")

        # transform data
        self.X2d = self.dimensionality_reduction.transform(self.X)
        self.mean_2d_dist = np.mean(pdist(self.X2d))
        self.X2d_xmin, self.X2d_xmax = np.min(self.X2d[:, 0]), np.max(self.X2d[:, 0])
        self.X2d_ymin, self.X2d_ymax = np.min(self.X2d[:, 1]), np.max(self.X2d[:, 1])

        self.majorityclass = 0 if list(y).count(0) > list(y).count(1) else 1
        self.minorityclass = 1 - self.majorityclass
        minority_idx = np.where(y == self.minorityclass)[0]
        majority_idx = np.where(y == self.majorityclass)[0]
        self.Xminor, self.Xmajor = X[minority_idx], X[majority_idx]
        self.Xminor2d, self.Xmajor2d = self.X2d[minority_idx], self.X2d[majority_idx]

        # set up efficient nearest neighbor models for later use
        self.nn_model_2d_majorityclass = NearestNeighbors(n_neighbors=2)
        self.nn_model_2d_majorityclass.fit(self.X2d[majority_idx, :])

        self.nn_model_2d_minorityclass = NearestNeighbors(n_neighbors=2)
        self.nn_model_2d_minorityclass.fit(self.X2d[minority_idx, :])

        # step 1. look for decision boundary points between corners of majority &
        # minority class distribution
        minority_corner_idx, majority_corner_idx = [], []
        for extremum1 in [np.min, np.max]:
            for extremum2 in [np.min, np.max]:
                _, idx = self.nn_model_2d_minorityclass.kneighbors(
                    [[extremum1(self.Xminor2d[:, 0]), extremum2(self.Xminor2d[:, 1])]])
                minority_corner_idx.append(idx[0][0])
                _, idx = self.nn_model_2d_majorityclass.kneighbors(
                    [[extremum1(self.Xmajor2d[:, 0]), extremum2(self.Xmajor2d[:, 1])]])
                majority_corner_idx.append(idx[0][0])

        # optimize to find new db keypoints between corners
        self._linear_decision_boundary_optimization(
            minority_corner_idx, majority_corner_idx, all_combinations=True, step=1)

        # step 2. look for decision boundary points on lines connecting randomly
        # sampled points of majority & minority class
        n_samples = int(self.n_connecting_keypoints)
        from_idx = list(random.sample(list(np.arange(len(self.Xminor))), n_samples))
        to_idx = list(random.sample(list(np.arange(len(self.Xmajor))), n_samples))

        # optimize to find new db keypoints between minority and majority class
        self._linear_decision_boundary_optimization(
            from_idx, to_idx, all_combinations=False, step=2)

        if len(self.decision_boundary_points_2d) < 2:
            print("Failed to find initial decision boundary. Retrying... If this keeps happening, increasing the acceptance threshold might help. Also, make sure the classifier is able to find a point with 0.5 prediction probability (usually requires an even number of estimators/neighbors/etc).")
            return self.fit(X, y, training_indices)

        # step 3. look for decision boundary points between already known db
        # points that are too distant (search on connecting line first, then on
        # surrounding hypersphere surfaces)
        edges, gap_distances, gap_probability_scores = self._get_sorted_db_keypoint_distances()  # find gaps
        self.nn_model_decision_boundary_points = NearestNeighbors(n_neighbors=2)
        self.nn_model_decision_boundary_points.fit(self.decision_boundary_points)

        i = 0
        retries = 0
        while i < self.n_interpolated_keypoints:
            if self.verbose:
                print("Step 3/{}:{}/".format(self.steps, i, self.n_interpolated_keypoints))
            if self.random_gap_selection:
                # randomly sample from sorted DB keypoint gaps?
                gap_idx = np.random.choice(len(gap_probability_scores),
                                           1, p=gap_probability_scores)[0]
            else:
                # get largest gap
                gap_idx = 0
            from_point = self.decision_boundary_points[edges[gap_idx][0]]
            to_point = self.decision_boundary_points[edges[gap_idx][1]]

            # optimize to find new db keypoint along line connecting two db keypoints
            # with large gap
            db_point = self._find_decision_boundary_along_line(
                from_point, to_point, penalize_tangent_distance=self.penalties_enabled)

            if self.decision_boundary_distance(db_point) > self.acceptance_threshold:
                if self.verbose:
                    print("No good solution along straight line - trying to find decision boundary on hypersphere surface around known decision boundary point")

                # hypersphere radius half the distance between from and to db keypoints
                R = euclidean(from_point, to_point) / 2.0
                # search around either source or target keypoint, with 0.5 probability,
                # hoping to find decision boundary in between
                if random.random() > 0.5:
                    from_point = to_point

                # optimize to find new db keypoint on hypersphere surphase around known keypoint
                db_point = self._find_decision_boundary_on_hypersphere(from_point, R)
                if self.decision_boundary_distance(db_point) <= self.acceptance_threshold:
                    db_point2d = self.dimensionality_reduction.transform([db_point])[0]
                    self.decision_boundary_points.append(db_point)
                    self.decision_boundary_points_2d.append(db_point2d)
                    i += 1
                    retries = 0
                else:
                    retries += 1
                    if retries > self.hypersphere_max_retry_budget:
                        i += 1
                        dist = self.decision_boundary_distance(db_point)
                        msg = "Found point is too distant from decision boundary ({}), but retry budget exceeded ({})"
                        print(msg.format(dist, self.hypersphere_max_retry_budget))
                    elif self.verbose:
                        dist = self.decision_boundary_distance(db_point)
                        print("Found point is too distant from decision boundary ({}) retrying...".format(dist))

            else:
                db_point2d = self.dimensionality_reduction.transform([db_point])[0]
                self.decision_boundary_points.append(db_point)
                self.decision_boundary_points_2d.append(db_point2d)
                i += 1
                retries = 0

            edges, gap_distances, gap_probability_scores = self._get_sorted_db_keypoint_distances()  # reload gaps

        self.decision_boundary_points = np.array(self.decision_boundary_points)
        self.decision_boundary_points_2d = np.array(self.decision_boundary_points_2d)

        if self.verbose:
            print("Done fitting! Found {} decision boundary keypoints.".format(
                len(self.decision_boundary_points)))

        return self
Example #20
    def _preprocess_neighbors(self, rebuild=False, save=True):
        neighbors_model_path = os.path.join(
            self.selected_dir,
            "neighbors_model-step" + str(self.model.step) + ".pkl")
        neighbors_path = os.path.join(
            self.selected_dir,
            "neighbors-step" + str(self.model.step) + ".npy")
        neighbors_weight_path = os.path.join(
            self.selected_dir,
            "neighbors_weight-step" + str(self.model.step) + ".npy")
        test_neighbors_path = os.path.join(
            self.selected_dir,
            "test_neighbors-step" + str(self.model.step) + ".npy")
        test_neighbors_weight_path = os.path.join(
            self.selected_dir,
            "test_neighbors_weight-step" + str(self.model.step) + ".npy")
        if os.path.exists(neighbors_model_path) and \
                os.path.exists(neighbors_path) and \
                os.path.exists(test_neighbors_path) and not rebuild and not DEBUG:
            logger.info("neighbors and neighbor_weight exist!!!")
            self.neighbors = np.load(neighbors_path)
            self.neighbors_weight = np.load(neighbors_weight_path)
            self.test_neighbors = np.load(test_neighbors_path)
            return
        logger.info("neighbors and neighbor_weight "
                    "do not exist, preprocessing!")
        train_X = self.get_full_train_X()
        train_num = train_X.shape[0]
        train_y = self.get_full_train_label()
        train_y = np.array(train_y)
        test_X = self.get_test_X()
        test_num = test_X.shape[0]
        self.max_neighbors = min(len(train_y), self.max_neighbors)
        logger.info("data shape: {}, labeled_num: {}".format(
            str(train_X.shape), sum(train_y != -1)))
        nn_fit = NearestNeighbors(n_neighbors=7, n_jobs=-4).fit(train_X)
        logger.info("nn construction finished!")
        neighbor_result = nn_fit.kneighbors_graph(
            nn_fit._fit_X,
            self.max_neighbors,
            # 2,
            mode="distance")
        test_neighbors_result = nn_fit.kneighbors_graph(test_X,
                                                        self.max_neighbors,
                                                        mode="distance")
        logger.info("neighbor_result got!")
        self.neighbors, self.neighbors_weight = self.csr_to_impact_matrix(
            neighbor_result, train_num, self.max_neighbors)
        self.test_neighbors, test_neighbors_weight = self.csr_to_impact_matrix(
            test_neighbors_result, test_num, self.max_neighbors)

        logger.info("preprocessed neighbors got!")

        # save neighbors information
        if save:
            pickle_save_data(neighbors_model_path, nn_fit)
            np.save(neighbors_path, self.neighbors)
            np.save(neighbors_weight_path, self.neighbors_weight)
            np.save(test_neighbors_path, self.test_neighbors)
            np.save(test_neighbors_weight_path, test_neighbors_weight)
        return self.neighbors, self.test_neighbors
Example #21
    def _generate_testpoints(self, tries=100):
        """Generate random demo points around decision boundary keypoints
        """
        nn_model = NearestNeighbors(n_neighbors=3)
        nn_model.fit(self.decision_boundary_points)

        nn_model_2d = NearestNeighbors(n_neighbors=2)
        nn_model_2d.fit(self.decision_boundary_points_2d)
        #max_radius = 2*np.max([nn_model_2d.kneighbors([self.decision_boundary_points_2d[i]])[0][0][1] for i in range(len(self.decision_boundary_points_2d))])

        self.X_testpoints = np.zeros((0, self.X.shape[1]))
        self.y_testpoints = []
        for i in range(len(self.decision_boundary_points)):
            if self.verbose:
                msg = "Generating testpoint for plotting {}/{}"
                print(msg.format(i, len(self.decision_boundary_points)))
            testpoints = np.zeros((0, self.X.shape[1]))
            # generate Np points in Gaussian around decision_boundary_points[i] with
            # radius depending on the distance to the next point
            d, idx = nn_model.kneighbors([self.decision_boundary_points[i]])
            radius = d[0][1] if d[0][1] != 0 else d[0][2]
            if radius == 0:
                radius = np.mean(pdist(self.decision_boundary_points_2d))
            max_radius = radius * 2
            radius /= 5.0

            # add demo points, keeping some balance
            max_imbalance = 5.0
            y_testpoints = []
            for j in range(self.n_generated_testpoints_per_keypoint - 2):
                c_radius = radius
                freq = np.asarray(np.unique(y_testpoints, return_counts=True), dtype=float).T  # itemfreq was removed from SciPy
                imbalanced = freq.shape[0] != 0
                if freq.shape[0] == 2 and (freq[0, 1] / freq[1, 1] < 1.0 / max_imbalance or freq[0, 1] / freq[1, 1] > max_imbalance):
                    imbalanced = True

                for try_i in range(tries):
                    testpoint = np.random.normal(self.decision_boundary_points[
                                                 i], radius, (1, self.X.shape[1]))
                    try:
                        testpoint2d = self.dimensionality_reduction.transform(testpoint)[0]
                    except:  # DR can fail e.g. if NMF gets negative values
                        testpoint = []
                        continue
                    # demo point needs to be close to current key point
                    if euclidean(testpoint2d, self.decision_boundary_points_2d[i]) <= max_radius:
                        if not imbalanced:  # needs to be not imbalanced
                            break
                        y_pred = self.classifier.predict(testpoint)[0]
                        # imbalanced but this would actually improve things
                        if freq.shape[0] == 2 and freq[y_pred, 1] < freq[1 - y_pred, 1]:
                            break
                    c_radius /= 2.0
                if len(testpoint) != 0:
                    testpoints = np.vstack((testpoints, testpoint))
                    y_testpoints.append(self.classifier.predict(testpoint)[0])

            self.X_testpoints = np.vstack((self.X_testpoints, testpoints))
            self.y_testpoints = np.hstack((self.y_testpoints, y_testpoints))
            self.X_testpoints_2d = self.dimensionality_reduction.transform(self.X_testpoints)

        idx_within_bounds = np.where((self.X_testpoints_2d[:, 0] >= self.X2d_xmin) & (self.X_testpoints_2d[:, 0] <= self.X2d_xmax)
                                     & (self.X_testpoints_2d[:, 1] >= self.X2d_ymin) & (self.X_testpoints_2d[:, 1] <= self.X2d_ymax))[0]
        self.X_testpoints = self.X_testpoints[idx_within_bounds]
        self.y_testpoints = self.y_testpoints[idx_within_bounds]
        self.X_testpoints_2d = self.X_testpoints_2d[idx_within_bounds]
Example #22
			'MLPRegressor':MLPRegressor(),
			'MaxAbsScaler':MaxAbsScaler(),
			'MeanShift':MeanShift(),
			'MinCovDet':MinCovDet(),
			'MinMaxScaler':MinMaxScaler(),
			'MiniBatchDictionaryLearning':MiniBatchDictionaryLearning(),
			'MiniBatchKMeans':MiniBatchKMeans(),
			'MiniBatchSparsePCA':MiniBatchSparsePCA(),
			'MultiTaskElasticNet':MultiTaskElasticNet(),
			'MultiTaskElasticNetCV':MultiTaskElasticNetCV(),
			'MultiTaskLasso':MultiTaskLasso(),
			'MultiTaskLassoCV':MultiTaskLassoCV(),
			'MultinomialNB':MultinomialNB(),
			'NMF':NMF(),
			'NearestCentroid':NearestCentroid(),
			'NearestNeighbors':NearestNeighbors(),
			'Normalizer':Normalizer(),
			'NuSVC':NuSVC(),
			'NuSVR':NuSVR(),
			'Nystroem':Nystroem(),
			'OAS':OAS(),
			'OneClassSVM':OneClassSVM(),
			'OrthogonalMatchingPursuit':OrthogonalMatchingPursuit(),
			'OrthogonalMatchingPursuitCV':OrthogonalMatchingPursuitCV(),
			'PCA':PCA(),
			'PLSCanonical':PLSCanonical(),
			'PLSRegression':PLSRegression(),
			'PLSSVD':PLSSVD(),
			'PassiveAggressiveClassifier':PassiveAggressiveClassifier(),
			'PassiveAggressiveRegressor':PassiveAggressiveRegressor(),
			'Perceptron':Perceptron(),

# coding:utf-8
'''
Created on Jan 11, 2020

@author: root
'''
from sklearn.neighbors import NearestNeighbors
import numpy as np
import operator
from com.msb.knn.KNNDateOnHand import *

datingDataMat, datingLabels = file2matrix('../../../data/datingTestSet2.txt')
normMat, ranges, minVals = autoNorm(datingDataMat)

nbrs = NearestNeighbors(n_neighbors=10).fit(normMat)
input_man = [[50000, 8, 9.5]]
S = (input_man - minVals) / ranges
distances, indices = nbrs.kneighbors(S)
# classCount   key: class label    value: number of samples of that class among the neighbours

classCount = {}
for i in range(10):
    voteLabel = datingLabels[indices[0][i]]
    classCount[voteLabel] = classCount.get(voteLabel, 0) + 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
resultList = ['not interested', 'seems OK', 'very attractive']
print(resultList[sortedClassCount[0][0] - 1])