Example #1
# Assumed imports; KMeans is taken to be scikit-learn's, while DimReducer,
# read and split are project-local helpers defined elsewhere in the module.
import logging

import numpy as np
from sklearn.cluster import KMeans


class T5_KMEANS:
    def __init__(self, ndims=5, nn=25, k=10000, verbose=False):
        self.logger = logging.getLogger("T5Clustering")
        logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                            level=logging.DEBUG)
        self.ndims = ndims
        self.nn = nn
        self.k = k
        self.data = None
        self.train_data = None
        self.test_data = None
        self.data_embedded = None
        self.sampling = None
        self.reducer = DimReducer(n_components=ndims, n_neighbors=nn)
        self.kmeans = KMeans(n_clusters=k, verbose=verbose)

    def split(self, fname, sampling=False, train_size=1000000, test_size=None):
        self.data = read(fname, scale=True)
        self.sampling = sampling
        if sampling:
            # split() is a module-level helper assumed to take the data and
            # return a train/test partition
            self.train_data, self.test_data = split(self.data,
                                                    train_size=train_size,
                                                    test_size=test_size)
        else:
            self.train_data = self.data
        return self

    def reduce(self):
        self.logger.info("Dimensionality reduction (UMAP): %s",
                         self.reducer.umap.get_params())
        self.reducer.fit(self.train_data)
        del self.train_data
        # Transform in batches of at most 500k rows to bound memory use
        step = min(500000, len(self.data))
        result = self.reducer.reduce(self.data[0:step], as_df=False)
        for i in range(step, len(self.data), step):
            result = np.append(result,
                               self.reducer.reduce(self.data[i:i + step],
                                                   as_df=False),
                               axis=0)
            self.logger.debug("Dim reduce batch : %d", i)
        self.data = None
        self.data_embedded = result
        return self

    def cluster(self):
        self.logger.info("Clustering ... (KMeans)")
        self.kmeans.fit(self.data_embedded)
        self.logger.info("Clusters: %d", len(self.kmeans.cluster_centers_))
        return self
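
A minimal end-to-end sketch of how the fluent interface above chains together; the file name embeddings.csv is hypothetical, and read, split and DimReducer are assumed project-local helpers:

# Hypothetical run: read + scale, UMAP-reduce in batches, then KMeans-cluster.
pipeline = (T5_KMEANS(ndims=5, nn=25, k=10000)
            .split("embeddings.csv", sampling=True, train_size=1000000)
            .reduce()
            .cluster())
labels = pipeline.kmeans.labels_  # one cluster id per embedded row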
Example #2
# Assumed imports for this snippet.
import warnings

import cv2
import numpy as np
from tqdm import tqdm


def kmeans_prop_cuda_batch(fname, sample_rate, slice_w, slice_h, down_sample=0.25, num_clusters=8, batch_size=1):
    """Produce a "most frequent color" visualization of a video file by
    iterating over its frames and clustering the sampled pixels with a
    CUDA-accelerated k-means (`batch_size` is accepted but currently unused).
    """

    from cuml.cluster import KMeans as KMeansCuda

    cap = cv2.VideoCapture(fname)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    num_slices = total_frames // sample_rate

    vis_output = np.zeros((slice_h, slice_w * num_slices, 3), dtype='uint8')
    cluster_model = KMeansCuda(n_clusters=num_clusters, init='scalable-k-means++', n_init=20)

    for i in tqdm(range(total_frames)):
        ret = cap.grab()
        if (i % sample_rate) == 0:
            ret, frame = cap.retrieve()
            # OpenCV decodes frames as BGR; convert to RGB for the output colors
            temp_f = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            reduc = cv2.resize(temp_f, None, fx=down_sample, fy=down_sample, interpolation=cv2.INTER_CUBIC).astype('float32')
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                cluster_model.fit(reduc.reshape((-1,3)))
                colors = np.around(cluster_model.cluster_centers_).astype('uint8')
            lbl, counts = np.unique(cluster_model.labels_, return_counts=True)
            cut = slice_h / np.sum(counts)
            ordering = np.argsort(counts)[::-1]
            tind = int(i // sample_rate)
            prev_ind = 0
            # Walk clusters from most to least frequent; avoid reusing the
            # outer frame counter i as the loop variable
            for val in ordering:
                height = int(round(cut * counts[val]))
                l_ind = (tind * slice_w)
                r_ind = (tind + 1) * slice_w
                vis_output[prev_ind:prev_ind+height, l_ind:r_ind] = colors[val]
                prev_ind += height
            
    cap.release()
    return vis_output
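
A short usage sketch; the path clip.mp4 is hypothetical, and a CUDA-capable GPU with cuML installed is assumed:

# Hypothetical call: cluster every 30th frame into 8 colors, 4 px per slice.
bar = kmeans_prop_cuda_batch("clip.mp4", sample_rate=30, slice_w=4, slice_h=256)
# The bar holds RGB values; convert back to BGR for OpenCV's imwrite.
cv2.imwrite("color_bar.png", cv2.cvtColor(bar, cv2.COLOR_RGB2BGR))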
Example #3
# Assumed imports for this snippet.
from random import sample

import numpy as np
from sklearn.cluster import KMeans


def make_clusters_KMeans(dat_to_cluster, nb_clust, n=15000):
    estimator = KMeans(n_clusters=nb_clust)
    if n < len(dat_to_cluster):
        # Too much data to fit directly: fit on a random subsample of n
        # points, then predict labels for the full data set in batches of n.
        s = np.asarray(sample(list(dat_to_cluster), k=n), dtype=float)
        estimator = estimator.fit(s)
        data = np.asarray(list(dat_to_cluster))
        res = []
        for i in range(0, len(data), n):
            res.extend(estimator.predict(data[i:i + n]))
    else:
        res = estimator.fit_predict(dat_to_cluster)
    return res, estimator
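
A quick sketch on synthetic data, sized so the subsample-then-predict branch is exercised (all values are illustrative):

# Hypothetical demo: 40,000 random 2-D points, labeled in batches of 15,000.
rng = np.random.default_rng(0)
points = rng.normal(size=(40000, 2))
labels, model = make_clusters_KMeans(points, nb_clust=4, n=15000)
print(len(labels), model.cluster_centers_.shape)  # 40000 (4, 2)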
Example #4
    def fit(self, X, y=None) -> "KBinsDiscretizer":
        """
        Fit the estimator.

        Parameters
        ----------
        X : numeric array-like, shape (n_samples, n_features)
            Data to be discretized.

        y : None
            Ignored. This parameter exists only for compatibility with
            :class:`sklearn.pipeline.Pipeline`.

        Returns
        -------
        self
        """
        # In this module np is assumed to alias CuPy (import cupy as np) and
        # cpu_np host NumPy, hence np.asnumpy() below
        X = self._validate_data(X, dtype='numeric')

        valid_encode = ('onehot', 'onehot-dense', 'ordinal')
        if self.encode not in valid_encode:
            raise ValueError("Valid options for 'encode' are {}. "
                             "Got encode={!r} instead.".format(
                                 valid_encode, self.encode))
        valid_strategy = ('uniform', 'quantile', 'kmeans')
        if self.strategy not in valid_strategy:
            raise ValueError("Valid options for 'strategy' are {}. "
                             "Got strategy={!r} instead.".format(
                                 valid_strategy, self.strategy))

        n_features = X.shape[1]
        n_bins = self._validate_n_bins(n_features)
        n_bins = np.asnumpy(n_bins)

        bin_edges = cpu_np.zeros(n_features, dtype=object)
        for jj in range(n_features):
            column = X[:, jj]
            col_min, col_max = column.min(), column.max()

            if col_min == col_max:
                warnings.warn("Feature %d is constant and will be "
                              "replaced with 0." % jj)
                n_bins[jj] = 1
                bin_edges[jj] = np.array([-np.inf, np.inf])
                continue

            if self.strategy == 'uniform':
                bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)

            elif self.strategy == 'quantile':
                quantiles = np.linspace(0, 100, n_bins[jj] + 1)
                bin_edges[jj] = np.asarray(np.percentile(column, quantiles))
                # Workaround for https://github.com/cupy/cupy/issues/4451
                # This should be removed as soon as a fix is available in cupy
                # in order to limit alterations in the included sklearn code
                bin_edges[jj][-1] = col_max

            elif self.strategy == 'kmeans':
                # Deterministic initialization with uniform spacing
                uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1)
                init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5

                # 1D k-means procedure
                km = KMeans(n_clusters=n_bins[jj],
                            init=init,
                            n_init=1,
                            output_type='cupy')
                km = km.fit(column[:, None])
                with using_output_type('cupy'):
                    centers = km.cluster_centers_[:, 0]
                # Must sort, centers may be unsorted even with sorted init
                centers.sort()
                bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5
                bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max]

            # Remove bins whose width is too small (i.e., <= 1e-8)
            if self.strategy in ('quantile', 'kmeans'):
                mask = np.diff(bin_edges[jj], prepend=-np.inf) > 1e-8
                bin_edges[jj] = bin_edges[jj][mask]
                if len(bin_edges[jj]) - 1 != n_bins[jj]:
                    warnings.warn('Bins whose width are too small (i.e., <= '
                                  '1e-8) in feature %d are removed. Consider '
                                  'decreasing the number of bins.' % jj)
                    n_bins[jj] = len(bin_edges[jj]) - 1

        self.bin_edges_ = bin_edges
        self.n_bins_ = n_bins

        if 'onehot' in self.encode:
            self._encoder = OneHotEncoder(categories=np.array(
                [np.arange(i) for i in self.n_bins_]),
                                          sparse=self.encode == 'onehot',
                                          output_type='cupy')
            # Fit the OneHotEncoder with toy datasets
            # so that it's ready for use after the KBinsDiscretizer is fitted
            self._encoder.fit(np.zeros((1, len(self.n_bins_)), dtype=int))

        return self
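
A hedged usage sketch, assuming this fit belongs to cuML's scikit-learn-compatible KBinsDiscretizer (the import path follows recent cuML releases; the data is illustrative):

# Hypothetical GPU data: 1000 samples, 3 features in [0, 1).
import cupy as cp
from cuml.preprocessing import KBinsDiscretizer

X = cp.random.rand(1000, 3).astype(cp.float32)
est = KBinsDiscretizer(n_bins=5, encode='ordinal', strategy='kmeans')
Xt = est.fit(X).transform(X)  # each value replaced by its bin index in [0, 5)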