def generate_rings_dataset(n_train):
    rng = np.random.RandomState()
    obs = n_train           # number of points ultimately returned
    n_train = n_train * 20  # oversample 20x, then subsample below
    n_samples4 = n_samples3 = n_samples2 = n_train // 4
    n_samples1 = n_train - n_samples4 - n_samples3 - n_samples2

    # so as not to have the first point = last point, we set endpoint=False
    linspace4 = np.linspace(0, 2 * np.pi, n_samples4, endpoint=False)
    linspace3 = np.linspace(0, 2 * np.pi, n_samples3, endpoint=False)
    linspace2 = np.linspace(0, 2 * np.pi, n_samples2, endpoint=False)
    linspace1 = np.linspace(0, 2 * np.pi, n_samples1, endpoint=False)

    circ4_x = np.cos(linspace4)
    circ4_y = np.sin(linspace4)
    circ3_x = np.cos(linspace3) * 0.75
    circ3_y = np.sin(linspace3) * 0.75
    circ2_x = np.cos(linspace2) * 0.5
    circ2_y = np.sin(linspace2) * 0.5
    circ1_x = np.cos(linspace1) * 0.25
    circ1_y = np.sin(linspace1) * 0.25

    X = np.vstack([
        np.hstack([circ4_x, circ3_x, circ2_x, circ1_x]),
        np.hstack([circ4_y, circ3_y, circ2_y, circ1_y])
    ]).T * 3.0
    X = util_shuffle(X, random_state=rng)

    # Add noise
    X = X + rng.normal(scale=0.08, size=X.shape)
    # subsample `obs` points (with replacement) from the oversampled set
    inds = rng.choice(n_train, size=obs)
    X = X[inds]
    return X.astype("float32")
Example #2
def rings(N=10000, seed=1):
    """ Generates N elements of rings dataset with set seed. """
    np.random.seed(seed)
    n_samples4 = n_samples3 = n_samples2 = N // 4
    n_samples1 = N - n_samples4 - n_samples3 - n_samples2

    # so as not to have the first point = last point, we set endpoint=False
    linspace4 = np.linspace(0, 2 * np.pi, n_samples4, endpoint=False)
    linspace3 = np.linspace(0, 2 * np.pi, n_samples3, endpoint=False)
    linspace2 = np.linspace(0, 2 * np.pi, n_samples2, endpoint=False)
    linspace1 = np.linspace(0, 2 * np.pi, n_samples1, endpoint=False)

    circ4_x = np.cos(linspace4)
    circ4_y = np.sin(linspace4)
    circ3_x = np.cos(linspace3) * 0.75
    circ3_y = np.sin(linspace3) * 0.75
    circ2_x = np.cos(linspace2) * 0.5
    circ2_y = np.sin(linspace2) * 0.5
    circ1_x = np.cos(linspace1) * 0.25
    circ1_y = np.sin(linspace1) * 0.25

    X = np.vstack([
        np.hstack([circ4_x, circ3_x, circ2_x, circ1_x]),
        np.hstack([circ4_y, circ3_y, circ2_y, circ1_y])
    ]).T * 3.0
    X = util_shuffle(X)

    # Add noise
    X = X + np.random.normal(scale=0.08, size=X.shape)

    return X.astype('float32')
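Because rings() seeds the global NumPy RNG, calls with the same seed reproduce the same draw; a quick sanity check:

X1 = rings(N=2000, seed=1)
X2 = rings(N=2000, seed=1)
assert np.allclose(X1, X2)  # identical seeds give identical datasets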
Example #3
def make_circles(n_samples=500,
                 n_classes=5,
                 shuffle=True,
                 noise=None,
                 random_state=None,
                 factor_step=0.2):

    M = n_samples // n_classes
    generator = check_random_state(random_state)
    linspace = np.linspace(0, 2 * np.pi, M + 1)[:-1]

    def _get_X(i):
        factor = 1 + i * factor_step
        circ_x = np.cos(linspace) * factor
        circ_y = np.sin(linspace) * factor
        return np.vstack((circ_x, circ_y, np.ones(M))).T

    def _get_y(i):
        return i * np.ones(M, dtype=np.intp)

    X = np.vstack([_get_X(i) for i in range(n_classes)])
    y = np.hstack([_get_y(i) for i in range(n_classes)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        eps = generator.normal(scale=noise, size=X.shape)
        eps[:, 2] *= 10 * n_classes  # Extra noise on the third dimension
        X += eps

    return Bunch(data=X, target=y)
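A usage sketch, assuming sklearn.utils.Bunch and check_random_state are in scope as the snippet implies; note the returned data carries a third, constant-one column that only receives noise:

ds = make_circles(n_samples=500, n_classes=5, noise=0.05, random_state=0)
print(ds.data.shape, ds.target.shape)  # (500, 3) (500,)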
Example #4
    def sample(self, batch_size):
        n_samples4 = n_samples3 = n_samples2 = batch_size // 4
        n_samples1 = batch_size - n_samples4 - n_samples3 - n_samples2

        # so as not to have the first point = last point, we set endpoint=False
        linspace4 = np.linspace(0, 2 * np.pi, n_samples4, endpoint=False)
        linspace3 = np.linspace(0, 2 * np.pi, n_samples3, endpoint=False)
        linspace2 = np.linspace(0, 2 * np.pi, n_samples2, endpoint=False)
        linspace1 = np.linspace(0, 2 * np.pi, n_samples1, endpoint=False)

        circ4_x = np.cos(linspace4)
        circ4_y = np.sin(linspace4)
        circ3_x = np.cos(linspace3) * 0.75
        circ3_y = np.sin(linspace3) * 0.75
        circ2_x = np.cos(linspace2) * 0.5
        circ2_y = np.sin(linspace2) * 0.5
        circ1_x = np.cos(linspace1) * 0.25
        circ1_y = np.sin(linspace1) * 0.25

        X = np.vstack([
            np.hstack([circ4_x, circ3_x, circ2_x, circ1_x]),
            np.hstack([circ4_y, circ3_y, circ2_y, circ1_y])
        ]).T * 3.0
        X = util_shuffle(X)

        # Add noise
        X = X + np.random.normal(scale=0.08, size=X.shape)

        return torch.from_numpy(X.astype("float32"))
Example #5
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
                 factor1=0.8, factor2=0.5):

    per_samples = n_samples // 3
    n_samples_in = per_samples
    n_samples_mid = per_samples
    n_samples_out = n_samples - n_samples_in - n_samples_mid
    linspace_out = np.linspace(0, 2 * np.pi, n_samples_out, endpoint=False)
    linspace_mid = np.linspace(0, 2 * np.pi, n_samples_mid, endpoint=False)
    linspace_in = np.linspace(0, 2 * np.pi, n_samples_in, endpoint=False)

    outer_circ_x = np.cos(linspace_out)
    outer_circ_y = np.sin(linspace_out)
    outer_data = np.vstack([outer_circ_x, outer_circ_y]).T
    mid_circ_x = np.cos(linspace_mid) * factor1
    mid_circ_y = np.sin(linspace_mid) * factor1
    mid_data = np.vstack([mid_circ_x, mid_circ_y]).T
    inner_circ_x = np.cos(linspace_in) * factor2
    inner_circ_y = np.sin(linspace_in) * factor2
    inner_data = np.vstack([inner_circ_x, inner_circ_y]).T

    X = np.vstack([outer_data, mid_data, inner_data])

    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_mid, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp) * 2])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=random_state)

    # add Gaussian noise if requested, mirroring the sibling generators
    if noise is not None:
        X += check_random_state(random_state).normal(scale=noise, size=X.shape)

    return X, y
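Usage sketch: n_samples is split roughly into thirds, with labels 0/1/2 marking the outer, middle, and inner circles:

X, y = make_circles(n_samples=300, noise=0.05, random_state=0)
print(X.shape, np.bincount(y))  # (300, 2) [100 100 100]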
Example #6
def gen_rings(rng=None, batch_size=200):
    if rng is None:
        rng = np.random.RandomState()

    n_samples4 = n_samples3 = n_samples2 = batch_size // 4
    n_samples1 = batch_size - n_samples4 - n_samples3 - n_samples2

    # so as not to have the first point = last point, we set endpoint=False
    linspace4 = np.linspace(0, 2 * np.pi, n_samples4, endpoint=False)
    linspace3 = np.linspace(0, 2 * np.pi, n_samples3, endpoint=False)
    linspace2 = np.linspace(0, 2 * np.pi, n_samples2, endpoint=False)
    linspace1 = np.linspace(0, 2 * np.pi, n_samples1, endpoint=False)

    circ4_x = np.cos(linspace4)
    circ4_y = np.sin(linspace4)
    circ3_x = np.cos(linspace3) * 0.75
    circ3_y = np.sin(linspace3) * 0.75
    circ2_x = np.cos(linspace2) * 0.5
    circ2_y = np.sin(linspace2) * 0.5
    circ1_x = np.cos(linspace1) * 0.25
    circ1_y = np.sin(linspace1) * 0.25

    X = np.vstack([
        np.hstack([circ4_x, circ3_x, circ2_x, circ1_x]),
        np.hstack([circ4_y, circ3_y, circ2_y, circ1_y])
    ]).T * 3.0
    X = util_shuffle(X, random_state=rng)

    # Add noise
    X = X + rng.normal(scale=0.08, size=X.shape)

    return X.astype("float32")
Example #7
def make_three_moons(n_samples=1500, shuffle=True, noise=None, random_state=None):
    """Make two interleaving half circles

    A simple toy dataset to visualize clustering and classification
    algorithms. Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=1500)
        The total number of points generated.

    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.

    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels (0, 1 or 2) for class membership of each sample.
    """

    n_samples_one = n_samples // 3

    generator = check_random_state(random_state)

    one_circ_x = np.cos(np.linspace(0, np.pi, n_samples_one))
    one_circ_y = np.sin(np.linspace(0, np.pi, n_samples_one)) -.5
    two_circ_x = np.cos(np.linspace(0, np.pi, n_samples_one)) + 2.2
    two_circ_y = np.sin(np.linspace(0, np.pi, n_samples_one)) -.5
    three_circ_x = np.cos(np.linspace(0, np.pi, n_samples_one)) -2.2
    three_circ_y = np.sin(np.linspace(0, np.pi, n_samples_one)) -.5

    X = np.vstack((np.append(np.append(one_circ_x, two_circ_x), three_circ_x),
                   np.append(np.append(one_circ_y, two_circ_y), three_circ_y))).T
    y = np.hstack([np.zeros(n_samples_one, dtype=np.intp),
                   np.ones(n_samples_one, dtype=np.intp),
                   np.ones(n_samples_one, dtype=np.intp) * 2])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y
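Usage sketch:

X, y = make_three_moons(n_samples=1500, noise=0.05, random_state=0)
print(X.shape, np.bincount(y))  # (1500, 2) [500 500 500]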
Example #8
def my_make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
    """This is a slightly modified function as compared to the sklearn version.
    This functions makes two interleaving half circles
    A simple toy dataset to visualize clustering and classification
    algorithms. Read more in the :ref:`User Guide <sample_generators>`. 
    In this version, the position of sample points on the half circles is also
    random.
    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.
    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """

    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out

    generator = check_random_state(random_state)

    angle = generator.uniform(size=(n_samples_out, )) * np.pi
    outer_circ_x = np.cos(angle)
    outer_circ_y = np.sin(angle)
    angle = generator.uniform(size=(n_samples_in, )) * np.pi
    inner_circ_x = 1 - np.cos(angle)
    inner_circ_y = 1 - np.sin(angle) - .5

    X = np.vstack(
        (np.append(outer_circ_x,
                   inner_circ_x), np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([
        np.zeros(n_samples_out, dtype=np.intp),
        np.ones(n_samples_in, dtype=np.intp)
    ])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y
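Unlike sklearn's make_moons, the sample positions along each arc are drawn uniformly at random rather than on a fixed grid; a short sketch:

X, y = my_make_moons(n_samples=200, noise=0.1, random_state=42)
print(X.shape, int(y.sum()))  # (200, 2) 100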
Example #9
def simple_split(X_in, y_in, percent, n_class, shuffle=True):
    """
    This function splits a annotated dataset in two pieces. The relative size 
    of these is specified. The split is stratified.
    Parameters
    ----------    
     - X : a 2D numpy array of floats
         It contains training examples.
    
    - y : 1D numpy array of int
        It contains class labels of the training examples stored in X.
        
    - percent : float in the [0,1] range
        the proportion of the data in the first chunck of data.
        
    - n_class : int
        The number of classes.
    Returns
    -------    
    - X1 : a 2D numpy array of floats
        It contains the first chunck of training examples
        
    - y1 : 1D numpy array of int
        It containsclass labels of the training examples stored in X1.
        
    - X2 : a 2D numpy array of floats
        It contains the second chunck of training examples
        
    - y2 : 1D numpy array of int
        It containsclass labels of the training examples stored in X2.
                
    """
    cards = np.zeros((n_class, ))
    inds = []
    if shuffle:
        X, y = util_shuffle(X_in, y_in, random_state=0)
    else:
        X = X_in
        y = y_in
    for i in range(np.min(y), np.min(y) + n_class):
        cards[i - np.min(y)] = np.sum(y == i)
        proportion = int(percent * cards[i - np.min(y)])
        if (proportion == 0):
            proportion = 1
        inds = inds + list(np.where(y == i)[0][:proportion])
    X1 = X[inds]
    y1 = y[inds]
    inds2 = np.setdiff1d(range(len(y)), inds)
    X2 = X[inds2]
    y2 = y[inds2]
    return X1, y1, X2, y2
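A sketch with hypothetical toy labels (the loop assumes labels are consecutive integers starting at y.min()):

X = np.arange(20, dtype=float).reshape(10, 2)
y = np.array([0] * 5 + [1] * 5)
X1, y1, X2, y2 = simple_split(X, y, percent=0.6, n_class=2)
print(len(y1), len(y2))  # 6 4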
Example #10
def assert_shuffle_match(X,
                         X_tr,
                         groups=None,
                         group_names=None,
                         select=None,
                         random_state=0):
    ge = GroupExtractor(select=select, groups=groups, group_names=group_names)
    shuffled = {"orig": ge.fit_transform(X), "tran": ge.fit_transform(X_tr)}
    gr = GroupRemover(select=select, groups=groups, group_names=group_names)
    same = {"orig": gr.fit_transform(X), "tran": gr.fit_transform(X_tr)}
    assert np.allclose(same["orig"], same["tran"])
    assert np.allclose(
        util_shuffle(shuffled["orig"], random_state=random_state),
        shuffled["tran"])
Example #11
    def transform(self, X):
        """Transform the input data, shuffling the desired groups.

        Parameters
        ----------
        X : numpy.ndarray
            The feature matrix.
        """
        X = check_array(X,
                        copy=True,
                        dtype=[np.float32, np.float64, int],
                        force_all_finite=False)
        groups = check_groups(groups=self.groups_, X=X, allow_overlap=True)
        if self.select_ is None:
            return X

        generator = check_random_state(self.random_state)
        idx = np.concatenate([groups[e] for e in self.select_])
        shuffle_view = X[:, idx]
        shuffle_view = util_shuffle(shuffle_view, random_state=generator)
        X[:, idx] = shuffle_view
        return X
Example #12
def data_gen(data, n_samples, noise=None, rng=np.random):
    if data == 'swissroll':
        if noise is None:
            noise = 1.0

        data = sklearn.datasets.make_swiss_roll(n_samples=n_samples,
                                                noise=noise)[0]
        data = data.astype(np.float32)[:, [0, 2]]
        data /= 5

        return data, noise

    elif data == 'gaussian_1':
        return rng.multivariate_normal([1.0, 1.0],
                                       [[0.09, 0.0], [0.0, 0.09]],
                                       n_samples), None

    elif data == 'gaussian_2':
        return rng.multivariate_normal([1.0, 1.0],
                                       [[0.25, 0.0], [0.0, 0.25]],
                                       n_samples), None

    elif data == 'gaussian_3':
        return rng.multivariate_normal([1.0, 1.0],
                                       [[1.0, 0.0], [0.0, 1.0]],
                                       n_samples), None

    elif data == 'mixture_1':
        coins = rng.choice(3, n_samples, p=[1. / 3, 1. / 3, 1. / 3])
        bincounts = np.bincount(coins, minlength=3)

        means = [[0.0, 0.0], [2.0, 3.0], [2.0, -3.0]]
        covars = [[[0.1, 0.0], [0.0, 1.5]], [[1.0, 0.0], [0.0, 0.1]],
                  [[1.0, 0.0], [0.0, 0.1]]]

        samples = np.zeros((n_samples, 2))

        # fill each mixture component's block of rows, then shuffle
        offset = 0
        for i in range(3):
            samples[offset:(offset + bincounts[i])] = rng.multivariate_normal(
                means[i], covars[i], bincounts[i])
            offset += bincounts[i]

        return util_shuffle(samples, random_state=rng), None

    elif data == 'mixture_final':
        coins = rng.choice(4, n_samples,
                           p=[1. / 4, 1. / 4, 1. / 4, 1. / 4])
        bincounts = np.bincount(coins, minlength=4)

        means = [[-2.0, 0.0], [2.0, 0.0], [0.0, -2.0], [0.0, 2.0]]
        covars = [[[0.09, 0.0], [0.0, 1.0]], [[0.09, 0.0], [0.0, 1.0]],
                  [[1.0, 0.0], [0.0, 0.09]], [[1.0, 0.0], [0.0, 0.09]]]

        samples = np.zeros((n_samples, 2))

        offset = 0
        for i in range(4):
            samples[offset:(offset + bincounts[i])] = rng.multivariate_normal(
                means[i], covars[i], bincounts[i])
            offset += bincounts[i]

        return util_shuffle(samples, random_state=rng), None

    elif data == 'mixture_2':
        coins = rng.choice(2, n_samples, p=[1. / 2, 1. / 2])
        bincounts = np.bincount(coins, minlength=2)

        means = [[-3.0, -3.0], [3.0, 3.0]]
        covars = [[[0.09, 0.0], [0.0, 0.09]], [[0.09, 0.0], [0.0, 0.09]]]

        samples = np.zeros((n_samples, 2))

        offset = 0
        for i in range(2):
            samples[offset:(offset + bincounts[i])] = rng.multivariate_normal(
                means[i], covars[i], bincounts[i])
            offset += bincounts[i]

        return util_shuffle(samples, random_state=rng), None

    elif data == 'mixture_3':
        coins = rng.choice(2, n_samples, p=[1. / 2, 1. / 2])
        bincounts = np.bincount(coins, minlength=2)

        means = [[-1.0, -1.0], [1.0, 1.0]]
        covars = [[[0.25, 0.0], [0.0, 0.25]], [[0.09, 0.0], [0.0, 0.09]]]

        samples = np.zeros((n_samples, 2))

        offset = 0
        for i in range(2):
            samples[offset:(offset + bincounts[i])] = rng.multivariate_normal(
                means[i], covars[i], bincounts[i])
            offset += bincounts[i]

        return util_shuffle(samples, random_state=rng), None

    elif data == 'circles':
        if noise is None:
            noise = 0.08

        data = sklearn.datasets.make_circles(n_samples=n_samples,
                                             factor=.5,
                                             noise=noise)[0]
        data = data.astype(np.float32)
        # data *= 3
        data *= 5

        return data, noise

    elif data == 'circles_easy':
        if noise is None:
            noise = 0.02

        data = sklearn.datasets.make_circles(n_samples=n_samples,
                                             factor=0.4,
                                             noise=noise)[0]
        data = data.astype(np.float32)
        data *= 3
        # data *= 5

        return data, noise

    elif data == 'rings':
        if noise is None:
            noise = 0.08

        n_samples4 = n_samples3 = n_samples2 = n_samples // 4
        n_samples1 = n_samples - n_samples4 - n_samples3 - n_samples2

        # so as not to have the first point = last point, we set endpoint=False
        linspace4 = np.linspace(0, 2 * np.pi, n_samples4, endpoint=False)
        linspace3 = np.linspace(0, 2 * np.pi, n_samples3, endpoint=False)
        linspace2 = np.linspace(0, 2 * np.pi, n_samples2, endpoint=False)
        linspace1 = np.linspace(0, 2 * np.pi, n_samples1, endpoint=False)

        circ4_x = np.cos(linspace4)
        circ4_y = np.sin(linspace4)
        circ3_x = np.cos(linspace3) * 0.75
        circ3_y = np.sin(linspace3) * 0.75
        circ2_x = np.cos(linspace2) * 0.5
        circ2_y = np.sin(linspace2) * 0.5
        circ1_x = np.cos(linspace1) * 0.25
        circ1_y = np.sin(linspace1) * 0.25

        X = np.vstack([
            np.hstack([circ4_x, circ3_x, circ2_x, circ1_x]),
            np.hstack([circ4_y, circ3_y, circ2_y, circ1_y])
        ]).T * 3.0
        X = util_shuffle(X, random_state=rng)

        # Add noise
        X = X + rng.normal(scale=noise, size=X.shape)

        return X.astype(np.float32), noise

    elif data == 'moons':
        if noise is None:
            noise = 0.03

        data = sklearn.datasets.make_moons(n_samples=n_samples, noise=noise)[0]
        data = data.astype(np.float32)
        data = data * 3
        # data = data * 2 + np.array([-1, -0.2])

        return data, noise

    elif data == '8gaussians':
        scale = 4.
        centers = [(1, 0), (-1, 0), (0, 1), (0, -1),
                   (1. / np.sqrt(2), 1. / np.sqrt(2)),
                   (1. / np.sqrt(2), -1. / np.sqrt(2)),
                   (-1. / np.sqrt(2), 1. / np.sqrt(2)),
                   (-1. / np.sqrt(2), -1. / np.sqrt(2))]
        centers = [(scale * x, scale * y) for x, y in centers]

        dataset = []
        for i in range(n_samples):
            point = rng.randn(2) * 0.5
            idx = rng.randint(8)
            center = centers[idx]
            point[0] += center[0]
            point[1] += center[1]
            dataset.append(point)
        dataset = np.array(dataset, dtype=np.float32)
        dataset /= 1.414

        return dataset, None

    elif data == '4gaussians':
        scale = 4.
        centers = [(1, 0), (-1, 0), (0, 1), (0, -1)]
        centers = [(scale * x, scale * y) for x, y in centers]

        dataset = []
        for i in range(n_samples):
            point = rng.randn(2) * 0.5
            idx = rng.randint(4)
            center = centers[idx]
            point[0] += center[0]
            point[1] += center[1]
            dataset.append(point)
        dataset = np.array(dataset, dtype=np.float32)
        dataset /= 1.414

        return dataset, None

    elif data == 'pinwheel':
        radial_std = 0.3
        tangential_std = 0.1
        num_classes = 3
        num_per_class = n_samples // num_classes
        rate = 0.25
        rads = np.linspace(0, 2 * np.pi, num_classes, endpoint=False)

        features = rng.randn(num_classes * num_per_class, 2) * \
            np.array([radial_std, tangential_std])
        features[:, 0] += 1.
        labels = np.repeat(np.arange(num_classes), num_per_class)

        angles = rads[labels] + rate * np.exp(features[:, 0])
        rotations = np.stack(
            [np.cos(angles), -np.sin(angles),
             np.sin(angles),
             np.cos(angles)])
        rotations = np.reshape(rotations.T, (-1, 2, 2))

        return 2 * rng.permutation(
            np.einsum('ti, tij -> tj', features, rotations)), None

    elif data == '2spirals':
        if noise is None:
            noise = 0.1

        n = np.sqrt(rng.rand(n_samples // 2, 1)) * \
            540 * (2 * np.pi) / 360
        d1x = -np.cos(n) * n + rng.rand(n_samples // 2, 1) * 0.5
        d1y = np.sin(n) * n + rng.rand(n_samples // 2, 1) * 0.5
        x = np.vstack((np.hstack((d1x, d1y)), np.hstack((-d1x, -d1y)))) / 3
        x += rng.randn(*x.shape) * noise

        return x, noise

    elif data == 'checkerboard':
        x1 = rng.rand(n_samples) * 4 - 2
        x2_ = rng.rand(n_samples) - \
            rng.randint(0, 2, n_samples) * 2
        x2 = x2_ + (np.floor(x1) % 2)

        return np.concatenate([x1[:, None], x2[:, None]], 1) * 2, None

    elif data == 'line':
        x = rng.rand(n_samples) * 5 - 2.5
        y = x

        return np.stack((x, y), 1), None

    elif data == 'cos':
        if noise is None:
            noise = 0.1

        x = rng.rand(n_samples) * 5 - 2.5 + noise * rng.randn(n_samples)
        y = np.sin(x) * 2.5 + noise * rng.randn(n_samples)

        return np.stack((x, y), 1), noise

    else:
        raise ValueError('Choose one of the available data options.')
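A quick smoke test of data_gen across two branches (assuming numpy, sklearn.datasets, and util_shuffle are in scope):

X, noise = data_gen('rings', n_samples=1000)
print(X.shape, noise)  # (1000, 2) 0.08
X, _ = data_gen('checkerboard', n_samples=512)
print(X.shape)  # (512, 2)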
Example #13
def main():
    if args.data == 'boston':
        data = np.load('data_small/boston_no_discrete.npy')
    elif args.data == 'white_wine':
        data = np.load('data_small/white_no_discrete_no_corr_0.98.npy')
    elif args.data == 'red_wine':
        data = np.load('data_small/red_no_discrete_no_corr_0.98.npy')
    n_features = data.shape[1]
    n_train = int(data.shape[0] * 0.9)
    train_data_clean = util_shuffle(data[:n_train])
    test_data = data[n_train:]  # hold out the remaining 10% for testing
    kf = KFold(n_splits=10)

    covar = np.diag(args.covar * np.ones((n_features, )))

    train_data = train_data_clean + \
        np.random.multivariate_normal(mean=np.zeros(
            (n_features,)), cov=covar, size=n_train)

    # train_covars = np.repeat(
    #     covar[np.newaxis, :, :], n_train, axis=0)

    # train_dataset = DeconvDataset(train_data, train_covars)
    for i, (train_index, eval_index) in enumerate(kf.split(train_data)):
        X_train, X_eval = train_data[train_index], train_data[eval_index]
        train_covars = np.repeat(covar[np.newaxis, :, :],
                                 X_train.shape[0],
                                 axis=0)
        eval_covars = np.repeat(covar[np.newaxis, :, :],
                                X_eval.shape[0],
                                axis=0)

        train_dataset = DeconvDataset(X_train, train_covars)
        train_loader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True)

        eval_dataset = DeconvDataset(X_eval, eval_covars)
        eval_loader = DataLoader(eval_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False)

        model = SVIFlowToy(dimensions=n_features,
                           objective=args.objective,
                           posterior_context_size=n_features,
                           batch_size=args.batch_size,
                           device=device,
                           maf_steps_prior=args.flow_steps_prior,
                           maf_steps_posterior=args.flow_steps_posterior,
                           maf_features=args.maf_features,
                           maf_hidden_blocks=args.maf_hidden_blocks,
                           K=args.K)

        message = 'Total number of parameters: %s' % (sum(
            p.numel() for p in model.parameters()))
        logger.info(message)

        optimizer = torch.optim.Adam(params=model.parameters(), lr=args.lr)

        # training
        scheduler = [30]
        epoch = 0
        best_model = copy.deepcopy(model.state_dict())

        best_eval_loss = compute_eval_loss(model, eval_loader, device,
                                           len(eval_index))
        n_epochs_not_improved = 0

        model.train()
        while n_epochs_not_improved < scheduler[-1] and epoch < args.n_epochs:
            for batch_idx, data in enumerate(train_loader):
                data[0] = data[0].to(device)
                data[1] = data[1].to(device)

                for prior_params in model.model._prior.parameters():
                    prior_params.requires_grad = True

                for post_params in model.model._approximate_posterior.parameters(
                ):
                    post_params.requires_grad = False

                for it in range(args.prior_iter):
                    loss = -model.score(data).mean()
                    message = 'Loss prior %s: %f' % (it, loss)
                    logger.info(message)
                    optimizer.zero_grad()
                    loss.backward(retain_graph=True)
                    optimizer.step()

                for prior_params in model.model._prior.parameters():
                    prior_params.requires_grad = False

                for post_params in model.model._approximate_posterior.parameters(
                ):
                    post_params.requires_grad = True

                for it in range(args.posterior_iter):
                    loss = -model.score(data).mean()
                    message = 'Loss posterior %s: %f' % (it, loss)
                    logger.info(message)
                    optimizer.zero_grad()
                    loss.backward(retain_graph=True)
                    optimizer.step()

            model.eval()
            test_loss_clean = - \
                model.model._prior.log_prob(
                    torch.from_numpy(test_data).to(device)).mean()
            message = 'Test loss (clean) = %.5f' % test_loss_clean
            logger.info(message)
            eval_loss = compute_eval_loss(model, eval_loader, device,
                                          len(eval_index))

            if eval_loss < best_eval_loss:
                best_model = copy.deepcopy(model.state_dict())
                best_eval_loss = eval_loss
                n_epochs_not_improved = 0

            else:
                n_epochs_not_improved += 1

            model.train()
            epoch += 1
        model.load_state_dict(best_model)
        test_loss_clean = - \
            model.model._prior.log_prob(
                torch.from_numpy(test_data).to(device)).mean()
        message = 'Final test loss (clean) = %.5f' % test_loss_clean
        logger.info(message)
        break  # evaluate only the first fold
Example #14
def make_classification_adjusted(n_samples=100,
                                 n_features=20,
                                 n_informative=2,
                                 n_redundant=2,
                                 n_repeated=0,
                                 n_classes=2,
                                 n_clusters_per_class=2,
                                 weights=None,
                                 flip_y=0.01,
                                 class_sep=1.0,
                                 hypercube=True,
                                 shift=0.0,
                                 scale=1.0,
                                 shuffle=True,
                                 random_state=None):
    """Generate a random n-class classification problem.
    This initially creates clusters of points normally distributed (std=1)
    about vertices of an ``n_informative``-dimensional hypercube with sides of
    length ``2*class_sep`` and assigns an equal number of clusters to each
    class. It introduces interdependence between these features and adds
    various types of further noise to the data.
    Without shuffling, ``X`` horizontally stacks features in the following
    order: the primary ``n_informative`` features, followed by ``n_redundant``
    linear combinations of the informative features, followed by ``n_repeated``
    duplicates, drawn randomly with replacement from the informative and
    redundant features. The remaining features are filled with random noise.
    Thus, without shuffling, all useful features are contained in the columns
    ``X[:, :n_informative + n_redundant + n_repeated]``.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features. These comprise ``n_informative``
        informative features, ``n_redundant`` redundant features,
        ``n_repeated`` duplicated features and
        ``n_features-n_informative-n_redundant-n_repeated`` useless features
        drawn at random.
    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension ``n_informative``. For each cluster,
        informative features are drawn independently from  N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.
    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.
    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.
    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.
    weights : array-like of shape (n_classes,) or (n_classes - 1,),\
              (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if ``len(weights) == n_classes - 1``,
        then the last class weight is automatically inferred.
        More than ``n_samples`` samples may be returned if the sum of
        ``weights`` exceeds 1.
    flip_y : float, optional (default=0.01)
        The fraction of samples whose class is assigned randomly. Larger
        values introduce noise in the labels and make the classification
        task harder.
    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube size.  Larger values spread
        out the clusters/classes and make the classification task easier.
    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.
    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].
    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    random_state : int, RandomState instance or None (default)
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for class membership of each sample.
    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.
    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.
    See also
    --------
    make_blobs: simplified variant
    make_multilabel_classification: unrelated generator for multilabel tasks
    """
    generator = check_random_state(random_state)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to less than the number of total"
                         " features")
    # Use log2 to avoid overflow errors
    if n_informative < np.log2(n_classes * n_clusters_per_class):
        msg = "n_classes({}) * n_clusters_per_class({}) must be"
        msg += " smaller or equal 2**n_informative({})={}"
        raise ValueError(
            msg.format(n_classes, n_clusters_per_class, n_informative,
                       2**n_informative))

    if weights is not None:
        if len(weights) not in [n_classes, n_classes - 1]:
            raise ValueError("Weights specified but incompatible with number "
                             "of classes.")
        if len(weights) == n_classes - 1:
            if isinstance(weights, list):
                weights = weights + [1.0 - sum(weights)]
            else:
                weights = np.resize(weights, n_classes)
                weights[-1] = 1.0 - sum(weights[:-1])
    else:
        weights = [1.0 / n_classes] * n_classes

    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class

    # Distribute samples among clusters by weight
    n_samples_per_cluster = [
        int(n_samples * weights[k % n_classes] / n_clusters_per_class)
        for k in range(n_clusters)
    ]

    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    y = np.zeros(n_samples, dtype=int)

    # Build the polytope whose vertices become cluster centroids
    centroids = _generate_hypercube(n_clusters, n_informative,
                                    generator).astype(float, copy=False)
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.rand(n_clusters, 1)
        centroids *= generator.rand(1, n_informative)

    # Initially draw informative features from the standard normal
    X[:, :n_informative] = generator.randn(n_samples, n_informative)

    # Create each cluster; a variant of make_blobs
    stop = 0
    cluster_assigned = np.zeros(X.shape[0], dtype=int)
    for k, centroid in enumerate(centroids):
        start, stop = stop, stop + n_samples_per_cluster[k]
        y[start:stop] = k % n_classes  # assign labels
        cluster_assigned[start:stop] = k + 1
        X_k = X[start:stop, :n_informative]  # slice a view of the cluster

        A = 2 * generator.rand(n_informative, n_informative) - 1
        X_k[...] = np.dot(X_k, A)  # introduce random covariance

        X_k += centroid  # shift the cluster to a vertex

    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.rand(n_informative, n_redundant) - 1
        X[:, n_informative:n_informative + n_redundant] = \
            np.dot(X[:, :n_informative], B)

    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
        X[:, n:n + n_repeated] = X[:, indices]

    # Fill useless features
    if n_useless > 0:
        X[:, -n_useless:] = generator.randn(n_samples, n_useless)

    # Randomly replace labels
    if flip_y >= 0.0:
        flip_mask = generator.rand(n_samples) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())

    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.rand(n_features) - 1) * class_sep
    X += shift

    if scale is None:
        scale = 1 + 100 * generator.rand(n_features)
    X *= scale

    if shuffle:
        # Randomly permute samples (keeping cluster ids aligned)
        X, y, cluster_assigned = util_shuffle(X, y, cluster_assigned,
                                              random_state=generator)

        # Randomly permute features
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]

    return X, y, cluster_assigned
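Usage sketch showing the extra cluster_assigned return value (1-based cluster ids, one per sample):

X, y, clusters = make_classification_adjusted(n_samples=200, n_features=6,
                                              n_informative=3, random_state=0)
print(X.shape, np.unique(y), np.unique(clusters))  # (200, 6) [0 1] [1 2 3 4]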
Example #15
def get_2d_data(data, size):
    if data == "swissroll":
        data = sklearn.datasets.make_swiss_roll(n_samples=size, noise=1.0)[0]
        data = data[:, [0, 2]]
        data /= 5

    elif data == "circles":
        data = sklearn.datasets.make_circles(n_samples=size,
                                             factor=.5,
                                             noise=0.08)[0]
        data *= 3

    elif data == "rings":
        n_samples4 = n_samples3 = n_samples2 = size // 4
        n_samples1 = size - n_samples4 - n_samples3 - n_samples2

        # so as not to have the first point = last point, we set endpoint=False
        linspace4 = np.linspace(0, 2 * np.pi, n_samples4, endpoint=False)
        linspace3 = np.linspace(0, 2 * np.pi, n_samples3, endpoint=False)
        linspace2 = np.linspace(0, 2 * np.pi, n_samples2, endpoint=False)
        linspace1 = np.linspace(0, 2 * np.pi, n_samples1, endpoint=False)

        circ4_x = np.cos(linspace4)
        circ4_y = np.sin(linspace4)
        circ3_x = np.cos(linspace3) * 0.75
        circ3_y = np.sin(linspace3) * 0.75
        circ2_x = np.cos(linspace2) * 0.5
        circ2_y = np.sin(linspace2) * 0.5
        circ1_x = np.cos(linspace1) * 0.25
        circ1_y = np.sin(linspace1) * 0.25

        X = np.vstack([
            np.hstack([circ4_x, circ3_x, circ2_x, circ1_x]),
            np.hstack([circ4_y, circ3_y, circ2_y, circ1_y])
        ]).T * 3.0
        X = util_shuffle(X)

        # Add noise
        data = X + np.random.normal(scale=0.08, size=X.shape)

    elif data == "8gaussians":
        dim = 2
        scale = 4.
        centers = [(1, 0), (-1, 0), (0, 1), (0, -1),
                   (1. / np.sqrt(2), 1. / np.sqrt(2)),
                   (1. / np.sqrt(2), -1. / np.sqrt(2)),
                   (-1. / np.sqrt(2), 1. / np.sqrt(2)),
                   (-1. / np.sqrt(2), -1. / np.sqrt(2))]
        centers = [(scale * x, scale * y) for x, y in centers]
        # pad centers with zeros for dim > 2 (a no-op here, since dim == 2)
        for i in range(len(centers)):
            for k in range(dim - 2):
                centers[i] = centers[i] + (0, )

        data = []
        for i in range(size):
            point = np.random.randn(dim) * 0.5
            idx = np.random.randint(8)
            center = centers[idx]
            point[0] += center[0]
            point[1] += center[1]
            data.append(point)
        data = np.array(data)
        data /= 1.414

    elif data == "pinwheel":
        radial_std = 0.3
        tangential_std = 0.1
        num_classes = 5
        num_per_class = size // 5
        rate = 0.25
        rads = np.linspace(0, 2 * np.pi, num_classes, endpoint=False)

        features = np.random.randn(num_classes*num_per_class, 2) \
            * np.array([radial_std, tangential_std])
        features[:, 0] += 1.
        labels = np.repeat(np.arange(num_classes), num_per_class)

        angles = rads[labels] + rate * np.exp(features[:, 0])
        rotations = np.stack(
            [np.cos(angles), -np.sin(angles),
             np.sin(angles),
             np.cos(angles)])
        rotations = np.reshape(rotations.T, (-1, 2, 2))

        data = 2 * np.random.permutation(
            np.einsum("ti,tij->tj", features, rotations))

    elif data == "2spirals":
        n = np.sqrt(np.random.rand(size // 2, 1)) * 540 * (2 * np.pi) / 360
        d1x = -np.cos(n) * n + np.random.rand(size // 2, 1) * 0.5
        d1y = np.sin(n) * n + np.random.rand(size // 2, 1) * 0.5
        x = np.vstack((np.hstack((d1x, d1y)), np.hstack((-d1x, -d1y)))) / 3
        x += np.random.randn(*x.shape) * 0.1
        data = x

    elif data == "checkerboard":
        x1 = np.random.rand(size) * 4 - 2
        x2_ = np.random.rand(size) - np.random.randint(0, 2, size) * 2
        x2 = x2_ + (np.floor(x1) % 2)
        data = np.concatenate([x1[:, None], x2[:, None]], 1)
        data *= 2

    elif data == "line":
        x = np.random.rand(size) * 5 - 2.5
        y = x
        data = np.stack((x, y), 1)
    elif data == "cos":
        x = np.random.rand(size) * 5 - 2.5
        y = np.sin(x) * 2.5
        data = np.stack((x, y), 1)

    elif data == "2uniforms":
        mixture_component = (np.random.rand(size) > 0.5).astype(int)
        x1 = np.random.rand(size) + mixture_component - 2 * (1 -
                                                             mixture_component)
        x2 = 2 * (np.random.rand(size) - 0.5)
        data = np.stack((x1, x2), 1)

    elif data == "2lines":
        x1 = np.empty(size)
        x1[:size // 2] = -1.
        x1[size // 2:] = 1.
        x1 += 0.01 * (np.random.rand(size) - .5)
        x2 = 2 * (np.random.rand(size) - 0.5)
        data = np.stack((x1, x2), 1)
        data = util_shuffle(data)

    elif data == "2marginals":
        x1 = np.empty(size)
        x1[:size // 2] = -1.
        x1[size // 2:] = 1.
        x1 += .5 * (np.random.rand(size) - .5)
        x2 = np.random.normal(size=size)
        data = np.stack((x1, x2), 1)
        data = util_shuffle(data)

    elif data == "1uniform":
        x1 = np.random.rand(size) - .5
        x2 = np.random.rand(size) - .5
        data = np.stack((x1, x2), 1)
        data = util_shuffle(data)

    elif data == "annulus":
        rad1 = 2
        rad2 = 1
        theta = 2 * np.pi * np.random.random(size)
        r = np.sqrt(np.random.random(size) * (rad1**2 - rad2**2) + rad2**2)
        x1 = r * np.cos(theta)
        x2 = r * np.sin(theta)
        data = np.stack((x1, x2), 1)

    elif data == "sawtooth":
        u = np.random.rand(size)
        branch = u < .5
        x1 = np.zeros(size)
        x1[branch] = -1 - np.sqrt(1 - 2 * u[branch])
        x1[~branch] = 1 + np.sqrt(2 * u[~branch] - 1)
        x2 = np.random.rand(size)
        data = np.stack((x1, x2), 1)

    elif data == "quadspline":
        u = np.random.rand(size)
        branch = u < .5
        x1 = np.zeros(size)
        x1[branch] = -1 + np.cbrt(2 * u[branch] - 1)
        x1[~branch] = 1 + np.cbrt(2 * u[~branch] - 1)
        x2 = np.random.rand(size)
        data = np.stack((x1, x2), 1)

    elif data == "split-gaussian":
        x1 = np.random.normal(size=size)
        x2 = np.random.normal(size=size)
        x2[x1 >= 0] += 2
        x2[x1 < 0] -= 2
        data = np.stack((x1, x2), 1)

    else:
        assert False, f"Unknown dataset `{data}''"

    return torch.tensor(data, dtype=torch.get_default_dtype())
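get_2d_data returns a torch tensor in torch's default dtype; a short sketch (assuming torch, numpy, sklearn, and util_shuffle in scope):

pts = get_2d_data('2spirals', size=1000)
print(pts.shape, pts.dtype)  # torch.Size([1000, 2]) torch.float32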
Example #16
def make_classification(n_samples=100, n_features=20, n_informative=2,
                        n_redundant=2, n_repeated=0, n_classes=2,
                        n_clusters_per_class=2, weights=None, flip_y=0.01,
                        class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
                        shuffle=True, random_state=None):
    """Generate a random n-class classification problem.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features, `n_repeated`
        duplicated features and `n_features-n_informative-n_redundant-
        n_repeated` useless features drawn at random.

    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from  N(0, 1) and then
        randomly linearly combined in order to add covariance. The clusters
        are then placed on the vertices of the hypercube.

    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.

    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.

    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.

    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.

    weights : list of floats or None (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if `len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.

    flip_y : float, optional (default=0.01)
        The fraction of samples whose class are randomly exchanged.

    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube dimension.

    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.

    shift : float or None, optional (default=0.0)
        Shift all features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].

    scale : float or None, optional (default=1.0)
        Multiply all features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.

    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for class membership of each sample.

    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.

    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.
    """
    from itertools import product
    from sklearn.utils import shuffle as util_shuffle

    generator = check_random_state(random_state)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to less than the number of total"
                         " features")
    if 2 ** n_informative < n_classes * n_clusters_per_class:
        raise ValueError("n_classes * n_clusters_per_class must"
                         " be smaller or equal 2 ** n_informative")
    if weights and len(weights) not in [n_classes, n_classes - 1]:
        raise ValueError("Weights specified but incompatible with number "
                         "of classes.")

    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class

    if weights and len(weights) == (n_classes - 1):
        weights.append(1.0 - sum(weights))

    if weights is None:
        weights = [1.0 / n_classes] * n_classes
        weights[-1] = 1.0 - sum(weights[:-1])

    n_samples_per_cluster = []

    for k in range(n_clusters):
        n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
                                     / n_clusters_per_class))

    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    y = np.zeros(n_samples, dtype=int)

    # Build the polytope
    C = np.array(list(product([-class_sep, class_sep], repeat=n_informative)))

    generator.shuffle(C)

    if not hypercube:
        C[:n_clusters] *= generator.rand(n_clusters, 1)
        C *= generator.rand(1, n_informative)

    # Loop over all clusters
    pos = 0
    pos_end = 0

    for k in range(n_clusters):
        # Number of samples in cluster k
        n_samples_k = n_samples_per_cluster[k]

        # Define the range of samples
        pos = pos_end
        pos_end = pos + n_samples_k

        # Assign labels
        y[pos:pos_end] = k % n_classes

        # Draw features at random
        X[pos:pos_end, :n_informative] = generator.randn(n_samples_k,
                                                         n_informative)

        # Multiply by a random matrix to create co-variance of the features
        A = 2 * generator.rand(n_informative, n_informative) - 1
        X[pos:pos_end, :n_informative] = np.dot(X[pos:pos_end, :n_informative],
                                                A)

        # Shift the cluster to a vertex
        X[pos:pos_end, :n_informative] += np.tile(C[k, :], (n_samples_k, 1))

    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.rand(n_informative, n_redundant) - 1
        X[:, n_informative:n_informative + n_redundant] = \
            np.dot(X[:, :n_informative], B)

    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
        X[:, n:n + n_repeated] = X[:, indices]

    # Fill useless features
    X[:, n_features - n_useless:] = generator.randn(n_samples, n_useless)

    # Randomly flip labels
    if flip_y >= 0.0:
        for i in range(n_samples):
            if generator.rand() < flip_y:
                y[i] = generator.randint(n_classes)

    # Randomly shift and scale
    constant_shift = shift is not None
    constant_scale = scale is not None

    for f in range(n_features):
        if not constant_shift:
            shift = (2 * generator.rand() - 1) * class_sep

        if not constant_scale:
            scale = 1 + 100 * generator.rand()

        X[:, f] += shift
        X[:, f] *= scale

    # Randomly permute samples and features
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]

    return X, y
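Usage sketch:

X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
                           n_classes=3, n_clusters_per_class=2,
                           random_state=0)
print(X.shape, np.bincount(y))  # (100, 20) and roughly balanced classes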
Example #17
def make_classification(n_samples=100,
                        n_features=20,
                        cov=None,
                        n_informative=2,
                        n_redundant=2,
                        n_repeated=0,
                        n_classes=2,
                        n_clusters_per_class=2,
                        weights=None,
                        flip_y=0.01,
                        class_sep=1.0,
                        hypercube=True,
                        shift=0.0,
                        scale=1.0,
                        shuffle=True,
                        random_state=None):
    generator = check_random_state(random_state)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to less than the number of total"
                         " features")
    if 2**n_informative < n_classes * n_clusters_per_class:
        raise ValueError("n_classes * n_clusters_per_class must"
                         " be smaller or equal 2 ** n_informative")
    if weights and len(weights) not in [n_classes, n_classes - 1]:
        raise ValueError("Weights specified but incompatible with number "
                         "of classes.")

    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class

    if weights and len(weights) == (n_classes - 1):
        weights.append(1.0 - sum(weights))

    if weights is None:
        weights = [1.0 / n_classes] * n_classes
        weights[-1] = 1.0 - sum(weights[:-1])

    # Distribute samples among clusters by weight
    n_samples_per_cluster = []
    for k in range(n_clusters):
        n_samples_per_cluster.append(
            int(n_samples * weights[k % n_classes] / n_clusters_per_class))
    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    y = np.zeros(n_samples, dtype=int)

    # Build the polytope whose vertices become cluster centroids
    centroids = _generate_hypercube(n_clusters, n_informative,
                                    generator).astype(float)
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.rand(n_clusters, 1)
        centroids *= generator.rand(1, n_informative)

    # Initially draw informative features from the standard normal
    X[:, :n_informative] = generator.randn(n_samples, n_informative)

    # Create each cluster; a variant of make_blobs
    stop = 0
    for k, centroid in enumerate(centroids):
        start, stop = stop, stop + n_samples_per_cluster[k]
        y[start:stop] = k % n_classes  # assign labels
        X_k = X[start:stop, :n_informative]  # slice a view of the cluster
        if cov is None:
            # introduce random covariance
            A = 2 * generator.rand(n_informative, n_informative) - 1
            X_k[...] = np.dot(X_k, A)
        else:
            # use the user-specified covariance matrix
            A = np.linalg.cholesky(cov[k])
            X_k[...] = np.dot(A, X_k.T).T

        X_k += centroid  # shift the cluster to a vertex

    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.rand(n_informative, n_redundant) - 1
        X[:, n_informative:n_informative + n_redundant] = \
            np.dot(X[:, :n_informative], B)

    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
        X[:, n:n + n_repeated] = X[:, indices]

    # Fill useless features
    if n_useless > 0:
        X[:, -n_useless:] = generator.randn(n_samples, n_useless)

    # Randomly replace labels
    if flip_y >= 0.0:
        flip_mask = generator.rand(n_samples) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())

    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.rand(n_features) - 1) * class_sep
    X += shift

    if scale is None:
        scale = 1 + 100 * generator.rand(n_features)
    X *= scale

    if shuffle:
        # Randomly permute samples
        X, y = util_shuffle(X, y, random_state=generator)

        # Randomly permute features
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]

    return X, y
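
A minimal usage sketch for this variant's extra `cov` argument (not part of
stock scikit-learn), assuming the function above and its `_generate_hypercube`
helper are importable. Each entry of `cov` is one SPD covariance matrix per
cluster; its Cholesky factor L reshapes that cluster, since y = L x with
x ~ N(0, I) has Cov(y) = L L^T.

import numpy as np

# Four clusters here (n_classes=2 * n_clusters_per_class=2); alternate a
# correlated covariance with the identity, which leaves a cluster untouched.
covs = [np.array([[1.0, 0.8], [0.8, 1.0]]) if k % 2 == 0 else np.eye(2)
        for k in range(4)]

X, y = make_classification(n_samples=200, n_features=2, n_informative=2,
                           n_redundant=0, cov=covs, random_state=0)
print(X.shape, np.bincount(y))  # (200, 2), roughly balanced classes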
Example #18
def make_classification(n_samples=100,
                        n_features=20,
                        n_informative=2,
                        n_redundant=2,
                        n_repeated=0,
                        n_classes=2,
                        n_clusters_per_class=2,
                        weights=None,
                        flip_y=0.01,
                        class_sep=1.0,
                        hypercube=True,
                        shift=0.0,
                        scale=1.0,
                        shuffle=True,
                        random_state=None):
    """Generate a random n-class classification problem.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features, `n_repeated`
        duplicated features and `n_features-n_informative-n_redundant-
        n_repeated` useless features drawn at random.

    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from  N(0, 1) and then
        randomly linearly combined in order to add covariance. The clusters
        are then placed on the vertices of the hypercube.

    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.

    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.

    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.

    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.

    weights : list of floats or None (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if `len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.

    flip_y : float, optional (default=0.01)
        The fraction of samples whose class is randomly exchanged.

    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube dimension.

    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.

    shift : float or None, optional (default=0.0)
        Shift all features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].

    scale : float or None, optional (default=1.0)
        Multiply all features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.

    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for class membership of each sample.

    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.

    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.
    """
    from itertools import product
    from sklearn.utils import shuffle as util_shuffle

    generator = check_random_state(random_state)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to at most the number of total"
                         " features")
    if 2**n_informative < n_classes * n_clusters_per_class:
        raise ValueError("n_classes * n_clusters_per_class must"
                         " be smaller than or equal to 2 ** n_informative")
    if weights and len(weights) not in [n_classes, n_classes - 1]:
        raise ValueError("Weights specified but incompatible with number "
                         "of classes.")

    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class

    if weights and len(weights) == (n_classes - 1):
        weights = weights + [1.0 - sum(weights)]  # avoid mutating the caller's list

    if weights is None:
        weights = [1.0 / n_classes] * n_classes
        weights[-1] = 1.0 - sum(weights[:-1])

    n_samples_per_cluster = []

    for k in range(n_clusters):
        n_samples_per_cluster.append(
            int(n_samples * weights[k % n_classes] / n_clusters_per_class))

    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    y = np.zeros(n_samples, dtype=int)

    # Build the polytope
    C = np.array(list(product([-class_sep, class_sep], repeat=n_informative)))

    generator.shuffle(C)

    if not hypercube:
        C[:n_clusters] *= generator.rand(n_clusters, 1)
        C *= generator.rand(1, n_informative)

    # Loop over all clusters
    pos = 0
    pos_end = 0

    for k in range(n_clusters):
        # Number of samples in cluster k
        n_samples_k = n_samples_per_cluster[k]

        # Define the range of samples
        pos = pos_end
        pos_end = pos + n_samples_k

        # Assign labels
        y[pos:pos_end] = k % n_classes

        # Draw features at random
        X[pos:pos_end, :n_informative] = generator.randn(
            n_samples_k, n_informative)

        # Multiply by a random matrix to create co-variance of the features
        A = 2 * generator.rand(n_informative, n_informative) - 1
        X[pos:pos_end, :n_informative] = np.dot(X[pos:pos_end, :n_informative],
                                                A)

        # Shift the cluster to a vertex
        X[pos:pos_end, :n_informative] += np.tile(C[k, :], (n_samples_k, 1))

    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.rand(n_informative, n_redundant) - 1
        X[:, n_informative:n_informative + n_redundant] = \
            np.dot(X[:, :n_informative], B)

    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
        X[:, n:n + n_repeated] = X[:, indices]

    # Fill useless features
    X[:, n_features - n_useless:] = generator.randn(n_samples, n_useless)

    # Randomly flip labels
    if flip_y >= 0.0:
        for i in range(n_samples):
            if generator.rand() < flip_y:
                y[i] = generator.randint(n_classes)

    # Randomly shift and scale
    constant_shift = shift is not None
    constant_scale = scale is not None

    for f in range(n_features):
        if not constant_shift:
            shift = (2 * generator.rand() - 1) * class_sep

        if not constant_scale:
            scale = 1 + 100 * generator.rand()

        X[:, f] += shift
        X[:, f] *= scale

    # Randomly permute samples and features
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]

    return X, y
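
A quick usage sketch (assuming the function above and its imports): an
imbalanced three-class problem where the third class weight is inferred from
the first two, with label flipping disabled so the class counts come out
exact.

import numpy as np

X, y = make_classification(n_samples=1000, n_features=10, n_informative=4,
                           n_classes=3, weights=[0.7, 0.2], flip_y=0.0,
                           random_state=42)
print(np.bincount(y))  # [700, 200, 100]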
Example #19
def make_classification(
    n_samples=100,
    n_features=20,
    n_informative=2,
    n_redundant=2,
    n_repeated=0,
    n_classes=2,
    n_clusters_per_class=2,
    weights=None,
    flip_y=0.01,
    class_sep=1.0,
    hypercube=True,
    shift=0.0,
    scale=1.0,
    shuffle=True,
    useful_indices=False,
    random_state=None,
):
    """Generate a random n-class classification problem.

    This initially creates clusters of points normally distributed (std=1)
    about vertices of an `n_informative`-dimensional hypercube with sides of
    length `2*class_sep` and assigns an equal number of clusters to each
    class. It introduces interdependence between these features and adds
    various types of further noise to the data.

    Prior to shuffling, `X` stacks a number of these primary "informative"
    features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features, `n_repeated`
        duplicated features and `n_features-n_informative-n_redundant-
        n_repeated` useless features drawn at random.

    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from  N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.

    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.

    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.

    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.

    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.

    weights : list of floats or None (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if `len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.
        More than `n_samples` samples may be returned if the sum of `weights`
        exceeds 1.

    flip_y : float, optional (default=0.01)
        The fraction of samples whose class is randomly exchanged. Larger
        values introduce noise in the labels and make the classification
        task harder.

    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube size.  Larger values spread
        out the clusters/classes and make the classification task easier.

    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.

    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].

    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.

    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.

    useful_indices : boolean, optional (default=False)
        If True, a boolean array indicating useful features is returned

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for class membership of each sample.

    useful_indices : array of shape [n_features], optional
        A boolean array indicating the usefulness of each feature. An element
        in this array is True if the corresponding feature is either
        informative, redundant, or repeated. It is returned only if
        `useful_indices` is True.

    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.

    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.

    See also
    --------
    make_blobs: simplified variant
    make_multilabel_classification: unrelated generator for multilabel tasks
    """
    generator = check_random_state(random_state)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to at most the number of total"
                         " features")
    if 2**n_informative < n_classes * n_clusters_per_class:
        raise ValueError("n_classes * n_clusters_per_class must"
                         " be smaller than or equal to 2 ** n_informative")
    if weights and len(weights) not in [n_classes, n_classes - 1]:
        raise ValueError("Weights specified but incompatible with number "
                         "of classes.")

    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class

    if weights and len(weights) == (n_classes - 1):
        weights = weights + [1.0 - sum(weights)]

    if weights is None:
        weights = [1.0 / n_classes] * n_classes
        weights[-1] = 1.0 - sum(weights[:-1])

    # Distribute samples among clusters by weight
    n_samples_per_cluster = []
    for k in range(n_clusters):
        n_samples_per_cluster.append(
            int(n_samples * weights[k % n_classes] / n_clusters_per_class))
    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    y = np.zeros(n_samples, dtype=int)

    # Build the polytope whose vertices become cluster centroids
    centroids = _generate_hypercube(n_clusters, n_informative,
                                    generator).astype(float)
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.rand(n_clusters, 1)
        centroids *= generator.rand(1, n_informative)

    # Initially draw informative features from the standard normal
    X[:, :n_informative] = generator.randn(n_samples, n_informative)

    # Create each cluster; a variant of make_blobs
    stop = 0
    for k, centroid in enumerate(centroids):
        start, stop = stop, stop + n_samples_per_cluster[k]
        y[start:stop] = k % n_classes  # assign labels
        X_k = X[start:stop, :n_informative]  # slice a view of the cluster

        A = 2 * generator.rand(n_informative, n_informative) - 1
        X_k[...] = np.dot(X_k, A)  # introduce random covariance

        X_k += centroid  # shift the cluster to a vertex

    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.rand(n_informative, n_redundant) - 1
        X[:, n_informative:n_informative + n_redundant] = np.dot(
            X[:, :n_informative], B)

    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
        X[:, n:n + n_repeated] = X[:, indices]

    # Fill useless features
    if n_useless > 0:
        X[:, -n_useless:] = generator.randn(n_samples, n_useless)

    # Randomly replace labels
    if flip_y >= 0.0:
        flip_mask = generator.rand(n_samples) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())

    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.rand(n_features) - 1) * class_sep
    X += shift

    if scale is None:
        scale = 1 + 100 * generator.rand(n_features)
    X *= scale

    indices = np.arange(n_features)
    if shuffle:
        # Randomly permute samples
        X, y = util_shuffle(X, y, random_state=generator)

        # Randomly permute features
        generator.shuffle(indices)
        X[:, :] = X[:, indices]

    if useful_indices:
        n_useful = n_informative + n_redundant + n_repeated
        return X, y, indices < n_useful
    else:
        return X, y
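
A short sketch of the `useful_indices` extension (assuming the function
above): the returned boolean mask tracks the feature permutation, so it marks
which columns of the shuffled X are informative, redundant, or repeated.

import numpy as np

X, y, useful = make_classification(n_samples=100, n_features=8,
                                   n_informative=3, n_redundant=1,
                                   useful_indices=True, random_state=0)
print(useful.sum())            # 4 (3 informative + 1 redundant)
print(np.flatnonzero(useful))  # their positions after shuffling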
Example #20
def make_group_classification(
    n_samples=100,
    n_groups=20,
    n_informative_groups=2,
    n_features_per_group=20,
    n_informative_per_group=2,
    n_redundant_per_group=2,
    n_repeated_per_group=0,
    n_classes=2,
    n_clusters_per_class=2,
    weights=None,
    flip_y=0.01,
    class_sep=1.0,
    hypercube=True,
    shift=0.0,
    scale=1.0,
    shuffle=True,
    useful_indices=False,
    random_state=None,
):
    """Generate a random n-class sparse group classification problem.

    This function is a generalization of sklearn.datasets.make_classification
    to feature matrices with grouped covariates. Prior to shuffling, ``X``
    stacks a number of these primary "informative"
    features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
    This method uses sklearn.datasets.make_classification to construct a
    giant unshuffled classification problem of size
    ``n_groups * n_features_per_group`` and then distributes the returned
    features to each group. It then optionally shuffles each group.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_groups : int, optional (default=20)
        The number of feature groups.

    n_informative_groups : int, optional (default=2)
        The total number of informative groups. All other groups will be
        just noise.

    n_features_per_group : int, optional (default=20)
        The total number of features per group. These comprise
        `n_informative_per_group` informative features,
        `n_redundant_per_group` redundant features, `n_repeated_per_group`
        duplicated features, and the remaining useless features drawn at
        random.

    n_informative_per_group : int, optional (default=2)
        The number of informative features_per_group. Each class is composed
        of a number of gaussian clusters each located around the vertices of a
        hypercube in a subspace of dimension `n_informative_per_group`. For
        each cluster, informative features are drawn independently from
        N(0, 1) and then randomly linearly combined within each cluster in
        order to add covariance. The clusters are then placed on the vertices
        of the hypercube.

    n_redundant_per_group : int, optional (default=2)
        The number of redundant features per group. These features are
        generated as random linear combinations of the informative features.

    n_repeated_per_group : int, optional (default=0)
        The number of duplicated features per group, drawn randomly from the
        informative and the redundant features.

    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.

    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.

    weights : list of floats or None (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if `len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.
        More than `n_samples` samples may be returned if the sum of `weights`
        exceeds 1.

    flip_y : float, optional (default=0.01)
        The fraction of samples whose class is randomly exchanged. Larger
        values introduce noise in the labels and make the classification
        task harder.

    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube size.  Larger values spread
        out the clusters/classes and make the classification task easier.

    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.

    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].

    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.

    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.

    useful_indices : boolean, optional (default=False)
        If True, a boolean array indicating useful features is returned

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for class membership of each sample.

    groups : list of arrays
        Each element is an array of feature indices that belong to that group

    indices : array of shape [n_features]
        A boolean array indicating which features are useful. Returned only
        if `useful_indices` is True.

    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.

    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.

    See Also
    --------
    sklearn.datasets.make_classification: non-group-sparse version
    sklearn.datasets.make_blobs: simplified variant
    sklearn.datasets.make_multilabel_classification: unrelated generator for multilabel tasks
    """
    generator = check_random_state(random_state)

    total_features = n_groups * n_features_per_group
    total_informative = n_informative_groups * n_informative_per_group
    total_redundant = n_informative_groups * n_redundant_per_group
    total_repeated = n_informative_groups * n_repeated_per_group

    # Count features, clusters and samples
    if (n_informative_per_group + n_redundant_per_group + n_repeated_per_group
            > n_features_per_group):
        raise ValueError(
            "Number of informative, redundant and repeated features per group"
            " must sum to less than the number of total features per group.")

    # Generate a big classification problem for the total number of features
    # The `shuffle` argument is False so that the feature matrix X has
    # features stacked in the order: informative, redundant, repeated, useless
    X, y = make_classification(
        n_samples=n_samples,
        n_features=total_features,
        n_informative=total_informative,
        n_redundant=total_redundant,
        n_repeated=total_repeated,
        n_classes=n_classes,
        n_clusters_per_class=n_clusters_per_class,
        weights=weights,
        flip_y=flip_y,
        class_sep=class_sep,
        hypercube=hypercube,
        shift=shift,
        scale=scale,
        shuffle=False,
        random_state=generator,
    )

    total_useful = total_informative + total_redundant + total_repeated
    idx = np.arange(total_features) < total_useful

    # Evenly distribute the first `n_informative_groups * n_features_per_group`
    # features into the first `n_informative_groups` groups
    n_info_grp_features = n_informative_groups * n_features_per_group
    idx_range = np.arange(n_info_grp_features)

    idx_map_consolidated_2_grouped = (np.concatenate(
        [np.arange(0, n_info_grp_features, n_informative_groups)] *
        n_informative_groups) + idx_range // n_features_per_group)
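    # Worked example of this mapping with hypothetical sizes
    # n_informative_groups=2 and n_features_per_group=3: the map is
    # [0, 2, 4, 0, 2, 4] + [0, 0, 0, 1, 1, 1] = [0, 2, 4, 1, 3, 5], so the
    # consolidated features are dealt round-robin: group 0 receives
    # consolidated columns 0, 2, 4 and group 1 receives 1, 3, 5.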

    X = np.concatenate(
        [X[:, idx_map_consolidated_2_grouped], X[:, n_info_grp_features:]],
        axis=1)

    if useful_indices:
        idx = np.concatenate(
            [idx[idx_map_consolidated_2_grouped], idx[n_info_grp_features:]])

    if shuffle:
        # Randomly permute samples
        X, y = util_shuffle(X, y, random_state=generator)

        # Permute the groups, maintaining the order within them.
        # group_idx_map maps feature indices to group indices; the group
        # order is random, but all features in a single group stay adjacent.
        group_idx_map = np.concatenate([
            np.ones(n_features_per_group, dtype=np.int32) * i for i in
            generator.choice(np.arange(n_groups), size=n_groups, replace=False)
        ])

        permute_group_map = (np.concatenate([
            generator.choice(
                np.arange(n_features_per_group),
                size=n_features_per_group,
                replace=False,
            ) for _ in range(n_groups)
        ]) + group_idx_map * n_features_per_group)

        X = X[:, permute_group_map]

        if useful_indices:
            idx = idx[permute_group_map]
    else:
        group_idx_map = np.concatenate([
            np.ones(n_features_per_group, dtype=np.int32) * i
            for i in range(n_groups)
        ])

    groups = [np.where(group_idx_map == i)[0] for i in range(n_groups)]

    X = np.ascontiguousarray(X)
    if useful_indices:
        return X, y, groups, idx
    else:
        return X, y, groups
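
A usage sketch (assuming the function above and make_classification): the
returned `groups` list gives, for each group, the array of column indices of
X that belong to it.

X, y, groups = make_group_classification(n_samples=200, n_groups=5,
                                         n_informative_groups=2,
                                         n_features_per_group=10,
                                         random_state=0)
print(X.shape)                       # (200, 50)
print(len(groups), groups[0].shape)  # 5 groups of 10 features each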
Example #21
def make_group_regression(
    n_samples=100,
    n_groups=20,
    n_informative_groups=5,
    n_features_per_group=20,
    n_informative_per_group=5,
    effective_rank=None,
    noise=0.0,
    shift=0.0,
    scale=1.0,
    shuffle=False,
    coef=False,
    random_state=None,
):
    """Generate a sparse group regression problem.

    This function is a generalization of sklearn.datasets.make_regression
    to feature matrices with grouped covariates. Prior to shuffling, ``X``
    stacks a number of primary "informative" features, and arbitrary noise
    for any remaining features.
    This method uses sklearn.datasets.make_regression to construct a
    giant unshuffled regression problem of size
    ``n_groups * n_features_per_group`` and then distributes the returned
    features to each group. It then optionally shuffles each group.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_groups : int, optional (default=20)
        The number of feature groups.

    n_informative_groups : int, optional (default=5)
        The total number of informative groups. All other groups will be
        just noise.

    n_features_per_group : int, optional (default=20)
        The total number of features per group. These comprise
        `n_informative_per_group` informative features and the remaining
        useless features drawn at random.

    n_informative_per_group : int, optional (default=5)
        The number of informative features_per_group that have a
        non-zero regression coefficient.

    effective_rank : int or None, optional (default=None)
        If not None, provides the number of singular vectors to explain the
        input data.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    shift : float, optional (default=0.0)
        Present in the signature but currently not forwarded to
        sklearn.datasets.make_regression.

    scale : float, optional (default=1.0)
        Present in the signature but currently not forwarded to
        sklearn.datasets.make_regression.

    shuffle : boolean, optional (default=False)
        Shuffle the samples and the features.

    coef : boolean, optional (default=False)
        If True, returns coefficient values used to generate samples via
        sklearn.datasets.make_regression.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The output values (regression targets) for each sample.

    groups : list of arrays
        Each element is an array of feature indices that belong to that group

    coef : array of shape [n_features]
        A numpy array containing true regression coefficient values.
        Returned only if `coef` is True.

    See Also
    --------
    sklearn.datasets.make_regression: non-group-sparse version
    """
    generator = check_random_state(random_state)

    total_features = n_groups * n_features_per_group
    total_informative = n_informative_groups * n_informative_per_group

    if coef:
        X, y, reg_coefs = make_regression(
            n_samples=n_samples,
            n_features=total_features,
            n_informative=total_informative,
            effective_rank=effective_rank,
            bias=0.0,
            noise=noise,
            shuffle=False,
            coef=True,
            random_state=generator,
        )
    else:
        X, y = make_regression(
            n_samples=n_samples,
            n_features=total_features,
            n_informative=total_informative,
            effective_rank=effective_rank,
            bias=0.0,
            noise=noise,
            shuffle=False,
            coef=False,
            random_state=generator,
        )

    # Evenly distribute the first `n_informative_groups * n_features_per_group`
    # features into the first `n_informative_groups` groups
    n_info_grp_features = n_informative_groups * n_features_per_group
    idx_range = np.arange(n_info_grp_features)

    idx_map_consolidated_2_grouped = (np.concatenate(
        [np.arange(0, n_info_grp_features, n_informative_groups)] *
        n_informative_groups) + idx_range // n_features_per_group)

    X = np.concatenate(
        [X[:, idx_map_consolidated_2_grouped], X[:, n_info_grp_features:]],
        axis=1)

    group_idx_map = np.concatenate([
        np.ones(n_features_per_group, dtype=np.int32) * i
        for i in range(n_groups)
    ])

    if coef:
        reg_coefs = np.concatenate([
            reg_coefs[idx_map_consolidated_2_grouped],
            reg_coefs[n_info_grp_features:]
        ])

    # Randomly permute samples and features
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

        indices = np.arange(total_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]
        group_idx_map = group_idx_map[indices]
        if coef:
            reg_coefs = reg_coefs[indices]

    X = np.ascontiguousarray(X)
    groups = [np.where(group_idx_map == i)[0] for i in range(n_groups)]
    if coef:
        return X, y, groups, reg_coefs
    else:
        return X, y, groups
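
A usage sketch (assuming the function above and sklearn's make_regression):
with `coef=True`, the non-zero entries of the returned coefficients should
fall only inside the informative groups.

import numpy as np

X, y, groups, coefs = make_group_regression(n_samples=100, n_groups=4,
                                            n_informative_groups=2,
                                            n_features_per_group=5,
                                            n_informative_per_group=2,
                                            coef=True, random_state=0)
nonzero_groups = {g for g, cols in enumerate(groups)
                  if np.any(coefs[cols] != 0)}
print(nonzero_groups)  # subset of {0, 1}, the informative groups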
Example #22
def inf_train_gen(data, rng=None, batch_size=200):
    if rng is None:
        rng = np.random.RandomState()

    if data == "swissroll":
        data = sklearn.datasets.make_swiss_roll(n_samples=batch_size, noise=1.0)[0]
        data = data.astype("float32")[:, [0, 2]]
        data /= 5
        return data

    elif data == "circles":
        data = sklearn.datasets.make_circles(n_samples=batch_size, factor=.5, noise=0.08)[0]
        data = data.astype("float32")
        data *= 3
        return data

    elif data == "rings":
        n_samples4 = n_samples3 = n_samples2 = batch_size // 4
        n_samples1 = batch_size - n_samples4 - n_samples3 - n_samples2

        # so as not to have the first point = last point, we set endpoint=False
        linspace4 = np.linspace(0, 2 * np.pi, n_samples4, endpoint=False)
        linspace3 = np.linspace(0, 2 * np.pi, n_samples3, endpoint=False)
        linspace2 = np.linspace(0, 2 * np.pi, n_samples2, endpoint=False)
        linspace1 = np.linspace(0, 2 * np.pi, n_samples1, endpoint=False)

        circ4_x = np.cos(linspace4)
        circ4_y = np.sin(linspace4)
        circ3_x = np.cos(linspace3) * 0.75
        circ3_y = np.sin(linspace3) * 0.75
        circ2_x = np.cos(linspace2) * 0.5
        circ2_y = np.sin(linspace2) * 0.5
        circ1_x = np.cos(linspace1) * 0.25
        circ1_y = np.sin(linspace1) * 0.25

        X = np.vstack([
            np.hstack([circ4_x, circ3_x, circ2_x, circ1_x]),
            np.hstack([circ4_y, circ3_y, circ2_y, circ1_y])
        ]).T * 3.0
        X = util_shuffle(X, random_state=rng)

        # Add noise
        X = X + rng.normal(scale=0.08, size=X.shape)

        return X.astype("float32")

    elif data == "moons":
        data = sklearn.datasets.make_moons(n_samples=batch_size, noise=0.1)[0]
        data = data.astype("float32")
        data = data * 2 + np.array([-1, -0.2])
        return data

    elif data == "8gaussians":
        scale = 4.
        centers = [(1, 0), (-1, 0), (0, 1), (0, -1), (1. / np.sqrt(2), 1. / np.sqrt(2)),
                   (1. / np.sqrt(2), -1. / np.sqrt(2)), (-1. / np.sqrt(2),
                                                         1. / np.sqrt(2)), (-1. / np.sqrt(2), -1. / np.sqrt(2))]
        centers = [(scale * x, scale * y) for x, y in centers]

        dataset = []
        for i in range(batch_size):
            point = rng.randn(2) * 0.5
            idx = rng.randint(8)
            center = centers[idx]
            point[0] += center[0]
            point[1] += center[1]
            dataset.append(point)
        dataset = np.array(dataset, dtype="float32")
        dataset /= 1.414
        return dataset

    elif data == "pinwheel":
        radial_std = 0.3
        tangential_std = 0.1
        num_classes = 5
        num_per_class = batch_size // 5
        rate = 0.25
        rads = np.linspace(0, 2 * np.pi, num_classes, endpoint=False)

        features = rng.randn(num_classes*num_per_class, 2) \
            * np.array([radial_std, tangential_std])
        features[:, 0] += 1.
        labels = np.repeat(np.arange(num_classes), num_per_class)

        angles = rads[labels] + rate * np.exp(features[:, 0])
        rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)])
        rotations = np.reshape(rotations.T, (-1, 2, 2))

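        # einsum "ti,tij->tj": rotate each point by its class-dependent angle,
        # row t of the result being features[t] @ rotations[t]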
        return 2 * rng.permutation(np.einsum("ti,tij->tj", features, rotations))

    elif data == "2spirals":
        # Use the provided rng (not the global np.random state) so seeded
        # generators stay reproducible
        n = np.sqrt(rng.rand(batch_size // 2, 1)) * 540 * (2 * np.pi) / 360
        d1x = -np.cos(n) * n + rng.rand(batch_size // 2, 1) * 0.5
        d1y = np.sin(n) * n + rng.rand(batch_size // 2, 1) * 0.5
        x = np.vstack((np.hstack((d1x, d1y)), np.hstack((-d1x, -d1y)))) / 3
        x += rng.randn(*x.shape) * 0.1
        return x

    elif data == "checkerboard":
        # Use the provided rng here as well, for reproducibility
        x1 = rng.rand(batch_size) * 4 - 2
        x2_ = rng.rand(batch_size) - rng.randint(0, 2, batch_size) * 2
        x2 = x2_ + (np.floor(x1) % 2)
        return np.concatenate([x1[:, None], x2[:, None]], 1) * 2

    elif data == "line":
        x = rng.rand(batch_size) * 5 - 2.5
        y = x
        return np.stack((x, y), 1)
    elif data == "cos":
        x = rng.rand(batch_size) * 5 - 2.5
        y = np.sin(x) * 2.5
        return np.stack((x, y), 1)
    else:
        return inf_train_gen("8gaussians", rng, batch_size)
Example #23
    def data_sampler(batch_size):
        if args.dataset == "swissroll":
            data = sklearn.datasets.make_swiss_roll(n_samples=batch_size,
                                                    noise=1.0)[0]
            data = data.astype("float32")[:, [0, 2]]
            data /= 5
            data = torch.from_numpy(data)

        elif args.dataset == "circles":
            data = sklearn.datasets.make_circles(n_samples=batch_size,
                                                 factor=.5,
                                                 noise=0.08)[0]
            data = data.astype("float32")
            data *= 3
            data = torch.from_numpy(data)

        elif args.dataset == "rings":
            n_samples4 = n_samples3 = n_samples2 = batch_size // 4
            n_samples1 = batch_size - n_samples4 - n_samples3 - n_samples2

            # so as not to have the first point = last point, we set endpoint=False
            linspace4 = np.linspace(0, 2 * np.pi, n_samples4, endpoint=False)
            linspace3 = np.linspace(0, 2 * np.pi, n_samples3, endpoint=False)
            linspace2 = np.linspace(0, 2 * np.pi, n_samples2, endpoint=False)
            linspace1 = np.linspace(0, 2 * np.pi, n_samples1, endpoint=False)

            circ4_x = np.cos(linspace4)
            circ4_y = np.sin(linspace4)
            circ3_x = np.cos(linspace3) * 0.75
            circ3_y = np.sin(linspace3) * 0.75
            circ2_x = np.cos(linspace2) * 0.5
            circ2_y = np.sin(linspace2) * 0.5
            circ1_x = np.cos(linspace1) * 0.25
            circ1_y = np.sin(linspace1) * 0.25

            data = np.vstack([
                np.hstack([circ4_x, circ3_x, circ2_x, circ1_x]),
                np.hstack([circ4_y, circ3_y, circ2_y, circ1_y])
            ]).T * 3.0
            data = util_shuffle(data)

            # Add noise
            data = data.astype("float32")
            data += np.random.randn(*data.shape) * 0.1
            data = torch.from_numpy(data)

        elif args.dataset == "moons":
            data = sklearn.datasets.make_moons(n_samples=batch_size,
                                               noise=0.1)[0]
            data = data.astype("float32")
            data = data * 2 + np.array([-1, -0.2])
            data = data.astype("float32")
            data = torch.from_numpy(data)

        elif args.dataset == "pinwheel":
            radial_std = 0.3
            tangential_std = 0.1
            num_classes = 5
            num_per_class = batch_size // 5
            rate = 0.25
            rads = np.linspace(0, 2 * np.pi, num_classes, endpoint=False)

            features = np.random.randn(num_classes*num_per_class, 2) \
                * np.array([radial_std, tangential_std])
            features[:, 0] += 1.
            labels = np.repeat(np.arange(num_classes), num_per_class)

            angles = rads[labels] + rate * np.exp(features[:, 0])
            rotations = np.stack([
                np.cos(angles), -np.sin(angles),
                np.sin(angles),
                np.cos(angles)
            ])
            rotations = np.reshape(rotations.T, (-1, 2, 2))
            data = 2 * np.random.permutation(
                np.einsum("ti,tij->tj", features, rotations)).astype("float32")
            data = torch.from_numpy(data)

        elif args.dataset == "2spirals":
            n = np.sqrt(np.random.rand(batch_size // 2,
                                       1)) * 540 * (2 * np.pi) / 360
            d1x = -np.cos(n) * n + np.random.rand(batch_size // 2, 1) * 0.5
            d1y = np.sin(n) * n + np.random.rand(batch_size // 2, 1) * 0.5
            data = np.vstack((np.hstack((d1x, d1y)), np.hstack(
                (-d1x, -d1y)))) / 3
            data += np.random.randn(*data.shape) * 0.1
            data = data.astype("float32")
            data = torch.from_numpy(data)

        elif args.dataset == "checkerboard":
            x1 = np.random.rand(batch_size) * 4 - 2
            x2_ = np.random.rand(batch_size) - np.random.randint(
                0, 2, batch_size) * 2
            x2 = x2_ + (np.floor(x1) % 2)
            data = np.concatenate([x1[:, None], x2[:, None]],
                                  1).astype("float32") * 2
            data = torch.from_numpy(data)

        elif args.dataset == "line":
            x = np.random.rand(batch_size)
            x = x * 5 - 2.5
            y = x + np.random.randn(batch_size)
            data = np.stack((x, y), 1).astype("float32")
            data = torch.from_numpy(data)

        elif args.dataset == "cos":
            x = np.random.rand(batch_size) * 5 - 2.5
            y = np.sin(x) * 2.5
            data = np.stack((x, y), 1).astype("float32")
            data = torch.from_numpy(data)

        elif args.dataset == "joint_gaussian":
            x2 = torch.distributions.Normal(0., 4.).sample((batch_size, 1))
            x1 = torch.distributions.Normal(0., 1.).sample(
                (batch_size, 1)) + (x2**2) / 4
            data = torch.cat((x1, x2), 1)

        elif args.dataset == "8gaussians":
            scale = 4.0
            sq2 = 1.0 / np.sqrt(2)
            centers = [(1, 0), (-1, 0), (0, 1), (0, -1), (sq2, sq2),
                       (-sq2, sq2), (sq2, -sq2), (-sq2, -sq2)]
            centers = torch.tensor([(scale * x, scale * y)
                                    for x, y in centers]).float()
            noise = torch.randn(batch_size, 2)
            data = sq2 * (0.5 * noise +
                          centers[torch.randint(8, size=(batch_size, ))])

        elif args.dataset == "1gaussian":
            scale = 4.0
            sq2 = 1.0 / np.sqrt(2)
            centers = [(1, 0), (-1, 0)]
            centers = torch.tensor([(scale * x, scale * y)
                                    for x, y in centers]).float()
            noise = torch.randn(batch_size, 2)
            data = sq2 * (0.5 * noise +
                          centers[torch.randint(1, size=(batch_size, ))])

        elif args.dataset == "2gaussians":
            scale = 4.0
            sq2 = 1.0 / np.sqrt(2)
            centers = [(1, 0), (-1, 0)]
            centers = torch.tensor([(scale * x, scale * y)
                                    for x, y in centers]).float()
            noise = torch.randn(batch_size, 2)
            data = sq2 * (0.5 * noise +
                          centers[torch.randint(2, size=(batch_size, ))])

        elif args.dataset == "mog":
            num_clusters = args.mog_clusters
            mix_props = np.random.dirichlet([10.0] *
                                            num_clusters).astype("float32")
            mu = torch.from_numpy(
                np.random.normal(loc=[0.0, 0.0],
                                 scale=args.mog_sigma,
                                 size=[num_clusters, 2]).astype("float32"))
            sigma = np.repeat(np.eye(2)[None], num_clusters,
                              axis=0).astype("float32") * 0.8
            off_diag = np.random.uniform(low=0.0, high=0.8, size=[num_clusters]).astype("float32") * \
                np.random.choice([1, -1], size=[num_clusters])
            # Mirror the off-diagonal term so each covariance is symmetric
            sigma[:, 1, 0] = off_diag
            sigma[:, 0, 1] = off_diag
            mix_props = torch.from_numpy(mix_props)
            sigma = torch.from_numpy(sigma)

            # Unnormalized negative log-density of the mixture (defined here
            # but not used by this sampler)
            u_z = lambda z: -1.0 * torch.log(sum(
                torch.exp(D.MultivariateNormal(mu_i, sigma_i).log_prob(z)) * mix_props_i \
                for (mix_props_i, mu_i, sigma_i) in zip(mix_props, mu, sigma)))

            # Draw per-cluster counts that sum exactly to batch_size;
            # truncating with int(batch_size * p_i) can come up short and
            # break the reshape below
            props = mix_props.numpy().astype("float64")
            counts = np.random.multinomial(batch_size, props / props.sum())
            data = []
            for (count_i, mu_i, sigma_i) in zip(counts, mu, sigma):
                data.extend(
                    np.random.multivariate_normal(mu_i, sigma_i,
                                                  size=[int(count_i)]))

            data = torch.from_numpy(np.array(data).reshape([batch_size, 2]))

        else:
            raise ValueError(
                f"The toy dataset {args.dataset} hasn't been defined!")

        return data
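
A minimal sketch of driving this closure, assuming `data_sampler` is defined
in a scope where `args` resolves to a namespace-like object (the values below
are hypothetical stand-ins for the real argparse arguments).

from types import SimpleNamespace

# mog_clusters / mog_sigma only matter for the "mog" branch
args = SimpleNamespace(dataset="2spirals", mog_clusters=3, mog_sigma=2.0)

batch = data_sampler(256)
print(batch.shape, batch.dtype)  # torch.Size([256, 2]) torch.float32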