Example #1
def generate_relative_risk_from_distribution(
        random_state: np.random.RandomState,
        parameters: dict) -> Union[float, pd.Series, np.ndarray]:
    first = pd.Series(list(parameters.values())[0])
    length = len(first)
    index = first.index

    for v in parameters.values():
        if length != len(pd.Series(v)) or not index.equals(pd.Series(v).index):
            raise ValueError(
                'If specifying vectorized parameters, all parameters '
                'must be the same length and have the same index.')

    if 'mean' in parameters:  # normal distribution
        rr_value = random_state.normal(parameters['mean'], parameters['se'])
    elif 'log_mean' in parameters:  # log distribution
        log_value = (parameters['log_mean']
                     + parameters['log_se'] * random_state.randn())
        if parameters['tau_squared']:
            # nb: numpy's scale argument is a standard deviation, so
            # tau_squared is used directly as the scale here.
            log_value += random_state.normal(0, parameters['tau_squared'])
        rr_value = np.exp(log_value)
    else:
        raise NotImplementedError(
            'Only normal distributions (supplying mean and se) and log distributions '
            '(supplying log_mean, log_se, and tau_squared) are currently supported.'
        )

    rr_value = np.maximum(1, rr_value)

    return rr_value
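
A minimal usage sketch (not part of the original source; the values are illustrative, and the function assumes numpy and pandas are imported as np and pd):

import numpy as np

rs = np.random.RandomState(0)
# Normal-distribution branch: draws one relative risk and clips it below at 1.
rr = generate_relative_risk_from_distribution(rs, {'mean': 1.5, 'se': 0.2})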
Example #2
def fixture_measure_params(
    measure_name: str,
    input_dim: int,
    cov_diagonal: bool,
    random_state: np.random.RandomState,
) -> Dict:
    params = {"name": measure_name}

    if measure_name == "gauss":
        # set up mean and covariance
        if input_dim == 1:
            mean = random_state.normal(0, 1)
            cov = random_state.uniform(0.5, 1.5)
        else:
            mean = random_state.normal(0, 1, size=(input_dim, 1))
            if cov_diagonal:
                cov = random_state.uniform(0.5, 1.5, size=(input_dim, 1))
            else:
                mat = random_state.normal(0, 1, size=(input_dim, input_dim))
                cov = mat @ mat.T

        params["mean"] = mean
        params["cov"] = cov

    elif measure_name == "lebesgue":
        # set up bounds
        rv = random_state.uniform(0, 1, size=(input_dim, 2))
        domain = (rv[:, 0] - 1.0, rv[:, 1] + 1.0)

        params["domain"] = domain
        params["normalized"] = True

    return params
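
Exercised outside pytest, the fixture body behaves like a plain factory (hypothetical argument values):

rs = np.random.RandomState(42)
params = fixture_measure_params("gauss", input_dim=3, cov_diagonal=False, random_state=rs)
# params["cov"] is a 3x3 matrix of the form mat @ mat.T, hence symmetric positive semi-definite.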
Example #3
 def add_noise(action: Dict, noise_rng: np.random.RandomState):
     left_gripper_noise = noise_rng.normal(scale=0.01, size=[3])
     right_gripper_noise = noise_rng.normal(scale=0.01, size=[3])
     return {
         'left_gripper_position':
         action['left_gripper_position'] + left_gripper_noise,
         'right_gripper_position':
         action['right_gripper_position'] + right_gripper_noise
     }
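
A short usage sketch (illustrative, assuming numpy is imported as np):

rng = np.random.RandomState(7)
action = {'left_gripper_position': np.zeros(3),
          'right_gripper_position': np.ones(3)}
noisy = add_noise(action, rng)  # each position jittered with zero-mean, std-0.01 Gaussian noise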
Example #4
def _normal_random_recurrent_weights(
        hidden_layer_size: int, fan_in: int,
        random_state: np.random.RandomState) \
        -> Union[np.ndarray, scipy.sparse.csr_matrix]:
    """
    Return normally distributed random reservoir weights.

    Parameters
    ----------
    hidden_layer_size : Union[int, np.integer]
    fan_in : Union[int, np.integer]
        Determines how many features are mapped to one neuron.
    random_state : numpy.random.RandomState

    Returns
    -------
    normal_random_recurrent_weights : Union[np.ndarray, scipy.sparse.csr_matrix],
        shape=(hidden_layer_size, hidden_layer_size)
    """
    nr_entries = int(hidden_layer_size * fan_in)
    weights_array = random_state.normal(loc=0., scale=1., size=nr_entries)

    if fan_in < hidden_layer_size:
        indices = np.zeros(shape=nr_entries, dtype=int)
        indptr = np.arange(start=0,
                           stop=(hidden_layer_size + 1) * fan_in,
                           step=fan_in)

        for en in range(0, hidden_layer_size * fan_in, fan_in):
            indices[en:en + fan_in] = random_state.permutation(
                hidden_layer_size)[:fan_in].astype(int)
        recurrent_weights_init = scipy.sparse.csr_matrix(
            (weights_array, indices, indptr),
            shape=(hidden_layer_size, hidden_layer_size),
            dtype='float64')
    else:
        recurrent_weights_init = weights_array.reshape(
            (hidden_layer_size, hidden_layer_size))
    try:
        # ``eigens`` is presumably scipy.sparse.linalg.eigs imported under an alias.
        we = eigens(recurrent_weights_init,
                    k=np.minimum(10, hidden_layer_size - 2),
                    which='LM',
                    return_eigenvectors=False,
                    v0=random_state.normal(loc=0.,
                                           scale=1.,
                                           size=hidden_layer_size))
    except ArpackNoConvergence as err:
        print("WARNING: No convergence! Returning possibly invalid values!!!")
        # The eigenvalues that did converge live on the exception instance,
        # not on the exception class.
        we = err.eigenvalues
    return recurrent_weights_init / np.amax(np.absolute(we))
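
A usage sketch (illustrative; a fan_in below hidden_layer_size selects the sparse CSR branch):

rs = np.random.RandomState(0)
W = _normal_random_recurrent_weights(hidden_layer_size=100, fan_in=5, random_state=rs)
# W is a 100x100 sparse matrix, rescaled toward unit spectral radius.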
Example #5
def fixture_x1(request, input_dim: int,
               random_state: np.random.RandomState) -> Optional[np.ndarray]:
    """Random data from a standard normal distribution."""
    if request.param is None:
        return None
    else:
        return random_state.normal(0, 1, size=(request.param, input_dim))
Example #6
def spiral_classification_dataset(
    n_sup: int,
    balance_classes: bool,
    rng: np.random.RandomState,
    N: int = 5000,
    spiral_radius: float = 20,
    img_size=(256, 256)) -> ClassificationDataset2D:
    # Generate spiral dataset
    # Taking the sqrt of the randomly drawn radii ensures uniform sample distribution
    # Using plain uniform distribution results in samples concentrated at the centre
    radius0 = np.sqrt(rng.uniform(low=1.0, high=spiral_radius**2, size=(N, )))
    radius1 = np.sqrt(rng.uniform(low=1.0, high=spiral_radius**2, size=(N, )))
    theta0 = radius0 * 0.5
    theta1 = radius1 * 0.5 + np.pi
    radius = np.append(radius0, radius1, axis=0)
    theta = np.append(theta0, theta1, axis=0)
    X = np.stack([np.sin(theta) * radius, np.cos(theta) * radius], axis=1)
    y = np.append(np.zeros(radius0.shape, dtype=int),
                  np.ones(radius1.shape, dtype=int),
                  axis=0)

    X = X + rng.normal(size=X.shape) * 0.2

    X = X / spiral_radius

    return SplitClassificationDataset2D(X, y, img_size, n_sup, balance_classes,
                                        rng)
Example #7
def classification_dataset_from_image(
        image_path: str, region_erode_radius: int, img_noise_std: float,
        n_sup: int, balance_classes: bool,
        rng: np.random.RandomState) -> ClassificationDatasetFromImage2D:
    img = np.array(Image.open(image_path))
    img = img_as_float(rgb2grey(img))
    img_bin = img >= 0.5

    img_size = img_bin.shape

    if region_erode_radius > 0:
        img_cls_1 = binary_erosion(img_bin, iterations=region_erode_radius)
        img_cls_0 = binary_erosion(~img_bin, iterations=region_erode_radius)
    else:
        img_cls_1 = img_bin
        img_cls_0 = ~img_bin

    samples_0_y, samples_0_x = np.where(img_cls_0)
    samples_1_y, samples_1_x = np.where(img_cls_1)

    X_img_0 = np.stack([samples_0_y, samples_0_x], axis=1)
    X_img_1 = np.stack([samples_1_y, samples_1_x], axis=1)
    y_0 = np.zeros((len(X_img_0), ), dtype=int)
    y_1 = np.ones((len(X_img_1), ), dtype=int)
    X_img = np.append(X_img_0, X_img_1, axis=0)
    y = np.append(y_0, y_1, axis=0)

    X_img = X_img + rng.normal(loc=0, scale=img_noise_std, size=X_img.shape)

    X_real = ((X_img) / np.array(img_size)) * 2 - 1

    return ClassificationDatasetFromImage2D(img, X_real, y, img_size, n_sup,
                                            balance_classes, rng)
Example #8
def fixture_args0(
    request,
    random_process: randprocs.RandomProcess,
    random_state: np.random.RandomState,
) -> np.ndarray:
    """Input(s) to a random process."""
    return random_state.normal(size=(request.param, random_process.input_dim))
Example #9
    def get_neighbors(self,
                      value: Union[int, float],
                      rs: np.random.RandomState,
                      number: int = 4,
                      transform: bool = False) -> List[int]:
        neighbors = []  # type: List[int]
        while len(neighbors) < number:
            rejected = True
            iteration = 0
            while rejected:
                iteration += 1
                new_min_value = np.min([1, rs.normal(loc=value, scale=0.2)])
                new_value = np.max((0, new_min_value))
                int_value = self._transform(value)
                new_int_value = self._transform(new_value)
                if int_value != new_int_value:
                    rejected = False
                elif iteration > 100000:
                    raise ValueError('Probably caught in an infinite loop.')

            if transform:
                neighbors.append(self._transform(new_value))
            else:
                neighbors.append(new_value)

        return neighbors
Example #10
    def simulate(self, t: np.ndarray, n: int,
                 rnd: np.random.RandomState) -> np.ndarray:
        dt = get_dt(t, n, rnd)

        # transposed simulation for automatic broadcasting
        W = rnd.normal(size=(n, t.size))
        W_drift = (W * np.sqrt(dt) * self.sigma + self.mu * dt).T
        return np.cumsum(W_drift, axis=0)
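
A sketch of driving the simulator (hypothetical: model is an instance of the surrounding class with mu and sigma set, and get_dt is assumed to return the time increments):

t = np.linspace(0.01, 1.0, 100)
rnd = np.random.RandomState(1)
paths = model.simulate(t, n=10, rnd=rnd)  # shape (t.size, 10): one drifted Brownian path per column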
Example #11
    def simulate(self, t: np.ndarray, n: int,
                 rnd: np.random.RandomState) -> np.ndarray:
        dt = get_dt(t, n, rnd)

        # transposed simulation for automatic broadcasting
        dW = (rnd.normal(size=(t.size, n)).T * np.sqrt(dt)).T
        W = np.cumsum(dW, axis=0)
        return np.exp(self.sigma * W.T + (self.mu - self.sigma**2 / 2) * t).T
Example #12
 def __init__(self, n: int, p: int, σ: float, rs: np.random.RandomState):
     self.n = n
     self.p = p
     self.σ = σ
     self.x = rs.uniform(-10, 10, n * p).reshape((n, p))
     self.β = rs.uniform(-10, 10, p)
     ϵ = rs.normal(0, σ, n)
     self.y = self.x @ self.β + ϵ
Example #13
 def get_neighbors(self,
                   value: float,
                   rs: np.random.RandomState,
                   number: int = 4,
                   transform: bool = False) -> List[float]:
     neighbors = []
     for i in range(number):
         neighbors.append(rs.normal(value, self.sigma))
     return neighbors
Example #14
def _noisy_call(
    x: np.ndarray,
    transf: tp.Callable[[np.ndarray], np.ndarray],
    func: tp.Callable[[np.ndarray], float],
    noise_level: float,
    noise_dissymmetry: bool,
    random_state: np.random.RandomState,
) -> float:  # pylint: disable=unused-argument
    x_transf = transf(x)
    fx = func(x_transf)
    noise = 0
    if noise_level:
        if not noise_dissymmetry or x_transf.ravel()[0] <= 0:
            side_point = transf(x + random_state.normal(0, 1, size=len(x)))
            if noise_dissymmetry:
                noise_level *= 1.0 + x_transf.ravel()[0] * 100.0
            noise = noise_level * random_state.normal(0, 1) * (func(side_point) - fx)
    return fx + noise
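
For example (illustrative; transf is the identity and func a simple quadratic):

rs = np.random.RandomState(0)
fx = _noisy_call(np.zeros(3), transf=lambda x: x, func=lambda x: float(np.sum(x ** 2)),
                 noise_level=0.1, noise_dissymmetry=False, random_state=rs)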
Example #15
def make_trials(system: VisionSystem, image_collection: ImageCollection,
                repeats: int, random: np.random.RandomState):
    # Get the true motions, for making trials
    true_motions = [
        image_collection.images[frame_idx - 1].camera_pose.find_relative(
            image_collection.images[frame_idx].camera_pose)
        if frame_idx > 0 else None
        for frame_idx in range(len(image_collection))
    ]

    # Make some plausible trial results
    trial_results = []
    for repeat in range(repeats):
        start_idx = random.randint(0, len(image_collection) - 2)
        frame_results = [
            FrameResult(
                timestamp=timestamp,
                image=image,
                pose=image.camera_pose,
                processing_time=random.uniform(0.001, 1.0),
                estimated_motion=true_motions[frame_idx].find_independent(
                    Transform(location=random.normal(0, 1, 3),
                              rotation=t3.quaternions.axangle2quat(
                                  random.uniform(-1, 1, 3),
                                  random.normal(0, np.pi / 2)),
                              w_first=True))
                if frame_idx > start_idx else None,
                tracking_state=TrackingState.OK
                if frame_idx > start_idx else TrackingState.NOT_INITIALIZED,
                num_matches=random.randint(10, 100))
            for frame_idx, (timestamp, image) in enumerate(image_collection)
        ]
        frame_results[start_idx].estimated_pose = Transform()
        trial_settings = {'random': random.randint(0, 10), 'repeat': repeat}
        trial_result = SLAMTrialResult(system=system,
                                       image_source=image_collection,
                                       success=True,
                                       results=frame_results,
                                       has_scale=False,
                                       settings=trial_settings)
        trial_result.save()
        trial_results.append(trial_result)
    return trial_results
Example #16
 def simulate(self, t: np.ndarray, n: int,
              rnd: np.random.RandomState) -> np.ndarray:
     assert t.ndim == 1, "One dimensional time vector required"
     assert t.size > 0, "At least one time point is required"
     dt = np.concatenate((t[0:1], np.diff(t)))
     assert (dt >= 0).all(), "Increasing time vector required"
     # transposed simulation for automatic broadcasting
     W = rnd.normal(size=(n, t.size))
     W_drift = W * np.sqrt(dt) * self.sigma + self.mu_t(t)
     return np.cumsum(W_drift, axis=1)
Example #17
 def simulate(self, t: np.ndarray, n: int,
              rnd: np.random.RandomState) -> np.ndarray:
     assert t.ndim == 1, "One dimensional time vector required"
     assert t.size > 0, "At least one time point is required"
     dt = np.concatenate((t[0:1], np.diff(t)))
     assert (dt >= 0).all(), "Increasing time vector required"
     # transposed simulation for automatic broadcasting
     dW = (rnd.normal(size=(t.size, n)).T * np.sqrt(dt)).T
     W = np.cumsum(dW, axis=0)
     return np.exp(self.sigma * W.T + self.mu_t(t) - self.sigma**2 / 2 * t)
Example #18
def make_noisy_data(
        m: float = 0.1,
        b: float = 0.3,
        n_samples: int = 5,
        e_std: float = 0.01,
        random_state: np.random.RandomState = np.random.RandomState()):
    # Note: the default RandomState is created once, when the function is
    # defined, so repeated default calls share one generator.
    x = random_state.uniform(size=n_samples)
    e = random_state.normal(scale=e_std, size=len(x)).astype(np.float32)
    y = m * x + b + e
    return x, y
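
Usage is straightforward (illustrative seed):

x, y = make_noisy_data(n_samples=100, random_state=np.random.RandomState(3))
# y is approximately 0.1 * x + 0.3, perturbed by N(0, 0.01**2) noise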
Example #19
 def __init__(
     self,
     indices: tp.List[int],
     translation_factor: float = 1,
     rotation: bool = False,
     random_state: tp.Optional[np.random.RandomState] = None,
 ) -> None:
     dim = len(indices)
     assert dim
     if random_state is None:
         random_state = np.random.RandomState(0)
         random_state.set_state(np.random.get_state())
     self.indices = np.asarray(indices)
     self.translation: np.ndarray = random_state.normal(
         0, 1, dim) * translation_factor
     self.rotation_matrix: tp.Optional[np.ndarray] = None
     if rotation:
         self.rotation_matrix = np.linalg.qr(
             random_state.normal(0, 1, size=(dim, dim)))[0]
Example #20
 def get_neighbors(self, value: Any, rs: np.random.RandomState, number: int = 4, transform: bool = False) -> List[float]:
     neighbors = []  # type: List[float]
     while len(neighbors) < number:
         neighbor = rs.normal(value, 0.2)
         if neighbor < 0 or neighbor > 1:
             continue
         if transform:
             neighbors.append(self._transform(neighbor))
         else:
             neighbors.append(neighbor)
     return neighbors
Example #22
def test_evaluated_random_process_is_random_variable(
        random_process: randprocs.RandomProcess,
        random_state: np.random.RandomState):
    """Test whether evaluating a random process returns a random variable."""
    n_inputs_args0 = 10
    args0 = random_state.normal(size=(n_inputs_args0,
                                      random_process.input_dim))
    y0 = random_process(args0)

    assert isinstance(y0, randvars.RandomVariable), (
        f"Output of {repr(random_process)} is not a random variable."
    )
Example #23
    def crossover(
        self,
        parents_params: np.ndarray,
        rng: np.random.RandomState,
        study: Study,
        search_space_bounds: np.ndarray,
    ) -> np.ndarray:

        # https://ieeexplore.ieee.org/document/782672
        # Section 2 Unimodal Normal Distribution Crossover
        n = len(search_space_bounds)
        xp = (parents_params[0] + parents_params[1]) / 2  # Section 2 (2).
        d = parents_params[0] - parents_params[1]  # Section 2 (3).
        if self._sigma_eta is None:
            sigma_eta = 0.35 / np.sqrt(n)
        else:
            sigma_eta = self._sigma_eta

        # The paper specifies eta ~ N(0, sigma_eta^2) and xi ~ N(0, sigma_xi^2);
        # note that numpy's second argument is the scale (standard deviation).
        etas = rng.normal(0, sigma_eta**2, size=n)
        xi = rng.normal(0, self._sigma_xi**2)
        es = self._orthonormal_basis_vector_to_psl(
            parents_params, n
        )  # Orthonormal basis vectors of the subspace orthogonal to the psl.
        one = xp  # Section 2 (5).
        two = xi * d  # Section 2 (5).

        if n > 1:  # When n=1, there is no subsearch component.
            three = np.zeros(n)  # Section 2 (5).
            D = self._distance_from_x_to_psl(parents_params)  # Section 2 (4).
            for i in range(n - 1):
                three += etas[i] * es[i]
            three *= D
            child_params = one + two + three

        else:
            child_params = one + two

        return child_params
Example #24
def simulate_data(covariates: int, scales: Sequence[int],
                  levels: Sequence[int], singletons: float,
                  state: np.random.RandomState) -> Tuple[Array, Array, Array]:
    """Simulate IDs and data matrices."""

    # simulate fixed effects
    ids = np.array(
        list(
            itertools.product(*(np.repeat(np.arange(l), s)
                                for s, l in zip(scales, levels)))))
    fe = np.array(
        list(
            itertools.product(*(np.repeat(state.normal(size=l), s)
                                for s, l in zip(scales, levels)))))

    # count dimensions
    N, M = ids.shape

    # shuffle the IDs
    for index in range(M):
        indices = np.arange(N)
        state.shuffle(indices)
        ids[indices, index] = ids.copy()[:, index]

    # shuffle and replace shares of the data with singletons
    indices = np.arange(N)
    for index in range(M):
        state.shuffle(indices)
        singleton_indices = indices[:int(singletons * N / M)]
        ids[indices, index] = ids.copy()[:, index]
        ids[singleton_indices, index] = -np.arange(singleton_indices.size)

    # simulate remaining data
    error = state.normal(size=(N, 1))
    X = state.normal(size=(N, covariates))
    y = X.sum(axis=1, keepdims=True) + fe.sum(axis=1, keepdims=True) + error
    return ids, X, y
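
A usage sketch (illustrative arguments; the function also relies on itertools and the module's Array type alias):

state = np.random.RandomState(0)
ids, X, y = simulate_data(covariates=2, scales=[2, 3], levels=[3, 2],
                          singletons=0.1, state=state)
# ids holds one column of group labels per fixed-effect dimension;
# y sums the covariates, the fixed effects, and a noise term.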
Example #25
def test_rmatvec(
    linop: pn.linops.LinearOperator,
    matrix: np.ndarray,
    random_state: np.random.RandomState,
):
    vec = random_state.normal(size=linop.shape[0])

    linop_matvec = vec @ linop
    matrix_matvec = vec @ matrix

    assert linop_matvec.ndim == 1
    assert linop_matvec.shape == matrix_matvec.shape
    assert linop_matvec.dtype == matrix_matvec.dtype

    np.testing.assert_allclose(linop_matvec, matrix_matvec)
Example #26
def diagonal_potential(d_1: int, d_2: int,
                       rng: np.random.RandomState) -> np.ndarray:
    factor_potential = rng.randint(4, 6, size=(d_1, d_2)) * 1.0
    dim = np.min([d_1, d_2])
    identity = np.eye(dim)
    if rng.normal(size=1) > 1:
        identity = np.flip(identity, axis=0)
    if d_2 > d_1:
        diagonal = np.concatenate([identity, np.zeros((dim, d_2 - dim))], axis=1)
    elif d_2 < d_1:
        diagonal = np.concatenate([identity, np.zeros((d_1 - dim, dim))], axis=0)
    else:
        diagonal = identity

    # diagonal = rng.permutation(diagonal)
    diagonal_dominance = np.exp(factor_potential) + diagonal * 50000
    return diagonal_dominance / np.mean(diagonal_dominance)
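
For instance (illustrative dimensions):

rng = np.random.RandomState(5)
pot = diagonal_potential(3, 4, rng)  # a (3, 4) potential, diagonally dominant and normalized to unit mean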
Example #27
 def _generate_sample(self, rng_sample: np.random.RandomState):
     idx = random_index_based_on_weights(self.centroid_weights, rng_sample)
     current_centroid = self.centroids[idx]
     att_vals = dict()
     magnitude = 0.0
     for i in range(self.n_features):
         att_vals[i] = (rng_sample.rand() * 2.0) - 1.0
         magnitude += att_vals[i] * att_vals[i]
     magnitude = np.sqrt(magnitude)
     desired_mag = rng_sample.normal() * current_centroid.std_dev
     scale = desired_mag / magnitude
     x = {
         i: current_centroid.centre[i] + att_vals[i] * scale
         for i in range(self.n_features)
     }
     y = current_centroid.class_label
     return x, y
Example #28
def test_matmat(
    linop: pn.linops.LinearOperator,
    matrix: np.ndarray,
    random_state: np.random.RandomState,
    ncols: int,
    order: str,
):
    mat = np.asarray(random_state.normal(size=(linop.shape[1], ncols)),
                     order=order)

    linop_matmat = linop @ mat
    matrix_matmat = matrix @ mat

    assert linop_matmat.ndim == 2
    assert linop_matmat.shape == matrix_matmat.shape
    assert linop_matmat.dtype == matrix_matmat.dtype

    np.testing.assert_allclose(linop_matmat, matrix_matmat)
Example #29
    def integrate(
            self, dimensions: int, market_ids: Iterable[str],
            state: np.random.RandomState) -> Tuple[Vector, Matrix, Vector]:
        """Build concatenated IDs, nodes, and weights for each market ID."""
        ids_list: List[Vector] = []
        nodes_list: List[Matrix] = []
        weights_list: List[Vector] = []

        for market_id in market_ids:
            nodes = state.normal(size=(self._ns, dimensions))
            weights = np.repeat(1 / self._ns, self._ns)
            ids_list.append(np.repeat(market_id, len(nodes)))
            nodes_list.append(nodes)
            weights_list.append(weights)

        return np.concatenate(ids_list), np.vstack(nodes_list), np.concatenate(
            weights_list)
Example #30
def get_white_noise_for_signal(time_signal,
                               *,
                               snr,
                               rng_state: np.random.RandomState = np.random):
    """
        Args:
            time_signal:
            snr: SNR or single speaker SNR.
            rng_state: A random number generator object or np.random
        """
    noise_signal = rng_state.normal(size=time_signal.shape)

    power_time_signal = np.mean(time_signal**2, keepdims=True)
    power_noise_signal = np.mean(noise_signal**2, keepdims=True)
    current_snr = 10 * np.log10(power_time_signal / power_noise_signal)

    factor = 10**(-(snr - current_snr) / 20)

    noise_signal *= factor
    return noise_signal
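
A usage sketch (illustrative; any array-shaped signal works):

rs = np.random.RandomState(0)
signal = rs.normal(size=16000)  # stand-in for one second of 16 kHz audio
noise = get_white_noise_for_signal(signal, snr=20, rng_state=rs)
noisy = signal + noise  # the mixture now has roughly a 20 dB SNR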
Example #31
def crosshatch_classification_dataset(rng: np.random.RandomState, grid_size: int,
                                      points_per_cell: int, cell_off_std: float = 0.05,
                                      n_sup: int = 2,
                                      img_size=(256, 256)) -> ClassificationDataset2D:
    # Generate cross-hatch dataset
    cell_size = 2.0 / grid_size
    cell_off_std = cell_off_std * cell_size

    g = np.linspace(-1, 1, grid_size + 1)
    x0, y0 = np.meshgrid(g, g)
    X0 = np.stack([y0, x0], axis=2).reshape((-1, 2))
    X0 = np.repeat(X0, points_per_cell, axis=0)

    x1, y1 = np.meshgrid(g[:-1] + cell_size * 0.5, g[:-1] + cell_size * 0.5)
    X1 = np.stack([y1, x1], axis=2).reshape((-1, 2))
    X1 = np.repeat(X1, points_per_cell, axis=0)

    X = np.append(X0, X1, axis=0)
    X = X + rng.normal(size=X.shape) * cell_off_std
    y = np.append(np.zeros((len(X0), ), dtype=int),
                  np.ones((len(X1), ), dtype=int),
                  axis=0)

    sup_X = np.array([[0.0, 0.0], [cell_size * 0.5, cell_size * 0.5]])
    sup_y = np.array([0, 1])

    if n_sup == -1:
        sup_indices = np.arange(len(y))
        unsup_indices = np.arange(2) + len(y)
    else:
        unsup_indices = np.arange(len(y))
        sup_indices = np.arange(2) + len(y)

    X = np.append(X, sup_X, axis=0)
    y = np.append(y, sup_y, axis=0)

    ds = ClassificationDataset2D(X, y, img_size, sup_indices, unsup_indices)

    ds.cell_size = cell_size
    ds.cell_off_std = cell_off_std

    return ds
Example #32
    def get_neighbors(self, value: Union[int, float], rs: np.random.RandomState,
                      number: int = 4, transform: bool = False
                      ) -> List[Union[np.ndarray, float, int]]:
        neighbors = []  # type: List[Union[np.ndarray, float, int]]
        while len(neighbors) < number:
            rejected = True
            iteration = 0
            while rejected:
                iteration += 1
                new_value = rs.normal(value, self.sigma)
                int_value = self._transform(value)
                new_int_value = self._transform(new_value)
                if int_value != new_int_value:
                    rejected = False
                elif iteration > 100000:
                    raise ValueError('Probably caught in an infinite loop.')

            if transform:
                neighbors.append(self._transform(new_value))
            else:
                neighbors.append(new_value)
        return neighbors
Example #33
    def get_neighbors(self, value: Union[int, float], rs: np.random.RandomState,
                      number: int = 4, transform: bool = False) -> List[int]:
        neighbors = []  # type: List[int]
        while len(neighbors) < number:
            rejected = True
            iteration = 0
            while rejected:
                iteration += 1
                new_min_value = np.min([1, rs.normal(loc=value, scale=0.2)])
                new_value = np.max((0, new_min_value))
                int_value = self._transform(value)
                new_int_value = self._transform(new_value)
                if int_value != new_int_value:
                    rejected = False
                elif iteration > 100000:
                    raise ValueError('Probably caught in an infinite loop.')

            if transform:
                neighbors.append(self._transform(new_value))
            else:
                new_value = self._transform(new_value)
                new_value = self._inverse_transform(new_value)
                neighbors.append(new_value)

        return neighbors
Example #35
 def _sample(self, rs: np.random.RandomState, size: Union[None, int] = None) -> np.ndarray:
     mu = self.mu
     sigma = self.sigma
     return rs.normal(mu, sigma, size=size)