Example 1
    def get_neighbors(self, value: int, rs: np.random.RandomState, number: Union[int, float] = np.inf, transform: bool = False) -> \
            List[Union[float, int, str]]:
        neighbors = []  # type: List[Union[float, int, str]]
        if number < len(self.choices):
            while len(neighbors) < number:
                rejected = True
                index = int(value)
                while rejected:
                    neighbor_idx = rs.randint(0, self._num_choices)
                    if neighbor_idx != index:
                        rejected = False

                if transform:
                    candidate = self._transform(neighbor_idx)
                else:
                    candidate = float(neighbor_idx)

                if candidate in neighbors:
                    continue
                else:
                    neighbors.append(candidate)
        else:
            for candidate_idx, candidate_value in enumerate(self.choices):
                if int(value) == candidate_idx:
                    continue
                else:
                    if transform:
                        candidate = self._transform(candidate_idx)
                    else:
                        candidate = float(candidate_idx)

                    neighbors.append(candidate)

        return neighbors
Example 2
 def get_neighbors(self, value: Any, rs: np.random.RandomState, number: int = 4, transform: bool = False) -> List[float]:
     neighbors = []  # type: List[float]
     while len(neighbors) < number:
         neighbor = rs.normal(value, 0.2)
         if neighbor < 0 or neighbor > 1:
             continue
         if transform:
             neighbors.append(self._transform(neighbor))
         else:
             neighbors.append(neighbor)
     return neighbors
Example 3
def mutate_weights(genome: Genome, rnd: np.random.RandomState,
                   config: NeatConfig) -> Genome:
    """
    Mutate the connection weights using the given random generator and config
    :param genome: the genome whose weights should be mutated
    :param rnd: a random generator that determines which weights are changed and by how much
    :param config: a config that specifies the probability and magnitude of the changes
    :return: the mutated genome
    """
    for connection in genome.connections:
        # Should mutate weights?
        if rnd.uniform(0, 1) <= config.probability_weight_mutation:
            # Assign random weight or perturb existing weight?
            if rnd.uniform(0, 1) <= config.probability_random_weight_mutation:
                connection.weight = rnd.uniform(
                    low=config.connection_initial_min_weight,
                    high=config.connection_initial_max_weight)
            else:
                # Check how the connection weight should be mutated
                mutation_type = config.weight_mutation_type
                if mutation_type == "uniform":
                    connection.weight += rnd.uniform(
                        -config.weight_mutation_uniform_max_change,
                        config.weight_mutation_uniform_max_change)
                elif mutation_type == "normal":
                    connection.weight += rnd.normal(
                        loc=0, scale=config.weight_mutation_normal_sigma)
                else:
                    raise AssertionError(
                        "Unknown type of mutation type. Must be 'uniform' or 'normal'"
                    )

                connection.weight = np.clip(connection.weight,
                                            a_min=config.connection_min_weight,
                                            a_max=config.connection_max_weight)
    return genome
Example 4
def set_new_genome_bias(genome: Genome, rnd: np.random.RandomState,
                        config: NeatConfig) -> Genome:
    """
    Set new bias values in all nodes (except input nodes) with the given random generator
    :param genome: the genome whose bias values should be modified
    :param rnd: the random generator
    :param config: a neat config that specifies min and max values
    :return: the modified genome
    """
    for node in genome.nodes:
        if node.node_type == NodeType.INPUT:
            continue
        node.bias = rnd.uniform(low=config.bias_initial_min,
                                high=config.bias_initial_max)
    return genome
Example 5
 def get_neighbors(self,
                   value: Any,
                   rs: np.random.RandomState,
                   number: int = 4,
                   transform: bool = False) -> List[float]:
     neighbors = []  # type: List[float]
     while len(neighbors) < number:
         neighbor = rs.normal(value, 0.2)
         if neighbor < 0 or neighbor > 1:
             continue
         if transform:
             neighbors.append(self._transform(neighbor))
         else:
             neighbors.append(neighbor)
     return neighbors
Example 6
def _gen_soln(Z: np.ndarray, rng: np.random.RandomState):
    """
    Generates feasible integer solutions using a probabilistic sampling
    heuristic
    """
    Z_int = np.zeros(shape=Z.shape, dtype=np.int32)
    C, L = Z.shape

    # Go through each label, and sample proportionally to the "probability
    # distribution" defined by each row
    for i in range(C):
        group = rng.choice(a=np.arange(L), size=1, p=Z[i, :])
        Z_int[i, group] = 1

    return Z_int
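A minimal usage sketch for the snippet above (assuming numpy is imported as np and each row of Z already sums to one, as the heuristic requires):

rng = np.random.RandomState(0)
Z = np.array([[0.7, 0.2, 0.1],
              [0.1, 0.1, 0.8]])   # rows act as per-label "probability distributions"
Z_int = _gen_soln(Z, rng)         # exactly one entry per row is set to 1, sampled according to that row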
Example 7
def move_one_object_to_the_air(
    current_placement: np.ndarray,
    height_range: Tuple[float, float],
    random_state: np.random.RandomState,
) -> np.ndarray:
    """
    Modify current_placement to move one object to the air.

    :param current_placement: np.ndarray of size (num_objects, 3) where columns are x, y, z
        coordinates of objects relative to the world frame.
    :param height_range: One object is moved along z direction to have height from table in a
        range (min_height, max_height). Height is randomly sampled.
    :param random_state: numpy RandomState to use for sampling
    :return: modified object placement. np.ndarray of (num_objects, 3) where columns are x, y, z
        coordinates of objects relative to the world frame.
    """
    n_objects = current_placement.shape[0]
    min_h, max_h = height_range

    height = random_state.uniform(low=min_h, high=max_h)
    target_i = random_state.randint(n_objects)

    current_placement[target_i, -1] += height
    return current_placement
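A short usage sketch (the placement array here is hypothetical; note that the input array is modified in place and also returned):

rs = np.random.RandomState(42)
placement = np.zeros((3, 3))   # three objects on the table, columns are x, y, z
lifted = move_one_object_to_the_air(placement, height_range=(0.1, 0.5), random_state=rs)
# exactly one z coordinate (last column) has been increased by a height sampled from [0.1, 0.5)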
Example 8
    def _generate_mini_batches(
        self,
        id_list: List[str],
        batches: Dict[str, List[torch.Tensor]],
        shuffle: bool,
        state: np.random.RandomState,
    ):
        if shuffle:
            indices = np.arange(0, len(id_list))
            state.shuffle(indices)
            batches = {k: [v[i] for i in indices] for k, v in batches.items()}
            id_list = [id_list[i] for i in indices]

        bs = self.batch_size
        while len(id_list) >= bs:
            # Make mini-batch and yield
            yield (
                id_list[:bs],
                {k: torch.stack(v[:bs], 0)
                 for k, v in batches.items()},
            )
            id_list = id_list[bs:]
            batches = {k: v[bs:] for k, v in batches.items()}
        return id_list, batches
Example 9
def random_reward(n_states: int,
                  n_actions: int,
                  rng: np.random.RandomState = np.random) -> np.ndarray:
    """Generates a random reward matrix.

    Args:
        n_states: The number of states.
        n_actions: The number of actions.
        rng: Random number generator.

    Returns:
        A three-dimensional array R, where R[s,a,s'] is the reward starting at state
        s, taking action a, and transitioning to state s'.
    """
    return rng.rand(n_states, n_actions, n_states)
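A quick shape check, as a usage sketch:

rng = np.random.RandomState(0)
R = random_reward(n_states=3, n_actions=2, rng=rng)
assert R.shape == (3, 2, 3)   # R[s, a, s'] is a reward in [0, 1)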
Example 10
    def _measure(self, q, prng: np.random.RandomState) -> int:
        """Measures the q'th qubit.

        Reference: Section 4.1 "Simulating measurements"

        Returns: Computational basis measurement as 0 or 1.
        """
        w = self.s.copy()
        for i, v_i in enumerate(self.v):
            if v_i == 1:
                w[i] = bool(prng.randint(2))
        x_i = sum(w & self.G[q, :]) % 2
        # Project the state to the above measurement outcome.
        self.project_Z(q, x_i)
        return x_i
Example 11
def set_new_genome_weights(genome: Genome, rnd: np.random.RandomState,
                           config: NeatConfig) -> Genome:
    """
    Set new weights for the connections of the genome with the given random generator.
    :param genome: the genome whose weights should be randomized
    :param rnd: random generator used to draw the new weights
    :param config: the neat config that specifies max and min weight
    :return: the modified genome
    """
    for connection in genome.connections:
        connection.weight = rnd.uniform(
            low=config.connection_initial_min_weight,
            high=config.connection_initial_max_weight)

    return genome
Example 12
def test_rmatvec(
    linop: pn.linops.LinearOperator,
    matrix: np.ndarray,
    random_state: np.random.RandomState,
):
    vec = random_state.normal(size=linop.shape[0])

    linop_matvec = vec @ linop
    matrix_matvec = vec @ matrix

    assert linop_matvec.ndim == 1
    assert linop_matvec.shape == matrix_matvec.shape
    assert linop_matvec.dtype == matrix_matvec.dtype

    np.testing.assert_allclose(linop_matvec, matrix_matvec)
Example 13
def default_format_fn(
    sample: Dict[str, Any],
    input_prefix: str,
    output_prefix: str,
    choice_prefix: str,
    rng: np.random.RandomState,
    append_choices_to_input: bool = True,
) -> Dict[str, Any]:
    """Default format for tasks.

    Args:
      sample: Dictionary with an 'input' entry and a 'target' or 'target_scores'
        entry (or both), describing a single example.
      input_prefix: input prefix, prepended to all inputs.
      output_prefix: output prefix, prepended to outputs and choices (if present).
      choice_prefix: prefix prepended to each choice in a multiple-choice question.
      rng: random number generator
      append_choices_to_input: append choices to input for multiple choice.

    Returns:
      sample: Formatted dictionary, with 'choice' key added if present in input.
    Raises:
      Exception: If output not in choices.
    """

    def input_format(text):
        return input_prefix + text

    if "target_scores" in sample:
        choice_dic = sample["target_scores"]
        if append_choices_to_input:
            permuted_choices = rng.permutation(sorted(list(choice_dic.keys())))
            sample["input"] = (
                sample["input"] + choice_prefix + choice_prefix.join(permuted_choices)
            )
        if "target" not in sample:
            max_score = max(choice_dic.values())  # type: ignore
            # Target corresponds to maximum score.
            # If multiple choices have the same score, it will choose the first one.
            sample["target"] = [k for k, v in choice_dic.items() if v == max_score][
                0
            ]  # type: ignore
        sample["choice"] = list(sample["target_scores"].keys())

    sample["input"] = input_format(sample["input"]) + output_prefix
    if not isinstance(sample["target"], list):
        sample["target"] = [sample["target"]]
    return sample
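A usage sketch with a made-up multiple-choice sample (the prefixes are purely illustrative):

rng = np.random.RandomState(0)
sample = {
    "input": "What is the capital of France?",
    "target_scores": {"Paris": 1, "Rome": 0},
}
formatted = default_format_fn(sample, input_prefix="Q: ", output_prefix="\nA: ",
                              choice_prefix="\n  choice: ", rng=rng)
# formatted["input"] now holds the prefixed question, the permuted choices and the output prefix;
# formatted["target"] == ["Paris"] and formatted["choice"] lists both options.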
Example 14
    def perform_measurement(
        self, qubits: Sequence[ops.Qid], prng: np.random.RandomState, collapse_state_vector=True
    ) -> List[int]:
        """Performs a measurement over one or more qubits.

        Args:
            qubits: The sequence of qids to measure, in that order.
            prng: A random number generator, used to simulate measurements.
            collapse_state_vector: A Boolean specifying whether we should mutate
                the state after the measurement.
        """
        results: List[int] = []

        if collapse_state_vector:
            state = self
        else:
            state = self.copy()

        for qubit in qubits:
            n = state.qubit_map[qubit]

            # Trace out other qubits
            M = state.partial_trace(keep_qubits={qubit})
            probs = np.diag(M).real
            sum_probs = sum(probs)

            # Because the computation is approximate, the probabilities do not
            # necessarily add up to 1.0, and thus we re-normalize them.
            if abs(sum_probs - 1.0) > self.simulation_options.sum_prob_atol:
                raise ValueError(f'Sum of probabilities exceeds tolerance: {sum_probs}')
            norm_probs = [x / sum_probs for x in probs]

            d = qubit.dimension
            result: int = int(prng.choice(d, p=norm_probs))

            collapser = np.zeros((d, d))
            collapser[result][result] = 1.0 / math.sqrt(probs[result])

            old_n = state.i_str(n)
            new_n = 'new_' + old_n

            collapser = qtn.Tensor(collapser, inds=(new_n, old_n))

            state.M[n] = (collapser @ state.M[n]).reindex({new_n: old_n})

            results.append(result)

        return results
Example 15
def _select_parent(
    study: Study,
    parent_population: Sequence[FrozenTrial],
    rng: np.random.RandomState,
    dominates: Callable[[FrozenTrial, FrozenTrial, Sequence[StudyDirection]],
                        bool],
) -> FrozenTrial:
    population_size = len(parent_population)
    candidate0 = parent_population[rng.choice(population_size)]
    candidate1 = parent_population[rng.choice(population_size)]

    # TODO(ohta): Consider crowding distance.
    if dominates(candidate0, candidate1, study.directions):
        return candidate0
    else:
        return candidate1
Example 16
def _uniform_random_bias(
        hidden_layer_size: int, random_state: np.random.RandomState) \
        -> np.ndarray:
    """
    Return uniform random bias in range [-1, 1].

    Parameters
    ----------
    hidden_layer_size : int
    random_state : numpy.random.RandomState

    Returns
    -------
    uniform_random_bias : ndarray of shape (hidden_layer_size, )
    """
    return random_state.uniform(low=-1., high=1., size=hidden_layer_size)
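A short usage sketch:

rng = np.random.RandomState(0)
bias = _uniform_random_bias(hidden_layer_size=50, random_state=rng)
assert bias.shape == (50,) and bias.min() >= -1.0 and bias.max() <= 1.0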
Example 17
def findposteriorfrequencies(x: int, tempdata: np.ndarray, maxMOI: int,
                             frequencies_RR, rand: np.random.RandomState):
    nalleles = frequencies_RR.lengths[x]

    # hard coded table() function from R
    data = tempdata[:, x * maxMOI:(x + 1) * maxMOI].astype(int)
    data_1d_array = data.flatten()
    data_1d_array = data_1d_array[data_1d_array != 0]
    data_1d_array = data_1d_array[data_1d_array <= nalleles]

    data_unique, data_counts = np.unique(data_1d_array, return_counts=True)
    # Start as ones for the frequency prior
    counts_table = np.ones(nalleles)
    counts_table[data_unique - 1] += data_counts

    frequencies_RR.matrix[x, :nalleles] = rand.dirichlet(counts_table, 1)
Example 18
    def _replace_invalid_address_alleles(self, dna: List[int],
                                         rng: np.random.RandomState) -> None:
        """Replace invalid alleles for unused address genes of all nodes
        by random permissible values.
        WARNING: Works only if self.n_rows==1.
        """
        assert self._n_rows == 1

        for gene_idx, gene_value in enumerate(dna):
            region_idx = self._get_region_idx(gene_idx)
            if self._is_hidden_address_gene(
                    gene_idx, region_idx) and gene_value > region_idx:
                permissible_values = self.determine_permissible_values_per_gene(
                    gene_idx)
                gene_value = rng.choice(permissible_values)
                dna[gene_idx] = gene_value
Example 19
def random_max(x: np.ndarray, rng: np.random.RandomState = None):
    """
    Returns a randomly selected index from a 1D array where that index's value equals the array maximum

    Args:
        x (np.ndarray): 1D array
        rng (np.random.RandomState): RNG instance

    Returns:
        int: index of a randomly selected maximal element of x
    """

    if rng is None:
        rng = np.random

    return rng.choice(np.where(x == x.max())[0])
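A usage sketch showing the tie-breaking behaviour:

rng = np.random.RandomState(7)
scores = np.array([3, 9, 1, 9, 9])
idx = random_max(scores, rng)   # one of the indices 1, 3 or 4, chosen uniformly at random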
Example 20
    def crossover(
        self,
        parents_params: np.ndarray,
        rng: np.random.RandomState,
        study: Study,
        search_space_bounds: np.ndarray,
    ) -> np.ndarray:

        # https://www.researchgate.net/publication/201976488_Uniform_Crossover_in_Genetic_Algorithms
        # Section 1 Introduction

        n_params = len(search_space_bounds)
        masks = (rng.rand(n_params) >= self._swapping_prob).astype(int)
        child_params = parents_params[masks, range(n_params)]

        return child_params
Example 21
    def barabasi_albert(n_nodes: int, affinity: int,
                        rng: np.random.RandomState):
        """Generate a Barabási-Albert random graph.

        This method is used to generate a Barabási-Albert graph based on the specified affinity.

        Parameters
        ----------
        n_nodes:
            The number of nodes in the graph.
        affinity:
            The number of nodes each new node will be attached to, in the sampling scheme.
            This parameter must be an integer >= 1.
        rng:
            A random number generator.

        Returns
        -------
        Graph:
            The generated graph.

        """
        assert affinity >= 1 and affinity < n_nodes

        edges = set()
        degrees = np.zeros(n_nodes, dtype=int)
        neighbors = {node: set() for node in range(n_nodes)}
        for new_node in range(affinity, n_nodes):
            # first node is connected to all previous ones (star-shape)
            if new_node == affinity:
                neighborhood = np.arange(new_node)
            # remaining nodes are picked stochastically
            else:
                neighbor_prob = degrees[:new_node] / (2 * len(edges))
                neighborhood = rng.choice(new_node,
                                          affinity,
                                          replace=False,
                                          p=neighbor_prob)
            for node in neighborhood:
                edges.add((node, new_node))
                degrees[node] += 1
                degrees[new_node] += 1
                neighbors[node].add(new_node)
                neighbors[new_node].add(node)

        graph = Graph(n_nodes, edges, degrees, neighbors)
        return graph
Example 22
    def sample(self, rng: np.random.RandomState,
               size: int) -> Dict[str, np.ndarray]:

        multivariate_samples = {}
        active = rng.choice(len(self._weights), size, p=self._weights)

        for param_name, dist in self._search_space.items():

            if isinstance(dist, distributions.CategoricalDistribution):
                categorical_weights = self._categorical_weights[param_name]
                assert categorical_weights is not None
                weights = categorical_weights[active, :]
                samples = _MultivariateParzenEstimator._sample_from_categorical_dist(
                    rng, weights)

            else:
                # We restore parameters of parzen estimators.
                low = self._low[param_name]
                high = self._high[param_name]
                mus = self._mus[param_name]
                sigmas = self._sigmas[param_name]
                assert low is not None
                assert high is not None
                assert mus is not None
                assert sigmas is not None

                # We sample from truncnorm.
                trunc_low = (low - mus[active]) / sigmas[active]
                trunc_high = (high - mus[active]) / sigmas[active]
                samples = np.full((), fill_value=high + 1.0, dtype=np.float64)
                while (samples >= high).any():
                    samples = np.where(
                        samples < high,
                        samples,
                        truncnorm.rvs(
                            trunc_low,
                            trunc_high,
                            size=size,
                            loc=mus[active],
                            scale=sigmas[active],
                            random_state=rng,
                        ),
                    )
            multivariate_samples[param_name] = samples
        multivariate_samples = self._transform_from_uniform(
            multivariate_samples)
        return multivariate_samples
Example 23
def random_month_img(dataset: Dict[str, torch.Tensor],
                     writer: Union[int, None] = None,
                     rand: np.random.RandomState = np.random.RandomState(
                         seed=1234),
                     leading_zero: bool = True,
                     **kwargs) -> Tuple[torch.Tensor, int]:
    """Compose an image of a valid month."""
    value = rand.choice(list(range(1, 13)), size=1, replace=False).item()
    if leading_zero:
        s = f'{value:02d}'  # prepend a zero if the value is less than 10
    else:
        s = str(value)
    return (value_to_img(s,
                         dataset=dataset,
                         writer=writer,
                         rand=rand,
                         **kwargs), value)
Example 24
def random_state_only_reward(
        n_states: int,
        n_actions: int,
        rng: np.random.RandomState = np.random) -> np.ndarray:
    """Generates a random reward matrix, differing only in first axis.

    Args:
        n_states: The number of states.
        n_actions: The number of actions.
        rng: Random number generator.

    Returns:
        A three-dimensional array R, where R[s,a,s'] is the reward starting at state
        s, taking action a, and transitioning to state s'.
    """
    rew = rng.rand(n_states, 1, 1)
    return np.tile(rew, (1, n_actions, n_states))
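A usage sketch showing that the reward varies only along the first (state) axis:

rng = np.random.RandomState(0)
R = random_state_only_reward(3, 2, rng)
assert R.shape == (3, 2, 3)
assert np.allclose(R, R[:, :1, :1])   # identical across actions and next states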
Example 25
 def shuffle_participants(self, data: np.ndarray, random_state: np.random.RandomState) -> np.ndarray:
     shuffled_data = np.ndarray(data.shape, data.dtype)
     shuffled_data[:, :] = data
     for tid in (0, 1):
         for cont_member_id, rand_member_id in enumerate(list(random_state.permutation(5))):
             if cont_member_id == rand_member_id:
                 continue
             cont_pid = tid * 5 + cont_member_id
             rand_pid = tid * 5 + rand_member_id
             cont_key_part = "participants.{pid:d}.".format(pid=cont_pid)
             cont_ban_key_part = "teams.{tid:d}.bans.{mid:d}.".format(tid=tid, mid=cont_member_id)
             rand_key_part = "participants.{pid:d}.".format(pid=rand_pid)
             rand_ban_key_part = "teams.{tid:d}.bans.{mid:d}.".format(tid=tid, mid=rand_member_id)
             cont_cids = np.where([col.name.startswith(cont_key_part) or col.name.startswith(cont_ban_key_part) for col in self.specs])[0]
             rand_cids = np.where([col.name.startswith(rand_key_part) or col.name.startswith(rand_ban_key_part) for col in self.specs])[0]
             shuffled_data[:, cont_cids] = data[:, rand_cids]
     return shuffled_data
Example 26
def _blxalpha(x1: np.ndarray, x2: np.ndarray, rng: np.random.RandomState,
              alpha: float) -> np.ndarray:
    # http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.465.6900&rep=rep1&type=pdf
    # Section 2 Crossover Operators for RCGA 2.1 Blend Crossover

    assert x1.shape == x2.shape
    assert x1.ndim == 1

    xs = np.stack([x1, x2])

    x_min = xs.min(axis=0)
    x_max = xs.max(axis=0)
    diff = alpha * (x_max - x_min)  # Equation (1).
    low = x_min - diff  # Equation (1).
    high = x_max + diff  # Equation (1).
    r = rng.uniform(0, 1, size=len(diff))
    child_params_array = (high - low) * r + low
    return child_params_array
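A usage sketch of the blend crossover on two hypothetical parent vectors:

rng = np.random.RandomState(0)
x1 = np.array([0.0, 1.0, 2.0])
x2 = np.array([1.0, 1.0, 0.0])
child = _blxalpha(x1, x2, rng, alpha=0.5)
# each component lies in [x_min - alpha * (x_max - x_min), x_max + alpha * (x_max - x_min)]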
Example 27
def test_matmat(
    linop: pn.linops.LinearOperator,
    matrix: np.ndarray,
    random_state: np.random.RandomState,
    ncols: int,
    order: str,
):
    mat = np.asarray(random_state.normal(size=(linop.shape[1], ncols)),
                     order=order)

    linop_matmat = linop @ mat
    matrix_matmat = matrix @ mat

    assert linop_matmat.ndim == 2
    assert linop_matmat.shape == matrix_matmat.shape
    assert linop_matmat.dtype == matrix_matmat.dtype

    np.testing.assert_allclose(linop_matmat, matrix_matmat)
Example 28
def generate_fake_risk(rs: np.random.RandomState, start_date: str, end_date: str, p0: float = 0.5) -> pd.DataFrame:
    """
    Generate fake risk data that goes between 0 and 1 with a random walk
    :param rs: a Numpy random state
    :param start_date: start date string formatted 'YY-mm'
    :param end_date: end date string formatted 'YY-mm'
    :param p0: the probability of step size 0. The probability of step size -1 / 1 will then be (1-p0)/2
    :return: dataframe with date and risk columns
    """
    # Make dataframe with dates
    df_risk = pd.DataFrame({'date': pd.date_range(start=start_date, end=end_date, freq='M').to_period('M')})
    # Create the steps
    steps = rs.choice(a=[-1, 0, 1], size=len(df_risk)-1, p=[(1 - p0) / 2, p0, (1 - p0) / 2])
    # Compute the path values based on the steps
    path = np.concatenate([[0], steps]).cumsum(0)
    # Normalize between 0 and 1
    df_risk['risk'] = (path - min(path)) / (max(path) - min(path))
    return df_risk
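A short usage sketch (the date strings below are arbitrary examples):

rs = np.random.RandomState(0)
df_risk = generate_fake_risk(rs, start_date='2020-01', end_date='2021-12', p0=0.6)
# df_risk has a 'date' column of monthly periods and a 'risk' column scaled to [0, 1]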
Example 29
    def integrate(
            self, dimensions: int, market_ids: Iterable[str],
            state: np.random.RandomState) -> Tuple[Vector, Matrix, Vector]:
        """Build concatenated IDs, nodes, and weights for each market ID."""
        ids_list: List[Vector] = []
        nodes_list: List[Matrix] = []
        weights_list: List[Vector] = []

        for market_id in market_ids:
            nodes = state.normal(size=(self._ns, dimensions))
            weights = np.repeat(1 / self._ns, self._ns)
            ids_list.append(np.repeat(market_id, len(nodes)))
            nodes_list.append(nodes)
            weights_list.append(weights)

        return np.concatenate(ids_list), np.vstack(nodes_list), np.concatenate(
            weights_list)
Example 30
    def next_lane(self, current_index: LaneIndex, route: Route = None, position: np.ndarray = None,
                  np_random: np.random.RandomState = np.random) -> LaneIndex:
        """
        Get the index of the next lane that should be followed after finishing the current lane.

        - If a plan is available and matches with current lane, follow it.
        - Else, pick next road randomly.
        - If it has the same number of lanes as current road, stay in the same lane.
        - Else, pick next road's closest lane.
        :param current_index: the index of the current lane.
        :param route: the planned route, if any.
        :param position: the vehicle position.
        :param np_random: a source of randomness.
        :return: the index of the next lane to be followed when current lane is finished.
        """
        _from, _to, _id = current_index
        next_to = next_id = None
        # Pick next road according to planned route
        if route:
            if route[0][:2] == current_index[:2]:  # We just finished the first step of the route, drop it.
                route.pop(0)
            if route and route[0][0] == _to:  # Next road in route is starting at the end of current road.
                _, next_to, next_id = route[0]
            elif route:
                logger.warning("Route {} does not start after current road {}.".format(route[0], current_index))
        # Randomly pick next road
        if not next_to:
            try:
                next_to = list(self.graph[_to].keys())[np_random.randint(len(self.graph[_to]))]
            except KeyError:
                # logger.warning("End of lane reached.")
                return current_index

        # If next road has same number of lane, stay on the same lane
        if len(self.graph[_from][_to]) == len(self.graph[_to][next_to]):
            if next_id is None:
                next_id = _id
        # Else, pick closest lane
        else:
            lanes = range(len(self.graph[_to][next_to]))
            next_id = min(lanes,
                          key=lambda l: self.get_lane((_to, next_to, l)).distance(position))

        return _to, next_to, next_id
Example 31
    def sample_tailpoint_goal(self, environment: Dict,
                              rng: np.random.RandomState,
                              planner_params: Dict):
        # add more inflation to reduce the number of truly unachievable goals
        env_inflated = inflate_tf_3d(
            env=environment['env'],
            radius_m=2 * planner_params['goal_params']['threshold'],
            res=environment['res'])
        goal_extent = planner_params['goal_params']['extent']

        while True:
            extent = np.array(goal_extent).reshape(3, 2)
            p = rng.uniform(extent[:, 0], extent[:, 1])
            goal = {'tail': p}
            row, col, channel = point_to_idx_3d_in_env(p[0], p[1], p[2],
                                                       environment)
            collision = env_inflated[row, col, channel] > 0.5
            if not collision:
                return goal
Example 32
def slice_sampler_step_out(log_pivot: float, scale: float,
        sliced_log_density: Callable[[float], float], random_state: np.random.RandomState) -> Tuple[float, float]:

    r = random_state.rand()
    lower_bound = -r * scale
    upper_bound = lower_bound + scale

    def bound_step_out(bound, direction):
        """direction -1 for lower bound, +1 for upper bound"""
        for _ in range(MAX_STEP_OUT):
            if sliced_log_density(bound) <= log_pivot:
                return bound
            else:
                bound += direction * scale
        raise SliceException("Reach maximum iteration ({}) while stepping out for bound ({})".format(MAX_STEP_OUT, direction))

    lower_bound = bound_step_out(lower_bound, -1.)
    upper_bound = bound_step_out(upper_bound, 1.)
    return lower_bound, upper_bound
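A rough usage sketch for a standard-normal slice. MAX_STEP_OUT and SliceException are module-level names the snippet relies on; they are stubbed here only for illustration:

MAX_STEP_OUT = 200

class SliceException(Exception):
    pass

def log_density(x):
    return -0.5 * x * x   # sliced log-density along one coordinate

rs = np.random.RandomState(0)
log_pivot = log_density(0.0) + np.log(rs.rand())   # log-height drawn uniformly under the density
lo, hi = slice_sampler_step_out(log_pivot, scale=1.0,
                                sliced_log_density=log_density, random_state=rs)
# [lo, hi] brackets the slice {x : log_density(x) > log_pivot} around the pivot point 0.0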
Example 33
    def get_neighbors(self, value: Union[int, float], rs: np.random.RandomState, number: int = 4, transform: bool = False) -> \
            List[Union[np.ndarray, float, int]]:
        neighbors = []  # type: List[Union[np.ndarray, float, int]]
        while len(neighbors) < number:
            rejected = True
            iteration = 0
            while rejected:
                iteration += 1
                new_value = rs.normal(value, self.sigma)
                int_value = self._transform(value)
                new_int_value = self._transform(new_value)
                if int_value != new_int_value:
                    rejected = False
                elif iteration > 100000:
                    raise ValueError('Probably caught in an infinite loop.')

            if transform:
                neighbors.append(self._transform(new_value))
            else:
                neighbors.append(new_value)
        return neighbors
Example 34
    def get_neighbors(self, value: Union[int, float], rs: np.random.RandomState, number: int = 4, transform: bool = False) -> List[
        int]:
        neighbors = []  # type: List[int]
        while len(neighbors) < number:
            rejected = True
            iteration = 0
            while rejected:
                iteration += 1
                new_min_value = np.min([1, rs.normal(loc=value, scale=0.2)])
                new_value = np.max((0, new_min_value))
                int_value = self._transform(value)
                new_int_value = self._transform(new_value)
                if int_value != new_int_value:
                    rejected = False
                elif iteration > 100000:
                    raise ValueError('Probably caught in an infinite loop.')

            if transform:
                neighbors.append(self._transform(new_value))
            else:
                new_value = self._transform(new_value)
                new_value = self._inverse_transform(new_value)
                neighbors.append(new_value)

        return neighbors
Example 35
 def _sample(self, rs: np.random.RandomState, size: Union[int, None] = None) -> int:
     """
     returns a random sample from our sequence as order/position index
     """
     return rs.randint(0, self._num_elements, size=size)
Example 36
 def _sample(self, rs: np.random.RandomState, size: Union[None, int] = None) -> np.ndarray:
     mu = self.mu
     sigma = self.sigma
     return rs.normal(mu, sigma, size=size)
Example 37
 def _sample(self, rs: np.random.RandomState, size: int = None) -> Union[int, np.ndarray]:
     return rs.randint(0, self._num_choices, size=size)
Example 38
 def get_neighbors(self, value: float, rs: np.random.RandomState, number: int = 4, transform: bool = False) -> List[float]:
     neighbors = []
     for i in range(number):
         neighbors.append(rs.normal(value, self.sigma))
     return neighbors