Example 1
    def __call__(self, relevances: np.ndarray,
                 random_state: np.random.RandomState) -> np.ndarray:
        """
        Generates an indicator array of the click events for the ranked documents with relevance labels encoded in
        `relevances`.
        :param relevances: Relevance labels of the documents
        :param random_state: Random generator state
        :return: Indicator array of the clicks on the documents

        As an example, consider a model of a user who always clicks on a highly relevant result and immediately stops
        >>> model = CcmClickModel(click_relevance={0: 0.0, 1: 0.0, 2: 1.0},
        ...                       stop_relevance={0: 0.0, 1: 0.0, 2: 1.0}, name="Model", depth=10)
        >>> # Given a result list with highly relevant docs at positions 2 and 4,
        >>> doc_relevances = np.array([1, 0, 2, 0, 2, 0])
        >>> # We expect the user to click on the 3rd document, as it is the first highly relevant one:
        >>> model(doc_relevances, np.random.RandomState(1)).tolist()
        [0.0, 0.0, 1.0, 0.0, 0.0, 0.0]
        """
        n_docs = relevances.shape[0]
        result = np.zeros(n_docs)
        for i in range(min(self.depth, n_docs)):
            r = relevances[i]
            p_click = self.click_relevance[r]
            p_stop = self.stop_relevance[r]

            if random_state.uniform() < p_click:
                result[i] = 1
            if result[i] == 1 and random_state.uniform() < p_stop:
                break
        return result
Example 2
    def sample_midpoint_goal(self, environment: Dict,
                             rng: np.random.RandomState, planner_params: Dict):
        goal_extent = planner_params['goal_params']['extent']

        if environment == {}:
            rospy.loginfo("Assuming no obstacles in the environment")
            extent = np.array(goal_extent).reshape(3, 2)
            p = rng.uniform(extent[:, 0], extent[:, 1])
            goal = {'midpoint': p}
            return goal

        env_inflated = inflate_tf_3d(
            env=environment['env'],
            radius_m=planner_params['goal_params']['threshold'],
            res=environment['res'])
        # DEBUG visualize the inflated env
        # from copy import deepcopy
        # environment_ = deepcopy(environment)
        # environment_['env'] = env_inflated
        # self.plot_environment_rviz(environment_)
        # END DEBUG

        while True:
            extent = np.array(goal_extent).reshape(3, 2)
            p = rng.uniform(extent[:, 0], extent[:, 1])
            goal = {'midpoint': p}
            row, col, channel = grid_utils.point_to_idx_3d_in_env(
                p[0], p[1], p[2], environment)
            collision = env_inflated[row, col, channel] > 0.5
            if not collision:
                return goal
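The loop above is plain rejection sampling: draw a uniform point inside the goal extent and retry until it is collision-free. A minimal self-contained sketch of the same pattern, with a made-up spherical obstacle standing in for the inflated occupancy grid:

import numpy as np

def sample_free_point(extent, rng: np.random.RandomState,
                      obstacle_center=np.zeros(3), obstacle_radius=0.3):
    # Rejection-sample a point in `extent`, rejecting points inside a toy sphere.
    extent = np.array(extent).reshape(3, 2)
    while True:
        p = rng.uniform(extent[:, 0], extent[:, 1])
        if np.linalg.norm(p - obstacle_center) > obstacle_radius:
            return p

p = sample_free_point([-1, 1, -1, 1, -1, 1], np.random.RandomState(0))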
Example 3
    def sample_gripper_goal(environment: Dict, rng: np.random.RandomState,
                            planner_params: Dict):
        env_inflated = inflate_tf_3d(
            env=environment['env'],
            radius_m=planner_params['goal_params']['threshold'],
            res=environment['res'])
        goal_extent = planner_params['goal_params']['extent']

        while True:
            extent = np.array(goal_extent).reshape(3, 2)
            left_gripper = rng.uniform(extent[:, 0], extent[:, 1])
            right_gripper = rng.uniform(extent[:, 0], extent[:, 1])
            goal = {
                'left_gripper': left_gripper,
                'right_gripper': right_gripper,
            }
            row1, col1, channel1 = grid_utils.point_to_idx_3d_in_env(
                left_gripper[0], left_gripper[1], left_gripper[2], environment)
            row2, col2, channel2 = grid_utils.point_to_idx_3d_in_env(
                right_gripper[0], right_gripper[1], right_gripper[2],
                environment)
            collision1 = env_inflated[row1, col1, channel1] > 0.5
            collision2 = env_inflated[row2, col2, channel2] > 0.5
            if not collision1 and not collision2:
                return goal
Example 4
def fixture_measure_params(
    measure_name: str,
    input_dim: int,
    cov_diagonal: bool,
    random_state: np.random.RandomState,
) -> Dict:
    params = {"name": measure_name}

    if measure_name == "gauss":
        # set up mean and covariance
        if input_dim == 1:
            mean = random_state.normal(0, 1)
            cov = random_state.uniform(0.5, 1.5)
        else:
            mean = random_state.normal(0, 1, size=(input_dim, 1))
            if cov_diagonal:
                cov = random_state.uniform(0.5, 1.5, size=(input_dim, 1))
            else:
                mat = random_state.normal(0, 1, size=(input_dim, input_dim))
                cov = mat @ mat.T

        params["mean"] = mean
        params["cov"] = cov

    elif measure_name == "lebesgue":
        # set up bounds
        rv = random_state.uniform(0, 1, size=(input_dim, 2))
        domain = (rv[:, 0] - 1.0, rv[:, 1] + 1.0)

        params["domain"] = domain
        params["normalized"] = True

    return params
Example 5
    def __init__(self, n: int, p: int, σ: float, rs: np.random.RandomState):
        self.n = n
        self.p = p
        self.σ = σ
        self.x = rs.uniform(-10, 10, n * p).reshape((n, p))
        self.β = rs.uniform(-10, 10, p)
        ϵ = rs.normal(0, σ, n)
        self.y = self.x @ self.β + ϵ
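Since every draw goes through the injected RandomState, the synthetic regression data is fully reproducible. A standalone sketch of the same generation pattern (ASCII names instead of the Greek identifiers, seed chosen for illustration):

import numpy as np

def make_data(seed: int, n: int = 100, p: int = 3, sigma: float = 0.5):
    rs = np.random.RandomState(seed)
    X = rs.uniform(-10, 10, n * p).reshape((n, p))
    beta = rs.uniform(-10, 10, p)
    y = X @ beta + rs.normal(0, sigma, n)
    return X, beta, y

# The same seed reproduces the same draws, in the same order:
_, _, y1 = make_data(42)
_, _, y2 = make_data(42)
assert np.allclose(y1, y2)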
Example 6
def random_triplet_generator(
    size: Integer,
    limits: ArrayLike = np.array([[0, 1], [0, 1], [0, 1]]),
    random_state: np.random.RandomState = RANDOM_STATE,
) -> NDArray:
    """
    Return random triplets.

    Parameters
    ----------
    size
        Generator size.
    limits
        Random values limits on each triplet axis.
    random_state
         Mersenne Twister pseudo-random number generator.

    Returns
    -------
    :class:`numpy.ndarray`
        Random triplets.

    Notes
    -----
    -   The test is assuming that :func:`np.random.RandomState` definition
        will return the same sequence no matter which *OS* or *Python* version
        is used. There is however no formal promise about the *prng* sequence
        reproducibility of either *Python* or *Numpy* implementations, see
        :cite:`Laurent2012a`.

    Examples
    --------
    >>> from pprint import pprint
    >>> prng = np.random.RandomState(4)
    >>> random_triplet_generator(10, random_state=prng)
    ... # doctest: +ELLIPSIS
    array([[ 0.9670298...,  0.7793829...,  0.4361466...],
           [ 0.5472322...,  0.1976850...,  0.9489773...],
           [ 0.9726843...,  0.8629932...,  0.7863059...],
           [ 0.7148159...,  0.9834006...,  0.8662893...],
           [ 0.6977288...,  0.1638422...,  0.1731654...],
           [ 0.2160895...,  0.5973339...,  0.0749485...],
           [ 0.9762744...,  0.0089861...,  0.6007427...],
           [ 0.0062302...,  0.3865712...,  0.1679721...],
           [ 0.2529823...,  0.0441600...,  0.7333801...],
           [ 0.4347915...,  0.9566529...,  0.4084438...]])
    """

    limit_x, limit_y, limit_z = as_float_array(limits)

    return tstack(
        [
            random_state.uniform(limit_x[0], limit_x[1], size=size),
            random_state.uniform(limit_y[0], limit_y[1], size=size),
            random_state.uniform(limit_z[0], limit_z[1], size=size),
        ]
    )
Example 7
def mutate_add_node(
        genome: Genome, rnd: np.random.RandomState,
        generator: InnovationNumberGeneratorInterface,
        config: NeatConfig) -> Tuple[Genome, Node, Connection, Connection]:
    """
    Add a new node to the genome with the probability given in the config.
    A random connection is selected and disabled. A new node is placed between the input and output node of that
    connection. Then two new connections are created: one leading into the new node (weight=1) and one leading out
    (weight = weight of the disabled connection).
    :param genome: the genome that should be modified
    :param rnd: a random generator to determine if, and how, the genome is mutated
    :param generator: a generator for innovation number for nodes and connections
    :param config: a config that specifies the mutation params
    :return: the modified genome, as well as the generated node and the two connections (if they were mutated)
    """
    # Check if node should mutate
    if rnd.uniform(0, 1) > config.probability_mutate_add_node:
        return genome, None, None, None

    selected_connection = genome.connections[rnd.randint(
        0, len(genome.connections))]
    selected_connection.enabled = False

    in_node = next(x for x in genome.nodes
                   if x.innovation_number == selected_connection.input_node)
    out_node = next(x for x in genome.nodes
                    if x.innovation_number == selected_connection.output_node)

    # Select the activation function from one of the two adjacent nodes
    new_node_activation = in_node.activation_function if rnd.uniform(
        0, 1) <= 0.5 else out_node.activation_function
    new_node_x_position = (in_node.x_position + out_node.x_position) / 2
    new_node = Node(
        generator.get_node_innovation_number(in_node,
                                             out_node), NodeType.HIDDEN,
        rnd.uniform(low=config.bias_initial_min, high=config.bias_initial_max),
        new_node_activation, new_node_x_position)

    new_connection_in = Connection(generator.get_connection_innovation_number(
        in_node, new_node),
                                   in_node.innovation_number,
                                   new_node.innovation_number,
                                   weight=1,
                                   enabled=True)
    new_connection_out = Connection(generator.get_connection_innovation_number(
        new_node, out_node),
                                    new_node.innovation_number,
                                    out_node.innovation_number,
                                    weight=selected_connection.weight,
                                    enabled=True)

    genome.nodes.append(new_node)
    genome.connections.append(new_connection_in)
    genome.connections.append(new_connection_out)

    return genome, new_node, new_connection_in, new_connection_out
Example 8
def random_angle(random_state: np.random.RandomState, max_pitch_roll: float):
    """
    Returns a random Euler angle where roll and pitch are limited to [-max_pitch_roll, max_pitch_roll].
    :param random_state: The random state used to generate the random numbers.
    :param max_pitch_roll: Maximum roll/pitch angle, in degrees.
    :return Euler: A new `Euler` object with randomized angles.
    """
    mpr = max_pitch_roll * math.pi / 180

    # small pitch, roll values, random yaw angle
    roll = random_state.uniform(low=-mpr, high=mpr)
    pitch = random_state.uniform(low=-mpr, high=mpr)
    yaw = random_state.uniform(low=-math.pi, high=math.pi)

    return Euler(roll, pitch, yaw)
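A usage sketch; the `Euler` type is not shown in the example, so this hypothetical variant returns the three angles as a plain tuple:

import math
import numpy as np

def random_angle_tuple(random_state: np.random.RandomState, max_pitch_roll: float):
    # Same sampling as above, but returns (roll, pitch, yaw) instead of an Euler object.
    mpr = max_pitch_roll * math.pi / 180
    roll = random_state.uniform(-mpr, mpr)
    pitch = random_state.uniform(-mpr, mpr)
    yaw = random_state.uniform(-math.pi, math.pi)
    return roll, pitch, yaw

roll, pitch, yaw = random_angle_tuple(np.random.RandomState(7), max_pitch_roll=5.0)
assert abs(roll) <= 5 * math.pi / 180 and abs(pitch) <= 5 * math.pi / 180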
Example 9
    def _get_tracking_seeds_from_mask(
        self,
        mask: np.ndarray,
        affine_seedsvox2dwivox: np.ndarray,
        n_seeds_per_voxel: int,
        rng: np.random.RandomState
    ) -> np.ndarray:
        """ Given a binary seeding mask, get seeds in DWI voxel
        space using the provided affine

        Parameters
        ----------
        mask : 3D `numpy.ndarray`
            Binary seeding mask
        affine_seedsvox2dwivox : `numpy.ndarray`
        n_seeds_per_voxel : int
        rng : `numpy.random.RandomState`

        Returns
        -------
        seeds : `numpy.ndarray`
        """
        seeds = []
        indices = np.array(np.where(mask)).T
        for idx in indices:
            seeds_in_seeding_voxel = idx + rng.uniform(
                -0.5,
                0.5,
                size=(n_seeds_per_voxel, 3))
            seeds_in_dwi_voxel = nib.affines.apply_affine(
                affine_seedsvox2dwivox,
                seeds_in_seeding_voxel)
            seeds.extend(seeds_in_dwi_voxel)
        seeds = np.array(seeds, dtype=np.float16)
        return seeds
Example 10
def spiral_classification_dataset(
    n_sup: int,
    balance_classes: bool,
    rng: np.random.RandomState,
    N: int = 5000,
    spiral_radius: float = 20,
    img_size=(256, 256)) -> ClassificationDataset2D:
    # Generate spiral dataset
    # Taking the sqrt of the randomly drawn radii ensures uniform sample distribution
    # Using plain uniform distribution results in samples concentrated at the centre
    radius0 = np.sqrt(rng.uniform(low=1.0, high=spiral_radius**2, size=(N, )))
    radius1 = np.sqrt(rng.uniform(low=1.0, high=spiral_radius**2, size=(N, )))
    theta0 = radius0 * 0.5
    theta1 = radius1 * 0.5 + np.pi
    radius = np.append(radius0, radius1, axis=0)
    theta = np.append(theta0, theta1, axis=0)
    X = np.stack([np.sin(theta) * radius, np.cos(theta) * radius], axis=1)
    y = np.append(np.zeros(radius0.shape, dtype=int),
                  np.ones(radius1.shape, dtype=int),
                  axis=0)

    X = X + rng.normal(size=X.shape) * 0.2

    X = X / spiral_radius

    return SplitClassificationDataset2D(X, y, img_size, n_sup, balance_classes,
                                        rng)
Example 11
    def _get_tracking_seeds_from_mask(mask: np.ndarray,
                                      affine_seedsvox2dwivox: np.ndarray,
                                      n_seeds_per_voxel: int,
                                      rng: np.random.RandomState) -> np.ndarray:
        """Given a binary seeding mask, get seeds in DWI voxel space using the
        provided affine.

        Parameters
        ----------
        mask : np.ndarray with shape (X,Y,Z)
            Binary seeding mask.
        affine_seedsvox2dwivox : np.ndarray
            Affine to bring the seeds from their voxel space to the input voxel
             space.
        n_seeds_per_voxel : int
            Number of seeds to generate in each voxel
        rng : np.random.RandomState
            Random number generator

        Returns
        -------
        seeds : np.ndarray with shape (N_seeds, 3)
            Position of each initial tracking seeds
        """
        seeds = []
        indices = np.array(np.where(mask)).T
        for idx in indices:
            seeds_in_seeding_voxel = idx + rng.uniform(-0.5, 0.5,
                                                       size=(n_seeds_per_voxel, 3))
            seeds_in_dwi_voxel = nib.affines.apply_affine(affine_seedsvox2dwivox,
                                                          seeds_in_seeding_voxel)
            seeds.extend(seeds_in_dwi_voxel)
        seeds = np.array(seeds, dtype=np.float32)
        return seeds
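In both variants above, each nonzero mask voxel gets `n_seeds_per_voxel` uniform jitters in [-0.5, 0.5)^3 before being mapped through the affine. A nibabel-free sketch of just the jitter step:

import numpy as np

mask = np.zeros((4, 4, 4), dtype=bool)
mask[1, 2, 3] = mask[0, 0, 0] = True

rng = np.random.RandomState(0)
n_seeds_per_voxel = 5
indices = np.array(np.where(mask)).T                # (n_voxels, 3) voxel indices
seeds = np.concatenate([idx + rng.uniform(-0.5, 0.5, size=(n_seeds_per_voxel, 3))
                        for idx in indices])        # (n_voxels * n_seeds, 3)
assert seeds.shape == (2 * n_seeds_per_voxel, 3)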
Example 12
    def _select_train_indices(
        self,
        n_samples: int,
        random_state: np.random.RandomState,
        y: Union[np.ndarray, pd.Series, pd.DataFrame, None],
    ) -> np.ndarray:

        mean_block_size = self.mean_block_size
        if mean_block_size < 1:
            # if mean block size was set as a percentage, calculate the actual mean
            # block size
            mean_block_size = n_samples * mean_block_size

        p_new_block = 1.0 / mean_block_size

        train = np.empty(n_samples, dtype=np.int64)

        for i in range(n_samples):
            if i == 0 or random_state.uniform() <= p_new_block:
                idx = random_state.randint(n_samples)
            else:
                # noinspection PyUnboundLocalVariable
                idx += 1
                if idx >= n_samples:
                    idx = 0
            train[i] = idx

        return train
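This is the index-selection step of a stationary block bootstrap: with probability `p_new_block = 1 / mean_block_size` a new block starts at a random position, otherwise the previous index is continued (wrapping at the end), so block lengths are geometrically distributed with mean `mean_block_size`. A standalone sketch of the same logic, written as a plain function for illustration:

import numpy as np

def stationary_bootstrap_indices(n_samples: int, mean_block_size: float,
                                 random_state: np.random.RandomState) -> np.ndarray:
    # Block lengths come out geometric with mean `mean_block_size`.
    p_new_block = 1.0 / mean_block_size
    train = np.empty(n_samples, dtype=np.int64)
    idx = 0
    for i in range(n_samples):
        if i == 0 or random_state.uniform() <= p_new_block:
            idx = random_state.randint(n_samples)  # start a new block
        else:
            idx = (idx + 1) % n_samples            # continue the block, wrapping
        train[i] = idx
    return train

idx = stationary_bootstrap_indices(1000, mean_block_size=20,
                                   random_state=np.random.RandomState(0))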
Example 13
    def _update_classifications(cls, state: SiteInstanceState,
                                likelihood_ratios: np.ndarray, num_ids: int,
                                rand: np.random.RandomState):
        '''
        Update the recrudescence/reinfection classification of each sample,
        based on the current state's calculated likelihood ratios

        :param state: The current state variables of the algorithm
        :param likelihood_ratios: The likelihood ratio computed for each sample
        :param num_ids: The number of samples to classify
        :param rand: The random number generator to use
        :return: The new classifications (will also modify the state
        classifications)
        '''
        z = rand.uniform(size=num_ids)
        new_classifications = np.copy(state.classification)
        new_classifications[np.logical_and(
            state.classification == SampleType.REINFECTION.value,
            z < likelihood_ratios)] = SampleType.RECRUDESCENCE.value
        # Add slight offset so likelihood ratios don't give div by 0 error
        new_likelihood_ratios = likelihood_ratios + 1e-9
        new_classifications[np.logical_and(
            state.classification == SampleType.RECRUDESCENCE.value,
            z < 1.0 / new_likelihood_ratios)] = SampleType.REINFECTION.value
        state.classification = new_classifications
        return state.classification
Example 14
    def erdos_renyi(n_nodes: int, edge_probability: float,
                    rng: np.random.RandomState):
        """Generate an Erdös-Rényi random graph.

        This method is used to generate an Erdös-Rényi graph by randomly adding edges with
        the specified probability.

        Parameters
        ----------
        n_nodes:
            The number of nodes in the graph.
        edge_probability:
            The probability of generating each edge.
            This value must lie in the range [0, 1].
        rng:
            A random number generator.

        Returns
        -------
        Graph:
            The generated graph.

        """
        edges = set()
        degrees = np.zeros(n_nodes, dtype=int)
        neighbors = {node: set() for node in range(n_nodes)}
        for edge in combinations(np.arange(n_nodes), 2):
            if rng.uniform() < edge_probability:
                edges.add(edge)
                degrees[edge[0]] += 1
                degrees[edge[1]] += 1
                neighbors[edge[0]].add(edge[1])
                neighbors[edge[1]].add(edge[0])
        graph = Graph(n_nodes, edges, degrees, neighbors)
        return graph
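Usage sketch: each of the n(n-1)/2 node pairs gets one Bernoulli(p) draw, so the expected edge count is p * n * (n-1) / 2. Since `Graph` is project-specific, this standalone smoke test only counts edges:

import numpy as np
from itertools import combinations

def count_gnp_edges(n_nodes: int, p: float, rng: np.random.RandomState) -> int:
    # One Bernoulli(p) draw per node pair, as in the loop above.
    return sum(1 for _ in combinations(range(n_nodes), 2) if rng.uniform() < p)

n, p = 100, 0.1
print(count_gnp_edges(n, p, np.random.RandomState(0)), "edges, expected",
      p * n * (n - 1) / 2)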
Example 15
def _syc_with_adjacent_z_rotations(a: cirq.GridQubit, b: cirq.GridQubit,
                                   prng: np.random.RandomState):
    z_exponents = [prng.uniform(0, 1) for _ in range(4)]
    yield cirq.Z(a)**z_exponents[0]
    yield cirq.Z(b)**z_exponents[1]
    yield cirq.google.SYC(a, b)
    yield cirq.Z(a)**z_exponents[2]
    yield cirq.Z(b)**z_exponents[3]
Example 16
def GenerateGraph(
    rand: np.random.RandomState,
    num_nodes_min_max,
    dimensions: int = 2,
    theta: float = 1000.0,
    rate: float = 1.0,
    weight_name: str = "distance",
) -> nx.Graph:
    """Creates a connected graph.

  The graphs are geographic threshold graphs, but with added edges via a
  minimum spanning tree algorithm, to ensure all nodes are connected.

  Args:
    rand: A random state for the graph generator.
    num_nodes_min_max: A sequence [lower, upper) number of nodes per graph.
    dimensions: (optional) An `int` number of dimensions for the positions.
      Default= 2.
    theta: (optional) A `float` threshold parameters for the geographic
      threshold graph's threshold. Large values (1000+) make mostly trees. Try
      20-60 for good non-trees. Default=1000.0.
    rate: (optional) A rate parameter for the node weight exponential sampling
      distribution. Default= 1.0.
    weight_name: The name for the weight edge attribute.

  Returns:
    The graph.
  """
    # Sample num_nodes.
    num_nodes = rand.randint(*num_nodes_min_max)

    # Create geographic threshold graph.
    pos_array = rand.uniform(size=(num_nodes, dimensions))
    pos = dict(enumerate(pos_array))
    weight = dict(enumerate(rand.exponential(rate, size=num_nodes)))
    geo_graph = nx.geographical_threshold_graph(num_nodes,
                                                theta,
                                                pos=pos,
                                                weight=weight)

    # Create minimum spanning tree across geo_graph's nodes.
    distances = spatial.distance.squareform(spatial.distance.pdist(pos_array))
    i_, j_ = np.meshgrid(range(num_nodes), range(num_nodes), indexing="ij")
    weighted_edges = list(zip(i_.ravel(), j_.ravel(), distances.ravel()))
    mst_graph = nx.Graph()
    mst_graph.add_weighted_edges_from(weighted_edges, weight=weight_name)
    mst_graph = nx.minimum_spanning_tree(mst_graph, weight=weight_name)
    # Put geo_graph's node attributes into the mst_graph.
    for i in mst_graph.nodes():
        mst_graph.nodes[i].update(geo_graph.nodes[i])  # Graph.node was removed in networkx 2.4

    # Compose the graphs.
    combined_graph = nx.compose_all((mst_graph, geo_graph.copy()))
    # Put all distance weights into edge attributes.
    for i, j in combined_graph.edges():
        combined_graph.get_edge_data(i, j).setdefault(weight_name,
                                                      distances[i, j])
    return combined_graph
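A usage sketch, assuming networkx >= 2.4 (with the `.nodes` fix above) and scipy available:

import networkx as nx
import numpy as np

g = GenerateGraph(np.random.RandomState(42), num_nodes_min_max=(8, 12))
print(g.number_of_nodes(), "nodes,", g.number_of_edges(), "edges,",
      "connected:", nx.is_connected(g))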
Example 17
def make_trials(system: VisionSystem, image_collection: ImageCollection,
                repeats: int, random: np.random.RandomState):
    # Get the true motions, for making trials
    true_motions = [
        image_collection.images[frame_idx - 1].camera_pose.find_relative(
            image_collection.images[frame_idx].camera_pose)
        if frame_idx > 0 else None
        for frame_idx in range(len(image_collection))
    ]

    # Make some plausible trial results
    trial_results = []
    for repeat in range(repeats):
        start_idx = random.randint(0, len(image_collection) - 2)
        frame_results = [
            FrameResult(
                timestamp=timestamp,
                image=image,
                pose=image.camera_pose,
                processing_time=random.uniform(0.001, 1.0),
                estimated_motion=true_motions[frame_idx].find_independent(
                    Transform(location=random.normal(0, 1, 3),
                              rotation=t3.quaternions.axangle2quat(
                                  random.uniform(-1, 1, 3),
                                  random.normal(0, np.pi / 2)),
                              w_first=True))
                if frame_idx > start_idx else None,
                tracking_state=TrackingState.OK
                if frame_idx > start_idx else TrackingState.NOT_INITIALIZED,
                num_matches=random.randint(10, 100))
            for frame_idx, (timestamp, image) in enumerate(image_collection)
        ]
        frame_results[start_idx].estimated_pose = Transform()
        trial_settings = {'random': random.randint(0, 10), 'repeat': repeat}
        trial_result = SLAMTrialResult(system=system,
                                       image_source=image_collection,
                                       success=True,
                                       results=frame_results,
                                       has_scale=False,
                                       settings=trial_settings)
        trial_result.save()
        trial_results.append(trial_result)
    return trial_results
Example 18
def make_noisy_data(
        m: float = 0.1,
        b: float = 0.3,
        n_samples: int = 5,
        e_std: float = 0.01,
        random_state: np.random.RandomState = np.random.RandomState()):
    x = random_state.uniform(size=n_samples)
    e = random_state.normal(scale=e_std, size=len(x)).astype(np.float32)
    y = m * x + b + e
    return x, y
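Reusing the function above, an explicitly seeded state makes the noisy line reproducible, as this minimal sketch checks:

import numpy as np

x1, y1 = make_noisy_data(random_state=np.random.RandomState(0))
x2, y2 = make_noisy_data(random_state=np.random.RandomState(0))
assert np.allclose(x1, x2) and np.allclose(y1, y2)

Note that the default `np.random.RandomState()` is created once, at function-definition time (the usual mutable-default pitfall), so repeated calls without an explicit state share a single generator rather than reseeding.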
Example 19
def sample_query(p_weights, dim_names: Sequence[str],
                 dim_unique_values: Sequence[Sequence],
                 r: np.random.RandomState) -> Dict:
    n_dims = len(p_weights)
    query_filter = dict()
    for dim_idx in range(n_dims):
        if r.uniform() < p_weights[dim_idx]:
            # filter on dimension
            cur_dim_value = r.choice(dim_unique_values[dim_idx])
            query_filter[dim_names[dim_idx]] = cur_dim_value
    return query_filter
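A usage sketch with made-up dimensions and weights:

import numpy as np

dim_names = ["country", "device", "browser"]
dim_unique_values = [["US", "DE", "IT"], ["mobile", "desktop"], ["ff", "chrome"]]
p_weights = [0.9, 0.5, 0.1]  # probability of filtering on each dimension

q = sample_query(p_weights, dim_names, dim_unique_values, np.random.RandomState(3))
print(q)  # e.g. {'country': 'DE', 'device': 'mobile'}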
Example 20
def lhs(dimensions: int, size: int, state: np.random.RandomState, modified: bool = False) -> Tuple[Array, Array]:
    """Use Latin Hypercube Sampling to generate nodes and weights for integration."""

    # generate the samples
    samples = np.zeros((size, dimensions))
    for dimension in range(dimensions):
        samples[:, dimension] = state.permutation(np.arange(size) + state.uniform(size=1 if modified else size)) / size

    # transform the samples and construct weights
    nodes = scipy.stats.norm().ppf(samples)
    weights = np.repeat(1 / size, size)
    return nodes, weights
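Per dimension, `np.arange(size) + u` places exactly one point in each of the `size` equal strata of [0, 1) before the permutation shuffles their order (one shared offset `u` in the modified variant, one per stratum otherwise); the normal PPF then maps them to Gaussian nodes. A scipy-free check of the stratification property in uniform space:

import numpy as np

size, dimensions = 8, 3
state = np.random.RandomState(0)
samples = np.zeros((size, dimensions))
for d in range(dimensions):
    samples[:, d] = state.permutation(np.arange(size) + state.uniform(size=size)) / size

# Exactly one sample falls in each bin [k/size, (k+1)/size) per dimension.
bins = np.floor(samples * size).astype(int)
assert all(sorted(bins[:, d]) == list(range(size)) for d in range(dimensions))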
Example 21
    def sample_action(self,
                      action_rng: np.random.RandomState,
                      environment: Dict,
                      state: Dict,
                      action_params: Dict,
                      validate: bool,
                      stateless: Optional[bool] = False):
        action = None
        for _ in range(self.max_action_attempts):
            # repeat the previous action with 80% probability; this improves exploration
            repeat_probability = action_params.get(
                'repeat_delta_gripper_motion_probability', 0.8)
            if self.last_action is not None and action_rng.uniform(
                    0, 1) < repeat_probability and not stateless:
                gripper_delta_position = self.last_action[
                    'gripper_delta_position']
            else:
                theta = action_rng.uniform(-np.pi, np.pi)
                displacement = action_rng.uniform(
                    0, action_params['max_distance_gripper_can_move'])

                dx = np.cos(theta) * displacement
                dy = np.sin(theta) * displacement

                gripper_delta_position = np.array([dx, dy, 0])

            gripper_position = state['gripper'] + gripper_delta_position
            action = {
                'gripper_position': gripper_position,
                'gripper_delta_position': gripper_delta_position,
                'timeout_s': action_params['dt'],
            }

            if not validate or self.is_action_valid(action, action_params):
                self.last_action = action
                return action

        rospy.logwarn(
            "Could not find a valid action, executing an invalid one")
        return action
Example 22
def make_random_rope_configuration(extent, n_state, link_length, max_angle_rad,
                                   rng: np.random.RandomState):
    """
    First sample a head point, then sample angles for the other points.
    :param extent: bounds of the environment [xmin, xmax, ymin, ymax] (meters)
    :param n_state: dimension of the flattened rope configuration
    :param link_length: length of each segment of the rope (meters)
    :param max_angle_rad: maximum bend between links. NOTE, by sampling uniformly here we make certain assumptions about the planning task
    :param rng: random generator
    :return: the sampled rope configuration, shape (n_state,)
    """
    def oob(x, y):
        return not (extent[0] < x < extent[1] and extent[2] < y < extent[3])

    n_links = n_state_to_n_links(n_state)
    theta = rng.uniform(-np.pi, np.pi)
    valid = False
    while not valid:
        head_x = rng.uniform(extent[0], extent[1])
        head_y = rng.uniform(extent[2], extent[3])

        rope_configuration = np.zeros(n_state)
        rope_configuration[-2] = head_x
        rope_configuration[-1] = head_y

        j = n_state - 1
        valid = True
        for i in range(n_links):
            theta = theta + rng.uniform(-max_angle_rad, max_angle_rad)
            rope_configuration[
                j - 2] = rope_configuration[j] + np.cos(theta) * link_length
            rope_configuration[
                j -
                3] = rope_configuration[j - 1] + np.sin(theta) * link_length

            if oob(rope_configuration[j - 2], rope_configuration[j - 3]):
                valid = False
                break

            j = j - 2

    return rope_configuration
Example 23
    def sample_action(self,
                      action_rng: np.random.RandomState,
                      environment: Dict,
                      state: Dict,
                      action_params: Dict,
                      validate: bool,
                      stateless: Optional[bool] = False):
        self.viz_action_sample_bbox(
            self.left_gripper_bbox_pub,
            self.get_action_sample_extent(action_params, 'left'))
        self.viz_action_sample_bbox(
            self.right_gripper_bbox_pub,
            self.get_action_sample_extent(action_params, 'right'))

        action = None
        for _ in range(self.max_action_attempts):
            # move in the same direction as the previous action with some probability
            repeat_probability = action_params[
                'repeat_delta_gripper_motion_probability']
            if not stateless and self.last_action is not None and action_rng.uniform(
                    0, 1) < repeat_probability:
                left_gripper_delta_position = self.last_action[
                    'left_gripper_delta_position']
                right_gripper_delta_position = self.last_action[
                    'right_gripper_delta_position']
            else:
                # Sample a new random action
                left_gripper_delta_position = self.sample_delta_position(
                    action_params, action_rng)
                right_gripper_delta_position = self.sample_delta_position(
                    action_params, action_rng)

            # Apply delta and check for out of bounds
            left_gripper_position = state[
                'left_gripper'] + left_gripper_delta_position
            right_gripper_position = state[
                'right_gripper'] + right_gripper_delta_position

            action = {
                'left_gripper_position': left_gripper_position,
                'right_gripper_position': right_gripper_position,
                'left_gripper_delta_position': left_gripper_delta_position,
                'right_gripper_delta_position': right_gripper_delta_position,
            }

            if not validate or self.is_action_valid(action, action_params):
                self.last_action = action
                return action

        rospy.logwarn(
            "Could not find a valid action, executing an invalid one")
        return action
Example 24
def mutate_bias(genome: Genome, rnd: np.random.RandomState,
                config: NeatConfig) -> Genome:
    """
    Mutate the bias of the nodes
    :param genome: the genome, with nodes
    :param rnd: a random generator to determine which nodes should be mutated and how
    :param config: neat config that specifies the probabilities
    :return: the modified genome
    """
    for node in genome.nodes:
        # Input nodes do not have a bias
        if node.node_type == NodeType.INPUT:
            continue

        if rnd.uniform(0, 1) <= config.probability_bias_mutation:
            # Assign random bias or perturb value
            if rnd.uniform(0, 1) <= config.probability_random_bias_mutation:
                node.bias = rnd.uniform(low=config.bias_initial_min,
                                        high=config.bias_initial_max)
            else:
                # Check how the bias should be mutated
                mutation_type = config.bias_mutation_type
                if mutation_type == "uniform":
                    node.bias += rnd.uniform(
                        low=-config.bias_mutation_uniform_max_change,
                        high=config.bias_mutation_uniform_max_change)
                elif mutation_type == "normal":
                    node.bias += rnd.normal(
                        loc=0, scale=config.bias_mutation_normal_sigma)
                else:
                    raise AssertionError(
                        "Unknown type of mutation type. Must be 'uniform' or 'normal'"
                    )

                node.bias = np.clip(node.bias,
                                    a_min=config.bias_min,
                                    a_max=config.bias_max)
    return genome
Example 25
    def __init__(self,
                 input_size,
                 output_size,
                 learning_rate,
                 activator,
                 L2_reg,
                 rng: np.random.RandomState = None):
        self.input_size = input_size
        self.output_size = output_size

        if rng is None:
            rng = np.random.RandomState(int(time.time()))

        if activator == 'tanh':
            self.activator = activators.TanhActivator()
            self.W = np.asarray(rng.uniform(
                low=-np.sqrt(6. / (input_size + output_size)),
                high=np.sqrt(6. / (input_size + output_size)),
                size=(input_size, output_size)),
                                dtype=float)
            self.W = self.W.T
        elif activator == 'sigmoid':
            self.activator = activators.SigmoidActivator()
            self.W = np.asarray(rng.uniform(
                low=-np.sqrt(6. / (input_size + output_size)) * 4,
                high=np.sqrt(6. / (input_size + output_size)) * 4,
                size=(output_size, input_size)),
                                dtype=float)

        self.b = np.zeros((output_size, 1))
        self.A = np.zeros((output_size, 1))
        self.X = 0
        self.Z = 0
        self.dW = 0
        self.db = 0
        self.learning_rate = learning_rate
        self.L2_reg = L2_reg
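The bounds are the Glorot/Xavier uniform initialization, +-sqrt(6 / (fan_in + fan_out)), scaled by 4 for the sigmoid as in the original recommendation. A standalone sketch of just the weight draw (the `gain` parameter is an illustrative generalization, not part of the code above):

import numpy as np

def glorot_uniform(fan_in: int, fan_out: int, rng: np.random.RandomState,
                   gain: float = 1.0) -> np.ndarray:
    # Uniform draw in [-limit, limit) with limit = gain * sqrt(6 / (fan_in + fan_out)).
    limit = gain * np.sqrt(6.0 / (fan_in + fan_out))
    return rng.uniform(low=-limit, high=limit, size=(fan_out, fan_in))

W_tanh = glorot_uniform(64, 32, np.random.RandomState(0))          # gain 1 for tanh
W_sigmoid = glorot_uniform(64, 32, np.random.RandomState(0), 4.0)  # gain 4 for sigmoid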
Example 26
def set_new_genome_bias(genome: Genome, rnd: np.random.RandomState,
                        config: NeatConfig) -> Genome:
    """
    Set new bias values in all nodes (except input nodes) with the given random generator.
    :param genome: the genome, where the bias value should be modified
    :param rnd: the random generator
    :param config: a neat config that specifies min and max values
    :return: the modified genome
    """
    for node in genome.nodes:
        if node.node_type == NodeType.INPUT:
            continue
        node.bias = rnd.uniform(low=config.bias_initial_min,
                                high=config.bias_initial_max)
    return genome
Example 27
def mutate_weights(genome: Genome, rnd: np.random.RandomState,
                   config: NeatConfig) -> Genome:
    """
    Mutate the connection weights, using the given random generator and the config
    :param genome: the genome whose weights should be mutated
    :param rnd: a random generator to determine which weights will be changed and by how much
    :param config: a config that specifies the probability and magnitude of the changes
    :return: the mutated genome
    """
    for connection in genome.connections:
        # Should mutate weights?
        if rnd.uniform(0, 1) <= config.probability_weight_mutation:
            # Assign random weight or perturb existing weight?
            if rnd.uniform(0, 1) <= config.probability_random_weight_mutation:
                connection.weight = rnd.uniform(
                    low=config.connection_initial_min_weight,
                    high=config.connection_initial_max_weight)
            else:
                # Check how the connection weight should be mutated
                mutation_type = config.weight_mutation_type
                if mutation_type == "uniform":
                    connection.weight += rnd.uniform(
                        -config.weight_mutation_uniform_max_change,
                        config.weight_mutation_uniform_max_change)
                elif mutation_type == "normal":
                    connection.weight += rnd.normal(
                        loc=0, scale=config.weight_mutation_normal_sigma)
                else:
                    raise AssertionError(
                        "Unknown type of mutation type. Must be 'uniform' or 'normal'"
                    )

                connection.weight = np.clip(connection.weight,
                                            a_min=config.connection_min_weight,
                                            a_max=config.connection_max_weight)
    return genome
Example 28
def set_new_genome_weights(genome: Genome, rnd: np.random.RandomState,
                           config: NeatConfig) -> Genome:
    """
    Set new weights for the connections of the genome with the given random generator.
    :param genome: the genome whose weights should be randomized
    :param rnd: random generator used to draw the new weights
    :param config: the neat config that specifies max and min weight
    :return: the modified genome
    """
    for connection in genome.connections:
        connection.weight = rnd.uniform(
            low=config.connection_initial_min_weight,
            high=config.connection_initial_max_weight)

    return genome
Example 29
def _uniform_random_bias(
        hidden_layer_size: int, random_state: np.random.RandomState) \
        -> np.ndarray:
    """
    Return uniform random bias in range [-1, 1].

    Parameters
    ----------
    hidden_layer_size : int
    random_state : numpy.random.RandomState

    Returns
    -------
    uniform_random_bias : ndarray of shape (hidden_layer_size, )
    """
    return random_state.uniform(low=-1., high=1., size=hidden_layer_size)
Example 30
def _blxalpha(x1: np.ndarray, x2: np.ndarray, rng: np.random.RandomState,
              alpha: float) -> np.ndarray:
    # http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.465.6900&rep=rep1&type=pdf
    # Section 2 Crossover Operators for RCGA 2.1 Blend Crossover

    assert x1.shape == x2.shape
    assert x1.ndim == 1

    xs = np.stack([x1, x2])

    x_min = xs.min(axis=0)
    x_max = xs.max(axis=0)
    diff = alpha * (x_max - x_min)  # Equation (1).
    low = x_min - diff  # Equation (1).
    high = x_max + diff  # Equation (1).
    r = rng.uniform(0, 1, size=len(diff))
    child_params_array = (high - low) * r + low
    return child_params_array
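Usage sketch: each child gene is drawn uniformly from [x_min - alpha*d, x_max + alpha*d] with d = x_max - x_min, so children may fall slightly outside the parents' range:

import numpy as np

x1 = np.array([0.0, 1.0, 2.0])
x2 = np.array([1.0, 1.0, 0.0])
child = _blxalpha(x1, x2, np.random.RandomState(0), alpha=0.5)

# Every gene lies within the alpha-extended interval of its parents.
d = np.abs(x1 - x2)
low = np.minimum(x1, x2) - 0.5 * d
high = np.maximum(x1, x2) + 0.5 * d
assert np.all((low <= child) & (child <= high))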