Example #1
 def setUp(self):
     self.seed = 1234567890
     self.prng = random.RandomState(self.seed)
     self.state = self.prng.get_state()
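
A minimal sketch (not from the original test; names assumed) of what the captured state is good for: set_state rewinds the generator so the same draws can be reproduced later.

import numpy.random as random

prng = random.RandomState(1234567890)
state = prng.get_state()              # snapshot of the generator state
first = prng.rand(3)                  # advance the generator
prng.set_state(state)                 # rewind to the snapshot
assert (prng.rand(3) == first).all()  # identical draws after rewinding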
Example #2
 def setUp(self):
     super(TestCase, self).setUp()
     self._rng = npr.RandomState(zlib.adler32(
         self._testMethodName.encode()))
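
A standalone sketch of the same idea, assuming only numpy and zlib: hashing the test name gives every test its own deterministic, reproducible stream.

import zlib
import numpy.random as npr

def rng_for(name):
    # adler32 returns a 32-bit value, which is a valid RandomState seed
    return npr.RandomState(zlib.adler32(name.encode()))

assert rng_for("test_foo").randint(1000) == rng_for("test_foo").randint(1000)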
Example #3
def rand_nonzero():
  post = lambda x: onp.where(x == 0, 1, x)
  randn = npr.RandomState(0).randn
  return partial(_rand_dtype, randn, scale=3, post=post)
Example #4
def rand_small():
    randn = npr.RandomState(0).randn
    return partial(_rand_dtype, randn, scale=1e-3)
Example #5
def rand_small_positive():
    rand = npr.RandomState(0).rand
    return partial(_rand_dtype, rand, scale=2e-5)
Example #6
def rand_bool():
  rng = npr.RandomState(0)
  def generator(shape, dtype):
    return _cast_to_shape(rng.rand(*_dims_of_shape(shape)) < 0.5, shape, dtype)
  return generator
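
A minimal standalone sketch of the same idea, without the private _dims_of_shape/_cast_to_shape helpers: draw uniform floats from a seeded generator and threshold them to get a reproducible boolean array.

import numpy.random as npr

rng = npr.RandomState(0)
bools = rng.rand(2, 3) < 0.5  # boolean ndarray of shape (2, 3)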
Example #7
class CarlaDataProvider(object):  # pylint: disable=too-many-public-methods
    """
    This class provides access to various data of all registered actors
    It buffers the data and updates it on every CARLA tick

    Currently available data:
    - Absolute velocity
    - Location
    - Transform

    Potential additions:
    - Acceleration

    In addition it provides access to the map and the transform of all traffic lights
    """

    _actor_velocity_map = dict()
    _actor_location_map = dict()
    _actor_transform_map = dict()
    _traffic_light_map = dict()
    _carla_actor_pool = dict()
    _client = None
    _world = None
    _map = None
    _sync_flag = False
    _spawn_points = None
    _spawn_index = 0
    _blueprint_library = None
    _ego_vehicle_route = None
    _rng = random.RandomState(2000)

    @staticmethod
    def register_actor(actor):
        """
        Add new actor to dictionaries
        If actor already exists, throw an exception
        """
        if actor in CarlaDataProvider._actor_velocity_map:
            raise KeyError(
                "Vehicle '{}' already registered. Cannot register twice!".
                format(actor.id))
        else:
            CarlaDataProvider._actor_velocity_map[actor] = 0.0

        if actor in CarlaDataProvider._actor_location_map:
            raise KeyError(
                "Vehicle '{}' already registered. Cannot register twice!".
                format(actor.id))
        else:
            CarlaDataProvider._actor_location_map[actor] = None

        if actor in CarlaDataProvider._actor_transform_map:
            raise KeyError(
                "Vehicle '{}' already registered. Cannot register twice!".
                format(actor.id))
        else:
            CarlaDataProvider._actor_transform_map[actor] = None

    @staticmethod
    def register_actors(actors):
        """
        Add new set of actors to dictionaries
        """
        for actor in actors:
            CarlaDataProvider.register_actor(actor)

    @staticmethod
    def on_carla_tick():
        """
        Callback from CARLA
        """
        for actor in CarlaDataProvider._actor_velocity_map:
            if actor is not None and actor.is_alive:
                CarlaDataProvider._actor_velocity_map[
                    actor] = calculate_velocity(actor)

        for actor in CarlaDataProvider._actor_location_map:
            if actor is not None and actor.is_alive:
                CarlaDataProvider._actor_location_map[
                    actor] = actor.get_location()

        for actor in CarlaDataProvider._actor_transform_map:
            if actor is not None and actor.is_alive:
                CarlaDataProvider._actor_transform_map[
                    actor] = actor.get_transform()

        world = CarlaDataProvider._world
        if world is None:
            print("WARNING: CarlaDataProvider couldn't find the world")

    @staticmethod
    def get_velocity(actor):
        """
        returns the absolute velocity for the given actor
        """
        for key in CarlaDataProvider._actor_velocity_map:
            if key.id == actor.id:
                return CarlaDataProvider._actor_velocity_map[key]

        # We are intentionally not throwing here
        # This may cause exception loops in py_trees
        print('{}.get_velocity: {} not found!'.format(__name__, actor))
        return 0.0

    @staticmethod
    def get_location(actor):
        """
        returns the location for the given actor
        """
        for key in CarlaDataProvider._actor_location_map:
            if key.id == actor.id:
                return CarlaDataProvider._actor_location_map[key]

        # We are intentionally not throwing here
        # This may cause exception loops in py_trees
        print('{}.get_location: {} not found!'.format(__name__, actor))
        return None

    @staticmethod
    def get_transform(actor):
        """
        returns the transform for the given actor
        """
        for key in CarlaDataProvider._actor_transform_map:
            if key.id == actor.id:
                return CarlaDataProvider._actor_transform_map[key]

        # We are intentionally not throwing here
        # This may cause exception loops in py_trees
        print('{}.get_transform: {} not found!'.format(__name__, actor))
        return None

    @staticmethod
    def set_client(client):
        """
        Set the CARLA client
        """
        CarlaDataProvider._client = client

    @staticmethod
    def get_client():
        """
        Get the CARLA client
        """
        return CarlaDataProvider._client

    @staticmethod
    def set_world(world):
        """
        Set the world and world settings
        """
        CarlaDataProvider._world = world
        CarlaDataProvider._sync_flag = world.get_settings().synchronous_mode
        CarlaDataProvider._map = world.get_map()
        CarlaDataProvider._blueprint_library = world.get_blueprint_library()
        CarlaDataProvider.generate_spawn_points()
        CarlaDataProvider.prepare_map()

    @staticmethod
    def get_world():
        """
        Return world
        """
        return CarlaDataProvider._world

    @staticmethod
    def get_map(world=None):
        """
        Get the current map
        """
        if CarlaDataProvider._map is None:
            if world is None:
                if CarlaDataProvider._world is None:
                    raise ValueError(
                        "class member 'world' not initialized yet")
                else:
                    CarlaDataProvider._map = CarlaDataProvider._world.get_map()
            else:
                CarlaDataProvider._map = world.get_map()

        return CarlaDataProvider._map

    @staticmethod
    def is_sync_mode():
        """
        @return true if synchronous mode is used
        """
        return CarlaDataProvider._sync_flag

    @staticmethod
    def find_weather_presets():
        """
        Get weather presets from CARLA
        """
        rgx = re.compile(
            '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')
        name = lambda x: ' '.join(m.group(0) for m in rgx.finditer(x))
        presets = [
            x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)
        ]
        return [(getattr(carla.WeatherParameters, x), name(x))
                for x in presets]

    @staticmethod
    def prepare_map():
        """
        This function sets the current map and loads all traffic lights for this map to
        _traffic_light_map
        """
        if CarlaDataProvider._map is None:
            CarlaDataProvider._map = CarlaDataProvider._world.get_map()

        # Parse all traffic lights
        CarlaDataProvider._traffic_light_map.clear()
        for traffic_light in CarlaDataProvider._world.get_actors().filter(
                '*traffic_light*'):
            if traffic_light not in CarlaDataProvider._traffic_light_map.keys(
            ):
                CarlaDataProvider._traffic_light_map[
                    traffic_light] = traffic_light.get_transform()
            else:
                raise KeyError(
                    "Traffic light '{}' already registered. Cannot register twice!"
                    .format(traffic_light.id))

    @staticmethod
    def annotate_trafficlight_in_group(traffic_light):
        """
        Get dictionary with traffic light group info for a given traffic light
        """
        dict_annotations = {'ref': [], 'opposite': [], 'left': [], 'right': []}

        # Get the waypoints
        ref_location = CarlaDataProvider.get_trafficlight_trigger_location(
            traffic_light)
        ref_waypoint = CarlaDataProvider.get_map().get_waypoint(ref_location)
        ref_yaw = ref_waypoint.transform.rotation.yaw

        group_tl = traffic_light.get_group_traffic_lights()

        for target_tl in group_tl:
            if traffic_light.id == target_tl.id:
                dict_annotations['ref'].append(target_tl)
            else:
                # Get the angle between yaws
                target_location = CarlaDataProvider.get_trafficlight_trigger_location(
                    target_tl)
                target_waypoint = CarlaDataProvider.get_map().get_waypoint(
                    target_location)
                target_yaw = target_waypoint.transform.rotation.yaw

                diff = (target_yaw - ref_yaw) % 360

                if diff > 330:
                    continue
                elif diff > 225:
                    dict_annotations['right'].append(target_tl)
                elif diff > 135.0:
                    dict_annotations['opposite'].append(target_tl)
                elif diff > 30:
                    dict_annotations['left'].append(target_tl)

        return dict_annotations

    @staticmethod
    def get_trafficlight_trigger_location(traffic_light):  # pylint: disable=invalid-name
        """
        Calculates the location of the trigger volume of the traffic light
        """
        def rotate_point(point, angle):
            """
            rotate a given point by a given angle
            """
            x_ = math.cos(math.radians(angle)) * point.x - math.sin(
                math.radians(angle)) * point.y
            y_ = math.sin(math.radians(angle)) * point.x + math.cos(
                math.radians(angle)) * point.y

            return carla.Vector3D(x_, y_, point.z)

        base_transform = traffic_light.get_transform()
        base_rot = base_transform.rotation.yaw
        area_loc = base_transform.transform(
            traffic_light.trigger_volume.location)
        area_ext = traffic_light.trigger_volume.extent

        point = rotate_point(carla.Vector3D(0, 0, area_ext.z), base_rot)
        point_location = area_loc + carla.Location(x=point.x, y=point.y)

        return carla.Location(point_location.x, point_location.y,
                              point_location.z)

    @staticmethod
    def update_light_states(ego_light,
                            annotations,
                            states,
                            freeze=False,
                            timeout=1000000000):
        """
        Update traffic light states
        """
        reset_params = []

        for state in states:
            relevant_lights = []
            if state == 'ego':
                relevant_lights = [ego_light]
            else:
                relevant_lights = annotations[state]
            for light in relevant_lights:
                prev_state = light.get_state()
                prev_green_time = light.get_green_time()
                prev_red_time = light.get_red_time()
                prev_yellow_time = light.get_yellow_time()
                reset_params.append({
                    'light': light,
                    'state': prev_state,
                    'green_time': prev_green_time,
                    'red_time': prev_red_time,
                    'yellow_time': prev_yellow_time
                })

                light.set_state(states[state])
                if freeze:
                    light.set_green_time(timeout)
                    light.set_red_time(timeout)
                    light.set_yellow_time(timeout)

        return reset_params

    @staticmethod
    def reset_lights(reset_params):
        """
        Reset traffic lights
        """
        for param in reset_params:
            param['light'].set_state(param['state'])
            param['light'].set_green_time(param['green_time'])
            param['light'].set_red_time(param['red_time'])
            param['light'].set_yellow_time(param['yellow_time'])

    @staticmethod
    def get_next_traffic_light(actor, use_cached_location=True):
        """
        returns the next relevant traffic light for the provided actor
        """

        if not use_cached_location:
            location = actor.get_transform().location
        else:
            location = CarlaDataProvider.get_location(actor)

        waypoint = CarlaDataProvider.get_map().get_waypoint(location)
        # Create list of all waypoints until next intersection
        list_of_waypoints = []
        while waypoint and not waypoint.is_intersection:
            list_of_waypoints.append(waypoint)
            waypoint = waypoint.next(2.0)[0]

        # If the list is empty, the actor is in an intersection
        if not list_of_waypoints:
            return None

        relevant_traffic_light = None
        distance_to_relevant_traffic_light = float("inf")

        for traffic_light in CarlaDataProvider._traffic_light_map:
            if hasattr(traffic_light, 'trigger_volume'):
                tl_t = CarlaDataProvider._traffic_light_map[traffic_light]
                transformed_tv = tl_t.transform(
                    traffic_light.trigger_volume.location)
                distance = carla.Location(transformed_tv).distance(
                    list_of_waypoints[-1].transform.location)

                if distance < distance_to_relevant_traffic_light:
                    relevant_traffic_light = traffic_light
                    distance_to_relevant_traffic_light = distance

        return relevant_traffic_light

    @staticmethod
    def set_ego_vehicle_route(route):
        """
        Set the route of the ego vehicle

        @todo extend ego_vehicle_route concept to support multi ego_vehicle scenarios
        """
        CarlaDataProvider._ego_vehicle_route = route

    @staticmethod
    def get_ego_vehicle_route():
        """
        returns the currently set route of the ego vehicle
        Note: Can be None
        """
        return CarlaDataProvider._ego_vehicle_route

    @staticmethod
    def generate_spawn_points():
        """
        Generate spawn points for the current map
        """
        spawn_points = list(
            CarlaDataProvider.get_map(
                CarlaDataProvider._world).get_spawn_points())
        CarlaDataProvider._rng.shuffle(spawn_points)
        CarlaDataProvider._spawn_points = spawn_points
        CarlaDataProvider._spawn_index = 0

    @staticmethod
    def create_blueprint(model,
                         rolename='scenario',
                         color=None,
                         actor_category="car"):
        """
        Function to setup the blueprint of an actor given its model and other relevant parameters
        """

        _actor_blueprint_categories = {
            'car': 'vehicle.tesla.model3',
            'van': 'vehicle.volkswagen.t2',
            'truck': 'vehicle.carlamotors.carlacola',
            'trailer': '',
            'semitrailer': '',
            'bus': 'vehicle.volkswagen.t2',
            'motorbike': 'vehicle.kawasaki.ninja',
            'bicycle': 'vehicle.diamondback.century',
            'train': '',
            'tram': '',
            'pedestrian': 'walker.pedestrian.0001',
        }

        # Set the model
        try:
            blueprint = CarlaDataProvider._rng.choice(
                CarlaDataProvider._blueprint_library.filter(model))
        except IndexError:
            # The model is not part of the blueprint library. Let's take a default one for the given category
            bp_filter = "vehicle.*"
            new_model = _actor_blueprint_categories[actor_category]
            if new_model != '':
                bp_filter = new_model
            print("WARNING: Actor model {} not available. Using instead {}".
                  format(model, new_model))
            blueprint = CarlaDataProvider._rng.choice(
                CarlaDataProvider._blueprint_library.filter(bp_filter))

        # Set the color
        if color:
            if not blueprint.has_attribute('color'):
                print(
                    "WARNING: Cannot set Color ({}) for actor {} due to missing blueprint attribute"
                    .format(color, blueprint.id))
            else:
                default_color_rgba = blueprint.get_attribute(
                    'color').as_color()
                default_color = '({}, {}, {})'.format(default_color_rgba.r,
                                                      default_color_rgba.g,
                                                      default_color_rgba.b)
                try:
                    blueprint.set_attribute('color', color)
                except ValueError:
                    # Color can't be set for this vehicle
                    print(
                        "WARNING: Color ({}) cannot be set for actor {}. Using instead: ({})"
                        .format(color, blueprint.id, default_color))
                    blueprint.set_attribute('color', default_color)
        else:
            if blueprint.has_attribute('color') and rolename != 'hero':
                color = CarlaDataProvider._rng.choice(
                    blueprint.get_attribute('color').recommended_values)
                blueprint.set_attribute('color', color)

        # Make pedestrians mortal
        if blueprint.has_attribute('is_invincible'):
            blueprint.set_attribute('is_invincible', 'false')

        # Set the rolename
        if blueprint.has_attribute('role_name'):
            blueprint.set_attribute('role_name', rolename)

        return blueprint

    @staticmethod
    def handle_actor_batch(batch):
        """
        Forward a CARLA command batch to spawn actors to CARLA, and gather the responses.
        Returns a list of actors on success, None otherwise
        """

        actors = []

        sync_mode = CarlaDataProvider.is_sync_mode()

        if CarlaDataProvider._client and batch is not None:
            responses = CarlaDataProvider._client.apply_batch_sync(
                batch, sync_mode)
        else:
            return None

        # wait for the actors to be spawned properly before we do anything
        if sync_mode:
            CarlaDataProvider._world.tick()
        else:
            CarlaDataProvider._world.wait_for_tick()

        actor_ids = []
        if responses:
            for response in responses:
                if not response.error:
                    actor_ids.append(response.actor_id)

        carla_actors = CarlaDataProvider._world.get_actors(actor_ids)
        for actor in carla_actors:
            actors.append(actor)

        return actors

    @staticmethod
    def request_new_actor(model,
                          spawn_point,
                          rolename='scenario',
                          autopilot=False,
                          random_location=False,
                          color=None,
                          actor_category="car"):
        """
        This method tries to create a new actor, returning it if successful (None otherwise).
        """
        blueprint = CarlaDataProvider.create_blueprint(model, rolename, color,
                                                       actor_category)

        if random_location:
            actor = None
            while not actor:
                spawn_point = CarlaDataProvider._rng.choice(
                    CarlaDataProvider._spawn_points)
                actor = CarlaDataProvider._world.try_spawn_actor(
                    blueprint, spawn_point)

        else:
            # slightly lift the actor to avoid collisions with ground when spawning the actor
            # DO NOT USE spawn_point directly, as this will modify spawn_point permanently
            _spawn_point = carla.Transform(carla.Location(),
                                           spawn_point.rotation)
            _spawn_point.location.x = spawn_point.location.x
            _spawn_point.location.y = spawn_point.location.y
            # 0.2 -> 0.1
            _spawn_point.location.z = spawn_point.location.z + 0.2
            actor = CarlaDataProvider._world.try_spawn_actor(
                blueprint, _spawn_point)

        if actor is None:
            raise RuntimeError(
                "Error: Unable to spawn vehicle {} at {}".format(
                    blueprint.id, spawn_point))
        else:
            # Set the actor's autopilot if it is a vehicle
            if actor in CarlaDataProvider._blueprint_library.filter(
                    'vehicle.*'):
                actor.set_autopilot(autopilot)

        # wait for the actor to be spawned properly before we do anything
        if CarlaDataProvider.is_sync_mode():
            CarlaDataProvider._world.tick()
        else:
            CarlaDataProvider._world.wait_for_tick()

        if actor is None:
            return None

        CarlaDataProvider._carla_actor_pool[actor.id] = actor
        CarlaDataProvider.register_actor(actor)
        return actor

    @staticmethod
    def request_new_actors(actor_list):
        """
        This method tries to spawn a series of actors in batch. If this was successful,
        the new actors are returned, None otherwise.

        param:
        - actor_list: list of ActorConfigurationData
        """

        SpawnActor = carla.command.SpawnActor  # pylint: disable=invalid-name
        PhysicsCommand = carla.command.SetSimulatePhysics  # pylint: disable=invalid-name
        FutureActor = carla.command.FutureActor  # pylint: disable=invalid-name
        ApplyTransform = carla.command.ApplyTransform  # pylint: disable=invalid-name
        SetAutopilot = carla.command.SetAutopilot  # pylint: disable=invalid-name

        batch = []
        actors = []

        CarlaDataProvider.generate_spawn_points()

        for actor in actor_list:

            # Get the blueprint
            blueprint = CarlaDataProvider.create_blueprint(
                actor.model, actor.rolename, actor.color, actor.category)

            # Get the spawn point
            transform = actor.transform
            if actor.random_location:
                if CarlaDataProvider._spawn_index >= len(
                        CarlaDataProvider._spawn_points):
                    print("No more spawn points to use")
                    break
                else:
                    _spawn_point = CarlaDataProvider._spawn_points[
                        CarlaDataProvider._spawn_index]
                    CarlaDataProvider._spawn_index += 1

            else:
                _spawn_point = carla.Transform()
                _spawn_point.rotation = transform.rotation
                _spawn_point.location.x = transform.location.x
                _spawn_point.location.y = transform.location.y
                _spawn_point.location.z = transform.location.z + 0.2

            # Get the command
            command = SpawnActor(blueprint, _spawn_point)
            command.then(SetAutopilot(FutureActor, actor.autopilot))

            if actor.category == 'misc':
                command.then(PhysicsCommand(FutureActor, True))
            elif actor.args is not None and 'physics' in actor.args and actor.args[
                    'physics'] == "off":
                command.then(ApplyTransform(FutureActor, _spawn_point)).then(
                    PhysicsCommand(FutureActor, False))

            batch.append(command)

        actors = CarlaDataProvider.handle_actor_batch(batch)

        if not actors:
            return None

        for actor in actors:
            if actor is None:
                continue
            CarlaDataProvider._carla_actor_pool[actor.id] = actor
            CarlaDataProvider.register_actor(actor)
        return actors

    @staticmethod
    def request_new_batch_actors(model,
                                 amount,
                                 spawn_points,
                                 autopilot=False,
                                 random_location=False,
                                 rolename='scenario'):
        """
        Simplified version of "request_new_actors". This method also creates several actors in batch.

        Instead of needing a list of ActorConfigurationData, an "amount" parameter is used.
        This makes actor spawning easier but reduces the amount of configurability.

        Some parameters are the same for all actors (rolename, autopilot and random location)
        while others are randomized (color)
        """

        SpawnActor = carla.command.SpawnActor  # pylint: disable=invalid-name
        SetAutopilot = carla.command.SetAutopilot  # pylint: disable=invalid-name
        FutureActor = carla.command.FutureActor  # pylint: disable=invalid-name

        CarlaDataProvider.generate_spawn_points()

        batch = []

        for i in range(amount):
            # Get vehicle by model
            blueprint = CarlaDataProvider.create_blueprint(model, rolename)

            if random_location:
                if CarlaDataProvider._spawn_index >= len(
                        CarlaDataProvider._spawn_points):
                    print(
                        "No more spawn points to use. Spawned {} actors out of {}"
                        .format(i + 1, amount))
                    break
                else:
                    spawn_point = CarlaDataProvider._spawn_points[
                        CarlaDataProvider._spawn_index]
                    CarlaDataProvider._spawn_index += 1
            else:
                try:
                    spawn_point = spawn_points[i]
                except IndexError:
                    print(
                        "The amount of spawn points is lower than the amount of vehicles spawned"
                    )
                    break

            if spawn_point:
                batch.append(
                    SpawnActor(blueprint, spawn_point).then(
                        SetAutopilot(FutureActor, autopilot)))

        actors = CarlaDataProvider.handle_actor_batch(batch)

        if actors is None:
            return None

        ###############################################
        # code for walker ai controllers: currently not working due to seg fault
        if 'walker' in model:
            walkers_list = []
            for actor in actors:
                walkers_list.append({"id": actor.id})
            batch = []
            walker_controller_bp = CarlaDataProvider._world.get_blueprint_library(
            ).find('controller.ai.walker')
            for i in range(len(walkers_list)):
                batch.append(
                    SpawnActor(walker_controller_bp, carla.Transform(),
                               walkers_list[i]["id"]))
            actors_controllers = CarlaDataProvider.handle_actor_batch(batch)
            for j, actor in enumerate(actors_controllers):
                walkers_list[j]['con'] = actor.id
            # CarlaDataProvider._world.wait_for_tick()
            print('start')
            for i in range(len(actors_controllers)):
                print(i, '/', len(actors_controllers))
                # start walker
                actors_controllers[i].start()
                # set walk to random point
                actors_controllers[i].go_to_location(
                    CarlaDataProvider._world.
                    get_random_location_from_navigation())
                # random max speed
                actors_controllers[i].set_max_speed(
                    1 + random.random()
                )  # max speed between 1 and 2 (default is 1.4 m/s)
            print('finish')
        ###############################################

        for actor in actors:
            if actor is None:
                continue
            CarlaDataProvider._carla_actor_pool[actor.id] = actor
            CarlaDataProvider.register_actor(actor)
        return actors

    @staticmethod
    def get_actors():
        """
        Return list of actors and their ids

        Note: iteritems from six is used to allow compatibility with Python 2 and 3
        """
        return iteritems(CarlaDataProvider._carla_actor_pool)

    @staticmethod
    def actor_id_exists(actor_id):
        """
        Check if a certain id is still at the simulation
        """
        if actor_id in CarlaDataProvider._carla_actor_pool:
            return True

        return False

    @staticmethod
    def get_hero_actor():
        """
        Get the actor object of the hero actor if it exists, returns none otherwise.
        """
        for actor_id in CarlaDataProvider._carla_actor_pool:
            if CarlaDataProvider._carla_actor_pool[actor_id].attributes[
                    'role_name'] == 'hero':
                return CarlaDataProvider._carla_actor_pool[actor_id]
        return None

    @staticmethod
    def get_actor_by_id(actor_id):
        """
        Get an actor from the pool by using its ID. If the actor
        does not exist, None is returned.
        """
        if actor_id in CarlaDataProvider._carla_actor_pool:
            return CarlaDataProvider._carla_actor_pool[actor_id]

        print("Non-existing actor id {}".format(actor_id))
        return None

    @staticmethod
    def remove_actor_by_id(actor_id):
        """
        Remove an actor from the pool using its ID
        """
        if actor_id in CarlaDataProvider._carla_actor_pool:
            CarlaDataProvider._carla_actor_pool[actor_id].destroy()
            CarlaDataProvider._carla_actor_pool[actor_id] = None
            CarlaDataProvider._carla_actor_pool.pop(actor_id)
        else:
            print(
                "Trying to remove a non-existing actor id {}".format(actor_id))

    @staticmethod
    def remove_actors_in_surrounding(location, distance):
        """
        Remove all actors from the pool that are closer than distance to the
        provided location
        """
        for actor_id in CarlaDataProvider._carla_actor_pool.copy():
            if CarlaDataProvider._carla_actor_pool[actor_id].get_location(
            ).distance(location) < distance:
                CarlaDataProvider._carla_actor_pool[actor_id].destroy()
                CarlaDataProvider._carla_actor_pool.pop(actor_id)

        # Remove all keys with None values
        CarlaDataProvider._carla_actor_pool = dict({
            k: v
            for k, v in CarlaDataProvider._carla_actor_pool.items() if v
        })

    @staticmethod
    def cleanup():
        """
        Cleanup and remove all entries from all dictionaries
        """
        DestroyActor = carla.command.DestroyActor  # pylint: disable=invalid-name
        batch = []

        for actor_id in CarlaDataProvider._carla_actor_pool.copy():
            batch.append(
                DestroyActor(CarlaDataProvider._carla_actor_pool[actor_id]))

        if CarlaDataProvider._client:
            try:
                CarlaDataProvider._client.apply_batch_sync(batch)
            except RuntimeError as e:
                if "time-out" in str(e):
                    pass
                else:
                    raise e

        CarlaDataProvider._actor_velocity_map.clear()
        CarlaDataProvider._actor_location_map.clear()
        CarlaDataProvider._actor_transform_map.clear()
        CarlaDataProvider._traffic_light_map.clear()
        CarlaDataProvider._map = None
        CarlaDataProvider._world = None
        CarlaDataProvider._sync_flag = False
        CarlaDataProvider._ego_vehicle_route = None
        CarlaDataProvider._carla_actor_pool = dict()
        CarlaDataProvider._client = None
        CarlaDataProvider._spawn_points = None
        CarlaDataProvider._spawn_index = 0
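
An illustrative usage sketch (not part of the original file) of the typical call sequence against a running CARLA server; the host, port, vehicle model and spawn transform are placeholders.

import carla

client = carla.Client('localhost', 2000)
CarlaDataProvider.set_client(client)
CarlaDataProvider.set_world(client.get_world())

# request_new_actor registers the actor in the internal maps itself
ego = CarlaDataProvider.request_new_actor('vehicle.tesla.model3',
                                          carla.Transform(),
                                          rolename='hero',
                                          random_location=True)

CarlaDataProvider.on_carla_tick()            # refresh the cached actor data
print(CarlaDataProvider.get_velocity(ego))   # absolute velocity from the cache

CarlaDataProvider.cleanup()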
Example #8
 def _get_random_stream(self):
     """ Returns a new RNG stream. DO NOT ACCESS FROM OUTSIDE.
     """
     return rand.RandomState(self._master_RNG())
Example #9
 def _conv_layer_params(self):
     weights_shape = 2, 2, 3, 4
     biases_shape = weights_shape[-1:]
     weights = array_ops.constant(npr.RandomState(0).randn(*weights_shape))
     biases = array_ops.constant(npr.RandomState(1).randn(*biases_shape))
     return (weights, biases)
Example #10
def init_random_params(scale, layer_sizes, rng=npr.RandomState(0)):
  return [(scale * rng.randn(m, n), scale * rng.randn(n))
          for m, n, in zip(layer_sizes[:-1], layer_sizes[1:])]
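
Hypothetical usage of the function above; the layer sizes are illustrative. Note that the default rng=npr.RandomState(0) is created once at definition time, so repeated calls with the default keep advancing the same stream.

params = init_random_params(0.1, [784, 128, 10])
print([(w.shape, b.shape) for w, b in params])  # [((784, 128), (128,)), ((128, 10), (10,))]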
Example #11
 def seed(self, new_seed=None):
     self._seed = new_seed
     self._master_RNG_fct = rand.RandomState(new_seed)
     self._master_RNG = (
         lambda: self._master_RNG_fct.random_integers(2**16 - 1))
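
Examples 8 and 11 together implement a master-RNG pattern: one seeded generator hands out sub-seeds, and each consumer gets its own reproducible stream. A standalone sketch, assuming only numpy (randint is used instead of the deprecated random_integers):

import numpy.random as rand

master = rand.RandomState(42)
new_stream = lambda: rand.RandomState(master.randint(2**16 - 1))

stream_a = new_stream()
stream_b = new_stream()
print(stream_a.rand(), stream_b.rand())  # distinct streams, reproducible from seed 42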
Example #12
def pick_backend(backend: str = None, samples: int = None, noise: NoiseModel = None, device=None,
                 exclude_symbolic: bool = True) -> str:

    """
    choose, or verify, a backend for the user.
    Parameters
    ----------
    backend: str, optional:
        what backend to choose or verify. if None: choose for the user.
    samples: int, optional:
        if int and not None, choose (verify) a simulator which supports sampling.
    noise: str or NoiseModel, optional:
        if not None, choose (verify) a simulator that supports the specified noise.
    device: optional:
        verify that a given backend supports the specified device. MUST specify backend, if not None.
        if None: do not emulate or use real device.
    exclude_symbolic: bool, optional:
        whether or not to exclude the tequila debugging simulator from the available simulators, when choosing.

    Returns
    -------
    str:
        the name of the chosen (or verified) backend.
    """

    if len(INSTALLED_SIMULATORS) == 0:
        raise TequilaException("No simulators installed on your system")

    if backend is None and device is not None:
        raise TequilaException('device use requires backend specification!')

    if backend is None:
        if noise is None:
            if samples is None:
                for f in SUPPORTED_BACKENDS:
                    if f in INSTALLED_SIMULATORS:
                        return f
            else:
                for f in INSTALLED_SAMPLERS.keys():
                    return f
        else:
            if samples is None:
                raise TequilaException(
                    "Noise requires sampling; please provide a positive, integer value for samples")
            for f in SUPPORTED_NOISE_BACKENDS:
                if noise == 'device':
                    raise TequilaException('device noise requires a device, which requires a named backend!')
                else:
                    return f
            raise TequilaException(
                            'Could not find any installed sampler!')


    if hasattr(backend, "lower"):
        backend = backend.lower()

    if backend == "random":
        if device is not None:
            raise TequilaException('cannot ask for a random backend and a specific device!')
        from numpy import random as random
        import time
        state = random.RandomState(int(str(time.process_time()).split('.')[-1]) % 2 ** 32)
        if samples is None:
            backend = state.choice(list(INSTALLED_SIMULATORS.keys()), 1)[0]
        else:
            backend = state.choice(list(INSTALLED_SAMPLERS.keys()), 1)[0]

        if exclude_symbolic:
            while (backend == "symbolic"):
                backend = state.choice(list(INSTALLED_SIMULATORS.keys()), 1)[0]
        return backend

    if device is not None and samples is None:
        raise TequilaException('Use of a device requires sampling!')
    if noise == 'device' and device is None:
        raise TequilaException('Use of device noise requires a device!')

    if backend not in SUPPORTED_BACKENDS:
        raise TequilaException("Backend {backend} not supported ".format(backend=backend))

    elif noise is False and samples is None and backend not in INSTALLED_SIMULATORS.keys():
        raise TequilaException("Backend {backend} not installed ".format(backend=backend))
    elif noise is False and samples is not None and backend not in INSTALLED_SAMPLERS.keys():
        raise TequilaException("Backend {backend} not installed or sampling not supported".format(backend=backend))
    elif noise is not False and samples is not None and backend not in INSTALLED_NOISE_SAMPLERS.keys():
        raise TequilaException(
            "Backend {backend} not installed or else Noise has not been implemented".format(backend=backend))

    return backend
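
Hypothetical calls, assuming at least one simulator is installed; the backend name is illustrative and the result depends on the local installation.

name = pick_backend()                               # let the function choose
name = pick_backend(backend='qulacs', samples=100)  # verify a named backend supports sampling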
Example #13
 def indexed_loss_fun(w, L2_vect, i_iter):
     rs = npr.RandomState(npr.RandomState(global_seed + i_hyper).randint(1000))
     seed = i_hyper * 10**6 + i_iter   # Deterministic seed needed for backwards pass.
     idxs = rs.randint(N_train, size=batch_size)
     return loss_fun(w, train_data['X'][idxs], train_data['T'][idxs], L2_vect)
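
A sketch (assumed, not from the original project) of why the nested RandomState matters here: rebuilding the generator from the same counters reproduces the same minibatch indices, e.g. for the backwards pass mentioned in the comment.

import numpy.random as npr

def batch_indices(global_seed, i_hyper, n_train, batch_size):
    rs = npr.RandomState(npr.RandomState(global_seed + i_hyper).randint(1000))
    return rs.randint(n_train, size=batch_size)

assert (batch_indices(0, 3, 1000, 32) == batch_indices(0, 3, 1000, 32)).all()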
Example #14
N_classes = 10
N_train = 1000
N_valid = 10**3
N_tests = 10**3
N_batches = N_train / batch_size
N_iters = N_epochs * N_batches
# ----- Initial values of learned hyper-parameters -----
init_log_L2_reg = 0.0
init_log_alphas = -2.0
init_invlogit_betas = inv_logit(0.9)
init_log_param_scale = -2.0
# ----- Superparameters -----
meta_alpha = 0.05
N_meta_iter = 100

global_seed = npr.RandomState(3).randint(1000)

def fill_parser(parser, items):
    partial_vects = [np.full(parser[name].size, items[i])
                     for i, name in enumerate(parser.names)]
    return np.concatenate(partial_vects, axis=0)

def run():
    train_data, valid_data, tests_data = load_data_dicts(N_train, N_valid, N_tests)
    parser, pred_fun, loss_fun, frac_err = make_nn_funs(layer_sizes)
    N_weight_types = len(parser.names)
    hyperparams = VectorParser()
    hyperparams['log_L2_reg']      = np.full(N_weight_types, init_log_L2_reg)
    hyperparams['log_param_scale'] = np.full(N_weight_types, init_log_param_scale)
    hyperparams['log_alphas']      = np.full(N_iters, init_log_alphas)
Example #15
 def _make_tensor(self):
   x = array_ops.placeholder(dtypes.float64, (3, 1))
   w = array_ops.constant(npr.RandomState(0).randn(3, 3))
   y = math_ops.matmul(w, x)
   g = gradients_impl.gradients(y, x)[0]
   return g
Example #16
    def __init__(self,
                 traindata,
                 x,
                 y,
                 input_len,
                 kernel,
                 stride,
                 Epsilon,
                 sigma=1.0,
                 learning_rate=0.5,
                 decay_function=asymptotic_decay,
                 neighborhood_function='gaussian',
                 topology='rectangular',
                 activation_distance='euclidean',
                 random_seed=None):
        """Initializes a Self Organizing Maps.

        A rule of thumb to set the size of the grid for a dimensionality
        reduction task is that it should contain 5*sqrt(N) neurons
        where N is the number of samples in the dataset to analyze.

        E.g. if your dataset has 150 samples, 5*sqrt(150) = 61.23
        hence a map 8-by-8 should perform well.

        Parameters
        ----------
        x : int
            x dimension of the SOM.

        y : int
            y dimension of the SOM.

        input_len : int
            Number of the elements of the vectors in input.

        sigma : float, optional (default=1.0)
            Spread of the neighborhood function, needs to be adequate
            to the dimensions of the map.
            (at the iteration t we have sigma(t) = sigma / (1 + t/T)
            where T is #num_iteration/2)
        learning_rate : initial learning rate
            (at the iteration t we have
            learning_rate(t) = learning_rate / (1 + t/T)
            where T is #num_iteration/2)

        decay_function : function (default=None)
            Function that reduces learning_rate and sigma at each iteration
            the default function is:
                        learning_rate / (1+t/(max_iterations/2))

            A custom decay function will need to take in input
            three parameters in the following order:

            1. learning rate
            2. current iteration
            3. maximum number of iterations allowed


            Note that if a lambda function is used to define the decay
            MiniSom will not be pickable anymore.

        neighborhood_function : string, optional (default='gaussian')
            Function that weights the neighborhood of a position in the map.
            Possible values: 'gaussian', 'mexican_hat', 'bubble', 'triangle'

        topology : string, optional (default='rectangular')
            Topology of the map.
            Possible values: 'rectangular', 'hexagonal'

        activation_distance : string, optional (default='euclidean')
            Distance used to activate the map.
            Possible values: 'euclidean', 'cosine', 'manhattan', 'chebyshev'

        random_seed : int, optional (default=None)
            Random seed to use.
        """
        if sigma >= x or sigma >= y:
            warn('Warning: sigma is too high for the dimension of the map.')

        self._random_generator = random.RandomState(
            random_seed)  # seeded random number generator
        self._learning_rate = learning_rate  # initial learning rate
        self._sigma = sigma  # initial neighborhood width (1 means full coverage); it decays over the iterations as sigma(t) = sigma / (1 + t/T)
        self._input_len = input_len  # dimensionality of the input vectors (and of each weight vector)
        # random initialization
        self._weights = self._random_generator.rand(
            x, y, input_len) * 2 - 1  # weights drawn uniformly in [-1, 1]
        self._weights /= linalg.norm(
            self._weights, axis=-1, keepdims=True
        )  # normalize the weights; linalg.norm defaults to the 2-norm, axis=-1 normalizes each weight vector, keepdims preserves the shape

        self._activation_map = zeros((x, y))  # zero matrix of the same size as the map, used to record activations
        self._neigx = arange(x)  # the list [0, 1, ..., x-1]
        self._neigy = arange(y)  # used to evaluate the neighborhood function matrix

        if topology not in ['hexagonal', 'rectangular']:  # hexagonal or rectangular grid
            msg = '%s not supported only hexagonal and rectangular available'
            raise ValueError(msg % topology)
        self.topology = topology  # store the map topology
        self._xx, self._yy = meshgrid(self._neigx, self._neigy)  # coordinate matrices of the grid
        self._xx = self._xx.astype(float)  # convert the coordinates to float
        self._yy = self._yy.astype(float)
        if topology == 'hexagonal':
            self._xx[::-2] -= 0.5
            if neighborhood_function in ['triangle']:
                warn('triangle neighborhood function does not ' +
                     'take in account hexagonal topology')

        self._decay_function = decay_function  # learning-rate decay function: learning_rate / (1 + t/(max_iter/2))

        neig_functions = {
            'gaussian': self._gaussian,
        }

        if neighborhood_function not in neig_functions:
            msg = '%s not supported. Functions available: %s'
            raise ValueError(
                msg %
                (neighborhood_function, ', '.join(neig_functions.keys())))

        if neighborhood_function in [
                'triangle', 'bubble'
        ] and (divmod(sigma, 1)[1] != 0 or sigma < 1):
            warn('sigma should be an integer >=1 when triangle or bubble' +
                 'are used as neighborhood function')

        self.neighborhood = neig_functions[neighborhood_function]  # selected neighborhood function

        distance_functions = {
            'euclidean': self._euclidean_distance,
        }

        if activation_distance not in distance_functions:
            msg = '%s not supported. Distances available: %s'
            raise ValueError(
                msg %
                (activation_distance, ', '.join(distance_functions.keys())))

        self._activation_distance = distance_functions[
            activation_distance]  # similarity measure, usually Euclidean; a fuzzy distance function would be added here
        self.kernel = kernel  # size of the sliding window, e.g. [3, 3]
        self.stride = stride  # stride of the sliding window
        self.Epsilon = Epsilon  # stopping criterion for the fuzzy training
        self.w_distance = 100  # records the magnitude of the weight change, used to stop training
        self.spldata = []  # data grouped per image
        self.allfeature = []  # all features of all images, used to initialize the weights and to measure the training error
        self.traindata = traindata
        self.processdata(self.traindata)
        self.x = x
        self.y = y
Example #17
def rand_int(low, high=None):
  randint = npr.RandomState(0).randint
  def fn(shape, dtype):
    return randint(low, high=high, size=shape, dtype=dtype)
  return fn
Example #18
def main(opts):
    USAGE = "usage: %prog [options] image_filename"
    parser = optparse.OptionParser(
        usage=USAGE,
        description=
        '''A steganography algorithm that works by reading and writing 3 bytes at a time,
        hence the name''')
    parser.add_option('-w',
                      '--write',
                      action='store_true',
                      help='Write message data to image file.')
    parser.add_option('-f',
                      '--file',
                      dest='FN',
                      help='File to read message data from.')
    parser.add_option('-o',
                      '--output',
                      dest='out',
                      type=str,
                      default='./out.png',
                      help='Output filename to write new image in write mode.')
    parser.add_option(
        '-r',
        '--read',
        action='store_true',
        help=
        'Read message data from image file to stdout (redirect to a file if you want).'
    )
    parser.add_option('-v',
                      '--verbosity',
                      action='store_true',
                      help='Set verbosity.')
    parser.add_option(
        '-p',
        '--paranoid',
        action='store_true',
        help=
        'Performs a read operation after writing to check if message data was written correctly.'
    )
    parser.add_option('-s',
                      '--seed',
                      default=1937007975,
                      type=int,
                      help='The seed for the algo\'s PRNG default=1937007975.')
    (options, args) = parser.parse_args()

    if (len(args) == 0) or (options.write == None and options.read == None):
        parser.print_help()
        sys.exit(1)

    if (options.write and options.read):
        print("[!] Cannot have both read and write options passed at once.")
        sys.exit(2)

    if (options.read and options.paranoid):
        print("[-] Option paranoia would have not effect on a read operation.")

    prog_inst = ThreeFactorStegAlgo(args[0], options.out, options.seed,
                                    options.verbosity)

    if (options.write):
        try:
            with open(options.FN, 'r') as msg_file:
                data = msg_file.read()
                prog_inst.write(data)

                if (options.paranoid):
                    prog_inst.wout(
                        "[+] Paranoia mode: Checking to see if message data was written correctly."
                    )

                    prog_inst.img_inst = Image.open(options.out)
                    prog_inst.rand_gen = random.RandomState(options.seed)
                    prog_inst.verbosity = False

                    msg = prog_inst.read()

                    check = (msg == data)

                    if check:
                        print("[+] Message data was written correctly")
                        return 0
                    else:
                        print("[-] Message data was not written correctly,")
                        print(
                            "    try changing a few parameters like seed or better still use a larger image"
                        )
                        return -1
        except IOError as io:
            sys.stderr.write("[!] 0x%08X: Could not find %s: %s.\n" %
                             (io.errno, io.filename, io.strerror))
            sys.exit(io.errno)

    if (options.read):
        print(prog_inst.read())
Example #19
    def __init__(self,
                 x,
                 y,
                 input_len,
                 sigma=1.0,
                 learning_rate=0.5,
                 decay_function=None,
                 neighborhood_function='gaussian',
                 random_seed=None,
                 trained_weights=None,
                 normalize_weights=True,
                 neigh_threshold=0.0):
        """Initializes a Self Organizing Maps.

        Parameters
        ----------
        x : int
            x dimension of the SOM

        y : int
            y dimension of the SOM

        input_len : int
            Number of the elements of the vectors in input.

        sigma : float, optional (default=1.0)
            Spread of the neighborhood function, needs to be adequate
            to the dimensions of the map.
            (at the iteration t we have sigma(t) = sigma / (1 + t/T)
            where T is #num_iteration/2)
            learning_rate, initial learning rate
            (at the iteration t we have
            learning_rate(t) = learning_rate / (1 + t/T)
            where T is #num_iteration/2)

        decay_function : function (default=None)
            Function that reduces learning_rate and sigma at each iteration
            default function:
            lambda x, current_iteration, max_iter :
                        x/(1+current_iteration/max_iter)

        neighborhood_function : function, optional (default='gaussian')
            Function that weights the neighborhood of a position in the map
            possible values: 'gaussian', 'mexican_hat'

        random_seed : int, optional (default=None)
            Random seed to use.
        
        trained_weights : initial weights (to load from previously saved ones)

        normalize_weights : whether to normalize weights (default: True)

        neigh_threshold : limit neighborhood to cells where g[] is bigger than this value times g[bmu]

        """
        if sigma >= x / 2.0 or sigma >= y / 2.0:
            warn('Warning: sigma is too high for the dimension of the map.')
        self._random_generator = random.RandomState(random_seed)
        if decay_function:
            self._decay_function = decay_function
        else:
            self._decay_function = lambda x, t, max_iter: x / (1 + t / max_iter)
        self._learning_rate = learning_rate
        self._sigma = sigma
        self._randomSeed = random_seed
        self._normalizeWeights = normalize_weights
        self._neigh_threshold = neigh_threshold
        if trained_weights is not None:
            # There's a pre-trained Weight matrix
            assert trained_weights.shape[0] == x and trained_weights.shape[
                1] == y
            self._weights = trained_weights
        else:
            # New training -> random initialization
            # Random inizialization between (-1,1)
            self._weights = self._random_generator.rand(x, y,
                                                        input_len) * 2 - 1
        if self._normalizeWeights:
            for i in range(x):
                for j in range(y):
                    # normalization
                    norm = fast_norm(self._weights[i, j])
                    self._weights[i, j] = self._weights[i, j] / norm
        self._activation_map = zeros((x, y))
        self._neigx = arange(x)
        self._neigy = arange(y)  # used to evaluate the neighborhood function
        neig_functions = {
            'gaussian': self._gaussian,
            'mexican_hat': self._mexican_hat
        }
        if neighborhood_function not in neig_functions:
            msg = '%s not supported. Functions available: %s'
            raise ValueError(
                msg %
                (neighborhood_function, ', '.join(neig_functions.keys())))
        self.neighborhood = neig_functions[neighborhood_function]
Example #20
epochs = 1000
batch_size = 5

n_a0 = \
    dict(n_input=f_height*f_width, # 784 image
         encoder_net=[200,200], 
         n_z=2,  # dimensionality of latent space
         decoder_net=[200,200]) 


    
list_of_archs = [n_a0]
list_of_archs_i = [0]


rs = npr.RandomState(0)

# Randomly select a batch
batch = []
while len(batch) != batch_size:
    datapoint = test_x[rs.randint(0,len(test_x))]
    batch.append(datapoint)


for k in list_of_k_samples:

    for m in list_of_models:

        for arch in list_of_archs_i:

            # saved_parameter_file = m + '_struc' + str(arch) + '_k' + str(k) + '_1000.ckpt' 
Example #21
    def __init__(self,
                 x,
                 y,
                 input_len,
                 sigma=1.0,
                 learning_rate=0.5,
                 decay_function=asymptotic_decay,
                 neighborhood_function='gaussian',
                 random_seed=None):
        """Initializes a Self Organizing Maps.

        A rule of thumb to set the size of the grid for a dimensionality
        reduction task is that it should contain 5*sqrt(N) neurons
        where N is the number of samples in the dataset to analyze.

        E.g. if your dataset has 150 samples, 5*sqrt(150) = 61.23
        hence a map 8-by-8 should perform well.

        Parameters
        ----------
        x : int
            x dimension of the SOM.

        y : int
            y dimension of the SOM.

        input_len : int
            Number of the elements of the vectors in input.

        sigma : float, optional (default=1.0)
            Spread of the neighborhood function, needs to be adequate
            to the dimensions of the map.
            (at the iteration t we have sigma(t) = sigma / (1 + t/T)
            where T is #num_iteration/2)
            learning_rate, initial learning rate
            (at the iteration t we have
            learning_rate(t) = learning_rate / (1 + t/T)
            where T is #num_iteration/2)

        decay_function : function (default=None)
            Function that reduces learning_rate and sigma at each iteration
            the default function is:
                        learning_rate / (1+t/(max_iterations/2))

            A custom decay function will need to take in input
            three parameters in the following order:

            1. learning rate
            2. current iteration
            3. maximum number of iterations allowed


            Note that if a lambda function is used to define the decay
            MiniSom will not be pickable anymore.

        neighborhood_function : function, optional (default='gaussian')
            Function that weights the neighborhood of a position in the map
            possible values: 'gaussian', 'mexican_hat', 'bubble'

        random_seed : int, optional (default=None)
            Random seed to use.
        """
        if sigma >= x or sigma >= y:
            warn('Warning: sigma is too high for the dimension of the map.')

        self._random_generator = random.RandomState(random_seed)

        self._learning_rate = learning_rate
        self._sigma = sigma
        self._input_len = input_len
        # random initialization
        self._weights = self._random_generator.rand(x, y, input_len) * 2 - 1

        for i in range(x):
            for j in range(y):
                # normalization
                norm = fast_norm(self._weights[i, j])
                self._weights[i, j] = self._weights[i, j] / norm

        self._activation_map = zeros((x, y))
        self._neigx = arange(x)
        self._neigy = arange(y)  # used to evaluate the neighborhood function
        self._decay_function = decay_function

        neig_functions = {
            'gaussian': self._gaussian,
            'mexican_hat': self._mexican_hat,
            'bubble': self._bubble,
            'triangle': self._triangle
        }

        if neighborhood_function not in neig_functions:
            msg = '%s not supported. Functions available: %s'
            raise ValueError(
                msg %
                (neighborhood_function, ', '.join(neig_functions.keys())))

        if neighborhood_function in ['triangle', 'bubble'
                                     ] and divmod(sigma, 1)[1] != 0:
            warn('sigma should be an integer when triangle or bubble' +
                 'are used as neighborhood function')

        self.neighborhood = neig_functions[neighborhood_function]
Example #22
import numpy as np
import numpy.random as nr
from dateutil.relativedelta import relativedelta
from scipy.stats import truncnorm

seed = nr.RandomState(0)


def get_truncated(lower, upper, mu, sigma):
    return truncnorm((lower - mu) / sigma, (upper - mu) / sigma,
                     loc=mu,
                     scale=sigma)


def gen_birth(contract):
    # Borrower has to be at least 25 at contract time, and not over 80 at contract end
    # Hence, limiting age between 26 and 74 years old (9515, 27085 days)
    return contract - relativedelta(days=nr.randint(9515, 27085))


def gen_amortization(birth, contract):
    # Amortization is conditional on age and contract date
    amortization = nr.randint(60, 360)
    # Checking to see if oldest borrower at the end of contract is above age limit of 80 years old
    count = 0
    while (birth + relativedelta(years=79) <
           contract + relativedelta(months=amortization)):
        amortization = nr.randint(60, 360)
        count += 1
        if count > 360:
            amortization = 60
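
Hypothetical usage of get_truncated above; the bounds and moments are illustrative. The frozen truncnorm distribution can draw reproducible samples by passing the module-level RandomState as random_state.

ages = get_truncated(26, 74, mu=45, sigma=10).rvs(size=5, random_state=seed)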
Example #23
def rand_not_small(offset=10.):
    post = lambda x: x + onp.where(x > 0, offset, -offset)
    randn = npr.RandomState(0).randn
    return partial(_rand_dtype, randn, scale=3., post=post)
Example #24
    def testScanRnn(self):
        r = npr.RandomState(0)

        n_in = 4
        n_hid = 2
        n_out = 1
        length = 3

        W_trans = r.randn(n_hid, n_hid + n_in)
        W_out = r.randn(n_out, n_hid + n_in)
        params = W_trans, W_out

        inputs = r.randn(length, n_in)
        targets = r.randn(length, n_out)

        def step(params, state, input):
            W_trans, W_out = params
            stacked = np.concatenate([state, input])
            output = np.tanh(np.dot(W_out, stacked))
            next_state = np.tanh(np.dot(W_trans, stacked))
            return next_state, output

        def rnn(params, inputs):
            init_state = np.zeros(n_hid)
            _, outputs = lax.scan(partial(step, params), init_state, inputs)
            return outputs

        def loss(params, inputs, targets):
            predictions = rnn(params, inputs)
            return np.sum((predictions - targets)**2)

        # evaluation doesn't crash
        loss(params, inputs, targets)

        # jvp evaluation doesn't crash
        api.jvp(lambda params: loss(params, inputs, targets), (params, ),
                (params, ))

        # jvp numerical check passes
        jtu.check_grads(loss, (params, inputs, targets),
                        order=2,
                        modes=["fwd"])

        # linearize works
        _, expected = api.jvp(loss, (params, inputs, targets),
                              (params, inputs, targets))
        _, linfun = api.linearize(loss, params, inputs, targets)
        ans = linfun(params, inputs, targets)
        self.assertAllClose(ans, expected, check_dtypes=False)

        # gradient evaluation doesn't crash
        api.grad(loss)(params, inputs, targets)

        # gradient check passes
        jtu.check_grads(loss, (params, inputs, targets), order=2)

        # we can vmap to batch things
        batch_size = 7
        batched_inputs = r.randn(batch_size, length, n_in)
        batched_targets = r.randn(batch_size, length, n_out)
        batched_loss = api.vmap(lambda x, y: loss(params, x, y))
        losses = batched_loss(batched_inputs, batched_targets)
        expected = onp.stack(
            list(
                map(lambda x, y: loss(params, x, y), batched_inputs,
                    batched_targets)))
        self.assertAllClose(losses, expected, check_dtypes=False)
Example #25
def rand_uniform(low=0.0, high=1.0):
    assert low < high
    rand = npr.RandomState(0).rand
    post = lambda x: x * (high - low) + low
    return partial(_rand_dtype, rand, post=post)
Example #26
                    break
                # Check whether the drawn lines cover all zeros; if they do, apply the add/subtract adjustment
                if 0 not in c:
                    row_sub = list(set(row_and_col) - set(delete_count_of_row))
                    min_value = np.min(c)
                    for i in row_sub:
                        b[i] = b[i] - min_value
                    for i in delete_count_of_rol:
                        b[:, i] = b[:, i] + min_value
                    break
        row_ind, col_ind = linear_sum_assignment(b)
        min_cost = task_matrix[row_ind, col_ind].sum()
        best_solution = list(task_matrix[row_ind, col_ind])
        return min_cost, best_solution


# Generate the cost matrix
rd = random.RandomState(10000)
task_matrix = rd.randint(1, 10, size=(5, 5))
# Task assignment via exhaustive permutation
ass_by_per = TaskAssignment(task_matrix, 'all_permutation')
# Task assignment via the Hungarian method
ass_by_Hun = TaskAssignment(task_matrix, 'Hungary')
print('cost matrix = ', '\n', task_matrix)
print('Task assignment by exhaustive permutation:')
print('min cost = ', ass_by_per.min_cost)
print('best solution = ', ass_by_per.best_solution)
print('Task assignment by the Hungarian method:')
print('min cost = ', ass_by_Hun.min_cost)
print('best solution = ', ass_by_Hun.best_solution)
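
A minimal standalone sketch of the SciPy call the class relies on (imports assumed): the Hungarian solution of the same seeded random cost matrix.

import numpy.random as random
from scipy.optimize import linear_sum_assignment

cost = random.RandomState(10000).randint(1, 10, size=(5, 5))
rows, cols = linear_sum_assignment(cost)
print(cost[rows, cols].sum())  # minimal total assignment cost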
Example #27
def rand_default():
  randn = npr.RandomState(0).randn
  return partial(_rand_dtype, randn, scale=3)
Example #28
def cases_from_list(xs):
    rng = npr.RandomState(42)
    xs = list(xs)
    k = min(len(xs), FLAGS.num_generated_cases)
    indices = rng.choice(onp.arange(len(xs)), k, replace=False)
    return [xs[i] for i in indices]
Example #29
def rand_positive():
  post = lambda x: x + 1
  rand = npr.RandomState(0).rand
  return partial(_rand_dtype, rand, scale=2, post=post)
Example #30
 def setup_method(self, method):
     self.rs = random.RandomState(1234567890)