Example #1
def main():
    logger = logging.getLogger("Mapper")
    logger.debug("Start")
    mapper = Mapper(mw_endpoint, key_queue_endpoint, ventilator_endpoint,
                    mappers_ready_endpoint)
    mapper.start(map_fun)
    logger.debug("End")
Example #2
File: main.py Project: jlessa/mapa_api
def cria_questao():
    data = request.json['data']
    # Check for missing data first: in the original ordering,
    # ast.literal_eval('') would raise before the `if data` check ran.
    if not data:
        output = {
            "error": "No results found. Check url again",
            "url": request.url,
        }
        res = jsonify(output)
        res.status_code = 404
    elif len(ast.literal_eval(data)['enunciado']) == 0:
        # "Questão Vazia" = empty question; 400 (Bad Request) fits a
        # validation failure better than the original 401 (Unauthorized).
        output = {
            "error": "Questão Vazia",
            "url": request.url,
        }
        res = jsonify(output)
        res.status_code = 400
    else:
        mapper = Mapper()
        questao = mapper.salva_questao(data)
        output = {
            "ok": "Sucesso",
            "url": request.url,
        }
        res = jsonify(output)
        res.status_code = 200
    return res
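A quick way to exercise this endpoint (a sketch: the route and port are hypothetical, and the payload shape follows the request.json['data'] and 'enunciado' accesses above):

import requests

# The handler parses `data` with ast.literal_eval, so it expects a string
# containing a Python literal with an 'enunciado' (question statement) key.
payload = {"data": "{'enunciado': 'What is 2 + 2?'}"}
resp = requests.post("http://localhost:5000/questao", json=payload)  # hypothetical route
print(resp.status_code, resp.json())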
Example #3
def setUp(self) -> None:
    self.mapper = Mapper(
        playerSocket=Mock(spec=socket.socket),
        gameSocket=Mock(spec=socket.socket),
        outputFormat="normal",
        interface="text",
        promptTerminator=None,
        gagPrompts=False,
        findFormat="",
        isEmulatingOffline=False,
    )
    self.mapper.daemon = True  # Allow unittest to quit if mapper thread does not close properly.
    # Use a tuple rather than a generator: a generator would be exhausted by the
    # loop below, leaving self.legacyHandlerNames empty for any later reuse.
    self.legacyHandlerNames: Tuple[str, ...] = tuple(
        handlerName for handlerName in dir(self.mapper)
        if handlerName.startswith("mud_event_")
        and callable(getattr(Mapper, handlerName))
    )
    for handlerName in self.legacyHandlerNames:
        setattr(self.mapper, handlerName, Mock())
Example #4
def setUp(self) -> None:
    self.mapper: Mapper = Mapper(
        playerSocket=Mock(spec=socket.socket),
        gameSocket=Mock(spec=socket.socket),
        outputFormat="normal",
        interface="text",
        promptTerminator=None,
        gagPrompts=False,
        findFormat="",
        isEmulatingOffline=False,
    )
    self.mapper.daemon = True  # Allow unittest to quit if mapper thread does not close properly.
Example #5
class TestMapper(unittest.TestCase):
    _example_as_row = '2016;5;2016.02.06.;0;0 Ft;30;1 910 065 Ft;2955;20 530 Ft;86050;1 370 Ft;11;15;26;39;78'
    _example_as_dict = [(11, 1), (15, 1), (26, 1), (39, 1), (78, 1)]
    _otos_min_expected_log = 'resources/otos.min.expected.txt'
    _otos_min_result_log = 'resources/otos.min.result.txt'
    _otos_min_csv = 'resources/otos.min.txt'

    def setUp(self):
        self.test_subject = Mapper()

    def tearDown(self):
        self.test_subject = None

    def test_getNumbers_with_valid_input(self):
        expected = self._example_as_dict
        actual = self.test_subject.getNumbers(self._example_as_row)
        self.assertEqual(expected, actual)

    def test_getNumbers_with_invalid_input(self):
        expected = []
        actual = self.test_subject.getNumbers('')
        self.assertEqual(expected, actual)

    def test_processInput_with_valid_input(self):
        with open(self._otos_min_csv) as f:
            self._remove_logfile(self._otos_min_result_log)
            with open(self._otos_min_result_log, 'a') as r:
                self.test_subject.processInput(f.readlines(), r)
        with open(self._otos_min_expected_log) as f:
            expected = f.read().strip()
            with open(self._otos_min_result_log) as r:
                actual = r.read().strip()
        self.assertEqual(expected, actual)

    def _remove_logfile(self, filename):
        try:
            remove(filename)
        except Exception:
            pass
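The assertions above pin down the expected behaviour of getNumbers: it takes the last five semicolon-separated fields of a draw row and pairs each drawn number with a count of 1. A minimal sketch of an implementation consistent with these tests (hypothetical; the real Mapper class is not shown in this example):

    # Hypothetical Mapper.getNumbers consistent with the tests above.
    def getNumbers(self, row):
        fields = row.strip().split(';')
        if len(fields) < 5:
            return []  # invalid or empty input
        # The drawn numbers are the last five fields, e.g. "...;11;15;26;39;78".
        return [(int(n), 1) for n in fields[-5:]]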
Example #6
    def _setup_anticipator(self, ppo_cfg: Config, ans_cfg: Config) -> None:
        r"""Sets up actor critic and agent for PPO.

        Args:
            ppo_cfg: config node with relevant params
            ans_cfg: config node for ActiveNeuralSLAM model

        Returns:
            None
        """
        try:
            os.mkdir(self.config.TENSORBOARD_DIR)
        except OSError:  # Directory may already exist.
            pass

        logger.add_filehandler(os.path.join(self.config.TENSORBOARD_DIR, "run.log"))

        sem_cfg = ans_cfg.SEMANTIC_ANTICIPATOR
        mapper_cfg = ans_cfg.MAPPER
        # Create occupancy anticipation model
        [imgh, imgw] = ans_cfg['image_scale_hw']
        sem_model = SemAnticipationWrapper(
            SemAnticipator(sem_cfg), mapper_cfg.map_size, (imgh, imgw)
        )

        self.mapper = Mapper(mapper_cfg, sem_model)

        self.mapper_agent = MapUpdate(
            self.mapper,
            lr=mapper_cfg.lr,
            eps=mapper_cfg.eps,
            label_id=mapper_cfg.label_id,
            max_grad_norm=mapper_cfg.max_grad_norm,
            pose_loss_coef=mapper_cfg.pose_loss_coef,
            semantic_anticipator_type=ans_cfg.SEMANTIC_ANTICIPATOR.type,
            freeze_projection_unit=mapper_cfg.freeze_projection_unit,
            num_update_batches=mapper_cfg.num_update_batches,
            batch_size=mapper_cfg.map_batch_size,
            mapper_rollouts=self.mapper_rollouts,
        )

        if ans_cfg.model_path != "":
            self.resume_checkpoint(ans_cfg.model_path)
Example #7
ans_cfg = trainer.config.RL.ANS
mapper_cfg = trainer.config.RL.ANS.MAPPER
occ_cfg = trainer.config.RL.ANS.SEMANTIC_ANTICIPATOR
trainer.device = (
    torch.device("cuda", 1)
    if torch.cuda.is_available()
    else torch.device("cpu")
)
sem_cfg = ans_cfg.SEMANTIC_ANTICIPATOR
mapper_cfg = ans_cfg.MAPPER
[imgh, imgw] = ans_cfg['image_scale_hw']
sem_model = SemAnticipationWrapper(
    SemAnticipator(sem_cfg), mapper_cfg.map_size, (imgh, imgw)
)

trainer.mapper = Mapper(mapper_cfg, sem_model).to(trainer.device)


checkpoints = glob.glob(f"{trainer.config.CHECKPOINT_FOLDER}/*.pth")
ppo_cfg = trainer.config.RL.PPO
# Load latest checkpoint
last_ckpt = sorted(checkpoints, key=lambda x: int(x.split(".")[1]))[-1]
checkpoint_path = last_ckpt
# Restore checkpoints to models
# ckpt_dict = trainer.load_checkpoint('data/new_checkpoints_se_256/ckpt.179.pth')
ckpt_dict = trainer.load_checkpoint(checkpoint_path)
trainer.mapper.load_state_dict(ckpt_dict["mapper"])

depth_projection_net = DepthProjectionNet(
    trainer.config.RL.ANS.SEMANTIC_ANTICIPATOR.EGO_PROJECTION
)
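Note that sorting on int(x.split(".")[1]) assumes checkpoint files are named ckpt.<step>.pth and that the checkpoint folder path contains no other dots. A slightly more defensive variant under the same naming assumption (a sketch):

import glob
import os

def latest_checkpoint(folder: str) -> str:
    # Pick the checkpoint with the highest numeric step, reading the step
    # from the basename so dots in the folder path cannot interfere.
    paths = glob.glob(os.path.join(folder, "*.pth"))
    return max(paths, key=lambda p: int(os.path.basename(p).split(".")[1]))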
Example #8
class TestMapper_handleMudEvent(TestCase):
    @patch.object(Mapper, "loadRooms", Mock())  # Speedup test execution.
    def setUp(self) -> None:
        self.mapper = Mapper(
            playerSocket=Mock(spec=socket.socket),
            gameSocket=Mock(spec=socket.socket),
            outputFormat="normal",
            interface="text",
            promptTerminator=None,
            gagPrompts=False,
            findFormat="",
            isEmulatingOffline=False,
        )
        self.mapper.daemon = True  # Allow unittest to quit if mapper thread does not close properly.
        # Use a tuple rather than a generator: a generator would be exhausted by
        # the loop below, leaving self.legacyHandlerNames empty when
        # test_legacyMudEventHandlers iterates it again.
        self.legacyHandlerNames: Tuple[str, ...] = tuple(
            handlerName for handlerName in dir(self.mapper)
            if handlerName.startswith("mud_event_")
            and callable(getattr(Mapper, handlerName))
        )
        for handlerName in self.legacyHandlerNames:
            setattr(self.mapper, handlerName, Mock())

    def test_legacyMudEventHandlers(self) -> None:
        events: Generator[str, None, None] = (
            handlerName[len("mud_event_"):] for handlerName in self.legacyHandlerNames
        )
        handlers: Generator[Mock, None, None] = (
            getattr(self.mapper, handlerName) for handlerName in self.legacyHandlerNames
        )
        for event, handler in zip(events, handlers):
            sampleInput1: bytes = b"Helol oje"
            sampleInput2: bytes = b"no sir, away. a papaya war is on"
            sampleInput3: bytes = b"delting no sir, away. a papaya war is on"
            self.mapper.registerMudEventHandler(event, handler)
            self.mapper.handleMudEvent(event, sampleInput1)
            handler.assert_called_once_with(sampleInput1.decode("us-ascii"))
            handler.reset_mock()
            self.mapper.handleMudEvent(event, sampleInput2)
            handler.assert_called_once_with(sampleInput2.decode("us-ascii"))
            handler.reset_mock()
            self.mapper.deregisterMudEventHandler(event, handler)
            self.mapper.handleMudEvent(event, sampleInput3)
            handler.assert_not_called()

    def test_newMudEventHandlers(self) -> None:
        events: Tuple[str, ...] = (
            "sillyEvent",
            "room",
            "otherEvent",
        )
        for event in events:
            handler: Mock = Mock()
            sampleInput1: bytes = b"Helol oje"
            sampleInput2: bytes = b"no sir, away. a papaya war is on"
            sampleInput3: bytes = b"delting no sir, away. a papaya war is on"
            self.mapper.registerMudEventHandler(event, handler)
            self.mapper.handleMudEvent(event, sampleInput1)
            handler.assert_called_once_with(sampleInput1.decode("us-ascii"))
            handler.reset_mock()
            self.mapper.handleMudEvent(event, sampleInput2)
            handler.assert_called_once_with(sampleInput2.decode("us-ascii"))
            handler.reset_mock()
            self.mapper.deregisterMudEventHandler(event, handler)
            self.mapper.handleMudEvent(event, sampleInput3)
            handler.assert_not_called()

    def test_handleMudEvent_failsGracefullyWhenHandlingAnUnknownEvent(self) -> None:
        unknownEvents: Tuple[str, ...] = (
            "unkk",
            "New_game_event",
            "room",
            "<interesting-tag-<in>-a-tag>",
        )
        for unknownEvent in unknownEvents:
            # simply require this to execute without raising an exception
            self.mapper.handleMudEvent(unknownEvent, b"meaningless input")
Example #9
def get_episode_map(env: habitat.RLEnv, mapper: Mapper, M: int, config: CN, device: torch.device):
    """Given the environment and the configuration, compute the global
    top-down wall and seen area maps by sampling maps for individual locations
    along a uniform grid in the environment, and registering them.
    """
    # Initialize a global map for the episode
    global_wall_map = torch.zeros(1, 2, M, M).to(device)
    global_seen_map = torch.zeros(1, 2, M, M).to(device)

    grid_size = config.TASK.GT_EGO_MAP.MAP_SCALE
    coordinate_max = maps.COORDINATE_MAX
    coordinate_min = maps.COORDINATE_MIN
    resolution = (coordinate_max - coordinate_min) / grid_size
    grid_resolution = (int(resolution), int(resolution))

    top_down_map = maps.get_topdown_map(
        env.habitat_env.sim, grid_resolution, 20000, draw_border=False,
    )

    map_w, map_h = top_down_map.shape

    intervals = (max(int(0.5 / grid_size), 1), max(int(0.5 / grid_size), 1))
    x_vals = np.arange(0, map_w, intervals[0], dtype=int)
    y_vals = np.arange(0, map_h, intervals[1], dtype=int)
    coors = np.stack(np.meshgrid(x_vals, y_vals), axis=2)  # (H, W, 2)
    coors = coors.reshape(-1, 2)  # (H*W, 2)
    map_vals = top_down_map[coors[:, 0], coors[:, 1]]
    valid_coors = coors[map_vals > 0]

    real_x_vals = coordinate_max - valid_coors[:, 0] * grid_size
    real_z_vals = coordinate_min + valid_coors[:, 1] * grid_size
    start_y = env.habitat_env.sim.get_agent_state().position[1]

    for j in tqdm.tqdm(range(real_x_vals.shape[0]), desc='occupancy map', position=2):
        for theta in np.arange(-np.pi, np.pi, np.pi / 3.0):
            position = [
                real_x_vals[j].item(),
                start_y.item(),
                real_z_vals[j].item(),
            ]
            rotation = [
                0.0,
                np.sin(theta / 2).item(),
                0.0,
                np.cos(theta / 2).item(),
            ]

            sim_obs = env.habitat_env.sim.get_observations_at(
                position, rotation, keep_agent_at_new_pose=True,
            )
            episode = env.habitat_env.current_episode
            obs = env.habitat_env.task.sensor_suite.get_observations(
                observations=sim_obs, episode=episode, task=env.habitat_env.task
            )
            ego_map_gt = torch.Tensor(obs["ego_map_gt"]).to(device)
            ego_map_gt = rearrange(ego_map_gt, "h w c -> () c h w")
            ego_wall_map_gt = torch.Tensor(obs["ego_wall_map_gt"]).to(device)
            ego_wall_map_gt = rearrange(ego_wall_map_gt, "h w c -> () c h w")
            pose_gt = torch.Tensor(obs["pose_gt"]).unsqueeze(0).to(device)
            global_seen_map = mapper.ext_register_map(
                global_seen_map, ego_map_gt, pose_gt
            )
            global_wall_map = mapper.ext_register_map(
                global_wall_map, ego_wall_map_gt, pose_gt
            )

    global_wall_map_np = asnumpy(rearrange(global_wall_map, "b c h w -> b h w c")[0])
    global_seen_map_np = asnumpy(rearrange(global_seen_map, "b c h w -> b h w c")[0])

    return global_seen_map_np, global_wall_map_np
Example #10
def main(args):
    config = get_config()

    mapper_config = config.RL.ANS.MAPPER
    mapper_config.defrost()
    mapper_config.map_size = 130
    mapper_config.map_scale = 0.02
    mapper_config.freeze()

    mapper = Mapper(mapper_config, None)

    M = args.global_map_size
    skip_scene = args.skip_scene
    config_path = args.config_path
    save_dir = args.save_dir
    safe_mkdir(save_dir)

    seen_map_save_root = os.path.join(save_dir, "seen_area_maps")
    wall_map_save_root = os.path.join(save_dir, "wall_maps")
    semantic_map_save_root = os.path.join(save_dir, "semantic_maps")
    json_save_path = os.path.join(save_dir, "all_maps_info.json")

    config = habitat_extensions.get_extended_config(config_path)

    scenes_list = glob.glob(f"")  # NOTE: glob pattern elided in the original source; unused in this excerpt
    dataset_path = config.DATASET.DATA_PATH.replace("{split}", config.DATASET.SPLIT)
    with gzip.open(dataset_path, "rt") as fp:
        dataset = json.load(fp)

    num_episodes = len(dataset["episodes"])

    print("===============> Loading data per scene")
    scene_to_data = {}
    if num_episodes == 0:
        content_path = os.path.join(
            dataset_path[: -len(f"{config.DATASET.SPLIT}.json.gz")], "content"
        )
        scene_paths = glob.glob(f"{content_path}/*")
        print(f"Number of scenes found: {len(scene_paths)}")
        for scene_data_path in scene_paths:
            with gzip.open(scene_data_path, "rt") as fp:
                scene_data = json.load(fp)
            num_episodes += len(scene_data["episodes"])
            scene_id = scene_data["episodes"][0]["scene_id"].split("/")[-1]
            scene_to_data[scene_id] = scene_data["episodes"]
    else:
        for ep in dataset["episodes"]:
            scene_id = ep["scene_id"].split("/")[-1]
            if scene_id not in scene_to_data:
                scene_to_data[scene_id] = []
            scene_to_data[scene_id].append(ep)

    print("===============> Computing heights for different floors in each scene")
    scenes_to_floor_heights = {}
    for scene_id, scene_data in scene_to_data.items():
        # Identify the number of unique floors in this scene
        floor_heights = []
        for ep in scene_data:
            height = ep["start_position"][1]
            if len(floor_heights) == 0:
                floor_heights.append(height)
            # Measure height difference from all existing floors
            d2floors = map(lambda x: abs(x - height), floor_heights)
            d2floors = np.array(list(d2floors))
            if not np.any(d2floors < 0.5):
                floor_heights.append(height)
        # Store this in the dict
        scenes_to_floor_heights[scene_id] = floor_heights

    env = DummyRLEnv(config=config)
    env.seed(1234)
    _ = env.reset()
    device = torch.device("cuda:0")

    safe_mkdir(seen_map_save_root)
    safe_mkdir(wall_map_save_root)
    safe_mkdir(semantic_map_save_root)

    # Data format for saving top-down maps per scene:
    # For each split, create a json file that contains the following dictionary:
    # key - scene_id
    # value - [{'floor_height': ...,
    #           'seen_map_path': ...,
    #           'wall_map_path': ...,
    #           'world_position': ...,
    #           'world_heading': ...},
    #          .,
    #          .,
    #          .,
    #         ]
    # The floor_height specifies a single height value on that floor.
    # All other heights within 0.5m of this height will correspond to this floor.
    # The *_map_path specifies the path to a .npy file that contains the
    # corresponding map. This map is in the world coordinate system, not episode
    # centric start-view coordinate system.
    # The world_position is the (X, Y, Z) position of the agent w.r.t. which this
    # map was computed. The world_heading is the clockwise rotation (-Z to X)
    # of the agent in the world coordinates.
    # The .npy files will be stored in seen_map_save_root and wall_map_save_root.
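    # For example (illustrative values only), one scene's entry could look like:
    # "scene1.glb": [
    #     {"floor_height": 0.17,
    #      "seen_map_path": "seen_area_maps/scene1.glb_0.npy",
    #      "wall_map_path": "wall_maps/scene1.glb_0.npy",
    #      "semantic_map_path": "semantic_maps/scene1.glb_0.npy",
    #      "world_position": [1.2, 0.17, -3.4],
    #      "world_heading": 1.57},
    # ]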

    # Create top-down maps per scene, per floor
    per_scene_per_floor_maps = {}
    print("===============> generate meta information for gt map")
    for target_scene in tqdm.tqdm(scene_to_data.keys()):
        per_scene_per_floor_maps[target_scene] = {}
        for episode in scene_to_data[target_scene]:
            scene_id = target_scene
            start_position = episode['start_position']
            start_rotation = episode['start_rotation']
            start_height = start_position[1]
            floor_heights = scenes_to_floor_heights[scene_id]
            d2floors = map(lambda x: abs(x - start_height), floor_heights)
            d2floors = np.array(list(d2floors))
            floor_idx = np.where(d2floors < 0.5)[0][0].item()   
            if floor_idx in per_scene_per_floor_maps[scene_id]:
                continue
            start_heading = compute_heading_from_quaternion(quaternion_from_coeff(start_rotation))
            seen_map_save_path = f"{seen_map_save_root}/{scene_id}_{floor_idx}.npy"
            wall_map_save_path = f"{wall_map_save_root}/{scene_id}_{floor_idx}.npy"
            semantic_map_save_path = f"{semantic_map_save_root}/{scene_id}_{floor_idx}.npy"


            save_dict = {
                "seen_map_path": seen_map_save_path,
                "wall_map_path": wall_map_save_path,
                "semantic_map_path": semantic_map_save_path,
                "floor_height": start_height,
                "start_rotation": start_rotation,
                "world_position": start_position,
                "world_heading": start_heading,
                "scene_id": episode['scene_id'],
            }

            per_scene_per_floor_maps[scene_id][floor_idx] = save_dict
            if len(per_scene_per_floor_maps[scene_id]) == len(scenes_to_floor_heights[scene_id]):
                break
    print("===============> save meta information for gt map")        
    save_json = {}
    for scene in per_scene_per_floor_maps.keys():
        scene_save_data = []
        for floor_idx, floor_data in per_scene_per_floor_maps[scene].items():
            scene_save_data.append(floor_data)
        save_json[scene] = scene_save_data

    with open(json_save_path, "w") as fp:
        json.dump(save_json, fp)

    print("===============> start to draw semantic map")  
    scene_ids = sorted(list(per_scene_per_floor_maps.keys()))
    print(scene_ids)
    start_scene = scene_ids[skip_scene]
    print(f"===============> start with scene {start_scene}")

    for target_scene in tqdm.tqdm(scene_ids[skip_scene:], desc='scenes', position=0):
        for floor_idx in per_scene_per_floor_maps[target_scene]:
            
            scene_meta_info = per_scene_per_floor_maps[target_scene][floor_idx]
            # don't regenerate maps
            
            if os.path.isfile(scene_meta_info['semantic_map_path']):
                continue
            print(scene_meta_info)

            env.habitat_env.current_episode.start_position = scene_meta_info['world_position']
            env.habitat_env.current_episode.start_rotation = scene_meta_info['start_rotation']
            env.habitat_env.current_episode.scene_id = scene_meta_info['scene_id']

            env.habitat_env.reconfigure(env.habitat_env._config)
            _ = env.habitat_env.task.reset(env.habitat_env.current_episode)

            scene_id = target_scene
            agent_state = env.habitat_env.sim.get_agent_state()
            start_position = np.array(agent_state.position)

            global_seen_map, global_wall_map = get_episode_map(
                env, mapper, M, config, device
            )

            # Generate semantic layers
            global_semantic_map = generate_semantic_layers(env, mapper, M, config, global_seen_map)

            seen_map_save_path = f"{seen_map_save_root}/{scene_id}_{floor_idx}.npy"
            wall_map_save_path = f"{wall_map_save_root}/{scene_id}_{floor_idx}.npy"
            semantic_map_save_path = f"{semantic_map_save_root}/{scene_id}_{floor_idx}.npy"
            np.save(seen_map_save_path, (global_seen_map > 0))
            np.save(wall_map_save_path, (global_wall_map > 0))
            np.save(semantic_map_save_path, (global_semantic_map > 0))

            # clean the memory to avoid overflow
            global_seen_map = None
            global_wall_map = None
            global_semantic_map = None
            gc.collect()
Example #11
import numpy as np
from mapper.mapper import Mapper  # same import path as in Example #12 below

# The opening lines of torus() are missing from this excerpt; a plausible
# reconstruction follows (the sampling resolution n is an assumption).
def torus(n=50):
    u = np.linspace(0, 2 * np.pi, n)
    v = np.linspace(0, 2 * np.pi, n)
    u, v = np.meshgrid(u, v)
    a = 2
    b = 9
    x = (b + a*np.cos(u)) * np.cos(v)
    y = (b + a*np.cos(u)) * np.sin(v)
    z = a * np.sin(u)

    return np.vstack((x.ravel(), y.ravel(), z.ravel())).T

points = torus()


mapper = Mapper(
    bins=4,
    filter_function='by_coordinate',
    coordinate=1,
    clustering_function='agglomerative',
    linkage="average",
    distance=15
)

graph = mapper.fit(points)
mapper.plot_vertices()
mapper.plot_intervals()
mapper.plot_clusters()
mapper.plot_graph()
mapper.plot_graph_in_plane()
mapper.plot_persistence_homology()

# Silhouette score
mapper = Mapper(
    bins=4,
Example #12
import numpy as np
from mapper.mapper import Mapper

with open('point_clouds/pliers.txt') as f:
    data = f.readlines()

points = np.array([list(map(float, p.strip().split(' '))) for p in data])

mapper = Mapper(bins=5,
                clustering_function="agglomerative",
                linkage="average",
                coordinate=-1,
                max_k=5)

graph = mapper.fit(points)
mapper.plot_vertices()
mapper.plot_intervals()
mapper.plot_clusters()
mapper.plot_graph()
mapper.plot_graph_in_plane()
mapper.plot_persistence_homology()
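Since the point-cloud file is just whitespace-separated floats, the manual parsing above can also be written as a one-liner (equivalent for well-formed files):

points = np.loadtxt('point_clouds/pliers.txt')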
Example #13
def setUp(self):
    self.test_subject = Mapper()