def test_make_agent_walk_along_nx_graph(self):
        nx_skeleton = networkx_utils.NxSkeleton()
        # Build a small six-node path skeleton from coordinates and an edge list.
        coords = np.array([[100, 100, 100], [100, 101, 100], [100, 102, 101],
                           [100, 103, 102], [100, 104, 103], [100, 105, 104]])
        edgelist = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)]
        nx_skeleton.initialize_from_edgelist(coords, edgelist)

        nm = neuron_maze.NeuronMaze()
        # Walk the agent from node 0 to node 5 and check the first visited
        # position and the first chosen action.
        nm.make_agent_walk_along_nx_graph(nx_skeleton, 0, 5)
        self.assertEqual(nm.hero.visited_positions[1], tuple(coords[1]))
        first_direction = Vector3(0, 1, 0)
        self.assertEqual(nm.directions.index(first_direction), nm.hero.taken_actions[0])


        # Rebuild a skeleton from the maze's stored graph and walk the agent
        # between two nodes found via a depth-first traversal.
        current_nx_graph = nm.nx_skeletons.nx_graph_dic[1]
        nx_skeleton = networkx_utils.NxSkeleton(nx_graph=current_nx_graph)

        # Pick an endpoint (degree-1 node) of the graph as the source.
        source = networkx_utils.get_nodes_with_a_specific_degree(current_nx_graph,
                                                                 degree_value=1)
        source_node = source[0]
        # Follow the DFS successor chain for five steps to find the target node.
        number_of_steps = 5
        successor_dic = nx.dfs_successors(current_nx_graph, source=source[0])
        for _ in range(number_of_steps):
            source = successor_dic[source[0]]
        target_node = source[0]
        print('source node', source_node)
        print('target node', target_node)
        nm = neuron_maze.NeuronMaze()
        nm.make_agent_walk_along_nx_graph(nx_skeleton, source_node, target_node)
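
The five-step walk above relies on the dict returned by nx.dfs_successors: each node maps to the list of its children in the DFS tree rooted at the source, so repeatedly indexing with source[0] follows one branch of the traversal. A minimal standalone sketch of that pattern (plain networkx on a toy path graph, no project-specific classes assumed):

import networkx as nx

# Toy stand-in for the skeleton graph: a simple six-node path 0-1-2-3-4-5.
graph = nx.path_graph(6)

source_node = 0
number_of_steps = 5
# dfs_successors maps each visited node to its children in the DFS tree.
successor_dic = nx.dfs_successors(graph, source=source_node)

source = [source_node]
for _ in range(number_of_steps):
    # Step to the (first) successor of the current node.
    source = successor_dic[source[0]]
target_node = source[0]

print('source node', source_node)  # -> 0
print('target node', target_node)  # -> 5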
Example #2
    def reinitialize_world(self, nx_graph_id=None, center_node=False, start_node=False, number_of_initial_steps=0):
        # assert nx_graph_id is None and node_id is not None,'node id can only be specified when also nx_graph id is specified'
        self.hero.reset()
        blocked_objects = []
        if 'nx_skeleton_filename' in self.settings:
            # Select a random skeleton unless a graph id was given explicitly.
            node_to_jump = None
            if nx_graph_id is None:
                nx_graph_id = choice(list(self.nx_skeletons.nx_graph_dic.keys()))
            current_graph = self.nx_skeletons.nx_graph_dic[nx_graph_id]
            if center_node:
                # Start from the center of the skeleton; fall back to a random
                # node if no unique center is found.
                node_to_jump = networkx_utils.get_center_node_from_nx_graph(current_graph)
                if len(node_to_jump) != 1:
                    node_to_jump = choice(list(current_graph.nodes()))
                else:
                    node_to_jump = node_to_jump[0][0]
            if start_node:
                assert center_node is False, 'start_node and center_node cannot both be True'
                # Start from one of the nodes returned for the default degree
                # (typically the skeleton's two endpoints).
                node_to_jump = networkx_utils.get_nodes_with_a_specific_degree(current_graph)
                if len(node_to_jump) == 2:
                    node_to_jump = node_to_jump[0]
            if node_to_jump is None:
                node_to_jump = choice(list(current_graph.nodes()))

            if number_of_initial_steps > 0 and current_graph.number_of_nodes() - 2 > number_of_initial_steps:
                # Let the agent walk a few steps along the skeleton before the
                # game starts, following the DFS successor chain from the start node.
                successor_dic = nx.dfs_successors(current_graph, source=node_to_jump)
                source = [node_to_jump]
                source_node = node_to_jump
                # Save the starting position for later to make sure that it is
                # not added as an endpoint in the game
                source_pos = current_graph.node[node_to_jump]['position']
                source_pos = Point3(source_pos[0], source_pos[1], source_pos[2])
                blocked_objects.append(source_pos)
                for _ in range(number_of_initial_steps):
                    source = successor_dic[source[0]]
                target_node = source[0]
                self.make_agent_walk_along_nx_graph(current_graph, source_node, target_node)
                # Remove potential endnode objects from the source node


                # print 'hero walked already a bit', self.hero.visited_positions
                # print 'hero walked already a bit', self.hero.taken_actions
                # print 'hero walked already a bit', self.hero.observed_observations
            else:
                # No initial walk: just place the hero on the chosen node.
                pos = self.nx_graph.node[node_to_jump]['position']
                self.hero.jump(pos)
        else:
            self.jump()

        # Look up the segment id at the hero's current position.
        pos = self.hero.position
        if 'volumetric_objects_filename' in self.settings:
            seg_id = self.volumetric_object_matrix[pos.x, pos.y, pos.z]
            # new_reward_matrix = self.reward_matrix_ori.copy()
            # new_observation_matrix = self.reward_matrix_ori.copy()
            # new_reward_matrix[self.volumetric_object_matrix != seg_id] = 0
            # self.reward_matrix = new_reward_matrix
            # self.observation_matrix = new_observation_matrix
            # self.observation_matrix = new_reward_matrix
        else:
            seg_id = None
        # #Reinitialize the skeletons
        # for obj in self.removed_objects:
        #     self.objects.append(obj)
        self.objects = []
        self.removed_objects = []
        self.objects_eaten['skeleton'] = 0
        self.skeletons_to_objects(seg_id=seg_id, blocked_objects=blocked_objects)
        self.games_played += 1
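
The center_node and start_node branches above rely on two helpers from networkx_utils. Purely as an illustration of the underlying graph queries (not the project's own implementations, and assuming networkx 2.x), the same choices can be expressed with plain networkx: endpoints are the degree-1 nodes, and nx.center returns the node(s) of minimal eccentricity.

import networkx as nx
from random import choice

graph = nx.path_graph(7)  # toy skeleton: a path 0-1-2-3-4-5-6

# Endpoints of the skeleton: nodes with degree 1 (here nodes 0 and 6).
endpoints = [n for n, d in graph.degree() if d == 1]
start_node = endpoints[0]

# Center of the skeleton: node(s) of minimal eccentricity (here [3]).
center = nx.center(graph)
node_to_jump = center[0] if len(center) == 1 else choice(list(graph.nodes()))

print('endpoints:', endpoints, 'start:', start_node, 'center:', node_to_jump)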