Example 1
def test_node_group_inverse_projection_fork_inside(device):
    float_dtype = get_float(device)

    graph = Topology(device)

    source_node = ConstantNode(shape=(2, 4), constant=0)
    fork_node = ForkNode(dim=1, split_sizes=[1, 3])
    join_node = JoinNode(dim=1, n_inputs=2)
    group_node = graph.create_generic_node_group('group', 1, 2)

    graph.add_node(source_node)
    group_node.add_node(fork_node)
    graph.add_node(join_node)

    # Outer connection: source -> group input
    Connector.connect(source_node.outputs.output, group_node.inputs[0])

    # Inside the group: group input -> fork
    Connector.connect(group_node.inputs[0].output, fork_node.inputs.input)

    # Inside the group: fork -> group outputs
    Connector.connect(fork_node.outputs[0], group_node.outputs[0].input)
    Connector.connect(fork_node.outputs[1], group_node.outputs[1].input)

    # Outer connections: group outputs -> join
    Connector.connect(group_node.outputs[0], join_node.inputs[0])
    Connector.connect(group_node.outputs[1], join_node.inputs[1])

    graph.step()

    output_tensor = torch.rand((2, 4), device=device, dtype=float_dtype)
    inverse_pass_packet = InversePassOutputPacket(output_tensor, join_node.outputs.output)
    results = join_node.recursive_inverse_projection_from_output(inverse_pass_packet)

    assert 1 == len(results)
    assert same(results[0].tensor, output_tensor)
Example 2
def test_inverse_fork_join():
    """Checks that if you fork and join, the inverse projection will only have one tensor result."""

    device = 'cpu'
    dtype = torch.float32

    graph = Topology(device)

    source_node = ConstantNode(shape=(2, 4), constant=0)
    fork_node = ForkNode(dim=1, split_sizes=[1, 3])
    join_node = JoinNode(dim=1, n_inputs=2)

    graph.add_node(source_node)
    graph.add_node(fork_node)
    graph.add_node(join_node)

    Connector.connect(source_node.outputs.output, fork_node.inputs.input)
    Connector.connect(fork_node.outputs[0], join_node.inputs[0])
    Connector.connect(fork_node.outputs[1], join_node.inputs[1])

    graph.step()

    output_tensor = torch.rand((2, 4), device=device, dtype=dtype)
    inverse_pass_packet = InversePassOutputPacket(output_tensor,
                                                  join_node.outputs.output)
    results = join_node.recursive_inverse_projection_from_output(
        inverse_pass_packet)

    assert 1 == len(results)
    assert same(results[0].tensor, output_tensor)
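For intuition, here is a minimal plain-PyTorch sketch (not the library API above) of why the fork-then-join inverse projection collapses to a single tensor: splitting along a dimension and concatenating the pieces back along that same dimension is the identity.

import torch

# Standalone sketch, assuming plain torch semantics for the fork and join steps:
# split along dim 1 into chunks of sizes [1, 3], then concatenate them back.
x = torch.rand(2, 4)
parts = torch.split(x, [1, 3], dim=1)   # analogous to ForkNode(dim=1, split_sizes=[1, 3])
rejoined = torch.cat(parts, dim=1)      # analogous to JoinNode(dim=1, n_inputs=2)
assert torch.equal(rejoined, x)         # hence a single result equal to the output tensor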
Example 3
def test_join_node_inverse_flatten():
    device = 'cpu'
    creator = AllocatingCreator(device)
    dtype = creator.float32

    # The inverse projection should produce one tensor per connected input.
    expected_results = [
        creator.tensor([[1, 2, 3, 4], [5, 6, 7, 8]],
                       dtype=dtype,
                       device=device),
        creator.tensor([9, 10], dtype=dtype, device=device)
    ]

    input_memory_blocks = [MemoryBlock(), MemoryBlock()]
    input_memory_blocks[0].tensor = creator.zeros((2, 4))
    input_memory_blocks[1].tensor = creator.zeros((2, ))

    output_tensor = creator.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                                   dtype=dtype,
                                   device=device)

    join_node = JoinNode(flatten=True)
    Connector.connect(input_memory_blocks[0], join_node.inputs[0])
    Connector.connect(input_memory_blocks[1], join_node.inputs[1])

    output_inverse_packet = InversePassOutputPacket(output_tensor,
                                                    join_node.outputs.output)

    join_node.allocate_memory_blocks(creator)
    results = join_node.recursive_inverse_projection_from_output(
        output_inverse_packet)

    for expected, result in zip(expected_results, results):
        assert same(expected, result.tensor)
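A hedged plain-PyTorch sketch of what the flattening join's inverse projection is expected to compute here (the (2, 4) and (2,) input shapes are taken from the test above): split the flat output by each input's element count and reshape the pieces back to the input shapes.

import torch

# Standalone sketch, not the library API: undoing a flattening join of a (2, 4)
# input and a (2,) input from the flat 10-element output.
output = torch.arange(1., 11.)           # [1, 2, ..., 10], as in the test above
pieces = torch.split(output, [8, 2])     # 8 = 2 * 4 elements, 2 = 2 elements
first = pieces[0].view(2, 4)
second = pieces[1].view(2)
assert torch.equal(first, torch.tensor([[1., 2., 3., 4.], [5., 6., 7., 8.]]))
assert torch.equal(second, torch.tensor([9., 10.]))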
Example 4
def test_join_node_inverse_0():
    # TODO (Test): Make a dim = 1 variant
    # TODO (Test): Then, refactor tests here, maybe something to match the test class above, but for the backward projection.
    creator = AllocatingCreator(device='cpu')
    dtype = creator.float32

    dim = 0

    # The inverse projection should produce one tensor per connected input.
    expected_results = [
        creator.tensor([[1, 2, 3, 4], [5, 6, 7, 8]],
                       dtype=dtype,
                       device=creator.device),
        creator.tensor([[9, 10, 11, 12], [13, 14, 15, 16]],
                       dtype=dtype,
                       device=creator.device)
    ]

    input_memory_blocks = [MemoryBlock(), MemoryBlock()]
    input_memory_blocks[0].tensor = creator.zeros((2, 4))
    input_memory_blocks[1].tensor = creator.zeros((2, 4))

    output_tensor = creator.tensor(
        [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]],
        dtype=dtype,
        device=creator.device)

    join_node = JoinNode(dim, n_inputs=2)
    Connector.connect(input_memory_blocks[0], join_node.inputs[0])
    Connector.connect(input_memory_blocks[1], join_node.inputs[1])

    output_inverse_packet = InversePassOutputPacket(output_tensor,
                                                    join_node.outputs.output)

    join_node.allocate_memory_blocks(creator)
    results = join_node.recursive_inverse_projection_from_output(
        output_inverse_packet)

    for expected, result in zip(expected_results, results):
        assert same(expected, result.tensor)
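The dim=0 variant admits an analogous plain-PyTorch sketch: the inverse projection of a join that concatenated two (2, 4) inputs along dim 0 amounts to splitting the (4, 4) output back into two (2, 4) tensors.

import torch

# Standalone sketch, not the library API: undoing a dim=0 join of two (2, 4) inputs.
output = torch.tensor([[1., 2., 3., 4.], [5., 6., 7., 8.],
                       [9., 10., 11., 12.], [13., 14., 15., 16.]])
first, second = torch.split(output, [2, 2], dim=0)
assert torch.equal(first, torch.tensor([[1., 2., 3., 4.], [5., 6., 7., 8.]]))
assert torch.equal(second, torch.tensor([[9., 10., 11., 12.], [13., 14., 15., 16.]]))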
Example 5
    def __init__(self):
        super().__init__("cuda")
        actions_descriptor = GridWorldActionDescriptor()
        node_action_monitor = ActionMonitorNode(actions_descriptor)

        grid_world_params = GridWorldParams('MapE')
        grid_world_params.tile_size = 5
        node_grid_world = GridWorldNode(grid_world_params)

        join_node = JoinNode(flatten=True)

        unsqueeze_node = UnsqueezeNode(dim=0)

        # GridWorld sizes
        # egocentric (computed here, but overridden below by the one-hot matrix sizes)
        width = grid_world_params.egocentric_width * grid_world_params.tile_size
        height = grid_world_params.egocentric_height * grid_world_params.tile_size

        # one-hot matrix
        width = grid_world_params.world_width
        height = grid_world_params.world_height

        fork_node = ForkNode(dim=0, split_sizes=[width * height, 4])
        input_size = (1, width * height + 4)
        random_noise_node_params = RandomNoiseParams()
        random_noise_node_params.amplitude = 0.1
        random_noise_node = RandomNoiseNode(random_noise_node_params)

        def squeeze(inputs, outputs):
            outputs[0].copy_(inputs[0].view(-1))

        squeeze_node = LambdaNode(squeeze,
                                  1, [(sum(fork_node._split_sizes), )],
                                  name="squeeze lambda node")

        to_one_hot_node = ToOneHotNode()

        random_action_generator = RandomNumberNode(upper_bound=4)

        self.add_node(squeeze_node)
        self.add_node(node_grid_world)
        self.add_node(unsqueeze_node)
        self.add_node(node_action_monitor)
        self.add_node(join_node)
        self.add_node(fork_node)
        self.add_node(random_noise_node)
        self.add_node(to_one_hot_node)
        self.add_node(random_action_generator)

        Connector.connect(node_grid_world.outputs.egocentric_image,
                          random_noise_node.inputs.input)
        # egocentric
        # Connector.connect(random_noise_node.outputs.output, join_node.inputs[0])
        # one-hot matrix
        Connector.connect(node_grid_world.outputs.output_pos_one_hot_matrix,
                          join_node.inputs[0])
        Connector.connect(node_grid_world.outputs.output_action,
                          join_node.inputs[1])
        Connector.connect(join_node.outputs.output,
                          unsqueeze_node.inputs.input)

        self._create_and_connect_agent(unsqueeze_node.outputs.output,
                                       squeeze_node.inputs[0],
                                       input_size + (1, ))

        Connector.connect(squeeze_node.outputs[0], fork_node.inputs.input)

        # Connector.connect(fork_node.outputs[1],
        #                   to_one_hot_node.inputs.input)
        Connector.connect(random_action_generator.outputs.one_hot_output,
                          to_one_hot_node.inputs.input)

        Connector.connect(to_one_hot_node.outputs.output,
                          node_action_monitor.inputs.action_in)
        Connector.connect(node_action_monitor.outputs.action_out,
                          node_grid_world.inputs.agent_action,
                          is_backward=True)
Example 6
    def init(self):
        self.se_io.install_nodes(self)
        self._join_node = JoinNode(flatten=True)
        self.add_node(self._join_node)
        self._install_experts()
        self._connect_expert_output()
Example 7
    def _create_node(self):
        return JoinNode(flatten=self._flatten, n_inputs=3)
Example 8
    def _create_node(self):
        return JoinNode(dim=self._dim, n_inputs=3)