Example #1
def test_inverse_fork_join():
    """Checks that if you fork and join, the inverse projection will only have one tensor result."""

    device = 'cpu'
    dtype = torch.float32

    graph = Topology(device)

    source_node = ConstantNode(shape=(2, 4), constant=0)
    fork_node = ForkNode(dim=1, split_sizes=[1, 3])
    join_node = JoinNode(dim=1, n_inputs=2)

    graph.add_node(source_node)
    graph.add_node(fork_node)
    graph.add_node(join_node)

    Connector.connect(source_node.outputs.output, fork_node.inputs.input)
    Connector.connect(fork_node.outputs[0], join_node.inputs[0])
    Connector.connect(fork_node.outputs[1], join_node.inputs[1])

    graph.step()

    output_tensor = torch.rand((2, 4), device=device, dtype=dtype)
    inverse_pass_packet = InversePassOutputPacket(output_tensor,
                                                  join_node.outputs.output)
    results = join_node.recursive_inverse_projection_from_output(
        inverse_pass_packet)

    assert 1 == len(results)
    assert same(results[0].tensor, output_tensor)
Example #2
def test_node_group_inverse_projection_fork_inside(device):
    """Checks that the inverse projection yields a single tensor when the fork node sits inside a node group."""
    float_dtype = get_float(device)

    graph = Topology(device)

    source_node = ConstantNode(shape=(2, 4), constant=0)
    fork_node = ForkNode(dim=1, split_sizes=[1, 3])
    join_node = JoinNode(dim=1, n_inputs=2)
    group_node = graph.create_generic_node_group('group', 1, 2)

    graph.add_node(source_node)
    group_node.add_node(fork_node)
    graph.add_node(join_node)

    Connector.connect(source_node.outputs.output, group_node.inputs[0])

    Connector.connect(group_node.inputs[0].output, fork_node.inputs.input)

    Connector.connect(fork_node.outputs[0], group_node.outputs[0].input)
    Connector.connect(fork_node.outputs[1], group_node.outputs[1].input)

    Connector.connect(group_node.outputs[0], join_node.inputs[0])
    Connector.connect(group_node.outputs[1], join_node.inputs[1])

    graph.step()

    output_tensor = torch.rand((2, 4), device=device, dtype=float_dtype)
    inverse_pass_packet = InversePassOutputPacket(output_tensor, join_node.outputs.output)
    results = join_node.recursive_inverse_projection_from_output(inverse_pass_packet)

    assert 1 == len(results)
    assert same(results[0].tensor, output_tensor)
Example #3
    def test_expert_dimensions(self):
        """Tests multi-dimensional expert indexes."""
        device = 'cpu'
        parent_rf_size_x = parent_rf_size_y = 4
        n_channels = 4
        image_grid_size_x = image_grid_size_y = 16
        input_dimensions = (image_grid_size_y, image_grid_size_x, n_channels)
        parent_rf_dims = Size2D(parent_rf_size_x, parent_rf_size_y)
        parent_grid_dimensions = (4, 4)

        graph = Topology(device)

        node = ReceptiveFieldNode(input_dimensions, parent_rf_dims)

        graph.add_node(node)

        memory_block = MemoryBlock()
        memory_block.tensor = torch.zeros(image_grid_size_y,
                                          image_grid_size_x,
                                          n_channels,
                                          device=device)
        memory_block.tensor[0, parent_rf_size_x, 0] = 1

        Connector.connect(memory_block, node.inputs.input)

        graph.prepare()

        graph.step()

        node_output = node.outputs.output.tensor

        assert node_output.shape == torch.Size(parent_grid_dimensions +
                                               (parent_rf_size_y,
                                                parent_rf_size_x, n_channels))
        assert node_output[0, 1, 0, 0, 0] == 1
Example #4
def test_node_group_empty():
    """An empty node group connected to a source node should not break the topology step."""
    graph = Topology('cpu')

    source_node = create_source_node()
    group_node = graph.create_generic_node_group('group', 1, 1)

    Connector.connect(source_node.outputs[0], group_node.inputs[0])

    graph.add_node(source_node)
    graph.step()
Example #5
def test_node_group_pass_through():
    """A node group whose input is wired directly to its output passes data through to the destination node."""
    graph = Topology('cpu')

    source_node = create_source_node()
    group_node = graph.create_generic_node_group('group', 1, 1)
    destination_node = create_pass_through_node('destination')

    Connector.connect(group_node.inputs[0].output, group_node.outputs[0].input)

    Connector.connect(source_node.outputs[0], group_node.inputs[0])
    Connector.connect(group_node.outputs[0], destination_node.inputs[0])

    graph.add_node(source_node)
    graph.add_node(destination_node)

    graph.step()

    assert 1 == destination_node.outputs[0].tensor[0]
Example #6
    def test_rf_node(self, device):
        """Checks the ReceptiveFieldNode output shape with flattened grid dimensions and that the
        inverse projection reconstructs the input."""
        float_dtype = get_float(device)
        parent_rf_size_x = parent_rf_size_y = 4
        n_channels = 4
        image_grid_size_x = image_grid_size_y = 16
        dimensions = (image_grid_size_y, image_grid_size_x, n_channels)
        parent_rf_dims = Size2D(parent_rf_size_y, parent_rf_size_x)

        graph = Topology(device)

        node = ReceptiveFieldNode(dimensions,
                                  parent_rf_dims,
                                  flatten_output_grid_dimensions=True)

        graph.add_node(node)

        memory_block = MemoryBlock()
        memory_block.tensor = torch.zeros(image_grid_size_y,
                                          image_grid_size_x,
                                          n_channels,
                                          dtype=float_dtype,
                                          device=device)
        memory_block.tensor[0, parent_rf_size_x, 0] = 1

        Connector.connect(memory_block, node.inputs.input)

        graph.prepare()

        graph.step()

        node_output = node.outputs.output.tensor

        n_parent_rfs = (image_grid_size_y // parent_rf_size_y) * (
            image_grid_size_x // parent_rf_size_x)
        assert node_output.shape == torch.Size(
            [n_parent_rfs, parent_rf_size_y, parent_rf_size_x, n_channels])
        assert node_output[1, 0, 0, 0] == 1

        back_projection = node.recursive_inverse_projection_from_output(
            InversePassOutputPacket(node_output, node.outputs.output))
        # assert back_projection.interpret_shape == input_image.shape
        assert same(back_projection[0].tensor, memory_block.tensor)
Example #7
def test_node_group_no_data_on_output():
    """Stepping fails when nothing inside the group is connected to the group's output."""
    graph = Topology('cpu')

    source_node = create_source_node()
    destination_node = create_pass_through_node('destination')

    group_node = graph.create_generic_node_group('group', 1, 1)
    pass_through_node = create_pass_through_node()
    group_node.add_node(pass_through_node)

    Connector.connect(group_node.inputs[0].output, pass_through_node.inputs[0])

    Connector.connect(source_node.outputs[0], group_node.inputs[0])
    Connector.connect(group_node.outputs[0], destination_node.inputs[0])

    graph.add_node(source_node)
    graph.add_node(destination_node)

    # This should fail because the group's output has nothing connected to it from the inside.
    with pytest.raises(TypeError):
        graph.step()
Example #8
def test_skip_execution():
    """A node with skip_execution set keeps its previous output; clearing the flag resumes normal stepping."""
    graph = Topology('cpu')

    node = RandomNodeStub()
    graph.add_node(node)

    graph.step()
    output_1 = torch.clone(node.outputs.output.tensor)

    graph.step()
    output_2 = torch.clone(node.outputs.output.tensor)

    node.skip_execution = True
    graph.step()
    output_3 = torch.clone(node.outputs.output.tensor)

    node.skip_execution = False
    graph.step()
    output_4 = torch.clone(node.outputs.output.tensor)

    assert not same(output_1, output_2)
    assert same(output_2, output_3)
    assert not same(output_3, output_4)
def make_n_steps(topology: Topology, num_steps: int):
    """Steps the topology the given number of times."""
    for _ in range(num_steps):
        topology.step()
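
A minimal usage sketch for make_n_steps, reusing Topology, RandomNodeStub, and same from the examples above; the test name and the step count are illustrative assumptions, not part of the original suite.

def test_make_n_steps_runs():
    # Build a trivial topology with a single random stub node.
    graph = Topology('cpu')
    node = RandomNodeStub()
    graph.add_node(node)

    # One initial step so the output tensor is populated (same pattern as test_skip_execution).
    graph.step()
    before = torch.clone(node.outputs.output.tensor)

    # Ten more steps; each step refreshes the stub's random output,
    # so the tensor should differ from the snapshot taken above.
    make_n_steps(graph, 10)

    assert not same(before, node.outputs.output.tensor)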