Example #1
def test_graph_node_group_ordering():
    graph = Topology('cpu')

    node1 = NodeStub()
    node_group = GenericNodeGroup('group', 1, 1)
    inner_node = NodeStub()
    node2 = NodeStub()

    graph.add_node(node1)
    graph.add_node(node_group)
    graph.add_node(node2)

    node_group.add_node(inner_node)

    Connector.connect(node1.outputs[0], node_group.inputs[0])

    Connector.connect(node_group.inputs[0].output, inner_node.inputs[0])
    Connector.connect(inner_node.outputs[0], node_group.outputs[0].input)

    Connector.connect(node_group.outputs[0], node2.inputs[0])

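    # A direct edge from node1 to node2 bypasses the group, so the ordering must respect both paths.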
    Connector.connect(node1.outputs[1], node2.inputs[1])

    graph.order_nodes()
    assert [node1, node_group, node2] == graph._ordered_nodes
    assert [inner_node] == node_group._ordered_nodes
Example #2
def test_node_group_inverse_projection_fork_inside(device):
    float_dtype = get_float(device)

    graph = Topology(device)

    source_node = ConstantNode(shape=(2, 4), constant=0)
    fork_node = ForkNode(dim=1, split_sizes=[1, 3])
    join_node = JoinNode(dim=1, n_inputs=2)
    group_node = graph.create_generic_node_group('group', 1, 2)

    graph.add_node(source_node)
    group_node.add_node(fork_node)
    graph.add_node(join_node)

    Connector.connect(source_node.outputs.output, group_node.inputs[0])

    Connector.connect(group_node.inputs[0].output, fork_node.inputs.input)

    Connector.connect(fork_node.outputs[0], group_node.outputs[0].input)
    Connector.connect(fork_node.outputs[1], group_node.outputs[1].input)

    Connector.connect(group_node.outputs[0], join_node.inputs[0])
    Connector.connect(group_node.outputs[1], join_node.inputs[1])

    graph.step()

    output_tensor = torch.rand((2, 4), device=device, dtype=float_dtype)
    inverse_pass_packet = InversePassOutputPacket(output_tensor, join_node.outputs.output)
    results = join_node.recursive_inverse_projection_from_output(inverse_pass_packet)

    assert 1 == len(results)
    assert same(results[0].tensor, output_tensor)
Example #3
def test_validation():
    graph = Topology('cpu')

    class ValidationNodeStub(WorkerNodeBase):
        def __init__(self, fails_validation):
            super().__init__()
            self.fails_validation = fails_validation

        def _create_unit(self, creator: TensorCreator) -> Unit:
            return RandomUnitStub(creator)

        def _step(self):
            pass

        def validate(self):
            if self.fails_validation:
                raise NodeValidationException('Node failed to validate')

    node = ValidationNodeStub(fails_validation=True)
    graph.add_node(node)

    with pytest.raises(NodeValidationException):
        graph.prepare()

    node.fails_validation = False

    graph.prepare()
Example #4
def test_inverse_fork_join():
    """Checks that if you fork and join, the inverse projection will only have one tensor result."""

    device = 'cpu'
    dtype = torch.float32

    graph = Topology(device)

    source_node = ConstantNode(shape=(2, 4), constant=0)
    fork_node = ForkNode(dim=1, split_sizes=[1, 3])
    join_node = JoinNode(dim=1, n_inputs=2)

    graph.add_node(source_node)
    graph.add_node(fork_node)
    graph.add_node(join_node)

    Connector.connect(source_node.outputs.output, fork_node.inputs.input)
    Connector.connect(fork_node.outputs[0], join_node.inputs[0])
    Connector.connect(fork_node.outputs[1], join_node.inputs[1])

    graph.step()

    output_tensor = torch.rand((2, 4), device=device, dtype=dtype)
    inverse_pass_packet = InversePassOutputPacket(output_tensor,
                                                  join_node.outputs.output)
    results = join_node.recursive_inverse_projection_from_output(
        inverse_pass_packet)

    assert 1 == len(results)
    assert same(results[0].tensor, output_tensor)
Example #5
    def test_expert_dimensions(self):
        """Tests multi-dimensional expert indexes."""
        device = 'cpu'
        parent_rf_size_x = parent_rf_size_y = 4
        n_channels = 4
        image_grid_size_x = image_grid_size_y = 16
        input_dimensions = (image_grid_size_y, image_grid_size_x, n_channels)
        parent_rf_dims = Size2D(parent_rf_size_x, parent_rf_size_y)
        parent_grid_dimensions = (4, 4)

        graph = Topology(device)

        node = ReceptiveFieldNode(input_dimensions, parent_rf_dims)

        graph.add_node(node)

        memory_block = MemoryBlock()
        memory_block.tensor = torch.zeros(image_grid_size_y,
                                          image_grid_size_x,
                                          n_channels,
                                          device=device)
        memory_block.tensor[0, parent_rf_size_x, 0] = 1

        Connector.connect(memory_block, node.inputs.input)

        graph.prepare()

        graph.step()

        node_output = node.outputs.output.tensor

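        # The active pixel at (row 0, column 4, channel 0) falls into parent grid cell (0, 1) at local offset (0, 0).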
        assert node_output.shape == torch.Size(parent_grid_dimensions +
                                               (parent_rf_size_y,
                                                parent_rf_size_x, n_channels))
        assert node_output[0, 1, 0, 0, 0] == 1
Example #6
def test_node_group_empty():
    graph = Topology('cpu')

    source_node = create_source_node()
    group_node = graph.create_generic_node_group('group', 1, 1)

    Connector.connect(source_node.outputs[0], group_node.inputs[0])

    graph.add_node(source_node)
    graph.step()
Example #7
def test_node_group_pass_through():
    graph = Topology('cpu')

    source_node = create_source_node()
    group_node = graph.create_generic_node_group('group', 1, 1)
    destination_node = create_pass_through_node('destination')

    Connector.connect(group_node.inputs[0].output, group_node.outputs[0].input)

    Connector.connect(source_node.outputs[0], group_node.inputs[0])
    Connector.connect(group_node.outputs[0], destination_node.inputs[0])

    graph.add_node(source_node)
    graph.add_node(destination_node)

    graph.step()

    assert 1 == destination_node.outputs[0].tensor[0]
Example #8
def test_node_group_no_data_on_output():
    graph = Topology('cpu')

    source_node = create_source_node()
    destination_node = create_pass_through_node('destination')

    group_node = graph.create_generic_node_group('group', 1, 1)
    pass_through_node = create_pass_through_node()
    group_node.add_node(pass_through_node)

    Connector.connect(group_node.inputs[0].output, pass_through_node.inputs[0])

    Connector.connect(source_node.outputs[0], group_node.inputs[0])
    Connector.connect(group_node.outputs[0], destination_node.inputs[0])

    graph.add_node(source_node)
    graph.add_node(destination_node)

    # This should fail because the group's output does not have anything connected to it from the inside.
    with pytest.raises(TypeError):
        graph.step()
Example #9
def create_graph():
    graph = Topology('cpu')

    node_1 = RandomNodeStub()
    node_2 = RandomNodeStub()
    node_3 = RandomNodeStub()

    graph.add_node(node_1)
    graph.add_node(node_2)
    graph.add_node(node_3)

    return graph
Example #10
    def test_rf_node(self, device):
        float_dtype = get_float(device)
        parent_rf_size_x = parent_rf_size_y = 4
        n_channels = 4
        image_grid_size_x = image_grid_size_y = 16
        dimensions = (image_grid_size_y, image_grid_size_x, n_channels)
        parent_rf_dims = Size2D(parent_rf_size_y, parent_rf_size_x)

        graph = Topology(device)

        node = ReceptiveFieldNode(dimensions,
                                  parent_rf_dims,
                                  flatten_output_grid_dimensions=True)

        graph.add_node(node)

        memory_block = MemoryBlock()
        memory_block.tensor = torch.zeros(image_grid_size_y,
                                          image_grid_size_x,
                                          n_channels,
                                          dtype=float_dtype,
                                          device=device)
        memory_block.tensor[0, parent_rf_size_x, 0] = 1

        Connector.connect(memory_block, node.inputs.input)

        graph.prepare()

        graph.step()

        node_output = node.outputs.output.tensor

        n_parent_rfs = (image_grid_size_y // parent_rf_size_y) * (
            image_grid_size_x // parent_rf_size_x)
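        # With the grid flattened, the active pixel at column 4 lands in receptive field index 1, hence the assert below.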
        assert node_output.shape == torch.Size(
            [n_parent_rfs, parent_rf_size_y, parent_rf_size_x, n_channels])
        assert node_output[1, 0, 0, 0] == 1

        back_projection = node.recursive_inverse_projection_from_output(
            InversePassOutputPacket(node_output, node.outputs.output))
        # assert back_projection.interpret_shape == input_image.shape
        assert same(back_projection[0].tensor, memory_block.tensor)
Example #11
    def test_inverse_projection(self, device):
        dtype = get_float(device)
        params = ExpertParams()
        params.flock_size = 2
        params.n_cluster_centers = 4

        params.spatial.input_size = 6
        params.spatial.buffer_size = 7
        params.spatial.batch_size = 3
        params.temporal.n_frequent_seqs = 2
        params.temporal.seq_length = 3
        input_size = (3, 2)

        graph = Topology(device)
        node = ExpertFlockNode(params=params)

        graph.add_node(node)

        input_block = MemoryBlock()
        input_block.tensor = torch.rand((params.flock_size, ) + input_size,
                                        dtype=dtype,
                                        device=device)
        Connector.connect(input_block, node.inputs.sp.data_input)

        graph.prepare()

        node._unit.flock.sp_flock.cluster_centers = torch.tensor(
            [[[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 0.5, 0.5, 0, 0],
              [0, 0, 0.5, 0, 0.5, 0]],
             [[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0],
              [0, 0, 0, 1, 0, 0]]],
            dtype=dtype,
            device=device)

        # Just SP inverse projection
        data = torch.tensor([[0, 0, 1, 0], [0.2, 0.3, 0.4, 0.1]],
                            dtype=dtype,
                            device=device)

        packet = InversePassOutputPacket(data,
                                         node.outputs.tp.projection_outputs)
        projected = node.recursive_inverse_projection_from_output(packet)

        # The result of the projection itself would be [[0, 0, 0.5, 0.5, 0, 0], ...], and it should be viewed as (2, 3, 2).
        expected_projection = torch.tensor(
            [[[0, 0], [0.5, 0.5], [0, 0]], [[0.2, 0.3], [0.4, 0.1], [0, 0]]],
            dtype=dtype,
            device=device)

        assert same(expected_projection, projected[0].tensor)
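The expected values above can be reproduced by hand. Assuming the SP inverse projection is a weighted sum of cluster centers (an assumption read off this test, not from library documentation), each output row is data[i] @ cluster_centers[i], viewed with the per-expert input shape (3, 2). A minimal plain-torch sketch of that arithmetic:

import torch

cluster_centers = torch.tensor(
    [[[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 0.5, 0.5, 0, 0],
      [0, 0, 0.5, 0, 0.5, 0]],
     [[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0],
      [0, 0, 0, 1, 0, 0]]])
data = torch.tensor([[0.0, 0.0, 1.0, 0.0], [0.2, 0.3, 0.4, 0.1]])

# Batched matmul: (2, 1, 4) @ (2, 4, 6) -> (2, 1, 6), then view each expert's row as (3, 2).
projected = torch.bmm(data.unsqueeze(1), cluster_centers).view(2, 3, 2)
# projected[0] -> [[0, 0], [0.5, 0.5], [0, 0]]
# projected[1] -> [[0.2, 0.3], [0.4, 0.1], [0, 0]]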
Example #12
    if args.use_sp_only:
        topology_class = SCN_SC1_R1
    else:
        topology_class = CN_C1_R1

    params = {
        "name": "C5C1R1",
        "l_0_cluster_centers": 10,
        "l_1_cluster_centers": 20,
        "l_0_rf_dims": (3, 3),
        "l_0_rf_stride": None,
        "l_1_rf_dims": (2, 2),
        "sp_n_cluster_centers": 10
    }

    t = Topology('cuda')
    if args.space_engineers:
        se_params = SeEnvironmentParams(
            shapes=list(range(SeEnvironmentParams.n_shapes)))
        params["bottom_layer_size"] = 5
        params["env_size"] = se_params.env_size
        params["label_length"] = se_params.n_shapes
        env = SEEnvironment(se_params)
    else:
        params["bottom_layer_size"] = 2
        params["env_size"] = (24, 24)
        params["label_length"] = 3
        env = BallEnvironment(BallEnvironmentParams())

    cnc1r1 = topology_class(**params)
    t.add_node(cnc1r1)
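To drive the assembled topology directly, the prepare()/step() API shown in the other examples should suffice; the original script presumably hands t to an experiment runner instead, so treat this continuation of the snippet above as a hedged sketch:

    t.prepare()
    for _ in range(100):
        t.step()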
Example #13
def test_skip_execution():
    graph = Topology('cpu')

    node = RandomNodeStub()
    graph.add_node(node)

    graph.step()
    output_1 = torch.clone(node.outputs.output.tensor)

    graph.step()
    output_2 = torch.clone(node.outputs.output.tensor)

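    # While skip_execution is set, the node keeps its previous output instead of recomputing it.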
    node.skip_execution = True
    graph.step()
    output_3 = torch.clone(node.outputs.output.tensor)

    node.skip_execution = False
    graph.step()
    output_4 = torch.clone(node.outputs.output.tensor)

    assert not same(output_1, output_2)
    assert same(output_2, output_3)
    assert not same(output_3, output_4)
Example #14
    def __init__(self):
        Topology.__init__(self, device='cuda')
Example #15
def test_is_initialized():
    graph = Topology('cpu')
    assert not graph.is_initialized()

    graph.add_node(NodeStub())
    assert not graph.is_initialized()

    graph.prepare()
    assert graph.is_initialized()

    with raises(IllegalStateException):
        graph.add_node(NodeStub())
def make_n_steps(topology: Topology, num_steps: int):
    for step in range(num_steps):
        topology.step()
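A short usage sketch tying the helpers together (create_graph is the helper from Example #9; the explicit prepare() call mirrors test_is_initialized and is an assumption about what step() expects):

graph = create_graph()
graph.prepare()
make_n_steps(graph, num_steps=10)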