def test_expert_dimensions(self):
    """Tests multi-dimensional expert indexes."""
    device = 'cpu'
    parent_rf_size_x = parent_rf_size_y = 4
    n_channels = 4
    image_grid_size_x = image_grid_size_y = 16
    # Input image: (height, width, channels).
    input_dimensions = (image_grid_size_y, image_grid_size_x, n_channels)
    parent_rf_dims = Size2D(parent_rf_size_x, parent_rf_size_y)
    # 16x16 image split into 4x4 receptive fields -> 4x4 grid of parents.
    parent_grid_dimensions = (4, 4)

    graph = Topology(device)
    node = ReceptiveFieldNode(input_dimensions, parent_rf_dims)
    graph.add_node(node)

    memory_block = MemoryBlock()
    memory_block.tensor = torch.zeros(image_grid_size_y, image_grid_size_x, n_channels, device=device)
    # Mark one pixel in row 0, column 4 — i.e. the first pixel of the
    # second receptive field in the first row of the parent grid.
    memory_block.tensor[0, parent_rf_size_x, 0] = 1

    Connector.connect(memory_block, node.inputs.input)

    graph.prepare()
    graph.step()

    node_output = node.outputs.output.tensor
    # Output is indexed (grid_y, grid_x, rf_y, rf_x, channel).
    assert node_output.shape == torch.Size(parent_grid_dimensions + (parent_rf_size_y, parent_rf_size_x, n_channels))
    # The marked pixel must land at local position (0, 0) of parent (0, 1).
    assert node_output[0, 1, 0, 0, 0] == 1
def test_join_node_inverse_flatten():
    """Inverse projection through a flattening JoinNode must split the flat
    output back into tensors matching the original input shapes."""
    device = 'cpu'
    creator = AllocatingCreator(device)
    dtype = creator.float32

    # The flat output [1..10] should split back into a (2, 4) tensor and a
    # (2,) tensor, mirroring the two joined inputs.
    expected_results = [creator.tensor([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=dtype, device=device),
                        creator.tensor([9, 10], dtype=dtype, device=device)]

    input_memory_blocks = [MemoryBlock(), MemoryBlock()]
    input_memory_blocks[0].tensor = creator.zeros((2, 4))
    input_memory_blocks[1].tensor = creator.zeros((2, ))

    output_tensor = creator.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype, device=device)

    join_node = JoinNode(flatten=True)
    Connector.connect(input_memory_blocks[0], join_node.inputs[0])
    Connector.connect(input_memory_blocks[1], join_node.inputs[1])

    output_inverse_packet = InversePassOutputPacket(output_tensor, join_node.outputs.output)

    join_node.allocate_memory_blocks(creator)
    results = join_node.recursive_inverse_projection_from_output(output_inverse_packet)

    for expected, result in zip(expected_results, results):
        assert same(expected, result.tensor)
def test_fork_node_inverse_0():
    """Inverse projection through a ForkNode (split along dim 0): the tensor
    seen at output 0 maps back into the top half of the input, with the rest
    zero-filled."""
    # TODO (Test): add for dim = 1, then refactor.
    creator = AllocatingCreator(device='cpu')
    dtype = creator.float32

    dim = 0

    # Output 0 covers rows 0-1 of the (4, 4) input; rows 2-3 get zeros.
    expected_results = [creator.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [0, 0, 0, 0], [0, 0, 0, 0]],
                                       dtype=dtype, device=creator.device)]

    input_memory_block = MemoryBlock()
    input_memory_block.tensor = creator.zeros((4, 4))

    output_tensor = creator.tensor([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=dtype, device=creator.device)

    fork_node = ForkNode(dim, split_sizes=[2, 2])
    Connector.connect(input_memory_block, fork_node.inputs.input)

    output_inverse_packet = InversePassOutputPacket(output_tensor, fork_node.outputs[0])

    fork_node.allocate_memory_blocks(creator)
    results = fork_node.recursive_inverse_projection_from_output(output_inverse_packet)

    for expected, result in zip(expected_results, results):
        assert same(expected, result.tensor)
def _create_graph():
    """Build two stub nodes with the first's `output1` wired to the
    second's `input1`, and return them as a pair."""
    source, target = NodeStub(), NodeStub()
    Connector.connect(source.outputs.output1, target.inputs.input1)
    return source, target
def __init__(self, input_dims, rf_output_dims, stride: Tuple[int, int] = None, expert_params: ExpertParams = None, num_flocks: int = 1, name="", seed: int = None, sub_field_size=6):
    """Variant of the parent group in which each expert flock receives a
    random subfield of the LRF output instead of the full field.

    Args:
        sub_field_size: number of samples drawn for each expert's subfield
        (all other args are forwarded unchanged to the parent constructor)
    """
    super().__init__(input_dims, rf_output_dims, stride, expert_params, num_flocks, name, seed)

    # One subfield output per expert flock created by the parent.
    n_sub_fields = len(self.expert_flock_nodes)
    subfield_node = RandomSubfieldForkNode(n_outputs=n_sub_fields,
                                           n_samples=sub_field_size,
                                           first_non_expanded_dim=-3)
    self.add_node(subfield_node)
    Connector.connect(self.lrf_node.outputs.output, subfield_node.inputs.input)

    # Rewire each expert: drop the parent's direct LRF->expert connection
    # and feed the expert its own random subfield instead.
    for i, expert_flock in enumerate(self.expert_flock_nodes):
        Connector.disconnect_input(expert_flock.inputs.sp.data_input)
        Connector.connect(subfield_node.outputs[i], expert_flock.inputs.sp.data_input)
def __init__(self, dataset_seed: int = 123, model_seed: int = 321, baseline_seed: int = 333, num_cc: int = 100, batch_s: int = 300, cbt: int = 1000, lr=0.1, no_landmarks: int = 100, rand_order: bool = False, mbt: int = 1000):
    """Topology: SE navigation dataset -> receptive field -> spatial pooler,
    plus an unconnected random-number node used as a baseline.

    Args:
        dataset_seed: seed of the SE dataset node
        model_seed: seed of the spatial pooler
        baseline_seed: seed of the random baseline node
        num_cc: number of SP cluster centers
        batch_s: SP batch size (buffer size is 2x this)
        cbt: cluster boost threshold
        lr: SP learning rate
        no_landmarks: number of landmarks for the SE dataset
        rand_order: present samples in random order
        mbt: max boost time
    """
    super().__init__(device="cuda")
    flock_size = 1  # TODO flock_size > 1 not supported by the adapter yet
    flock_input_size = IMAGE_SIZE.value * IMAGE_SIZE.value * DatasetSeBase.N_CHANNELS
    # eoy=eox=1: presumably a single receptive field covering the whole
    # image — TODO confirm against compute_lrf_params.
    flock_input_size_tuple, flock_output_size = compute_lrf_params(IMAGE_SIZE.value, IMAGE_SIZE.value, DatasetSeBase.N_CHANNELS, eoy=1, eox=1)

    # define params
    self._sp_params = MnistSpTopology.get_sp_params(
        num_cluster_centers=num_cc,
        cluster_boost_threshold=cbt,
        learning_rate=lr,
        buffer_size=2 * batch_s,
        batch_size=batch_s,
        input_size=flock_input_size,
        flock_size=flock_size,
        max_boost_time=mbt)
    self.output_dimension = flock_size * num_cc

    self._se_params = SeDatasetSpTopology.get_se_params(random_order=rand_order, no_landmarks=no_landmarks)

    # define nodes
    self.node_sp = SpatialPoolerFlockNode(self._sp_params.clone(), seed=model_seed)
    self._lrf_node = ReceptiveFieldNode(flock_input_size_tuple, flock_output_size)
    self.node_dataset = DatasetSeNavigationNode(self._se_params, dataset_seed)
    # Baseline: random class guesses in the SP's output range; not connected.
    self.node_random = RandomNumberNode(upper_bound=self.output_dimension, seed=baseline_seed)

    # add nodes and connect the graph
    self.add_node(self.node_dataset)
    self.add_node(self.node_sp)
    self.add_node(self._lrf_node)
    self.add_node(self.node_random)

    # connect SEDataset->LRF->SP
    Connector.connect(self.node_dataset.outputs.image_output, self._lrf_node.inputs[0])
    Connector.connect(self._lrf_node.outputs[0], self.node_sp.inputs.sp.data_input)
def __init__(
        self,
        top_layer_params: Optional[MultipleLayersParams] = MultipleLayersParams(),
        conv_layers_params: MultipleLayersParams = MultipleLayersParams(),
        model_seed: int = 321,
        # DATASET
        image_size=SeDatasetSize.SIZE_24,
        baseline_seed: int = 123,
        class_filter: List[int] = None,
        random_order: bool = False,
        noise_amp: float = 0.0):
    """Constructor of the TA topology which should solve the Task0.

    Args:
        top_layer_params: params of the top layer; if None, the model is
            built without a top layer (and without the label adapter)
        conv_layers_params: params of the convolutional layers
        model_seed: seed of the model
        image_size: size of the dataset image
        baseline_seed: seed for the baseline nodes
        class_filter: filters the classes in the dataset
        random_order: passed to the SE group (dataset ordering)
        noise_amp: passed to the SE group (noise amplitude)

    NOTE(review): the `MultipleLayersParams()` defaults are evaluated once
    at definition time, so all default calls share the same instances —
    safe only if these params objects are never mutated; confirm.
    """
    super().__init__('cuda')

    # Collect cluster-center counts of all layers for the baseline group.
    layer_sizes = conv_layers_params.read_list_of_params('n_cluster_centers')
    if top_layer_params is not None:
        layer_sizes += top_layer_params.read_list_of_params('n_cluster_centers')

    self.se_group = SeNodeGroup(baseline_seed=baseline_seed,
                                layer_sizes=layer_sizes,
                                class_filter=class_filter,
                                image_size=image_size,
                                random_order=random_order,
                                noise_amp=noise_amp)
    self.add_node(self.se_group)

    if top_layer_params is None:
        # Convolutional layers only.
        self.model = NCMGroup(conv_layers_params=conv_layers_params,
                              image_size=(image_size.value, image_size.value, 3),
                              model_seed=model_seed)
    else:
        # Convolutional layers plus a top layer with a label adapter.
        self.model = Nc1r1GroupWithAdapter(conv_layers_params=conv_layers_params,
                                           top_layer_params=top_layer_params,
                                           num_labels=20,
                                           image_size=(image_size.value, image_size.value, 3),
                                           model_seed=model_seed)
    self.add_node(self.model)

    Connector.connect(self.se_group.outputs.image, self.model.inputs.image)

    # Only the adapter variant consumes labels.
    if isinstance(self.model, Nc1r1GroupWithAdapter):
        Connector.connect(self.se_group.outputs.labels, self.model.inputs.label)
def test_to_one_hot_speed(self, capsys):
    """Benchmark ToOneHotNode.step() on a 150-element random input vector.

    Each measured step refills the input memory block with fresh random data
    and runs the node once; timing output is printed via the `measure_time`
    decorator with capture disabled.
    """

    @measure_time(iterations=100, function_repetitions=100)
    def measured_step():
        # Refill the input in place, then run the node once.
        mb.tensor.copy_(torch.rand(input_shape, dtype=get_float(device), device=device))
        to_one_hot.step()

    input_shape = (150, )
    device = 'cuda'
    vector = torch.zeros(input_shape, dtype=get_float(device), device=device)
    mb = MemoryBlock()
    # `vector` already has the target dtype and device; `clone()` is the
    # documented way to copy-construct from an existing tensor —
    # `torch.tensor(vector, ...)` emits a UserWarning and copies twice.
    mb.tensor = vector.clone()
    to_one_hot = ToOneHotNode(mode=ToOneHotMode.RANDOM)
    Connector.connect(mb, to_one_hot.inputs.input)
    to_one_hot.allocate_memory_blocks(AllocatingCreator(device))

    with capsys.disabled():
        measured_step()
def test_graph_node_group_ordering():
    """Node ordering must handle a node group: the group is ordered between
    node1 and node2 at topology level, and the inner node is ordered within
    the group."""
    graph = Topology('cpu')
    node1 = NodeStub()
    node_group = GenericNodeGroup('group', 1, 1)
    inner_node = NodeStub()
    node2 = NodeStub()

    graph.add_node(node1)
    graph.add_node(node_group)
    graph.add_node(node2)
    node_group.add_node(inner_node)

    # Chain: node1 -> group input -> inner node -> group output -> node2,
    # plus a direct node1 -> node2 connection on a second slot pair.
    Connector.connect(node1.outputs[0], node_group.inputs[0])
    Connector.connect(node_group.inputs[0].output, inner_node.inputs[0])
    Connector.connect(inner_node.outputs[0], node_group.outputs[0].input)
    Connector.connect(node_group.outputs[0], node2.inputs[0])
    Connector.connect(node1.outputs[1], node2.inputs[1])

    graph.order_nodes()

    assert [node1, node_group, node2] == graph._ordered_nodes
    assert [inner_node] == node_group._ordered_nodes
def _create_and_connect_agent(self, input_image: MemoryBlock, output_reconstruction: InputSlot, input_size: Tuple[int, int, int]):
    """Build a 3-layer convolutional TA group, feed it `input_image`, and
    route its predicted/reconstructed input to `output_reconstruction`.

    Args:
        input_image: memory block providing the image
        output_reconstruction: slot receiving the group's reconstruction
        input_size: image dimensions (height, width, channels) — TODO confirm order
    """
    params = MultipleLayersParams()
    params.num_conv_layers = 3
    params.n_cluster_centers = [28, 14, 7]
    params.compute_reconstruction = True
    params.conv_classes = ConvLayer
    params.sp_buffer_size = 5000
    params.sp_batch_size = 500
    params.learning_rate = 0.2
    params.cluster_boost_threshold = 1000
    params.max_encountered_seqs = 1000
    params.max_frequent_seqs = 500
    params.seq_lookahead = 2
    params.seq_length = 4
    params.exploration_probability = 0
    # Layer 0 sees the whole image as one receptive field; upper layers use
    # 1x1 fields.
    params.rf_size = [(input_size[0], input_size[1]), (1, 1), (1, 1)]
    params.rf_stride = None

    ta_group = R1NCMGroup(conv_layers_params=params, model_seed=None, image_size=input_size)
    self.add_node(ta_group)

    Connector.connect(input_image, ta_group.inputs.image)
    Connector.connect(ta_group.outputs.predicted_reconstructed_input, output_reconstruction)
def _add_layer(self, layer: NodeBase, input_slot: InputSlot, output_slot: OutputSlotBase) -> NodeBase:
    """Append `layer` to the chain: register it, connect it to the previous
    layer's output (if there is one), and remember `output_slot` as the new
    chain tail.

    Returns:
        The layer, unchanged, so calls can be inlined.
    """
    self.add_node(layer)
    previous_output = self._last_output
    if previous_output is not None:
        Connector.connect(previous_output, input_slot)
    self._last_output = output_slot
    return layer
def __init__(self, seed: int = None):
    """SE navigation dataset -> expand (to flock_size) -> spatial pooler flock.

    Args:
        seed: seed for the dataset node, the SP node, and the global RNGs
    """
    super().__init__('cuda')

    self.se_world_params = DatasetSENavigationParams(dataset_size=SeDatasetSize.SIZE_24)
    self.se_world_params.sampling_method = SamplingMethod.ORDERED

    self.sp_params = ExpertParams()
    self.sp_params.n_cluster_centers = 10
    # Flattened image: width * height * channels.
    self.sp_params.spatial.input_size = \
        self.se_world_params.dataset_dims[0] * \
        self.se_world_params.dataset_dims[1] * \
        DatasetSeBase.N_CHANNELS
    self.sp_params.flock_size = 3
    self.sp_params.spatial.buffer_size = 100
    self.sp_params.spatial.batch_size = 45
    self.sp_params.spatial.cluster_boost_threshold = 30

    # create the node instances
    se_dataset = DatasetSeNavigationNode(self.se_world_params, seed=seed)
    # Expands dim 0 to flock_size — presumably replicating the single image
    # for each flock member; confirm against ExpandNode.
    expand_node = ExpandNode(dim=0, desired_size=self.sp_params.flock_size)
    sp_node = SpatialPoolerFlockNode(self.sp_params, seed=seed)

    self.add_node(se_dataset)
    self.add_node(expand_node)
    self.add_node(sp_node)

    Connector.connect(se_dataset.outputs.image_output, expand_node.inputs.input)
    Connector.connect(expand_node.outputs.output, sp_node.inputs.sp.data_input)

    set_global_seeds(seed)
def _create_and_connect_agent(self, input_image: MemoryBlock, input_size: Tuple[int, int, int]):
    """Build a 4-layer SP convolutional group and feed it `input_image`.

    Args:
        input_image: memory block providing the image
        input_size: image dimensions passed to the group as image_size
    """
    params = MultipleLayersParams()
    params.num_conv_layers = 4
    # Per-layer settings (4 entries, one per conv layer).
    params.n_flocks = [5, 5, 1, 1]
    params.n_cluster_centers = [30, 60, 60, 9]
    params.compute_reconstruction = True
    params.conv_classes = SpConvLayer
    params.sp_buffer_size = 5000
    params.sp_batch_size = 500
    params.learning_rate = 0.1
    params.cluster_boost_threshold = 1000
    params.max_encountered_seqs = 1000
    params.max_frequent_seqs = 500
    params.seq_lookahead = 2
    params.seq_length = 4
    params.exploration_probability = 0
    # Single rf_size tuple: presumably shared 2x2 receptive fields for all
    # layers — confirm against MultipleLayersParams.
    params.rf_size = (2, 2)
    params.rf_stride = None

    ta_group = NCMGroup(conv_layers_params=params, model_seed=None, image_size=input_size)
    self.add_node(ta_group)

    Connector.connect(input_image, ta_group.inputs.image)
def __init__(
        self,
        num_predictors: int,
        learning_rate: Optional[float] = 0.1,
        coefficients_minimum_max: float = 0.1,
        hidden_size: int = 10,
        n_layers: int = 1,
        output_activation: Optional[OutputActivation] = OutputActivation.IDENTITY,
        name: str = "FlockNetworkGroup",
):
    """Group wrapping a NetworkFlockNode of `num_predictors` networks.

    The group's `data` input feeds both the flock's input and its target
    (each network learns to predict its own input), gated by the
    `learning_coefficients` input; predictions and per-network errors are
    exposed as group outputs.

    Args:
        num_predictors: flock size (number of networks)
        learning_rate: learning rate of each network
        coefficients_minimum_max: passed to the network params
        hidden_size: hidden layer size of each network
        n_layers: number of hidden layers
        output_activation: activation of the network output layer
        name: name of the group
    """
    super().__init__(name, inputs=PredictorGroupInputs(self), outputs=PredictorGroupOutputs(self))

    p_node_params = NetworkFlockNodeParams()
    p_node_params.flock_size = num_predictors
    p_node_params.do_delay_coefficients = False
    p_node_params.do_delay_input = True
    p_node_params.normalize_coefficients = False
    p_node_params.negative_coefficients_removal = True
    p_node_params.buffer_size = 500
    p_node_params.batch_size = 400
    p_node_params.learning_period = 20

    p_network_params = NeuralNetworkFlockParams()
    p_network_params.flock_size = p_node_params.flock_size
    p_network_params.input_size = 1  # determined form the input size in the Node._derive_params
    p_network_params.hidden_size = hidden_size
    p_network_params.output_size = 1  # determined from the target size in the Node._derive_params
    p_network_params.output_activation = output_activation
    p_network_params.learning_rate = learning_rate
    p_network_params.coefficients_minimum_max = coefficients_minimum_max
    p_network_params.n_hidden_layers = n_layers

    predictors = NetworkFlockNode(node_params=p_node_params, network_params=p_network_params, name="Predictors")
    self.predictors = predictors
    self.add_node(predictors)

    # input of the group to both input and target of the flock
    Connector.connect(self.inputs.data.output, predictors.inputs.input_data)
    Connector.connect(self.inputs.data.output, predictors.inputs.target_data)

    # learning coefficients
    Connector.connect(self.inputs.learning_coefficients.output, predictors.inputs.learning_coefficients)

    # flock -> group outputs
    Connector.connect(predictors.outputs.prediction_output, self.outputs.predictors_activations.input)
    Connector.connect(predictors.outputs.error_output, self.outputs.predictors_activation_errors.input)
def _connect_expert_output(self):
    """Fork the agent output into (expert output, label) and feed the label
    part back to the SE task input (backward connection)."""
    n_labels = SeIoAccessor.get_num_labels(self.se_io)
    expert_part_size = self._top_level_expert_output_size()

    fork = ForkNode(1, [expert_part_size, n_labels])
    self._fork_node = fork
    self.add_node(fork)

    Connector.connect(self._get_agent_output(), fork.inputs.input)
    Connector.connect(fork.outputs[1],
                      self.se_io.inputs.agent_to_task_label,
                      is_backward=True)
def __init__(self):
    """Wire a MNIST dataset node through a random-noise node on CUDA."""
    super().__init__(device='cuda')

    dataset = DatasetMNISTNode(params=self._mnist_params)
    noise = RandomNoiseNode()
    for node in (dataset, noise):
        self.add_node(node)

    Connector.connect(dataset.outputs.data, noise.inputs.input)
def test_node_group_empty():
    """A node group containing no inner nodes must not break a topology step."""
    topology = Topology('cpu')
    source = create_source_node()
    empty_group = topology.create_generic_node_group('group', 1, 1)

    Connector.connect(source.outputs[0], empty_group.inputs[0])
    topology.add_node(source)

    topology.step()
def __init__(self, dataset_seed: int = 123, model_seed: int = 321, baseline_seed: int = 333, num_cc: int = 10, batch_s: int = 300, cbt: int = 1000, lr=0.1, examples_per_cl: int = None, mbt: int = 1000):
    """MNIST -> receptive field -> spatial pooler topology, plus an
    unconnected random-number node used as a baseline.

    Args:
        dataset_seed: seed of the MNIST dataset node
        model_seed: seed of the spatial pooler
        baseline_seed: seed of the random baseline node
        num_cc: number of SP cluster centers
        batch_s: SP batch size (buffer size is 2x this)
        cbt: cluster boost threshold
        lr: SP learning rate
        examples_per_cl: examples per class for the MNIST params
        mbt: max boost time
    """
    super().__init__("cuda")
    flock_size = 1  # TODO flock_size > 1 not supported by the adapter yet

    # define params
    self._sp_params = MnistSpTopology.get_sp_params(
        num_cluster_centers=num_cc,
        cluster_boost_threshold=cbt,
        learning_rate=lr,
        buffer_size=2 * batch_s,
        batch_size=batch_s,
        input_size=28 * 28,
        flock_size=flock_size,
        max_boost_time=mbt)
    self.output_dimension = flock_size * num_cc

    _mnist_params = MnistSpTopology.get_mnist_params(examples_per_cl)
    # eoy=eox=1: presumably one receptive field covering the whole 28x28
    # image — TODO confirm against compute_lrf_params.
    flock_input_size, flock_output_size = compute_lrf_params(28, 28, 1, eoy=1, eox=1)

    # define nodes
    self.node_sp = SpatialPoolerFlockNode(self._sp_params.clone(), seed=model_seed)
    self._lrf_node = ReceptiveFieldNode(flock_input_size, flock_output_size)
    self.node_mnist = DatasetMNISTNode(params=_mnist_params, seed=dataset_seed)
    # Baseline: random class guesses in the SP's output range; not connected.
    self.node_random = RandomNumberNode(upper_bound=self.output_dimension, seed=baseline_seed)

    # add nodes and connect the graph
    self.add_node(self.node_mnist)
    self.add_node(self.node_sp)
    self.add_node(self._lrf_node)
    self.add_node(self.node_random)

    # connect MNIST->LRF->SP
    Connector.connect(self.node_mnist.outputs.data, self._lrf_node.inputs[0])
    Connector.connect(self._lrf_node.outputs[0], self.node_sp.inputs.sp.data_input)
def __init__(self, name: str, update_period: int):
    """Group with one input and one output, internally bridged by a
    single-input JoinNode (a pass-through)."""
    super().__init__(name,
                     update_period,
                     inputs=SimpleGroupInputs(self, n_inputs=1),
                     outputs=SimpleGroupOutputs(self, n_outputs=1))

    bridge = JoinNode(n_inputs=1)
    self.add_node(bridge)

    # group input -> join -> group output
    Connector.connect(self.inputs[0].output, bridge.inputs[0])
    Connector.connect(bridge.outputs.output, self.outputs[0].input)

    self.order_nodes()
def _prepare_node(self):
    """Build the node under test wired to source blocks for one generated
    input set, allocate its memory, validate it, and return (node, sources).
    """
    # Take the first generated input set; `next()` is the idiomatic form of
    # calling `__next__()` directly (PEP 3114).
    inputs = next(self._generate_input_tensors())
    sources = self._inputs_to_sources(inputs)
    node = self._create_node()

    # Connect each source block to the corresponding node input.
    for source, input_block in zip(sources, node.inputs):
        Connector.connect(source, input_block)

    node.allocate_memory_blocks(self._creator)
    node.validate()
    return node, sources
def set_testing_model(self):
    """Switch the topology into testing mode: freeze the spatial pooler,
    swap the train MNIST node for the test one, and rewire the noise input
    accordingly."""
    # Copy the pooler's learned state into the backup unit — presumably so
    # the trained state can be restored later; confirm copy direction.
    # noinspection PyProtectedMember
    self._node_spatial_pooler._unit.copy_to(self._node_spatial_pooler_backup._unit)
    self._node_spatial_pooler.switch_learning(False)

    # Stop the training dataset node, start the testing one.
    self._node_mnist.skip_execution = True
    self._node_mnist_test.skip_execution = False

    # Re-point the noise node from train data to test data.
    Connector.disconnect_input(self._noise_node.inputs[0])
    Connector.connect(self._node_mnist_test.outputs.data, self._noise_node.inputs[0])
def test_scatter_node(self, device, input, mask, output_shape, dimension, expected_result):
    """One ScatterNode step must scatter `input` according to `mask` into a
    tensor of `output_shape` along `dimension`."""
    dtype = get_float(device)

    source_block = MemoryBlock()
    source_block.tensor = torch.tensor(input, device=device, dtype=dtype)
    expected_tensor = torch.tensor(expected_result, device=device, dtype=dtype)

    scatter = ScatterNode(mapping=mask,
                          output_shape=output_shape,
                          dimension=dimension,
                          device=device)
    Connector.connect(source_block, scatter.inputs.input)
    scatter.allocate_memory_blocks(AllocatingCreator(device))

    scatter.step()

    assert same(scatter.outputs.output.tensor, expected_tensor)
def create_connected_conv_layers(layers_params: List[ConvLayerParams], input_dims: Tuple[int, int, int]):
    """Create a stack of conv layers and chain them together.

    Each layer is built from its params and the running `input_dims`, which
    `create_conv_layer` updates to that layer's output dims; consecutive
    layers are then connected data-output -> data-input.

    Args:
        layers_params: per-layer parameters, in stack order
        input_dims: dimensions of the first layer's input

    Returns:
        (list of created layers, flattened size of the last layer's output)
    """
    conv_layers = []
    # The previous `enumerate` index was unused — iterate directly.
    for layer_params in layers_params:
        conv_layer, input_dims = create_conv_layer(layer_params, input_dims)
        conv_layers.append(conv_layer)

    # Chain consecutive layers: layer k's output feeds layer k+1's input.
    for output_layer, input_layer in zip(conv_layers, conv_layers[1:]):
        Connector.connect(output_layer.outputs.data, input_layer.inputs.data)

    return conv_layers, dim_prod(input_dims)
def __init__(self, se_group: SeNodeGroup, model: ClassificationModelGroup):
    """Connect an SE node group to a classification model: images and labels
    flow from the SE group into the model."""
    super().__init__('cuda')
    self._se_group = se_group
    self._model = model

    for node in (se_group, model):
        self.add_node(node)

    Connector.connect(se_group.outputs.image, model.inputs.image)
    Connector.connect(se_group.outputs.labels, model.inputs.label)
def __init__(self, se_group: SeNodeGroup, model: MultilayerModelGroup):
    """Connect an SE node group to a multilayer model: images and labels
    flow from the SE group into the model."""
    super().__init__('cuda')
    self.se_group = se_group
    self.model = model

    for node in (se_group, model):
        self.add_node(node)

    Connector.connect(se_group.outputs.image, model.inputs.image)
    Connector.connect(se_group.outputs.labels, model.inputs.label)
def _create_and_connect_agent(self, join_node: JoinNode, fork_node: ForkNode):
    """Insert a single ExpertFlockNode between `join_node` and `fork_node`,
    wrapped by an unsqueeze/squeeze pair that adds and removes the leading
    flock dimension (flock_size == 1)."""
    params = ExpertParams()
    params.flock_size = 1
    params.n_cluster_centers = 28
    params.compute_reconstruction = True
    params.spatial.cluster_boost_threshold = 1000
    params.spatial.buffer_size = 500
    params.spatial.batch_size = 500
    params.spatial.learning_rate = 0.3
    params.spatial.learning_period = 50

    # Alternative expert types kept for experimentation:
    # conv_expert = ConvExpertFlockNode(params, name="Conv. expert")
    # conv_expert = SpatialPoolerFlockNode(params, name=" SP")
    conv_expert = ExpertFlockNode(params, name=" expert")
    self.add_node(conv_expert)

    # Add the leading flock dimension before the expert...
    unsqueeze_node_0 = UnsqueezeNode(0)
    self.add_node(unsqueeze_node_0)

    Connector.connect(join_node.outputs.output, unsqueeze_node_0.inputs.input)
    Connector.connect(unsqueeze_node_0.outputs.output, conv_expert.inputs.sp.data_input)

    def squeeze(inputs, outputs):
        # ...and drop it again after the expert.
        outputs[0].copy_(inputs[0].squeeze(0))

    # NOTE(review): reaches into ForkNode's private `_split_sizes` to size
    # the lambda output — consider exposing it publicly.
    squeeze_node = LambdaNode(squeeze, 1, [(sum(fork_node._split_sizes),)], name="squeeze lambda node")
    self.add_node(squeeze_node)

    Connector.connect(conv_expert.outputs.sp.predicted_reconstructed_input, squeeze_node.inputs[0])
    Connector.connect(squeeze_node.outputs[0], fork_node.inputs.input)
def test_inverse_projection(self, device):
    """SP inverse projection through an ExpertFlockNode: cluster activations
    are mapped back through hand-set cluster centers and reshaped to the
    original per-flock input size."""
    dtype = get_float(device)

    params = ExpertParams()
    params.flock_size = 2
    params.n_cluster_centers = 4
    params.spatial.input_size = 6
    params.spatial.buffer_size = 7
    params.spatial.batch_size = 3
    params.temporal.n_frequent_seqs = 2
    params.temporal.seq_length = 3

    # Per-flock input shape; 3 * 2 == spatial.input_size.
    input_size = (3, 2)

    graph = Topology(device)
    node = ExpertFlockNode(params=params)
    graph.add_node(node)

    input_block = MemoryBlock()
    input_block.tensor = torch.rand((params.flock_size, ) + input_size, dtype=dtype, device=device)
    Connector.connect(input_block, node.inputs.sp.data_input)

    graph.prepare()

    # Hand-set cluster centers so the projection is deterministic:
    # (flock_size, n_cluster_centers, input_size).
    node._unit.flock.sp_flock.cluster_centers = torch.tensor(
        [[[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 0.5, 0.5, 0, 0], [0, 0, 0.5, 0, 0.5, 0]],
         [[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0]]],
        dtype=dtype, device=device)

    # Just SP inverse projection
    data = torch.tensor([[0, 0, 1, 0], [0.2, 0.3, 0.4, 0.1]], dtype=dtype, device=device)

    packet = InversePassOutputPacket(data, node.outputs.tp.projection_outputs)
    projected = node.recursive_inverse_projection_from_output(packet)

    # The result of the projection itself would be [[0, 0, 0.5, 0.5, 0, 0], ...],
    # and it should be viewed as (2, 3, 2).
    expected_projection = torch.tensor(
        [[[0, 0], [0.5, 0.5], [0, 0]], [[0.2, 0.3], [0.4, 0.1], [0, 0]]],
        dtype=dtype, device=device)

    assert same(expected_projection, projected[0].tensor)
def test_validate_context_input_error_message(self, sp_data_input_shape, flock_size, context_size, context_input_shape, exception_message):
    """A mis-shaped context input must fail node validation with the
    expected error message."""
    expert_params = ExpertParams()
    expert_params.flock_size = flock_size
    expert_params.temporal.incoming_context_size = context_size

    node = ExpertFlockNode(expert_params)

    data_block = self.memory_block(torch.zeros(sp_data_input_shape))
    context_block = self.memory_block(torch.zeros(context_input_shape))
    Connector.connect(data_block, node.inputs.sp.data_input)
    Connector.connect(context_block, node.inputs.tp.context_input)

    with raises(NodeValidationException, match=exception_message):
        node.validate()
def __init__(self, params: BallEnvironmentParams, name: str = "BallEnvironment"):
    """Bouncing-ball environment: the ball bitmap feeds the `data` output;
    the shape label (or a NaN constant, selectable via a switch) feeds the
    `label` output."""
    super().__init__(params, name)

    bouncing_params = SimpleBouncingBallNodeParams(sx=params.env_size[0],
                                                  sy=params.env_size[1],
                                                  ball_radius=params.ball_radius,
                                                  ball_shapes=params.shapes,
                                                  dir_x=1,
                                                  dir_y=2,
                                                  noise_amplitude=params.noise_amplitude,
                                                  switch_next_shape_after=params.switch_shape_after,
                                                  random_position_direction_switch_after=params.random_position_direction_switch_after)

    ball_node = SimpleBouncingBallNode(bouncing_params)
    self.add_node(ball_node)
    self.ball_node = ball_node
    Connector.connect(ball_node.outputs.bitmap, self.outputs.data.input)

    # Switch selects between the real one-hot label (input 0) and a NaN
    # constant (input 1) — presumably used to hide labels; confirm callers.
    switch_node = SwitchNode(2)
    self.add_node(switch_node)
    self.switch_node = switch_node

    nan_node = ConstantNode(params.n_shapes, math.nan)
    self.add_node(nan_node)

    Connector.connect(ball_node.outputs.label_one_hot, switch_node.inputs[0])
    Connector.connect(nan_node.outputs.output, switch_node.inputs[1])
    Connector.connect(switch_node.outputs.output, self.outputs.label.input)
def __init__(self):
    """GridWorld topology: the egocentric image and last action are joined
    and fed to the agent (built by `_create_and_connect_agent`), whose
    reconstruction is forked back into image and action parts; actions come
    from a random generator routed through an ActionMonitorNode."""
    super().__init__("cuda")
    actions_descriptor = GridWorldActionDescriptor()
    node_action_monitor = ActionMonitorNode(actions_descriptor)

    grid_world_params = GridWorldParams('MapE')
    grid_world_params.tile_size = 3
    node_grid_world = GridWorldNode(grid_world_params)

    random_action_generator = RandomNumberNode(upper_bound=len(actions_descriptor.action_names()))
    join_node = JoinNode(flatten=True)

    # GridWorld sizes
    width = grid_world_params.egocentric_width * grid_world_params.tile_size
    height = grid_world_params.egocentric_height * grid_world_params.tile_size

    # Fork splits the flat agent output into the image part (width * height)
    # and 4 action components.
    fork_node = ForkNode(dim=0, split_sizes=[width * height, 4])

    self.add_node(node_grid_world)
    self.add_node(node_action_monitor)
    self.add_node(random_action_generator)
    self.add_node(join_node)
    self.add_node(fork_node)

    Connector.connect(node_grid_world.outputs.egocentric_image, join_node.inputs[0])
    Connector.connect(node_grid_world.outputs.output_action, join_node.inputs[1])

    self._create_and_connect_agent(join_node, fork_node)

    Connector.connect(random_action_generator.outputs.one_hot_output, node_action_monitor.inputs.action_in)
    Connector.connect(node_action_monitor.outputs.action_out, node_grid_world.inputs.agent_action)