Example #1
    def __init__(self):
        super().__init__(device='cuda')
        self._actions_descriptor = SpaceEngineersActionsDescriptor()
        self._se_config = SpaceEngineersConnectorConfig()

        self._node_se_connector = SpaceEngineersConnectorNode(
            self._actions_descriptor, self._se_config)
        self._node_action_monitor = ActionMonitorNode(self._actions_descriptor)

        self._blank_action = ConstantNode(
            shape=self._actions_descriptor.ACTION_COUNT, constant=0)
        self._blank_task_data = ConstantNode(
            shape=self._se_config.agent_to_task_buffer_size, constant=0)
        self._blank_task_control = ConstantNode(
            shape=self._se_config.TASK_CONTROL_SIZE, constant=0)

        self.add_node(self._node_se_connector)
        self.add_node(self._node_action_monitor)
        self.add_node(self._blank_action)
        self.add_node(self._blank_task_data)
        self.add_node(self._blank_task_control)

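        # wire constant zeros into the agent action (via the action monitor), task label, and task control inputs of the SE connector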
        Connector.connect(self._blank_action.outputs.output,
                          self._node_action_monitor.inputs.action_in)
        Connector.connect(self._node_action_monitor.outputs.action_out,
                          self._node_se_connector.inputs.agent_action)
        Connector.connect(self._blank_task_data.outputs.output,
                          self._node_se_connector.inputs.agent_to_task_label)
        Connector.connect(self._blank_task_control.outputs.output,
                          self._node_se_connector.inputs.task_control)
Example #2
    def __init__(self):
        super().__init__(device='cuda')

        self._se_config = SpaceEngineersConnectorConfig()
        self.SX = self._se_config.render_width
        self.SY = self._se_config.render_height

        # set up the params
        # SeToyArchDebugTopology.config_se_communication()
        expert_params = self.get_expert_params()

        # create the nodes
        self._actions_descriptor = SpaceEngineersActionsDescriptor()
        self._node_se_connector = SpaceEngineersConnectorNode(
            self._actions_descriptor, self._se_config)
        self._node_action_monitor = ActionMonitorNode(self._actions_descriptor)

        self._blank_action = ConstantNode(
            shape=self._actions_descriptor.ACTION_COUNT, constant=0)
        self._blank_task_data = ConstantNode(
            shape=self._se_config.agent_to_task_buffer_size, constant=0)
        self._blank_task_control = ConstantNode(
            shape=self._se_config.TASK_CONTROL_SIZE, constant=0)

        # parent_rf_dims = (self.lrf_width, self.lrf_height, 3)
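        # EOX and EOY are constants defined elsewhere; the receptive field splits
        # the (SY, SX, 3) image into patches of Size2D(SX // EOX, SY // EOY)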
        self._node_lrf = ReceptiveFieldNode((self.SY, self.SX, 3),
                                            Size2D(self.SX // EOX,
                                                   self.SY // EOY))

        self._node_flock = ExpertFlockNode(expert_params)

        self.add_node(self._node_se_connector)
        self.add_node(self._node_action_monitor)
        self.add_node(self._blank_action)
        self.add_node(self._blank_task_data)
        self.add_node(self._blank_task_control)
        self.add_node(self._node_flock)
        self.add_node(self._node_lrf)

        Connector.connect(self._blank_action.outputs.output,
                          self._node_action_monitor.inputs.action_in)
        Connector.connect(self._node_action_monitor.outputs.action_out,
                          self._node_se_connector.inputs.agent_action)

        Connector.connect(self._blank_task_data.outputs.output,
                          self._node_se_connector.inputs.agent_to_task_label)
        Connector.connect(self._blank_task_control.outputs.output,
                          self._node_se_connector.inputs.task_control)

        # SE -> Flock (no LRF)
        # Connector.connect(self._node_se_connector.outputs.image_output,
        # self._node_flock.inputs.sp.data_input)

        # Image -> LRF -> Flock
        Connector.connect(self._node_se_connector.outputs.image_output,
                          self._node_lrf.inputs[0])
        Connector.connect(self._node_lrf.outputs[0],
                          self._node_flock.inputs.sp.data_input)
Example #3
    def __init__(self, params: BallEnvironmentParams, name: str = "BallEnvironment"):
        super().__init__(params, name)

        bouncing_params = SimpleBouncingBallNodeParams(sx=params.env_size[0],
                                                       sy=params.env_size[1],
                                                       ball_radius=params.ball_radius,
                                                       ball_shapes=params.shapes,
                                                       dir_x=1,
                                                       dir_y=2,
                                                       noise_amplitude=params.noise_amplitude,
                                                       switch_next_shape_after=params.switch_shape_after,
                                                       random_position_direction_switch_after=
                                                       params.random_position_direction_switch_after
                                                       )

        ball_node = SimpleBouncingBallNode(bouncing_params)

        self.add_node(ball_node)
        self.ball_node = ball_node

        Connector.connect(ball_node.outputs.bitmap, self.outputs.data.input)

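        # the label output is taken from a switch that selects between the real one-hot label and a NaN constant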
        switch_node = SwitchNode(2)
        self.add_node(switch_node)
        self.switch_node = switch_node

        nan_node = ConstantNode(params.n_shapes, math.nan)
        self.add_node(nan_node)

        Connector.connect(ball_node.outputs.label_one_hot, switch_node.inputs[0])
        Connector.connect(nan_node.outputs.output, switch_node.inputs[1])
        Connector.connect(switch_node.outputs.output, self.outputs.label.input)
Example #4
    def __init__(self,
                 num_predictors: int,
                 learning_rate: float = 0.05,
                 hidden_size: int = 10,
                 name: str = "GateNetworkGroup"):

        super().__init__(name,
                         inputs=GateGroupInputs(self),
                         outputs=GateGroupOutputs(self))

        # gate
        g_node_params = NetworkFlockNodeParams()
        g_node_params.flock_size = 1
        g_node_params.do_delay_coefficients = False
        g_node_params.do_delay_input = True
        g_node_params.learning_period = 20
        g_node_params.buffer_size = 1000
        g_node_params.batch_size = 900

        g_network_params = NeuralNetworkFlockParams()
        g_network_params.input_size = 1  # this should be determined automatically from the input shape
        g_network_params.mini_batch_size = 100
        g_network_params.hidden_size = hidden_size
        g_network_params.output_size = num_predictors  # might be determined from the input target size
        g_network_params.output_activation = OutputActivation.SOFTMAX
        g_network_params.learning_rate = learning_rate

        # gate itself
        self.gate = NetworkFlockNode(node_params=g_node_params,
                                     network_params=g_network_params,
                                     name="Gate Network")
        self.add_node(self.gate)

        # const
        learning_constant = ConstantNode([1], 1)
        self.add_node(learning_constant)

        # squeeze
        squeeze_node = SqueezeNode(0)
        self.add_node(squeeze_node)

        Connector.connect(learning_constant.outputs.output,
                          self.gate.inputs.learning_coefficients)
        Connector.connect(self.inputs.data.output, self.gate.inputs.input_data)
        Connector.connect(self.inputs.targets.output,
                          self.gate.inputs.target_data)

        Connector.connect(self.gate.outputs.prediction_output,
                          squeeze_node.inputs[0])
        Connector.connect(squeeze_node.outputs[0], self.outputs.outputs.input)
Example #5
    def __init__(self):
        super().__init__('cuda')

        noise_amplitude: float = 0
        env_size: Tuple[int, int] = (27, 27)
        ball_radius: int = 5
        switch_shape_after = 200

        sp_n_cluster_centers = 200  # free parameter

        ball_env_params = BallEnvironmentParams(
            switch_shape_after=switch_shape_after,
            noise_amplitude=noise_amplitude,
            ball_radius=ball_radius,
            env_size=env_size)

        ball_env = BallEnvironment(ball_env_params)
        self.add_node(ball_env)
        self.ball_env = ball_env

        # topmost layer
        ep_sp = ExpertParams()
        ep_sp.flock_size = 1
        ep_sp.n_cluster_centers = sp_n_cluster_centers
        sp_reconstruction_layer = SpReconstructionLayer(
            env_size[0] * env_size[1],
            ball_env_params.n_shapes,
            sp_params=ep_sp,
            name="L0")
        self.add_node(sp_reconstruction_layer)
        self.sp_reconstruction_layer = sp_reconstruction_layer

        switch_node = SwitchNode(2)
        self.add_node(switch_node)
        self.switch_node = switch_node

        nan_node = ConstantNode(ball_env_params.n_shapes, math.nan)
        self.add_node(nan_node)

        Connector.connect(ball_env.outputs.data,
                          sp_reconstruction_layer.inputs.data)

        Connector.connect(ball_env.outputs.label, switch_node.inputs[0])
        Connector.connect(nan_node.outputs.output, switch_node.inputs[1])
        Connector.connect(switch_node.outputs.output,
                          sp_reconstruction_layer.inputs.label)

        self.is_training = True
Example #6
    def __init__(self):
        super().__init__("cuda")

        images_dataset_params = ImagesDatasetParams()
        images_dataset_params.images_path = os.path.join(
            'data', 'datasets', 'image_datasets', 'landmark_world')
        node_images_dataset = ImagesDatasetNode(images_dataset_params)

        # GridWorld sizes
        # egocentric
        width = 160
        height = 95
        fov_size = 16
        fov_half_size = fov_size // 2

        self.add_node(node_images_dataset)

        # FocusNode extracts a fov_size x fov_size crop from the dataset image
        focus_node = FocusNode()
        focus_node._params.trim_output = True
        focus_node._params.trim_output_size = fov_size
        self.add_node(focus_node)

        rnx = RandomNumberNode(upper_bound=width - fov_size)
        self.add_node(rnx)
        rny = RandomNumberNode(upper_bound=height - fov_size)
        self.add_node(rny)
        constant_node = ConstantNode(shape=(2, 1), constant=fov_size)
        self.add_node(constant_node)

        join_node = JoinNode(n_inputs=3, flatten=True)
        self.add_node(join_node)

        # create FOV position and shape: [y, x, fov_size, fov_size]
        Connector.connect(rny.outputs.scalar_output, join_node.inputs[0])
        Connector.connect(rnx.outputs.scalar_output, join_node.inputs[1])
        Connector.connect(constant_node.outputs.output, join_node.inputs[2])

        Connector.connect(join_node.outputs.output,
                          focus_node.inputs.coordinates)
        Connector.connect(node_images_dataset.outputs.output_image,
                          focus_node.inputs.input_image)

        self._create_and_connect_agent(focus_node.outputs.focus_output,
                                       (fov_size, fov_size, 3))
Example #7
    def _install_baselines(self, flock_layers: List[ExpertFlockNode],
                           baseline_seed: int):
        """For each layer in the topology installs own random baseline with a corresponding output size."""
        self.baselines = []
        for layer in flock_layers:
            output_dimension = FlockNodeAccessor.get_sp_output_size(layer)

            node = RandomNumberNode(upper_bound=output_dimension,
                                    seed=baseline_seed)
            self.add_node(node)
            self.baselines.append(node)

        # baseline for the labels separately
        self.label_baseline = ConstantNode(shape=self.se_io.get_num_labels(),
                                           constant=0,
                                           name='label_const')
        self.random_label_baseline = RandomNumberNode(
            upper_bound=self.se_io.get_num_labels(), seed=baseline_seed)

        self.add_node(self.label_baseline)
        self.add_node(self.random_label_baseline)
Example #8
    def __init__(self,
                 node_group1: NodeGroupStubBase,
                 node_group2: NodeGroupStubBase,
                 device: str = 'cpu'):
        super().__init__(device)
        self.source = ConstantNode((2, 2), constant=42)
        self.node_group1 = node_group1
        self.node_group2 = node_group2
        self.sink = LambdaNode(
            lambda i, o: torch.add(input=i[0], other=i[1], out=o[0]),
            n_inputs=2,
            output_shapes=[(2, 2)])

        self.add_node(self.source)
        self.add_node(self.node_group1)
        self.add_node(self.node_group2)
        self.add_node(self.sink)

        Connector.connect(self.source.outputs.output,
                          self.node_group1.inputs.input)
        Connector.connect(self.source.outputs.output,
                          self.node_group2.inputs.input)
        Connector.connect(self.node_group1.outputs.output, self.sink.inputs[0])
        Connector.connect(self.node_group2.outputs.output, self.sink.inputs[1])
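
The sink above is a LambdaNode whose callable receives the list of input tensors and the list of pre-allocated output tensors and writes its result into the latter. A minimal standalone sketch of what that callable computes, in plain PyTorch (illustrative values; it assumes both node groups pass the (2, 2) constant through unchanged):

import torch

i = [torch.full((2, 2), 42.0), torch.full((2, 2), 42.0)]  # outputs of the two node groups
o = [torch.empty(2, 2)]                                    # output buffer owned by the LambdaNode
torch.add(input=i[0], other=i[1], out=o[0])                # the same call the sink lambda makes
print(o[0])                                                # 2x2 tensor filled with 84.0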
Example #9
    def __init__(self):
        super().__init__(device='cuda')
        actions_descriptor = GridWorldActionDescriptor()
        node_action_monitor = ActionMonitorNode(actions_descriptor)

        params = GridWorldParams(map_name='MapE')
        noise_params = RandomNoiseParams(amplitude=0.0001)
        node_grid_world = GridWorldNode(params)
        expert_params = ExpertParams()
        unsqueeze_node = UnsqueezeNode(dim=0)
        noise_node = RandomNoiseNode(noise_params)
        constant_node = ConstantNode(shape=(1, 1, 3, 48))
        one_hot_node = ToOneHotNode()

        def context(inputs, outputs):
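            # blank the last 24 columns of all but the first row of the context with NaN before passing it on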
            con = inputs[0]
            con[:, :, 1:, 24:] = float('nan')
            outputs[0].copy_(con)

        def f(inputs, outputs):
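            # take the first four elements of the last row of the predicted reconstruction and offset them by SMALL_CONSTANT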
            probs = inputs[0]
            outputs[0].copy_(probs[0, -1, :4] + SMALL_CONSTANT)

        action_parser = LambdaNode(func=f, n_inputs=1, output_shapes=[(4,)])
        context_assembler = LambdaNode(func=context, n_inputs=1, output_shapes=[(1, 1, 3, 48)])

        expert_params.flock_size = 1
        expert_params.n_cluster_centers = 24
        expert_params.produce_actions = True
        expert_params.temporal.seq_length = 9
        expert_params.temporal.seq_lookahead = 7
        expert_params.temporal.n_frequent_seqs = 700
        expert_params.temporal.max_encountered_seqs = 1000
        expert_params.temporal.exploration_probability = 0.01
        expert_params.temporal.batch_size = 200
        expert_params.temporal.own_rewards_weight = 20
        expert_params.temporal.incoming_context_size = 48

        expert_params.compute_reconstruction = True

        #expert_node = ConvExpertFlockNode(expert_params)
        expert_node = ExpertFlockNode(expert_params)

        self.add_node(node_grid_world)
        self.add_node(node_action_monitor)
        self.add_node(expert_node)
        self.add_node(unsqueeze_node)
        self.add_node(action_parser)
        self.add_node(noise_node)
        self.add_node(constant_node)
        self.add_node(context_assembler)
        self.add_node(one_hot_node)

        Connector.connect(node_grid_world.outputs.egocentric_image_action, noise_node.inputs.input)
        Connector.connect(noise_node.outputs.output, unsqueeze_node.inputs.input)
        Connector.connect(unsqueeze_node.outputs.output, expert_node.inputs.sp.data_input)
        Connector.connect(node_grid_world.outputs.reward, expert_node.inputs.tp.reward_input)

        Connector.connect(constant_node.outputs.output, context_assembler.inputs[0])
        Connector.connect(context_assembler.outputs[0], expert_node.inputs.tp.context_input)

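        # reconstruction -> action probabilities -> one-hot action -> action monitor -> grid world (backward edge closes the loop)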
        Connector.connect(expert_node.outputs.sp.predicted_reconstructed_input, action_parser.inputs[0])
        Connector.connect(action_parser.outputs[0], one_hot_node.inputs.input)
        Connector.connect(one_hot_node.outputs.output, node_action_monitor.inputs.action_in)
        Connector.connect(node_action_monitor.outputs.action_out, node_grid_world.inputs.agent_action, is_backward=True)
Example #10
    def __init__(self,
                 num_predictors,
                 spatial_input_size,
                 predicted_input_size,
                 name: str = "NNGateNodeGroup"):
        super().__init__(name,
                         inputs=GateInputs(self),
                         outputs=GateOutputs(self))

        # join input and label
        inputs_join_node = JoinNode(flatten=True)
        self.add_node(inputs_join_node)

        # gate
        g_node_params = NetworkFlockNodeParams()
        g_node_params.flock_size = 1
        g_node_params.do_delay_coefficients = False
        g_node_params.do_delay_input = True
        g_node_params.learning_period = 20
        g_node_params.buffer_size = 1000
        g_node_params.batch_size = 900

        g_network_params = NeuralNetworkFlockParams()
        g_network_params.input_size = spatial_input_size + predicted_input_size
        g_network_params.mini_batch_size = 100
        g_network_params.hidden_size = g_network_params.input_size * 2
        g_network_params.output_size = num_predictors
        g_network_params.output_activation = OutputActivation.SOFTMAX
        g_network_params.learning_rate = 0.05

        gate = NetworkFlockNode(node_params=g_node_params,
                                network_params=g_network_params,
                                name="gate")
        self.gate = gate
        self.add_node(gate)

        # const
        learning_constant = ConstantNode([1], 1)
        self.add_node(learning_constant)

        # squeeze
        squeeze_node = SqueezeNode(0)
        self.add_node(squeeze_node)

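        # to_one_hot is a helper defined elsewhere in this file; it presumably converts its input
        # into a one-hot vector of the given size before the spatial and predicted inputs are joined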
        to_one_hot_lambda_node_p = LambdaNode(
            partial(to_one_hot, predicted_input_size), 1,
            [(predicted_input_size, )])
        self.add_node(to_one_hot_lambda_node_p)

        to_one_hot_lambda_node_s = LambdaNode(
            partial(to_one_hot, spatial_input_size), 1,
            [(spatial_input_size, )])
        self.add_node(to_one_hot_lambda_node_s)

        # connections
        Connector.connect(self.inputs.data.output,
                          to_one_hot_lambda_node_s.inputs[0])
        Connector.connect(to_one_hot_lambda_node_s.outputs[0],
                          inputs_join_node.inputs[0])
        Connector.connect(self.inputs.data_predicted.output,
                          to_one_hot_lambda_node_p.inputs[0])
        Connector.connect(to_one_hot_lambda_node_p.outputs[0],
                          inputs_join_node.inputs[1])

        Connector.connect(learning_constant.outputs.output,
                          gate.inputs.learning_coefficients)
        Connector.connect(inputs_join_node.outputs.output,
                          gate.inputs.input_data)
        Connector.connect(self.inputs.predictor_activations_target.output,
                          gate.inputs.target_data)

        Connector.connect(gate.outputs.prediction_output,
                          squeeze_node.inputs[0])
        Connector.connect(squeeze_node.outputs[0],
                          self.outputs.gate_activations.input)
Example #11
    def __init__(self, curriculum: tuple = (1, -1)):
        super().__init__()

        se_config = SpaceEngineersConnectorConfig()
        se_config.render_width = 16
        se_config.render_height = 16
        se_config.curriculum = list(curriculum)

        base_expert_params = ExpertParams()
        base_expert_params.flock_size = 1
        base_expert_params.n_cluster_centers = 100
        base_expert_params.compute_reconstruction = False
        base_expert_params.spatial.cluster_boost_threshold = 1000
        base_expert_params.spatial.learning_rate = 0.2
        base_expert_params.spatial.batch_size = 1000
        base_expert_params.spatial.buffer_size = 1010
        base_expert_params.spatial.learning_period = 100

        base_expert_params.temporal.batch_size = 1000
        base_expert_params.temporal.buffer_size = 1010
        base_expert_params.temporal.learning_period = 200
        base_expert_params.temporal.forgetting_limit = 20000

        # parent_expert_params = ExpertParams()
        # parent_expert_params.flock_size = 1
        # parent_expert_params.n_cluster_centers = 20
        # parent_expert_params.compute_reconstruction = True
        # parent_expert_params.temporal.exploration_probability = 0.9
        # parent_expert_params.spatial.cluster_boost_threshold = 1000
        # parent_expert_params.spatial.learning_rate = 0.2
        # parent_expert_params.spatial.batch_size = 1000
        # parent_expert_params.spatial.buffer_size = 1010
        # parent_expert_params.spatial.learning_period = 100
        # parent_expert_params.temporal.context_without_rewards_size = se_config.LOCATION_SIZE_ONE_HOT

        # SE nodes
        actions_descriptor = SpaceEngineersActionsDescriptor()
        node_se_connector = SpaceEngineersConnectorNode(
            actions_descriptor, se_config)
        node_action_monitor = ActionMonitorNode(actions_descriptor)

        # flock-related nodes
        flock_node = ExpertFlockNode(base_expert_params)
        blank_task_control = ConstantNode((se_config.TASK_CONTROL_SIZE, ))
        blank_task_labels = ConstantNode((20, ))

        # parent_flock_node = ExpertFlockNode(parent_expert_params)

        join_node = JoinNode(flatten=True)

        actions = ['FORWARD', 'BACKWARD', 'LEFT', 'RIGHT']
        action_count = len(actions)

        pass_actions_node = PassNode(output_shape=(action_count, ),
                                     name="pass actions")
        fork_node = ForkNode(
            0, [base_expert_params.n_cluster_centers, action_count])

        def squeeze(inputs, outputs):
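            # drop the singleton dimensions (e.g. the flock dimension) from the input before copying it out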
            outputs[0].copy_(inputs[0].squeeze())

        squeeze_node = LambdaNode(
            squeeze,
            1, [(base_expert_params.n_cluster_centers + action_count, )],
            name="squeeze lambda node")

        def stack_and_unsqueeze(inputs, outputs):
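            # stack the two location one-hot inputs and add a leading (flock) dimension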
            outputs[0].copy_(torch.stack([inputs[0], inputs[1]]).unsqueeze(0))

        stack_unsqueeze_node = LambdaNode(
            stack_and_unsqueeze,
            2, [(1, 2, se_config.LOCATION_SIZE_ONE_HOT)],
            name="stack and unsqueeze node")

        to_one_hot_node = ToOneHotNode()

        action_parser_node = AgentActionsParserNode(actions_descriptor,
                                                    actions)

        random_node = RandomNumberNode(0,
                                       action_count,
                                       name="random action generator",
                                       generate_new_every_n=5,
                                       randomize_intervals=True)

        switch_node = SwitchNode(2)

        # add nodes to the graph
        self.add_node(flock_node)
        # self.add_node(parent_flock_node)
        self.add_node(node_se_connector)
        self.add_node(node_action_monitor)
        self.add_node(blank_task_control)
        self.add_node(blank_task_labels)
        # self.add_node(join_node)
        # self.add_node(fork_node)
        # self.add_node(pass_actions_node)
        # self.add_node(squeeze_node)
        # self.add_node(to_one_hot_node)
        # self.add_node(stack_unsqueeze_node)
        self.add_node(action_parser_node)
        self.add_node(random_node)
        # self.add_node(switch_node)

        # first layer
        Connector.connect(node_se_connector.outputs.image_output,
                          flock_node.inputs.sp.data_input)

        # Connector.connect(
        #     flock_node.outputs.tp.projection_outputs,
        #     join_node.inputs[0]
        # )
        # Connector.connect(
        #     pass_actions_node.outputs.output,
        #     join_node.inputs[1]
        # )

        # # second layer
        # Connector.connect(
        #     join_node.outputs.output,
        #     parent_flock_node.inputs.sp.data_input
        # )

        # Connector.connect(
        #     node_se_connector.outputs.task_to_agent_location_one_hot,
        #     stack_unsqueeze_node.inputs[0]
        # )
        # Connector.connect(
        #     node_se_connector.outputs.task_to_agent_location_target_one_hot,
        #     stack_unsqueeze_node.inputs[1]
        # )
        # Connector.connect(
        #     stack_unsqueeze_node.outputs[0],
        #     parent_flock_node.inputs.tp.context_input
        # )
        #
        # # actions
        # Connector.connect(
        #     parent_flock_node.outputs.sp.predicted_reconstructed_input,
        #     squeeze_node.inputs[0]
        # )
        # Connector.connect(
        #     squeeze_node.outputs[0],
        #     fork_node.inputs.input
        # )
        # Connector.connect(
        #     fork_node.outputs[1],
        #     to_one_hot_node.inputs.input
        # )
        # Connector.connect(
        #     random_node.outputs.one_hot_output,
        #     switch_node.inputs[0]
        # )
        # Connector.connect(
        #     to_one_hot_node.outputs.output,
        #     switch_node.inputs[1]
        # )
        # Connector.connect(
        #     switch_node.outputs.output,
        #     action_parser_node.inputs.input
        # )
        # directly use random exploration
        Connector.connect(random_node.outputs.one_hot_output,
                          action_parser_node.inputs.input)

        Connector.connect(action_parser_node.outputs.output,
                          node_action_monitor.inputs.action_in)
        # Connector.connect(
        #     switch_node.outputs.output,
        #     pass_actions_node.inputs.input,
        #     is_low_priority=True
        # )
        Connector.connect(
            node_action_monitor.outputs.action_out,
            node_se_connector.inputs.agent_action,
            # is_low_priority=True
            is_backward=False)

        # blank connection
        Connector.connect(blank_task_control.outputs.output,
                          node_se_connector.inputs.task_control)
        Connector.connect(blank_task_labels.outputs.output,
                          node_se_connector.inputs.agent_to_task_label)

        # Save the SE connector so we can check testing/training phase.
        # When the se_io interface has been added, this can be removed.
        self._node_se_connector = node_se_connector
Example #12
    def __init__(self):
        super().__init__(device='cuda')

        params1 = ExpertParams()
        params1.flock_size = 1
        params1.n_cluster_centers = 10
        params1.spatial.buffer_size = 100
        params1.temporal.buffer_size = 100
        params1.temporal.incoming_context_size = 9
        params1.temporal.n_providers = 2
        params1.spatial.batch_size = 50
        params1.temporal.batch_size = 50
        params1.spatial.input_size = 28 * 28

        params2 = ExpertParams()
        params2.flock_size = 1
        params2.n_cluster_centers = 9
        params2.spatial.buffer_size = 100
        params2.temporal.buffer_size = 100
        params2.temporal.incoming_context_size = 5
        params2.temporal.n_providers = 2
        params2.spatial.batch_size = 50
        params2.temporal.batch_size = 50
        params2.spatial.input_size = params1.n_cluster_centers

        mnist_seq_params = DatasetSequenceMNISTNodeParams([[0, 1, 2],
                                                           [3, 1, 4]])
        mnist_params = DatasetMNISTParams(class_filter=[0, 1, 2, 3, 4],
                                          one_hot_labels=False,
                                          examples_per_class=1)
        mnist_node = DatasetSequenceMNISTNode(params=mnist_params,
                                              seq_params=mnist_seq_params)

        zero_context = ConstantNode(
            shape=(params2.flock_size, params2.temporal.n_providers,
                   NUMBER_OF_CONTEXT_TYPES,
                   params2.temporal.incoming_context_size),
            constant=0)

        node1 = ExpertFlockNode(params1)
        node2 = ExpertFlockNode(params2)

        self.add_node(mnist_node)
        self.add_node(node1)
        self.add_node(node2)
        self.add_node(zero_context)

        unsqueeze_node_0 = UnsqueezeNode(0)
        self.add_node(unsqueeze_node_0)

        Connector.connect(mnist_node.outputs.data,
                          unsqueeze_node_0.inputs.input)
        Connector.connect(unsqueeze_node_0.outputs.output,
                          node1.inputs.sp.data_input)
        Connector.connect(node1.outputs.tp.projection_outputs,
                          node2.inputs.sp.data_input)
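        # top-down context: node2 feeds its context back to node1 over a backward edge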
        Connector.connect(node2.outputs.output_context,
                          node1.inputs.tp.context_input,
                          is_backward=True)
        Connector.connect(zero_context.outputs.output,
                          node2.inputs.tp.context_input)
Example #13
    def __init__(
        self,
        baseline_seed: int = None,
        layer_sizes: List[int] = (100, 100),
        class_filter: List[int] = None,
        image_size=SeDatasetSize.SIZE_24,
        random_order: bool = False,
        noise_amp: float = 0.0,
        use_se: bool = False  # True: use a running instance of SE for getting data; False: use a dataset
    ):
        """

        Args:
            baseline_seed:
            layer_sizes:
            class_filter:
            image_size:
            random_order:
            noise_amp:  in case the noise_amp is > 0, superpose noise with mean 0 and variance=noise_amp to the image
        """
        super().__init__("Task 0 - SeNodeGroup",
                         outputs=Task0BaseGroupOutputs(self))

        self.use_se = use_se
        if use_se:
            self._se_io = SeIoGeneral()
            self._se_io.se_config.render_width = image_size.value
            self._se_io.se_config.render_height = image_size.value
        else:
            # dataset and params
            params = DatasetSeObjectsParams()
            params.dataset_config = DatasetConfig.TRAIN_ONLY
            params.dataset_size = image_size
            params.class_filter = class_filter
            params.random_order = random_order
            params.seed = baseline_seed
            self._se_io = SeIoTask0Dataset(params)

        self._se_io.install_nodes(self)

        if use_se:
            blank_task_control = ConstantNode(
                (self._se_io.se_config.TASK_CONTROL_SIZE, ))

            actions_node = ConstantNode((4, ), name="actions")

            blank_task_labels = ConstantNode((20, ), name="labels")

            self.add_node(blank_task_control)
            self.add_node(actions_node)
            self.add_node(blank_task_labels)

            Connector.connect(blank_task_control.outputs.output,
                              self._se_io.inputs.task_control)
            Connector.connect(actions_node.outputs.output,
                              self._se_io.inputs.agent_action)
            Connector.connect(blank_task_labels.outputs.output,
                              self._se_io.inputs.agent_to_task_label)

        # baselines for each layer
        self._baselines = []
        for layer_size in layer_sizes:

            node = RandomNumberNode(upper_bound=layer_size, seed=baseline_seed)
            self.add_node(node)
            self._baselines.append(node)

        # baseline for the labels separately
        self._label_baseline = ConstantNode(shape=self._se_io.get_num_labels(),
                                            constant=0,
                                            name='label_const')
        self._random_label_baseline = RandomNumberNode(
            upper_bound=self._se_io.get_num_labels(), seed=baseline_seed)

        self.add_node(self._label_baseline)
        self.add_node(self._random_label_baseline)

        if noise_amp > 0.0:
            # add the noise to the output image?
            _random_noise_params = RandomNoiseParams()
            _random_noise_params.distribution = 'Normal'
            _random_noise_params.amplitude = noise_amp
            self._random_noise = RandomNoiseNode(_random_noise_params)
            self.add_node(self._random_noise)
            Connector.connect(self._se_io.outputs.image_output,
                              self._random_noise.inputs.input)
            Connector.connect(self._random_noise.outputs.output,
                              self.outputs.image.input)
        else:
            Connector.connect(self._se_io.outputs.image_output,
                              self.outputs.image.input)

        Connector.connect(self._se_io.outputs.task_to_agent_label,
                          self.outputs.labels.input)
Example #14
    def __init__(self,
                 num_labels: int,
                 buffer_s: int,
                 batch_s: int,
                 model_seed: int,
                 lr: float,
                 num_epochs: int,
                 image_size=SeDatasetSize.SIZE_24,
                 num_channels=3):
        """
        Initialize the node group containing the NN used as a baseline for Task0
        Args:
            num_labels: number of labels in the dataset (20 for Task0)
            buffer_s: size of the observation buffer
            batch_s: training batch size
            model_seed: seed used for deterministic experiments
            lr: learning rate
            num_epochs: number of training epochs
            image_size: size of the image, 24 by default (the input is then 24*24*3)
            num_channels: number of image channels
        """
        super().__init__("Task 0 - NN Model", inputs=ClassificationTaskInputs(self))

        # output layer size
        self._num_labels = num_labels  # ideally, the network would configure its output size from this
        img_size = image_size.value  # input size is 3 * img_size **2

        kwargs = {'lr': lr,
                  'buffer_size': buffer_s,
                  'batch_size': batch_s,
                  'seed': model_seed,
                  'input_shape': (num_channels, img_size, img_size),  # note: this is correct (see node.step())
                  'output_size': self._num_labels,
                  'num_epochs': num_epochs}

        # set topology params and configs
        self._params = NNetParams(NNetParams.default_params())
        self._params.set_params(_nn_node_params)  # params defined in this file
        self._params.set_params(kwargs)  # params defined in the constructor

        # observation storage params
        self._observation_types = {
            'x': (self._params.buffer_size, *self._params.input_shape),  # observations
            'y': (self._params.buffer_size, self._params.output_size),  # labels
        }

        # data storage (kept on the CPU in mixed mode, while the network runs on CUDA)
        self._storage = ObservationStorage(
            self._params.buffer_size,
            self._observation_types)
        self._storage.to('cpu' if self._params.mixed_mode else self.device)

        # the global seeds need to be set before the network is created (outside of the node in this case)
        set_global_seeds(seed=self._params.seed)

        # neural network setup
        self._network = NNet(
            input_shape=self._params.input_shape,
            output_shape=self._params.output_size
        ).to('cuda' if self._params.mixed_mode else self.device)

        # neural net optimizer
        self._optimizer = optim.Adam(self._network.parameters(),
                                     lr=self._params.lr)

        # NNet Node
        self._nnet_node = NNetNode(
            self._network,
            self._optimizer,
            self._storage,
            self._params,
            name='Neural Network Node')

        self.add_node(self._nnet_node)

        # connect the input of the network
        Connector.connect(
            self.inputs.image.output,
            self._nnet_node.inputs.input
        )
        # source of targets for learning here
        Connector.connect(
            self.inputs.label.output,
            self._nnet_node.inputs.label
        )

        # switching train/test is done by input
        self._constant_zero = ConstantNode([1], constant=0, name="zero")
        self._constant_one = ConstantNode([1], constant=1, name="one")
        self._switch_node = SwitchNode(2)  # outputs 1 if is_testing

        self.add_node(self._constant_zero)
        self.add_node(self._constant_one)
        self.add_node(self._switch_node)

        Connector.connect(
            self._constant_zero.outputs.output,
            self._switch_node.inputs[0]
        )
        Connector.connect(
            self._constant_one.outputs.output,
            self._switch_node.inputs[1]
        )
        Connector.connect(
            self._switch_node.outputs.output,
            self._nnet_node.inputs.testing_phase
        )

        self._is_training = True