    def __init__(self, obs_shapes: Dict[str, Sequence[int]],
                 action_logits_shapes: Dict[str, Sequence[int]],
                 non_lin: Union[str,
                                type(nn.Module)], hidden_units: List[int]):
        nn.Module.__init__(self)
        CustomComplexLatentNet.__init__(self, obs_shapes, non_lin,
                                        hidden_units)

        # build action heads
        for action_key, action_shape in action_logits_shapes.items():
            self.perception_dict[action_key] = LinearOutputBlock(
                in_keys='latent',
                out_keys=action_key,
                in_shapes=self.perception_dict['latent'].out_shapes(),
                output_units=int(np.prod(action_shape)))

        # build inference block
        in_keys = list(self.obs_shapes.keys())
        # Specifically add 'latent_screen' as an out_key to the network, so it will get returned when calling the
        #   forward method and can be reused by the critic network.
        out_keys = list(action_logits_shapes.keys()) + ['latent_screen']
        self.perception_net = InferenceBlock(
            in_keys=in_keys,
            out_keys=out_keys,
            perception_blocks=self.perception_dict,
            in_shapes=[self.obs_shapes[key] for key in in_keys])

        # apply weight init
        self.perception_net.apply(make_module_init_normc(1.0))
        for action_key in action_logits_shapes.keys():
            self.perception_dict[action_key].apply(
                make_module_init_normc(0.01))
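Note that each head's output_units is the flattened size of its logits shape, so even multi-dimensional logits collapse into a single linear layer, and the normc init with std 0.01 on the heads (vs. 1.0 for the rest of the net) keeps the initial logits close to zero. A quick illustrative check of the flattening (the shape is an assumption, not taken from the snippet above):

import numpy as np

# an action head with logits shape (4, 5) gets a linear layer with 4 * 5 = 20 output units
assert int(np.prod((4, 5))) == 20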

Example #2
    def __init__(self, obs_shapes: Dict[str, Sequence[int]],
                 head_units: List[int], non_lin: nn.Module):
        super().__init__()

        self.perception_dict: Dict[str, PerceptionBlock] = dict()

        # build perception part
        self.perception_dict["head"] = DenseBlock(
            in_keys="latent",
            out_keys="head",
            in_shapes=obs_shapes["latent"],
            hidden_units=head_units,
            non_lin=non_lin)

        self.perception_dict["value"] = LinearOutputBlock(
            in_keys="head",
            out_keys="value",
            in_shapes=self.perception_dict["head"].out_shapes(),
            output_units=1)

        self.perception_dict['head'].apply(make_module_init_normc(std=1.0))
        self.perception_dict["value"].apply(make_module_init_normc(std=0.01))

        # compile inference model
        self.net = InferenceBlock(in_keys=list(obs_shapes.keys()),
                                  out_keys="value",
                                  in_shapes=list(obs_shapes.values()),
                                  perception_blocks=self.perception_dict)
Example #3
    def __init__(self, obs_shapes: Dict[str, Sequence[int]], non_lin: Union[str, type(nn.Module)]):
        nn.Module.__init__(self)
        self.obs_shapes = obs_shapes

        hidden_units = 32

        self.perception_dict = OrderedDict()

        self.perception_dict['order_feat'] = DenseBlock(
            in_keys='ordered_piece', out_keys='order_feat', in_shapes=self.obs_shapes['ordered_piece'],
            hidden_units=[hidden_units], non_lin=non_lin)

        self.perception_dict['selected_feat'] = DenseBlock(
            in_keys='selected_piece', out_keys='selected_feat', in_shapes=self.obs_shapes['selected_piece'],
            hidden_units=[hidden_units], non_lin=non_lin)

        self.perception_dict['latent'] = ConcatenationBlock(
            in_keys=['order_feat', 'selected_feat'], out_keys='latent',
            in_shapes=[[hidden_units], [hidden_units]], concat_dim=-1)

        self.perception_dict['value'] = LinearOutputBlock(
            in_keys='latent', out_keys='value', in_shapes=self.perception_dict['latent'].out_shapes(), output_units=1)

        in_keys = ['ordered_piece', 'selected_piece']
        self.perception_net = InferenceBlock(
            in_keys=in_keys, out_keys='value',
            in_shapes=[self.obs_shapes[key] for key in in_keys],
            perception_blocks=self.perception_dict)

        # initialize model weights
        self.perception_net.apply(make_module_init_normc(1.0))
        self.perception_dict['value'].apply(make_module_init_normc(0.01))
Example #4
    def __init__(self, obs_shapes: Dict[str, Sequence[int]], action_logits_shapes: Dict[str, Sequence[int]],
                 non_lin: type(nn.Module)):
        super().__init__(obs_shapes, non_lin)

        for action_head_name in action_logits_shapes.keys():
            head_hidden_units = [lambda out_shape: out_shape[0] * 5,
                                 lambda out_shape: out_shape[0] * 2,
                                 lambda out_shape: out_shape[0]]
            head_hidden_units = [func(action_logits_shapes[action_head_name]) for func in head_hidden_units]

            self.perception_dict[f'{action_head_name}_net'] = DenseBlock(
                in_keys='hidden_out', in_shapes=self.perception_dict['hidden_out'].out_shapes(),
                out_keys=f'{action_head_name}_net', hidden_units=head_hidden_units[:-1], non_lin=non_lin)

            self.perception_dict[f'{action_head_name}'] = LinearOutputBlock(
                in_keys=f'{action_head_name}_net',
                in_shapes=self.perception_dict[f'{action_head_name}_net'].out_shapes(),
                out_keys=action_head_name, output_units=head_hidden_units[-1]
            )

        # Set up inference block
        self.perception_net = InferenceBlock(
            in_keys=list(self.obs_shapes.keys()), out_keys=list(action_logits_shapes.keys()),
            in_shapes=[self.obs_shapes[key] for key in self.obs_shapes.keys()],
            perception_blocks=self.perception_dict)

        self.perception_net.apply(make_module_init_normc(1.0))
        for action_head_name in action_logits_shapes.keys():
            self.perception_dict[f'{action_head_name}'].apply(make_module_init_normc(0.01))

Example #5
    def __init__(self,
                 obs_shapes: Dict[str, Sequence[int]],
                 action_logits_shapes: Dict[str, Sequence[int]],
                 hidden_units: List[int],
                 head_units: List[int],
                 non_lin=nn.Module):
        super().__init__(obs_shapes, hidden_units, non_lin)

        # build perception part
        self.perception_dict["head"] = DenseBlock(
            in_keys="latent",
            out_keys="head",
            in_shapes=self.perception_dict["latent"].out_shapes(),
            hidden_units=head_units,
            non_lin=self.non_lin)

        self.perception_dict['head'].apply(make_module_init_normc(std=1.0))

        # build action head
        for action, shape in action_logits_shapes.items():
            self.perception_dict[action] = LinearOutputBlock(
                in_keys="head",
                out_keys=action,
                in_shapes=self.perception_dict["head"].out_shapes(),
                output_units=action_logits_shapes[action][-1])

            module_init = make_module_init_normc(std=0.01)
            self.perception_dict[action].apply(module_init)

        # compile inference model
        self.net = InferenceBlock(in_keys=list(obs_shapes.keys()),
                                  out_keys=list(action_logits_shapes.keys()) +
                                  ['latent'],
                                  in_shapes=list(obs_shapes.values()),
                                  perception_blocks=self.perception_dict)
Example #6
    def __init__(self, obs_shapes: Dict[str, Sequence[int]],
                 non_lin: type(nn.Module)):
        super().__init__(obs_shapes, non_lin)

        self.perception_dict['value_head_net'] = DenseBlock(
            in_keys='hidden_out',
            in_shapes=self.perception_dict['hidden_out'].out_shapes(),
            out_keys='value_head_net',
            hidden_units=[5, 2],
            non_lin=non_lin)

        self.perception_dict['value'] = LinearOutputBlock(
            in_keys='value_head_net',
            in_shapes=self.perception_dict['value_head_net'].out_shapes(),
            out_keys='value',
            output_units=1)

        # Set up inference block
        self.perception_net = InferenceBlock(
            in_keys=list(self.obs_shapes.keys()),
            out_keys='value',
            in_shapes=[self.obs_shapes[key] for key in self.obs_shapes.keys()],
            perception_blocks=self.perception_dict)

        # initialize model weights
        self.perception_net.apply(make_module_init_normc(1.0))
        self.perception_dict['value'].apply(make_module_init_normc(0.01))

Example #7
    def __init__(self, obs_shapes: Dict[str, Sequence[int]],
                 non_lin: Union[str,
                                type(nn.Module)], hidden_units: List[int]):
        nn.Module.__init__(self)
        CustomComplexLatentNet.__init__(self, obs_shapes, non_lin,
                                        hidden_units)

        # build action heads
        self.perception_dict['value'] = LinearOutputBlock(
            in_keys='latent',
            out_keys='value',
            in_shapes=self.perception_dict['latent'].out_shapes(),
            output_units=1)

        # build inference block
        in_keys = list(self.obs_shapes.keys())
        self.perception_net = InferenceBlock(
            in_keys=in_keys,
            out_keys='value',
            in_shapes=[self.obs_shapes[key] for key in in_keys],
            perception_blocks=self.perception_dict)

        # apply weight init
        self.perception_net.apply(make_module_init_normc(1.0))
        self.perception_dict['value'].apply(make_module_init_normc(0.01))

Example #8
    def __init__(self, obs_shapes: Dict[str, Sequence[int]], action_logits_shapes: Dict[str, Sequence[int]],
                 non_lin: Union[str, type(nn.Module)], hidden_units: List[int]):
        super().__init__()

        # Maze relies on dictionaries to represent the inference graph
        self.perception_dict = OrderedDict()

        # build latent embedding block
        self.perception_dict['latent'] = DenseBlock(
            in_keys='observation', out_keys='latent', in_shapes=obs_shapes['observation'],
            hidden_units=hidden_units, non_lin=non_lin)

        # build action head
        self.perception_dict['action'] = LinearOutputBlock(
            in_keys='latent', out_keys='action', in_shapes=self.perception_dict['latent'].out_shapes(),
            output_units=int(np.prod(action_logits_shapes["action"])))

        # build inference block
        self.perception_net = InferenceBlock(
            in_keys='observation', out_keys='action', in_shapes=obs_shapes['observation'],
            perception_blocks=self.perception_dict)

        # apply weight init
        self.perception_net.apply(make_module_init_normc(1.0))
        self.perception_dict['action'].apply(make_module_init_normc(0.01))
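The recipe in the snippets on this page is always the same: build named perception blocks into a dict, chain them via out_shapes(), and compile an InferenceBlock that maps an input tensor dict to an output tensor dict. Below is a minimal, self-contained sketch of that wiring plus a forward pass, mirroring the snippet above; the import paths, observation name, shapes and non-linearity are illustrative assumptions and may need adjusting to the installed maze version:

import numpy as np
import torch
from collections import OrderedDict
from torch import nn

# import paths are assumed here and may differ between maze versions
from maze.perception.blocks.inference import InferenceBlock
from maze.perception.blocks.feed_forward.dense import DenseBlock
from maze.perception.blocks.output.linear import LinearOutputBlock
from maze.perception.weight_init import make_module_init_normc

obs_shapes = {'observation': (16,)}        # illustrative observation shape
action_logits_shapes = {'action': (4,)}    # illustrative action head with 4 logits

perception_dict = OrderedDict()
perception_dict['latent'] = DenseBlock(
    in_keys='observation', out_keys='latent', in_shapes=obs_shapes['observation'],
    hidden_units=[64, 64], non_lin=nn.Tanh)
perception_dict['action'] = LinearOutputBlock(
    in_keys='latent', out_keys='action', in_shapes=perception_dict['latent'].out_shapes(),
    output_units=int(np.prod(action_logits_shapes['action'])))

net = InferenceBlock(
    in_keys='observation', out_keys='action', in_shapes=obs_shapes['observation'],
    perception_blocks=perception_dict)

# init convention used throughout this page: normc(1.0) for the body, normc(0.01) for output heads
net.apply(make_module_init_normc(1.0))
perception_dict['action'].apply(make_module_init_normc(0.01))

# forward pass: dict of tensors in, dict of tensors out (keyed by out_keys)
out = net({'observation': torch.randn(8, 16)})
print(out['action'].shape)  # expected: torch.Size([8, 4])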
Example #9
    def __init__(self, obs_shapes: Dict[str, Sequence[int]],
                 action_logits_shapes: Dict[str, Sequence[int]],
                 non_lin: Union[str, type(nn.Module)]):
        super().__init__()
        self.obs_shapes = obs_shapes
        action_key = list(action_logits_shapes.keys())[0]
        # build perception part
        self.perception_dict = OrderedDict()
        self.perception_dict['embedding'] = DenseBlock(
            in_keys="observation",
            out_keys="embedding",
            in_shapes=obs_shapes['observation'],
            hidden_units=[256, 256],
            non_lin=non_lin)

        # build action head
        self.perception_dict[action_key] = LinearOutputBlock(
            in_keys="embedding",
            out_keys=action_key,
            in_shapes=self.perception_dict['embedding'].out_shapes(),
            output_units=action_logits_shapes[action_key][0])

        self.perception_net = InferenceBlock(
            in_keys='observation',
            out_keys=action_key,
            in_shapes=[self.obs_shapes['observation']],
            perception_blocks=self.perception_dict)

        # initialize model weights
        self.perception_net.apply(make_module_init_normc(1.0))
        self.perception_dict[action_key].apply(make_module_init_normc(0.01))

Example #10
    def __init__(self, obs_shapes: Dict[str, Sequence[int]],
                 non_lin: Union[str,
                                type(nn.Module)], hidden_units: List[int]):
        nn.Module.__init__(self)

        # Maze relies on dictionaries to represent the inference graph
        self.perception_dict = OrderedDict()

        # build latent feature embedding block
        self.perception_dict['latent_inventory'] = DenseBlock(
            in_keys='observation_inventory',
            out_keys='latent_inventory',
            in_shapes=obs_shapes['observation_inventory'],
            hidden_units=[128],
            non_lin=non_lin)

        # Concatenate latent features
        self.perception_dict['latent_concat'] = ConcatenationBlock(
            in_keys=['latent_inventory', 'latent_screen'],
            out_keys='latent_concat',
            in_shapes=self.perception_dict['latent_inventory'].out_shapes() +
            [obs_shapes['latent_screen']],
            concat_dim=-1)

        # Add latent dense block
        self.perception_dict['latent_dense'] = DenseBlock(
            in_keys='latent_concat',
            out_keys='latent_dense',
            hidden_units=hidden_units,
            non_lin=non_lin,
            in_shapes=self.perception_dict['latent_concat'].out_shapes())

        # Add recurrent block
        self.perception_dict['latent'] = LSTMLastStepBlock(
            in_keys='latent_dense',
            out_keys='latent',
            in_shapes=self.perception_dict['latent_dense'].out_shapes(),
            hidden_size=32,
            num_layers=1,
            bidirectional=False,
            non_lin=non_lin)

        # build action heads
        self.perception_dict['value'] = LinearOutputBlock(
            in_keys='latent',
            out_keys='value',
            in_shapes=self.perception_dict['latent'].out_shapes(),
            output_units=1)

        # build inference block
        in_keys = list(obs_shapes.keys())
        self.perception_net = InferenceBlock(
            in_keys=in_keys,
            out_keys='value',
            in_shapes=[obs_shapes[key] for key in in_keys],
            perception_blocks=self.perception_dict)

        # apply weight init
        self.perception_net.apply(make_module_init_normc(1.0))
        self.perception_dict['value'].apply(make_module_init_normc(0.01))
Example #11
    def __init__(self, obs_shapes: Dict[str, Sequence[int]],
                 non_lin: Union[str, type(nn.Module)]):
        nn.Module.__init__(self)

        # initialize the perception dictionary
        self.perception_dict = OrderedDict()

        # concatenate all observations in dictionary
        self.perception_dict['concat'] = ConcatenationBlock(
            in_keys=[
                'cart_position', 'cart_velocity', 'pole_angle',
                'pole_angular_velocity'
            ],
            out_keys='concat',
            in_shapes=[
                obs_shapes['cart_position'], obs_shapes['cart_velocity'],
                obs_shapes['pole_angle'], obs_shapes['pole_angular_velocity']
            ],
            concat_dim=-1)

        # process concatenated representation with two dense layers
        self.perception_dict['embedding'] = DenseBlock(
            in_keys='concat',
            in_shapes=self.perception_dict['concat'].out_shapes(),
            hidden_units=[128, 128],
            non_lin=non_lin,
            out_keys='embedding')

        # add a linear output block
        self.perception_dict['value'] = LinearOutputBlock(
            in_keys='embedding',
            out_keys='value',
            in_shapes=self.perception_dict['embedding'].out_shapes(),
            output_units=1)

        # compile an inference block
        self.perception_net = InferenceBlock(
            in_keys=[
                'cart_position', 'cart_velocity', 'pole_angle',
                'pole_angular_velocity'
            ],
            out_keys='value',
            in_shapes=[
                obs_shapes[key] for key in [
                    'cart_position', 'cart_velocity', 'pole_angle',
                    'pole_angular_velocity'
                ]
            ],
            perception_blocks=self.perception_dict)

        # initialize model weights
        self.perception_net.apply(make_module_init_normc(1.0))
        self.perception_dict['value'].apply(make_module_init_normc(0.01))
Example #12
    def __init__(self, obs_shapes: Dict[str, Sequence[int]],
                 action_spaces_dict: Dict[Union[str, int], spaces.Space],
                 non_lin: Union[str, type(nn.Module)]):
        super().__init__()
        self.obs_shapes = obs_shapes
        # build perception part
        self.perception_dict = OrderedDict()
        self.perception_dict['latent-obs'] = DenseBlock(
            in_keys="observation",
            out_keys="latent-obs",
            in_shapes=obs_shapes['observation'],
            hidden_units=[256],
            non_lin=non_lin)
        self.perception_dict['latent-act'] = DenseBlock(
            in_keys="action",
            out_keys="latent-act",
            in_shapes=obs_shapes['action'],
            hidden_units=[256],
            non_lin=non_lin)

        self.perception_dict['concat'] = ConcatenationBlock(
            in_keys=['latent-obs', 'latent-act'],
            in_shapes=self.perception_dict['latent-obs'].out_shapes() +
            self.perception_dict['latent-act'].out_shapes(),
            concat_dim=-1,
            out_keys='concat')

        self.perception_dict['latent'] = DenseBlock(
            in_keys="concat",
            out_keys="latent",
            in_shapes=self.perception_dict['concat'].out_shapes(),
            hidden_units=[256],
            non_lin=non_lin)

        # build action head
        self.perception_dict['q_value'] = LinearOutputBlock(
            in_keys="latent",
            out_keys="q_value",
            in_shapes=self.perception_dict['latent'].out_shapes(),
            output_units=1)

        self.perception_net = InferenceBlock(
            in_keys=['observation', 'action'],
            out_keys='q_value',
            in_shapes=[
                self.obs_shapes['observation'], self.obs_shapes['action']
            ],
            perception_blocks=self.perception_dict)

        # initialize model weights
        self.perception_net.apply(make_module_init_normc(1.0))
        self.perception_dict['q_value'].apply(make_module_init_normc(0.01))
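Since this state-action critic lists both 'observation' and 'action' as in_keys, the action tensor is passed through the same dict interface as the observations. A hedged call sketch, assuming torch is imported, `q_net` is an instance of the class above, and the shapes are purely illustrative:

# 24-dim observation, 6-dim (continuous or already one-hot encoded) action, batch size 32
obs = torch.randn(32, 24)
act = torch.randn(32, 6)
q = q_net.perception_net({'observation': obs, 'action': act})['q_value']  # shape (32, 1)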
Example #13
    def __init__(self, obs_shapes: Dict[str, Sequence[int]],
                 hidden_units: List[int], non_lin: nn.Module,
                 support_range: Tuple[int, int]):
        super().__init__(obs_shapes, hidden_units, non_lin)

        # build categorical value head
        support_set_size = support_range[1] - support_range[0] + 1
        self.perception_dict["probabilities"] = LinearOutputBlock(
            in_keys="latent",
            out_keys="probabilities",
            in_shapes=self.perception_dict["latent"].out_shapes(),
            output_units=support_set_size)

        # compute value as probability weighted sum of supports
        def _to_scalar(x: torch.Tensor) -> torch.Tensor:
            return support_to_scalar(x, support_range=support_range)

        self.perception_dict["value"] = FunctionalBlock(
            in_keys="probabilities",
            out_keys="value",
            in_shapes=self.perception_dict["probabilities"].out_shapes(),
            func=_to_scalar)

        module_init = make_module_init_normc(std=0.01)
        self.perception_dict["probabilities"].apply(module_init)

        # compile inference model
        self.net = InferenceBlock(in_keys=list(obs_shapes.keys()),
                                  out_keys=["probabilities", "value"],
                                  in_shapes=list(obs_shapes.values()),
                                  perception_blocks=self.perception_dict)
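support_to_scalar is what turns the categorical head back into a scalar value. A sketch of what such a helper computes, assuming its input is already a probability vector as the comment above states (this is an illustrative reimplementation, not maze's actual code):

from typing import Tuple

import torch


def support_to_scalar_sketch(probabilities: torch.Tensor, support_range: Tuple[int, int]) -> torch.Tensor:
    """Probability-weighted sum over the integer support set [support_range[0], support_range[1]]."""
    supports = torch.arange(support_range[0], support_range[1] + 1,
                            dtype=probabilities.dtype, device=probabilities.device)
    return (probabilities * supports).sum(dim=-1)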
Example #14
def test_graph_cnn_block_trainable_self_importance():
    """Test the graph conv block with a trainable self importance scalar"""

    in_dict = build_input_dict(dims=[100, 5, 7])
    adj_matrix_batch = construct_pre_processing_matrix()[0].repeat([100, 1, 1])
    in_dict['adj_matrix'] = adj_matrix_batch
    net: GraphConvBlock = GraphConvBlock(
        in_keys=list(in_dict.keys()),
        out_keys='out_key',
        in_shapes=[value.shape[1:] for value in in_dict.values()],
        hidden_features=[11, 13],
        bias=[False, True],
        non_lins=[nn.ReLU, 'torch.nn.Identity'],
        node_self_importance=1.0,
        trainable_node_self_importance=True,
        preprocess_adj=True)

    str(net)
    out_dict = net(in_dict)

    assert isinstance(out_dict, Dict)
    assert set(net.out_keys).issubset(set(out_dict.keys()))
    assert net.output_features == 13
    assert out_dict[net.out_keys[0]].shape[-1] == net.output_features
    assert out_dict[net.out_keys[0]].shape == (100, 5, 13)
    assert net.out_shapes() == [out_dict[net.out_keys[0]].shape[-2:]]
    assert net.get_num_of_parameters() == (7 * 11) + (11 * 13) + 13 + 1

    net.apply(make_module_init_normc(1.0))
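The expected parameter count in the assertion above decomposes cleanly: the first graph conv layer has its bias disabled, the second has it enabled, and one extra scalar comes from the trainable node self-importance. A worked check:

# layer 1: 7 -> 11 weights, no bias          : 7 * 11       = 77
# layer 2: 11 -> 13 weights plus 13 biases   : 11 * 13 + 13 = 156
# trainable node self-importance scalar      : 1
assert (7 * 11) + (11 * 13) + 13 + 1 == 77 + 156 + 1 == 234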
Example #15
def test_graph_attention_block():
    """Test the graph conv block"""

    in_dict = build_input_dict(dims=[100, 5, 7])
    adj_matrix_batch = construct_pre_processing_matrix_adj_bar()[0].repeat(
        [100, 1, 1])
    in_dict['adj_matrix'] = adj_matrix_batch
    net: GraphAttentionBlock = GraphAttentionBlock(
        in_keys=list(in_dict.keys()),
        out_keys='out_key',
        in_shapes=[value.shape[1:] for value in in_dict.values()],
        hidden_features=[11, 13],
        non_lins=[nn.ReLU, nn.Identity],
        attention_alpha=0.2,
        n_heads=[3, 1],
        attention_dropout=0,
        avg_last_head_attentions=False)

    str(net)
    out_dict = net(in_dict)

    assert isinstance(out_dict, Dict)
    assert set(net.out_keys).issubset(set(out_dict.keys()))
    assert net.output_features == 13
    assert out_dict[net.out_keys[0]].shape[-1] == net.output_features
    assert out_dict[net.out_keys[0]].shape == (100, 5, 13)
    assert net.out_shapes() == [out_dict[net.out_keys[0]].shape[-2:]]
    assert net.get_num_of_parameters() == (7 * 11 + 11 * 2) * 3 + (33 * 13 +
                                                                   13 * 2)

    net.apply(make_module_init_normc(1.0))
Example #16
    def re_init_networks(self) -> None:
        """Reinitialize all parameters of the network."""
        for key, critic in self.networks.items():
            # initialize model weights
            if isinstance(critic, InferenceBlock):
                critic.apply(make_module_init_normc(1.0))
                for block_key in critic.perception_dict:
                    if block_key == 'q_value' or block_key.endswith('_q_values'):
                        critic.perception_dict[block_key].apply(make_module_init_normc(0.01))
            else:
                inference_blocks = list(filter(lambda cc: isinstance(cc, InferenceBlock), critic.children()))
                if len(inference_blocks) == 1:
                    inference_blocks[0].apply(make_module_init_normc(1.0))
                    for block_key in inference_blocks[0].perception_dict:
                        if block_key == 'q_value' or block_key.endswith('_q_values'):
                            inference_blocks[0].perception_dict[block_key].apply(make_module_init_normc(0.01))
                else:
                    BColors.print_colored(f'More or less than one inference block was found for'
                                          f' {key}, therefore the model could not be reinitialized', BColors.WARNING)
Example #17
    def __init__(self, obs_shapes, non_lin=nn.Tanh):
        super().__init__()

        # build perception part
        self.perception_network = DenseBlock(in_keys="observation", out_keys="latent",
                                             in_shapes=obs_shapes['observation'],
                                             hidden_units=[32, 32], non_lin=non_lin)

        module_init = make_module_init_normc(std=1.0)
        self.perception_network.apply(module_init)

        # build action head
        self.value_head = LinearOutputBlock(in_keys="latent", out_keys="value",
                                            in_shapes=self.perception_network.out_shapes(),
                                            output_units=1)

        module_init = make_module_init_normc(std=0.01)
        self.value_head.apply(module_init)

        # compile inference model
        self.net = InferenceBlock(in_keys="observation", out_keys="value", in_shapes=list(obs_shapes.values()),
                                  perception_blocks={"latent": self.perception_network,
                                                     "value": self.value_head})

Example #18
    def template_policy_net(
        self, observation_space: spaces.Dict, action_space: spaces.Dict,
        shared_embedding_keys: List[str]
    ) -> Tuple[InferenceBlock, InferenceBlock]:
        """Compiles a template policy network.

        :param observation_space: The input observations for the perception network.
        :param action_space: The action space that defines the network action heads.
        :param shared_embedding_keys: The list of embedding keys for this substep's model.
        :return: A policy network (actor) InferenceBlock, as well as the embedding net InferenceBlock if shared keys
            have been specified.
        """

        # build perception net
        embedding_net = self.template_perception_net(observation_space)

        # build action head
        perception_dict = embedding_net.perception_dict
        action_heads = []
        for action_head in action_space.spaces.keys():
            # initialize action head
            action_net = LinearOutputBlock(
                in_keys="latent",
                out_keys=action_head,
                in_shapes=perception_dict["latent"].out_shapes(),
                output_units=self._distribution_mapper.required_logits_shape(
                    action_head)[0])

            module_init = make_module_init_normc(std=0.01)
            action_net.apply(module_init)

            # extend perception dictionary
            perception_dict[action_head] = action_net
            action_heads.append(action_head)

        # compile inference model
        shared_embedding_keys_remove_input = list(
            filter(lambda x: x not in embedding_net.in_keys,
                   shared_embedding_keys))
        net = InferenceBlock(in_keys=embedding_net.in_keys,
                             out_keys=action_heads +
                             shared_embedding_keys_remove_input,
                             in_shapes=embedding_net.in_shapes,
                             perception_blocks=perception_dict)

        if len(shared_embedding_keys_remove_input) == 0:
            embedding_net = None

        return net, embedding_net
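A hedged usage sketch for template_policy_net, assuming `composer` is an instance of the surrounding model composer and using the same gym spaces as elsewhere on this page (the space names and shapes are illustrative):

# assuming: from gym import spaces
obs_space = spaces.Dict({'observation': spaces.Box(low=-1.0, high=1.0, shape=(16,))})
act_space = spaces.Dict({'action': spaces.Discrete(4)})

policy_net, embedding_net = composer.template_policy_net(
    observation_space=obs_space, action_space=act_space, shared_embedding_keys=[])
# with no shared embedding keys the second return value is None;
# otherwise embedding_net exposes the shared latent outputs for the critic to reuse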

Example #19
    def template_perception_net(
            self, observation_space: spaces.Dict) -> InferenceBlock:
        """Compiles a template perception network for a given observation space.

        :param observation_space: The observation space to build the model for.
        :return: A Perception Inference Block.
        """

        # build model from parameters
        perception_net = self.model_builder.from_observation_space(
            observation_space=observation_space)

        # initialize model weights
        module_init = make_module_init_normc(std=1.0)
        perception_net.apply(module_init)

        return perception_net
Example #20
    def __init__(self, obs_shapes: Dict[str, Sequence[int]],
                 hidden_units: List[int], non_lin: nn.Module):
        super().__init__(obs_shapes, hidden_units, non_lin)

        # build action head
        self.perception_dict["value"] = LinearOutputBlock(
            in_keys="latent",
            out_keys="value",
            in_shapes=self.perception_dict["latent"].out_shapes(),
            output_units=1)

        module_init = make_module_init_normc(std=0.01)
        self.perception_dict["value"].apply(module_init)

        # compile inference model
        self.net = InferenceBlock(in_keys=list(obs_shapes.keys()),
                                  out_keys="value",
                                  in_shapes=list(obs_shapes.values()),
                                  perception_blocks=self.perception_dict)
Example #21
    def __init__(self, obs_shapes: Dict[str, Sequence[int]],
                 hidden_units: List[int], non_lin: nn.Module):
        super().__init__()
        self.hidden_units = hidden_units
        self.non_lin = non_lin

        self.perception_dict: Dict[str, PerceptionBlock] = dict()

        # first, flatten all observations
        flat_keys = []
        for obs, shape in obs_shapes.items():
            out_key = f'{obs}_flat'
            flat_keys.append(out_key)
            self.perception_dict[out_key] = FlattenBlock(
                in_keys=obs,
                out_keys=out_key,
                in_shapes=shape,
                num_flatten_dims=len(shape))

        # next, concatenate flat observations
        in_shapes = [
            self.perception_dict[k].out_shapes()[0] for k in flat_keys
        ]
        self.perception_dict["concat"] = ConcatenationBlock(
            in_keys=flat_keys,
            out_keys='concat',
            in_shapes=in_shapes,
            concat_dim=-1)

        # build perception part
        self.perception_dict["latent"] = DenseBlock(
            in_keys="concat",
            out_keys="latent",
            in_shapes=self.perception_dict["concat"].out_shapes(),
            hidden_units=self.hidden_units,
            non_lin=self.non_lin)

        # initialize model weights
        module_init = make_module_init_normc(std=1.0)
        for key in self.perception_dict.keys():
            self.perception_dict[key].apply(module_init)

Example #22
    def template_q_value_net(
            self,
            observation_space: Optional[spaces.Dict],
            action_space: spaces.Dict,
            only_discrete_spaces: bool,
            perception_net: Optional[InferenceBlock] = None) -> InferenceBlock:
        """Compiles a template state action (Q) value network.

        :param observation_space: The input observations for the perception network.
        :param action_space: The action space that defines the network action heads.
        :param only_discrete_spaces: A bool specifying whether the action spaces of this step only hold discrete
                                     action spaces.
        :param perception_net: An initial network to continue from.
                               (e.g. useful for shared weights. Model building continues from the key 'latent'.)
        :return: A q value network (critic) InferenceBlock.
        """
        assert all(map(lambda space: isinstance(space, (spaces.Discrete, spaces.Box)),
                       action_space.spaces.values())), 'Only discrete and box spaces supported thus far for q values ' \
                                                       'critic.'

        if not only_discrete_spaces:
            discrete_space = list(
                filter(
                    lambda kk: isinstance(action_space.spaces[kk], spaces.
                                          Discrete), action_space.spaces))
            if len(discrete_space) > 0:
                new_action_space = {}
                for key in action_space.spaces.keys():
                    if key in discrete_space:
                        new_action_space[key] = OneHotPreProcessor(
                            action_space.spaces[key]).processed_space()
                    else:
                        new_action_space[key] = action_space.spaces[key]
                action_space = spaces.Dict(new_action_space)
            observation_space = spaces.Dict({
                **observation_space.spaces,
                **action_space.spaces
            })
            value_heads = {'q_value': 1}
        else:
            value_heads = {
                f'{act_key}_q_values': act_space.n
                for act_key, act_space in action_space.spaces.items()
            }

        # check if actions are considered as observations for the state-action critic
        for action_head in action_space.spaces.keys():
            if action_head not in self.model_builder.observation_modality_mapping:
                BColors.print_colored(
                    f'TemplateModelComposer: The action \'{action_head}\' could not be found in the '
                    f'model_builder.observation_modality_mapping and won\'t be considered '
                    f'as an input to the state-action critic!', BColors.FAIL)

        # build perception net
        if perception_net is None:
            perception_net = self.template_perception_net(observation_space)

        perception_dict = perception_net.perception_dict
        for value_head, output_units in value_heads.items():
            # initialize action head
            value_net = LinearOutputBlock(
                in_keys="latent",
                out_keys=value_head,
                in_shapes=perception_dict["latent"].out_shapes(),
                output_units=output_units)

            module_init = make_module_init_normc(std=0.01)
            value_net.apply(module_init)

            # extend perception dictionary
            perception_dict[value_head] = value_net

        # compile inference model
        net = InferenceBlock(in_keys=perception_net.in_keys,
                             out_keys=list(value_heads.keys()),
                             in_shapes=perception_net.in_shapes,
                             perception_blocks=perception_dict)

        return net
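For orientation, the two branches above yield two critic layouts: if the action space is not purely discrete, discrete sub-actions are one-hot pre-processed, appended to the observation space, and a single 'q_value' head with one output unit is built; if it is purely discrete, one '{act_key}_q_values' head is built per action key with one unit per discrete choice. Illustrative outcomes (the spaces are assumptions):

# only_discrete_spaces=True with action_space = spaces.Dict({'action': spaces.Discrete(4)})
#   -> value_heads == {'action_q_values': 4}
# only_discrete_spaces=False (e.g. an additional Box sub-action next to a Discrete one)
#   -> value_heads == {'q_value': 1}, with the actions fed to the critic as observations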
Example #23
    def __init__(self, obs_shapes: Dict[str, Sequence[int]],
                 action_logits_shapes: Dict[str, Sequence[int]],
                 non_lin: Union[str, type(nn.Module)], with_mask: bool):
        nn.Module.__init__(self)
        self.obs_shapes = obs_shapes

        hidden_units, embedding_dim = 32, 7

        self.perception_dict = OrderedDict()

        # embed inventory
        # ---------------
        self.perception_dict['inventory_feat'] = DenseBlock(
            in_keys='inventory',
            out_keys='inventory_feat',
            in_shapes=self.obs_shapes['inventory'],
            hidden_units=[hidden_units],
            non_lin=non_lin)

        self.perception_dict['inventory_embed'] = LinearOutputBlock(
            in_keys='inventory_feat',
            out_keys='inventory_embed',
            in_shapes=self.perception_dict['inventory_feat'].out_shapes(),
            output_units=embedding_dim)

        # embed ordered_piece
        # -------------------
        self.perception_dict['order_unsqueezed'] = FunctionalBlock(
            in_keys='ordered_piece',
            out_keys='order_unsqueezed',
            in_shapes=self.obs_shapes['ordered_piece'],
            func=lambda x: torch.unsqueeze(x, dim=-2))

        self.perception_dict['order_feat'] = DenseBlock(
            in_keys='order_unsqueezed',
            out_keys='order_feat',
            in_shapes=self.perception_dict['order_unsqueezed'].out_shapes(),
            hidden_units=[hidden_units],
            non_lin=non_lin)

        self.perception_dict['order_embed'] = LinearOutputBlock(
            in_keys='order_feat',
            out_keys='order_embed',
            in_shapes=self.perception_dict['order_feat'].out_shapes(),
            output_units=embedding_dim)

        # compute dot product score
        # -------------------------
        in_shapes = self.perception_dict['inventory_embed'].out_shapes()
        in_shapes += self.perception_dict['order_embed'].out_shapes()
        out_key = 'corr_score' if with_mask else 'piece_idx'
        self.perception_dict[out_key] = CorrelationBlock(
            in_keys=['inventory_embed', 'order_embed'],
            out_keys=out_key,
            in_shapes=in_shapes,
            reduce=True)

        # apply action masking
        if with_mask:
            self.perception_dict['piece_idx'] = ActionMaskingBlock(
                in_keys=['corr_score', 'inventory_mask'],
                out_keys='piece_idx',
                in_shapes=self.perception_dict['corr_score'].out_shapes() +
                [self.obs_shapes['inventory_mask']],
                num_actors=1,
                num_of_actor_actions=None)

        assert self.perception_dict['piece_idx'].out_shapes(
        )[0][0] == action_logits_shapes['piece_idx'][0]

        in_keys = ['ordered_piece', 'inventory']
        if with_mask:
            in_keys.append('inventory_mask')
        self.perception_net = InferenceBlock(
            in_keys=in_keys,
            out_keys='piece_idx',
            in_shapes=[self.obs_shapes[key] for key in in_keys],
            perception_blocks=self.perception_dict)

        # initialize model weights
        self.perception_net.apply(make_module_init_normc(1.0))
        self.perception_dict['inventory_embed'].apply(
            make_module_init_normc(0.01))
        self.perception_dict['order_embed'].apply(make_module_init_normc(0.01))
Example #24
    def __init__(self, obs_shapes: Dict[str, Sequence[int]],
                 action_logits_shapes: Dict[str, Sequence[int]],
                 non_lin: Union[str, type(nn.Module)], with_mask: bool):
        nn.Module.__init__(self)
        self.obs_shapes = obs_shapes

        hidden_units = 32

        self.perception_dict = OrderedDict()

        self.perception_dict['selected_feat'] = DenseBlock(
            in_keys='selected_piece',
            out_keys='selected_feat',
            in_shapes=self.obs_shapes['selected_piece'],
            hidden_units=[hidden_units],
            non_lin=non_lin)

        self.perception_dict['order_feat'] = DenseBlock(
            in_keys='ordered_piece',
            out_keys='order_feat',
            in_shapes=self.obs_shapes['ordered_piece'],
            hidden_units=[hidden_units],
            non_lin=non_lin)

        self.perception_dict['latent'] = ConcatenationBlock(
            in_keys=['selected_feat', 'order_feat'],
            out_keys='latent',
            in_shapes=[[hidden_units], [hidden_units]],
            concat_dim=-1)

        rotation_out_key = 'cut_rotation_logits' if with_mask else 'cut_rotation'
        self.perception_dict[rotation_out_key] = LinearOutputBlock(
            in_keys='latent',
            out_keys=rotation_out_key,
            in_shapes=self.perception_dict['latent'].out_shapes(),
            output_units=action_logits_shapes['cut_rotation'][0])

        if with_mask:
            self.perception_dict['cut_rotation'] = ActionMaskingBlock(
                in_keys=['cut_rotation_logits', 'cutting_mask'],
                out_keys='cut_rotation',
                in_shapes=self.perception_dict['cut_rotation_logits'].
                out_shapes() + [self.obs_shapes['cutting_mask']],
                num_actors=1,
                num_of_actor_actions=None)

        self.perception_dict['cut_order'] = LinearOutputBlock(
            in_keys='latent',
            out_keys='cut_order',
            in_shapes=self.perception_dict['latent'].out_shapes(),
            output_units=action_logits_shapes['cut_order'][0])

        in_keys = ['selected_piece', 'ordered_piece']
        if with_mask:
            in_keys.append('cutting_mask')
        self.perception_net = InferenceBlock(
            in_keys=in_keys,
            out_keys=['cut_rotation', 'cut_order'],
            in_shapes=[self.obs_shapes[key] for key in in_keys],
            perception_blocks=self.perception_dict)

        # initialize model weights
        self.perception_net.apply(make_module_init_normc(1.0))
        self.perception_dict[rotation_out_key].apply(
            make_module_init_normc(0.01))
        self.perception_dict['cut_order'].apply(make_module_init_normc(0.01))
Example #25
def test_module_init_normc():
    """ perception test """
    module_init = make_module_init_normc(0.1)
    module = nn.Linear(10, 10)
    module.apply(module_init)

Example #26
    def template_value_net(
            self,
            observation_space: Optional[spaces.Dict],
            shared_embedding_keys: Optional[List[str]] = None,
            perception_net: Optional[InferenceBlock] = None) -> InferenceBlock:
        """Compiles a template value network.

        :param observation_space: The input observations for the perception network.
        :param shared_embedding_keys: The shared embedding keys for this substep's model (input)
        :param perception_net: The embedding network of the policy network if shared keys have been specified, in order
            to reuse the block and share the embedding.
        :return: A value network (critic) InferenceBlock.
        """

        shared_embedding_keys = [] if shared_embedding_keys is None else shared_embedding_keys
        # build perception net
        if perception_net is None:
            assert len(shared_embedding_keys) == 0
            perception_net = self.template_perception_net(observation_space)
            perception_dict = perception_net.perception_dict
            inference_block_in_keys = perception_net.in_keys
            inference_block_in_shapes = perception_net.in_shapes
        else:
            assert len(shared_embedding_keys) > 0
            perception_dict = dict()
            _block_not_present_in_perception_net = list(
                filter(lambda x: x not in perception_net.perception_dict,
                       shared_embedding_keys))
            assert len(_block_not_present_in_perception_net) == 0, \
                f'All given latent keys must be present in the default perception dict (created for the policy). ' \
                f'But the following keys {_block_not_present_in_perception_net} were not found.'
            _blocks_with_more_outputs = list(
                filter(
                    lambda x: len(perception_net.perception_dict[x].out_shapes(
                    )) > 1, shared_embedding_keys))
            assert len(_blocks_with_more_outputs) == 0, \
                f'All given latent keys must refer to a block with only a single output. But the following keys ' \
                f'have more than one output {_blocks_with_more_outputs}.'

            new_perception_net = self.template_perception_net(
                observation_space)
            new_perception_dict = new_perception_net.perception_dict
            start_adding_blocks = False
            for idx, block_keys in perception_net.execution_plan.items():
                if start_adding_blocks:
                    for block_key in block_keys:
                        perception_dict[block_key] = new_perception_dict[
                            block_key]
                if any([
                        block_key in shared_embedding_keys
                        for block_key in block_keys
                ]):
                    assert all(block_key in shared_embedding_keys for block_key in block_keys), \
                        f'In the template model composer, all shared embedding keys given must be on the same level ' \
                        f'in the execution plan of the inference block.'
                    start_adding_blocks = True

            inference_block_in_keys = shared_embedding_keys
            inference_block_in_shapes = sum([
                perception_net.perception_dict[key].out_shapes()
                for key in shared_embedding_keys
            ], [])

        # build value head
        value_net = LinearOutputBlock(
            in_keys="latent",
            out_keys="value",
            in_shapes=perception_net.perception_dict["latent"].out_shapes(),
            output_units=1)

        module_init = make_module_init_normc(std=0.01)
        value_net.apply(module_init)

        # extend perception dictionary
        perception_dict["value"] = value_net

        # compile inference model
        net = InferenceBlock(in_keys=inference_block_in_keys,
                             out_keys="value",
                             in_shapes=inference_block_in_shapes,
                             perception_blocks=perception_dict)

        return net
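A hedged usage sketch for template_value_net, reusing the illustrative `composer` and `obs_space` from the earlier sketch (all names are assumptions):

# (1) independent critic: builds its own perception net from the observation space
value_net = composer.template_value_net(observation_space=obs_space)

# (2) critic sharing the policy embedding: assumes the policy was built with
#     shared_embedding_keys=['latent'], so embedding_net is the shared perception net
value_net = composer.template_value_net(
    observation_space=obs_space,
    shared_embedding_keys=['latent'],
    perception_net=embedding_net)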