def get_convolution_net(
    in_channels: int,
    history_len: int = 1,
    channels: List = None,
    kernel_sizes: List = None,
    strides: List = None,
    groups: List = None,
    use_bias: Union[bool, List] = False,
    normalization: Union[str, List] = None,
    dropout_rate: Union[float, List] = None,
    activation: Union[str, List] = None,
    residual: Union[bool, str] = False,
    layer_order: List = None,
) -> nn.Module:
    """Build a convolutional backbone as a ``SequentialNet``.

    Args:
        in_channels: number of channels in a single input frame.
        history_len: number of stacked frames; the first conv layer
            receives ``history_len * in_channels`` input channels.
        channels: output channels per conv layer (default ``[32, 64, 64]``).
        kernel_sizes: kernel size per conv layer (default ``[8, 4, 3]``).
        strides: stride per conv layer (default ``[4, 2, 1]``).
        groups: groups per conv layer (default ``[1, 1, 1]``).
        use_bias: bias flag — a scalar broadcast to every layer or a
            per-layer list.
        normalization: normalization spec forwarded to ``SequentialNet``.
        dropout_rate: dropout probability — a scalar applied to every
            layer or a per-layer list where ``None`` disables dropout
            for that layer.
        activation: activation spec forwarded to ``SequentialNet``.
        residual: residual-connection mode forwarded to ``SequentialNet``.
        layer_order: layer ordering spec forwarded to ``SequentialNet``.

    Returns:
        nn.Module: the assembled convolutional network.
    """
    # Copy caller-provided lists so this function never mutates its
    # arguments (the previous version did ``channels.insert(0, ...)``
    # on the caller's list).
    channels = list(channels) if channels else [32, 64, 64]
    kernel_sizes = list(kernel_sizes) if kernel_sizes else [8, 4, 3]
    strides = list(strides) if strides else [4, 2, 1]
    groups = list(groups) if groups else [1, 1, 1]
    assert len(channels) == len(kernel_sizes) == len(strides) == len(groups)

    # Broadcast a scalar ``use_bias`` to one flag per layer.
    use_bias = _process_additional_params(use_bias, channels)

    layer_fn = [
        {
            "module": nn.Conv2d,
            "bias": bias,
            "kernel_size": kernel_size,
            "stride": stride,
            "groups": group,
        }
        for bias, kernel_size, stride, group in zip(
            use_bias, kernel_sizes, strides, groups
        )
    ]

    if dropout_rate is not None:
        dropout_fn = (
            {"module": nn.Dropout2d, "p": dropout_rate}
            if isinstance(dropout_rate, float)
            else [
                {"module": nn.Dropout2d, "p": p} if p is not None else None
                for p in dropout_rate
            ]
        )
    else:
        dropout_fn = None

    # Prepend the input-channel count without touching the caller's list.
    hiddens = [history_len * in_channels, *channels]
    net = SequentialNet(
        hiddens=hiddens,
        layer_fn=layer_fn,
        norm_fn=normalization,
        dropout_fn=dropout_fn,
        activation_fn=activation,
        residual=residual,
        layer_order=layer_order,
    )
    return net
def get_from_params(
    cls,
    heads_params: Dict,
    encoder_params: Dict = None,
    embedders_params: Dict = None,
    in_features: int = None,
) -> "Hydra":
    """Construct a ``Hydra`` model from configuration dictionaries.

    Every config dict is deep-copied up front, so the destructive
    ``pop`` calls below never touch the caller's configuration.

    Args:
        heads_params: (possibly nested) head configuration.
        encoder_params: ``SequentialNet`` kwargs for the shared encoder;
            when ``None``, ``in_features`` must be given explicitly.
        embedders_params: mapping of embedder name to ``nn.Embedding``
            kwargs (or a plain int meaning ``num_embeddings``).
        in_features: encoder output size; inferred from
            ``encoder_params["hiddens"][-1]`` when an encoder is built.

    Returns:
        Hydra: the assembled model.
    """
    heads_cfg = deepcopy(heads_params)
    encoder_cfg = deepcopy(encoder_params)
    embedders_cfg = deepcopy(embedders_params)

    def _pop_normalize_flag(cfg: Dict):
        # ``pop`` removes the keyword so it is not forwarded to the net.
        if cfg is None:
            return False
        return cfg.pop(Hydra.normalize_keyword, False)

    if encoder_cfg is None:
        assert in_features is not None
        encoder = None
    else:
        should_normalize: bool = _pop_normalize_flag(encoder_cfg)
        encoder = SequentialNet(**encoder_cfg)
        in_features = encoder_cfg["hiddens"][-1]
        if should_normalize:
            encoder = nn.Sequential(encoder, Normalize())

    heads = Hydra.parse_head_params(
        head_params=heads_cfg, in_features=in_features
    )
    assert isinstance(heads, nn.ModuleDict)

    embedders = {}
    if embedders_cfg is not None:
        for name, embedder_cfg in embedders_cfg.items():
            if isinstance(embedder_cfg, int):
                embedder_cfg = {"num_embeddings": embedder_cfg}
            should_normalize = embedder_cfg.pop(
                Hydra.normalize_keyword, False
            )
            layers = [
                (
                    "embedding",
                    nn.Embedding(embedding_dim=in_features, **embedder_cfg),
                )
            ]
            if should_normalize:
                layers.append(("normalize", Normalize()))
            embedders[name] = nn.Sequential(OrderedDict(layers))
    embedders = nn.ModuleDict(embedders)

    return cls(heads=heads, encoder=encoder, embedders=embedders)
def get_linear_net(
    in_features: int,
    history_len: int = 1,
    features: List = None,
    use_bias: Union[bool, List] = False,
    normalization: Union[str, List] = None,
    dropout_rate: Union[float, List] = None,
    activation: Union[str, List] = None,
    residual: Union[bool, str] = False,
    layer_order: List = None,
) -> nn.Module:
    """Build a fully-connected backbone as a ``SequentialNet``.

    Args:
        in_features: number of features in a single input observation.
        history_len: number of stacked observations; the first linear
            layer receives ``history_len * in_features`` inputs.
        features: output features per layer (default ``[64, 128, 64]``).
        use_bias: bias flag — a scalar broadcast to every layer or a
            per-layer list.
        normalization: normalization spec forwarded to ``SequentialNet``.
        dropout_rate: dropout probability — a scalar applied to every
            layer or a per-layer list where ``None`` disables dropout
            for that layer.
        activation: activation spec forwarded to ``SequentialNet``.
        residual: residual-connection mode forwarded to ``SequentialNet``.
        layer_order: layer ordering spec forwarded to ``SequentialNet``.

    Returns:
        nn.Module: the assembled feed-forward network.
    """
    # Copy the caller-provided list so this function never mutates its
    # arguments (the previous version did ``features.insert(0, ...)``
    # on the caller's list).
    features = list(features) if features else [64, 128, 64]

    layer_fn = (
        {"module": nn.Linear, "bias": use_bias}
        if isinstance(use_bias, bool)
        else [{"module": nn.Linear, "bias": bias} for bias in use_bias]
    )

    if dropout_rate is not None:
        dropout_fn = (
            {"module": nn.Dropout, "p": dropout_rate}
            if isinstance(dropout_rate, float)
            else [
                {"module": nn.Dropout, "p": p} if p is not None else None
                for p in dropout_rate
            ]
        )
    else:
        dropout_fn = None

    # Prepend the input-feature count without touching the caller's list.
    hiddens = [history_len * in_features, *features]
    net = SequentialNet(
        hiddens=hiddens,
        layer_fn=layer_fn,
        norm_fn=normalization,
        dropout_fn=dropout_fn,
        activation_fn=activation,
        residual=residual,
        layer_order=layer_order,
    )
    return net
def parse_head_params(
    head_params: Dict,
    in_features: int,
    is_leaf: bool = False,
) -> Union[nn.Module, nn.ModuleDict]:
    """Recursively build head modules from a (possibly nested) config.

    Args:
        head_params: head configuration; at a leaf, either an int (a
            single hidden size) or ``SequentialNet`` kwargs, at a branch
            a mapping of head names to sub-configs.
        in_features: input feature size prepended to each leaf's
            ``hiddens`` list.
        is_leaf: whether ``head_params`` describes a single head rather
            than a branch of named heads.

    Returns:
        ``nn.Sequential`` for a leaf, ``nn.ModuleDict`` for a branch.
    """
    if is_leaf:
        if isinstance(head_params, int):
            head_params = {"hiddens": [head_params]}
        else:
            # Shallow-copy so the pop/prepend below never mutates the
            # caller's config dict or its ``hiddens`` list (the previous
            # version edited both in place).
            head_params = dict(head_params)
        normalize = head_params.pop(Hydra.normalize_keyword, False)
        head_params["hiddens"] = [in_features, *head_params["hiddens"]]

        output = [("net", SequentialNet(**head_params))]
        if normalize:
            output.append(("normalize", Normalize()))
        output = nn.Sequential(OrderedDict(output))
    else:
        # Copy before popping the hidden-branch keyword to keep the
        # caller's dict intact.
        head_params = dict(head_params)
        output = {}
        hidden_params = head_params.pop(Hydra.hidden_keyword, None)
        if hidden_params is not None:
            # NOTE(review): ``in_features`` is rebound to the hidden
            # block's OUTPUT size before the hidden block itself is
            # built, so the hidden block's input size equals its output
            # size rather than the incoming ``in_features`` — confirm
            # this ordering is intended. Preserved as-is.
            in_features = (
                hidden_params
                if isinstance(hidden_params, int)
                else hidden_params["hiddens"][-1]
            )
            output[Hydra.hidden_keyword] = Hydra.parse_head_params(
                head_params=hidden_params,
                in_features=in_features,
                is_leaf=True,
            )
        for head_branch_name, head_branch_params in head_params.items():
            # Names starting with the parent keyword denote nested
            # branches; everything else is a leaf head.
            output[head_branch_name] = Hydra.parse_head_params(
                head_params=head_branch_params,
                in_features=in_features,
                is_leaf=not head_branch_name.startswith(
                    Hydra.parent_keyword
                ),
            )
        output = nn.ModuleDict(output)
    return output