Example #1
def _build_impala(self, input_shape, depths):
    # Stack one ImpalaSequential block per entry in `depths`; each block
    # maps the running channel count to its target depth.
    in_channels = input_shape[0]
    layers = []
    for d in depths:
        layers.append(ImpalaSequential(in_channels, d))
        in_channels = d
    return nn.Sequential(*layers)
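`ImpalaSequential` itself is defined elsewhere in rl_games; below is a minimal, hypothetical stand-in used only to show how the builder chains channel depths (the real block also applies max-pooling and residual sub-blocks):

import torch
import torch.nn as nn

class ImpalaSequential(nn.Module):
    # Hypothetical stand-in; the real rl_games block is more elaborate.
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, 3, padding=1)

    def forward(self, x):
        return torch.relu(self.conv(x))

# Chaining depths [16, 32, 32] over a 4-channel observation:
in_channels, blocks = 4, []
for d in [16, 32, 32]:
    blocks.append(ImpalaSequential(in_channels, d))
    in_channels = d
cnn = nn.Sequential(*blocks)
print(cnn(torch.zeros(1, 4, 84, 84)).shape)  # torch.Size([1, 32, 84, 84])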
Example #2
def _build_sequential_mlp(self,
                          input_size,
                          units,
                          activation,
                          dense_func,
                          norm_only_first_layer=False,
                          norm_func_name=None):
    print('build mlp:', input_size)
    in_size = input_size
    layers = []
    need_norm = True
    for unit in units:
        layers.append(dense_func(in_size, unit))
        layers.append(self.activations_factory.create(activation))
        # Update before the early `continue` below; otherwise layers built
        # after normalization stops would use a stale input size.
        in_size = unit

        if not need_norm:
            continue
        if norm_only_first_layer and norm_func_name is not None:
            # Normalize only after the first layer; skip it from now on.
            need_norm = False
        if norm_func_name == 'layer_norm':
            layers.append(torch.nn.LayerNorm(unit))
        elif norm_func_name == 'batch_norm':
            layers.append(torch.nn.BatchNorm1d(unit))

    return nn.Sequential(*layers)
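A quick sketch of exercising the MLP builder outside the full network class, assuming a minimal stand-in for the activations factory (the real rl_games factory resolves more names):

import torch
import torch.nn as nn

class _Acts:
    # Stand-in for rl_games' activations factory.
    def create(self, name):
        return {'relu': nn.ReLU(), 'tanh': nn.Tanh()}[name]

class _Demo:
    activations_factory = _Acts()

_Demo._build_sequential_mlp = _build_sequential_mlp  # method defined above

mlp = _Demo()._build_sequential_mlp(
    input_size=8,
    units=[64, 64],
    activation='relu',
    dense_func=nn.Linear,
    norm_only_first_layer=True,
    norm_func_name='layer_norm')
print(mlp(torch.zeros(2, 8)).shape)  # torch.Size([2, 64]); LayerNorm after layer 1 only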
Example #3
def _build_cnn1d(self,
                 input_shape,
                 convs,
                 activation,
                 norm_func_name=None):
    print('conv1d input shape:', input_shape)
    in_channels = input_shape[0]
    layers = []
    for conv in convs:
        layers.append(
            torch.nn.Conv1d(in_channels, conv['filters'],
                            conv['kernel_size'], conv['strides'],
                            conv['padding']))
        act = self.activations_factory.create(activation)
        layers.append(act)
        in_channels = conv['filters']
        if norm_func_name == 'layer_norm':
            # Caution: LayerNorm(in_channels) normalizes the trailing (length)
            # dimension of a (N, C, L) tensor, so it only works when the
            # sequence length happens to equal the channel count.
            layers.append(torch.nn.LayerNorm(in_channels))
        elif norm_func_name == 'batch_norm':
            # BatchNorm1d, not BatchNorm2d: Conv1d outputs are 3-D (N, C, L),
            # and BatchNorm2d would reject them at runtime.
            layers.append(torch.nn.BatchNorm1d(in_channels))
    return nn.Sequential(*layers)
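Each entry of `convs` is a plain dict with `filters`, `kernel_size`, `strides`, and `padding` keys (key names taken from the code above). A free-standing sketch of the same loop, using ReLU and the batch-norm path so no factory is needed:

import torch
import torch.nn as nn

convs = [
    {'filters': 32, 'kernel_size': 8, 'strides': 4, 'padding': 0},
    {'filters': 64, 'kernel_size': 4, 'strides': 2, 'padding': 0},
]

layers, in_channels = [], 3
for conv in convs:
    layers.append(nn.Conv1d(in_channels, conv['filters'],
                            conv['kernel_size'], conv['strides'],
                            conv['padding']))
    layers.append(nn.ReLU())
    in_channels = conv['filters']
    layers.append(nn.BatchNorm1d(in_channels))
net = nn.Sequential(*layers)
print(net(torch.zeros(2, 3, 128)).shape)  # torch.Size([2, 64, 14])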
Example #4
def _build_cnn2d(self,
                 input_shape,
                 convs,
                 activation,
                 conv_func=torch.nn.Conv2d,
                 norm_func_name=None):
    in_channels = input_shape[0]
    layers = []
    for conv in convs:
        layers.append(
            conv_func(in_channels=in_channels,
                      out_channels=conv['filters'],
                      kernel_size=conv['kernel_size'],
                      stride=conv['strides'],
                      padding=conv['padding']))
        # Reset after the first layer: a custom conv_func applies to the
        # first convolution only; the rest use a plain Conv2d.
        conv_func = torch.nn.Conv2d
        act = self.activations_factory.create(activation)
        layers.append(act)
        in_channels = conv['filters']
        if norm_func_name == 'layer_norm':
            # torch_ext.LayerNorm2d: rl_games' channel-wise LayerNorm for 4-D tensors.
            layers.append(torch_ext.LayerNorm2d(in_channels))
        elif norm_func_name == 'batch_norm':
            layers.append(torch.nn.BatchNorm2d(in_channels))
    return nn.Sequential(*layers)
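Because `conv_func` is reset to `torch.nn.Conv2d` inside the loop, only the first convolution uses a custom factory. A sketch with a hypothetical spectral-normalized first layer; any callable accepting the same keyword arguments works:

import torch
import torch.nn as nn

def first_conv(in_channels, out_channels, kernel_size, stride, padding):
    # Hypothetical custom first layer; later layers fall back to nn.Conv2d.
    return nn.utils.spectral_norm(
        nn.Conv2d(in_channels, out_channels, kernel_size,
                  stride=stride, padding=padding))

convs = [
    {'filters': 32, 'kernel_size': 8, 'strides': 4, 'padding': 0},
    {'filters': 64, 'kernel_size': 4, 'strides': 2, 'padding': 0},
]

conv_func, layers, in_channels = first_conv, [], 4
for conv in convs:
    layers.append(conv_func(in_channels=in_channels,
                            out_channels=conv['filters'],
                            kernel_size=conv['kernel_size'],
                            stride=conv['strides'],
                            padding=conv['padding']))
    conv_func = nn.Conv2d  # every layer after the first uses a plain Conv2d
    layers.append(nn.ReLU())
    in_channels = conv['filters']
net = nn.Sequential(*layers)
print(net(torch.zeros(1, 4, 84, 84)).shape)  # torch.Size([1, 64, 9, 9])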