Example #1
 def __init__(self, c_in=512, c_out=513, c_h=512, emb_size=128, ns=0.2):
     super().__init__()
     self.ns = ns
     self.conv1 = Conv1d(c_in + emb_size, 2 * c_h, kernel_size=3)
     self.conv2 = Conv1d(c_h + emb_size, c_h, kernel_size=3)
     self.conv3 = Conv1d(c_h + emb_size, 2 * c_h, kernel_size=3)
     self.conv4 = Conv1d(c_h + emb_size, c_h, kernel_size=3)
     self.conv5 = Conv1d(c_h + emb_size, 2 * c_h, kernel_size=3)
     self.conv6 = Conv1d(c_h + emb_size, c_h, kernel_size=3)
     self.dense1 = Linear(c_h + emb_size, c_h)
     self.dense2 = Linear(c_h + emb_size, c_h)
     self.dense3 = Linear(c_h + emb_size, c_h)
     self.dense4 = Linear(c_h + emb_size, c_h)
     self.RNN = GRU(
         input_size=c_h + emb_size,
         hidden_size=c_h // 2,
         num_layers=1,
         bidirectional=True,
     )
     self.dense5 = Linear(2 * c_h + emb_size, c_h)
     self.linear = Linear(c_h, c_out)
     # normalization layer
     self.ins_norm1 = InstanceNorm1d(c_h)
     self.ins_norm2 = InstanceNorm1d(c_h)
     self.ins_norm3 = InstanceNorm1d(c_h)
     self.ins_norm4 = InstanceNorm1d(c_h)
     self.ins_norm5 = InstanceNorm1d(c_h)
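
A note on the normalization layers above (a general torch.nn fact, not something stated in the example): a plain InstanceNorm1d(c_h) uses the defaults affine=False and track_running_stats=False, so each ins_norm layer has no learnable scale/shift and keeps no running statistics.

from torch.nn import InstanceNorm1d

norm = InstanceNorm1d(512)  # same call as the ins_norm layers above
# With the defaults (affine=False, track_running_stats=False) the layer is
# purely functional: it simply normalizes each (sample, channel) slice.
print(sum(p.numel() for p in norm.parameters()))  # 0: no learnable parameters
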
Example #2
    def __init__(self,
                 channels: List[int],
                 norm: Optional[str] = None,
                 bias: bool = True,
                 dropout: float = 0.):
        """
        Construct a MLP model
        
        :param channels: list of hidden channels
        :param norm: type of normalization
        :bias: bias
        :dropout: ratio of dropout
        """
        m = []
        for i in range(1, len(channels)):
            m.append(Linear(channels[i - 1], channels[i], bias))

            if i < len(channels) - 1:
                if norm and norm == 'batch':
                    m.append(BatchNorm1d(channels[i], affine=True))
                elif norm and norm == 'layer':
                    m.append(LayerNorm(channels[i], elementwise_affine=True))
                elif norm and norm == 'instance':
                    m.append(InstanceNorm1d(channels[i], affine=False))
                elif norm:
                    raise NotImplementedError(
                        f'Normalization layer "{norm}" not supported.')
                m.append(ReLU())
                m.append(Dropout(dropout))

        super(MLP, self).__init__(*m)
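
A hedged usage sketch of the MLP above (channel sizes and input are illustrative, and it assumes the class and the torch.nn layers it uses are in scope). Because the assembled layer list is passed to the Sequential base class, the result is a flat stack with nothing appended after the final Linear:

import torch

mlp = MLP([64, 128, 128, 10], norm='batch', dropout=0.1)
# Expands to: Linear(64, 128), BatchNorm1d(128), ReLU(), Dropout(0.1),
#             Linear(128, 128), BatchNorm1d(128), ReLU(), Dropout(0.1),
#             Linear(128, 10)   <- the output layer gets no norm/activation/dropout
x = torch.randn(32, 64)
out = mlp(x)  # shape: (32, 10)
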
Example #3
 def __init__(self, c_in=512, c_h=512, n_class=8, dp=0.1, ns=0.01):
     super().__init__()
     self.dp, self.ns = dp, ns
     self.conv1 = Conv1d(c_in, c_h, kernel_size=5)
     self.conv2 = Conv1d(c_h, c_h, kernel_size=5)
     self.conv3 = Conv1d(c_h, c_h, kernel_size=5)
     self.conv4 = Conv1d(c_h, c_h, kernel_size=5)
     self.conv5 = Conv1d(c_h, c_h, kernel_size=5)
     self.conv6 = Conv1d(c_h, c_h, kernel_size=5)
     self.conv7 = Conv1d(c_h, c_h, kernel_size=5)
     self.conv8 = Conv1d(c_h, c_h, kernel_size=5)
     self.conv9 = Conv1d(c_h, n_class, kernel_size=16)
     self.drop1 = Dropout(p=dp)
     self.drop2 = Dropout(p=dp)
     self.drop3 = Dropout(p=dp)
     self.drop4 = Dropout(p=dp)
     self.ins_norm1 = InstanceNorm1d(c_h)
     self.ins_norm2 = InstanceNorm1d(c_h)
     self.ins_norm3 = InstanceNorm1d(c_h)
     self.ins_norm4 = InstanceNorm1d(c_h)
Example #4
    def __init__(self, params):
        super(VerticalAttention, self).__init__()
        self.att_fc_size = params["att_fc_size"]
        self.features_size = params["features_size"]
        self.use_location = params["use_location"]
        self.use_coverage_vector = params["use_coverage_vector"]
        self.coverage_mode = params["coverage_mode"]
        self.use_hidden = params["use_hidden"]
        self.min_height = params["min_height_feat"]
        self.min_width = params["min_width_feat"]
        self.stop_mode = params["stop_mode"]

        self.ada_pool = AdaptiveMaxPool2d((None, self.min_width))
        self.dense_width = Linear(self.min_width, 1)

        self.dense_enc = Linear(self.features_size, self.att_fc_size)
        self.dense_align = Linear(self.att_fc_size, 1)

        if self.stop_mode == "learned":
            self.ada_pool_height = AdaptiveMaxPool1d(self.min_height)
            self.conv_decision = Conv1d(self.att_fc_size,
                                        self.att_fc_size,
                                        kernel_size=5,
                                        padding=2)
            self.dense_height = Linear(self.min_height, 1)
            if self.use_hidden:
                self.dense_decision = Linear(
                    params["hidden_size"] + self.att_fc_size, 2)
            else:
                self.dense_decision = Linear(self.att_fc_size, 2)
        in_ = 0
        if self.use_location:
            in_ += 1
        if self.use_coverage_vector:
            in_ += 1

        self.norm = InstanceNorm1d(in_, track_running_stats=False)
        self.conv_block = Conv1d(in_, 16, kernel_size=15, padding=7)
        self.dense_conv_block = Linear(16, self.att_fc_size)

        if self.use_hidden:
            self.hidden_size = params["hidden_size"]
            self.dense_hidden = Linear(self.hidden_size, self.att_fc_size)

        self.dropout = Dropout(params["att_dropout"])

        self.h_features = None
Example #5
 def __init__(self, features, ns):
     super().__init__()
     in_features = features[:-1]
     out_features = features[1:]
     layers = sum(
         (
             (
                 Linear(in_features=in_size, out_features=out_size),
                 InstanceNorm1d(num_features=out_size),
                 LeakyReLU(negative_slope=ns),
                 Dropout(0.5),
             )
             for (in_size, out_size) in zip(in_features, out_features)
         ),
         (),
     )
     self.layers = ModuleList(layers)
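
Because the blocks are stored in a ModuleList rather than a Sequential, they are registered as submodules but not applied automatically; a hypothetical forward (an assumption for illustration, not taken from the source) would iterate them in order:

 def forward(self, x):
     # Hypothetical: apply each Linear / InstanceNorm1d / LeakyReLU / Dropout
     # block in the order it was built in __init__.
     for layer in self.layers:
         x = layer(x)
     return x
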
Example #6
def create_norm(num_features):
    """Creates a normalization layer.

    Note:
        The normalization is configured via :meth:`pytorch_layers.Config.norm`
        and :attr:`pytorch_layers.Config.norm_kwargs`, and the spatial dimension
        is configured via :attr:`pytorch_layers.Config.dim`.

    Args:
        num_features (int): The number of input channels.

    Returns:
        torch.nn.Module: The created normalization layer.

    """
    config = Config()
    if config.norm_mode is NormMode.GROUP:
        from torch.nn import GroupNorm
        kwargs = config.norm_kwargs.copy()
        num_groups = kwargs.pop('num_groups')
        return GroupNorm(num_groups, num_features, **kwargs)
    elif config.norm_mode is NormMode.NONE:
        from torch.nn import Identity
        return Identity()
    if config.dim is Dim.ONE:
        if config.norm_mode is NormMode.INSTANCE:
            from torch.nn import InstanceNorm1d
            return InstanceNorm1d(num_features, **config.norm_kwargs)
        elif config.norm_mode is NormMode.BATCH:
            from torch.nn import BatchNorm1d
            return BatchNorm1d(num_features, **config.norm_kwargs)
    elif config.dim is Dim.TWO:
        if config.norm_mode is NormMode.INSTANCE:
            from torch.nn import InstanceNorm2d
            return InstanceNorm2d(num_features, **config.norm_kwargs)
        elif config.norm_mode is NormMode.BATCH:
            from torch.nn import BatchNorm2d
            return BatchNorm2d(num_features, **config.norm_kwargs)
    elif config.dim is Dim.THREE:
        if config.norm_mode is NormMode.INSTANCE:
            from torch.nn import InstanceNorm3d
            return InstanceNorm3d(num_features, **config.norm_kwargs)
        elif config.norm_mode is NormMode.BATCH:
            from torch.nn import BatchNorm3d
            return BatchNorm3d(num_features, **config.norm_kwargs)
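
For reference, the same dispatch can be written without the pytorch_layers Config / NormMode / Dim machinery. The sketch below is an illustrative, self-contained equivalent that takes the mode and dimensionality as plain arguments (the function name and defaults are not from the original):

from torch.nn import (BatchNorm1d, BatchNorm2d, BatchNorm3d, GroupNorm, Identity,
                      InstanceNorm1d, InstanceNorm2d, InstanceNorm3d)


def create_norm_plain(num_features, mode='instance', dim=2, **kwargs):
    """Illustrative stand-in for create_norm with explicit arguments."""
    if mode == 'none':
        return Identity()
    if mode == 'group':
        # GroupNorm needs the group count split out of the remaining kwargs,
        # mirroring the kwargs.pop('num_groups') in the GROUP branch above.
        num_groups = kwargs.pop('num_groups')
        return GroupNorm(num_groups, num_features, **kwargs)
    table = {
        ('instance', 1): InstanceNorm1d, ('batch', 1): BatchNorm1d,
        ('instance', 2): InstanceNorm2d, ('batch', 2): BatchNorm2d,
        ('instance', 3): InstanceNorm3d, ('batch', 3): BatchNorm3d,
    }
    return table[(mode, dim)](num_features, **kwargs)
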
Example #7
 def __init__(self,
              c_in=513,
              c_h1=128,
              c_h2=512,
              c_h3=128,
              ns=0.2,
              dp=0.3,
              emb_size=128):
     super().__init__()
     self.ns = ns
     self.conv1s = ModuleList(
         Conv1d(c_in, c_h1, kernel_size=k) for k in range(1, 8))
     self.conv2 = Conv1d(len(self.conv1s) * c_h1 + c_in,
                         c_h2,
                         kernel_size=1)
     self.emb2 = Sequential(
         AdaptiveAvgPool1d(output_size=1),
         Squeeze(dim=-1),
         Linear(c_h2, emb_size),
     )
     self.conv3 = Conv1d(c_h2, c_h2, kernel_size=5)
     self.conv4 = Conv1d(c_h2, c_h2, kernel_size=5, stride=2)
     self.emb4 = Sequential(
         AdaptiveAvgPool1d(output_size=1),
         Squeeze(dim=-1),
         Linear(c_h2, emb_size),
     )
     self.conv5 = Conv1d(c_h2, c_h2, kernel_size=5)
     self.conv6 = Conv1d(c_h2, c_h2, kernel_size=5, stride=2)
     self.emb6 = Sequential(
         AdaptiveAvgPool1d(output_size=1),
         Squeeze(dim=-1),
         Linear(c_h2, emb_size),
     )
     self.conv7 = Conv1d(c_h2, c_h2, kernel_size=5)
     self.conv8 = Conv1d(c_h2, c_h2, kernel_size=5, stride=2)
     self.emb8 = Sequential(
         AdaptiveAvgPool1d(output_size=1),
         Squeeze(dim=-1),
         Linear(c_h2, emb_size),
     )
     self.dense1 = Linear(c_h2, c_h2)
     self.dense2 = Linear(c_h2, c_h2)
     self.embd2 = Sequential(
         AdaptiveAvgPool1d(output_size=1),
         Squeeze(dim=-1),
         Linear(c_h2, emb_size),
     )
     self.dense3 = Linear(c_h2, c_h2)
     self.dense4 = Linear(c_h2, c_h2)
     self.embd4 = Sequential(
         AdaptiveAvgPool1d(output_size=1),
         Squeeze(dim=-1),
         Linear(c_h2, emb_size),
     )
     self.RNN = GRU(input_size=c_h2,
                    hidden_size=c_h3,
                    num_layers=1,
                    bidirectional=True)
     self.embrnn = Sequential(
         AdaptiveAvgPool1d(output_size=1),
         Squeeze(dim=-1),
         Linear(c_h2, emb_size),
     )
     self.linear = Linear(c_h2 + 2 * c_h3, c_h2)
     # normalization layer
     self.ins_norm1 = InstanceNorm1d(c_h2)
     self.ins_norm2 = InstanceNorm1d(c_h2)
     self.ins_norm3 = InstanceNorm1d(c_h2)
     self.ins_norm4 = InstanceNorm1d(c_h2)
     self.ins_norm5 = InstanceNorm1d(c_h2)
     self.ins_norm6 = InstanceNorm1d(c_h2)
     self.drop1 = Dropout(p=dp)
     self.drop2 = Dropout(p=dp)
     self.drop3 = Dropout(p=dp)
     self.drop4 = Dropout(p=dp)
     self.drop5 = Dropout(p=dp)
     self.drop6 = Dropout(p=dp)
Example #8
 def get_norm(self, constant_size, out_features):
     if not constant_size:
         norm = InstanceNorm1d(out_features)
     else:
         norm = BatchNorm1d(out_features)
     return norm
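
A general torch.nn detail behind this switch (not stated in the snippet): InstanceNorm1d normalizes each sample over its length dimension and expects (batch, features, length) input, which is why it suits inputs whose size is not constant, while BatchNorm1d also accepts plain (batch, features) vectors and normalizes across the batch. Illustrative shapes:

import torch
from torch.nn import BatchNorm1d, InstanceNorm1d

inorm = InstanceNorm1d(64)
bnorm = BatchNorm1d(64)
seq = torch.randn(4, 64, 37)  # (batch, features, length); the length may vary
vec = torch.randn(4, 64)      # fixed-size feature vectors
_ = inorm(seq)                # per-sample, per-channel statistics over the 37 steps
_ = bnorm(vec)                # statistics shared across the 4 samples in the batch
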
Example #9
 def __init__(self, features, ns):
     super().__init__()
     self.T = Linear(in_features=features, out_features=features)
     self.H = Linear(in_features=features, out_features=features)
     self.leaky_relu = LeakyReLU(negative_slope=ns)
     self.norm = InstanceNorm1d(num_features=features)