Example #1
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 gcn_num,
                 bias,
                 poolsize,
                 act,
                 end_cnn=False,
                 **kwargs):
        super().__init__()

        # store kernel size and stride so 'same' padding can be computed later
        self.kernel_size = kernel_size
        self.stride = stride
        self.convs = nn.ModuleList()
        self.maxpools = nn.ModuleList()
        self.pool_stride = poolsize
        self.pool_size = poolsize
        self.activation = create_act(act)
        self.end_cnn = end_cnn
        self.gcn_num = gcn_num
        self.out_channels = out_channels

        # build one conv + max-pool pair per GCN layer
        for _ in range(gcn_num):
            self.convs.append(
                nn.Conv2d(in_channels,
                          out_channels,
                          kernel_size,
                          stride,
                          bias=bias))
            self.convs[-1].apply(self.weights_init)
            self.maxpools.append(nn.MaxPool2d(poolsize, stride=poolsize))
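
# The constructor above assumes two helpers that are not shown in this
# listing. A minimal sketch of what they might look like: the names
# `create_act` and `weights_init` come from the calls above, but the bodies
# here are assumptions, not the original implementations.

import torch.nn as nn

def create_act(act):
    # map an activation name to a module (the supported names are assumed)
    acts = {'relu': nn.ReLU(), 'sigmoid': nn.Sigmoid(),
            'tanh': nn.Tanh(), 'identity': nn.Identity()}
    if act not in acts:
        raise ValueError('unknown activation: {}'.format(act))
    return acts[act]

def weights_init(m):
    # hook for nn.Module.apply(): initialize conv layers in place
    if isinstance(m, nn.Conv2d):
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)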
    def __init__(self, input_dim, feature_map_dim, apply_u, inneract, bias):
        super(NTN, self).__init__()

        self.feature_map_dim = feature_map_dim
        self.apply_u = apply_u
        self.bias = bias
        self.inneract = create_act(inneract)

        # a ParameterDict (rather than a plain dict) so the tensors below are
        # registered as module parameters and picked up by .parameters()/.to()
        self.vars = nn.ParameterDict()

        # NTN terms: V weights the concatenated pair [h_i; h_j], W is the
        # bilinear interaction tensor
        self.vars['V'] = glorot([feature_map_dim, input_dim * 2])
        self.vars['W'] = glorot([feature_map_dim, input_dim, input_dim])
        if self.bias:
            self.vars['b'] = nn.Parameter(
                torch.randn(feature_map_dim).to(FLAGS.device))
        if self.apply_u:
            self.vars['U'] = glorot([feature_map_dim, 1])
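
# `glorot` is likewise assumed rather than shown. A plausible sketch: Xavier
# (Glorot) uniform initialization returning an nn.Parameter, so that the
# assignments into the ParameterDict above register correctly.

import torch
import torch.nn as nn

def glorot(shape):
    # fan-in/fan-out taken from the last two dims, so 2-D and 3-D shapes work
    init_range = (6.0 / (shape[-2] + shape[-1])) ** 0.5
    w = torch.empty(shape).uniform_(-init_range, init_range)
    return nn.Parameter(w)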
    def __init__(self,
                 input_dim,
                 output_dim,
                 activation_type='relu',
                 num_hidden_lyr=2,
                 hidden_channels=None):
        super().__init__()
        self.out_dim = output_dim
        if not hidden_channels:
            hidden_channels = [input_dim for _ in range(num_hidden_lyr)]
        elif len(hidden_channels) != num_hidden_lyr:
            raise ValueError(
                "number of hidden layers should be the same as the length of hidden_channels"
            )
        self.layer_channels = [input_dim] + hidden_channels + [output_dim]
        self.activation = create_act(activation_type)
        # weight_init is assumed to initialize each layer in place and return
        # it, so map() yields the initialized nn.Linear modules
        self.layers = nn.ModuleList(
            list(
                map(self.weight_init, [
                    nn.Linear(self.layer_channels[i],
                              self.layer_channels[i + 1])
                    for i in range(len(self.layer_channels) - 1)
                ])))
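
# `weight_init` (note: singular, unlike `weights_init` above) is mapped over
# the freshly built nn.Linear layers, so it must return the layer it
# initializes. A minimal sketch under that assumption:

import torch.nn as nn

def weight_init(layer):
    # initialize a Linear layer in place and hand it back for the ModuleList
    nn.init.xavier_uniform_(layer.weight)
    nn.init.zeros_(layer.bias)
    return layer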
    def __init__(self, input_dim, inneract):
        super(MNE, self).__init__()

        self.inneract = create_act(inneract)
        self.input_dim = input_dim
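
# Instantiation sketch for the two named classes in this listing. The
# argument values are illustrative assumptions, and the snippets above only
# show __init__, so this is left commented out rather than presented as
# runnable:
#
#   ntn = NTN(input_dim=64, feature_map_dim=16, apply_u=True,
#             inneract='relu', bias=True)
#   mne = MNE(input_dim=64, inneract='sigmoid')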