Code example #1
0
    def __init__(self, input_size, outputclass):
        """Build the stacked-IndRNN encoder.

        The stack consists of, per layer: a dense input transform, a
        batch-normalization-over-time module, and an IndRNN recurrent
        layer; a final linear layer maps to the output classes.

        Args:
            input_size: feature dimension of the raw input.
            outputclass: number of output classes.
        """
        super(stackedIndRNN_encoder, self).__init__()
        hidden_size = args.hidden_size

        # Dense input transforms: the first maps input_size -> hidden,
        # the remaining args.num_layers - 1 map hidden -> hidden.
        in_sizes = [input_size] + [hidden_size] * (args.num_layers - 1)
        self.DIs = nn.ModuleList(
            nn.Linear(sz, hidden_size, bias=True) for sz in in_sizes)

        # One batch-norm-over-time module per layer.
        self.BNs = nn.ModuleList(
            Batch_norm_overtime(hidden_size, args.seq_len)
            for _ in range(args.num_layers))

        # One IndRNN recurrent layer per stack level.
        self.RNNs = nn.ModuleList(
            IndRNN(hidden_size=hidden_size)
            for _ in range(args.num_layers))

        self.lastfc = nn.Linear(hidden_size, outputclass, bias=True)
        self.init_weights()
Code example #2
0
    def __init__(self, outputclass):
        """Build the stacked-IndRNN encoder for token inputs.

        An embedding layer feeds a stack where each layer applies a
        dense transform, step-wise batch normalization, and an IndRNN
        recurrent layer; a final linear layer projects back to the
        vocabulary.

        Args:
            outputclass: vocabulary size — used both as the number of
                embedding rows and as the number of output classes.
        """
        super(stackedIndRNN_encoder, self).__init__()
        hidden = args.hidden_size

        self.embed = torch.nn.Embedding(outputclass, hidden)

        # Dense hidden -> hidden transform in front of every layer.
        self.DIs = nn.ModuleList(
            nn.Linear(hidden, hidden, bias=True)
            for _ in range(args.num_layers))

        # Step-wise batch normalization, one module per layer.
        self.BNs = nn.ModuleList(
            Batch_norm_step(hidden, args.seq_len)
            for _ in range(args.num_layers))

        # One IndRNN recurrent layer per stack level.
        self.RNNs = nn.ModuleList(
            IndRNN(hidden_size=hidden)
            for _ in range(args.num_layers))

        self.lastfc = nn.Linear(hidden, outputclass)
        self.init_weights()
        self.outputclass = outputclass
Code example #3
0
 def __init__(self, hidden_size, seq_len, bn_location='bn_before'):
     """Compose an IndRNN layer with batch normalization.

     Args:
         hidden_size: width of the IndRNN layer.
         seq_len: sequence length. NOTE(review): not used here — the
             BN module reads ``args.seq_len`` instead; confirm this
             discrepancy with callers.
         bn_location: ``'bn_before'`` to normalize before the
             recurrent connection, ``'bn_after'`` to normalize after.

     Raises:
         ValueError: if ``bn_location`` is neither ``'bn_before'``
             nor ``'bn_after'``.
     """
     super(IndRNNwithBN, self).__init__()
     # Validate up front: the original used `assert 2 == 3`, which is
     # stripped under `python -O` and raises an opaque AssertionError.
     if bn_location not in ('bn_before', 'bn_after'):
         raise ValueError(
             'Please select a batch normalization mode: '
             "'bn_before' or 'bn_after' (got %r)." % (bn_location,))
     if bn_location == "bn_before":
         self.add_module('norm1', BN(hidden_size, args.seq_len))
     self.add_module('indrnn1', IndRNN(hidden_size))
     if bn_location == "bn_after":
         self.add_module('norm1', BN(hidden_size, args.seq_len))
Code example #4
0
 def __init__(self, hidden_size, seq_len, bn_location='bn_before'):
     """Compose an IndRNN layer with batch normalization.

     Args:
         hidden_size: width of the IndRNN layer.
         seq_len: sequence length. NOTE(review): not used here — the
             BN module reads ``args.seq_len`` instead; confirm this
             discrepancy with callers.
         bn_location: ``'bn_before'`` to place batch normalization
             before the recurrent connection, ``'bn_after'`` to place
             it after.

     Raises:
         ValueError: if ``bn_location`` is neither ``'bn_before'``
             nor ``'bn_after'``.
     """
     super(IndRNNwithBN, self).__init__()
     # Fail fast on a bad mode: the original's `assert 2 == 3` is
     # stripped under `python -O` and raises an opaque AssertionError.
     if bn_location not in ('bn_before', 'bn_after'):
         raise ValueError(
             'Please select a batch normalization mode: '
             "'bn_before' or 'bn_after' (got %r)." % (bn_location,))
     if bn_location == 'bn_before':
         self.add_module('norm1', BN(hidden_size, args.seq_len))
         # BUG FIX: the original guarded this advisory with
         # `bn_location != 'bn_before'`, printing the bn_before notice
         # exactly when bn_before was NOT selected. The condition was
         # inverted; the message belongs to the bn_before branch.
         print('You are selecting the bn_before mode, where batch normalization is used before the recurrent connection.\
         It generally provides a stable but worse results than the bn_after mode. So use bn_after first.')
     self.add_module('indrnn1', IndRNN(hidden_size))
     if bn_location == 'bn_after':
         self.add_module('norm1', BN(hidden_size, args.seq_len))