Example #1
 def __init__(self, state_dim, action_dim, seq_len):
     super(Actor, self).__init__()
     # self.tcn = TCN(state_dim, state_dim, num_channels, kernel_size=kernel_size, dropout=dropout).to(device)  # predicts all states
     self.bn1 = nn.BatchNorm1d(state_dim)
     # assumption: input_channels, num_channels, kernel_size and dropout are
     # module-level hyperparameters (they are not constructor arguments here)
     self.tcn = TemporalConvNet(input_channels, num_channels, kernel_size=kernel_size, dropout=dropout)
     self.fc1 = nn.Linear(num_channels[-1], state_dim)
     self.fc2 = nn.Linear(num_channels[-1], action_dim)
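No forward pass is shown for this actor; a minimal sketch, assuming a (N, state_dim, seq_len) input, a last-timestep readout of the TCN features, and a tanh-squashed action head (all assumptions, not part of the snippet):

 def forward(self, state):
     # state: (N, state_dim, seq_len); use the TCN features at the last timestep
     y = self.tcn(state)[:, :, -1]
     next_state = self.fc1(y)           # fc1 maps features back to state_dim
     action = torch.tanh(self.fc2(y))   # assumption: actions squashed to [-1, 1]
     return action, next_state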
Example #2
    def __init__(self,
                 input_size,
                 output_size,
                 num_channels,
                 kernel_size=2,
                 dropout=0.3,
                 emb_dropout=0.1,
                 tied_weights=False):
        super(TCN, self).__init__()
        self.encoder = nn.Embedding(output_size, input_size)
        self.tcn = TemporalConvNet(input_size,
                                   num_channels,
                                   kernel_size,
                                   dropout=dropout)

        self.decoder = nn.Linear(num_channels[-1], output_size)
        if tied_weights:
            if num_channels[-1] != input_size:
                raise ValueError(
                    'When using the tied flag, nhid must be equal to emsize')
            self.decoder.weight = self.encoder.weight
            print("Weight tied")
        self.drop = nn.Dropout(emb_dropout)
        self.emb_dropout = emb_dropout
        self.init_weights()
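The matching forward pass is not part of the snippet; a minimal sketch, assuming (N, L) token indices and the usual (N, C, L) layout expected by TemporalConvNet:

    def forward(self, input):
        # input: (N, L) token indices
        emb = self.drop(self.encoder(input))               # (N, L, input_size)
        y = self.tcn(emb.transpose(1, 2)).transpose(1, 2)  # TCN works on (N, C, L)
        return self.decoder(y).contiguous()                # (N, L, output_size) logits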
Example #3
 def __init__(self,
              input_size,
              output_size,
              num_channels,
              kernel_size=2,
              dropout=0.3,
              emb_dropout=0.1,
              tied_weights=False):
     super(TCN, self).__init__()
     # Feed the one-hot encoded tokens into the encoder, producing a batch of word-embedding vectors
     # output_size is the vocabulary size; input_size is the embedding dimension
     self.encoder = nn.Embedding(output_size, input_size)
     self.tcn = TemporalConvNet(input_size,
                                num_channels,
                                kernel_size,
                                dropout=dropout)
     # Final linear projection: from the last conv layer's channel count to the vocabulary
     self.decoder = nn.Linear(num_channels[-1], output_size)
     if tied_weights:
         # Optionally share the encoder and decoder weights (weight tying)
         # Tying requires the hidden size to equal the embedding dimension,
         # so the predicted vector can be interpreted as a word embedding
         if num_channels[-1] != input_size:
             raise ValueError(
                 'When using the tied flag, nhid must be equal to emsize')
         self.decoder.weight = self.encoder.weight
         print("Weight tied")
     self.drop = nn.Dropout(emb_dropout)
     # Dropout applied to the input word embeddings
     self.emb_dropout = emb_dropout
     self.init_weights()
Example #4
 def __init__(self, input_size, output_size, num_channels, kernel_size, dropout):
     super(TCN, self).__init__()
     # Build the TCN backbone
     self.tcn = TemporalConvNet(input_size, num_channels, kernel_size=kernel_size, dropout=dropout)
     # Map the TCN output to the target dimensionality with a linear layer
     self.linear = nn.Linear(num_channels[-1], output_size)
     self.init_weights()
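A short usage sketch (the input shapes and the last-timestep readout are assumptions; the snippet does not include a forward):

model = TCN(input_size=2, output_size=1, num_channels=[30] * 4, kernel_size=7, dropout=0.2)
x = torch.randn(16, 2, 128)           # (N, C_in, L)
features = model.tcn(x)               # (N, 30, 128)
y = model.linear(features[:, :, -1])  # (N, 1): prediction from the last timestep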
Example #5
    def __init__(self,
                 input_size,
                 output_size,
                 num_channels,
                 kernel_size=2,
                 dropout=0.3,
                 emb_dropout=0.1,
                 tied_weights=False):

        # input_size = 600
        # output_size = 10000  (vocab_size: number of distinct words)
        # num_channels = [600, 600, 600, 600]
        # kernel_size = 3
        # dropout = 0.45
        # emb_dropout = 0.25
        # tied_weights = True

        super(TCN, self).__init__()
        self.encoder = nn.Embedding(output_size,
                                    input_size)  # 10000 x 600 embedding matrix, randomly initialized
        self.tcn = TemporalConvNet(input_size,
                                   num_channels,
                                   kernel_size,
                                   dropout=dropout)

        self.decoder = nn.Linear(num_channels[-1], output_size)
        if tied_weights:
            if num_channels[-1] != input_size:
                raise ValueError(
                    'When using the tied flag, nhid must be equal to emsize')
            self.decoder.weight = self.encoder.weight
            print("Weight tied")
        self.drop = nn.Dropout(emb_dropout)
        self.emb_dropout = emb_dropout
        self.init_weights()
Example #6
 def __init__(self, input_size, output_size, num_channels, kernel_size,
              dropout):
     super(TCN, self).__init__()
     self.tcn = TemporalConvNet(input_size,
                                num_channels,
                                kernel_size=kernel_size,
                                dropout=dropout)
     self.linear = nn.Linear(num_channels[-1], output_size)
Example #7
 def __init__(self, input_size, output_size, num_channels, kernel_size,
              dropout, use_fixup_init):
     super(TCN, self).__init__()
     self.tcn = TemporalConvNet(input_size,
                                num_channels,
                                kernel_size,
                                dropout=dropout,
                                use_fixup_init=use_fixup_init)
     self.linear = nn.Linear(num_channels[-1], output_size)
     self.sig = nn.Sigmoid()
Example #8
    def __init__(self,
                 args,
                 num_inputs,
                 num_actions,
                 hidden_size,
                 action_range=1.,
                 init_w=3e-3,
                 log_std_min=-20,
                 log_std_max=2):
        super(PolicyNetwork, self).__init__()

        # # Example of using Sequential
        # model = nn.Sequential(
        #         nn.Conv2d(1,20,5),
        #         nn.ReLU(),
        #         nn.Conv2d(20,64,5),
        #         nn.ReLU()
        #         )

        self.log_std_min = log_std_min
        self.log_std_max = log_std_max

        self.num_inputs = num_inputs
        # self.output_dim = num_actions
        # self.hidden_dim = hidden_size

        # self.bn1 = nn.BatchNorm1d(num_inputs)
        # assumption: the TCN consumes the num_inputs state channels, and the
        # kernel size is provided via args alongside nhid, levels and dropout
        num_channels = [args['nhid']] * args['levels']
        self.tcn = TemporalConvNet(num_inputs,
                                   num_channels,
                                   kernel_size=args['kernel_size'],
                                   dropout=args['dropout'])
        # self.fc1 = nn.Linear(num_channels[-1], hidden_size)

        self.model = nn.Sequential(
            nn.Linear(num_channels[-1], hidden_size),  # project TCN features to hidden_size
            # nn.BatchNorm1d(hidden_size, affine=True, track_running_stats=True),  # BatchNorm sized to the hidden units
            nn.LayerNorm(hidden_size, elementwise_affine=True),
            nn.ReLU(),  # activation

            # nn.Linear(hidden_size, hidden_size),
            # nn.LayerNorm(hidden_size, elementwise_affine=True),
            # nn.ReLU(),
        )

        self.mean_linear = nn.Linear(hidden_size, num_actions)
        self.mean_linear.weight.data.uniform_(-init_w, init_w)
        self.mean_linear.bias.data.uniform_(-init_w, init_w)

        self.log_std_linear = nn.Linear(hidden_size, num_actions)
        self.log_std_linear.weight.data.uniform_(-init_w, init_w)
        self.log_std_linear.bias.data.uniform_(-init_w, init_w)

        self.action_range = action_range
        self.num_actions = num_actions
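No forward is shown; a plausible sketch, assuming SAC-style conventions (a (N, num_inputs, L) state sequence, last-timestep TCN features, and clamping of the log standard deviation):

    def forward(self, state):
        x = self.tcn(state)[:, :, -1]  # TCN features at the last timestep
        x = self.model(x)
        mean = self.mean_linear(x)
        log_std = self.log_std_linear(x).clamp(self.log_std_min, self.log_std_max)
        return mean, log_std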
Example #9
 def __init__(self, input_size, output_size, num_channels, kernel_size,
              dropout):
     # input_size = 88, output_size = 88, num_channels = [150] * 4, kernel_size = 5
     super(TCN, self).__init__()
     # With Batch size as N, Seq len as L and Number of features as C, initial data is N x L x C (C = 88)
     self.tcn = TemporalConvNet(input_size,
                                num_channels,
                                kernel_size,
                                dropout=dropout)
     self.linear = nn.Linear(num_channels[-1], output_size)
     self.sig = nn.Sigmoid()
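Given the N x L x C comment above, the forward pass would transpose around the TCN; a minimal sketch under that assumption:

 def forward(self, x):
     # x: (N, L, C) with C = 88; TemporalConvNet expects (N, C, L)
     y = self.tcn(x.transpose(1, 2)).transpose(1, 2)
     return self.sig(self.linear(y))  # per-timestep note probabilities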
Example #10
def build_tcn(inputs, tcn_dropout, kernel_size, num_channels):
    # inputs: input tensor (a TF1 placeholder)
    # the dropout rate can itself be a placeholder, e.g.
    # tf.placeholder_with_default(0., shape=())

    # num_channels = [hidden1, hidden2, ..., output_channel]
    tcn = TemporalConvNet(num_channels,
                          stride=1,
                          kernel_size=kernel_size,
                          dropout=tcn_dropout)
    outputs = tcn(inputs)
    return outputs
Example #11
File: model.py Project: shivadb/TCN
class TCN(nn.Module):
    def __init__(self, input_size=1, output_size=10, num_channels=None, kernel_size=7, dropout=0.05):
        super(TCN, self).__init__()
        num_channels = [25] * 8 if num_channels is None else num_channels
        self.tcn = TemporalConvNet(input_size, num_channels, kernel_size=kernel_size, dropout=dropout)
        self.linear = nn.Linear(num_channels[-1], output_size)
        self.receptive_field = self.tcn.receptive_field

    def forward(self, inputs):
        """Inputs have to have dimension (N, C_in, L_in)"""
        y1 = self.tcn(inputs)  # input should have dimension (N, C, L)
        o = self.linear(y1[:, :, -1])
        return F.log_softmax(o, dim=1)

    def single_forward(self, input):
        return F.log_softmax(self.linear(self.tcn.single_forward(input).squeeze(dim=2)), dim=1)
    
    def fast_inference(self, batch_size):
        self.tcn.fast_inference(batch_size)

    def compare(self, inputs):
        y1 = self.tcn(inputs)
        y2 = torch.zeros(y1.size()).to(y1.device)
        for i in range(inputs.size()[2]):
            y2[:,:,i] = self.tcn.single_forward(inputs[:,:,i].view(inputs.size()[0], inputs.size()[1], 1)).squeeze()
        
        return (y1 == y2).all().item()
    
    def reset_cache(self):
        self.tcn.reset_cache()
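A short usage sketch for the cached single-step path (the input shape and call order are assumptions):

model = TCN(input_size=1, output_size=10, kernel_size=7)
x = torch.randn(4, 1, 64)           # (N, C_in, L)
model.fast_inference(batch_size=4)  # pre-allocate the per-layer caches
print(model.compare(x))             # True when step-by-step inference matches the batched pass
model.reset_cache()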
Example #12
 def __init__(self, input_size, output_size, num_channels, kernel_size,
              dropout):
     # input_size = 1, output_size = 10, num_channels = [nhid] * levels = [10] * 8, kernel_size = 8
     super(TCN, self).__init__()
     # With Batch size as N, Seq len as L and 1 feature, initial data is 32 x 1 x 1020
     self.tcn = TemporalConvNet(input_size,
                                num_channels,
                                kernel_size=kernel_size,
                                dropout=dropout)
     # input to linear is N x L x nhid = 32 x 1020 x 10
     self.linear = nn.Linear(num_channels[-1], output_size)
     # output of linear is N x L x output_size = 32 x 1020 x 10
     self.init_weights()
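A minimal forward sketch consistent with the shape comments in this constructor (the snippet itself does not include one):

 def forward(self, x):
     # x: (N, 1, L) = 32 x 1 x 1020
     y = self.tcn(x).transpose(1, 2)  # (N, L, nhid) = 32 x 1020 x 10
     return self.linear(y)            # (N, L, output_size)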
Example #13
 def __init__(self, input_size, output_size, num_channels, kernel_size,
              dropout):
     # input_size = 2, output_size = 1, num_channels = [nhid] * levels = [30] * 4, kernel_size = 7
     super(TCN, self).__init__()
     # With Batch size as N, Seq len as L and 2 features, initial data is N x 2 x L
     self.tcn = TemporalConvNet(input_size,
                                num_channels,
                                kernel_size=kernel_size,
                                dropout=dropout)
     # input to linear is N x nhid x l = N x 30 x 1 where l is last element of sequence
     self.linear = nn.Linear(num_channels[-1], output_size)
     # output of linear is N x 1 x 1
     self.init_weights()
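A minimal forward sketch consistent with the comments above, reading out the features of the last sequence element:

 def forward(self, x):
     # x: (N, 2, L); regress from the last element's features
     y1 = self.tcn(x)                  # (N, 30, L)
     return self.linear(y1[:, :, -1])  # (N, 1)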
Example #14
 def __init__(self, input_size, output_size, num_channels, window_size,
              num_fingers, kernel_size, dropout):
     super(TCN, self).__init__()
     self.tcn = TemporalConvNet(input_size,
                                num_channels,
                                kernel_size=kernel_size,
                                dropout=dropout)
     self.linear1 = nn.Linear(
         (8 + (window_size - 1) * num_fingers) * num_channels[-1],
         (8 + (window_size - 1) * num_fingers) * num_channels[-1])
     self.linear2 = nn.Linear(
         (8 + (window_size - 1) * num_fingers) * num_channels[-1],
         output_size)
Example #15
 def __init__(self, output_size: int, seq_length: int, num_channels: list,
              kernel_size: int, dropout: float, dt):
     super(LowResolutionTCN, self).__init__()
     self.tcn = TemporalConvNet(output_size,
                                num_channels,
                                kernel_size=kernel_size,
                                dropout=dropout)
     self.linear1 = nn.Linear(num_channels[-1] + 2, output_size)
     # self.linear2 = nn.Linear(output_size, output_size)
     # self.fc = nn.Sequential(self.linear1, nn.Sigmoid(), self.linear2)
     self.euler_clock = TimePassing(dt)
     self.output_size = output_size
     self.init_weights()
Example #16
    def __init__(self,
                 word2vec_model,
                 embedding_dim,
                 hidden_dim,
                 output_dim,
                 num_layers=1,
                 dropout=0.2,
                 use_gdelt=False,
                 use_TCN=False,
                 effective_history=91):
        """

        :param word2vec_model: the actual model that would embed the tweets
        :param embedding_dim:
        :param hidden_dim:
        :param num_layers:
        :param dropout:
        """
        super().__init__()

        self.word2vec_model = word2vec_model
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.use_gdelt = use_gdelt

        self.use_TCN = use_TCN

        if use_TCN:
            num_levels, kernel_size = get_TCN_params_from_effective_history(
                effective_history)
            num_channels = [hidden_dim] * num_levels
            self.temporal_extractor = TemporalConvNet(embedding_dim,
                                                      num_channels,
                                                      kernel_size, dropout)
        else:
            self.temporal_extractor = nn.LSTM(embedding_dim,
                                              hidden_dim,
                                              num_layers,
                                              batch_first=True,
                                              dropout=dropout)

        # at the moment this is without considering additional info about the tweets like the number of mentions, etc...
        # also the structure is arbitrary at the moment
        self.num_handmade_features = 4
        self.feature_extractor = nn.Linear(
            hidden_dim + self.num_handmade_features, output_dim)

        self.means = torch.Tensor([1.0901, 0.0135, 0.8929, 2.8000])
        self.stds = torch.Tensor([17.6291, 0.1156, 0.3092, 1.9421])
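The forward pass is not shown; a heavily hedged sketch, where the argument names tweets_emb and handmade_features are illustrative and the normalization of the handmade features by the stored means/stds is an assumption:

    def forward(self, tweets_emb, handmade_features):
        # tweets_emb: (N, L, embedding_dim) sequence of embedded tweets
        if self.use_TCN:
            feats = self.temporal_extractor(tweets_emb.transpose(1, 2))[:, :, -1]
        else:
            out, _ = self.temporal_extractor(tweets_emb)
            feats = out[:, -1, :]
        # assumption: handmade features are standardized with the stored statistics
        handmade = (handmade_features - self.means) / self.stds
        return self.feature_extractor(torch.cat([feats, handmade], dim=1))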
Example #17
 def __init__(self,
              input_size,
              output_size,
              num_channels,
              kernel_size=2,
              dropout=0.2,
              emb_dropout=0.2):
     super(TCN, self).__init__()
     self.encoder = nn.Embedding(output_size, input_size)
     self.tcn = TemporalConvNet(input_size,
                                num_channels,
                                kernel_size=kernel_size,
                                dropout=dropout)
     self.decoder = nn.Linear(input_size, output_size)
     self.decoder.weight = self.encoder.weight  # always tied; requires num_channels[-1] == input_size
     self.drop = nn.Dropout(emb_dropout)
     self.init_weights()
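Several of these constructors end with self.init_weights(), which is never shown; a minimal sketch, assuming the small-normal initialization commonly used in TCN language models:

 def init_weights(self):
     # small-normal init for the embedding; decoder.weight is tied to it above
     self.encoder.weight.data.normal_(0, 0.01)
     self.decoder.bias.data.fill_(0)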