Example #1
    def get_temporal_layers(self, tcn_in, tcn_channels, num_dilations, tcn_kernel_size, dropout, use_norm):
        # input of TCN should have dimension (N, C, L)
        if self.num_stacks_tcn == 1:
            temporal_layers = TemporalConvNet(tcn_in, (tcn_channels[0],) * num_dilations, tcn_kernel_size, dropout,
                                              use_norm=use_norm)
        else:
            list_layers = []
            for idx in range(self.num_stacks_tcn):
                tcn_in_index = tcn_in if idx == 0 else tcn_channels[idx - 1]
                list_layers.append(
                    TemporalConvNet(tcn_in_index, (tcn_channels[idx],) * num_dilations, tcn_kernel_size, dropout,
                                    use_norm=use_norm))
            temporal_layers = nn.Sequential(*list_layers)

        return temporal_layers
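
Note: the comment above points out that the TCN input has shape (N, C, L). Most of the PyTorch snippets on this page call TemporalConvNet(num_inputs, num_channels, kernel_size, dropout), which matches the reference implementation from the locuslab/TCN repository; the minimal sketch below of that interface is an assumption added for readers who want to run the examples, not part of the source. Several examples use project-specific variants (extra use_norm/activation arguments, TensorFlow ports that omit num_inputs) that this sketch does not cover.

import torch.nn as nn
from torch.nn.utils import weight_norm

class Chomp1d(nn.Module):
    """Trims the trailing padding so each convolution stays causal."""
    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        return x[:, :, :-self.chomp_size].contiguous()

class TemporalBlock(nn.Module):
    """Two dilated causal convolutions with a residual connection."""
    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
        super(TemporalBlock, self).__init__()
        self.net = nn.Sequential(
            weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size, stride=stride,
                                  padding=padding, dilation=dilation)),
            Chomp1d(padding), nn.ReLU(), nn.Dropout(dropout),
            weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size, stride=stride,
                                  padding=padding, dilation=dilation)),
            Chomp1d(padding), nn.ReLU(), nn.Dropout(dropout))
        self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()

    def forward(self, x):
        res = x if self.downsample is None else self.downsample(x)
        return self.relu(self.net(x) + res)

class TemporalConvNet(nn.Module):
    """Stack of TemporalBlocks with dilation doubling at every level."""
    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvNet, self).__init__()
        layers = []
        for i, out_channels in enumerate(num_channels):
            dilation = 2 ** i
            in_channels = num_inputs if i == 0 else num_channels[i - 1]
            layers.append(TemporalBlock(in_channels, out_channels, kernel_size, stride=1,
                                        dilation=dilation,
                                        padding=(kernel_size - 1) * dilation,
                                        dropout=dropout))
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        # x: (N, num_inputs, L) -> (N, num_channels[-1], L); sequence length is preserved
        return self.network(x)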
Example #2
    def __init__(self,
                 input_channel,
                 output_channel,
                 kernel_size,
                 layer_channels,
                 dropout=0.2):
        """
        Temporal Convolution Network with PReLU activations
        Input: torch array [batch x frames x input_size]
        Output: torch array [batch x frames x out_size]

        :param input_channel: num. channels in input
        :param output_channel: num. channels in output
        :param kernel_size: size of convolution kernel (must be odd)
        :param layer_channels: array specifying num. of channels in each layer
        :param dropout: dropout probability
        """

        super(TCNSeqNetwork, self).__init__()
        self.kernel_size = kernel_size
        self.num_layers = len(layer_channels)

        self.tcn = TemporalConvNet(input_channel, layer_channels, kernel_size,
                                   dropout)
        self.output_layer = torch.nn.Conv1d(layer_channels[-1], output_channel,
                                            1)
        self.output_dropout = torch.nn.Dropout(dropout)
        self.net = torch.nn.Sequential(self.tcn, self.output_dropout,
                                       self.output_layer)
        self.init_weights()
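
The forward pass of TCNSeqNetwork is not included in this snippet. A plausible sketch consistent with the [batch x frames x input_size] contract in the docstring (an assumption, not part of the source) would transpose around self.net, since nn.Conv1d and TemporalConvNet operate on (batch, channels, frames):

    # Hypothetical forward for TCNSeqNetwork, consistent with the docstring above:
    def forward(self, x):
        # x: (batch, frames, input_channel) -> Conv1d expects (batch, channels, frames)
        out = self.net(x.transpose(1, 2))
        return out.transpose(1, 2)  # back to (batch, frames, output_channel)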
Example #3
    def __init__(self, tcn_out_channel=64, c3d_path='', tcn_path=''):
        super(C3D_TCN, self).__init__()

        self.c3d = C3D(in_channels=3) 
        self.tcn = TCN(245760, [128,128,64,tcn_out_channel]) # 245760 == 128, 983040 == 256, 384000 == 160

        self.load_models(c3d_path, tcn_path)
Example #4
File: MPM.py Project: chenjie04/MPM
    def __init__(self, nb_users, nb_items, embed_dim, history_size):
        super(Multi_Preference_Model, self).__init__()

        self.nb_users = nb_users
        self.nb_items = nb_items
        self.embed_dim = embed_dim
        self.history_size = history_size

        #user and item embedding
        self.user_embed = nn.Embedding(self.nb_users, self.embed_dim)
        self.item_embed = nn.Embedding(self.nb_items, self.embed_dim)
        self.user_embed.weight.data.normal_(0., 0.01)
        self.item_embed.weight.data.normal_(0., 0.01)

        #TCN
        nhid = self.embed_dim
        level = 5
        num_channels = [nhid] * (level - 1) + [embed_dim]
        self.tcn = TemporalConvNet(num_inputs=self.embed_dim,
                                   num_channels=num_channels,
                                   kernel_size=3,
                                   dropout=0.25)

        #MLP
        mlp_layer_sizes = [self.embed_dim * 2, 128, 64, 32]
        nb_mlp_layers = len(mlp_layer_sizes)
        self.mlp = nn.ModuleList()
        for i in range(1, nb_mlp_layers):
            self.mlp.extend(
                [nn.Linear(mlp_layer_sizes[i - 1], mlp_layer_sizes[i])])

        #Output Module
        self.output_1 = nn.Linear(mlp_layer_sizes[-1] *
                                  (self.history_size + 1),
                                  128,
                                  bias=True)
        self.output_2 = nn.Linear(128, 64, bias=True)
        self.output_3 = nn.Linear(64, 32, bias=True)
        self.output_4 = nn.Linear(32, 1, bias=True)

        def golorot_uniform(layer):
            fan_in, fan_out = layer.in_features, layer.out_features
            limit = np.sqrt(6. / (fan_in + fan_out))
            layer.weight.data.uniform_(-limit, limit)

        def lecunn_uniform(layer):
            fan_in, fan_out = layer.in_features, layer.out_features  # noqa: F841, E501
            limit = np.sqrt(3. / fan_in)
            layer.weight.data.uniform_(-limit, limit)

        for layer in self.mlp:
            if type(layer) != nn.Linear:
                continue
            golorot_uniform(layer)

        lecunn_uniform(self.output_1)
        lecunn_uniform(self.output_2)
        lecunn_uniform(self.output_3)
        lecunn_uniform(self.output_4)
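
For reference, the two helpers above implement the usual Glorot-uniform and LeCun-uniform limits. Plugging in the 128-to-64 output_2 layer gives concrete numbers (a small illustrative check, not part of the source):

import numpy as np
fan_in, fan_out = 128, 64
glorot_limit = np.sqrt(6. / (fan_in + fan_out))   # ~= 0.177, the limit used for the MLP layers
lecun_limit = np.sqrt(3. / fan_in)                # ~= 0.153, the limit used for the output layers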
Example #5
 def __init__(self, input_size, output_size, num_channels, kernel_size,
              dropout):
     super(TCN, self).__init__()
     self.tcn = TemporalConvNet(input_size,
                                num_channels,
                                kernel_size=kernel_size,
                                dropout=dropout)
     self.linear = nn.Linear(num_channels[-1], output_size)
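
This tcn + linear head is the most common pattern on this page. The forward pass is not shown; a hypothetical version in the style of the locuslab/TCN sequence-classification examples, which read the prediction off the last time step, might look like:

    # Hypothetical forward for this tcn + linear pattern (not part of the source):
    def forward(self, x):
        # x: (batch, input_size, seq_len)
        y = self.tcn(x)                  # (batch, num_channels[-1], seq_len)
        return self.linear(y[:, :, -1])  # predict from the last time step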
Example #6
    def __init__(self, output_size, num_channels, kernel_size, dropout):
        super(TCN, self).__init__()
        init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.01)

        self.temporalCN = TemporalConvNet(num_channels,
                                          kernel_size=kernel_size,
                                          dropout=dropout)
        self.linear = tf.keras.layers.Dense(output_size,
                                            kernel_initializer=init)
Example #7
 def __init__(self, input_size, output_size, num_channels, kernel_size,
              dropout):
     super(TCN, self).__init__()
     self.tcn = TemporalConvNet(input_size,
                                num_channels,
                                kernel_size=kernel_size,
                                dropout=dropout)
     #self.linear = nn.Linear(num_channels[-1], output_size)
     self.dense = nn.Linear(num_channels[-1], output_size)
     self.softmax = nn.LogSoftmax(dim=1)  #nn.Softmax(dim=1)
Example #8
 def __init__(self, input_size, output_size, num_channels, kernel_size,
              dropout):  # 1, 10, [25, 25 ... 25], 7.
     super(TCN, self).__init__()
     self.tcn = TemporalConvNet(input_size,
                                num_channels,
                                kernel_size=kernel_size,
                                dropout=dropout)
     # print('Ready to linearize')
     # IP.embed()
     self.relu = nn.ReLU()
     self.linear = nn.Linear(num_channels[-1], output_size)
Example #9
    def __init__(self, input_size, output_size, num_channels, kernel_size,
                 dropout):
        super(TCN, self).__init__()
        self.tcn = TemporalConvNet(input_size,
                                   num_channels,
                                   kernel_size=kernel_size,
                                   dropout=dropout)
        # self.linear = nn.Linear(num_channels[-1], output_size)

        self.critic_linear = nn.Linear(num_channels[-1] * 10, 1)
        self.actor_linear = nn.Linear(num_channels[-1] * 10, output_size)
        self.init_weights()
Example #10
 def __init__(self,
              input_size,
              output_size,
              num_channels,
              kernel_size=3,
              dropout=0.1):
     super(DBS_tcn, self).__init__()
     self.tcn = TemporalConvNet(input_size,
                                num_channels,
                                kernel_size,
                                dropout=dropout).double()
     self.hidden2dbs = nn.Linear(num_channels[-1], output_size,
                                 bias=False).double()
Example #11
    def __init__(self):
        super(TCN00, self).__init__()
        input_size = 1
        output_size = 5
        num_channels = [16] * 4
        kernel_size = 10
        dropout = 0.2

        self.tcn = TemporalConvNet(input_size,
                                   num_channels,
                                   kernel_size,
                                   dropout=dropout)
        self.linear = nn.Linear(num_channels[-1], output_size)
        self.sig = nn.Sigmoid()
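
Because all hyperparameters are fixed here, the receptive field can be computed directly. Assuming the standard two-convolution residual block with dilation 2^i (as in the sketch after Example #1), each level adds 2 * (kernel_size - 1) * 2^i time steps; this is an added illustration, not part of the source:

kernel_size, levels = 10, 4
receptive_field = 1 + sum(2 * (kernel_size - 1) * 2 ** i for i in range(levels))
print(receptive_field)  # 271 time steps of history per output position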
Example #12
class C3D_TCN(nn.Module):

    def __init__(self, tcn_out_channel=64, c3d_path='', tcn_path=''):
        super(C3D_TCN, self).__init__()

        self.c3d = C3D(in_channels=3) 
        self.tcn = TCN(245760, [128,128,64,tcn_out_channel]) # 245760 == 128, 983040 == 256, 384000 == 160

        self.load_models(c3d_path, tcn_path)

    def load_models(self, c3d_path, tcn_path):
        if os.path.exists(c3d_path):
            self.c3d.load_state_dict(torch.load(c3d_path))
        if os.path.exists(tcn_path):
            self.tcn.load_state_dict(torch.load(tcn_path))

    def save_models(self, c3d_path, tcn_path):
        torch.save(self.c3d.state_dict(), c3d_path)
        torch.save(self.tcn.state_dict(), tcn_path)
    
    def forward(self, X):
        N, WC, RGB, F, W, H = X.shape
        shape = [N*WC, RGB, F, W, H]

        X = self.c3d(X.reshape(shape))

        shape = [N, WC, -1]
        X = X.reshape(shape)

        X = torch.transpose(X, 1, 2)
        X = self.tcn(X)
        X = torch.transpose(X, 1, 2)

        shape = [N, -1]
        X = X.reshape(shape)

        return X
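
The forward method above is mostly reshape/transpose bookkeeping: per-window C3D features are laid out as a channels-first sequence for the TCN, then flattened per clip. The toy trace below reproduces just that bookkeeping on dummy tensors (the sizes are illustrative and the C3D/TCN calls are stubbed out):

import torch
N, WC, FEAT, TCN_OUT = 2, 8, 32, 64                    # illustrative sizes only
per_window = torch.randn(N * WC, FEAT)                  # stands in for the C3D output
seq = per_window.reshape(N, WC, -1).transpose(1, 2)     # (N, FEAT, WC): channels first, length WC
out = torch.randn(N, TCN_OUT, WC)                       # stands in for self.tcn(seq)
flat = out.transpose(1, 2).reshape(N, -1)               # (N, WC * TCN_OUT), what forward returns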
Example #13
 def __init__(self,
              num_inputs,
              num_outputs,
              num_channels,
              kernel_size=2,
              dropout=0.2,
              activation='ReLU',
              nonlinear=True):
     super(Sigmoid_TCN, self).__init__()
     self.tcn = TemporalConvNet(num_inputs, num_channels, kernel_size,
                                dropout, activation)
     self.linear = nn.Linear(num_channels[-1], num_outputs)
     self.nonlinear = nonlinear
     if self.nonlinear:
         self.sigmoid = nn.Sigmoid()
Example #14
    def __init__(self, model, input_size, output_channels, num_channels, kernel_size, output_length):
        super(TCN, self).__init__()
        self.output_length = output_length
        self.model = model
        if model == "tcn":
            self.conv = TemporalConvNet(input_size, num_channels, kernel_size=kernel_size, dropout=0)
        elif model == "sequnet":
            self.conv = Sequnet(input_size, num_channels, num_channels[-1], kernel_size=kernel_size, dropout=0, target_output_size=output_length)
        elif model == "sequnet_res":
            self.conv = SequnetRes(input_size, num_channels[0], len(num_channels), num_channels[-1], kernel_size=kernel_size, target_output_size=output_length)
        else:
            raise NotImplementedError("Could not find this model " + model)

        self.linear = nn.Linear(num_channels[-1], output_channels)
        self.init_weights()
Example #15
    def __init__(self, input_size, output_size, num_channels,
                 kernel_size=2, dropout=0.3, emb_dropout=0.1, tied_weights=False):
        super(TCN, self).__init__()
        self.encoder = nn.Embedding(output_size, input_size)
        self.tcn = TemporalConvNet(input_size, num_channels, kernel_size, dropout=dropout)

        self.decoder = nn.Linear(num_channels[-1], output_size)
        if tied_weights:
            if num_channels[-1] != input_size:
                raise ValueError('When using the tied flag, nhid must be equal to emsize')
            self.decoder.weight = self.encoder.weight
            print("Weight tied")
        self.drop = nn.Dropout(emb_dropout)
        self.emb_dropout = emb_dropout
        self.init_weights()
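
As the check above enforces, weight tying only works when the last TCN channel width equals the embedding size. An illustrative instantiation (the values are examples, not from the source, and forward/init_weights are assumed to exist as in the usual word-level language-model setup):

model = TCN(input_size=600, output_size=10000, num_channels=[600] * 4,
            kernel_size=3, dropout=0.45, emb_dropout=0.25, tied_weights=True)
# num_channels[-1] == input_size == 600, so the decoder reuses the encoder's weight matrix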
Example #16
    def __init__(self, config):
        self.config = config

        # the three input placeholders
        self.input_x = tf.placeholder(tf.int32, [None, self.config.seq_length],
                                      name='input_x')
        self.input_y = tf.placeholder(tf.float32,
                                      [None, self.config.num_classes],
                                      name='input_y')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        num_channels = [self.config.hidden_dim] * self.config.levels
        self.tcn = TemporalConvNet(num_channels,
                                   stride=1,
                                   kernel_size=self.config.kernel_size,
                                   dropout=self.config.dropout_keep_prob)
        self.tcn_init()
Example #17
 def __init__(self):
     super(VideoModule, self).__init__()
     self.tcn = TemporalConvNet(LEN_FEATURE_V, [2048, 1024, 512, 256],
                                kernel_size=3,
                                dropout=DROP_OUT)
     self.conv1d = nn.ModuleList([
         nn.Conv1d(in_channels=256,
                   out_channels=256,
                   kernel_size=kernel_size,
                   stride=stride,
                   dilation=1)
         for stride, kernel_size in zip(STRIDE, KERNEL_SIZE)
     ])
     self.net = nn.Sequential(nn.RReLU(), nn.Linear(256, LEN_FEATURE_B),
                              nn.RReLU(),
                              nn.Linear(LEN_FEATURE_B, LEN_FEATURE_B))
Example #18
    def __init__(self,
                 input_size,
                 output_size,
                 num_channels,
                 kernel_size=2,
                 dropout=0.3):
        super(TCN, self).__init__()
        self.tcn = TemporalConvNet(input_size,
                                   num_channels,
                                   kernel_size,
                                   dropout=dropout)

        self.decoder = nn.Linear(num_channels[-1], output_size)
        self.sigmoid = nn.Sigmoid()

        self.init_weights()
Example #19
  def __init__(self, embedding_dim: int, max_length: int, channel=200, level=3,
               kernel_size=3, dropout=0.2, emb_dropout=0., tied_weights=False, attention=False):
    super(TCN, self).__init__()

    self.channel = channel
    self.channels = [channel] * level

    self.embedding_dim = embedding_dim
    self.character_size = 252
    self.max_length = max_length

    self.embeddings = nn.Embedding(self.character_size, self.embedding_dim, padding_idx=0)
    self.pe = nn.Embedding(self.max_length, self.embedding_dim, padding_idx=0)
    self.pe.weight.data.copy_(position_encoding_init(self.max_length, self.embedding_dim))
    self.pe.weight.requires_grad = False
    self.tcn = TemporalConvNet(embedding_dim, self.channels, kernel_size, dropout=dropout, max_length=max_length, attention=attention)
Example #20
    def __init__(self, input_size, output_size, num_channels, kernel_size, dropout):
        super(TCN, self).__init__()
        self.tcn = TemporalConvNet(input_size, num_channels, kernel_size=kernel_size, dropout=dropout)
        self.linear_T = nn.Linear(num_channels[-1], output_size)
        self.linear_G = nn.Linear(num_channels[-1], 2)
        self.linear_E = nn.Linear(25, output_size)

        self.conv = nn.Sequential(
            nn.Conv1d(25, 25, kernel_size=4, padding=0, stride=4),
            nn.BatchNorm1d(25),
            nn.ReLU(inplace=True),
            nn.Conv1d(25, 25, kernel_size=4, padding=0, stride=4),
            nn.BatchNorm1d(25),
            nn.ReLU(inplace=True),
            nn.Conv1d(25, 2, kernel_size=4, padding=2, stride=4)
            )
Example #21
 def __init__(self,
              input_size,
              output_size,
              num_channels,
              kernel_size=2,
              dropout=0.2,
              emb_dropout=0.2):
     super(TCN, self).__init__()
     self.encoder = nn.Embedding(output_size, input_size)
     self.tcn = TemporalConvNet(input_size,
                                num_channels,
                                kernel_size=kernel_size,
                                dropout=dropout)
     self.decoder = nn.Linear(input_size, output_size)
     #self.decoder.weight = self.encoder.weight
     self.drop = nn.Dropout(emb_dropout)
     self.init_weights()
Example #22
    def __init__(self, input_size, output_size, num_channels, kernel_size,
                 temp, dropout, win_size, mean_emb):
        super(TCN, self).__init__()
        self.tcn = TemporalConvNet(input_size,
                                   num_channels,
                                   kernel_size,
                                   dropout=dropout)
        self.linear = nn.Linear(num_channels[-1], output_size)
        self.batch_norm = nn.BatchNorm1d(win_size)

        self.temp = temp
        self.mean_emb = mean_emb
        if self.mean_emb == 0:
            self.sig = nn.Softmax(
                dim=2)  # needs to be 2 when not averaged, 1 when averaged
        else:
            self.sig = nn.Softmax(
                dim=1)  # needs to be 2 when not averaged, 1 when averaged
Example #23
    def __init__(self, args, num_words, num_channels):
        super(TCN, self).__init__()
        self.encoder = nn.Embedding(num_words, args.emsize)

        if args.model == "tcn":
            self.conv = TemporalConvNet(args.emsize,
                                        num_channels,
                                        kernel_size=args.ksize,
                                        dropout=args.dropout)
        elif args.model == "sequnet" or args.model == "sequnet_res":
            if args.model == "sequnet":
                self.conv = sequnet.Sequnet(
                    args.emsize,
                    num_channels,
                    args.emsize,
                    kernel_size=args.ksize,
                    dropout=args.dropout,
                    target_output_size=args.validseqlen)
            else:
                self.conv = sequnet_res.Sequnet(
                    args.emsize,
                    num_channels[0],
                    len(num_channels),
                    args.emsize,
                    kernel_size=args.ksize,
                    target_output_size=args.validseqlen)
            args.validseqlen = self.conv.output_size
            args.seq_len = self.conv.input_size
            print("Using Seq-U-Net with " + str(args.validseqlen) +
                  " outputs and " + str(args.seq_len) + " inputs")
        else:
            raise NotImplementedError("Could not find this model " +
                                      args.model)

        self.decoder = nn.Linear(num_channels[-1], num_words)
        if args.tied:
            if num_channels[-1] != args.emsize:
                raise ValueError(
                    'When using the tied flag, nhid must be equal to emsize')
            self.decoder.weight = self.encoder.weight
            print("Weight tied")
        self.drop = nn.Dropout(args.emb_dropout)
        self.emb_dropout = args.emb_dropout
        self.init_weights()
Example #24
    def _build(self):
        self.x = tf.placeholder(tf.int32,
                                shape=(None, None),
                                name='input_chars')
        self.y = tf.placeholder(tf.int32,
                                shape=(None, None),
                                name='next_chars')
        self.lr = tf.placeholder(tf.float32, shape=None, name='lr')
        self.eff_history = tf.placeholder(tf.int32,
                                          shape=None,
                                          name='eff_history')
        self.dropout = tf.placeholder_with_default(0., shape=())
        self.emb_dropout = tf.placeholder_with_default(0., shape=())

        embedding = tf.get_variable('char_embedding',
                                    shape=(self.output_size, self.emb_size),
                                    dtype=tf.float32,
                                    initializer=tf.random_uniform_initializer(
                                        -0.1, 0.1))
        inputs = tf.nn.embedding_lookup(embedding, self.x)

        self.tcn = TemporalConvNet(self.num_channels,
                                   stride=1,
                                   kernel_size=self.kernel_size,
                                   dropout=self.dropout)
        outputs = self.tcn(inputs)
        reshaped_outputs = tf.reshape(outputs, (-1, self.emb_size))
        logits = tf.matmul(reshaped_outputs, embedding, transpose_b=True)

        logits_shape = tf.concat(
            [tf.shape(outputs)[:2], (tf.constant(self.output_size), )], 0)
        logits = tf.reshape(logits, shape=logits_shape)
        eff_logits = tf.slice(logits, [0, self.eff_history, 0], [-1, -1, -1])
        eff_labels = tf.slice(self.y, [0, self.eff_history], [-1, -1])
        CE_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=eff_labels, logits=eff_logits)
        self.loss = tf.reduce_mean(CE_loss)

        optimizer = tf.train.GradientDescentOptimizer(self.lr)
        gvs = optimizer.compute_gradients(self.loss)
        capped_gvs = [(tf.clip_by_value(grad, -self.clip_value,
                                        self.clip_value), var)
                      for grad, var in gvs]
        self.train_op = optimizer.apply_gradients(capped_gvs)
Example #25
def TCN(input_layer, output_size, num_channels, embedding_size, kernel_size, dropout, init=False):
    """ 
      shapes:
      input_layer: b_s, L contains the integer ID
      output_size should be vocab_size

    """
    initrange = 0.1
    keep_prob_emb = 1.0
    sequence_length = input_layer.get_shape()[-1]
    embeddings = tf.get_variable('embedding', shape=[output_size, embedding_size], dtype=tf.float32,
        initializer = tf.initializers.random_uniform(-initrange, initrange))
    embedded_input = tf.nn.embedding_lookup(embeddings, input_layer)
    drop = tf.nn.dropout(embedded_input, keep_prob_emb)
    tcn = TemporalConvNet(input_layer=drop, num_channels=num_channels, sequence_length=sequence_length,
     kernel_size=kernel_size, dropout=dropout, init=init)
    decoder = tf.contrib.layers.fully_connected(tcn, output_size, activation_fn=None, 
        weights_initializer=tf.initializers.random_uniform(-initrange, initrange))
    return decoder
Example #26
 def __init__(self, input_size, output_size, num_channels, kernel_size,
              dropout):
     """
     Sequential encoder with TCN as the main building block
     @param input_size: len of the input vector at each time step
     @param output_size: len of the output vector at each time step 
     @param num_channels: number of neurons as a list. e.g. [500, 1000, 1000, 500]
      @param kernel_size: size of the convolution kernel
      @param dropout: dropout probability; in tasks like this it is usually effective to set dropout to around 0.1

      The effective history (how many past timesteps an output neuron can look at):
      effective history length = (kernel_size - 1) * 2^len(num_channels)
      e.g. kernel_size = 3, num_channels = [100, 100, 100, 100] => history length = 32
     """
     super(Encoder_TCN, self).__init__()
     self.tcn = TemporalConvNet(input_size,
                                num_channels,
                                kernel_size=kernel_size,
                                dropout=dropout)
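
A worked instance of the effective-history rule quoted in the docstring above:

kernel_size = 3
num_channels = [100, 100, 100, 100]
history = (kernel_size - 1) * 2 ** len(num_channels)
print(history)  # 32, matching the example in the docstring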
Example #27
def build_tcn(x,
              y,
              domain,
              grl_lambda,
              keep_prob,
              training,
              num_classes,
              num_features,
              adaptation,
              units,
              multi_class=False,
              bidirectional=False,
              class_weights=1.0,
              x_dims=None,
              use_feature_extractor=True):
    """ TCN as an alternative to using RNNs """
    # Build TCN
    with tf.variable_scope("tcn_model"):
        dropout = 1 - keep_prob
        tcn = TemporalConvNet([units, units, units, units], 2, dropout)
        tcn_output = tcn(x, training=training)[:, -1]

    # Other model components passing in output from TCN
    task_output, domain_softmax, task_loss, domain_loss, \
        feature_extractor, summaries = build_model(
            tcn_output, y, domain, grl_lambda, keep_prob, training,
            num_classes, adaptation, multi_class, class_weights,
            use_feature_extractor=use_feature_extractor)

    # Total loss is the sum
    with tf.variable_scope("total_loss"):
        total_loss = task_loss

        if adaptation:
            total_loss += domain_loss

    # We can't generate with a TCN
    extra_outputs = None

    return task_output, domain_softmax, total_loss, \
        feature_extractor, summaries, extra_outputs
Example #28
 def __init__(self, device, num_inputs, num_channels, num_hidden, num_classes, kernel_size=2, dropout=0.2, c_dropout=0.0, r_dropout=0.0):
     super(TCRAE, self).__init__()
     self.device=device
     self.num_inputs=num_inputs
     self.num_channels=num_channels
     self.kernel_size=kernel_size
     self.dropout=dropout
     self.c_dropout=c_dropout
     self.r_dropout=r_dropout
     
     self.TCN=TemporalConvNet(num_inputs=num_inputs, num_channels=num_channels, kernel_size=kernel_size, dropout=dropout)   
     self.rnn=nn.GRU(1, num_hidden, batch_first=True)  
     
     # fix the input weights and bias to form a GRU without input
     self.rnn.weight_ih_l0.data.fill_(0)
     self.rnn.bias_ih_l0.data.fill_(0)
     self.rnn.weight_ih_l0.requires_grad=False
     self.rnn.bias_ih_l0.requires_grad=False
     
     self.hidden=nn.Sequential(nn.Linear(num_channels[-1], num_hidden), nn.Tanh())       
     self.c_linear=nn.Linear(num_hidden, num_classes)       
     self.r_linear=nn.Linear(num_hidden, num_inputs)
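
The "GRU without input" trick above works because every gate has the form sigma(W_ih x + b_ih + W_hh h + b_hh); zeroing and freezing W_ih and b_ih removes the input term, so the recurrence is driven by the hidden state alone. A small self-contained check of that claim (added here, not part of the source):

import torch
import torch.nn as nn

gru = nn.GRU(1, 8, batch_first=True)
gru.weight_ih_l0.data.fill_(0)
gru.bias_ih_l0.data.fill_(0)
h0 = torch.randn(1, 2, 8)                         # (num_layers, batch, hidden)
out_zeros, _ = gru(torch.zeros(2, 5, 1), h0)
out_noise, _ = gru(torch.randn(2, 5, 1), h0)
assert torch.allclose(out_zeros, out_noise)       # the input sequence no longer matters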
Example #29
    def __init__(self, input_size, output_size, num_channels, kernel_size,
                 dropout):
        super(TCN, self).__init__()

        s_dim = input_size
        a_dim = output_size

        self.tcn = TemporalConvNet(input_size,
                                   num_channels,
                                   kernel_size=kernel_size,
                                   dropout=dropout)
        self.s_dim = s_dim
        self.a_dim = a_dim
        # self.a1 = nn.Linear(s_dim, 200)
        self.mu = nn.Linear(num_channels[-1], a_dim)
        self.sigma = nn.Linear(num_channels[-1], a_dim)

        self.c1 = nn.Linear(num_channels[-1], 100)
        self.v = nn.Linear(100, 1)

        self.distribution = torch.distributions.Normal

        self.init_weights()
Example #30
    def __init__(self,
                 output_size,
                 num_channels,
                 kernel_size,
                 dropout,
                 embedding_dim,
                 sequence_length,
                 emb_dropout=0.1):  # dropout applied to the embedding

        super(TCN, self).__init__()
        self.sequence_length = sequence_length
        self.embedding_dim = embedding_dim
        self.vocab_size = output_size  # vocab_size equals output_size

        # Embedding layer
        self.embedding = EmbeddingSharedWeights(self.vocab_size, embedding_dim)
        # self.embedding = layers.Embedding(self.vocab_size,
        #             embedding_dim, input_length=sequence_length)
        self.drop = layers.Dropout(rate=emb_dropout)

        # TCN
        self.temporalCN = TemporalConvNet(num_channels,
                                          kernel_size=kernel_size,
                                          dropout=dropout)