コード例 #1
ファイル: gan.py プロジェクト: aksbaih/drone-trajectory
class Generator(nn.Module):
    """Transformer-based trajectory generator (the GAN's generator network).

    Encodes a source trajectory with a transformer encoder and then
    autoregressively decodes a target trajectory, seeded by a random
    noise token produced by :meth:`sample_noise`.
    """

    def __init__(self,
                 src_len,
                 tgt_len,
                 enc_inp_size,
                 dec_inp_size,
                 dec_out_size,
                 N=6,
                 d_model=512,
                 d_ff=2048,
                 h=8,
                 dropout=0.1,
                 device='cpu'):
        super(Generator, self).__init__()
        self.device = device
        self.src_len = src_len
        self.tgt_len = tgt_len
        self.dec_inp_size = dec_inp_size

        # Each layer gets its own deep copy of the shared sublayer templates.
        clone = copy.deepcopy
        attention = MultiHeadAttention(h, d_model)
        feedforward = PointerwiseFeedforward(d_model, d_ff, dropout)
        pos_enc = PositionalEncoding(d_model, dropout)
        self.generator = EncoderDecoder(
            Encoder(
                EncoderLayer(d_model, clone(attention), clone(feedforward),
                             dropout), N),
            Decoder(
                DecoderLayer(d_model, clone(attention), clone(attention),
                             clone(feedforward), dropout), N),
            nn.Sequential(LinearEmbedding(enc_inp_size, d_model),
                          clone(pos_enc)),
            nn.Sequential(LinearEmbedding(dec_inp_size, d_model),
                          clone(pos_enc)),
            TFHeadGenerator(d_model, dec_out_size))

        # This was important from their code.
        # Initialize parameters with Glorot / fan_avg.
        for param in self.generator.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)

    def sample_noise(self, batch_size):
        """Return a (batch_size, 1, dec_inp_size) random start token.

        The last feature of every sample is forced to 1 so the network
        can tell the start-of-sequence token apart from generated steps.
        """
        start = torch.randn(batch_size, self.dec_inp_size, device=self.device)
        start[:, -1] = 1.  # Distinguish start-of-sequence token
        return start.unsqueeze(1)

    def forward(self, src, noise):
        """Generate a target trajectory conditioned on a source trajectory.

        src:   (batch, self.src_len, dim) observed trajectory.
        noise: (batch, 1, dec_inp_size) start token (see sample_noise()).
        Returns the generated (batch, self.tgt_len, feature) trajectory.
        NOTE(review): generated steps are fed back as decoder input, which
        assumes dec_out_size == dec_inp_size — confirm at the call site.
        """
        batch_size = src.shape[0]
        src_mask = torch.ones((batch_size, 1, self.src_len)).to(self.device)
        decoded = noise

        # Autoregressive decoding: append one predicted step per iteration.
        step = 0
        while step < self.tgt_len:
            tgt_mask = subsequent_mask(decoded.shape[1]).repeat(
                batch_size, 1, 1).to(self.device)
            prediction = self.generator.generator(
                self.generator(src, decoded, src_mask, tgt_mask))
            decoded = torch.cat((decoded, prediction[:, -1:, :]), 1)
            step += 1

        return decoded[:, 1:, :]  # skip the start of sequence
コード例 #2
class QuantizedTF(nn.Module):
    """Transformer encoder-decoder over quantized (token) trajectories.

    Both encoder and decoder inputs go through embedding lookups
    (``Embeddings``); the head projects to ``dec_out_size`` logits per step.
    NOTE(review): ``Generator`` here is the transformer output head
    (annotated-transformer style), not a GAN generator — confirm imports.
    """

    def __init__(self,
                 enc_inp_size,
                 dec_inp_size,
                 dec_out_size,
                 N=6,
                 d_model=512,
                 d_ff=2048,
                 h=8,
                 dropout=0.1):
        """Construct a model from hyperparameters.

        :param enc_inp_size: encoder input size (passed to ``Embeddings``)
        :param dec_inp_size: decoder input size (passed to ``Embeddings``)
        :param dec_out_size: output size of the projection head
        :param N: number of encoder and decoder layers
        :param d_model: model/embedding dimensionality
        :param d_ff: hidden size of the position-wise feed-forward sublayer
        :param h: number of attention heads
        :param dropout: dropout probability
        """
        super(QuantizedTF, self).__init__()
        # Each layer gets its own deep copy of the shared sublayer templates.
        c = copy.deepcopy
        attn = MultiHeadAttention(h, d_model)
        ff = PointerwiseFeedforward(d_model, d_ff, dropout)
        position = PositionalEncoding(d_model, dropout)
        self.model = EncoderDecoder(
            Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
            Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout),
                    N),
            nn.Sequential(Embeddings(d_model, enc_inp_size), c(position)),
            nn.Sequential(Embeddings(d_model, dec_inp_size), c(position)),
            Generator(d_model, dec_out_size))

        # This was important from their code.
        # Initialize parameters with Glorot / fan_avg.
        for p in self.model.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, *inputs):
        """Run the encoder-decoder and project to output logits.

        ``*inputs`` is forwarded verbatim to ``self.model`` (renamed from
        ``*input`` to avoid shadowing the builtin).
        """
        return self.model.generator(self.model(*inputs))

    def predict(self, *inputs):
        """Same as forward() but softmax-normalized over the last dim."""
        return F.softmax(self.model.generator(self.model(*inputs)), dim=-1)
コード例 #3
class IndividualTF(nn.Module):
    """Transformer encoder-decoder over continuous trajectories.

    Both encoder and decoder inputs go through linear projections
    (``LinearEmbedding``). ``mean``/``std`` are stored as numpy arrays but
    are not used inside this class — presumably for (de)normalization by
    callers; confirm at the call sites.
    """

    def __init__(self,
                 enc_inp_size,
                 dec_inp_size,
                 dec_out_size,
                 N=6,
                 d_model=512,
                 d_ff=2048,
                 h=8,
                 dropout=0.1,
                 mean=(0, 0),
                 std=(0, 0)):
        """Construct a model from hyperparameters.

        :param enc_inp_size: encoder input feature size
        :param dec_inp_size: decoder input feature size
        :param dec_out_size: decoder output feature size
        :param N: number of encoder and decoder layers
        :param d_model: model/embedding dimensionality
        :param d_ff: hidden size of the position-wise feed-forward sublayer
        :param h: number of attention heads
        :param dropout: dropout probability
        :param mean: per-feature mean, stored on ``self.mean``
        :param std: per-feature std, stored on ``self.std``
        """
        super(IndividualTF, self).__init__()
        # Each layer gets its own deep copy of the shared sublayer templates.
        deepcopy = copy.deepcopy
        attn = MultiHeadAttention(h, d_model)
        ff = PointerwiseFeedforward(d_model, d_ff, dropout)
        position = PositionalEncoding(d_model, dropout)
        # Tuple defaults (instead of the original lists) avoid the
        # mutable-default-argument pitfall; np.array accepts either.
        self.mean = np.array(mean)
        self.std = np.array(std)
        self.model = EncoderDecoder(
            Encoder(
                EncoderLayer(d_model, deepcopy(attn), deepcopy(ff), dropout),
                N),
            Decoder(
                DecoderLayer(d_model, deepcopy(attn), deepcopy(attn),
                             deepcopy(ff), dropout), N),
            nn.Sequential(LinearEmbedding(enc_inp_size, d_model),
                          deepcopy(position)),
            nn.Sequential(LinearEmbedding(dec_inp_size, d_model),
                          deepcopy(position)),
            Generator(d_model, dec_out_size))

        # This was important from their code.
        # Initialize parameters with Glorot / fan_avg.
        for p in self.model.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, *inputs):
        """Run the encoder-decoder and project with the output head.

        ``*inputs`` is forwarded verbatim to ``self.model`` (renamed from
        ``*input`` to avoid shadowing the builtin).
        """
        return self.model.generator(self.model(*inputs))