Beispiel #1
0
 def call_Decoder(self, nlayer, Elt_state, Rlt_top, Rlt_state, epoch):
     """Build and run the Decoder for layer `nlayer`.

     Args:
         nlayer: index of the layer whose decoder is constructed.
         Elt_state: previous error-state tensor, or None on the first call
             (a zero-initialised state is created in that case).
         Rlt_top: representation from the layer above, or None for the top layer.
         Rlt_state: recurrent state paired with Rlt_top.
         epoch: current epoch number, used for periodic checkpointing.

     Returns:
         (R_lt_next, parameters): the decoder output and the decoder's
         parameters (for the caller's optimizer).
     """
     # Hidden-layer sizes for this layer; only h_l_top_out, h_l_down_out and
     # h_Elt are used below.
     h_l_down_in, h_l_top_out, \
     h_l_down_out, h_Elt = self.hidden_layers_selctor(nlayer)
     # Fall back to a zero-initialised error state when none was passed in.
     if Elt_state is None:
         Elt_state_in = self.get_init_Elt_tensor(self.error_states[nlayer + 1])
     else:
         Elt_state_in = Elt_state
     if Rlt_top is None:
         # Top layer: no representation from above, so the decoder is built
         # with a 0-sized top input.  (A redundant re-initialisation of
         # Elt_state that was never read has been removed here.)
         Decode_lt = Decoder(h_Elt, 0, h_l_down_out,
                             self.kernel_size).cuda()
         R_lt_next = Decode_lt(Elt_state_in, None, None)
     else:
         Decode_lt = Decoder(h_Elt, Rlt_top.data.size()[1],
                             h_l_top_out, self.kernel_size).cuda()
         R_lt_next = Decode_lt(Elt_state_in, Rlt_top, Rlt_state)
     # Periodically checkpoint the decoder.
     if self.saveModel and epoch % self.numSaveIter == 0:
         self.save_models(Decode_lt, epoch, "Decoder")
     return R_lt_next, Decode_lt.parameters()
Beispiel #2
0
    def __init__(self, w2i, i2w, embs=None, title_emb = None, info=None):
        """
        Args:
            w2i: word -> index vocabulary mapping
            i2w: index -> word vocabulary mapping
            embs: optional pretrained word-embedding weight matrix
            title_emb: optional pretrained title/field embedding weight matrix
            info: optional auxiliary info object, stored as-is

        NOTE(review): this constructor reads a module-level `args` dict for
        sizes ('maxLengthDeco', 'vocabularySize', 'embeddingSize', 'TitleNum',
        'hiddenSize') — confirm it is initialised before construction.
        """
        super(LSTM_CTE_Model_with_action, self).__init__()
        print("Model creation...")

        self.word2index = w2i
        self.index2word = i2w
        self.max_length = args['maxLengthDeco']
        self.info = info

        # Both losses ignore index 0 (presumably the padding token — TODO confirm).
        self.NLLloss = torch.nn.NLLLoss(ignore_index=0)
        self.CEloss = torch.nn.CrossEntropyLoss(ignore_index=0)

        # Word embeddings: pretrained weights if provided, otherwise trained
        # from scratch.
        if embs is not None:
            self.embedding = nn.Embedding.from_pretrained(embs)
        else:
            self.embedding = nn.Embedding(args['vocabularySize'], args['embeddingSize'])

        # Title/field embeddings, analogous to the word embeddings above.
        if title_emb is not None:
            self.field_embedding = nn.Embedding.from_pretrained(title_emb)
        else:
            self.field_embedding = nn.Embedding(args['TitleNum'], args['embeddingSize'])

        # One bidirectional main encoder plus two auxiliary encoders.
        self.encoder = Encoder(w2i, i2w, bidirectional=True)
        # self.encoder_answer_only = Encoder(w2i, i2w)
        self.encoder_no_answer = Encoder(w2i, i2w)
        self.encoder_pure_answer = Encoder(w2i, i2w)

        # Two decoders with different copy mechanisms ('pure' vs 'semi').
        self.decoder_answer = Decoder(w2i, i2w, self.embedding, copy='pure', max_dec_len=10)
        self.decoder_no_answer = Decoder(w2i, i2w, self.embedding,
                                         input_dim = args['embeddingSize']*2,# +  args['hiddenSize'] ,
                                         copy='semi')

        # Project an embedding onto the doubled hidden size (matches the
        # bidirectional encoder's output width).
        self.ansmax2state_h = nn.Linear(args['embeddingSize'], args['hiddenSize']*2, bias=False)
        self.ansmax2state_c = nn.Linear(args['embeddingSize'], args['hiddenSize']*2, bias=False)
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(dim=-1)
        self.sigmoid = nn.Sigmoid()

        self.att_size_r = 60
        # self.grm = GaussianOrthogonalRandomMatrix()
        # self.att_projection_matrix = Parameter(self.grm.get_2d_array(args['embeddingSize'], self.att_size_r))
        # Learnable 3-D interaction tensor.
        self.M = Parameter(torch.randn([args['embeddingSize'], args['hiddenSize']*2,2]))

        self.shrink_copy_input= nn.Linear(args['hiddenSize']*2, args['hiddenSize'], bias=False)
        self.emb2hid = nn.Linear(args['embeddingSize'], args['hiddenSize'], bias=False)
Beispiel #3
0
 def build_decoder(self):
     """Construct and return a Decoder configured from this model's settings."""
     return Decoder(input_size=self.input_size,
                    hidden_size=self.sentiment_size,
                    vocab_size=self.vocab_size,
                    max_len=self.max_len,
                    embedding=self.embedding)
 def __init__(self,
              n_blocks,
              d_model,
              d_feature,
              d_ff,
              dropout,
              d_vocab_in,
              d_vocab_out,
              max_seq_len=512):
     '''n_blocks: number of encoder and decoder blocks. d_model: Size of word feature vector.
     d_feature: size of attn head input feature. d_ff: size between linear layers.
     max_seq_len: maximum sequence length, forwarded to both encoder and decoder.'''
     super(Transformer, self).__init__()
     # Bug fix: max_seq_len used to be hard-coded to 512 in both calls below,
     # silently ignoring the constructor argument.
     self.encoder = Encoder(n_blocks,
                            d_model,
                            d_feature,
                            d_ff,
                            dropout,
                            max_seq_len=max_seq_len)
     self.decoder = Decoder(n_blocks,
                            d_model,
                            d_feature,
                            d_ff,
                            dropout,
                            max_seq_len=max_seq_len)
     # Separate source/target embeddings and the output projection.
     self.encoder_embed = nn.Embedding(d_vocab_in, d_model)
     self.decoder_embed = nn.Embedding(d_vocab_out, d_model)
     self.linear = nn.Linear(d_model, d_vocab_out)
 def __init__(self, mem_size: int = 10000):
     """
             Wire up the simulated processor.  The ValBank below is an
             abstraction for a bank of per-cycle values such as valA;
             it is set up as an object to avoid a circular import.

             Args:
                 mem_size: number of memory cells to allocate (default 10000).
     """
     self.ValBank = ValBank()
     """
     The following are functional units like memory,
     registers, or flags
     """
     self.Memory = Memory(mem_size)
     self.RegisterBank = RegisterBank()
     self.ZF = CCFlag("ZF")  # zero flag
     self.OF = CCFlag("OF")  # overflow flag
     self.SF = CCFlag("SF")  # sign flag
     self.ErrorFlag = StateFlag("Error Flag", error_lib)
     self.StateFlag = StateFlag("State Flag", state_lib)
     # The ALU shares the value bank and all three condition-code flags.
     self.ALU = ALU(self.ValBank, self.StateFlag, self.ErrorFlag, self.SF, self.OF, self.ZF)
     """
     The following are functional abstractions of operations
     that the processor performs
     """
     # One object per processor operation: fetch, decode, execute,
     # memory access, register write-back and PC update.
     self.Fetcher = Fetcher(self.ValBank, self.RegisterBank, self.Memory, self.StateFlag, self.ErrorFlag)
     self.Decoder = Decoder(self.ValBank, self.RegisterBank, self.Memory)
     self.Executor = Executor(self.ValBank, self.ALU, self.OF, self.ZF, self.SF)
     self.Memorizer = Memorizer(self.ValBank, self.Memory)
     self.RegWriter = RegWriter(self.RegisterBank, self.ValBank)
     self.PCUpdater = PCUpdater(self.RegisterBank, self.ValBank)
Beispiel #6
0
    def __init__(self, input_size, hidden_size, batch_size, learning_rate,
                 num_epoch, method):
        """Character-level seq2seq trainer setup.

        Builds the vocabulary from the dataset's full text, a shuffling data
        loader, the encoder/decoder pair (moved to GPU when available), the
        NLL loss and one Adam optimizer per network.

        Args:
            input_size: encoder input size.
            hidden_size: hidden-state size shared by encoder and decoder.
            batch_size: mini-batch size for the DataLoader.
            learning_rate: Adam learning rate for both optimizers.
            num_epoch: number of training epochs (stored for later use).
            method: attention/score method forwarded to the Decoder.
        """
        dataset = Seq2SeqDataset()

        # Sorted set of characters appearing anywhere in the corpus.
        self.vocab = sorted(set(dataset.full_text))
        self.vocab_size = len(self.vocab)
        self.char2ind, self.ind2char = self.get_vocab()
        self.input_size = input_size
        self.hidden_size = hidden_size
        # One output logit per vocabulary character.
        self.output_size = self.vocab_size
        self.method = method
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.num_epoch = num_epoch
        self.device = "cuda:0" if torch.cuda.is_available() else "cpu"

        self.dataloader = DataLoader(dataset=dataset,
                                     batch_size=batch_size,
                                     shuffle=True)

        self.encoder = Encoder(input_size, hidden_size, self.vocab_size)
        self.decoder = Decoder(hidden_size, self.output_size, method)

        self.encoder = self.encoder.to(self.device)
        self.decoder = self.decoder.to(self.device)

        self.loss_function = NLLLoss()

        # Separate optimizers so encoder and decoder can be stepped independently.
        self.encoder_optim = optim.Adam(self.encoder.parameters(),
                                        lr=self.learning_rate)
        self.decoder_optim = optim.Adam(self.decoder.parameters(),
                                        lr=self.learning_rate)
Beispiel #7
0
    def __init__(self, config):
        """Build the latent-variable dialogue model from `config`.

        Components: embedding, post/response encoders, prior and recognition
        networks over a latent variable, decoder-state preparation, decoder,
        and a softmax output projector.
        """
        super(Model, self).__init__()
        self.config = config

        # Embedding layer
        self.embedding = Embedding(
            config.num_vocab,  # vocabulary size
            config.embedding_size,  # embedding dimension
            config.pad_id,  # pad_id
            config.dropout)

        # Post encoder
        self.post_encoder = Encoder(
            config.post_encoder_cell_type,  # RNN cell type
            config.embedding_size,  # input size
            config.post_encoder_output_size,  # output size
            config.post_encoder_num_layers,  # number of RNN layers
            config.post_encoder_bidirectional,  # bidirectional?
            config.dropout)  # dropout probability

        # Response encoder
        self.response_encoder = Encoder(
            config.response_encoder_cell_type,
            config.embedding_size,  # input size
            config.response_encoder_output_size,  # output size
            config.response_encoder_num_layers,  # number of RNN layers
            config.response_encoder_bidirectional,  # bidirectional?
            config.dropout)  # dropout probability

        # Prior network
        self.prior_net = PriorNet(
            config.post_encoder_output_size,  # post input size
            config.latent_size,  # latent variable size
            config.dims_prior)  # hidden layer sizes

        # Recognition network
        self.recognize_net = RecognizeNet(
            config.post_encoder_output_size,  # post input size
            config.response_encoder_output_size,  # response input size
            config.latent_size,  # latent variable size
            config.dims_recognize)  # hidden layer sizes

        # Initial decoder-state preparation (post encoding + latent variable)
        self.prepare_state = PrepareState(
            config.post_encoder_output_size + config.latent_size,
            config.decoder_cell_type, config.decoder_output_size,
            config.decoder_num_layers)

        # Decoder
        self.decoder = Decoder(
            config.decoder_cell_type,  # RNN cell type
            config.embedding_size,  # input size
            config.decoder_output_size,  # output size
            config.decoder_num_layers,  # number of RNN layers
            config.dropout)  # dropout probability

        # Output layer: probability distribution over the vocabulary
        self.projector = nn.Sequential(
            nn.Linear(config.decoder_output_size, config.num_vocab),
            nn.Softmax(-1))
    def __init__(self, vocab_size, embed_size, hidden_size, choose_model,
                 use_gpu, gpu_id):
        """Build the attention encoder-decoder.

        Args:
            vocab_size: output vocabulary size.
            embed_size: decoder embedding size.
            hidden_size: hidden size shared by the encoder CNN, attention and decoder.
            choose_model: encoder CNN name, "Alex_RNNLM" or "AlexBn_RNNLM"
                (any other value falls back to Alex_RNNLM, as before).
            use_gpu: move the encoder to the GPU when True.
            gpu_id: device id used when use_gpu is True.
        """
        # gpu Setting
        # Fix: the old code always constructed a default Alex_RNNLM and then
        # constructed a second instance when choose_model matched — build
        # exactly one encoder instead.
        if choose_model == "AlexBn_RNNLM":
            model = AlexBn_RNNLM(hidden_size)
        else:
            model = Alex_RNNLM(hidden_size)

        if use_gpu:
            cuda.get_device(gpu_id).use()
            model.to_gpu()
        # Setting Model
        super(EncoderDecoderAttention, self).__init__(
            enc=model,
            im1=links.Linear(IM_SIZE, RESIZE_IM_SIZE),
            im2=links.Linear(IM_SIZE, RESIZE_IM_SIZE),
            im3=links.Linear(IM_SIZE, RESIZE_IM_SIZE),
            att=Attention(hidden_size, RESIZE_IM_SIZE),
            outay=links.Linear(hidden_size, hidden_size),
            dec=Decoder(vocab_size, embed_size, hidden_size),
        )
        self.vocab_size = vocab_size
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.common_function = CommonFunction()
        self.use_gpu = use_gpu
        self.gpu_id = gpu_id
        self.choose_model = choose_model
        self.__set_gpu()
Beispiel #9
0
    def __network(self):
        """
        Define the VAE network:
        encoder -> reparameterized latent z -> point-wise SDF decoder.
        """
        # Network Construction begins here
        with tf.variable_scope(self.__scope):
            # encoder
            # Stack depth / foreground / normal maps along channels, then move
            # to NCHW layout for the encoder.
            encoder_input = tf.concat([self.depth_map_input, self.foreground_map_input, self.normal_map_input], axis=3)
            encoder_input = tf.transpose(encoder_input, perm=[0, 3, 1, 2])  # [?, 5, 256, 256]
            encoder = Encoder(encoder_input/self.__NORMALIZATION_FACTOR, 3, self.__downsampling_factor,
                              self.__width_multiplier, self.__weight_decay,
                              self.__dropout_keep_prob, self.__batchnorm,
                              self.__batchnorm_decay, self.__is_training, self.__data_format, self.__latent_dim)

            self.z_mu = encoder.shape_mu  # [?, latent_dim]
            self.z_logvar = encoder.shape_logvar  # [?, latent_dim]

            # reparameterization
            # z = mu + sigma * eps.  NOTE(review): eps is drawn with
            # stddev=0.01 rather than the usual 1.0 — confirm this is intended.
            eps = tf.random_normal(shape=tf.shape(self.z_mu),
                                   mean=0, stddev=0.01, dtype=tf.float32)  # [?, latent_dim]
            self.z = tf.add(self.z_mu, tf.multiply(tf.sqrt(tf.exp(self.z_logvar)), eps))  # [?, latent_dim]

            # decoder
            # Tile z once per sample point, then append the point coordinates.
            z = tf.expand_dims(self.z, 1)
            decoder_input = tf.tile(z, multiples=[1, self.__num_sample_points, 1])  # [?, K, latent_dim]
            decoder_input = tf.concat([decoder_input, self.samples], axis=2)   # [?, K, latent_dim+3]
            decoder = Decoder(decoder_input)

            # output sdf values
            self.sdf_pred = decoder.end_point
Beispiel #10
0
    def __init__(self, w2i, i2w):
        """
        Args:
            w2i: word -> index vocabulary mapping
            i2w: index -> word vocabulary mapping

        NOTE(review): reads a module-level `args` dict for sizes
        ('maxLengthDeco', 'vocabularySize', 'embeddingSize', 'emo_labelSize')
        — confirm it is defined before construction.
        """
        super(Model, self).__init__()
        print("Model creation...")

        self.word2index = w2i
        self.index2word = i2w
        self.max_length = args['maxLengthDeco']

        self.dtype = 'float32'
        # Per-element losses (reduction='none') so callers can mask or weight
        # individual positions themselves.
        self.NLLloss = torch.nn.NLLLoss(reduction='none')
        self.CEloss = torch.nn.CrossEntropyLoss(reduction='none')

        # Word and emotion-label embeddings share the same dimensionality.
        self.embedding = nn.Embedding(args['vocabularySize'],
                                      args['embeddingSize'])
        self.emo_embedding = nn.Embedding(args['emo_labelSize'],
                                          args['embeddingSize'])
        # Encoder and decoder share the word-embedding table.
        self.encoder = Encoder(w2i, i2w, self.embedding)
        self.decoder = Decoder(w2i, i2w, self.embedding)
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax(dim=-1)
Beispiel #11
0
def train(iterations, train_file, beam_size):
    """Train the segmentation model with perceptron-style updates.

    For each pass over the data the predicted segmentation is written to a
    per-iteration file and the model is pickled at the end of the pass.

    Args:
        iterations: number of passes over the training data.
        train_file: path to the training corpus (one sentence per line).
        beam_size: beam width used by the decoder.
    """
    data = prepare_data.read_file(train_file)
    feature = Feature()
    decoder = Decoder(beam_size, feature.get_score)

    for t in range(iterations):
        data_size = len(data)

        seg_data_file = '/home/xzt/CWS/train_seg_data/train-seg-data_ model-' + str(
            t) + '.txt'
        # Open the output once per iteration instead of re-opening it in
        # append mode for every line (the old code also called f.close() on a
        # handle the `with` block had already closed).
        with open(seg_data_file, 'a') as f:
            for count, line in enumerate(data, start=1):
                y = line.split()
                z = decoder.beamSearch(line)
                # Update weights whenever the prediction differs from gold.
                if z != y:
                    feature.update_weight(y, z)

                f.write(' '.join(z) + '\n')

                if count % 1000 == 0:
                    print("iter %d , finish %.2f%%" % (t,
                                                       (count / data_size) * 100))

        # Persist this iteration's model; `with` guarantees the file is closed.
        model_path = ("/home/xzt/CWS/model_result/model-" + str(t) +
                      "_beam-size-" + str(beam_size) + '.pkl')
        with open(model_path, 'wb') as model_file:
            feature.save_model(model_file)

        print("segment with model-%d finish" % t)
        print("iteration %d finish" % t)
Beispiel #12
0
def generate_data(filename, version, block_size=16):
    """Encode/decode `filename` at several quality levels and record (bpp, dB).

    For each quality in {8, 12, 16, 20, 24} the image is compressed, decoded,
    and evaluated with the external PSNR tool; the resulting DataFrame is
    pickled into the version directory.

    Args:
        filename: basename of the source PGM image (without suffix).
        version: subdirectory of DATA_ROOT_PATH receiving the pickled results.
        block_size: coding block size forwarded to the Encoder.
    """
    input_path = os.path.join(PGM_ORIGINAL_PATH, filename + PGM_SUFFIX)

    # makedirs(..., exist_ok=True) replaces the racy exists()/mkdir() pair.
    os.makedirs(BITSTREAM_PATH, exist_ok=True)
    bitstream_path = os.path.join(BITSTREAM_PATH, filename + BITSTREAM_SUFFIX)

    os.makedirs(PGM_RECONSTRUCTION_PATH, exist_ok=True)
    output_path = os.path.join(PGM_RECONSTRUCTION_PATH, filename + PGM_SUFFIX)

    df = pd.DataFrame(columns=['bpp', 'db'])

    for index, quality in enumerate([8, 12, 16, 20, 24]):
        enc = Encoder(input_path, bitstream_path, block_size, quality, False)
        enc.encode_image()
        dec = Decoder(bitstream_path, output_path, pgm=True)
        dec.decode_all_frames()

        # The external tool prints "<bpp> bpp <db> dB" on stdout.
        process = subprocess.run(
            [PSNR_TOOL_PATH, input_path, output_path, bitstream_path],
            stdout=subprocess.PIPE,
        )

        stdout = process.stdout.decode("utf-8")
        bpp, db = stdout.split(' bpp ')
        bpp, db = float(bpp), float(db.replace(' dB', ''))
        # Infinite PSNR (lossless reconstruction) is recorded as 0.0.
        db = 0.0 if math.isinf(db) else db
        df.loc[index] = [bpp, db]

    version_path = os.path.join(DATA_ROOT_PATH, version)
    print(version)
    os.makedirs(version_path, exist_ok=True)
    df.to_pickle(os.path.join(version_path, filename + DATA_SUFFIX))
Beispiel #13
0
 def __init__(self, config: Config):
     """Transformer wrapper: encoder, decoder and a final projection layer."""
     super().__init__()
     self.cfg = config
     # Encoder and decoder stacks, both driven by the shared config.
     self.encoder = Encoder(config)
     self.decoder = Decoder(config)
     # Projects decoder output onto the target vocabulary.
     self.final_layer = tf.keras.layers.Dense(config.target_vocab_size)
Beispiel #14
0
def test_avg(iterations, test_file, beam_size):
    """Segment `test_file` with the averaged model and write the result.

    Args:
        iterations: kept for signature compatibility; not used here.
        test_file: path to the test corpus (one sentence per line).
        beam_size: beam width used by the decoder.
    """
    data = prepare_data.read_file(test_file)
    feature = Feature()
    decoder = Decoder(beam_size, feature.get_score)

    data_size = len(data)

    # Load the averaged model; `with` guarantees the handle is closed.
    with open('/home/xzt/CWS/model_result/avg-model_beam-size-' +
              str(beam_size) + '.pkl', 'rb') as model_file:
        feature.load_model(model_file)

    seg_data_file = '/home/xzt/CWS/test_seg_data/avg-test-seg-data' + '_beam-size-' + str(
        beam_size) + '.txt'
    # Open the output once instead of re-opening it in append mode per line
    # (the old code also called f.close() on an already-closed handle).
    with open(seg_data_file, 'a') as f:
        for count, line in enumerate(data, start=1):
            z = decoder.beamSearch(line)
            f.write(' '.join(z) + '\n')
            if count % 1000 == 0:
                print("segment with avg-model, finish %.2f%%" %
                      ((count / data_size) * 100))
    print("segment with avg model finish")
Beispiel #15
0
    def __init__(self,
                 vocab_size,
                 max_len,
                 device,
                 num_layers=6,
                 stack_layers=6,
                 d_model=512,
                 num_heads=8,
                 ffn_dim=2048,
                 dropout=0.2):
        """Transformer with encoder, decoder, and an extra Generator stack.

        Args:
            vocab_size: shared source/target vocabulary size.
            max_len: maximum sequence length.
            device: device handle forwarded to decoder and generator.
            num_layers: encoder/decoder depth.
            stack_layers: generator depth.
            d_model: model (embedding) dimension.
            num_heads: number of attention heads.
            ffn_dim: feed-forward inner dimension.
            dropout: dropout probability.
        """

        super(Transformer, self).__init__()

        self.device = device

        self.encoder = Encoder(vocab_size, max_len, num_layers, d_model,
                               num_heads, ffn_dim, dropout)
        self.decoder = Decoder(vocab_size, max_len, device, num_layers,
                               d_model, num_heads, ffn_dim, dropout)
        self.generator = Generator(vocab_size, max_len, device, stack_layers,
                                   d_model, num_heads, ffn_dim, dropout)

        # Token embedding and (untied, bias-free) output projection.
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.linear = nn.Linear(d_model, vocab_size, bias=False)
        # Softmax over dim=2 — assumes [batch, seq, vocab] logits; TODO confirm.
        self.softmax = nn.Softmax(dim=2)
Beispiel #16
0
    def predict(self, y, phi_gmm, encoder_layers, decoder_layers, seed=0):
        """
        Args:
            y: data to cluster and reconstruct
            phi_gmm: latent phi param
            encoder_layers: encoder NN architecture
            decoder_layers: decoder NN architecture
            seed: random seed (used for the x subsampling step)

        Returns:
            reconstructed y and most probable cluster allocation
        """

        nb_samples = 1
        # Encode y into variational parameters.
        phi_enc_model = Encoder(layerspecs=encoder_layers)
        phi_enc = phi_enc_model.forward(y)

        # NOTE(review): e_step is called with a hard-coded seed=0 rather than
        # the `seed` argument — confirm whether this is intentional.
        x_k_samples, log_r_nk, _, _ = e_step(phi_enc,
                                             phi_gmm,
                                             nb_samples,
                                             seed=0)
        # Keep the single (nb_samples == 1) subsampled x per datapoint.
        x_samples = subsample_x(x_k_samples, log_r_nk, seed)[:, 0, :]

        # Decode back to observation space.
        y_recon_model = Decoder(layerspecs=decoder_layers)
        y_mean, _ = y_recon_model.forward(x_samples)

        # Most probable cluster is the argmax over the responsibilities.
        return (y_mean, torch.argmax(log_r_nk, dim=1))
Beispiel #17
0
 def __init__(self,
              in_channels,
              hidden_channels,
              num_resblocks,
              res_channels,
              D,
              K,
              beta=0.25,
              gamma=0.99):
     """
     VQ-VAE: encoder -> 1x1 pre-VQ conv -> EMA vector quantizer -> decoder.

     in_channels: number of channels of the input image
     hidden_channels: the number of channels that are used by the hidden conv layers
     num_resblocks: the number of residual blocks used in both the encoder and the decoder
     res_channels: the number of channels that are used by the residual blocks
     D: dimensionality of each embedding vector, or embedding_dim in the sonnet's implementation
     K: the size of the discrete space (the number of embedding vectors), or num_embeddings in the sonnet's implementation
     beta: the hyperparameter that acts as a weighting to the lost term, or commitment_cost in the sonnet's implementation
         recommendation from the paper, beta=0.25
     gamma: controls the speed of the EMA, or decay in the sonnet's implementation
         recommendation from the paper, gamma=0.99
     """
     super(VQVAE, self).__init__()
     self.encoder = Encoder(in_channels, hidden_channels, num_resblocks,
                            res_channels)
     # the following is the additional layer added in the author's original implementation
     # to make sure that the number of channels equals to D (embedding dimension)
     self.pre_vq = nn.Conv2d(in_channels=hidden_channels,
                             out_channels=D,
                             kernel_size=1,
                             stride=1)
     self.vectorquantizer = VectorQuantizerEMA(D, K, beta, gamma)
     # Decoder mirrors the encoder, mapping D channels back to in_channels.
     self.decoder = Decoder(D, hidden_channels, num_resblocks, res_channels,
                            in_channels)
Beispiel #18
0
def test_folder(folder):
    """ Test all images inside a folder

        Use Zbar library to test the image.

    Args:
        folder:  The path of your target folder

    Returns:
        (succ, fail, rate):  The number of success, failure,
        and the "success rate" = (succ) / (succ + fail)
        (rate is 0.0 when the folder contains no images)

    """
    def is_img(path):
        # Add more extensions if you need
        img_ext = ['jpg', 'png', 'bmp']
        return path.split('.')[-1] in img_ext

    dc = Decoder()
    # Fix: accumulate images across the whole walk — the old code overwrote
    # img_list on every directory, so only the last directory was tested, and
    # an empty walk raised NameError.
    img_list = []
    for root, folders, files in os.walk(folder):
        img_list.extend(os.path.join(root, file) for file in files
                        if is_img(file))

    succ = fail = 0
    for img in img_list:
        pil = Image.open(img).convert('L')
        code = dc.decode(pil)
        if len(code) > 0:
            succ += 1
        else:
            fail += 1
    # Guard the no-image case instead of dividing by zero.
    total = succ + fail
    rate = float(succ) / total if total else 0.0
    return (succ, fail, rate)
Beispiel #19
0
    def __init__(self, args):
        """Mem2Seq runner: memory-network encoder plus decoder, with optimizers.

        Args:
            args: runner configuration forwarded to the base runner class.

        NOTE(review): relies on attributes provided by the superclass
        (self.nwords, self.loss_weighting, self.loss_weights,
        self.cross_entropy, self.use_cuda) — confirm base-class init order.
        """
        super(Mem2SeqRunner, self).__init__(args)

        # Model parameters
        self.gru_size = 128
        self.emb_size = 128
        #TODO: Try hops 4 with task 3
        self.hops = 3
        self.dropout = 0.2

        self.encoder = Encoder(self.hops, self.nwords, self.gru_size)
        self.decoder = Decoder(self.emb_size, self.hops, self.gru_size,
                               self.nwords)

        # One Adam optimizer per network; optional extra optimizer for the
        # loss weights when loss weighting is enabled.
        self.optim_enc = torch.optim.Adam(self.encoder.parameters(), lr=0.001)
        self.optim_dec = torch.optim.Adam(self.decoder.parameters(), lr=0.001)
        if self.loss_weighting:
            self.optim_loss_weights = torch.optim.Adam([self.loss_weights],
                                                       lr=0.0001)
        # Halve the decoder LR when the monitored (maximized) metric plateaus.
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optim_dec,
                                                        mode='max',
                                                        factor=0.5,
                                                        patience=1,
                                                        min_lr=0.0001,
                                                        verbose=True)

        if self.use_cuda:
            self.cross_entropy = self.cross_entropy.cuda()
            self.encoder = self.encoder.cuda()
            self.decoder = self.decoder.cuda()
            if self.loss_weighting:
                self.loss_weights = self.loss_weights.cuda()
 def __build(self):
     """Instantiate the seq2seq graph pieces and a TensorFlow session.

     Uses hyper-parameters already stored on self: vocabulary sizes,
     embedding dims, encoder/decoder/attention unit counts, and the
     attention score type.
     """
     self.encoder = Encoder(self.src_vocab_size, self.src_emb_dim,
                            self.enc_units)
     self.decoder = Decoder(self.tgt_vocab_size, self.tgt_emb_dim,
                            self.enc_units, self.dec_units, self.att_units,
                            self.score_type)
     self.sess = tf.Session()
def make_model(src_vocab,
               tgt_vocab,
               N=6,
               d_model=512,
               d_ff=2048,
               h=8,
               dropout=0.1):
    """Helper: construct an EncoderDecoder transformer from hyperparameters."""
    clone = copy.deepcopy
    attention = MultiHeadedAttention(h, d_model)
    feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
    pos_encoding = PositionalEncoding(d_model, dropout)

    model = EncoderDecoder(
        Encoder(EncoderLayer(d_model, clone(attention), clone(feed_forward),
                             dropout), N),
        Decoder(DecoderLayer(d_model, clone(attention), clone(attention),
                             clone(feed_forward), dropout), N),
        nn.Sequential(Embeddings(d_model, src_vocab), clone(pos_encoding)),
        nn.Sequential(Embeddings(d_model, tgt_vocab), clone(pos_encoding)),
        Generator(d_model, tgt_vocab))

    # This was important from their code.
    # Initialize parameters with Glorot / fan_avg.
    # (Xavier uniform init; see https://zhuanlan.zhihu.com/p/74274453)
    for param in model.parameters():
        if param.dim() > 1:
            nn.init.xavier_uniform_(param)
    return model
    def __init__(self, vocab_size, embed_size, hidden_size, choose_model,
                 use_gpu, gpu_id):
        """Build the encoder-decoder with a selectable CNN encoder.

        Args:
            vocab_size: output vocabulary size.
            embed_size: decoder embedding size.
            hidden_size: hidden size shared by the encoder CNN and decoder.
            choose_model: encoder CNN name, "Alex_RNNLM" or "AlexBn_RNNLM"
                (any other value falls back to Alex_RNNLM, as before).
            use_gpu: move the encoder to the GPU when True.
            gpu_id: device id used when use_gpu is True.
        """
        # gpu Setting
        # Fix: construct the encoder exactly once — the old code always built
        # a default Alex_RNNLM and then rebuilt it when the name matched.
        if choose_model == "AlexBn_RNNLM":
            model = AlexBn_RNNLM(hidden_size)
        else:
            model = Alex_RNNLM(hidden_size)

        if use_gpu:
            cuda.get_device(gpu_id).use()
            model.to_gpu()
        # Setting Model
        super(EncoderDecoder, self).__init__(
            enc=model,
            dec=Decoder(vocab_size, embed_size, hidden_size),
        )
        self.vocab_size = vocab_size
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.common_function = CommonFunction()
        self.use_gpu = use_gpu
        self.gpu_id = gpu_id
        self.choose_model = choose_model
        self.__set_gpu()
Beispiel #23
0
    def encoder_decoder(self, inputs):
        """Encode `inputs`, decode them back, then re-encode the reconstruction.

        Returns:
            (encoded, decoded, decoded_encoded): features of the input, the
            decoded reconstruction, and features of that reconstruction.
        """
        features = self.encoder.encoder(inputs, self.target_layer)
        reconstruction, _ = Decoder().decoder(features, self.target_layer)
        refeatures = self.encoder.encoder(reconstruction, self.target_layer)
        return features, reconstruction, refeatures
Beispiel #24
0
 def __init__(self, src_vocab_size: int, tgt_vocab_size: int,
              encoder_layer_num: int, decoder_layer_num: int,
              hidden_size: int, feedback_size: int, num_head: int,
              dropout: float, device: str):
     """Build both a custom Encoder/Decoder stack and off-the-shelf torch
     TransformerEncoder/TransformerDecoder modules.

     NOTE(review): the *_off modules and `self.model = tf()` use hard-coded
     sizes (nhead=8, d_model=512, 6 layers) that ignore the constructor
     arguments — confirm which path is actually exercised.
     """
     super().__init__()
     # Custom encoder/decoder parameterised by the constructor arguments.
     self.Encoder = Encoder(src_vocab_size,
                            num_encoder_layer=encoder_layer_num,
                            hidden_size=hidden_size,
                            num_head=num_head,
                            feedward=feedback_size,
                            dropout=dropout,
                            device=device)
     self.Decoder = Decoder(tgt_vocab_size,
                            num_layer=decoder_layer_num,
                            hiddensize=hidden_size,
                            num_head=num_head,
                            feed_back=feedback_size,
                            dropout=dropout,
                            device=device)
     # Reference torch implementation with fixed hyper-parameters.
     Encoder_layer = TransformerEncoderLayer(nhead=8, d_model=512)
     self.Encoder_off = TransformerEncoder(encoder_layer=Encoder_layer,
                                           num_layers=6)
     Decoder_layer = TransformerDecoderLayer(nhead=8,
                                             dim_feedforward=2048,
                                             d_model=512)
     self.Decoder_off = TransformerDecoder(decoder_layer=Decoder_layer,
                                           num_layers=6)
     self.model = tf()
     self.device = device
     # Embeddings, positional encoding and output projection all use width 512.
     self.input_embedding = torch.nn.Embedding(src_vocab_size, 512)
     self.output_embedding = torch.nn.Embedding(tgt_vocab_size, 512)
     self.positional = Positional_Encoding(512, 512, device)
     self.linear = torch.nn.Linear(512, tgt_vocab_size)
Beispiel #25
0
    def __init__(self, cells, heads, seq_len_enc, seq_len_dec,
                 attention_dimensions, vocab_size):
        """Encoder/decoder pair, both moved onto the module-level `device`."""
        super().__init__()

        enc = Encoder(cells, heads, seq_len_enc, attention_dimensions)
        dec = Decoder(cells, heads, seq_len_dec, attention_dimensions,
                      vocab_size)
        self.encoder = enc.to(device)
        self.decoder = dec.to(device)
Beispiel #26
0
 def encoder_decoder(self, inputs):
     """Encode `inputs`, reconstruct them with a Decoder, and re-encode the result.

     Returns:
         (encoded, decoded, decoded_encoded): the extracted features, the
         image generated from them, and the features of that generated image.
     """
     encoded = self.encoder.encoder(inputs, self.target_layer)  # feature extraction
     model = Decoder()  # image-generation model
     decoded, _ = model.decoder(encoded, self.target_layer)  # generate the image
     # Extract features again, this time from the generated image.
     decoded_encoded = self.encoder.encoder(decoded, self.target_layer)
     # Return: a. extracted features  b. image generated from the features
     # c. features extracted from the generated image
     return encoded, decoded, decoded_encoded
Beispiel #27
0
 def __init__(self, vocab_size, eos_id=1):
     """Seq2Seq model: ResNet18 feature encoder plus a decoder head.

     Args:
         vocab_size: size of the output vocabulary.
         eos_id: id of the end-of-sequence token (default 1).
     """
     super(Seq2Seq, self).__init__()
     self.vocab_size = vocab_size
     self.eos_id = eos_id
     # 128-dimensional ResNet18 feature extractor.
     self.encoder = ResNet18(128)
     # Decoder named 'ctc_layer2' with num_layers=0 — presumably a CTC head;
     # TODO confirm against the Decoder implementation.
     self.decoder = Decoder(vocab_size=self.vocab_size,
                            num_layers=0,
                            name='ctc_layer2')
    def aptidao(self, cromossomo):
        """Fitness function: transform the chromosome's random keys into
        binaries, check the constraints, and return the cost Z to minimise."""
        decoder = Decoder(self.custos, sqrt(self.TAM_CROM))
        return decoder.decode(cromossomo)
Beispiel #29
0
 def __init__(self, options):
     """Dialogue model components, all configured by `options`:
     utterance encoder, key-value memory reader, inter-utterance encoder,
     decoder, and a KG embedding module.
     """
     super(model, self).__init__()
     self.enc = UtteranceEncoder(options)
     self.kvmlookup = KVMemoryReader(options)
     # Encodes the sequence of utterance encodings (input and hidden sizes
     # both taken from options.ut_hid_size).
     self.inter_utter_encoder = InterUtteranceEncoder(
         options.ut_hid_size, options.ut_hid_size, options)
     self.dec = Decoder(options, options.response_vocab_size)
     self.kg_predict = KG_embedding(options)
Beispiel #30
0
 def __init__(self, vocab_size, embed_size, hidden_size):
     """Encoder-decoder pair sharing the same vocab/embedding/hidden sizes.

     Args:
         vocab_size: vocabulary size for both encoder and decoder.
         embed_size: embedding dimension.
         hidden_size: hidden-state dimension.
     """
     super(EncoderDecoder, self).__init__(
         enc=Encoder(vocab_size, embed_size, hidden_size),
         dec=Decoder(vocab_size, embed_size, hidden_size),
     )
     # Construction hyper-parameters kept for later reference.
     self.vocab_size = vocab_size
     self.embed_size = embed_size
     self.hidden_size = hidden_size
     self.common_function = CommonFunction()