Example #1
File: main.py Project: njnuzpy/CWS
def test_avg(iterations, test_file, beam_size):
    data = prepare_data.read_file(test_file)
    feature = Feature()
    decoder = Decoder(beam_size, feature.get_score)

    count = 0
    data_size = len(data)

    model_file = open(
        '/home/xzt/CWS/model_result/avg-model_beam-size-' + str(beam_size) +
        '.pkl', 'rb')
    feature.load_model(model_file)
    model_file.close()
    for line in data:
        z = decoder.beamSearch(line)
        seg_data = ' '.join(z)
        seg_data_file = '/home/xzt/CWS/test_seg_data/avg-test-seg-data' + '_beam-size-' + str(
            beam_size) + '.txt'
        with open(seg_data_file, 'a') as f:
            f.write(seg_data + '\n')
        count += 1
        if count % 1000 == 0:
            print("segment with avg-model, finish %.2f%%" %
                  ((count / data_size) * 100))
    print("segment with avg model finish")
Example #2
File: test.py Project: FUHannes/IVC
def generate_data(filename, version, block_size=16):
    input_path = os.path.join(PGM_ORIGINAL_PATH, filename + PGM_SUFFIX)

    if not os.path.exists(BITSTREAM_PATH):
        os.mkdir(BITSTREAM_PATH)
    bitstream_path = os.path.join(BITSTREAM_PATH, filename + BITSTREAM_SUFFIX)

    if not os.path.exists(PGM_RECONSTRUCTION_PATH):
        os.mkdir(PGM_RECONSTRUCTION_PATH)
    output_path = os.path.join(PGM_RECONSTRUCTION_PATH, filename + PGM_SUFFIX)

    df = pd.DataFrame(columns=['bpp', 'db'])

    for index, quality in enumerate([8, 12, 16, 20, 24]):
        enc = Encoder(input_path, bitstream_path, block_size, quality, False)
        enc.encode_image()
        dec = Decoder(bitstream_path, output_path, pgm=True)
        dec.decode_all_frames()

        process = subprocess.run(
            [PSNR_TOOL_PATH, input_path, output_path, bitstream_path],
            stdout=subprocess.PIPE,
        )

        stdout = process.stdout.decode("utf-8")
        bpp, db = stdout.split(' bpp ')
        bpp, db = float(bpp), float(db.replace(' dB', ''))
        db = 0.0 if math.isinf(db) else db
        df.loc[index] = [bpp, db]

    version_path = os.path.join(DATA_ROOT_PATH, version)
    print(version)
    if not os.path.exists(version_path):
        os.mkdir(version_path)
    df.to_pickle(os.path.join(version_path, filename + DATA_SUFFIX))
Example #3
File: main.py Project: njnuzpy/CWS
def train(iterations, train_file, beam_size):
    data = prepare_data.read_file(train_file)
    feature = Feature()
    decoder = Decoder(beam_size, feature.get_score)

    for t in range(iterations):
        count = 0
        data_size = len(data)

        for line in data:
            y = line.split()
            z = decoder.beamSearch(line)
            if z != y:
                feature.update_weight(y, z)

            train_seg = ' '.join(z)
            seg_data_file = '/home/xzt/CWS/train_seg_data/train-seg-data_ model-' + str(
                t) + '.txt'
            with open(seg_data_file, 'a') as f:
                f.write(train_seg + '\n')

            count += 1
            if count % 1000 == 0:
                print("iter %d , finish %.2f%%" % (t,
                                                   (count / data_size) * 100))

        model_file = open(
            "/home/xzt/CWS/model_result/model-" + str(t) + "_beam-size-" +
            str(beam_size) + '.pkl', 'wb')
        feature.save_model(model_file)

        model_file.close()
        print("segment with model-%d finish" % t)
        print("iteration %d finish" % t)
Example #4
    def predict(self, y, phi_gmm, encoder_layers, decoder_layers, seed=0):
        """
        Args:
            y: data to cluster and reconstruct
            phi_gmm: latent phi param
            encoder_layers: encoder NN architecture
            decoder_layers: decoder NN architecture
            seed: random seed

        Returns:
            reconstructed y and most probable cluster allocation
        """

        nb_samples = 1
        phi_enc_model = Encoder(layerspecs=encoder_layers)
        phi_enc = phi_enc_model.forward(y)

        x_k_samples, log_r_nk, _, _ = e_step(phi_enc,
                                             phi_gmm,
                                             nb_samples,
                                             seed=0)
        x_samples = subsample_x(x_k_samples, log_r_nk, seed)[:, 0, :]

        y_recon_model = Decoder(layerspecs=decoder_layers)
        y_mean, _ = y_recon_model.forward(x_samples)

        return (y_mean, torch.argmax(log_r_nk, dim=1))
Example #5
 def call_Decoder(self,nlayer,Elt_state,\
                  Rlt_top,Rlt_state,epoch):
     # h = hidden layers
     R_lt_next = None
     Elt_state_in = None
     Decode_lt = None
     # return hidden layers size:
     h_l_down_in,  h_l_top_out,\
     h_l_down_out, h_Elt  = self.hidden_layers_selctor(nlayer)
     if Elt_state is None:
         Elt_state_in = self.get_init_Elt_tensor(self.error_states[nlayer +
                                                                   1])
     else:
         Elt_state_in = Elt_state
     if Rlt_top is None:
         Decode_lt = Decoder(h_Elt, 0, h_l_down_out,\
                     self.kernel_size).cuda()
         Elt_state = self.get_init_Elt_tensor(self.error_states[nlayer + 1])
         R_lt_next = Decode_lt(Elt_state_in, None, None)
     else:
         Decode_lt = Decoder(h_Elt, Rlt_top.data.size()[1],\
                     h_l_top_out,self.kernel_size).cuda()
         R_lt_next = Decode_lt(Elt_state_in,\
                     Rlt_top,Rlt_state)
     if self.saveModel == True:
         if epoch % self.numSaveIter == 0:
             self.save_models(Decode_lt, epoch, "Decoder")
     return R_lt_next, Decode_lt.parameters()
Example #6
    def encoder_decoder(self, inputs):
        encoded = self.encoder.encoder(inputs, self.target_layer)
        model = Decoder()
        decoded, _ = model.decoder(encoded, self.target_layer)
        decoded_encoded = self.encoder.encoder(decoded, self.target_layer)

        return encoded, decoded, decoded_encoded
Example #7
    def __init__(self, w2i, i2w):
        """
        Args:
            args: parameters of the model
            textData: the dataset object
        """
        super(Model, self).__init__()
        print("Model creation...")

        self.word2index = w2i
        self.index2word = i2w
        self.max_length = args['maxLengthDeco']

        self.dtype = 'float32'
        self.NLLloss = torch.nn.NLLLoss(reduction='none')
        self.CEloss = torch.nn.CrossEntropyLoss(reduction='none')

        self.embedding = nn.Embedding(args['vocabularySize'],
                                      args['embeddingSize'])
        self.emo_embedding = nn.Embedding(args['emo_labelSize'],
                                          args['embeddingSize'])
        self.encoder = Encoder(w2i, i2w, self.embedding)
        self.decoder = Decoder(w2i, i2w, self.embedding)
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax(dim=-1)
Example #8
    def __init__(self, args):
        super(Mem2SeqRunner, self).__init__(args)

        # Model parameters
        self.gru_size = 128
        self.emb_size = 128
        #TODO: Try hops 4 with task 3
        self.hops = 3
        self.dropout = 0.2

        self.encoder = Encoder(self.hops, self.nwords, self.gru_size)
        self.decoder = Decoder(self.emb_size, self.hops, self.gru_size,
                               self.nwords)

        self.optim_enc = torch.optim.Adam(self.encoder.parameters(), lr=0.001)
        self.optim_dec = torch.optim.Adam(self.decoder.parameters(), lr=0.001)
        if self.loss_weighting:
            self.optim_loss_weights = torch.optim.Adam([self.loss_weights],
                                                       lr=0.0001)
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optim_dec,
                                                        mode='max',
                                                        factor=0.5,
                                                        patience=1,
                                                        min_lr=0.0001,
                                                        verbose=True)

        if self.use_cuda:
            self.cross_entropy = self.cross_entropy.cuda()
            self.encoder = self.encoder.cuda()
            self.decoder = self.decoder.cuda()
            if self.loss_weighting:
                self.loss_weights = self.loss_weights.cuda()
Example #9
 def __init__(self, mem_size: int = 10000):
     """
     The following is an abstraction for a bank of values
     such as valA, which will be used during each cycle.
     It's set up as an object to avoid circular import.
     """
     self.ValBank = ValBank()
     """
     The following are functional units like memory,
     registers, or flags
     """
     self.Memory = Memory(mem_size)
     self.RegisterBank = RegisterBank()
     self.ZF = CCFlag("ZF")  # zero flag
     self.OF = CCFlag("OF")  # overflow flag
     self.SF = CCFlag("SF")  # sign flag
     self.ErrorFlag = StateFlag("Error Flag", error_lib)
     self.StateFlag = StateFlag("State Flag", state_lib)
     self.ALU = ALU(self.ValBank, self.StateFlag, self.ErrorFlag, self.SF, self.OF, self.ZF)
     """
     The following are functional abstractions of operations
     that the processor performs
     """
     self.Fetcher = Fetcher(self.ValBank, self.RegisterBank, self.Memory, self.StateFlag, self.ErrorFlag)
     self.Decoder = Decoder(self.ValBank, self.RegisterBank, self.Memory)
     self.Executor = Executor(self.ValBank, self.ALU, self.OF, self.ZF, self.SF)
     self.Memorizer = Memorizer(self.ValBank, self.Memory)
     self.RegWriter = RegWriter(self.RegisterBank, self.ValBank)
     self.PCUpdater = PCUpdater(self.RegisterBank, self.ValBank)
Example #10
def test_folder(folder):
    """ Test all images inside a folder

        Use Zbar library to test the image.

    Args:
        folder:  The path of your target folder

    Returns:
        (succ, fail, rate):  The number of successes, the number of failures,
        and the success rate = succ / (succ + fail)

    """
    def is_img(path):
        # Add more extensions if you need
        img_ext = ['jpg', 'png', 'bmp']
        return path.split('.')[-1] in img_ext

    dc = Decoder()
    img_list = []
    for root, folders, files in os.walk(folder):
        # accumulate images from every directory under `folder`, not just the last one visited
        img_list += [os.path.join(root, file) for file in files if is_img(file)]

    succ = fail = 0
    for img in img_list:
        pil = Image.open(img).convert('L')
        code = dc.decode(pil)
        if len(code) > 0:
            succ += 1
        else:
            fail += 1
    rate = float(succ) / (succ + fail)
    return (succ, fail, rate)
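A minimal usage sketch for test_folder; the folder name below is hypothetical and only the call pattern matters.

# Hypothetical usage (the directory name is illustrative only)
succ, fail, rate = test_folder('qr_samples')
print('decoded %d of %d images (%.2f%% success)' % (succ, succ + fail, rate * 100))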
Example #11
    def __init__(self, input_size, hidden_size, batch_size, learning_rate,
                 num_epoch, method):
        dataset = Seq2SeqDataset()

        self.vocab = sorted(set(dataset.full_text))
        self.vocab_size = len(self.vocab)
        self.char2ind, self.ind2char = self.get_vocab()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = self.vocab_size
        self.method = method
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.num_epoch = num_epoch
        self.device = "cuda:0" if torch.cuda.is_available() else "cpu"

        self.dataloader = DataLoader(dataset=dataset,
                                     batch_size=batch_size,
                                     shuffle=True)

        self.encoder = Encoder(input_size, hidden_size, self.vocab_size)
        self.decoder = Decoder(hidden_size, self.output_size, method)

        self.encoder = self.encoder.to(self.device)
        self.decoder = self.decoder.to(self.device)

        self.loss_function = NLLLoss()

        self.encoder_optim = optim.Adam(self.encoder.parameters(),
                                        lr=self.learning_rate)
        self.decoder_optim = optim.Adam(self.decoder.parameters(),
                                        lr=self.learning_rate)
Example #12
    def __init__(self, config):
        super(Model, self).__init__()
        self.config = config

        # Embedding layer
        self.embedding = Embedding(
            config.num_vocab,  # vocabulary size
            config.embedding_size,  # embedding dimension
            config.pad_id,  # pad_id
            config.dropout)

        # post encoder
        self.post_encoder = Encoder(
            config.post_encoder_cell_type,  # RNN cell type
            config.embedding_size,  # input size
            config.post_encoder_output_size,  # output size
            config.post_encoder_num_layers,  # number of RNN layers
            config.post_encoder_bidirectional,  # bidirectional or not
            config.dropout)  # dropout probability

        # response encoder
        self.response_encoder = Encoder(
            config.response_encoder_cell_type,
            config.embedding_size,  # input size
            config.response_encoder_output_size,  # output size
            config.response_encoder_num_layers,  # number of RNN layers
            config.response_encoder_bidirectional,  # bidirectional or not
            config.dropout)  # dropout probability

        # prior network
        self.prior_net = PriorNet(
            config.post_encoder_output_size,  # post input size
            config.latent_size,  # latent variable size
            config.dims_prior)  # hidden layer sizes

        # recognition network
        self.recognize_net = RecognizeNet(
            config.post_encoder_output_size,  # post input size
            config.response_encoder_output_size,  # response input size
            config.latent_size,  # latent variable size
            config.dims_recognize)  # hidden layer sizes

        # prepare the initial decoder state
        self.prepare_state = PrepareState(
            config.post_encoder_output_size + config.latent_size,
            config.decoder_cell_type, config.decoder_output_size,
            config.decoder_num_layers)

        # decoder
        self.decoder = Decoder(
            config.decoder_cell_type,  # RNN cell type
            config.embedding_size,  # input size
            config.decoder_output_size,  # output size
            config.decoder_num_layers,  # number of RNN layers
            config.dropout)  # dropout probability

        # output layer
        self.projector = nn.Sequential(
            nn.Linear(config.decoder_output_size, config.num_vocab),
            nn.Softmax(-1))
Example #13
    def aptidao(self, cromossomo):
        '''
        Converts the random keys into binary values, checks the constraints,
        and returns the cost to be minimized.
        '''
        decodificacao = Decoder(self.custos, sqrt(self.TAM_CROM))
        Z = decodificacao.decode(cromossomo)
        return Z
Example #14
 def encoder_decoder(self, inputs):
     encoded = self.encoder.encoder(inputs, self.target_layer)  # feature extraction
     model = Decoder()  # image-generation model
     decoded, _ = model.decoder(encoded, self.target_layer)  # generate the image
     # extract features from the generated image again
     decoded_encoded = self.encoder.encoder(decoded, self.target_layer)
     # returns: a. the extracted features, b. the image generated from those features,
     # c. the features extracted from the generated image
     return encoded, decoded, decoded_encoded
Example #15
File: model.py Project: vicyor/bysj
 def __init__(self,target_layer,content_path,style_path,alpha,pretrained_vgg,output_path,decoder_weights) :
     self.target_layer = target_layer
     self.content_path = content_path
     self.style_path = style_path
     self.output_path = output_path
     self.alpha = alpha
     self.encoder = Vgg19(pretrained_vgg)
     self.decoder = Decoder()  
     self.decoder_weights = decoder_weights
Example #16
def Decode(input, msg, coder, transSymbols=None):
    print("----------")
    print("Wejscie: " + input)
    decoder = Decoder(None, None, coder)
    if transSymbols != None:
        print("Zdekodowano: " +
              decoder.DecodeAllLevelsString(msg, transSymbols))
    else:
        print("Zdekodowano: " + decoder.DecodeString(msg))
    print("----------")
Example #17
File: model.py Project: vicyor/bysj
 def __init__(self,content_path,style_path,alpha,pretrained_vgg,output_path) :
     self.content_path = content_path
     self.style_path = style_path
     self.output_path = output_path
     self.alpha = alpha
     self.encoder = Vgg19(pretrained_vgg)
     self.decoder = Decoder()
     # add a fifth weight
     # add --> models/decoder_5.ckpt
     self.decoder_weights = ['models/decoder_1.ckpt','models/decoder_2.ckpt','models/decoder_3.ckpt','models/decoder_4.ckpt','models/model.ckpt-15004']
Example #18
def network():
    # define encoder input data
    enc_token_ids = fluid.layers.data(name="enc_token_ids", shape=[None, SEQ_MAX_LEN], dtype='int64')
    enc_segment_ids = fluid.layers.data(name="enc_segment_ids", shape=[None, SEQ_MAX_LEN], dtype='int64')
    enc_pos_ids = fluid.layers.data(name="enc_pos_ids", shape=[None, SEQ_MAX_LEN], dtype='int64')
    enc_input_length = fluid.layers.data(name='enc_input_length', shape=[None, SEQ_MAX_LEN, 1], dtype='int64')

    # define decoder input data
    dec_token_ids = fluid.layers.data(name="token_ids", shape=[None, SEQ_MAX_LEN], dtype='int64')
    dec_segment_ids = fluid.layers.data(name="segment_ids", shape=[None, SEQ_MAX_LEN], dtype='int64')
    dec_pos_ids = fluid.layers.data(name="pos_ids", shape=[None, SEQ_MAX_LEN], dtype='int64')
    dec_enc_slf_attn = fluid.layers.data(name='enc_slf_attn', shape=[None, SEQ_MAX_LEN, SEQ_MAX_LEN], dtype='int64')

    # task label
    dec_lm_label_mat = fluid.layers.data(name='lm_label_mat', shape=[None, SEQ_MAX_LEN], dtype='int64')
    dec_lm_pos_mask = fluid.layers.data(name='lm_pos_mask', shape=[None, SEQ_MAX_LEN, SEQ_MAX_LEN], dtype='int64')
    dec_lm_pos_len = fluid.layers.data(name='lm_pos_len', shape=[None, 1], dtype='int64')

    goal_type_pos = fluid.layers.data(name="goal_type_pos", shape=[None, 2], dtype='int64')
    goal_type_label = fluid.layers.data(name="goal_type_label", shape=[None], dtype='int64')

    # enc_dec_mask
    enc_dec_mask = fluid.layers.data(name='enc_dec_mask', shape=[None, SEQ_MAX_LEN, SEQ_MAX_LEN], dtype='int64')

    # network
    encode = Encoder(enc_token_ids, enc_pos_ids, enc_segment_ids,
                     enc_input_length, config)
    enc_output = encode.get_sequence_output()

    decode = Decoder(dec_token_ids, dec_pos_ids, dec_segment_ids,
                     dec_enc_slf_attn, config=config, enc_input=enc_output, enc_input_mask=enc_dec_mask)

    loss, goal_type_acc = decode.pretrain(goal_type_pos, goal_type_label,
                                          dec_lm_label_mat, dec_lm_pos_mask, dec_lm_pos_len)

    input_name_list = [
        enc_token_ids.name,
        enc_segment_ids.name,
        enc_pos_ids.name,
        enc_input_length.name,

        dec_token_ids.name,
        dec_segment_ids.name,
        dec_pos_ids.name,
        dec_enc_slf_attn.name,
        enc_dec_mask.name,

        dec_lm_label_mat.name,
        dec_lm_pos_mask.name,
        dec_lm_pos_len.name,

        goal_type_pos.name,
        goal_type_label.name
    ]
    return loss.name, goal_type_acc.name, input_name_list
Example #19
class Processor:
    """
    This represents the processor itself. Most work and operations
    will be outsourced to other objects referenced by the processor.
    The processor holds the values of valP, valE, valA, valB, valC,
    and rA, rB.
    """

    def __init__(self, mem_size: int = 10000):
        """
        The following is an abstraction for a bank of values
        such as valA, which will be used during each cycle.
        It's set up as an object to avoid circular import.
        """
        self.ValBank = ValBank()
        """
        The following are functional units like memory,
        registers, or flags
        """
        self.Memory = Memory(mem_size)
        self.RegisterBank = RegisterBank()
        self.ZF = CCFlag("ZF")  # zero flag
        self.OF = CCFlag("OF")  # overflow flag
        self.SF = CCFlag("SF")  # sign flag
        self.ErrorFlag = StateFlag("Error Flag", error_lib)
        self.StateFlag = StateFlag("State Flag", state_lib)
        self.ALU = ALU(self.ValBank, self.StateFlag, self.ErrorFlag, self.SF, self.OF, self.ZF)
        """
        The following are functional abstractions of operations
        that the processor performs
        """
        self.Fetcher = Fetcher(self.ValBank, self.RegisterBank, self.Memory, self.StateFlag, self.ErrorFlag)
        self.Decoder = Decoder(self.ValBank, self.RegisterBank, self.Memory)
        self.Executor = Executor(self.ValBank, self.ALU, self.OF, self.ZF, self.SF)
        self.Memorizer = Memorizer(self.ValBank, self.Memory)
        self.RegWriter = RegWriter(self.RegisterBank, self.ValBank)
        self.PCUpdater = PCUpdater(self.RegisterBank, self.ValBank)

    def run(self):
        """
        This is the only necessary public method for the processor.
        Currently the way to operate this is by manually loading values
        into the memory using the place_instruction method, then calling
        the 'run' method for the processor. Afterwards, calling print
        on the memory object will reveal the final state of the processor.
        """
        while self.StateFlag.State == 0:
            self.Fetcher.fetch()
            self.Decoder.decode()
            self.Executor.execute()
            self.Memorizer.memory_write()
            self.RegWriter.write_back()
            self.PCUpdater.update_pc()
Example #20
	def __init__(self):
		threading.Thread.__init__(self)
		self.shutdown_flag = threading.Event()
		self.motor = Motorcontroller()
		self.buzzer = Buzzer()
		self.xbee = Xbee()
		self.decoder = Decoder()
		self.servo1 = 96
		self.servo2 = 75
		self.joycalc = Joystick()
		self.motor.setServo1(self.servo1)
		self.motor.setServo2(self.servo2)
		self.lastSavedTime = 0
Example #21
    def __init__(self, w2i, i2w, embs=None, title_emb = None, info=None):
        """
        Args:
            args: parameters of the model
            textData: the dataset object
        """
        super(LSTM_CTE_Model_with_action, self).__init__()
        print("Model creation...")

        self.word2index = w2i
        self.index2word = i2w
        self.max_length = args['maxLengthDeco']
        self.info = info

        self.NLLloss = torch.nn.NLLLoss(ignore_index=0)
        self.CEloss = torch.nn.CrossEntropyLoss(ignore_index=0)

        if embs is not None:
            self.embedding = nn.Embedding.from_pretrained(embs)
        else:
            self.embedding = nn.Embedding(args['vocabularySize'], args['embeddingSize'])

        if title_emb is not None:
            self.field_embedding = nn.Embedding.from_pretrained(title_emb)
        else:
            self.field_embedding = nn.Embedding(args['TitleNum'], args['embeddingSize'])

        self.encoder = Encoder(w2i, i2w, bidirectional=True)
        # self.encoder_answer_only = Encoder(w2i, i2w)
        self.encoder_no_answer = Encoder(w2i, i2w)
        self.encoder_pure_answer = Encoder(w2i, i2w)

        self.decoder_answer = Decoder(w2i, i2w, self.embedding, copy='pure', max_dec_len=10)
        self.decoder_no_answer = Decoder(w2i, i2w, self.embedding,
                                         input_dim = args['embeddingSize']*2,# +  args['hiddenSize'] ,
                                         copy='semi')

        self.ansmax2state_h = nn.Linear(args['embeddingSize'], args['hiddenSize']*2, bias=False)
        self.ansmax2state_c = nn.Linear(args['embeddingSize'], args['hiddenSize']*2, bias=False)
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(dim=-1)
        self.sigmoid = nn.Sigmoid()

        self.att_size_r = 60
        # self.grm = GaussianOrthogonalRandomMatrix()
        # self.att_projection_matrix = Parameter(self.grm.get_2d_array(args['embeddingSize'], self.att_size_r))
        self.M = Parameter(torch.randn([args['embeddingSize'], args['hiddenSize']*2,2]))

        self.shrink_copy_input= nn.Linear(args['hiddenSize']*2, args['hiddenSize'], bias=False)
        self.emb2hid = nn.Linear(args['embeddingSize'], args['hiddenSize'], bias=False)
Example #22
    def __init__(self, config):
        super(Model, self).__init__()

        self.config = config

        # Affect (emotion) embedding layer
        self.affect_embedding = AffectEmbedding(config.num_vocab,
                                                config.affect_embedding_size,
                                                config.pad_id)

        # Word embedding layer
        self.embedding = WordEmbedding(config.num_vocab,  # vocabulary size
                                       config.embedding_size,  # embedding dimension
                                       config.pad_id)  # pad_id

        # Affect encoder
        self.affect_encoder = Encoder(config.encoder_decoder_cell_type,  # RNN cell type
                                      config.affect_embedding_size,  # input size
                                      config.affect_encoder_output_size,  # output size
                                      config.encoder_decoder_num_layers,  # number of layers
                                      config.encoder_bidirectional,  # bidirectional or not
                                      config.dropout)

        # Encoder
        self.encoder = Encoder(config.encoder_decoder_cell_type,  # RNN cell type
                               config.embedding_size,  # input size
                               config.encoder_decoder_output_size,  # output size
                               config.encoder_decoder_num_layers,  # number of RNN layers
                               config.encoder_bidirectional,  # bidirectional or not
                               config.dropout)  # dropout probability

        self.prepare_state = PrepareState(config.encoder_decoder_cell_type,
                                          config.encoder_decoder_output_size + config.affect_encoder_output_size,
                                          config.encoder_decoder_output_size)

        self.linear_prepare_input = nn.Linear(config.embedding_size + config.affect_encoder_output_size,
                                              config.decoder_input_size)

        # Decoder
        self.decoder = Decoder(config.encoder_decoder_cell_type,  # RNN cell type
                               config.decoder_input_size,  # input size
                               config.encoder_decoder_output_size,  # output size
                               config.encoder_decoder_num_layers,  # number of RNN layers
                               config.dropout)  # dropout probability

        # Output layer
        self.projector = nn.Sequential(
            nn.Linear(config.encoder_decoder_output_size, config.num_vocab),
            nn.Softmax(-1)
        )
Example #23
    def __init__(self, config):
        super(Model, self).__init__()
        self.config = config

        self.embedding = Embedding(config.num_vocab,
                                   config.embedding_size,
                                   config.pad_id,
                                   config.dropout)

        self.affect_embedding = Embedding(config.num_vocab,
                                          config.affect_embedding_size,
                                          config.pad_id,
                                          config.dropout)
        self.affect_embedding.embedding.weight.requires_grad = False

        self.post_encoder = Encoder(config.encoder_cell_type,
                                    config.embedding_size + config.affect_embedding_size,
                                    config.encoder_output_size,
                                    config.encoder_num_layers,
                                    config.encoder_bidirectional,
                                    config.dropout)

        self.response_encoder = Encoder(config.encoder_cell_type,
                                        config.embedding_size + config.affect_embedding_size,
                                        config.encoder_output_size,
                                        config.encoder_num_layers,
                                        config.encoder_bidirectional,
                                        config.dropout)

        self.prior_net = PriorNet(config.encoder_output_size,
                                  config.latent_size,
                                  config.dims_prior)

        self.recognize_net = RecognizeNet(config.encoder_output_size,
                                          config.encoder_output_size,
                                          config.latent_size,
                                          config.dims_recognize)

        self.prepare_state = PrepareState(config.encoder_output_size + config.latent_size,
                                          config.decoder_cell_type,
                                          config.decoder_output_size,
                                          config.decoder_num_layers)

        self.decoder = Decoder(config.decoder_cell_type,
                               config.embedding_size + config.affect_embedding_size + config.encoder_output_size,
                               config.decoder_output_size,
                               config.decoder_num_layers,
                               config.dropout)

        self.projector = nn.Sequential(nn.Linear(config.decoder_output_size, config.num_vocab), nn.Softmax(-1))
Example #24
 def __init__(self, content_path, style_path, alpha_wct, alpha_swap,
              pretrained_vgg, output_path):
     self.content_path = content_path  # content image path
     self.style_path = style_path  # style image path
     self.output_path = output_path  # output path for the blended image
     self.alpha_wct = alpha_wct  # content/style weighting for the WCT method
     self.alpha_swap = alpha_swap  # content/style weighting for the WCT-swap method
     self.encoder = Vgg19(pretrained_vgg)  # load the VGG19 model
     self.decoder = Decoder()  # load the Decoder (deconvolution)
     # load the Decoder model weights
     self.decoder_weights = [
         'models/decoder_1.ckpt', 'models/decoder_2.ckpt',
         'models/decoder_3.ckpt', 'models/decoder_4.ckpt'
     ]
Example #25
    def encoder_decoder(self, inputs):
        """
        encoder_decoder function:
        
        Outputs:
        encoded: feature map obtained by processing the original image with the encoder
        decoded: reconstructed image obtained by processing the original image with both the encoder and decoder
        decoded_encoded: feature map obtained by processing the reconstructed image with the encoder
        """
        encoded = self.encoder.encoder(inputs, self.target_layer)
        model = Decoder()
        decoded, _ = model.decoder(encoded, self.target_layer)
        decoded_encoded = self.encoder.encoder(decoded, self.target_layer)

        return encoded, decoded, decoded_encoded
Example #26
 def __init__(self,
              in_channels,
              hidden_channels,
              num_resblocks,
              res_channels,
              D,
              K,
              beta=0.25,
              gamma=0.99):
     """
     in_channels: number of channels of the input image
     hidden_channels: the number of channels that are used by the hidden conv layers
     num_resblocks: the number of residual blocks used in both the encoder and the decoder
     res_channels: the number of channels that are used by the residual blocks
     D: dimensionality of each embedding vector, or embedding_dim in the Sonnet implementation
     K: the size of the discrete space (the number of embedding vectors), or num_embeddings in the Sonnet implementation
     beta: the hyperparameter that weights the commitment loss term, or commitment_cost in the Sonnet implementation
         recommendation from the paper, beta=0.25
     gamma: controls the speed of the EMA, or decay in the Sonnet implementation
         recommendation from the paper, gamma=0.99
     """
     super(VQVAE, self).__init__()
     self.encoder = Encoder(in_channels, hidden_channels, num_resblocks,
                            res_channels)
     # the following is the additional layer added in the author's original implementation
     # to make sure that the number of channels equals to D (embedding dimension)
     self.pre_vq = nn.Conv2d(in_channels=hidden_channels,
                             out_channels=D,
                             kernel_size=1,
                             stride=1)
     self.vectorquantizer = VectorQuantizerEMA(D, K, beta, gamma)
     self.decoder = Decoder(D, hidden_channels, num_resblocks, res_channels,
                            in_channels)
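A hypothetical instantiation of the VQVAE defined above; the channel counts, D, and K are illustrative, while beta and gamma keep the documented defaults.

# Illustrative hyperparameters; only beta=0.25 and gamma=0.99 come from the docstring above
model = VQVAE(in_channels=3, hidden_channels=128, num_resblocks=2,
              res_channels=32, D=64, K=512)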
Example #27
 def __build(self):
     self.encoder = Encoder(self.src_vocab_size, self.src_emb_dim,
                            self.enc_units)
     self.decoder = Decoder(self.tgt_vocab_size, self.tgt_emb_dim,
                            self.enc_units, self.dec_units, self.att_units,
                            self.score_type)
     self.sess = tf.Session()
Example #28
    def __init__(self, vocab_size, embed_size, hidden_size, choose_model,
                 use_gpu, gpu_id):
        # gpu Setting
        model = Alex_RNNLM(hidden_size)
        if choose_model == "Alex_RNNLM":
            model = Alex_RNNLM(hidden_size)
        if choose_model == "AlexBn_RNNLM":
            model = AlexBn_RNNLM(hidden_size)

        if use_gpu:
            cuda.get_device(gpu_id).use()
            model.to_gpu()
        # Setting Model
        super(EncoderDecoder, self).__init__(
            enc=model,
            dec=Decoder(vocab_size, embed_size, hidden_size),
        )
        self.vocab_size = vocab_size
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.common_function = CommonFunction()
        self.use_gpu = use_gpu
        self.gpu_id = gpu_id
        self.choose_model = choose_model
        self.__set_gpu()
Example #29
    def __init__(self,
                 vocab_size,
                 max_len,
                 device,
                 num_layers=6,
                 stack_layers=6,
                 d_model=512,
                 num_heads=8,
                 ffn_dim=2048,
                 dropout=0.2):

        super(Transformer, self).__init__()

        self.device = device

        self.encoder = Encoder(vocab_size, max_len, num_layers, d_model,
                               num_heads, ffn_dim, dropout)
        self.decoder = Decoder(vocab_size, max_len, device, num_layers,
                               d_model, num_heads, ffn_dim, dropout)
        self.generator = Generator(vocab_size, max_len, device, stack_layers,
                                   d_model, num_heads, ffn_dim, dropout)

        self.embedding = nn.Embedding(vocab_size, d_model)
        self.linear = nn.Linear(d_model, vocab_size, bias=False)
        self.softmax = nn.Softmax(dim=2)
Example #30
def make_model(src_vocab,
               tgt_vocab,
               N=6,
               d_model=512,
               d_ff=2048,
               h=8,
               dropout=0.1):
    "Helper: Construct a model from hyperparameters."
    c = copy.deepcopy
    attn = MultiHeadedAttention(h, d_model)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    position = PositionalEncoding(d_model, dropout)
    model = EncoderDecoder(
        Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
        Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
        nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
        nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),
        Generator(d_model, tgt_vocab))

    # This was important from their code.
    # Initialize parameters with Glorot / fan_avg.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
            """https://zhuanlan.zhihu.com/p/74274453
            #權值初始化 Xavier均勻分佈"""
    return model
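A small usage sketch for make_model; the vocabulary sizes and layer count below are arbitrary, not values from the source.

# Build a reduced 2-layer model with arbitrary vocabulary sizes (illustrative only)
tmp_model = make_model(src_vocab=1000, tgt_vocab=1000, N=2)
print(sum(p.numel() for p in tmp_model.parameters()))  # rough parameter count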
Example #31
File: tests.py Project: EQ4/HMMDuration
def decode(lyricsWithModels, observationFeatures):   
    '''
    same as decoder.decodeAudio() without the parts with WITH_Duration flag.
    '''
    alpha = 0.97
    deviationInSec = 0.1
    ONLY_MIDDLE_STATE=False
    params = Parameters(alpha, ONLY_MIDDLE_STATE, deviationInSec)
    decoder = Decoder(lyricsWithModels, params.ALPHA, params.deviationInSec)
    
    #  decodes
    decoder.hmmNetwork.initDecodingParameters(observationFeatures)
    chiBackPointer, psiBackPointer = decoder.hmmNetwork._viterbiForcedDur(observationFeatures)
    
    # backtrack
    path = Path(chiBackPointer, psiBackPointer)
    detectedWordList = decoder.path2ResultWordList(path)
    # DEBUG
    
    decoder.lyricsWithModels.printWordsAndStatesAndDurations(decoder.path)
    path.printDurations()
Example #32
# this is my main
import sys
import matplotlib
from Decoder import Decoder
matplotlib.use("Tkagg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure

import tkinter as tk
from tkinter import ttk

LARGE_FONT = ("Verdana", 12)

data = Decoder()
allData = data.getData()


class SeaofBTCapp(tk.Tk):
    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)

        tk.Tk.wm_title(self, "BlackBox")

        container = tk.Frame(self)
        container.pack(side="top", fill="both", expand=True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)

        self.frames = {}

        for F in (StartPage, PageOne, PageTwo, PageThree):
Example #33
 # get average signal power
 signal_power = mapper.get_signal_average_power()
 
 # what is the noise power and standard deviation when SNR is 10dB?
 noise_power = signal_power / 10.0
 noise_std_dev = math.sqrt(noise_power)
 
 # initialize random number generator. this seed is fixed constant in order
 # to get deterministic results in this example.
 random.seed(314159265359)
 
 # add white gaussian noise at 10dB to signal
 print "Adding white gaussian noise at 10dB."
 noisy_symbols = [sym + random.gauss(0, noise_std_dev) for sym in symbols]
 # round to closest integer
 noisy_symbols = [int(x + 0.5) for x in noisy_symbols]
 print "noisy symbols:", noisy_symbols
 
 # instantiate decoder
 decoder = Decoder(k, B, d, map_func)
 
 # update decoder with gathered points
 for i in xrange(spine_length):
     decoder.advance([noisy_symbols[i], 
                      noisy_symbols[i+spine_length], 
                      noisy_symbols[i+2*spine_length]])
 
 print "decoded hex:", decoder.get_most_likely().encode("hex")
 
 # make sure we got the message we started with
 assert(decoder.get_most_likely() == message)
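The noise_power = signal_power / 10.0 line above is the 10 dB case of the standard dB-to-linear conversion; the helper below is a generic sketch of that formula, not part of the original example.

import math

def noise_std_for_snr(signal_power, snr_db):
    # SNR(dB) = 10 * log10(signal_power / noise_power)
    noise_power = signal_power / (10 ** (snr_db / 10.0))
    return math.sqrt(noise_power)

# noise_std_for_snr(signal_power, 10.0) reproduces the noise_std_dev used above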
Example #34
socket = ServerUDP.initServerSocket()
print "Running and waiting..."

while True:

    try:
        
        encoded_text = ServerUDP.readSocket(socket)
        
        print "Data received!"
        
        file = open(Util.ENCODED_FILE, "w")
        
        file.write(encoded_text)
        
        file.close()
        
        print "Decoding data..."
        
        Decoder.decode()
        
        print "File successfully decoded!"
        print "\nRunning and waiting..."
        
    except Exception as e:
        print e
        print("Error!")
        
#   finally:
#       socket.close()
#       os._exit(0)
Example #35
File: Coder.py Project: GINK03/shedskin
 def decodeRaw(inputStream, outputStream, intervalLength, options):
     Coder.checkInterval(intervalLength)
     decoder = Decoder(inputStream, outputStream, options)
     amountProcessed = 0
     while not decoder.decode(intervalLength):
         amountProcessed += intervalLength