コード例 #1
0
 def enviar_datos(self, intento=0):
     if intento < 10:
         try:
             # Comprobamos si el servidor existe
             remote_ip = socket.gethostbyname(self.__SERVER_NAME)
             # Nos conectamos al servidor
             self.ssl_sock.connect((remote_ip, self.__PORT))
             # Serializamos y enviamos
             self.ssl_sock.send(json.dumps(self.datos_enviar))
             # Recibo la respuesta del servidor y la devuelvo
             result = Decoder.Decoder(self.ssl_sock.recv(8192))
             ##### BETA FUNCTION
             # result = Decoder.Decoder(self._recv_timeout(self.ssl_sock))
             # Correcto, cerramos y devolvemos.
             self.ssl_sock.close()
             return result.decode_json()
         except socket.error as msg:
             # Se ha producido un error con el socket
             print msg, " -- Intento número", intento
             # Cerramos el socket y creamos uno nuevo
             self.ssl_sock.close()
             self.__init__(self.datos_enviar)
             intento += 1
             time.sleep(5)
             self.enviar_datos(intento)
     else:
         # Después de 10 intentos no ha sido posible enviar el socket
         # Devolvemos un None para indicar el error.
         print "El envío del socket ha excedido el número de intentos"
         self.ssl_sock.close()
         return None
コード例 #2
0
    def __init__(self, input_size, word_vec_dim, hidden_size, output_size,
                n_layers=4, dropout_p=.2):
        """Seq2Seq with attention: embeddings, Encoder, Decoder and Generator.

        Args:
            input_size: source-side vocabulary size.
            word_vec_dim: word-embedding dimensionality (shared shape for both sides).
            hidden_size: RNN hidden state size.
            output_size: target-side vocabulary size.
            n_layers: number of stacked RNN layers.
            dropout_p: dropout probability for encoder and decoder.
        """
        self.input_size = input_size
        self.word_vec_dim = word_vec_dim
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout_p = dropout_p

        super(Seq2Seq, self).__init__()

        # Separate embedding tables for source and target vocabularies.
        self.emb_src = nn.Embedding(input_size, word_vec_dim)
        self.emb_dec = nn.Embedding(output_size, word_vec_dim)

        self.encoder = Encoder(word_vec_dim, hidden_size,
                               n_layers=n_layers,
                               dropout_p=dropout_p)

        # BUG FIX: keyword was misspelled 'n_lyaers', which raises
        # TypeError (unexpected keyword argument) at construction time.
        self.decoder = Decoder(word_vec_dim, hidden_size,
                               n_layers=n_layers,
                               dropout_p=dropout_p)

        self.attn = Attention(hidden_size)

        # Combine decoder state with attention context, then project to vocab.
        self.concat = nn.Linear(hidden_size*2, hidden_size)
        self.tanh = nn.Tanh()
        self.generator = Generator(hidden_size, output_size)
コード例 #3
0
    def __init__(self,
                 num_layers,
                 num_heads,
                 d_model,
                 dense_dim,
                 in_vocab_size,
                 tar_vocab_size,
                 input_max_position,
                 target_max_position,
                 rate=0.1):
        """Transformer: Encoder + Decoder + final vocabulary projection.

        Args:
            num_layers: number of encoder/decoder layers.
            num_heads: attention heads per layer.
            d_model: model (embedding) dimension.
            dense_dim: feed-forward inner dimension.
            in_vocab_size: input vocabulary size.
            tar_vocab_size: target vocabulary size.
            input_max_position: max positional encoding length (encoder).
            target_max_position: max positional encoding length (decoder).
            rate: dropout rate applied to both encoder and decoder.
        """
        super().__init__()

        # BUG FIX: `rate` was accepted but ignored — dropout was
        # hardcoded to 0.1 in both submodules. Default is unchanged,
        # so existing callers see identical behavior.
        self.encoder = Encoder(num_layers,
                               num_heads,
                               d_model,
                               dense_dim,
                               in_vocab_size,
                               max_encoding_position=input_max_position,
                               dropout=rate)

        self.decoder = Decoder(num_layers,
                               num_heads,
                               d_model,
                               dense_dim,
                               tar_vocab_size,
                               max_encoding_position=target_max_position,
                               dropout=rate)

        # Project decoder output onto the target vocabulary.
        self.dense = tf.keras.layers.Dense(tar_vocab_size)
コード例 #4
0
    def fbCheck(self):

        from Decoder import *
        dcd = Decoder("ASEntryExample.xml").dictionary
        from HandleDict import HandleDictionary
        astr = HandleDictionary(dcd)
        v = astr.findall("platforms")
コード例 #5
0
 def __init__(self,
              ddconfig,
              lossconfig,
              n_embed,
              embed_dim,
              ckpt_path=None,
              ignore_keys=None,
              image_key="image",
              colorize_nlabels=None,
              monitor=None
              ):
     """VQ autoencoder: Encoder -> quantizer -> Decoder, with optional
     checkpoint restore and segmentation-colorization buffer.

     Args:
         ddconfig: dict of encoder/decoder hyper-parameters (needs "z_channels").
         lossconfig: config object instantiated into the training loss.
         n_embed: codebook size for the vector quantizer.
         embed_dim: codebook embedding dimension.
         ckpt_path: optional checkpoint to restore from.
         ignore_keys: state-dict keys to skip on restore (BUG FIX: was a
             mutable default `[]`; now None, converted internally).
         image_key: batch-dict key holding the input image.
         colorize_nlabels: if set, registers a random projection for
             visualizing this many segmentation labels.
         monitor: optional metric name used for checkpoint selection.
     """
     super().__init__()
     self.image_key = image_key
     self.encoder = Encoder(**ddconfig)
     self.decoder = Decoder(**ddconfig)
     self.loss = instantiate_from_config(lossconfig)
     self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25)
     # 1x1 convs mapping between encoder channels and codebook dimension.
     self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
     self.post_quant_conv = torch.nn.Conv2d(
         embed_dim, ddconfig["z_channels"], 1)
     if ckpt_path is not None:
         self.init_from_ckpt(ckpt_path,
                             ignore_keys=[] if ignore_keys is None else ignore_keys)
     # (FIX: removed a second, redundant `self.image_key = image_key`.)
     if colorize_nlabels is not None:
         assert type(colorize_nlabels) == int
         self.register_buffer(
             "colorize", torch.randn(3, colorize_nlabels, 1, 1))
     if monitor is not None:
         self.monitor = monitor
コード例 #6
0
ファイル: seq2seq.py プロジェクト: passing2961/CS492
 def __init__(self, enc_unit, dec_unit, batch_size, horizon_size, dropout_rate):
     """Encoder/decoder pair over sequences of horizon_size + 1 steps."""
     super(Seq2Seq, self).__init__()

     self.batch_size = batch_size

     # Both halves see one step more than the horizon.
     seq_len = horizon_size + 1
     self.encoder = Encoder(enc_unit, batch_size, seq_len, dropout_rate)
     self.decoder = Decoder(dec_unit, batch_size, seq_len, dropout_rate)
コード例 #7
0
    def __init__(self, model_params):
        """Build the adversarial generation model from a model_params dict.

        Wires an Encoder/Decoder/Positioner trio, jointly optimized with
        Adam (L2-regularized), plus a Discriminator with its own Adagrad
        optimizer. All modules move to GPU when `use_cuda` is set.
        """
        self.word_indexer = model_params["word_indexer"]
        self.word_embedder = model_params["word_embedder"]
        self.embedding_size = model_params["embedding_size"]
        self.state_size = model_params["state_size"]
        self.mode_size = model_params["mode_size"]
        self.position_size = model_params["position_size"]
        self.ques_attention_size = model_params["ques_attention_size"]
        self.kb_attention_size = model_params["kb_attention_size"]
        self.dis_embedding_dim = model_params["dis_embedding_dim"]
        self.dis_hidden_dim = model_params["dis_hidden_dim"]
        self.max_fact_num = model_params["max_fact_num"]
        self.max_ques_len = model_params["max_ques_len"]

        # Training hyper-parameters.
        self.learning_rate = model_params["learning_rate"]
        self.mode_loss_rate = model_params["mode_loss_rate"]
        self.position_loss_rate = model_params["position_loss_rate"]
        self.L2_factor = model_params["L2_factor"]
        self.batch_size = model_params["batch_size"]
        self.adv_batch_size = model_params["adv_batch_size"]
        self.epoch_size = model_params["epoch_size"]
        self.adv_epoch_size = model_params["adv_epoch_size"]
        self.instance_weight = 1.0 / self.batch_size
        self.MAX_LENGTH = model_params["MAX_LENGTH"]
        self.has_trained = False
        self.oracle_samples = []

        ################ Initialize graph components ########################
        self.encoder = Encoder(self.word_indexer.wordCount, self.state_size,
                               self.embedding_size)
        self.decoder = Decoder(output_size=self.word_indexer.wordCount,
                               state_size=self.state_size,
                               embedding_size=self.embedding_size,
                               mode_size=self.mode_size,
                               kb_attention_size=self.kb_attention_size,
                               max_fact_num=self.max_fact_num,
                               ques_attention_size=self.ques_attention_size,
                               max_ques_len=self.max_ques_len,
                               position_size=self.MAX_LENGTH)
        self.positioner = Positioner(self.state_size, self.position_size,
                                     self.MAX_LENGTH)
        self.dis = Discriminator(self.dis_embedding_dim,
                                 self.dis_hidden_dim,
                                 self.word_indexer.wordCount,
                                 self.MAX_LENGTH,
                                 gpu=use_cuda)

        if use_cuda:
            self.encoder.cuda()
            self.decoder.cuda()
            self.positioner.cuda()
            self.dis.cuda()

        self.optimizer = optim.Adam(list(self.encoder.parameters()) +
                                    list(self.decoder.parameters()) +
                                    list(self.positioner.parameters()),
                                    lr=self.learning_rate,
                                    weight_decay=self.L2_factor)
        # BUG FIX: was `optim.Adagrad(dis.parameters())`, which references
        # an undefined global `dis` and raises NameError at construction.
        self.dis_optimizer = optim.Adagrad(self.dis.parameters())
コード例 #8
0
    def __init__(self, input_size):
        """Encoder/decoder RNN pair with a shared cross-entropy criterion."""
        super(RNN, self).__init__()

        self.encoder = Encoder(input_size)
        self.decoder = Decoder(input_size)
        self.loss = nn.CrossEntropyLoss()

        def _adam(module):
            # Helper: Adam with the fixed learning rate used by both halves.
            return optim.Adam(module.parameters(), lr=0.1)

        self.encoder_optimizer = _adam(self.encoder)
        self.decoder_optimizer = _adam(self.decoder)
コード例 #9
0
ファイル: Seq2Seq.py プロジェクト: attendfov/att_ctc_tf2
    def __init__(self,
                 enc_num_layers,
                 dec_num_layers,
                 d_model,
                 vocab_size,
                 enc_num_heads,
                 dec_num_heads,
                 enc_dff,
                 dec_dff,
                 enc_used_rnn=False,
                 sos_id=0,
                 eos_id=1,
                 max_enc_length=1200,
                 max_dec_length=48,
                 enc_rate=0.1,
                 dec_rate=0.1):
        """Attention seq2seq: Encoder + Decoder + vocab-sized projection."""
        super(Seq2Seq, self).__init__()

        # Record the full configuration on the instance.
        config = dict(enc_num_layers=enc_num_layers,
                      dec_num_layers=dec_num_layers,
                      d_model=d_model,
                      vocab_size=vocab_size,
                      enc_num_heads=enc_num_heads,
                      dec_num_heads=dec_num_heads,
                      enc_dff=enc_dff,
                      dec_dff=dec_dff,
                      enc_used_rnn=enc_used_rnn,
                      sos_id=sos_id,
                      eos_id=eos_id,
                      max_enc_length=max_enc_length,
                      max_dec_length=max_dec_length,
                      enc_rate=enc_rate,
                      dec_rate=dec_rate)
        for attr, value in config.items():
            setattr(self, attr, value)

        # Build the sub-modules directly from the constructor arguments.
        self.encoder = Encoder(num_layers=enc_num_layers,
                               d_model=d_model,
                               num_heads=enc_num_heads,
                               dff=enc_dff,
                               rate=enc_rate,
                               used_rnn=enc_used_rnn,
                               max_width=max_enc_length)

        self.decoder = Decoder(num_layers=dec_num_layers,
                               vocab_size=vocab_size,
                               d_model=d_model,
                               num_heads=dec_num_heads,
                               dff=dec_dff,
                               sos_id=sos_id,
                               eos_id=eos_id,
                               max_length=max_dec_length,
                               rate=dec_rate)

        self.final_layer = tf.keras.layers.Dense(vocab_size)
コード例 #10
0
    def __init__(self, DEBUG=False):
        """Initializer of QRDetector class.

        Create a QR code decoder from ZBar.
        """
        # The QR code decoder provided by ZBar.
        self.dc = Decoder()

        # Set 'True' to show intermediate results.
        self.debug = DEBUG

        # Dilation used when searching for finder-pattern candidates.
        # Used in 'self.__find_qr_finder()'.
        self.dilation = 3

        # Vertical offset when scanning for finder patterns horizontally:
        # given a center y_c, rows (y_c - offset_y) .. (y_c + offset_y)
        # are searched. Used in 'self.__locate_qr_horizontally_once()'.
        self.offset_y = 10

        # Padding used when cropping the QR code from the source image.
        # Do not set too large; 0 is best. Used in 'self.__crop_qr_region()'.
        self.pad = 0

        # Enhancement window length for the cropped QR region; should be
        # an odd number (e.g. 37), chosen per QR-code type.
        # Used in 'self.__enhance_cropped_qr_region()'.
        self.length = 37

        # Candidate binarization thresholds to try when enhancing.
        # Used in 'self.__enhance_cropped_qr_region()'.
        self.thres = np.arange(0.1, 1, 0.1)

        # Upscaling factor for the binary image so the grid size comes
        # out integral (avoids decimal cell sizes).
        # Used in 'self.__enhance_cropped_qr_region()'.
        self.enlarge_ratio = 50
コード例 #11
0
ファイル: Model.py プロジェクト: ximzzzzz/Food_CAMERA
    def __init__(self, opt, device):
        """Optional TPS rectifier, ResNet encoder, decoder and glyph generator."""
        super(Basemodel, self).__init__()

        if opt.TPS:
            # Spatial-transformer rectification ahead of the encoder.
            self.TPS = Trans.TPS_SpatialTransformerNetwork(
                F=opt.num_fiducial,
                i_size=(opt.img_h, opt.img_w),
                i_r_size=(opt.img_h, opt.img_w),
                i_channel_num=3,  # RGB input
                device=device)

        self.encoder = Encoder.Resnet_encoder(opt)
        self.decoder = Decoder.Decoder(opt, device)
        self.generator = GlyphGen.Generator(opt, device)
コード例 #12
0
    def __init__(self, nb_features, nb_encoder_cell, nb_decoder_cell, learning_rate=1e-2):
        """Build the encoder/decoder graph over a (batch, step, features) placeholder."""
        # Input sequences: batch x time-step x feature.
        self.features = tf.placeholder(tf.float32, [None, None, nb_features])

        shape = tf.shape(self.features)
        self.batch_size = shape[0]
        self.nb_step = shape[1]
        self.nb_features = nb_features

        with tf.variable_scope("Encoder"):
            self.encoder = Encoder(self.features, nb_encoder_cell)

        with tf.variable_scope("decoder"):
            # Decoder consumes the encoder's final state.
            self.decoder = Decoder(shape, nb_features,
                                   self.encoder.last_state, nb_decoder_cell)

        self._create_learning_tensors(learning_rate)
コード例 #13
0
    def __init__(self, image_embed_size, word_embed_size, rnn_hidden_size,
                 num_rnn_steps, vocab_size, latent_size, cluster_embed_size):
        """Latent-variable captioning model: paired Encoder and Decoder.

        Args:
            image_embed_size: image-feature embedding dimension.
            word_embed_size: word embedding dimension.
            rnn_hidden_size: RNN hidden state size.
            num_rnn_steps: number of unrolled RNN steps.
            vocab_size: vocabulary size.
            latent_size: latent code dimension.
            cluster_embed_size: cluster embedding dimension.
        """
        self.encoder = Encoder(image_embed_size, word_embed_size,
                               rnn_hidden_size, num_rnn_steps, latent_size,
                               vocab_size, cluster_embed_size)
        self.decoder = Decoder(image_embed_size, latent_size, word_embed_size,
                               rnn_hidden_size, num_rnn_steps, vocab_size,
                               cluster_embed_size)

        self.image_embed_size = image_embed_size
        # FIX: latent_size was assigned twice (once here, once again at the
        # end of the original block); keep a single assignment.
        self.latent_size = latent_size
        self.word_embed_size = word_embed_size
        self.rnn_hidden_size = rnn_hidden_size
        self.num_rnn_steps = num_rnn_steps
        self.vocab_size = vocab_size
        self.cluster_embed_size = cluster_embed_size
コード例 #14
0
def evaluate(epoch, test_data, embedding_matrix, embedding_matrix2, pro_dic,
             trimatrix):
    """Evaluate the epoch's saved EERNN weights on test_data.

    Loads the per-epoch checkpoint, runs the decoder over every test
    batch, writes predictions/targets to disk, and appends accuracy /
    precision / recall to acc.txt (AUC is written as a 0 placeholder).
    """
    decoder = Decoder(embedding_matrix, embedding_matrix2)
    decoder.load_weights(
        '/media/data6t/educationData/submitData/shao/EERNN/model2/my_model_' +
        str(epoch + 1))
    print("loadWeight!!!")
    print('start evaluation')
    preds, targets, binary_preds = [], [], []
    # Problem embeddings and their pairwise cosine similarities.
    X, cos_X = decoder.call_encode(pro_dic)
    for batch, (data) in enumerate(test_data):
        prediction = decoder.call(data, pro_dic.shape[0], X, cos_X, trimatrix)
        # data_t == (batch_size,1,2)
        pred, binary_pred, target_correctness = cal_pre(prediction, data)

        preds.append(pred)
        binary_preds.append(binary_pred)
        targets.append(target_correctness)

    preds = np.concatenate(preds)
    binary_preds = np.concatenate(binary_preds)
    targets = np.concatenate(targets)

    np.savetxt(
        "/media/data6t/educationData/submitData/shao/EERNN/result2/" +
        str(epoch + 1) + "preds.txt", preds)
    np.savetxt(
        "/media/data6t/educationData/submitData/shao/EERNN/result2/" +
        str(epoch + 1) + "targets.txt", targets)
    np.savetxt(
        "/media/data6t/educationData/submitData/shao/EERNN/result2/" +
        str(epoch + 1) + "binary_preds.txt", binary_preds)

    # auc_value = roc_auc_score(targets,preds)
    accuracy = accuracy_score(targets, binary_preds)
    precision, recall, f_score, _ = precision_recall_fscore_support(
        targets, binary_preds)
    # FIX: the log file was previously opened at the top of the function
    # and never closed on error paths; use a context manager instead.
    with open('/media/data6t/educationData/submitData/shao/EERNN/data/acc.txt',
              'w') as f:
        f.write(
            str(epoch + 1) +
            ", auc={0}, accuracy={1}, precision={2}, recall={3} \n".format(
                0, accuracy, precision, recall))
    print("\n auc={0}, accuracy={1}, precision={2}, recall={3}".format(
        0, accuracy, precision, recall))
コード例 #15
0
def run(DataName, TmpDir):
    """Train the EERNN decoder for 10 epochs, evaluating after each.

    Uses Adam with per-epoch exponential decay (lr * 0.92**epoch) and
    saves weights for each epoch under .../model2/.
    """
    # Lower-triangular mask over time steps for the attention.
    trimatrix = np.tri(MAXLEN, MAXLEN, 0).T
    # trimatrix = tf.reshape(trimatrix,shape=[MAXLEN,MAXLEN])
    trimatrix = tf.cast(trimatrix, tf.float32)
    DataProssor = EERNNDataProcessor(
        [15, 1000000, 0.06, 1], [10, 1000000, 0.02, 1],
        ['2005-01-01 23:47:31', '2019-01-02 11:21:49'], True, DataName, TmpDir)
    pro_dic, embedding_matrix, dataset, test_data, embedding_matrix2 = DataProssor.LoadEERNNData(
        BATCH_SIZE, PREFETCH_SIZE, SHUFFLE_BUFFER_SIZE, LSTM_UNITS, 100)
    print("Start training...")
    epochs = 10
    decoder = Decoder(embedding_matrix, embedding_matrix2)
    lr = 0.01
    lr_decay = 0.92
    for epoch in range(0, epochs):
        # Fresh optimizer each epoch with the decayed learning rate.
        optimizer = tf.train.AdamOptimizer(lr * lr_decay**epoch)
        start = time.time()
        total_loss = 0
        # BUG FIX: Dataset.shuffle returns a NEW dataset; the previous
        # code discarded the result, so data was never actually shuffled.
        dataset = dataset.shuffle(BUFFER_SIZE)
        for batch, (data) in enumerate(dataset):
            data_target, data_cor = data
            loss = 0
            with tf.GradientTape() as tape:
                X, cos_X = decoder.call_encode(pro_dic)
                prediction = decoder.call(data, pro_dic.shape[0], X, cos_X,
                                          trimatrix)
                #data_t == (batch_size,1,2)
                loss += entroy_loss(prediction, data)
            batch_loss = (loss / int(data_target.shape[1]))
            total_loss += batch_loss
            variables = decoder.lstm.variables + decoder.dense2.variables + decoder.dense1.variables + decoder.bi_lstm.variables
            gradients = tape.gradient(loss, variables)
            optimizer.apply_gradients(zip(gradients, variables))
            print('.', end='')
            if batch % 100 == 0:
                print()
                print("Epoch {} Batch {} Loss {:.4f}".format(
                    epoch + 1, batch, batch_loss.numpy()))
        end = time.time()
        decoder.save_weights(
            '/media/data6t/educationData/submitData/shao/EERNN/model2/my_model_'
            + str(epoch + 1))
        print("Epoch {} cost {}".format(epoch + 1, end - start))
        evaluate(epoch, test_data, embedding_matrix, embedding_matrix2,
                 pro_dic, trimatrix)
コード例 #16
0
ファイル: demo.py プロジェクト: serge-k-hanna/GC
    def sequential():
        """Run encode/delete/decode over every (num, dels) pair and report stats."""
        de = Decoder(mlen, numDel, numChecker, lengthExtension)

        ttime = 0
        count = 0
        for num in orange:
            for dels in irange:
                # Binary representation of num, then knock bits out of it.
                orgdata = Encoder.genMsg(num, mlen)
                deldata = Encoder.pop(orgdata, dels)
                # Parity integers computed on the original sequence
                # (the encoder's end).
                parity = en.paritize(orgdata)
                print(parity)
                started = time.time()
                r = de.decode(deldata, parity)
                ttime += time.time() - started
                record(orgdata, r, True)
                count += 1
        print('Average time: ', ttime / count * 1000, 'ms')
        print('Failure rate:', (stat['f'] + stat['u']) / count * 100, '%')
コード例 #17
0
ファイル: Models.py プロジェクト: headacheboy/note_pytorch
    def __init__(self, config):
        """Data2Text model: project Encoder/Decoder with N(0, 0.08) weight init."""
        super(Data2Text, self).__init__()
        self.config = config
        self.encoder = Encoder.Encoder(config)
        self.decoder = Decoder.Decoder(config)

        def _init_weights(module):
            # Draw Linear and (two-layer) LSTM weights from N(0, 0.08).
            if isinstance(module, torch.nn.Linear):
                torch.nn.init.normal_(module.weight.data, mean=0., std=0.08)
                if module.bias is not None:
                    torch.nn.init.normal_(module.bias.data, mean=0., std=0.08)
            elif isinstance(module, torch.nn.LSTM):
                for layer in (0, 1):
                    for part in (0, 1):
                        torch.nn.init.normal_(module.all_weights[layer][part],
                                              mean=0., std=0.08)

        self.apply(_init_weights)
コード例 #18
0
def evaluate(dataset, embedding_matrix, pro_dic):
    """Restore the latest checkpoint and report AUC / accuracy /
    precision / recall over every batch in `dataset`.
    """
    optimizer = tf.train.AdamOptimizer(0.001)
    encoder = Encoder(embedding_matrix)
    decoder = Decoder()
    checkpoint_dir = './training_checkpoints'
    checkpoint = tf.train.Checkpoint(optimizer=optimizer,
                                     encoder=encoder,
                                     decoder=decoder)
    checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
    preds, targets, binary_preds = list(), list(), list()
    # Problem embeddings and their pairwise cosine similarities.
    X = encoder(pro_dic)
    cos_X = cosine(X, X)
    for (batch, data) in enumerate(dataset):
        hatt = tf.zeros([BATCH_SIZE, len(pro_dic), LSTM_UNITS])
        data_t = tf.expand_dims(data[:, 0, :], 1)
        for t in range(1, data.shape[1]):
            xt = data_forstu(data_t, X)
            xt = tf.expand_dims(xt, 1)
            prediction, hatt = decoder(xt, data_t, hatt, X, cos_X)
            data_t = tf.expand_dims(data[:, t, :], 1)
            pred, binary_pred, target_correctness = cal_pre(prediction, data_t)
            preds.append(pred)
            binary_preds.append(binary_pred)
            targets.append(target_correctness)

    # BUG FIX: aggregation and metrics were previously INSIDE the batch
    # loop, rebinding preds/targets/binary_preds from lists to ndarrays,
    # so the second batch crashed on .append. Aggregate once, after all
    # batches have been processed.
    preds = np.concatenate(preds)
    binary_preds = np.concatenate(binary_preds)
    targets = np.concatenate(targets)
    auc_value = roc_auc_score(targets, preds)
    accuracy = accuracy_score(targets, binary_preds)
    precision, recall, f_score, _ = precision_recall_fscore_support(
        targets, binary_preds)
    print("\n auc={0}, accuracy={1}, precision={2}, recall={3}".format(
        auc_value, accuracy, precision, recall))
コード例 #19
0
    def __init__(self,
                 vocab_size=1000,
                 embedding_dim=96,
                 SOS_ID = 0,
                 EOS_ID = 1,
                 dec_units=128,
                 enc_units=128,
                 attention_name='luong',
                 attention_type=0,
                 rnn_type='gru',
                 max_length=10,
                 teacher_forcing_ratio=0.5):
        """Image-to-sequence model: ResNet18 feature extractor feeding an
        attention decoder.
        """
        super(Seq2Seq, self).__init__()

        # Visual front-end (a plain Encoder(enc_units) was used previously).
        self.encoder = ResNet18(enc_units)
        self.decoder = Decoder(vocab_size, embedding_dim, SOS_ID, EOS_ID,
                               dec_units, enc_units, attention_name,
                               attention_type, rnn_type, max_length)

        # Keep the full configuration around for the other methods.
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.SOS_ID = SOS_ID
        self.EOS_ID = EOS_ID
        self.dec_units = dec_units
        self.enc_units = enc_units
        self.attention_name = attention_name
        self.attention_type = attention_type
        self.rnn_type = rnn_type
        self.max_length = max_length
        self.teacher_forcing_ratio = teacher_forcing_ratio
        self.loss_value = 0.0
コード例 #20
0
    def handle(self):
        """Dispatch one JSON request from the client to the matching handler.

        Decodes the incoming payload, looks up data[0]['metodo'] in the
        dispatch table, runs the handler, and sends the JSON-encoded
        result back. On any error a {'return': 'fail'} reply is sent.
        """
        try:
            # Decode the client's JSON payload (up to 4096 bytes).
            reciv = Decoder.Decoder(self.request.recv(4096).strip())
            data = reciv.decode_json()
            # Dispatch table: request method name -> bound handler method.
            operaciones = {
                'iniciar_sesion': self.iniciar_sesion,
                'obtener_todos_los_propietarios':
                self.obtener_todos_los_propietarios,
                'obtener_grupos': self.obtener_grupos,
                'obtener_alumnos': self.obtener_alumnos,
                'obtener_scripts': self.obtener_scripts,
                'obtener_tags': self.obtener_tags,
                'obtener_scripts_disponibles':
                self.obtener_scripts_disponibles,
                'obtener_tags_disponibles': self.obtener_tags_disponibles,
                'obtener_tags_usuario': self.obtener_tags_usuario,
                'obtener_scripts_tag': self.obtener_scripts_tag,
                'obtener_scripts_no_en_tag': self.obtener_scripts_no_en_tag,
                'borrar_grupo': self.borrar_grupo,
                'cambiar_nombre': self.cambiar_nombre,
                'crear_tag_usuario': self.crear_tag_usuario,
                'crear_grupo': self.crear_grupo,
                'aplicar_cambios': self.aplicar_cambios,
                'eliminar_tag_usuario': self.eliminar_tag_usuario,
                'modificar_tag': self.modificar_tag,
                'obtener_historial': self.obtener_historial,
            }

            print "Hay que llamar a al gestor %s" % data[0]['metodo']
            # Call the selected handler with the full request payload.
            resultado_operacion = operaciones[data[0]['metodo']](data)
            # Return the handler's result to the client.
            self.request.sendall(json.dumps(resultado_operacion))

            # send some 'ok' back
            # self.request.sendall(json.dumps({'return':'ok'}))
        except Exception, e:
            # NOTE(review): broad catch is deliberate here — the client
            # always receives a well-formed failure reply.
            print "Exception al recibir el mensaje del cliente: ", e
            self.request.sendall(json.dumps({'return': 'fail'}))
コード例 #21
0
    def __init__(self, model_params):
        """Build the encoder/decoder pair (sharing one embedding table)
        from a model_params dict and set up their joint Adam optimizer.
        """
        # Copy hyper-parameters off the params dict onto the instance.
        for key in ("word_indexer", "embedding_size", "state_size",
                    "mode_size", "ques_attention_size", "kb_attention_size",
                    "max_fact_num", "max_ques_len", "learning_rate",
                    "mode_loss_rate", "L2_factor", "batch_size",
                    "epoch_size", "MAX_LENGTH"):
            setattr(self, key, model_params[key])
        self.instance_weight = 1.0 / self.batch_size
        self.has_trained = False

        ################ Initialize graph components ########################
        vocab_count = self.word_indexer.wordCount
        self.embedding = nn.Embedding(vocab_count, self.embedding_size)
        self.encoder = Encoder(vocab_count, self.state_size, self.embedding)
        self.decoder = Decoder(output_size=vocab_count,
                               state_size=self.state_size,
                               embedding=self.embedding,
                               mode_size=self.mode_size,
                               kb_attention_size=self.kb_attention_size,
                               max_fact_num=self.max_fact_num,
                               ques_attention_size=self.ques_attention_size,
                               max_ques_len=self.max_ques_len)
        if use_cuda:
            self.encoder.cuda()
            self.decoder.cuda()

        # One optimizer over both modules, with L2 weight decay.
        self.optimizer = optim.Adam(list(self.encoder.parameters()) +
                                    list(self.decoder.parameters()),
                                    lr=self.learning_rate,
                                    weight_decay=self.L2_factor)
コード例 #22
0
ファイル: Main.py プロジェクト: ztl-35/ML-DL-code
def main():
    """Train a DE->EN seq2seq model, checkpointing on best validation loss."""
    # Hyper-parameters are configured here.
    args = parse_arguments()
    hidden_size = 512
    embed_size = 256
    print("[!] preparing dataset...")
    train_iter, val_iter, test_iter, DE, EN = load_dataset(args.batch_size)
    de_size, en_size = len(DE.vocab), len(EN.vocab)

    enc = Encoder(de_size, embed_size, hidden_size, n_layers=2, dropout=0.5)
    dec = Decoder(embed_size, hidden_size, en_size, n_layers=1, dropout=0.5)
    seq2seq = Sqe2Sqe(enc, dec).cuda()

    optimizer = optim.Adam(seq2seq.parameters(), lr=args.lr)
    best_val_loss = None
    for e in range(1, args.epochs + 1):
        train(e, seq2seq, optimizer, train_iter, en_size, args.grad_clip, DE,
              EN)
        val_loss = evaluate(seq2seq, val_iter, en_size, DE, EN)
        print("[Epoch:%d] val_loss:%5.3f | val_pp:%5.2fS" %
              (e, val_loss, math.exp(val_loss)))

        # Keep only the best checkpoint seen so far.
        if not best_val_loss or val_loss < best_val_loss:
            print("[!] saving model...")
            if not os.path.isdir(".save"):
                os.makedirs(".save")
            torch.save(seq2seq.state_dict(), './.save/seq2seq_%d.pt' % (e))
            best_val_loss = val_loss

    test_loss = evaluate(seq2seq, test_iter, en_size, DE, EN)
    print("[TEST] loss:%5.2f" % test_loss)
コード例 #23
0
    def __init__(self,
                 global_step,
                 inputs,
                 input_length,
                 mel_targets,
                 target_lengths,
                 scope="Tacotron2"):
        """Tacotron2 graph shell: scoped Encoder/Decoder plus training handles."""
        self.encoder = Encoder.Encoder(scope + "_Encoder")
        self.decoder = Decoder.Decoder(scope + "_Decoder")

        # Tensors handed in by the caller.
        self.global_step = global_step
        self.inputs = inputs
        self.input_lengths = input_length
        self.mel_targets = mel_targets
        self.target_lengths = target_lengths

        # Externally-fed scalar loss and a default Adam optimizer.
        self.loss = tf.placeholder(tf.float32, shape=(None, ), name='loss')
        self.optimizer = tf.train.AdamOptimizer()
コード例 #24
0
ファイル: ultimate.py プロジェクト: PADGETS-EU/padgets-repo
 def readEntry(self, listName):
     """Parse an Activity Streams entry document and populate this
     object's id, title, published, verb and activity-object fields.
     """
     document = Decoder(listName).dictionary
     """
    Testing of Michael
    """
     # Debug dump of the decoded document.
     print "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
     print document
     print "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
     """
    End of testing
    """
     parsedList = Parse(document)
     # Entry identification fields.
     self.id = parsedList.find(parsedList.original, "id")
     self.title = parsedList.find(parsedList.original, "title")
     self.published = parsedList.find(parsedList.original, "published")
     # Author: embedded in the XMPP id management of the user who published on the hub.
     #self.author=ActivityStreamsAuthor.ActivityStreamsAuthor(name=parsedList.findall(parsedList.original, ["author","name"]), uri=parsedList.findall(parsedList.original, ["author","uri"]), id=parsedList.findall(parsedList.original, ["author","id"]), link=parsedList.findall(parsedList.original, ["author","link","href"]))
     # Activity verb, then the activity object which is parsed separately.
     self.activity_verb = parsedList.find(parsedList.original,
                                          Prefixes.ACTIVITY + "verb")
     activity_object = parsedList.find(parsedList.original,
                                       Prefixes.ACTIVITY + "object")
     self.readASObject(activity_object)
コード例 #25
0
    opt = options.parser.parse_args()

    # Raw speech features and transcripts (numpy object arrays).
    speech_train = np.load(opt.dataroot + 'train_new.npy', allow_pickle=True, encoding='bytes')
    speech_valid = np.load(opt.dataroot + 'dev_new.npy', allow_pickle=True, encoding='bytes')

    transcript_train = np.load(opt.dataroot + 'train_transcripts.npy', allow_pickle=True,encoding='bytes')
    transcript_valid = np.load(opt.dataroot + 'dev_transcripts.npy', allow_pickle=True,encoding='bytes')
    print("Data Loading Sucessful.....")

    # Build the vocabulary and map transcript words to indices.
    word_dict, word_list, transcript_train, transcript_valid = collect_word(transcript_train, transcript_valid)
    opt.vocab_size = len(word_list)
    options.printer(opt)
    print("Transfer the transcript from words to index sucessfully.....")

    encoder = Encoder(opt)
    # BUG FIX: removed a stray '∏' character that made this line a
    # SyntaxError in the original.
    decoder = Decoder(opt)
    # encoder.load_state_dict(torch.load('./LAS_latest/encoder_latest.pt'))
    # decoder.load_state_dict(torch.load('./LAS_latest/decoder_latest.pt'))
    encoder.to(opt.device)
    decoder.to(opt.device)
    optimizer_encoder = Adam(encoder.parameters(), opt.lr)
    optimizer_decoder = Adam(decoder.parameters(), opt.lr)
    # Per-token losses are kept (reduction='none') for masking later.
    criterion = nn.CrossEntropyLoss(reduction = 'none')
    criterion.to(opt.device)

    train_data = MyDataset(speech_train, transcript_train)
    dev_data = MyDataset(speech_valid, transcript_valid)

    train_loader_args = dict(batch_size = opt.train_batch_size, pin_memory=True, shuffle = True, collate_fn = collate_fn)
    train_loader = Data.DataLoader(train_data, **train_loader_args)
    dev_loader_args = dict(shuffle=False, batch_size = opt.val_batch_size, pin_memory=True, collate_fn = collate_fn)
コード例 #26
0
 def __init__(self, options):
     """Hierarchical seq2seq: utterance encoder -> inter-utterance encoder -> decoder."""
     super(HSeq2seq, self).__init__()
     self.seq2seq = options.seq2seq

     # Word-level encoder over individual utterances.
     self.utt_enc = UtteranceEncoder(options.vocab_size, options.emb_size,
                                     options.ut_hid_size, options)
     # Session-level encoder over utterance representations.
     self.intutt_enc = InterUtteranceEncoder(options.ses_hid_size,
                                             options.ut_hid_size, options)
     self.dec = Decoder(options)
コード例 #27
0
    # Normalize pixel values to [0, 1].
    x_train, x_test = x_train / 255.0, x_test / 255.0

    # Add dimension to fit with shape: (None, Height, Width, 1)
    x_train = np.expand_dims(x_train, axis=3)
    x_test = np.expand_dims(x_test, axis=3)

    height = x_train.shape[1]
    width = x_train.shape[2]

    # NOTE(review): 'input' shadows the builtin of the same name.
    input = tf.placeholder(tf.float32, (None, height, width, 1))

    with tf.variable_scope("Encoder"):
        encoder = Encoder(input)

    with tf.variable_scope("Decoder"):
        # Decoder owns the train/evaluate ops over the full graph.
        decoder = Decoder(input, encoder, learning_rate=LEARNING_RATE)

    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)

        # Train on random batches; evaluate on a test batch every 10 steps.
        for i in range(1000):
            batch = x_train[np.random.choice(x_train.shape[0], BATCH_SIZE)]

            loss_train = decoder.train(batch)

            if i % 10 == 0:
                batch = x_test[np.random.choice(x_test.shape[0], BATCH_SIZE)]
                loss_test = decoder.evaluate(batch)
                print("Epoch %d: Training Loss = %f, Test Loss = %f" % (i, loss_train, loss_test))
コード例 #28
0
ファイル: main.py プロジェクト: adarsh-kr/pytorch_nmt
from torch.autograd import Variable
from Encoder import *
from Decoder import *

# Smoke test: build an encoder/decoder pair and run one decoder forward pass
# on fabricated inputs, printing the module repr and the resulting loss.
encoder = Encoder(3, 2, 5, "lstm", 1, True, 0, 0, 0)
# Decoder signature (for reference):
# __init__(self, input_size, hidden_size, n_vocab, att_type, dropout, cell_type, num_layers)
decoder = Decoder(2, 3, 5, "luong", 0.1, "lstm", 1)

# Dummy batch: 4 token sequences of length 4 (0 presumably acts as padding).
tokens = Variable(torch.LongTensor([[1, 2, 3, 4], [2, 3, 4, 1], [1, 2, 3, 4], [1, 0, 0, 0]]))

# Fake encoder outputs and an initial LSTM (hidden, cell) state pair,
# filled with uniform random values in [0, 1).
fake_enc_outputs = Variable(torch.Tensor(4, 4, 5).uniform_(0, 1))
hidden_state = Variable(torch.Tensor(1, 4, 3).uniform_(0, 1))
cell_state = Variable(torch.Tensor(1, 4, 3).uniform_(0, 1))

print(decoder)
print("Loss : {}".format(decoder(tokens, fake_enc_outputs, (hidden_state, cell_state))))
コード例 #29
0
def run(dataset, test_data, pro_dic, embedding_matrix, prodata):
    """Train the encoder/decoder pair on `dataset`, then evaluate it.

    Args:
        dataset: tf.data-style dataset yielding (batch_size, max_step, 2)
            batches; used for both training and the evaluation pass below.
        test_data: unused here; kept for interface compatibility.
        pro_dic: problem dictionary/matrix fed to the encoder at eval time.
        embedding_matrix: pretrained embeddings used to build the Encoder.
        prodata: iterable of per-problem inputs encoded into the problem
            representation X.

    Side effects: saves checkpoints under ./training_checkpoints and prints
    training progress plus final AUC/accuracy/precision/recall.
    """
    print("Start training...")
    epochs = 1
    optimizer = tf.train.AdamOptimizer(0.001)
    encoder = Encoder(embedding_matrix)
    decoder = Decoder()
    checkpoint_dir = './training_checkpoints'
    # BUG FIX: the prefix used to be 'ckpt)' — a stray parenthesis that ended
    # up in every checkpoint filename.
    checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')
    checkpoint = tf.train.Checkpoint(optimizer=optimizer,
                                     encoder=encoder,
                                     decoder=decoder)
    for epoch in range(epochs):
        start = time.time()
        total_loss = 0
        # BUG FIX: Dataset.shuffle returns a NEW dataset; the return value was
        # previously discarded, so training order was never actually shuffled.
        dataset = dataset.shuffle(BUFFER_SIZE)
        for batch, data in enumerate(dataset):
            loss = 0
            with tf.GradientTape() as tape:
                # Encode every problem; X stacks them into the problem
                # representation (num_problems, bilstm_size).
                tmp = [encoder(data_pro) for data_pro in prodata]
                X = tf.concat(tmp, axis=0)
                cos_X = cosine(X, X)
                # data == (batch_size, max_step, 2)
                hatt = tf.zeros([BATCH_SIZE, pro_dic.shape[0], LSTM_UNITS])
                data_t = tf.expand_dims(data[:, 0, :], 1)
                # Teacher-forced unroll over time steps 1..max_step-1.
                for t in range(1, data.shape[1]):
                    xt = data_forstu(data_t, X)
                    xt = tf.expand_dims(xt, 1)
                    # xt == (batch_size, 1, 4 * LSTM_UNITS)
                    prediction, hatt = decoder(xt, data_t, hatt, X, cos_X)
                    data_t = tf.expand_dims(data[:, t, :], 1)
                    # data_t == (batch_size, 1, 2)
                    loss += entroy_loss(prediction, data_t)
            batch_loss = (loss / int(data.shape[1]))
            total_loss += batch_loss
            variables = encoder.bi_lstm.variables + decoder.variables
            gradients = tape.gradient(loss, variables)
            optimizer.apply_gradients(zip(gradients, variables))

            if batch % 10 == 0:
                print()
                print("Epoch {} Batch {} Loss {:.4f}".format(
                    epoch + 1, batch, batch_loss.numpy()))
        end = time.time()
        # Checkpoint every second epoch.
        if (epoch + 1) % 2 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)

        print("Epoch {} cost {}".format(epoch + 1, end - start))

    print("End training...")
    print('start evaluation')
    preds, targets, binary_preds = list(), list(), list()
    X = encoder(pro_dic)
    cos_X = cosine(X, X)
    for batch, data in enumerate(dataset):
        hatt = tf.zeros([BATCH_SIZE, len(pro_dic), LSTM_UNITS])
        data_t = tf.expand_dims(data[:, 0, :], 1)
        for t in range(1, data.shape[1]):
            xt = data_forstu(data_t, X)
            xt = tf.expand_dims(xt, 1)
            prediction, hatt = decoder(xt, data_t, hatt, X, cos_X)
            data_t = tf.expand_dims(data[:, t, :], 1)
            pred, binary_pred, target_correctness = cal_pre(prediction, data_t)
            preds.append(pred)
            binary_preds.append(binary_pred)
            targets.append(target_correctness)

    # BUG FIX: metric aggregation used to run INSIDE the batch loop; after the
    # first batch `preds` became an ndarray, so the next `.append` raised
    # AttributeError. Aggregate once over all batches instead.
    preds = np.concatenate(preds)
    binary_preds = np.concatenate(binary_preds)
    targets = np.concatenate(targets)
    auc_value = roc_auc_score(targets, preds)
    accuracy = accuracy_score(targets, binary_preds)
    precision, recall, f_score, _ = precision_recall_fscore_support(
        targets, binary_preds)
    print("\n auc={0}, accuracy={1}, precision={2}, recall={3}".format(
        auc_value, accuracy, precision, recall))
コード例 #30
0
# Load dataset vocabulary.
# NOTE(review): batch_size, glove_model, bert_model, glove_vectors and
# get_loader are defined elsewhere in the original project.
img_size = 224
dataset = 'flickr8k'
# SECURITY NOTE: pickle.load on an external file executes arbitrary code if
# the file is untrusted; also the file handle is never closed — prefer a
# `with open(...)` block.
vocab = pickle.load(open('dumps/vocab_' + dataset + '.pkl', 'rb'))

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#device = torch.device('cpu')

# Validation-split loader for the captioning dataset.
val_loader = get_loader('val', dataset, vocab, img_size, batch_size)

criterion = nn.CrossEntropyLoss().to(device)

# ResNet-101 image encoder + word decoder (optionally GloVe/BERT embeddings).
encoder = Encoder_Resnet101().to(device)
decoder = Decoder(vocab_size=len(vocab),
                  use_glove=glove_model,
                  use_bert=bert_model,
                  glove_vectors=glove_vectors,
                  vocab=vocab).to(device)
#decoder_optimizer = torch.optim.Adam(params=decoder.parameters(),lr=decoder_lr)
def print_sample(hypotheses, references, test_references, imgs, alphas, k,
                 show_att, losses):
    bleu_1 = corpus_bleu(references, hypotheses, weights=(1, 0, 0, 0))
    bleu_2 = corpus_bleu(references, hypotheses, weights=(0, 1, 0, 0))
    bleu_3 = corpus_bleu(references, hypotheses, weights=(0, 0, 1, 0))
    bleu_4 = corpus_bleu(references, hypotheses, weights=(0, 0, 0, 1))

    print("Validation loss: " + str(losses.avg))
    print("BLEU-1: " + str(bleu_1))
    print("BLEU-2: " + str(bleu_2))