Exemplo n.º 1
0
def translate(translate_sentence='he saw a old yellow truck .'):
    """Translate an English sentence with the saved seq2seq model.

    Loads the preprocessed vocabularies and checkpoint path via ``helper``,
    converts the sentence to word ids, runs the restored graph, and prints
    the input ids/words and the predicted ids/words (French).
    """
    _, (source_vocab_to_int,
        target_vocab_to_int), (source_int_to_vocab,
                               target_int_to_vocab) = helper.load_preprocess()
    load_path = helper.load_params()

    # Convert the sentence to a sequence of word ids.
    translate_sentence = sentence_to_seq(translate_sentence,
                                         source_vocab_to_int)

    loaded_graph = tf.Graph()
    with tf.Session(graph=loaded_graph) as sess:
        # Load saved model
        loader = tf.train.import_meta_graph(load_path + '.meta')
        loader.restore(sess, load_path)

        input_data = loaded_graph.get_tensor_by_name('input:0')
        logits = loaded_graph.get_tensor_by_name('logits:0')
        keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')

        # keep_prob=1.0 disables dropout at inference time.
        translate_logits = sess.run(logits, {
            input_data: [translate_sentence],
            keep_prob: 1.0
        })[0]

    # Hoisted: the argmax was previously computed twice below.
    predicted_ids = np.argmax(translate_logits, 1)

    print('Input')
    print('  Word Ids:      {}'.format([i for i in translate_sentence]))
    print('  English Words: {}'.format(
        [source_int_to_vocab[i] for i in translate_sentence]))

    print('\nPrediction')
    print('  Word Ids:      {}'.format([i for i in predicted_ids]))
    print('  French Words: {}'.format(
        [target_int_to_vocab[i] for i in predicted_ids]))
Exemplo n.º 2
0
        def predecir(self, frase):
            """Translate the English sentence ``frase`` and return the result.

            Restores the saved seq2seq checkpoint, feeds the id-encoded
            sentence through the graph, writes the translated text to the
            respuesta.txt output file, and returns it.

            NOTE(review): relies on a module-level ``batch_size``; the saved
            graph appears to expect full batches, so the sentence is tiled —
            confirm against the training code.
            """
            _, (source_vocab_to_int, target_vocab_to_int), (
                source_int_to_vocab,
                target_int_to_vocab) = helper.load_preprocess()
            load_path = helper.load_params()
            tests.test_sentence_to_seq(sentence_to_seq)
            translate_sentence = frase
            # Keep the original English text for the log message below.
            pIngles = translate_sentence
            translate_sentence = sentence_to_seq(translate_sentence,
                                                 source_vocab_to_int)
            loaded_graph = tf.Graph()
            with tf.Session(graph=loaded_graph) as sess:
                # Load saved model
                loader = tf.train.import_meta_graph(load_path + '.meta')
                loader.restore(sess, load_path)

                input_data = loaded_graph.get_tensor_by_name('input:0')
                logits = loaded_graph.get_tensor_by_name('predictions:0')
                target_sequence_length = loaded_graph.get_tensor_by_name(
                    'target_sequence_length:0')
                source_sequence_length = loaded_graph.get_tensor_by_name(
                    'source_sequence_length:0')
                keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')

                # keep_prob=1.0 disables dropout; the target length is padded
                # to twice the source length.
                translate_logits = sess.run(
                    logits, {
                        input_data: [translate_sentence] * batch_size,
                        target_sequence_length:
                        [len(translate_sentence) * 2] * batch_size,
                        source_sequence_length:
                        [len(translate_sentence)] * batch_size,
                        keep_prob: 1.0
                    })[0]
            # Join the predicted target-word ids back into a sentence.
            variableRetornar = " ".join(
                [target_int_to_vocab[i] for i in translate_logits])
            print('Resultado de ', pIngles)
            print(variableRetornar)

            # Context manager: the file is closed even if write() raises.
            with open("BorderOut\\IA\\respuesta.txt", 'w') as miTxt:
                miTxt.write(variableRetornar)

            return variableRetornar
Exemplo n.º 3
0
 def run():
     """Entry point for this processing block.

     Prepares the data directories, runs SNAP polarimetry over the input
     metadata, optionally applies the mask post-processing step, and
     renames the final output stack.
     """
     ensure_data_directories_exist()
     block_params: dict = load_params()
     metadata: FeatureCollection = load_metadata()
     processor = SNAPPolarimetry(block_params)
     processed, out_file, out_file_pol = processor.process(metadata, block_params)
     save_metadata(processed)
     if block_params['mask'] is not None:
         processor.post_process(out_file, out_file_pol)
     processor.rename_final_stack(out_file, out_file_pol)
Exemplo n.º 4
0
def apply_model(d):
    """Generate a short script with the saved RNN model and pick one line.

    Reads ``gen_length`` from the POST JSON body when present (defaults to
    30), seeds generation with a random vocabulary word, runs the restored
    graph one word at a time, and returns one line of the generated text.
    """
    gen_length = 30
    if request.method == 'POST':
        content = request.json
        gen_length = content['gen_length']

    _, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()

    # NOTE(review): random.choice indexes int_to_vocab with an int drawn
    # from range(len(...)) — assumes its keys are exactly 0..N-1; confirm.
    prime_word = random.choice(int_to_vocab)
    print(prime_word)

    seq_length, load_dir = helper.load_params()

    loaded_graph = tf.Graph()

    with tf.Session(graph=loaded_graph) as sess:
        # Load saved model
        loader = tf.train.import_meta_graph(load_dir + '.meta')
        loader.restore(sess, load_dir)

        # Get Tensors from loaded model
        input_text, initial_state, final_state, probs = get_tensors(loaded_graph)

        # Sentences generation setup
        gen_sentences = [prime_word]
        prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
        print(gen_sentences)

        # Generate sentences
        for n in range(gen_length):
            # Feed at most the last seq_length generated words back in.
            dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
            dyn_seq_length = len(dyn_input[0])

            # Get Prediction
            probabilities, prev_state = sess.run(
                [probs, final_state],
                {input_text: dyn_input, initial_state: prev_state})

            pred_word = pick_word(probabilities[dyn_seq_length - 1], int_to_vocab)

            gen_sentences.append(pred_word)

        data = massage_results(gen_sentences, token_dict)
        # BUG FIX: the original mixed tabs and spaces here (a Python 3
        # TabError) and never returned the chosen line.
        if len(data) > 2:
            wod_return = random.choice(data[1:-1])
        else:
            wod_return = data[0]
        return wod_return
Exemplo n.º 5
0
 def run():
     """Entry point for this processing block.

     Runs SNAP polarimetry over the input metadata and, for every produced
     output entry, optionally applies the mask post-processing step before
     renaming the final stack.
     """
     ensure_data_directories_exist()
     block_params: dict = load_params()
     metadata: FeatureCollection = load_metadata()
     processor = SNAPPolarimetry(block_params)
     processed, outputs = processor.process(metadata, block_params)
     save_metadata(processed)
     for out_id in outputs:
         entry = outputs[out_id]
         if block_params["mask"] is not None:
             processor.post_process(entry["out_path"], entry["z"])
         processor.rename_final_stack(entry["out_path"], entry["z"])
Exemplo n.º 6
0
def genScript():
    """Generate a 200-word TV script primed with 'bart_simpson' and print it.

    Restores the saved RNN checkpoint, samples one word at a time feeding
    the last seq_length words back in, then strips the punctuation tokens
    before printing the script.
    """
    _, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
    seq_length, load_dir = helper.load_params()
    gen_length = 200
    prime_word = 'bart_simpson'
    loaded_graph = tf.Graph()
    with tf.Session(graph=loaded_graph) as sess:
        # BUG FIX: the meta graph path was hard-coded to 'save.meta' while
        # the checkpoint itself was restored from load_dir; derive both from
        # load_dir, consistent with the other loaders in this file.
        loader = tf.train.import_meta_graph(load_dir + '.meta')
        loader.restore(sess, load_dir)
        input_text, initial_state, final_state, probs = getTensors(
            loaded_graph)
        gen_sentences = [prime_word + ':']

        prev_state = sess.run(initial_state, {input_text: np.array([[1]])})

        # Generate sentences
        for n in range(gen_length):
            # Feed at most the last seq_length generated words back in.
            dyn_input = [[
                vocab_to_int[word] for word in gen_sentences[-seq_length:]
            ]]

            dyn_seq_length = len(dyn_input[0])

            probabilities, prev_state = sess.run([probs, final_state], {
                input_text: dyn_input,
                initial_state: prev_state
            })

            # Pick the next word from the distribution of the last time step.
            pred_word = pick_word(probabilities[0][dyn_seq_length - 1],
                                  int_to_vocab)

            gen_sentences.append(pred_word)

        # Replace the punctuation tokens with the original characters.
        # (Removed the unused 'ending' local from the original loop.)
        tv_script = ' '.join(gen_sentences)
        for key, token in token_dict.items():
            tv_script = tv_script.replace(' ' + token.lower(), key)
        tv_script = tv_script.replace('\n ', '\n')
        tv_script = tv_script.replace('( ', '(')

        print(tv_script)
Exemplo n.º 7
0
 def train(self,
           path_params='best_params.json',
           path_model='model.h5',
           plot_chart=False,
           handmade_params=None):
     """Train the SudokuBreaker model and save it to ``path_model``.

     Hyper-parameters come from ``path_params`` when that file exists and no
     ``handmade_params`` were given; otherwise ``handmade_params`` must be a
     (layers, average_pooling, batch, epochs, learning_rate) tuple.
     NOTE(review): if the file is missing and ``handmade_params`` is None,
     the unpacking below raises TypeError.
     """
     if os.path.exists(path_params) and not handmade_params:
         layers, average_pooling, batch, epochs, learning_rate = load_params(
             path_params)
     else:
         layers, average_pooling, batch, epochs, learning_rate = handmade_params
     # 'layers' from the params is a key into the layer_combination table.
     layers = layer_combination[layers]
     print(
         fg('green') +
         'Parameters : layers : {0}, average_pooling : {1}, batch : {2}, epochs : '
         '{3}, '
         'learning rate : {4}'.format(layers, average_pooling, batch,
                                      epochs, learning_rate))
     sudoku_model = SudokuBreaker(layers=layers,
                                  average_pooling=average_pooling)
     # Random run id, passed to the custom loop for MLflow tracking.
     id_mlflow = random.randint(1, 2542314)
     if self.custom:
         sudoku_model.fit_custom(self.train_x,
                                 self.train_y,
                                 self.val_x,
                                 self.val_y,
                                 batch=batch,
                                 epochs=epochs,
                                 learning_rate=learning_rate,
                                 id_mlflow=id_mlflow)
     else:
         sudoku_model.fit_inbuilt(self.train_x,
                                  self.train_y,
                                  self.val_x,
                                  self.val_y,
                                  batch=batch,
                                  epochs=epochs,
                                  learning_rate=learning_rate)
     sudoku_model.save(path_model)
     if plot_chart:
         plot(sudoku_model.hist)
Exemplo n.º 8
0
# # Checkpoint

# In[18]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests

# Reload the preprocessed data: vocab mappings in both directions for the
# source and target languages, plus the saved checkpoint path.
_, (source_vocab_to_int,
    target_vocab_to_int), (source_int_to_vocab,
                           target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()

# ## Sentence to Sequence
#
# To feed the model a sentence to translate, you first need to preprocess it. Implement the function `sentence_to_seq()` to preprocess new sentences.
#
# - Convert the sentence to lowercase
# - Convert words to ids using `vocab_to_int`
#  - If a word is not in the vocabulary, convert it to the `<UNK>` word id

# In[19]:


def sentence_to_seq(sentence, vocab_to_int):
    """
    Convert a sentence to a sequence of ids
Exemplo n.º 9
0
# Save parameters for checkpoint so the generation notebook can reload them.
helper.save_params((seq_length, save_dir))

# # Checkpoint

# In[60]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests

# Reload the vocab mappings, punctuation-token dict and saved parameters.
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()

# ## Implement Generate Functions
# ### Get Tensors
# Get tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graph#get_tensor_by_name).  Get the tensors using the following names:
# - "input:0"
# - "initial_state:0"
# - "final_state:0"
# - "probs:0"
#
# Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`

# In[61]:


def get_tensors(loaded_graph):
Exemplo n.º 10
0
def run(username, prime_word, gen_length):
    """Generate quotes with the saved RNN model and return candidate lines.

    Restores the checkpoint, greedily generates ``gen_length`` words after
    ``prime_word``, restores punctuation from the tokenized form, and
    returns the list of candidate quote lines: all inner lines, plus
    duplicates of those short enough to fit a tweet with ``username``.
    """
    # directory for saving the model
    save_dir = './save'

    _, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()

    seq_length, load_dir = helper.load_params()

    loaded_graph = tf.Graph()
    with tf.Session(graph=loaded_graph) as sess:
        # Load saved model
        loader = tf.train.import_meta_graph(load_dir + '.meta')
        loader.restore(sess, load_dir)

        # Get Tensors from loaded model
        input_text = loaded_graph.get_tensor_by_name('input:0')
        initial_state = loaded_graph.get_tensor_by_name('initial_state:0')
        final_state = loaded_graph.get_tensor_by_name('final_state:0')
        probs = loaded_graph.get_tensor_by_name('probs_f:0')

        # Sentences generation setup
        gen_sentences = [prime_word]
        prev_state = sess.run(initial_state, {input_text: np.array([[1]])})

        # Generate sentences
        for n in range(gen_length):
            # Feed at most the last seq_length generated words back in.
            dyn_input = [[
                vocab_to_int[word] for word in gen_sentences[-seq_length:]
            ]]
            dyn_seq_length = len(dyn_input[0])
            probabilities, prev_state = sess.run([probs, final_state], {
                input_text: dyn_input,
                initial_state: prev_state,
            })

            # Greedy decoding: always take the most probable next word.
            index = np.argmax(probabilities[0][dyn_seq_length - 1])
            pred_word = int_to_vocab[index]

            gen_sentences.append(pred_word)

        # Restore punctuation characters from their tokens.
        quote_script = ' '.join(gen_sentences)
        for key, token in token_dict.items():
            quote_script = quote_script.replace(' ' + token.lower(), key)
        quote_script = quote_script.replace('\n ', '\n')
        quote_script = quote_script.replace('( ', '(')

        # Drop the (possibly truncated) first and last generated lines.
        quote_data = quote_script.split("\n")
        quote_data = quote_data[1:-1]
        # Keep every remaining line, then additionally duplicate the ones
        # short enough to fit a tweet alongside the username (preserves the
        # original's result exactly).
        res = list(quote_data)
        for line in quote_data:
            if 9 < len(line) <= 139 - len(username):
                res.append(line)
        # BUG FIX: removed the unused random index (choice % len(res)) which
        # raised ZeroDivisionError whenever no lines were generated.
        return res
Exemplo n.º 11
0
def trainModel():
    """Generate one tweet with the saved RNN model and return its text.

    (Adapted from make-tweet.py.)  Despite the name, nothing is trained
    here: the saved checkpoint is restored, ~30 words are sampled starting
    from a random vocabulary word, and the cleaned-up text is returned.
    """
    _, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
    seq_length, load_dir = helper.load_params()
    print('Done')


    def get_tensors(loaded_graph):
        """
        Get input, initial state, final state, and probabilities tensor from <loaded_graph>
        """
        InputTensor = loaded_graph.get_tensor_by_name("input:0")
        InitialStateTensor = loaded_graph.get_tensor_by_name("initial_state:0")
        FinalStateTensor = loaded_graph.get_tensor_by_name("final_state:0")
        ProbsTensor = loaded_graph.get_tensor_by_name("probs:0")

        return InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor


    def pick_word(probabilities, int_to_vocab):
        """
        Pick the next word in the generated text by sampling from the
        predicted distribution (not argmax, so the output varies per call).
        """
        #one_hot_encoded = np.argmax(probabilities, axis=0)
        one_hot_encoded = np.random.choice(len(int_to_vocab),p=probabilities)
        next_word = int_to_vocab[one_hot_encoded]

        return next_word


    # ## Generate TV Script
    # This will generate the TV script for you.  Set `gen_length` to the length of TV script you want to generate.
    gen_length = 30 # length of a tweet
    #prime_word = '@'
    # Seed with a random vocabulary word (assumes keys 0..N-1 — TODO confirm).
    choice = np.random.choice(len(int_to_vocab))
    prime_word = int_to_vocab[choice]
    prime_word  # no-op notebook leftover; kept for byte-identical behavior

    loaded_graph = tf.Graph()
    with tf.Session(graph=loaded_graph) as sess:
        # Load saved model
        loader = tf.train.import_meta_graph(load_dir + '.meta')
        loader.restore(sess, load_dir)

        # Get Tensors from loaded model
        input_text, initial_state, final_state, probs = get_tensors(loaded_graph)

        # Sentences generation setup
        gen_sentences = [prime_word ]# + ':']
        prev_state = sess.run(initial_state, {input_text: np.array([[1]])})

        # Generate sentences
        for n in range(gen_length):
            # Dynamic Input: at most the last seq_length generated words.
            dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
            dyn_seq_length = len(dyn_input[0])

            # Get Prediction
            probabilities, prev_state = sess.run(
                [probs, final_state],
                {input_text: dyn_input, initial_state: prev_state})

            pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)

            gen_sentences.append(pred_word)

        # Remove tokens: restore punctuation, then strip stray escapes/quotes.
        tweet = ' '.join(gen_sentences)
        for key, token in token_dict.items():
            ending = ' ' if key in ['\n', '(', '"'] else ''
            tweet = tweet.replace(' ' + token.lower(), key)
        tweet = tweet.replace('\n ', '\n')
        tweet = tweet.replace('( ', '(')
        tweet = tweet.replace('\\', '')
        tweet = tweet.replace('\/', '')
        tweet = tweet.replace('\"', '')
        tweet = tweet.replace('  ', ' ')

    return tweet
Exemplo n.º 12
0
def traducir(frase):
    """Translate the English sentence ``frase`` to Spanish and return it.

    Restores the saved seq2seq checkpoint, tiles the id-encoded sentence to
    a full batch, runs the graph, and joins the predicted target-word ids
    back into a sentence.
    """
    # Batch Size — the only hyper-parameter the restored graph still needs.
    # (Removed the original's eight unused training hyper-parameter locals:
    # epochs, rnn_size, num_layers, embedding sizes, learning rate, etc.)
    batch_size = 512

    _, (source_vocab_to_int,
        target_vocab_to_int), (source_int_to_vocab,
                               target_int_to_vocab) = helper.load_preprocess()
    load_path = helper.load_params()
    tests.test_sentence_to_seq(sentence_to_seq)
    translate_sentence = sentence_to_seq(frase, source_vocab_to_int)
    loaded_graph = tf.Graph()
    with tf.Session(graph=loaded_graph) as sess:
        # Load saved model
        loader = tf.train.import_meta_graph(load_path + '.meta')
        loader.restore(sess, load_path)

        input_data = loaded_graph.get_tensor_by_name('input:0')
        logits = loaded_graph.get_tensor_by_name('predictions:0')
        target_sequence_length = loaded_graph.get_tensor_by_name(
            'target_sequence_length:0')
        source_sequence_length = loaded_graph.get_tensor_by_name(
            'source_sequence_length:0')
        keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')

        # keep_prob=1.0 disables dropout; the target length is padded to
        # twice the source length, and the input is tiled to a full batch.
        translate_logits = sess.run(
            logits, {
                input_data: [translate_sentence] * batch_size,
                target_sequence_length:
                [len(translate_sentence) * 2] * batch_size,
                source_sequence_length: [len(translate_sentence)] * batch_size,
                keep_prob: 1.0
            })[0]
    # Join the predicted word ids back into the Spanish sentence.
    variableRetornar = " ".join(
        [target_int_to_vocab[i] for i in translate_logits])
    return variableRetornar
Exemplo n.º 13
0
        "?": "||QUEST_MARK||",
        "(": "||L_PARENTH||",
        ")": "||R_PARENTH||",
        "--": "||DASH||",
        "\n": "||RETURN||"
    }

# Replace punctuation characters in the raw text with their word tokens.
for key, token in tokens.items():
    text = text.replace(key, ' {} '.format(token.lower()))

# Split the tokenized text into sentences on the period token.
lines = text.split(' ||period||  ')

# Unique words that can start a sentence (used to seed generation).
first_words = list(set([line.split(" ")[0] for line in lines]))

_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, _ = helper.load_params()

# Length of the generated sequence
gen_length = 10
phrases = 10

def get_tensors(loaded_graph):
    """Look up the four generation tensors in ``loaded_graph``.

    Returns the (input, initial_state, final_state, probs) tensors,
    fetched by their graph names.
    """
    fetch = loaded_graph.get_tensor_by_name
    return (fetch('input:0'),
            fetch('initial_state:0'),
            fetch('final_state:0'),
            fetch('probs:0'))

def pick_word(probabilities, int_to_vocab):
Exemplo n.º 14
0
    "?": "||QUEST_MARK||",
    "(": "||L_PARENTH||",
    ")": "||R_PARENTH||",
    "--": "||DASH||",
    "\n": "||RETURN||"
}

# Replace punctuation characters in the raw text with their word tokens.
for key, token in tokens.items():
    text = text.replace(key, ' {} '.format(token.lower()))

# Split the tokenized text into sentences on the period token.
lines = text.split(' ||period||  ')

# Unique words that can start a sentence (used to seed generation).
first_words = list(set([line.split(" ")[0] for line in lines]))

_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, _ = helper.load_params()

# Length of the generated sequence
gen_length = 10
phrases = 10


def get_tensors(loaded_graph):
    """Fetch the (input, initial_state, final_state, probs) tensors by name."""
    tensor_names = ('input:0', 'initial_state:0', 'final_state:0', 'probs:0')
    return tuple(loaded_graph.get_tensor_by_name(n) for n in tensor_names)