コード例 #1
0
ファイル: web.py プロジェクト: thedeadparrot/ficbot
def make_generated_text():
    """Flask view: return generated text, honoring an optional ``num_words`` form field.

    For POST requests, reads ``num_words`` from the submitted form and falls
    back to a default of 100 when the field is absent or not a valid integer.
    Other request methods generate text with the generator's default length.
    """
    DEFAULT_NUM_WORDS = 100
    if request.method == 'POST':
        # Fall back to the default when the field is missing or malformed.
        # (The original re-read request.form['num_words'] directly here,
        # which raised an uncaught KeyError when the field was absent.)
        try:
            num_words = int(request.form['num_words'])
        except (KeyError, ValueError):
            num_words = DEFAULT_NUM_WORDS

        return generate_text(reader_cfd=reader_cfd, num_words=num_words)

    return generate_text(reader_cfd=reader_cfd)
コード例 #2
0
def generate_model_output(generated_content, language):
    """Run the text generator over every item in ``generated_content`` in place.

    For each item, the file named by ``item['file_name']`` is read, fed to the
    model as the generation seed, and overwritten with the model output; the
    number of generated lines is recorded on the item.  Items whose generation
    fails are removed before the list is returned.

    Relies on module-level ``generator``, ``model``, ``num_lines`` and
    ``state`` being initialized by the caller — TODO confirm.
    """
    progress_bar = it.ProgressBar(0, len(generated_content))
    progress_bar.print_progress_bar()
    failed_items = []
    for i, item in enumerate(generated_content):
        try:
            # Seed the generator with the file's current contents.
            file_path = item['file_name']
            with open(file_path, 'r', encoding='utf8') as f:
                gen_start_string = f.read()

            model_output, generated_lines = generator.generate_text(
                model, language, gen_start_string, num_lines,
                state['index_to_token'], state['variable_char_start'])
            # Write back with the same encoding we read with; the original
            # used the platform default here, which can break on non-ASCII
            # model output.
            with open(file_path, 'w', encoding='utf8') as output_file:
                output_file.writelines(model_output)
            generated_content[i].update({'generated_lines': generated_lines})
        except Exception:
            # Best-effort batch: count the failure and drop the item later.
            progress_bar.increment_errors()
            failed_items.append(item)
        finally:
            progress_bar.increment_work()
            progress_bar.print_progress_bar()

    for failed in failed_items:
        generated_content.remove(failed)
    return generated_content
コード例 #3
0
def onclick():
    """Button handler: extend the textbox contents with generated text."""
    current = text_box.get("0.0", tk.END)
    # Tk's get() appends a trailing newline; drop it before generating.
    current = current[:-1]
    original_length = len(current)
    extended = generate_text(current)
    # Echo only the newly generated suffix to stdout.
    print(extended[original_length:])
    text_box.delete("0.0", tk.END)
    text_box.insert("0.0", extended)
コード例 #4
0
    def test_generate_text(self):
        """Smoke test: generating from a seed completes and returns a string.

        NOTE(review): ``assertRegex(..., ".*")`` matches any string, so this
        test only verifies that generate_text runs without raising.
        """
        seed = 'Once upon a time'
        # os.path.join with a single argument is a no-op; plain strings suffice.
        model_path = "../models/shakespeare/shakespeare.json"
        with open(model_path, 'r') as file:
            config = file.read()
        model = keras.models.model_from_json(config)
        model = load_model(model, "../checkpoints/shakespeare/shakespeare.ckpt")
        mapping_path = '../char_mappings/shakespeare_w2v.model'

        self.assertRegex(generate_text(model, seed, mapping_path, num_to_generate=50), ".*")
コード例 #5
0
    def test_generate_failed_text(self):
        """Expect generation from an out-of-vocabulary seed to fail with KeyError.

        NOTE(review): ``assertNotRegex(..., ".*")`` can never pass, so this
        test only succeeds via the KeyError path.
        """
        try:
            seed = 'Hello'
            # os.path.join with a single argument is a no-op; use plain strings.
            model_path = "../models/shakespeare/shakespeare.json"
            with open(model_path, 'r') as file:
                config = file.read()
            model = keras.models.model_from_json(config)
            model = load_model(model, "../checkpoints/shakespeare/shakespeare.ckpt")
            mapping_path = '../char_mappings/shakespeare_w2v.model'

            self.assertNotRegex(generate_text(model, seed, mapping_path, num_to_generate=50), ".*")
        except KeyError:
            # Expected failure mode — the test passes.
            pass
        except Exception:
            # The original bare ``except:`` also swallowed SystemExit and
            # KeyboardInterrupt; any other exception is a genuine failure.
            self.fail("generate_text raised an unexpected exception type")
コード例 #6
0
    def onMessage(self, author_id, message_object, thread_id, thread_type,
                  **kwargs):
        """Handle an incoming chat message.

        Commands (watched group thread only):
          * ``!Mimicbot``     -- train a character LM on the scraped corpus and
                                 reply with generated text.
          * ``!Mimicbotinfo`` -- reply with a description of the bot.

        Any other text message in the watched group not sent by the bot itself
        is prepended to ``repo.txt``; everything else is forwarded to the
        parent handler so received messages are still logged.
        """
        order = 7
        info = "I am a robot that Matthew created to simulate typical discussion in this thread.  I scrape this group message for posts and compile it into a library.  Then I use a conditional probability text generator on the library to simulate a message. \n https://github.com/Matthew-Swartz"
        in_watched_group = thread_type == ThreadType.GROUP and thread_id == threadname
        if message_object.text == '!Mimicbot' and in_watched_group:
            lm = train_char_lm("repo.txt", order=order)
            gen = generate_text(lm, order)
            self.send(Message(text=gen),
                      thread_id=thread_id,
                      thread_type=thread_type)
        elif message_object.text == '!Mimicbotinfo' and in_watched_group:
            self.send(Message(text=info),
                      thread_id=thread_id,
                      thread_type=thread_type)
        elif in_watched_group and author_id != self.uid:
            # Prepend the new message to the scraped corpus, newest first.
            if (message_object.text is not None
                    and isinstance(message_object.text, str)):
                with open('repo.txt', 'r') as f:
                    existing = f.read()
                with open('repo.txt', 'w') as f:
                    # str(bytes)[2:-1] strips the b'...' wrapper after the
                    # ascii/ignore encode drops non-ASCII characters.
                    f.write(
                        str(message_object.text.encode('ascii', 'ignore'))[2:-1])
                    f.write('\n')
                    f.write(existing)
        else:
            # Forward to the inherited onMessage so we can still see when a
            # message is received.
            super(PSPBot, self).onMessage(author_id=author_id,
                                          message_object=message_object,
                                          thread_id=thread_id,
                                          thread_type=thread_type,
                                          **kwargs)
コード例 #7
0
ファイル: cmdline.py プロジェクト: DragoonAethis/ManifestoGen
# Fun fact: if you run s/I/O/g on "IWI", you get "OWO"?
import sys
from generator import generate_text

# ./cmdline.py <N> <STARTER>
# Optional parameters:
# - N: number of sentences to generate (default: 1)
# - STARTER: token to start generation from (default: "start")

if __name__ == "__main__":
    # Defaults used when no CLI arguments are supplied.
    sentence_count = 1
    start_token = "start"

    argv = sys.argv
    if len(argv) > 1:
        sentence_count = int(argv[1])
    if len(argv) > 2:
        start_token = argv[2]

    # Emit one generated sentence per line.
    for _ in range(sentence_count):
        print(generate_text(start_token))
コード例 #8
0
    for val in data:
        prediction.append(nai.get_val(val))
    return prediction

# NN models
# Evaluate each saved neural model in turn; ``model_name`` appears to be a
# module-level variable that model_load()/generate() read — TODO confirm.
models_to_test = ['model_cnn', 'model_lstm', 'model_bidir']
for model_nam in models_to_test:
    model_name = os.path.join(path, model_nam)
    model_load()

    #Generate Vals
    #generate_set_vals(X_data, Y_data, _rev_data, False)
    # generate_set(unlabeled, _unlabeled)
    generate(False)

# Naive model
# NOTE(review): reuses the same module-level ``model_name`` for the baseline.
model_name = 'Naive'
#generate_set_vals_naive(_rev_data, Y_data, False)
# generate_set_naive(_unlabeled)
generate_naive(False)

# NOTE(review): this rebinds ``generate`` (a callable, used above) to a bool
# flag; any later call to generate() would raise TypeError.
generate = True
# Generate Text
if generate:
    # Interactive loop: generate a review on each Enter press, predict its
    # star rating, and print both. Runs until interrupted.
    while True:
        input('\nPress Enter to Generate Review:\n')
        txt = gen.generate_text(4, 20)
        stars = predict(txt)
        print(stars)
        print(txt)
コード例 #9
0
def generate():
    """Render the index page populated with freshly generated text."""
    generated = generate_text()
    return render_template('index.html', text=generated)
コード例 #10
0
                    help="The number of words to generate. This parameter is overridden when the character flag is set. Default: 100")
parser.add_argument('--characters', '-c', action='store', dest='limit_characters', type=int, default=None,
                    help="Truncate the number of characters in the output to the given number. Turned off by default.")
parser.add_argument('starting_text', action='store', nargs='?',
                    help="The text we will use to start the text generation.")

args = parser.parse_args()

# Tokenize the starting text into a tuple so it can serve as an n-gram key.
if args.starting_text:
    starting_text = tuple(nltk.word_tokenize(args.starting_text))
else:
    starting_text = None

# Rebuild the model on request before (optionally) generating.
if args.regen_model or args.regen_model_only:
    generate_model(ngram_length=args.ngram_length)

# run the generator with the given options
if not args.regen_model_only:
    try:
        print(generate_text(
            starting_text,
            ngram_length=args.ngram_length,
            num_words=args.num_words,
            limit_characters=args.limit_characters
        ))
    except AssertionError as assertion:
        print("ERROR:")
        # Python 3 exceptions have no ``.message`` attribute; the original
        # ``assertion.message`` raised AttributeError and hid the real error.
        print("\t{}".format(assertion))
        print("\tYou may need to regenerate the model first. Run the command again with the -r flag.")
コード例 #11
0
def predict():
    """Receive and process the POST request and provide generated text as a response.

    Expects ``seed``, ``author`` and ``length`` (JSON body or form fields),
    loads the stored Keras model and checkpoint for the requested author, and
    returns the generated text as a JSON response.
    """
    checkpoint = ''

    # Allow for both form and json formatted POST requests
    print('Received request...')
    if request.is_json is True:
        params = request.get_json(force=True)
    else:
        params = {
            'seed': request.form['seed'],
            'author': request.form['author'],
            'length': request.form['length']
        }

    author = params['author']
    seed = params['seed']

    # An empty length field means "use the generator's default" (0).
    if params['length'] != '':
        length = int(params['length'])
    else:
        length = 0

    # Retrieve list of available checkpoints
    checkpoints_dir = 'checkpoints'
    checkpoints = [
        os.path.join(checkpoints_dir, o) for o in os.listdir(checkpoints_dir)
        if os.path.isdir(os.path.join(checkpoints_dir, o))
    ]
    tensor_model_dir = 'models'
    models = [
        os.path.join(tensor_model_dir, o) for o in os.listdir(tensor_model_dir)
        if os.path.isdir(os.path.join(tensor_model_dir, o))
    ]

    print('Building Model...')
    # NOTE(review): this lookup's result is never used; the model path is
    # rebuilt from ``author`` below. Kept for parity with the checkpoint scan.
    for c in models:
        if author in c:
            tensor_model = c

    # Retrieve desired checkpoint
    print('Retrieving checkpoint...')
    for c in checkpoints:
        if author in c:
            checkpoint = c

    # Retrieve character mapping of desired author
    print('Retrieving mapping...')
    # NOTE(review): if no checkpoint matched, mapping_path stays undefined and
    # generate_text() below raises NameError — confirm authors are validated
    # upstream.
    if author in checkpoint:
        mapping_path = os.path.join('char_mappings', author + '_w2v.model')

    # Load checkpoint into model
    print('Loading model...')
    # Build paths with os.path.join throughout instead of mixing join with
    # "/" string concatenation.
    model_path = os.path.join(tensor_model_dir, author, author + ".json")
    with open(model_path, 'r') as file:
        config = file.read()
    model = keras.models.model_from_json(config)
    checkpoint_path = os.path.join(checkpoints_dir, author, author + '.ckpt')
    new_model = load_model(model, checkpoint_path)

    # Generate text and return JSON in POST response
    print('Generating text...')
    prediction = generate_text(new_model,
                               seed,
                               mapping_path,
                               num_to_generate=length)
    response = make_response(
        jsonify(author=author, length=length, seed=seed, response=prediction),
        200)
    print('Generation complete.')
    return response
コード例 #12
0
 def generate_text_pre_loaded(self, **kwargs):
     """Call ``generate_text`` with the class's canned test model and sentence files.

     We are always going to be using these test files, so we might as well
     abstract them away.  Any extra keyword arguments are forwarded unchanged.
     """
     return generate_text(model_file=self.TEST_MODEL_FILE, sentence_file=self.TEST_SENTENCES_FILE, **kwargs)