import random
import string
import sys

import gensim
import nltk
from gensim.models import Phrases

import subtitles_corpus_composer


def getWord2Vec(modelFile, useExistingCorpus=False):
    # If a trained model was already saved, reload it instead of retraining.
    if useExistingCorpus:
        return gensim.models.Word2Vec.load(modelFile)

    # Punkt sentence tokenizer, used to split the raw corpus into sentences.
    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')

    corpus = subtitles_corpus_composer.getCorpus(useExistingCorpus)
    sents = tokenizer.tokenize(corpus)

    # Split each sentence into word tokens, stripping surrounding punctuation
    # and dropping embedded double quotes first.
    tokenized = []
    for s in sents:
        tokens = nltk.word_tokenize(s.strip(string.punctuation).replace('"', ''))
        tokenized.append(tokens)

    # Detect frequent bigram collocations (e.g. "olive oil" -> "olive_oil")
    # and train word2vec on the phrase-merged sentences.
    # Note: `size` is the gensim 3.x parameter name (gensim 4 renamed it
    # to `vector_size`).
    bigram = Phrases(tokenized)
    modelCom = gensim.models.Word2Vec(bigram[tokenized], size=50)

    # Normalize the vectors in place to save memory; the model cannot be
    # trained further after this call.
    modelCom.init_sims(replace=True)
    modelCom.save(modelFile)
    # In case you want the plain-text (non-binary) word2vec format,
    # uncomment the following line:
    # modelCom.wv.save_word2vec_format("./data/recipes_sw2v", binary=False)

    return modelCom
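
# Example usage of getWord2Vec (a sketch; the model path below is a
# placeholder, not a file shipped with this repo, and "chicken" is just an
# illustrative query word):
#
#     model = getWord2Vec("./data/subtitles_w2v")
#     print(model.wv.most_similar("chicken", topn=5))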

'''
    Example script to generate text from food subtitles corpus.
    At least 20 epochs are required before the generated text
    starts sounding coherent.
    It is recommended to run this script on GPU, as recurrent
    networks are quite computationally intensive.
    If you try this script on new data, make sure your corpus
    has at least ~100k characters. ~1M is better.
'''

text = subtitles_corpus_composer.getCorpus()
print('corpus length:', len(text))

# Build the character vocabulary. Sorting makes the char <-> index mapping
# deterministic across runs (a bare set() would reorder it every time).
chars = sorted(set(text))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
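
# Quick sanity check: the two dicts should be exact inverses of each other
# for every character in the vocabulary.
assert all(indices_char[char_indices[c]] == c for c in chars)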

# Cut the text into semi-redundant sequences of maxlen characters: a sliding
# window of width maxlen that advances `step` characters at a time, with the
# character immediately after each window as the prediction target.
maxlen = 20
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
    sentences.append(text[i: i + maxlen])
    next_chars.append(text[i + maxlen])
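
# With maxlen=20 and step=3, consecutive windows overlap by 17 characters:
# text[0:20] predicts text[20], text[3:23] predicts text[23], and so on.
print('nb sequences:', len(sentences))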