# Example #1
def load_bert_vocab():
    """Load the pickled Bert vocabulary model.

    Returns:
        tuple | None: ``(vectors, words)`` from the saved vocabulary model,
        or ``None`` when the file cannot be loaded or lacks the expected keys.
    """
    try:
        bert_vocab = gan.load('models/BertVocabulary.model')
        return bert_vocab['vectors'], bert_vocab['words']
    except Exception as err:
        # Was a bare `except:` that silently swallowed everything (even
        # KeyboardInterrupt/SystemExit). Keep the best-effort None return,
        # but catch only Exception and record why loading failed.
        logging.warning("failed to load Bert vocabulary: %s", err)
        return None
 def Bert(N=1):
     """Generate N text samples from the Bert GAN generator.

     Args:
         N (int): Number of sentences to generate.

     Returns:
         pd.Series: One decoded sentence per sample.
     """
     # Restore the generator and its recurrent state on the target device.
     model = gan.load("models/nn/Bert.model")
     generator = model['generator'].eval().to(config.device)
     hidden = [m.to(config.device) for m in model['state_g']]

     # Sample latent input and run a single forward pass.
     noise = generator.generate_input(N)
     output, hidden = generator(noise.detach(), hidden)

     # Map each predicted word vector back to a token; unmapped vectors
     # (reverse lookup returns None) are simply skipped.
     sentences = []
     for s_idx in range(N):
         logging.info("sentence %d", s_idx)
         decoded = (embeddings.rev.Bert(output[s_idx, w_idx])
                    for w_idx in range(config.bert_seq_len))
         sentences.append(' '.join(tok for tok in decoded if tok is not None))
     return pd.Series(sentences)
 def ClosestWord2Vec(N=1):
     """Generate N text samples from the ClosestWord2Vec GAN generator.

     Args:
         N (int): Number of sentences to generate.

     Returns:
         pd.Series: One decoded sentence per sample.
     """
     # Restore generator and recurrent state (left on their saved device).
     model = gan.load("models/nn/ClosestWord2Vec.model")
     generator = model['generator'].eval()
     hidden = model['state_g']

     # Sample latent input and run a single forward pass.
     noise = generator.generate_input(N)
     output, hidden = generator(noise.detach(), hidden)

     # Decode word vectors back to tokens, skipping any that fail to map.
     sentences = []
     for s_idx in range(N):
         logging.info("sentence %d", s_idx)
         decoded = (embeddings.rev.ClosestWord2Vec(output[s_idx, w_idx])
                    for w_idx in range(config.seq_len))
         sentences.append(' '.join(tok for tok in decoded if tok is not None))
     return pd.Series(sentences)
 def ScalarIncremental(N=1):
     """Generate N text samples from the ScalarIncremental GAN generator.

     Args:
         N (int): Number of sentences to generate.

     Returns:
         pd.Series: One decoded sentence per sample.
     """
     # Restore generator and recurrent state (left on their saved device).
     model = gan.load("models/nn/ScalarIncremental.model")
     generator = model['generator'].eval()
     hidden = model['state_g']

     # Sample latent input and run a single forward pass.
     noise = generator.generate_input(N)
     output, hidden = generator(noise.detach(), hidden)

     # Predictions are scalar word codes: round to the nearest integer id,
     # then reverse-map; ids with no word (None) are skipped.
     sentences = []
     for s_idx in range(N):
         tokens = []
         for w_idx in range(config.seq_len):
             code = int(output[s_idx, w_idx, 0].round())
             tok = embeddings.rev.ScalarIncremental(code)
             if tok is not None:
                 tokens.append(tok)
         sentences.append(' '.join(tokens))
     return pd.Series(sentences)
def submit_data():
    """Flask view: accept an uploaded image, run it through the GAN, and
    render the page with the generated result.

    Reads the ``userfile`` upload from the POST form, saves it under
    ``./static/``, generates an output image prefixed with ``gen``, and
    renders ``index.html`` pointing at it. For non-POST requests this view
    returns None (unchanged from the original behavior).
    """
    if request.method == 'POST':
        import os  # local import: used only to sanitize the filename

        path = "./static/{}"
        path2 = "/static/{}"
        images = request.files["userfile"]
        # SECURITY: the filename comes from the client. Keep only the
        # basename so a crafted name like "../../x" cannot escape the
        # static directory (path traversal). Normal filenames are unchanged.
        filename = os.path.basename(images.filename)
        images.save(path.format(filename))
        generated = gan.load(path.format(filename))
        out = gan.output(generated)
        gan.save(out, path.format("gen" + filename))

        return render_template("index.html",
                               your_image=path2.format("gen" + filename))
    def ScalarIncremental(cls, train=True, test=True, markov=True, N=5000):
        """Measures ScalarIncremental Discriminator performance.

        Runs the saved discriminator over the selected data sources and
        collects one score per source.

        Args:
            train (bool): Use train data.
            test (bool): Use test data.
            markov (bool): Use Markov data.
            N (int): Number of Generator outputs (0 disables this check).

        Returns:
            dict: Scores keyed by data source
            ('train', 'test', 'markov', 'generator').
        """
        # Restore the discriminator and its state on the target device.
        model = gan.load("models/nn/ScalarIncremental.model")
        discriminator = model['discriminator']\
            .to(config.device)\
            .eval()
        state_d = [m.to(config.device) for m in model['state_d']]

        def run_discriminator(batch):
            return discriminator(batch, state_d)

        score = {}
        # NOTE: real-data scores are inverted (1 - score) before _score;
        # markov and generated scores are used as-is, matching the original.
        if train:
            loader = embeddings.load.trainset.ScalarIncremental()
            raw = cls._perform(run_discriminator, loader, "train")
            score['train'] = cls._score(1 - raw)
        if test:
            loader = embeddings.load.testset.ScalarIncremental()
            raw = cls._perform(run_discriminator, loader, "test")
            score['test'] = cls._score(1 - raw)
        if markov:
            loader = embeddings.load.markov.ScalarIncremental()
            raw = cls._perform(run_discriminator, loader, "markov")
            score['markov'] = cls._score(raw)
        if N > 0:
            # Score fresh generator outputs through the discriminator.
            generator = model['generator']\
                .to(config.device)\
                .eval()
            state_g = [m.to(config.device) for m in model['state_g']]

            def run_generated(batch):
                fake, _ = generator(batch, state_g)
                return run_discriminator(fake)

            loader = embeddings.load.generator.ScalarIncremental(generator,
                                                                 N=N)
            raw = cls._perform(run_generated, loader, "generator")
            score['generator'] = cls._score(raw)
        return score