def search():
    if request.method == "POST":
        # Checkbox '1' enables stop-word removal, checkbox '2' enables lemmatization
        selected = request.form.getlist('mycheckbox')
        stop_words = 1 if '1' in selected else 0
        lemmatize = 1 if '2' in selected else 0
        text = str(request.form['text'])
        # Part-of-speech tag each token with pymorphy2; unrecognized words get None.
        morph = pymorphy2.MorphAnalyzer()
        pos = [morph.parse(w)[0].tag.POS for w in text.split()]
        # Classify only texts longer than 15 words in which fewer than half of the
        # words are unrecognizable (guard against empty input).
        invalid_pos = pos.count(None) / len(pos) if pos else 1.0
        if len(pos) > 15 and invalid_pos < 0.5:
            # Load the TF-IDF vectorizer and classifier matching the chosen
            # preprocessing options, then map the prediction to a topic name.
            preprocess = TextPreprocessing(stop_words, lemmatize)
            preprocessed_text = preprocess.preprocess(text)
            tf_idf, model = get_model(stop_words, lemmatize)
            text_tf_idf = tf_idf.transform([preprocessed_text])
            result = TOPICS[model.predict(text_tf_idf)[0]]
        else:
            raise ValueError("Text must be longer than 15 words and mostly recognizable")

        # Persist the predicted topic for the 'results' view to read
        with open('result.txt', 'w') as f:
            f.write(str(result))
        return redirect(url_for('results'))

    return render_template('search.html')
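
The view above only writes the prediction to result.txt and redirects to a results endpoint that is not part of the snippet. A minimal sketch of what that counterpart view could look like, assuming the route name implied by url_for('results'), the Flask app object named app, and a hypothetical results.html template:

from flask import Flask, render_template

app = Flask(__name__)  # in the real project this is the existing app object

@app.route('/results')
def results():
    # Hypothetical counterpart view: read the topic written by search()
    # and pass it to an assumed results.html template.
    with open('result.txt') as f:
        result = f.read()
    return render_template('results.html', result=result)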
Example #2
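Evaluation of BRATS18 segmentation models: a test DataLoader is built, each checkpoint listed in model_paths is restored through get_model, and validation/test statistics are accumulated without gradient tracking.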
    test_dataset = BRATS18Dataset(test_metadata, transforms=test_transforms)
    test_loader = DataLoader(dataset=test_dataset,
                             num_workers=num_workers,
                             batch_size=batch_size,
                             shuffle=False)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    results = []
    for model_name, (model_arch, model_log_dir) in model_paths.items():
        print("*** Evaluating: '{}' ***".format(model_name))
        with open(os.path.join(model_log_dir, "params.p"), "rb") as f:
            model_params = pickle.load(f)
        # Older runs did not record an architecture name; default to "unet".
        if not hasattr(model_params, "arch_name"):
            model_params.arch_name = "unet"
        model = get_model(model_params)
        model.load_state_dict(
            torch.load(
                os.path.join(model_log_dir, "model", model_name + "__best")))
        model = model.to(device)
        total_params = sum(p.numel() for p in model.parameters())
        val_stats, test_stats = {}, {}
        with torch.no_grad():
            print("* Validation set *")
            for i, (inputs, targets) in tqdm(enumerate(val_loader),
                                             total=len(val_loader)):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = model(inputs)
                log_stats(val_stats, outputs, targets)
            print("* Test set *")
            for i, (inputs, targets) in tqdm(enumerate(test_loader),
                                             total=len(test_loader)):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = model(inputs)
                log_stats(test_stats, outputs, targets)
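
log_stats is defined elsewhere in the project and is not shown in this snippet. As an illustration only, a hypothetical stand-in with the same call signature could accumulate a per-batch overlap metric such as soft Dice:

import torch

def log_stats(stats, outputs, targets, eps=1e-7):
    # Hypothetical stand-in: accumulate a soft Dice score for each batch.
    preds = (torch.sigmoid(outputs) > 0.5).float()
    intersection = (preds * targets).sum()
    dice = (2 * intersection + eps) / (preds.sum() + targets.sum() + eps)
    stats.setdefault("dice", []).append(dice.item())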
Example #3
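BLEU evaluation of caption VAEs: for ten configurations, get_model returns a VAE, its epoch-120 weights are loaded, and captions regenerated from 200 sampled training captions are scored against the originals.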
import random

from CaptionCaptionEvaluator import *
from CaptionImageEvaluator import *

from sklearn.manifold import TSNE
from matplotlib import colors as mcolors
import matplotlib.pyplot as plt

import nltk
from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu

for i in range(1, 11):

    vae = get_model(i)
    print(vae.model_name)
    vae.define_model()
    vae.load_epoch(120)

    BLEU_scores = []
    for x in random.sample(vae.all_captions_train, 200):
        mu = vae.sentence_to_params([x])[0][0]
        one = vae.tokenizer.texts_to_sequences([x])[0]
        generated = vae.generate_from_mu(mu)

        gen_strip = ""
        for word in generated.split(" "):
            if word != '</e>':
                gen_strip += word
                gen_strip += " "
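
The snippet is cut off before a BLEU score is appended. Given the imports above, the continuation presumably resembles the following sketch; the exact tokenization of reference and candidate is an assumption:

        # Hypothetical continuation: smoothed sentence-level BLEU between the
        # original caption and the stripped generated caption.
        reference = [x.split(" ")]
        candidate = gen_strip.split(" ")
        smooth = SmoothingFunction().method1
        BLEU_scores.append(sentence_bleu(reference, candidate,
                                         smoothing_function=smooth))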
Example #4
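Training a caption-image evaluator on top of a VAE selected by command-line index; here a bag-of-words model with hidden size 1536 is trained for 500 epochs of 50 steps each.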
#!/usr/bin/env python
from CaptionImageEvaluator import *
import sys

from get_models import get_model

index = int(sys.argv[1])

vae = get_model(index)

#vae.define_model()
#vae.load_epoch(120)

#caption_image = CaptionImageEvaluator(
#    vae, save_aux='VAE-EPOCH-120_hidden_1536')

caption_image = CaptionImageEvaluator(vae, save_aux='BOW-hidden-1536')
caption_image.load_model(hidden_size=1536, is_bow=True)
caption_image.train(batch_size=256,
                    steps_per_epoch=50,
                    start=0,
                    end=500,
                    save_every=10,
                    validation=False)
Example #5
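Training the caption VAE itself: the model for the given index is built with get_model and trained to epoch 120 with an annealed KL term.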
#!/usr/bin/env python

from keras import backend as K
import numpy as np

from Flickr30k.get_data import Flickr30kLoader
from pascal50S.get_data_refactor import Pascal10SLoader

import sys

from get_models import get_model

index = int(sys.argv[1])

# Build the VAE for the requested configuration and train it to epoch 120,
# annealing the KL weight at a rate of vae.kl_beta / 80.
vae = get_model(index, True)
vae.define_model()
vae.train_model_vae(batch_size=64,
                    end_epoch=120,
                    kl_anneal_rate=vae.kl_beta / 80)
Example #6
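Training setup for a segmentation model: train/validation/test DataLoaders, model construction via get_model, parameter counts, and an Adam optimizer with a StepLR schedule.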
    # Dataloaders
    train_loader = DataLoader(dataset=train_dataset,
                              num_workers=params.num_workers,
                              batch_size=params.batch_size,
                              shuffle=True)
    val_loader = DataLoader(dataset=val_dataset,
                            num_workers=params.num_workers,
                            batch_size=params.batch_size,
                            shuffle=False)
    test_loader = DataLoader(dataset=test_dataset,
                             num_workers=params.num_workers,
                             batch_size=params.batch_size,
                             shuffle=False)

    # Model
    model = get_model(params)

    # CUDA
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    # Parameters
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters()
                           if p.requires_grad)
    print("Total number of params: {}\nTotal number of trainable params: {}".
          format(total_params, trainable_params))

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=params.lr)
    scheduler = StepLR(optimizer,