Example No. 1
# Imports as used in the other examples in this set.
from generation.generation import diviner
from my_functions import extractor
from my_functions import responser


def answerer(input_string, tp='big'):
    my_extractor = extractor()
    my_extractor.from_string(input_string)
    my_responser = responser()
    obj1, obj2, predicates = my_extractor.get_params()
    print("len(obj1), len(obj2)", len(obj1), len(obj2))
    print("obj1, obj2, predicates", obj1, obj2, predicates)
    if (len(obj1) > 0 and len(obj2) > 0):
        response = my_responser.get_response(
            first_object=obj1,
            second_object=obj2,
            fast_search=True,
            aspects=predicates,
            weights=[1 for predicate in predicates])
        try:
            response_json = response.json()
        except Exception:
            return "something went wrong in the response, please try again"
        try:
            my_diviner = diviner(tp=tp)
            print(1)
            my_diviner.create_from_json(response_json, predicates)
            print(2)
        except Exception:
            return "something went wrong in the diviner, please try again"
        try:
            answer = my_diviner.generate_advice()
            print("answer", answer)
            #del my_extractor,my_diviner, my_responser
            return answer
        except Exception:
            #del my_extractor, my_diviner, my_responser
            return "something went wrong in answer generation, please try again"
    elif (len(obj1) > 0 and len(obj2) == 0):
        print("len(obj1) > 0 and len(obj2) == 0")
        response = my_responser.get_response(
            first_object=obj1,
            second_object='and',
            fast_search=True,
            aspects=predicates,
            weights=[1 for predicate in predicates])
        try:
            response_json = response.json()
            my_diviner = diviner(tp="big")
            my_diviner.create_from_json(response_json, predicates)
            answer = my_diviner.generate_advice(is_object_single=True)
            print("answer", answer)
            #del my_extractor,my_diviner, my_responser
            return answer
        except Exception:
            #del my_extractor, my_diviner, my_responser
            return "something went wrong in the response, please try again"
    else:
        return ("We can't recognize objects for comparision")
Example No. 2
#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#LM_CAM = load_cam_model(device)
#Cam = diviner(tp = 'cam', model = LM_CAM, device = device)
#print ("loaded cam")

device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")

from ctrl_generation import initialize_model
model_type = "ctrl" #PUT NAME OF NEEDED MODEL
length = 200 #MODEL LENGTH

device = torch.device("cuda:3" if torch.cuda.is_available() else "cpu")
model, tokenizer, length = initialize_model(model_type, length, device)

CTRL = diviner(tp='ctrl', model=model, device=device, tokenizer=tokenizer)
#my_extractor = extractor(my_device=0)
print("loaded ctrl")


def main():
    df = pd.DataFrame(columns=['Object 1', 'Object 2', 'Question', 'Best Answer',  'Answers'])

    with open('yahoo_answers_positive_questions.csv', 'r') as file:
        reader = csv.reader(file)
        for ind, row in enumerate(reader):
            if ind == 0:  # skip the header row
                continue
            d = {'Object 1': row[0], 'Object 2': row[1], 'Question': row[2],
                 'Best Answer': row[3], 'Answers': row[3:]}
            # DataFrame.append was removed in pandas 2.0; use pd.concat instead
            df = pd.concat([df, pd.DataFrame([d])], ignore_index=True)
            
    gpt_answ_list = []
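
The snippet is cut off here. A minimal sketch of how gpt_answ_list might be filled, assuming the responser/diviner API shown in Example No. 1 (get_response, create_from_json, generate_advice); the empty aspects/weights lists are placeholder assumptions:

    # Hypothetical continuation, assuming the API from Example No. 1.
    from my_functions import responser

    my_responser = responser()
    for _, row in df.iterrows():
        response = my_responser.get_response(
            first_object=row['Object 1'],
            second_object=row['Object 2'],
            fast_search=True,
            aspects=[],
            weights=[])
        CTRL.create_from_json(response.json(), [])
        gpt_answ_list.append(CTRL.generate_advice())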
Example No. 3
import csv

import pandas as pd
import torch

from generation.generation import diviner
from my_functions import extractor
from my_functions import responser

from cam_summarize import load_cam_model
from text_gen_big import load_big_model
from text_gen import load_small_model

#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#LM_CAM = load_cam_model(device)
#Cam = diviner(tp = 'cam', model = LM_CAM, device = device)
#print ("loaded cam")

device = torch.device("cuda:3" if torch.cuda.is_available() else "cpu")
LM_SMALL = load_small_model(device)
GPT2Small = diviner(tp='small', model=LM_SMALL, device=device)
#GPT2Small_vs = diviner(tp = 'small_vs', model = LM_SMALL, device = device)
#GPT2Small_vs_str = diviner(tp = 'small_vs_str', model = LM_SMALL, device = device)
print("loaded gpt2")

Templ = diviner(tp='templates', model='', device=device)

my_extractor = extractor(my_device=3)
print("loaded extractor")


def main():
    df = pd.DataFrame(
        columns=['Object 1', 'Object 2', 'Question', 'Best Answer', 'Answers'])

    with open('yahoo_answers_positive_questions.csv', 'r') as file:
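        # The snippet is cut off here; the body below mirrors the identical
        # loop in Example No. 2.
        reader = csv.reader(file)
        for ind, row in enumerate(reader):
            if ind == 0:  # skip the header row
                continue
            d = {'Object 1': row[0], 'Object 2': row[1], 'Question': row[2],
                 'Best Answer': row[3], 'Answers': row[3:]}
            df = pd.concat([df, pd.DataFrame([d])], ignore_index=True)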
Example No. 4
from flasgger import Swagger, LazyString, LazyJSONEncoder
from flask_restful import Api, Resource, reqparse
from flask import make_response
from nltk.tokenize import sent_tokenize, word_tokenize
import random
import json
from flask import jsonify

"""Models"""

from generation.generation import diviner
from my_functions import extractor
from my_functions import responser

GPT2Big = diviner('big', device=5)

GPT2Small = diviner('small', device=6)

Templ = diviner('templates', device=5)

Cam = diviner('cam', device=6)


class ReverseProxied(object):
    """WSGI middleware that honors the X-Script-Name header set by a
    reverse proxy, rewriting SCRIPT_NAME/PATH_INFO accordingly."""
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
        if script_name:
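            # Standard completion of this well-known WSGI middleware pattern
            # (the original snippet is cut off at this point):
            environ['SCRIPT_NAME'] = script_name
            path_info = environ['PATH_INFO']
            if path_info.startswith(script_name):
                environ['PATH_INFO'] = path_info[len(script_name):]
        return self.app(environ, start_response)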
Example No. 5
sys.path.insert(0, "/notebook/cqas")
sys.path.insert(0, "/notebook/cqas/generation/gpt-2-Pytorch")
sys.path.insert(0, "/notebook/cqas/generation/Student")
sys.path.insert(0, "/notebook/cqas/generation/pytorch_transformers")

from generation.generation import diviner
from my_functions import extractor
from my_functions import responser

from cam_summarize import load_cam_model
from text_gen_big import load_big_model
from text_gen import load_small_model

device = torch.device("cuda:3" if torch.cuda.is_available() else "cpu")
LM_CAM = load_cam_model(device)
Cam = diviner(tp='cam', model=LM_CAM, device=device)
print("loaded cam")

#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#LM_SMALL = load_small_model(device)
#GPT2Small = diviner(tp = 'small', model = LM_SMALL, device = device)
#GPT2Small_vs = diviner(tp = 'small_vs', model = LM_SMALL, device = device)
#GPT2Small_vs_str = diviner(tp = 'small_vs_str', model = LM_SMALL, device = device)
print("loaded gpt2")

Templ = diviner(tp='templates', model='', device=device)

my_extractor = extractor(my_device=5)
print("loaded extractor")

Example No. 6
    parser.add_argument(
        '--input',
        help='input phrases as a list, e.g. [question1, question2, question3]')

    args = parser.parse_args()

    # Note: the parsed --input phrases are never used below; the snippet runs
    # on these hard-coded sample questions instead.
    x = ast.literal_eval(args.input)

    words = create_sequence_from_sentence([
        'what is better amazon or itunes for showing',
        'what is better mouse or rat', 'what is easier to make bread or pizza'
    ])
    model = TaggerFactory.load(PATH_TO_PRETRAINED + MODEL_NAME)
    tags = model.predict_tags_from_words(words)

    objects_list = []
    for elem in list(zip(words, tags)):
        objects = get_objects(elem[0], elem[1])
        assert len(objects) >= 2, (
            "Expected at least 2 objects to compare, got %d" % len(objects))
        objects_list.append((objects[0], objects[1]))

    for obj0, obj1 in objects_list:
        response = get_response(obj0, obj1, False)
        response_json = response.json()
        Merlin = diviner()
        Merlin.create_from_json(response_json)
        Merlin.generate_advice()

    print('\nThe end.')
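
A hypothetical invocation (the script name is assumed; --input must be a Python-literal list of strings):

    python compare_questions.py --input "['what is better tea or coffee']"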
Example No. 7
import torch

from generation.generation import diviner
from generation.Student.cam_summarize import load_cam_model
#from text_gen_big import load_big_model
#from text_gen import load_small_model
from generation.Student.ctrl_generation import initialize_model
from generation.Student.ctrl_generation import generate_text_from_condition

model_type = "ctrl"  #PUT NAME OF NEEDED MODEL
length = 100  #MODEL LENGTH
import configparser
config_parser = configparser.ConfigParser()
config_parser.read('config.ini')
config = config_parser['DEV']

device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
#LM_CAM = load_cam_model(device)
Cam = diviner(tp='cam', model='', device=device)
print("loaded cam")

#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#LM_SMALL = load_small_model(device)
#GPT2Small = diviner(tp = 'small', model = LM_SMALL, device = device)
#print ("loaded gpt2")

device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
model, tokenizer, length = initialize_model(model_type, length, device=device)

CTRL = diviner(tp='ctrl', model=model, device=device, tokenizer=tokenizer)
print("loaded ctrl")

#device = torch.device("cuda:6" if torch.cuda.is_available() else "cpu")
#LM_BIG, tokenizer_big = load_big_model(device)
Example No. 8
import sys

import torch

sys.path.insert(0, "/notebook/cqas")
sys.path.insert(0, "/notebook/cqas/generation/gpt-2-Pytorch")
sys.path.insert(0, "/notebook/cqas/generation/Student")
sys.path.insert(0, "/notebook/cqas/generation/pytorch_transformers")

from generation.generation import diviner
from my_functions import extractor
from my_functions import responser

from cam_summarize import load_cam_model
from text_gen_big import load_big_model
from text_gen import load_small_model

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
LM_CAM = load_cam_model(device)
Cam = diviner(tp='cam', model=LM_CAM, device=device)
print("loaded cam")

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
LM_SMALL = load_small_model(device)
GPT2Small = diviner(tp = 'small', model = LM_SMALL, device = device)
GPT2Small_vs = diviner(tp = 'small_vs', model = LM_SMALL, device = device)
GPT2Small_vs_str = diviner(tp = 'small_vs_str', model = LM_SMALL, device = device)
print ("loaded gpt2")

Templ = diviner(tp = 'templates', model = '', device = device)

my_extractor = extractor(my_device = 0)
print ("loaded extractor")

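A minimal end-to-end sketch with the objects loaded above, reusing the extractor/responser/diviner flow from Example No. 1 (the question string is a made-up example):

# Hypothetical glue code; responser is imported at the top of this example.
my_extractor.from_string("What is better, tea or coffee?")
obj1, obj2, predicates = my_extractor.get_params()
response = responser().get_response(
    first_object=obj1, second_object=obj2, fast_search=True,
    aspects=predicates, weights=[1 for _ in predicates])
GPT2Small.create_from_json(response.json(), predicates)
print(GPT2Small.generate_advice())
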
Example No. 9
import csv
import sys

import pandas as pd
import torch

sys.path.insert(0, "/notebook/cqas")
sys.path.insert(0, "/notebook/cqas/generation/gpt-2-Pytorch")
sys.path.insert(0, "/notebook/cqas/generation/Student")
sys.path.insert(0, "/notebook/cqas/generation/pytorch_transformers")

from generation.generation import diviner
from my_functions import extractor
from my_functions import responser

from cam_summarize import load_cam_model
from text_gen_big import load_big_model
from text_gen import load_small_model

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
LM_CAM = load_cam_model(device)
Cam = diviner(tp='cam', model=LM_CAM, device=device)
print("loaded cam")

from evaluation.gen import generate_one_answer_with_defined_objects

df = pd.DataFrame(
    columns=['Object 1', 'Object 2', 'Question', 'Best Answer', 'Answers'])


def main():
    df = pd.DataFrame(
        columns=['Object 1', 'Object 2', 'Question', 'Best Answer', 'Answers'])

    with open('yahoo_answers_positive_questions.csv', 'r') as file:
        reader = csv.reader(file)
        for ind, row in enumerate(reader):
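            # The snippet is cut off here; the body below mirrors the identical
            # loop in Example No. 2.
            if ind == 0:  # skip the header row
                continue
            d = {'Object 1': row[0], 'Object 2': row[1], 'Question': row[2],
                 'Best Answer': row[3], 'Answers': row[3:]}
            df = pd.concat([df, pd.DataFrame([d])], ignore_index=True)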