Example #1
import os

from django.http import JsonResponse

from jack import readers
from jack.core import QASetting


def response(request):
    '''
    if request.session['is_asked'] == 0:
        question = request.GET.get('msg')
        document_selected = generate_idf.make_query(question)
        data = {
            'response': document_selected
        }
        request.session['is_asked'] = 1
        request.session['document_selected'] = document_selected
        return JsonResponse(data)
    else:
    '''
    print(request.session.get('context_passed', 0))

    if request.session.get('context_passed', 0) == 0:
        context = request.GET.get('msg')
        data = {'response': 'Ask your question'}
        request.session['context'] = context
        request.session['context_passed'] = 1
        return JsonResponse(data)
    else:
        question = request.GET.get('msg')
        """
		entity_list = get_named_entities(question)

		for entity in entity_list:
			if search_knowledgebase(entity):

		"""
        readerpath = os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
            'fastqa_reader')
        # print(readerpath)
        fastqa_reader = readers.reader_from_file(readerpath)
        #request.session['is_asked'] = 1
        #document_selected = request.GET.get('doc')
        #document_path ='knowledgebase/' + (document_selected.split('/')[-1]).split('.')[0] + '.txt'
        #document_path = 'k'
        '''
        document_path = 'knowledgebase/' + document_selected + '.txt'
        with open(document_path, 'r') as myfile:
            support = myfile.read()
            # print(support)
        '''

        context = request.session['context']
        answers = fastqa_reader(
            [QASetting(question=question, support=[context])])
        print(question, "\n")
        print("Answer: " + answers[0][0].text + "\n")
        data = {'response': answers[0][0].text}
        return JsonResponse(data)
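
For reference, a minimal sketch of the jack call this view builds up to; the model directory name is taken from the snippet, while the question and support strings are hypothetical:

from jack import readers
from jack.core import QASetting

fastqa_reader = readers.reader_from_file('fastqa_reader')
answers = fastqa_reader([QASetting(
    question='Who wrote Hamlet?',
    support=['Hamlet was written by William Shakespeare.'])])
print(answers[0][0].text)  # text of the highest-scoring answer span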

Example #2
from jack import readers
from jack.core import QASetting

# read_ir_result, predict and save_predictions are assumed to be
# project-local helpers from the surrounding FEVER pipeline code.


def run_nli(config):
    reader = readers.reader_from_file(config['saved_reader'], dropout=0.0)

    for in_file, out_file in [
        ('train_input_file', 'train_predicted_labels_and_scores'),
        ('dev_input_file', 'dev_predicted_labels_and_scores')
    ]:
        all_settings = list()
        instances = read_ir_result(config[in_file],
                                   n_sentences=config['n_sentences'])

        for instance in instances:
            evidence_list = instance['evidence']
            claim = instance['claim']
            settings = [
                QASetting(question=claim, support=[evidence])
                for evidence in evidence_list
            ]
            all_settings.append(settings)
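        # all_settings holds one list of QASettings per claim,
        # one setting per retrieved evidence sentence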

        preds_list = predict(reader, all_settings, config['batch_size'])
        save_predictions(instances, preds_list, path=config[out_file])
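
A hypothetical config for run_nli, with all keys taken from the function above and placeholder values:

config = {
    'saved_reader': 'results/nli_reader',
    'n_sentences': 5,
    'batch_size': 32,
    'train_input_file': 'train.ir.jsonl',
    'train_predicted_labels_and_scores': 'train.preds.jsonl',
    'dev_input_file': 'dev.ir.jsonl',
    'dev_predicted_labels_and_scores': 'dev.preds.jsonl',
}
run_nli(config)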

Example #3
from jack.readers import reader_from_file

# 'app', 'request' and 'abort' are assumed to come from a Flask setup
# elsewhere, e.g.: from flask import Flask, request, abort; app = Flask(__name__)

et2rels = {}
rel2qf = {}

with open("en-r2q.format") as f:
    for line in f:
        fields = line.rstrip("\n").split("\t")
        rel = fields[0]
        e1type = rel[0:3]
        qformat = fields[1]
        if e1type not in et2rels:
            et2rels[e1type] = []
        et2rels[e1type].append(rel)
        rel2qf[rel] = qformat
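
# For a hypothetical line 'PER-founded-ORG\tWhich organisation did XXX found?',
# the loop above yields et2rels['PER'] == ['PER-founded-ORG'] and
# rel2qf['PER-founded-ORG'] == 'Which organisation did XXX found?'.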

reader = reader_from_file("remqa_reader")


@app.route('/api/qa', methods=['POST'])
def get_relations():
    if not request.json:
        abort(400)
    support = "NoAnswerFound " + request.json["text"]
    entities = request.json["nel"]["entities"]
    qas = []
    entdict = {}
    mentions = {}
    for e in entities:
        entity = e["entity"]
        ementions = e["mentions"]
        eid = entity["id"]
Example #4
import logging
import os
import sys

import tensorflow as tf

from jack.io.load import loaders
from jack.readers import reader_from_file, eval_hooks

logger = logging.getLogger(os.path.basename(sys.argv[0]))
logging.basicConfig(level=logging.INFO)

tf.app.flags.DEFINE_string('dataset', None, 'dataset file')
tf.app.flags.DEFINE_string('loader', 'jack', 'name of loader')
tf.app.flags.DEFINE_string('model_dir', None, 'directory to saved model')
tf.app.flags.DEFINE_integer('batch_size', 64, 'batch size')

FLAGS = tf.app.flags.FLAGS

logger.info("Creating and loading reader from {}...".format(FLAGS.model_dir))

reader = reader_from_file(FLAGS.model_dir)
dataset = loaders[FLAGS.loader](FLAGS.dataset)

logger.info("Start!")


def side_effect(metrics, _):
    """Returns: a state (in this case a metric) that is used as input for the next call"""
    logger.info("#####################################")
    logger.info("Results:")
    for k, v in metrics.items():
        logger.info("{}: {}".format(k, v))
    logger.info("#####################################")
    return 0.0

Example #5
import json
import logging
import os
import sys

import tensorflow as tf

from jack.io.load import loaders
from jack.readers import reader_from_file, eval_hooks

logger = logging.getLogger(os.path.basename(sys.argv[0]))
logging.basicConfig(level=logging.INFO)

tf.app.flags.DEFINE_string('dataset', None, 'dataset file')
tf.app.flags.DEFINE_string('loader', 'jack', 'name of loader')
tf.app.flags.DEFINE_string('model_dir', None, 'directory to saved model')
tf.app.flags.DEFINE_integer('batch_size', 64, 'batch size')
tf.app.flags.DEFINE_string('overwrite', '{}', 'json string that overwrites configuration.')

FLAGS = tf.app.flags.FLAGS

logger.info("Creating and loading reader from {}...".format(FLAGS.model_dir))

kwargs = json.loads(FLAGS.overwrite)

reader = reader_from_file(FLAGS.model_dir, **kwargs)
dataset = loaders[FLAGS.loader](FLAGS.dataset)

logger.info("Start!")

def side_effect(metrics, _):
    """Returns: a state (in this case a metric) that is used as input for the next call"""
    logger.info("#####################################")
    logger.info("Results:")
    for k, v in metrics.items():
        logger.info("{}: {}".format(k, v))
    logger.info("#####################################")
    return 0.0


test_eval_hook = eval_hooks[reader.shared_resources.config["reader"]](

Example #6
import math

import progressbar
import tensorflow as tf

from jack import readers
from jack.io.load import loaders
from projects.knowledge_integration.qa.definition_model import DefinitionPorts
from projects.knowledge_integration.shared import AssertionMRPorts

tf.app.flags.DEFINE_string('dataset', None, 'dataset file.')
tf.app.flags.DEFINE_string('output', None, 'output json.')
tf.app.flags.DEFINE_string('loader', 'squad', 'loader type.')
tf.app.flags.DEFINE_string('knowledge_store', None, 'assertion store.')
tf.app.flags.DEFINE_string('load_dir', None, 'path to reader.')
tf.app.flags.DEFINE_integer('batch_size', 64, 'batch size.')

FLAGS = tf.app.flags.FLAGS
dataset = loaders[FLAGS.loader](FLAGS.dataset)

if FLAGS.knowledge_store:
    reader = readers.reader_from_file(FLAGS.load_dir,
                                      assertion_dir=FLAGS.knowledge_store)
else:
    reader = readers.reader_from_file(FLAGS.load_dir)

input_module = reader.input_module

num_batches = int(math.ceil(len(dataset) / FLAGS.batch_size))

id2sideinformation = {}
bar = progressbar.ProgressBar(max_value=num_batches,
                              widgets=[
                                  ' [',
                                  progressbar.Timer(), '] ',
                                  progressbar.Bar(), ' (',
                                  progressbar.ETA(), ') '
                              ])
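
A minimal sketch of how the bar above is typically driven; this batching loop is an assumption, not part of the original snippet:

for i in bar(range(num_batches)):
    batch = dataset[i * FLAGS.batch_size:(i + 1) * FLAGS.batch_size]
    # ... compute per-example side information and store it in id2sideinformation
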
Example #7
from jack import readers
from jack.core import QASetting

saved_reader = '/scratch/fz758/ucl/fever/results/depsa_elmo_10k_n5/reader'

reader = readers.reader_from_file(saved_reader, dropout=0.0)
reader.model_module.prediction_module.set_visualize(
    '/scratch/fz758/ucl/fever/results/depsa_elmo_10k_n5/dep_attn.jsonl')


s = 'Munich is the capital and largest city of the German state of Bavaria.'
s_tokenized = ['Munich', 'is', 'the', 'capital', 'and', 'largest', 'city', 'of', 'the', 'German', 'state', 'of', 'Bavaria', '.']
s_length = [14]
s_dep_i = [3, 3, 3, 6, 6, 3, 10, 10, 10, 6, 12, 10, 3]
s_dep_j = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
s_dep_type = [6, 19, 7, 17, 12, 14, 5, 7, 12, 16, 5, 16, 4]

q = 'Munich is the capital of Germany.'
q_tokenized = ['Munich', 'is', 'the', 'capital', 'of', 'Germany', '.']
q_length = [7]
q_dep_i = [3, 3, 3, 5, 3, 3]
q_dep_j = [0, 1, 2, 4, 5, 6]
q_dep_type = [6, 19, 7, 5, 16, 4]
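
# (Assumed encoding: *_dep_i are head token indices, *_dep_j dependent token
# indices, and *_dep_type integer-coded dependency labels for each arc.)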

qa_setting = QASetting(
    q,
    [s],
    1,
    None,
    None,
    None,
    q_tokenized=q_tokenized,
Example #8
                        default=5,
                        help="how many sentences to read for prediction")
    parser.add_argument("--batch_size",
                        type=int,
                        default=32,
                        help="batch size for inference")
    parser.add_argument(
        "--preprocessed_in_file",
        default=None,
        type=str,
        help="path for preprocessed input data, if set, in_file is ignored")
    args = parser.parse_args()
    print(args, file=sys.stderr)

    print("loading reader from file:", args.saved_reader)
    dam_reader = readers.reader_from_file(args.saved_reader, dropout=0.0)

    if args.preprocessed_in_file is None:
        results = list()
        preds_length = list()
        all_settings = list()
        instances = read_ir_result(args.in_file,
                                   n_sentences=args.n_sentences,
                                   prependlinum=args.prependlinum,
                                   prependtitle=args.prependtitle,
                                   concatev=args.concatev)

        for instance in instances:
            evidence_list = instance["evidence"]
            claim = instance["claim"]
            settings = [
Example #9
import json
import logging
import os
import sys

import tensorflow as tf

from jack.io.load import loaders
from jack.readers import reader_from_file

# evaluate_reader and pretty_print_results are assumed to be available from
# jack's evaluation utilities.

logger = logging.getLogger(os.path.basename(sys.argv[0]))
logging.basicConfig(level=logging.INFO)

tf.app.flags.DEFINE_string('dataset', None, 'dataset file')
tf.app.flags.DEFINE_string('loader', 'jack', 'name of loader')
tf.app.flags.DEFINE_string('load_dir', None, 'directory to saved model')
tf.app.flags.DEFINE_integer('batch_size', 64, 'batch size')
tf.app.flags.DEFINE_integer('max_examples', None,
                            'maximum number of examples to evaluate')
tf.app.flags.DEFINE_string('overwrite', '{}',
                           'json string that overwrites configuration.')

FLAGS = tf.app.flags.FLAGS

logger.info("Creating and loading reader from {}...".format(FLAGS.save_dir))

kwargs = json.loads(FLAGS.overwrite)

reader = reader_from_file(FLAGS.load_dir, **kwargs)
dataset = loaders[FLAGS.loader](FLAGS.dataset)
if FLAGS.max_examples:
    dataset = dataset[:FLAGS.max_examples]

logger.info("Start!")
result_dict = evaluate_reader(reader, dataset, FLAGS.batch_size)

logger.info("############### RESULTS ##############")
pretty_print_results(result_dict)
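
Assuming the snippet above is saved as eval_reader.py (a hypothetical name), it would be invoked along the lines of:

python eval_reader.py --dataset dev.jsonl --loader jack --load_dir /path/to/saved_reader --batch_size 32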