Example #1
 def _get_data(self, filename):
     with open(os.path.join(self.data, filename + '.json')) as f:
         try:
             return json.load(f)
         except ValueError as e:
             colored_print(
                 'Check JSON format of `%s.json` file' % filename,
                 'FAIL'
             )
             colored_print(e, 'FAIL')
             return {}
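All of the examples on this page call a small colored_print helper that the excerpts themselves do not define. As a point of reference, here is a minimal sketch assuming ANSI escape sequences and the level names used in examples #1, #2, #4 and #7-#10 ('OKGREEN', 'OKBLUE', 'WARNING', 'FAIL'); note that the KBI/tkbi projects (examples #3, #5, #12, #13) instead pass the colour name first, e.g. utils.colored_print('yellow', msg), so their helper differs.

# Hypothetical helper, not taken from any of the projects listed here.
_ANSI = {
    'OKGREEN': '\033[92m',
    'OKBLUE': '\033[94m',
    'WARNING': '\033[93m',
    'FAIL': '\033[91m',
}
_ENDC = '\033[0m'

def colored_print(message, level='OKGREEN'):
    # Unknown level names fall back to plain, uncoloured output.
    code = _ANSI.get(level, '')
    print('%s%s%s' % (code, message, _ENDC if code else ''))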
Example #2
 def _make_pdf(self):
     colored_print('Generating PDFs...', 'OKGREEN')
     html_files = glob.glob(os.path.join(self.html_output, '*.html'))
     for html_file in html_files:
         pdf_filename = os.path.splitext(ntpath.basename(html_file))[0]
         pdf_filename += rand_string(5)
         pdf_filename += '.pdf'
         try:
             HTML(html_file).write_pdf(os.path.join(self.pdf_output, pdf_filename), zoom=1.75)
         except Exception as e:
             colored_print('PDF `%s` generation failed' % pdf_filename, 'FAIL')
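rand_string, used here and again in example #7 to make output filenames unique, is another project helper that is not shown in these excerpts. A plausible stand-in (the real implementation may differ):

import random
import string

def rand_string(length):
    # Random lowercase/digit suffix, e.g. for cache-busting file names.
    return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length))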
Example #3
 def load_state(self, state_file):
     state = torch.load(state_file)
     if state['model_name'] != type(self.scoring_function).__name__:
         utils.colored_print('yellow', 'model name in saved file %s is different from the name of current model %s' %
                             (state['model_name'], type(self.scoring_function).__name__))
     self.scoring_function.load_state_dict(state['model_weights'])
     if state['optimizer_name'] != type(self.optim).__name__:
         utils.colored_print('yellow', ('optimizer name in saved file %s is different from the name of current '+
                                       'optimizer %s') %
                             (state['optimizer_name'], type(self.optim).__name__))
     self.optim.load_state_dict(state['optimizer_state'])
     return state['mini_batches']
Example #4
 def _initialize(self):
     colored_print('Initializing... ', 'OKGREEN')
     # Init html dest dir
     if os.path.isdir(self.html_output):
         shutil.rmtree(self.html_output)
     os.mkdir(self.html_output)
     copytree(self.assets, self.html_assets)
     os.mkdir(self.html_css)
     # Init pdf dir
     if os.path.isdir(self.pdf_output):
         shutil.rmtree(self.pdf_output)
     os.mkdir(self.pdf_output)
Example #5
    def save_state(self, mini_batches, valid_score, test_score):
        state = dict()
        state['mini_batches'] = mini_batches
        state['epoch'] = (mini_batches * self.batch_size /
                          self.train.kb.facts.shape[0])
        state['model_name'] = type(self.scoring_function).__name__
        state['model_weights'] = self.scoring_function.state_dict()
        state['optimizer_state'] = self.optim.state_dict()
        state['optimizer_name'] = type(self.optim).__name__
        state['valid_score_e2'] = valid_score['e2']
        state['test_score_e2'] = test_score['e2']
        state['valid_score_e1'] = valid_score['e1']
        state['test_score_e1'] = test_score['e1']
        state['valid_score_m'] = valid_score['m']
        state['test_score_m'] = test_score['m']
        filename = os.path.join(
            self.save_directory,
            "epoch_%.1f_val_%5.2f_%5.2f_%5.2f_test_%5.2f_%5.2f_%5.2f.pt" %
            (state['epoch'], state['valid_score_e2']['mrr'],
             state['valid_score_e1']['mrr'], state['valid_score_m']['mrr'],
             state['test_score_e2']['mrr'], state['test_score_e1']['mrr'],
             state['test_score_m']['mrr']))

        #torch.save(state, filename)
        try:
            if (state['valid_score_m']['mrr'] >=
                    self.best_mrr_on_valid["valid_m"]["mrr"]):
                print("Best Model details:\n", "valid_m",
                      str(state['valid_score_m']), "test_m",
                      str(state["test_score_m"]), "valid",
                      str(state['valid_score_e2']), "test",
                      str(state["test_score_e2"]), "valid_e1",
                      str(state['valid_score_e1']), "test_e1",
                      str(state["test_score_e1"]))
                best_name = os.path.join(self.save_directory,
                                         "best_valid_model.pt")
                self.best_mrr_on_valid = {
                    "valid_m": state['valid_score_m'],
                    "test_m": state["test_score_m"],
                    "valid": state['valid_score_e2'],
                    "test": state["test_score_e2"],
                    "valid_e1": state['valid_score_e1'],
                    "test_e1": state["test_score_e1"]
                }

                if (os.path.exists(best_name)):
                    os.remove(best_name)
                torch.save(
                    state, best_name
                )  #os.symlink(os.path.realpath(filename), best_name)
        except Exception:
            utils.colored_print("red", "unable to save model")
Example #6
File: app.py Project: HwDhyeon/Discord-Bot
async def on_message(message):
    bad_words = [
        'f**k',
    ]
    for bad_word in bad_words:
        if bad_word in message.content:
            colored_print('\nBad word detection!', 'red')
            colored_print(f'Content: [{message.content}]')
            await message.channel.send('Let\'s use good and fine words.')
            await message.delete()
            break
    await bot.process_commands(message)
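The excerpt shows only the handler body; how the listener is registered is not part of it. A minimal sketch of the usual discord.py wiring, assuming a commands.Bot instance named bot as in the code above (the token and intents settings are placeholders, and newer discord.py versions additionally require the message_content intent to read message.content):

import discord
from discord.ext import commands

intents = discord.Intents.default()
bot = commands.Bot(command_prefix='!', intents=intents)

@bot.event
async def on_message(message):
    # body as in the example above, then let command processing continue
    await bot.process_commands(message)

# bot.run('YOUR_TOKEN')  # placeholder token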
Example #7
 def _compile_sass(self):
     colored_print('Recompiling SaSS... ', 'OKGREEN')
     css_filename = os.path.join(self.html_css, 'main.' + rand_string(12) + '.css')
     try:
         compiled = sass.compile(
             filename=os.path.join(self.sass, 'main.scss'),
             output_style='compressed'
         )
         with open(css_filename, 'w') as f:
             f.write(compiled)
     except Exception as e:
         colored_print(e, 'WARNING')
     return css_filename
Example #8
    def process(self, event):
        start_time = time.time()
        colored_print('*' * 60, 'OKBLUE')
        colored_print(time.ctime() + ' : Start process.', 'OKGREEN')

        self._initialize()
        css_file = self._compile_sass()
        self._renew_localization()
        self._compile_html(css_file)
        self._make_pdf()

        colored_print('Finished in ' + str(time.time() - start_time), 'OKGREEN')
        colored_print('*' * 60, 'OKBLUE')
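process(self, event) takes a filesystem event, which suggests the class is driven by a file watcher. The project's watcher setup is not shown here; the sketch below shows one way such a method could be hooked up with the watchdog library (RebuildHandler and the callback are hypothetical, not the project's code):

import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

class RebuildHandler(FileSystemEventHandler):
    # Forwards every filesystem event to a process(event)-style callback.
    def __init__(self, callback):
        self.callback = callback

    def on_any_event(self, event):
        self.callback(event)

if __name__ == '__main__':
    handler = RebuildHandler(lambda event: print('rebuild triggered by', event.src_path))
    observer = Observer()
    observer.schedule(handler, path='.', recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    finally:
        observer.stop()
        observer.join()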
Example #9
 def _compile_html(self, css_file):
     for locale in self.locales_list:
         os.environ['LANG'] = locale
         self._init_jinja()
         templates = glob.glob(os.path.join(self.templates, '*.html'))
         for template in templates:
             colored_print(
                 'Compiling `%s` file to Html for `%s` locale' % (template, locale),
                 'OKGREEN'
             )
             template = ntpath.basename(template)
             name = os.path.splitext(template)
             data = self._get_data(name[0])
             data['css_path'] = os.path.join(
                 self.css_relative,
                 ntpath.basename(css_file)
             )
             data['locale'] = locale
             rendered = 'ERROR HAPPENED'
             try:
                 rendered = self.jinja_env.get_template(template).render(**data)
             except Exception as e:
                 colored_print(
                     'Jinja render error. Check template `%s`' % template,
                     'FAIL'
                 )
                 colored_print(e, 'FAIL')
             filename = os.path.join(
                 self.html_output,
                 ''.join([name[0], '_', locale, '.html'])
             )
              with open(filename, 'w', encoding='utf-8') as f:
                  f.write(rendered)
             colored_print(
                 'compiled and saved to `%s`' % filename,
                 'OKGREEN'
             )
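self._init_jinja() (called once per locale in this method) is not included in these excerpts. Below is a minimal sketch of what a locale-aware Jinja2 environment could look like, assuming the Babel catalogs compiled in example #10; the function name and signature are hypothetical, written as a plain function rather than the project's method:

import os
from babel.support import Translations
from jinja2 import Environment, FileSystemLoader

def make_jinja_env(templates_dir, locale_dir, locale_domain, locale):
    # Load the compiled Babel catalog for this locale and wire it into Jinja's
    # i18n extension so templates can use gettext-style markers.
    translations = Translations.load(locale_dir, [locale], domain=locale_domain)
    env = Environment(loader=FileSystemLoader(templates_dir),
                      extensions=['jinja2.ext.i18n'])
    env.install_gettext_translations(translations, newstyle=True)
    return env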
Example #10
 def _renew_localization(self):
     """
     For create translation template use command
         pybabel extract -F ./babel.cfg -o ./i18n/messages.pot ./
     For initialize new locale use
         pybabel init -l en_US -d ./i18n -i ./i18n/messages.pot
     For updating locale files use
         pybabel update -l en_US -d ./i18n -i ./i18n/messages.pot
     For compiling use
         pybabel compile -f -d ./i18n
     """
     colored_print('Re-extract localization messages...', 'OKGREEN')
     params = ['pybabel', 'extract', '-F', './babel.cfg', '-o',
               os.path.join(self.locale_dir, self.locale_domain + '.pot'), './']
     subprocess.call(params)
     for loc in self.locales_list:
         colored_print('Update `%s` localization file...' % loc, 'OKGREEN')
         params = ['pybabel', 'update', '-l', loc, '-d', self.locale_dir,
                   '-i', os.path.join(self.locale_dir, self.locale_domain + '.pot')]
         subprocess.call(params)
     colored_print('Recompile localization files...', 'OKGREEN')
     params = ['pybabel', 'compile', '-f', '-d', self.locale_dir]
     subprocess.call(params)
Example #11
# Note: the top of this excerpt was cut off. The function header and the three
# setup lines below are assumed for readability; the name map_lines is hypothetical.
def map_lines(lines, mapp, maxlen):
    mapped_data = []
    lengths = []
    for line in lines:
        mapped_line = []
        line = line.strip().split()
        for word in line:
            mapped_line.append(mapp.get(word, len(mapp)))
        # add end token
        mapped_line.append(END_MAPPING)
        lengths.append(len(mapped_line))
        assert (len(mapped_line) <= maxlen)
        for i in range(maxlen - len(mapped_line)):
            mapped_line.append(PAD_MAPPING)
        mapped_data.append(mapped_line)
    return torch.tensor(mapped_data), torch.tensor(lengths)


has_cuda = torch.cuda.is_available()
if not has_cuda:
    utils.colored_print("yellow", "CUDA is not available, using cpu")


def main(args):
    # read token maps
    etokens, etoken_map = utils.get_tokens_map(
        os.path.join(args.data_dir, "mapped_to_ids",
                     "entity_token_id_map.txt"))
    rtokens, rtoken_map = utils.get_tokens_map(
        os.path.join(args.data_dir, "mapped_to_ids",
                     "relation_token_id_map.txt"))
    entity_mentions, em_map = utils.read_mentions(
        os.path.join(args.data_dir, "mapped_to_ids", "entity_id_map.txt"))
    _, rm_map = utils.read_mentions(
        os.path.join(args.data_dir, "mapped_to_ids", "relation_id_map.txt"))
Example #12
def main(dataset_root, save_dir, model_name, model_arguments, loss_function,
         loss_arguments, learning_rate, batch_size, regularization_coefficient,
         gradient_clip, optimizer_name, max_epochs, negative_sample_count,
         hooks, eval_every_x_epochs, eval_batch_size, resume_from_save,
         introduce_oov, verbose):
    ktrain = kb.kb(os.path.join(dataset_root, 'train.txt'))
    if introduce_oov:
        ktrain.entity_map["<OOV>"] = len(ktrain.entity_map)
    kvalid = kb.kb(os.path.join(dataset_root, 'valid.txt'),
                   ktrain.entity_map,
                   ktrain.relation_map,
                   add_unknowns=not introduce_oov)
    ktest = kb.kb(os.path.join(dataset_root, 'test.txt'),
                  ktrain.entity_map,
                  ktrain.relation_map,
                  add_unknowns=not introduce_oov)

    if (verbose > 0):
        utils.colored_print("yellow", "VERBOSE ANALYSIS only for FB15K")
        tpm = extra_utils.fb15k_type_map_fine()
        ktrain.augment_type_information(tpm)
        ktest.augment_type_information(tpm)
        kvalid.augment_type_information(tpm)
        hooks = extra_utils.load_hooks(hooks, ktrain)

    model_arguments['entity_count'] = len(ktrain.entity_map)
    model_arguments['relation_count'] = len(ktrain.relation_map)
    scoring_function = getattr(models, model_name)(**model_arguments)
    if has_cuda:
        scoring_function = scoring_function.cuda()
    if loss_function == 'query2box_loss':
        loss = getattr(losses, loss_function)(**loss_arguments)
    else:
        loss = getattr(losses, loss_function)()
    optim = getattr(torch.optim, optimizer_name)(scoring_function.parameters(),
                                                 lr=learning_rate)

    if (not eval_batch_size):
        eval_batch_size = max(
            50,
            batch_size * 2 * negative_sample_count // len(ktrain.entity_map))
    tr = trainer.Trainer(scoring_function,
                         scoring_function.regularizer,
                         loss,
                         optim,
                         ktrain,
                         kvalid,
                         ktest,
                         batch_size=batch_size,
                         eval_batch=eval_batch_size,
                         negative_count=negative_sample_count,
                         save_dir=save_dir,
                         gradient_clip=gradient_clip,
                         hooks=hooks,
                         regularization_coefficient=regularization_coefficient,
                         verbose=verbose)
    if resume_from_save:
        mb_start = tr.load_state(resume_from_save)
    else:
        mb_start = 0
    max_mini_batch_count = int(max_epochs * ktrain.facts.shape[0] / batch_size)
    print("max_mini_batch_count: %d, eval_batch_size %d" %
          (max_mini_batch_count, eval_batch_size))
    tr.start(
        max_epochs,
        [(eval_every_x_epochs * ktrain.facts.shape[0] / batch_size) // 20, 20],
        mb_start, learning_rate)
Example #13
File: trainer.py Project: sushant21/tkbi
    def save_state(self, mini_batches, valid_score, test_score):
        state = dict()

        # -------- #
        # save datamap as well, useful for analysis later on
        state['datamap'] = self.train.kb.datamap
        state['model_arguments'] = self.scoring_function_arguments
        # ------- #

        state['mini_batches'] = mini_batches
        state['epoch'] = (mini_batches * self.batch_size /
                          self.train.kb.facts.shape[0])
        state['model_name'] = type(self.scoring_function).__name__
        state['model_weights'] = self.scoring_function.state_dict()
        state['optimizer_state'] = self.optim.state_dict()
        state['optimizer_name'] = type(self.optim).__name__
        state['valid_score_e2'] = valid_score['e2']
        state['test_score_e2'] = test_score['e2']
        state['valid_score_e1'] = valid_score['e1']
        state['test_score_e1'] = test_score['e1']
        state['valid_score_m'] = valid_score['m']
        state['test_score_m'] = test_score['m']

        state['valid_score_t'] = valid_score['t']
        state['test_score_t'] = test_score['t']

        state['valid_score_r'] = valid_score['r']
        state['test_score_r'] = test_score['r']

        # --not needed, but keeping for backward compatibility-- #
        state['entity_map'] = self.train.kb.datamap.entity_map
        state['reverse_entity_map'] = self.train.kb.datamap.reverse_entity_map
        state['relation_map'] = self.train.kb.datamap.relation_map
        state['reverse_relation_map'] = self.train.kb.datamap.reverse_relation_map
        # --------------- #

        # state['additional_params'] = self.train.kb.additional_params
        state['nonoov_entity_count'] = self.train.kb.nonoov_entity_count

        filename = os.path.join(
            self.save_directory,
            "epoch_%.1f_val_%5.2f_%5.2f_%5.2f_test_%5.2f_%5.2f_%5.2f.pt" %
            (state['epoch'], state['valid_score_e2']['mrr'],
             state['valid_score_e1']['mrr'], state['valid_score_m']['mrr'],
             state['test_score_e2']['mrr'], state['test_score_e1']['mrr'],
             state['test_score_m']['mrr']))

        # torch.save(state, filename)
        def print_tensor(data):
            data2 = {}
            for key in data:
                data2[key] = round(data[key].tolist(), 4)
            return str(data2)

        try:
            if state['valid_score_m']['mrr'] >= self.best_mrr_on_valid[
                    "valid_m"]["mrr"]:
                print("Best Model details:\n", "valid_m",
                      str(state['valid_score_m']), "\n", "test_m",
                      str(state["test_score_m"]), "\n\n", "valid",
                      str(state['valid_score_e2']), "\n", "test",
                      str(state["test_score_e2"]), "\n\n", "valid_e1",
                      str(state['valid_score_e1']), "\n", "test_e1",
                      str(state["test_score_e1"]), "\n\n", "valid_r",
                      str(state['valid_score_r']), "\n", "test_r",
                      str(state["test_score_r"]), "\n\n", "valid_t",
                      str(state['valid_score_t']), "\n", "test_t",
                      str(state["test_score_t"]), "\n")
                best_name = os.path.join(self.save_directory,
                                         "best_valid_model.pt")
                self.best_mrr_on_valid = {
                    "valid_m": state['valid_score_m'],
                    "test_m": state["test_score_m"],
                    "valid": state['valid_score_e2'],
                    "test": state["test_score_e2"],
                    "valid_e1": state['valid_score_e1'],
                    "test_e1": state["test_score_e1"],
                    "valid_r": state['valid_score_r'],
                    "test_r": state["test_score_r"],
                    "valid_t": state['valid_score_t'],
                    "test_t": state["test_score_t"]
                }

                if os.path.exists(best_name):
                    os.remove(best_name)
                torch.save(
                    state, best_name
                )  # os.symlink(os.path.realpath(filename), best_name)
        except Exception:
            utils.colored_print("red", "unable to save model")
Example #14
File: app.py Project: HwDhyeon/Discord-Bot
async def on_ready() -> NoReturn:
    colored_print('Bot is running...', 'yellow')
Example #15
File: main.py Project: dair-iitd/KBI
import kb
import data_loader
import trainer
import torch
import losses
import models
import argparse
import os
import datetime
import json
import utils
import extra_utils

has_cuda = torch.cuda.is_available()
if not has_cuda:
    utils.colored_print("yellow", "CUDA is not available, using cpu")


def main(dataset_root, save_dir, model_name, model_arguments, loss_function,
         learning_rate, batch_size, regularization_coefficient, gradient_clip,
         optimizer_name, max_epochs, negative_sample_count, hooks,
         eval_every_x_mini_batches, eval_batch_size, resume_from_save,
         introduce_oov, verbose):
    ktrain = kb.kb(os.path.join(dataset_root, 'train.txt'))
    if introduce_oov:
        ktrain.entity_map["<OOV>"] = len(ktrain.entity_map)
    ktest = kb.kb(os.path.join(dataset_root, 'test.txt'),
                  ktrain.entity_map,
                  ktrain.relation_map,
                  add_unknowns=not introduce_oov)
    kvalid = kb.kb(os.path.join(dataset_root, 'valid.txt'),
                   ktrain.entity_map,
                   ktrain.relation_map,
                   add_unknowns=not introduce_oov)
Example #16
import os
import sys
import time
import numpy as np
import random
import torch
from tqdm import tqdm
import datetime
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from kb import kb
import utils
from dataset import Dataset
import ast

has_cuda = torch.cuda.is_available()
if not has_cuda:
    utils.colored_print("yellow", "CUDA is not available, using cpu")


def main():
    data_dir = "data/olpbench"
    head_or_tail = "tail"
    sample = 100
    train_kb = kb(os.path.join(data_dir, "train_data_thorough.txt"),
                  em_map=None,
                  rm_map=None)
    freq_r = {}
    freq_r_e2 = {}
    for triple in train_kb.triples:
        e1 = triple[0].item()
        r = triple[1].item()
        e2 = triple[2].item()
Example #17
File: main.py Project: sushant21/tkbi
def main(mode, dataset, dataset_root, save_dir, tflogs_dir, debug, model_name,
         model_arguments, loss_function, learning_rate, batch_size,
         regularization_coefficient, regularizer, gradient_clip,
         optimizer_name, max_epochs, negative_sample_count, hooks,
         eval_every_x_mini_batches, eval_batch_size, resume_from_save,
         introduce_oov, verbose, batch_norm, predict_rel, predict_time,
         time_args, time_neg_samples, expand_mode, flag_bin, flag_time_smooth,
         flag_additional_filter, filter_method, perturb_time, use_time_facts,
         time_loss_margin, dump_t_scores, save_text, save_time_results,
         patience, flag_add_reverse):
    # --set arguments for different models-- #
    if model_name.startswith('TA'):
        use_time_tokenizer = True
    else:
        use_time_tokenizer = False
    print("Flag: use_time_tokenizer-", use_time_tokenizer)

    print("Flag: flag_add_reverse-", flag_add_reverse)
    # if re.search("_lx", model_name):# or model_name=="TimePlex":
    #     flag_add_reverse = 1
    # else:
    #     flag_add_reverse = 0
    # -------------------------------------- #

    if resume_from_save:
        map_location = None if has_cuda else 'cpu'
        model = torch.load(resume_from_save, map_location=map_location)

        datamap = model['datamap']  # load datamap from saved model
        saved_model_arguments = model['model_arguments']
        if model_arguments is not None:
            for key in model_arguments:
                saved_model_arguments[key] = model_arguments[key]

        model_arguments = saved_model_arguments
        print("model_arguments:", model_arguments)
        # model_arguments = model['model_arguments']  # load model_arguments for model init (argument ignored)

    else:
        # --for HyTE-like binning-- #
        if flag_bin:
            use_time_interval = True
            print("Using hyTE-like chunking\n")
        else:
            use_time_interval = False
        print("Flag: use_time_interval", use_time_interval)
        # ------------------------- #

        # build datamap
        datamap = kb.Datamap(dataset, dataset_root, use_time_interval)

        if introduce_oov:
            if not "<OOV>" in datamap.entity_map.keys():
                eid = len(datamap.entity_map)
                datamap.entity_map["<OOV>"] = eid
                datamap.reverse_entity_map[eid] = "<OOV>"
                datamap.nonoov_entity_count = datamap.entity_map["<OOV>"] + 1

    # ---create train/test/valid kbs for filtering (need to keep this same irrespective of model)--- #
    dataset_root_filter = './data/{}'.format(dataset)

    datamap_filter = kb.Datamap(dataset,
                                dataset_root_filter,
                                use_time_interval=False)

    ranker_ktrain = kb.kb(datamap_filter,
                          os.path.join(dataset_root_filter, 'train.txt'))

    ranker_ktest = kb.kb(datamap_filter,
                         os.path.join(dataset_root_filter, 'test.txt'),
                         add_unknowns=int(not (int(introduce_oov))))

    ranker_kvalid = kb.kb(datamap_filter,
                          os.path.join(dataset_root_filter, 'valid.txt'),
                          add_unknowns=int(not (int(introduce_oov))))
    # --------------------------- #

    # ---create train/test/valid kbs--- #
    ktrain = kb.kb(datamap,
                   os.path.join(dataset_root, 'train.txt'),
                   use_time_tokenizer=use_time_tokenizer)

    ktest = kb.kb(datamap,
                  os.path.join(dataset_root, 'test.txt'),
                  add_unknowns=int(not (int(introduce_oov))),
                  use_time_tokenizer=use_time_tokenizer)

    kvalid = kb.kb(datamap,
                   os.path.join(dataset_root, 'valid.txt'),
                   add_unknowns=int(not (int(introduce_oov))),
                   use_time_tokenizer=use_time_tokenizer)
    # --------------------------- #

    print("Train (no expansion)", ktrain.facts.shape)
    print("Test", ktest.facts.shape)
    print("Valid", kvalid.facts.shape)

    # print("dateYear2id", len(datamap.dateYear2id))
    # print("dateYear2id", datamap.dateYear2id)
    # print("intervalId2dateYears", len(datamap.intervalId2dateYears))

    if not eval_batch_size:
        eval_batch_size = max(
            40,
            batch_size * 2 * negative_sample_count // len(datamap.entity_map))

    # init model
    if resume_from_save:
        if 'eval_batch_size' in model_arguments:
            model_arguments['eval_batch_size'] = eval_batch_size

        scoring_function = getattr(models, model_name)(
            **model_arguments
        )  # use model_arguments from saved model, allowing those provided in command to be overridden
    else:
        scoring_function = init_model(model_name, model_arguments, datamap,
                                      ktrain, eval_batch_size,
                                      flag_time_smooth, regularizer,
                                      expand_mode, flag_add_reverse,
                                      batch_norm, has_cuda)

    if has_cuda:
        scoring_function = scoring_function.cuda()

    if mode == 'train':
        # expand data as needed
        if expand_mode != "None":
            ktrain.expand_data(mode=expand_mode)
            print("Expanded training data with mode= {}".format(expand_mode))
        else:
            print("Not expanding training data")

        print("Train (after expansion)", ktrain.facts.shape)

        # ---create dataloaders to be used when training--- #
        dltrain = data_loader.data_loader(ktrain,
                                          has_cuda,
                                          loss=loss_function,
                                          flag_add_reverse=flag_add_reverse,
                                          model=model_name,
                                          perturb_time=perturb_time)
        dlvalid = data_loader.data_loader(
            kvalid,
            has_cuda,
            loss=loss_function,  #flag_add_reverse=flag_add_reverse,
            model=model_name)
        dltest = data_loader.data_loader(
            ktest,
            has_cuda,
            loss=loss_function,  #flag_add_reverse=flag_add_reverse,
            model=model_name)
        # ------------------------------------------------ #

        # loss, optimiser, scheduler for training
        loss = getattr(losses, loss_function)()
        optim = getattr(torch.optim,
                        optimizer_name)(scoring_function.parameters(),
                                        lr=learning_rate)
        scheduler = lr_scheduler.ReduceLROnPlateau(
            optim, 'max', factor=0.1, patience=patience,
            verbose=True)  # mrr tracking

        # init trainer and start training
        tr = trainer.Trainer(
            scoring_function,
            model_arguments,
            scoring_function.regularizer,
            loss,
            optim,
            dltrain,
            dlvalid,
            dltest,
            batch_size=batch_size,
            eval_batch=eval_batch_size,
            negative_count=negative_sample_count,
            save_dir=save_dir,
            gradient_clip=gradient_clip,
            hooks=hooks,
            regularization_coefficient=regularization_coefficient,
            verbose=verbose,
            scheduler=scheduler,
            debug=debug,
            time_neg_samples=time_neg_samples,
            expand_mode=expand_mode,
            flag_additional_filter=flag_additional_filter,
            filter_method=filter_method,
            use_time_facts=use_time_facts,
            time_loss_margin=time_loss_margin,
            predict_time=predict_time,
            time_args=time_args,
            flag_add_reverse=flag_add_reverse,
            load_to_gpu=has_cuda)  # 0.01)
        if resume_from_save:
            mb_start = tr.load_state(resume_from_save)
        else:
            mb_start = 0
        max_mini_batch_count = int(max_epochs * ktrain.facts.shape[0] /
                                   batch_size)
        print("max_mini_batch_count: %d, eval_batch_size %d" %
              (max_mini_batch_count, eval_batch_size))
        tr.start(
            max_mini_batch_count,
            [eval_every_x_mini_batches // 20, 20],
            mb_start,
            tflogs_dir,
        )

    elif mode == 'test':
        # if not eval_batch_size:
        #     eval_batch_size = max(40, batch_size * 2 * negative_sample_count // len(datamap.entity_map))

        # Load Model
        map_location = None if has_cuda else 'cpu'
        saved_model = torch.load(
            resume_from_save, map_location=map_location
        )  # note: resume_from_save is required for testing

        scoring_function.load_state_dict(saved_model['model_weights'])

        print("valid_score_m", saved_model['valid_score_m'])
        print("valid_score_e1", saved_model['valid_score_e1'])
        print("valid_score_e2", saved_model['valid_score_e2'])
        print("test_score_m", saved_model['test_score_m'])
        print("test_score_e1", saved_model['test_score_e1'])
        print("test_score_e2", saved_model['test_score_e2'])

        # '''
        # ---entity/relation prediction--- #
        print("Scores with {} filtering".format(filter_method))

        # ranker = evaluate.Ranker(scoring_function, kb.union([ktrain, kvalid, ktest]), kb_data=kvalid,
        #                          filter_method=filter_method, flag_additional_filter=flag_additional_filter,
        #                          expand_mode=expand_mode, load_to_gpu=has_cuda)
        ranker = evaluate.Ranker(
            scoring_function,
            kb.union([ranker_ktrain, ranker_kvalid, ranker_ktest]),
            kb_data=ranker_kvalid,
            filter_method=filter_method,
            flag_additional_filter=flag_additional_filter,
            expand_mode=expand_mode,
            load_to_gpu=has_cuda)

        valid_score = evaluate.evaluate("valid",
                                        ranker,
                                        kvalid,
                                        eval_batch_size,
                                        verbose=verbose,
                                        hooks=hooks,
                                        save_text=save_text,
                                        predict_rel=predict_rel,
                                        load_to_gpu=has_cuda,
                                        flag_add_reverse=flag_add_reverse)

        # ranker = evaluate.Ranker(scoring_function, kb.union([ktrain, kvalid, ktest]), kb_data=test,
        #                          filter_method=filter_method, flag_additional_filter=flag_additional_filter,
        #                          expand_mode=expand_mode, load_to_gpu=has_cuda)

        ranker = evaluate.Ranker(
            scoring_function,
            kb.union([ranker_ktrain, ranker_kvalid, ranker_ktest]),
            kb_data=ranker_ktest,
            filter_method=filter_method,
            flag_additional_filter=flag_additional_filter,
            expand_mode=expand_mode,
            load_to_gpu=has_cuda)
        test_score = evaluate.evaluate("test",
                                       ranker,
                                       ktest,
                                       eval_batch_size,
                                       verbose=verbose,
                                       hooks=hooks,
                                       save_text=save_text,
                                       predict_rel=predict_rel,
                                       load_to_gpu=has_cuda,
                                       flag_add_reverse=flag_add_reverse)

        print("Valid")
        pprint.pprint(valid_score)
        print("Test")
        pprint.pprint(test_score)
        # ------------------ #
        '''

        # '''
        # ---time prediction--- #
        utils.colored_print("yellow", "\nEvaluating on time prediction\n")

        # create test/valid kbs for subset of data (for which boths start end have been provided)
        ktest_sub = kb.kb(datamap,
                          os.path.join(dataset_root, 'intervals/test.txt'),
                          add_unknowns=int(not (int(introduce_oov))),
                          use_time_tokenizer=use_time_tokenizer)

        kvalid_sub = kb.kb(datamap,
                           os.path.join(dataset_root, 'intervals/valid.txt'),
                           add_unknowns=int(not (int(introduce_oov))),
                           use_time_tokenizer=use_time_tokenizer)

        if predict_time:
            time_evaluate(scoring_function,
                          kvalid_sub,
                          ktest_sub,
                          time_args=time_args,
                          dump_t_scores=dump_t_scores,
                          load_to_gpu=has_cuda,
                          save_time_results=save_time_results)