Example #1
def load_atomic_data(opt, LoaderPath=""):
    # Hacky workaround: you may have to change these defaults if your
    # models use different pad lengths for e1, e2, r. Note that the COMET
    # pretrained models use maxe1=17, maxe2=35, maxr=1.
    if opt.data.get("maxe1", None) is None:
        opt.data.maxe1 = 50
        opt.data.maxe2 = 35
        opt.data.maxr = 1

    path = "data/atomic/processed/generation/{}.pickle".format(
        utils.make_name_string(opt.data))
    # The data loader is built directly from the data in the atomic folder.
    data_loader = data.make_data_loader(opt, opt.data.categories)

    # Load from a custom loader path if one was provided, otherwise fall
    # back to the default path derived from the options.
    if LoaderPath == "":
        print("No loader path provided, using default loader")
        loaded = data_loader.load_data(path)
    else:
        print("Loader path provided:", LoaderPath)
        print("Is valid file:", os.path.isfile(LoaderPath))
        loaded = data_loader.load_data(LoaderPath)

    return data_loader
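
The defaulting pattern above relies on an options object that supports both dict-style get and attribute assignment. A minimal runnable sketch of such a container, assuming a dotdict-style class (the repo's real option type lives in utils):

# Minimal sketch of a dict with attribute access; the repo's real option
# container (in utils) is assumed to behave roughly like this.
class DotDict(dict):
    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError as e:
            raise AttributeError(key) from e

    def __setattr__(self, key, value):
        self[key] = value

opt = DotDict(data=DotDict())
if opt.data.get("maxe1", None) is None:  # same defaulting pattern as above
    opt.data.maxe1 = 50
    opt.data.maxe2 = 35
    opt.data.maxr = 1
assert opt.data.maxe1 == 50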
Example #2
def load_conceptnet_data(opt):
    # Hacky workaround, you may have to change this
    # if your models use different pad lengths for r
    if opt.data.get("maxr", None) is None:
        if opt.data.rel == "language":
            opt.data.maxr = 5
        else:
            opt.data.maxr = 1
    path = "data/conceptnet/processed/generation/{}.pickle".format(
        utils.make_name_string(opt.data))
    data_loader = data.make_data_loader(opt)
    loaded = data_loader.load_data(path)
    return data_loader
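
utils.make_name_string is repo-specific; judging from the hard-coded pickle paths in later examples (key_value pairs joined by dashes, list values joined by "#"), a hypothetical sketch of such a helper could look like this:

# Hypothetical sketch of a make_name_string-style helper; the real
# implementation may order, filter, and abbreviate fields differently.
def make_name_string(data_opt):
    parts = []
    for key in sorted(data_opt):
        value = data_opt[key]
        if isinstance(value, (list, tuple)):
            value = "#".join(str(v) for v in value)
        parts.append("{}_{}".format(key, value))
    return "-".join(parts)

# e.g. {"rel": "language", "maxr": 5} -> "maxr_5-rel_language"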
Example #3
def load_atomic_data(opt):
    # Hacky workaround, you may have to change this
    # if your models use different pad lengths for e1, e2, r
    if opt.data.get("maxe1", None) is None:
        opt.data.maxe1 = 17
        opt.data.maxe2 = 35
        opt.data.maxr = 1
    path = "data/atomic/processed/generation/{}.pickle".format(
        utils.make_name_string(opt.data))
    data_loader = data.make_data_loader(opt, opt.data.categories)
    loaded = data_loader.load_data(path)

    return data_loader
Example #4
def main(num):
    # Generate configuration files depending on experiment being run
    utils.generate_config_files("atomic", num)

    # Loads the correct configuration file
    config_file = "config/atomic/config_{}.json".format(num)

    print(config_file)

    # Read config file to option
    config = cfg.read_config(cfg.load_config(config_file))
    opt, meta = cfg.get_parameters(config)

    # Set the random seeds
    torch.manual_seed(opt.train.static.seed)
    random.seed(opt.train.static.seed)
    if config.gpu_mode:
        torch.cuda.manual_seed_all(opt.train.static.seed)

    # Where to find the data
    splits = ["train", "dev", "test"]

    opt.train.dynamic.epoch = 0

    print("Loading Data")

    categories = opt.data.categories

    path = "data/atomic/processed/{}/{}.pickle".format(
        opt.exp, utils.make_name_string(opt.data))

    data_loader = data.make_data_loader(opt, categories)
    loaded = data_loader.load_data(path)
    print(data_loader.sequences["train"]["total"].size(0))
    data_loader.opt = opt
    data_loader.batch_size = opt.train.dynamic.bs

    print("Done.")

    # Initialize text_encoder
    text_encoder = TextEncoder(config.encoder_path, config.bpe_path)

    special = [data.start_token, data.end_token]
    special += ["<{}>".format(cat) for cat in categories]
    special += [data.blank_token]

    text_encoder.encoder = data_loader.vocab_encoder
    text_encoder.decoder = data_loader.vocab_decoder

    opt.data.maxe1 = data_loader.max_event
    opt.data.maxe2 = data_loader.max_effect
    opt.data.maxr = data.atomic_data.num_delimiter_tokens["category"]

    n_special = len(special)
    n_ctx = opt.data.maxe1 + opt.data.maxe2
    n_vocab = len(text_encoder.encoder) + n_ctx

    print(data_loader.__dict__.keys())
    opt.net.vSize = n_vocab

    print("Building Model")

    model = models.make_model(opt,
                              n_vocab,
                              n_ctx,
                              n_special,
                              load=(opt.net.init == "pt"))

    print("Done.")

    print("Files will be logged at: {}".format(
        utils.make_name(opt, prefix="results/losses/", is_dir=True,
                        eval_=True)))

    data_loader.reset_offsets("train")

    # Get number of examples
    data.set_max_sizes(data_loader)

    if config.gpu_mode:
        print("Pushing to GPU: {}".format(config.gpu_index))
        cfg.device = config.gpu_index
        cfg.do_gpu = True
        torch.cuda.set_device(cfg.device)
        if config.multigpu:
            model = models.multi_gpu(model, config.gpu_indices).cuda()
        else:
            model.cuda(cfg.device)
        print("Done.")

    print("Training")

    optimizer = OpenAIAdam(model.parameters(),
                           lr=opt.train.dynamic.lr,
                           schedule=opt.train.static.lrsched,
                           warmup=opt.train.static.lrwarm,
                           t_total=meta.iterations,
                           b1=opt.train.static.b1,
                           b2=opt.train.static.b2,
                           e=opt.train.static.e,
                           l2=opt.train.static.l2,
                           vector_l2=opt.train.static.vl2,
                           max_grad_norm=opt.train.static.clip)

    scorers = ["bleu", "rouge", "cider"]
    trainer = train.make_trainer(opt, meta, data_loader, model, optimizer)
    trainer.set_evaluator(opt, model, data_loader)

    trainer.run()
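
The vocabulary sizing in Example #4 (n_vocab = len(text_encoder.encoder) + n_ctx) only makes sense if position indices live in the same embedding table as word tokens, which is the original openai-gpt convention; a sketch of the implied index layout, with illustrative sizes:

# Sketch of the index layout implied by n_vocab = len(encoder) + n_ctx,
# assuming the model stores one learned position "token" per context slot
# in the same embedding table as word tokens (openai-gpt convention).
n_words = 40478 + 12        # BPE vocab + special tokens (illustrative sizes)
n_ctx = 17 + 35             # maxe1 + maxe2
n_vocab = n_words + n_ctx   # total rows in the embedding matrix

token_rows = range(0, n_words)           # word and special-token embeddings
position_rows = range(n_words, n_vocab)  # learned position embeddings
# The model sums embedding[token_id] + embedding[position_row] per slot.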
Example #5
opt.exp = "generation"

data_params = get_data_params(gens_file)

# e.g. sorted(["oReact", "oEffect", "oWant", "xAttr", "xEffect", "xIntent", "xNeed", "xReact", "xWant"])
categories = data_params["categories"]

opt.data.categories = data_params["categories"]

if "maxe1" in data_params:
    opt.data.maxe1 = data_params["maxe1"]
    opt.data.maxe2 = data_params["maxe2"]
    opt.data.maxr = data_params["maxr"]

path = "data/atomic/processed/generation/{}.pickle".format(
    utils.make_name_string(opt.data))
data_loader = data.make_data_loader(opt, categories)
loaded = data_loader.load_data(path)

refs = {}

for i in range(data_loader.sequences[split]["total"].size(0)):
    sequence = data_loader.sequences[split]["total"][i]
    tmp = sequence[:data_loader.max_event + 1]
    init = "".join([
        data_loader.vocab_decoder[i].replace('</w>',
                                             ' ').replace("<blank>", "___ ")
        for i in tmp[:-1].squeeze().tolist() if i
    ])
    attr = data_loader.vocab_decoder[tmp[-1].item()].strip("<>")
    Ref = sequence[data_loader.max_event + 1:]
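
The join in Example #5 undoes the BPE word-boundary marker </w> and restores blanks; distilled into a small helper, assuming vocab_decoder maps token ids to BPE strings and that id 0 is padding (it is skipped by the if-filter above):

# Helper distilled from the loop above; assumes vocab_decoder maps ids to
# BPE strings and that id 0 is padding (skipped, as in the original loop).
def decode_ids(ids, vocab_decoder):
    return "".join(
        vocab_decoder[tok].replace("</w>", " ").replace("<blank>", "___ ")
        for tok in ids if tok
    )

# usage: decode_ids(tmp[:-1].squeeze().tolist(), data_loader.vocab_decoder)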
Example #6
# Set the random seeds
torch.manual_seed(args.seed)
random.seed(args.seed)
if config.gpu_mode:
    torch.cuda.manual_seed_all(args.seed)

opt.train.dynamic.epoch = 0

print("Loading Data")

# Create (or empty) the generation output file.
f = open("atomic_generate.txt", "w")
f.close()
categories = opt.data.categories
x = "data/atomic/processed/generation/categories_oEffect#oReact#oWant#xAttr#xEffect#xIntent#xNeed#xReact#xWant-maxe1_17-maxe2_36-maxr_1.pickle"
path = x.format(utils.make_name_string(opt.data))

data_loader = data.make_data_loader(opt, categories)
loaded = data_loader.load_data(path)

data_loader.batch_size = opt.train.dynamic.bs

print("Done.")

categories = opt.data.categories
text_encoder = GPT2Tokenizer.from_pretrained('gpt2')
special = [data.start_token, data.end_token]
special += ["<{}>".format(cat) for cat in categories]
special += [data.blank_token]
special_tokens = {
    "cls_token": "[CLS]",
    "unk_token": "[UNK]",
}
Example #7
print(set_of_categories)

# Iterate over sets of categories to compute perplexities for
for eval_categories in set_of_categories:
    print(eval_categories)
    opt.eval.categories = eval_categories

    results_name = "{}/{}.{}".format(
        utils.make_name(opt,
                        prefix="results/{}/".format("losses"),
                        is_dir=True,
                        eval_=True), split, "pickle")
    print("Will save {} losses to {}".format(split, results_name))

    path = "data/atomic/processed/generation/{}.pickle".format(
        utils.make_name_string(opt.data).replace(
            "kr_{}".format(opt.data.get("kr", 1)), "kr_1"))
    data_loader = data.make_data_loader(opt, opt.data.categories)
    loaded = data_loader.load_data(path)

    data_loader.batch_size = opt.train.dynamic.bs

    print("Done.")

    text_encoder = TextEncoder(config.encoder_path, config.bpe_path)

    # Set special tokens
    formatted_categories = ["<{}>".format(cat) for cat in opt.data.categories]

    special = [data.start_token, data.end_token]
    special += formatted_categories
    special += [data.blank_token]
Example #8
def main(num):
    # Generate configuration files depending on experiment being run
    utils.generate_config_files("conceptnet", num)

    # Loads the correct configuration file
    config_file = "config/conceptnet/config_{}.json".format(num)

    print(config_file)

    # Read config file to option
    config = cfg.read_config(cfg.load_config(config_file))
    opt, meta = cfg.get_parameters(config)

    # config.gpu_mode = torch.cuda.is_available()

    # Set the random seeds
    torch.manual_seed(opt.train.static.seed)
    random.seed(opt.train.static.seed)
    if config.gpu_mode:
        torch.cuda.manual_seed_all(opt.train.static.seed)

    # Load the data
    splits = ["train", "dev", "test"]

    opt.train.dynamic.epoch = 0

    print("Loading Data")

    # Initialize path to pre-set data loader
    path = "data/conceptnet/processed/{}/{}.pickle".format(
        opt.exp, utils.make_name_string(opt.data))

    # Make data loader
    data_loader = data.make_data_loader(opt)
    loaded = data_loader.load_data(path)
    print(data_loader.sequences["train"]["total"].size(0))
    data_loader.opt = opt
    data_loader.batch_size = opt.train.dynamic.bs

    print("Done.")

    text_encoder = TextEncoder(config.encoder_path, config.bpe_path)

    categories = data.conceptnet_data.conceptnet_relations

    special = [data.start_token, data.end_token]
    special += ["<{}>".format(cat) for cat in categories]

    if loaded:
        text_encoder.encoder = data_loader.vocab_encoder
        text_encoder.decoder = data_loader.vocab_decoder
    else:
        for special_token in special:
            text_encoder.decoder[len(text_encoder.encoder)] = special_token
            text_encoder.encoder[special_token] = len(text_encoder.encoder)
        data_loader.make_tensors(text_encoder, special)

    # Set max size of different parts of relation
    context_size_e1 = data_loader.max_e1
    context_size_e2 = data_loader.max_e2
    context_size_r = data_loader.max_r

    opt.data.maxr = context_size_r

    n_special = len(special)
    n_ctx = context_size_e1 + context_size_r + context_size_e2
    n_vocab = len(text_encoder.encoder) + n_ctx

    print(data_loader.__dict__.keys())
    opt.net.vSize = n_vocab

    # Build Model
    print("Building Model")

    model = models.make_model(opt,
                              n_vocab,
                              n_ctx,
                              n_special,
                              load=(opt.net.init == "pt"))

    print("Done.")

    print("Files will be logged at: {}".format(
        utils.make_name(opt, prefix="results/losses/", is_dir=True,
                        eval_=True)))

    data_loader.reset_offsets("train", keys=["total"])

    data.set_max_sizes(data_loader)

    # Push to GPU
    if config.gpu_mode:
        print("Pushing to GPU: {}".format(config.gpu_index))
        cfg.device = config.gpu_index
        cfg.do_gpu = True
        torch.cuda.set_device(cfg.device)
        if config.multigpu:
            model = models.multi_gpu(model, config.gpu_indices).cuda()
        else:
            model.cuda(cfg.device)
        print("Done.")

    print("Training")

    optimizer = OpenAIAdam(model.parameters(),
                           lr=opt.train.dynamic.lr,
                           schedule=opt.train.static.lrsched,
                           warmup=opt.train.static.lrwarm,
                           t_total=meta.iterations,
                           b1=opt.train.static.b1,
                           b2=opt.train.static.b2,
                           e=opt.train.static.e,
                           l2=opt.train.static.l2,
                           vector_l2=opt.train.static.vl2,
                           max_grad_norm=opt.train.static.clip)

    trainer = train.make_trainer(opt, meta, data_loader, model, optimizer)
    print(data_loader.sequences["dev"]["total"].max())
    trainer.set_generator(opt, model, data_loader)
    trainer.set_evaluator(opt, model, data_loader)

    trainer.run()
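
The loaded branch in Example #8 matters: a pre-built loader already carries the extended vocabulary, so special tokens are appended only on a cold start. A standalone sketch of that append, assuming encoder (str to id) and decoder (id to str) are plain dicts:

# Standalone sketch of the special-token append in the "not loaded" branch.
encoder = {"hello</w>": 0, "world</w>": 1}   # toy BPE vocab
decoder = {v: k for k, v in encoder.items()}

special = ["<START>", "<END>", "<IsA>"]
for special_token in special:
    decoder[len(encoder)] = special_token    # next free id -> token string
    encoder[special_token] = len(encoder)    # token string -> next free id

assert encoder["<START>"] == 2 and decoder[2] == "<START>"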
Example #9
def main(num):
    # Generate configuration files depending on experiment being run
    #utils.generate_config_files("conceptnet", num)

    # Loads the correct configuration file
    config_file = "config/conceptnet/config_{}.json".format(num)

    print(config_file)

    # Read config file to option
    config = cfg.read_config(cfg.load_config(config_file))
    opt, meta = cfg.get_parameters(config)

    # config.gpu_mode = torch.cuda.is_available()

    # Set the random seeds
    torch.manual_seed(opt.train.static.seed)
    random.seed(opt.train.static.seed)
    if config.gpu_mode:
        torch.cuda.manual_seed_all(opt.train.static.seed)

    # Load the data
    splits = ["train", "dev", "test"]

    opt.train.dynamic.epoch = 0

    print("Loading Data")

    # Initialize path to pre-set data loader
    x = "data/conceptnet/processed/generation/rel_language-trainsize_100-devversion_12-maxe1_200-maxe2_200.pickle"
    path = x.format(opt.exp, utils.make_name_string(opt.data))
    print(path)

    # Make data loader
    data_loader = data.make_data_loader(opt)
    loaded = data_loader.load_data(path)
    #print(data_loader.sequences["train"]["total"].size(0))
    data_loader.opt = opt
    data_loader.batch_size = opt.train.dynamic.bs

    print("Done.")
    print(data_loader)

    #text_encoder = TextEncoder(config.encoder_path, config.bpe_path)
    # Replace the repo's TextEncoder with a Hugging Face GPT-2 tokenizer
    # and register the special tokens the pipeline expects.
    text_encoder = GPT2Tokenizer.from_pretrained("gpt2")
    special_tokens = {
        "cls_token": "[CLS]",
        "unk_token": "[UNK]",
    }
    text_encoder.add_special_tokens(special_tokens)

    #categories = data.conceptnet_data.conceptnet_relations

    special = [data.start_token, data.end_token]
    #special += ["<{}>".format(cat) for cat in categories]

    if loaded:
        text_encoder.encoder = data_loader.vocab_encoder
        text_encoder.decoder = data_loader.vocab_decoder
    else:
        for special_token in special:
            text_encoder.decoder[len(text_encoder.encoder)] = special_token
            text_encoder.encoder[special_token] = len(text_encoder.encoder)
        data_loader.make_tensors(text_encoder, special)

    # Set max size of different parts of relation
    context_size_i1 = data_loader.max_input1
    context_size_i2 = data_loader.max_input2
    context_size_i3 = data_loader.max_input3
    context_size_i4 = data_loader.max_input4
    context_size_o1 = data_loader.max_output1
    context_size_o2 = data_loader.max_output2
    context_size_o3 = data_loader.max_output3
    context_size_o4 = data_loader.max_output4

    #opt.data.maxr = context_size_r

    n_special = len(special)
    n_ctx = (context_size_i1 + context_size_i2 + context_size_i3 +
             context_size_i4 + context_size_o1 + context_size_o2 +
             context_size_o3 + context_size_o4)
    n_vocab = len(text_encoder.encoder) + n_ctx

    opt.net.vSize = n_vocab
    # Build Model
    print("Building Model")
    print(opt.net.init == "pt")
    model = models.make_model(opt, n_vocab, n_ctx, n_special)
    model.resize_token_embeddings(len(text_encoder))

    model_knowledge = model_knowledge_story.make_model(opt, n_vocab, n_ctx,
                                                       n_special)
    model_knowledge.resize_token_embeddings(len(text_encoder))

    print("Done.")

    print("Files will be logged at: {}".format(
        utils.make_name(opt, prefix="results/losses/", is_dir=True,
                        eval_=True)))

    data_loader.reset_offsets("train", keys=["total"])

    data.set_max_sizes(data_loader)

    # Push to GPU
    if config.gpu_mode:
        print("Pushing to GPU: {}".format(config.gpu_index))
        cfg.device = config.gpu_index
        cfg.do_gpu = True
        torch.cuda.set_device(cfg.device)
        if config.multigpu:
            #print("!!! I am here !!!")
            model = models.multi_gpu(model, config.gpu_indices).cuda()
            #model.to(f'cuda:{model.device_ids[0]}')
            model_knowledge = model_knowledge_story.multi_gpu(
                model_knowledge, config.gpu_indices).cuda()
            #model_knowledge.to(f'cuda:{model.device_ids[1]}')
        else:
            model.cuda(cfg.device)
            model_knowledge.cuda(cfg.device)
        print("Done.")

    print("Training")

    optimizer_m = OpenAIAdam(model.parameters(),
                             lr=opt.train.dynamic.lr,
                             schedule=opt.train.static.lrsched,
                             warmup=opt.train.static.lrwarm,
                             t_total=meta.iterations,
                             b1=opt.train.static.b1,
                             b2=opt.train.static.b2,
                             e=opt.train.static.e,
                             l2=opt.train.static.l2,
                             vector_l2=opt.train.static.vl2,
                             max_grad_norm=opt.train.static.clip)

    optimizer_k = Knowledge_Adam(model_knowledge.parameters(),
                                 lr=opt.train.dynamic.lr,
                                 schedule=opt.train.static.lrsched,
                                 warmup=opt.train.static.lrwarm,
                                 t_total=meta.iterations,
                                 b1=opt.train.static.b1,
                                 b2=opt.train.static.b2,
                                 e=opt.train.static.e,
                                 l2=opt.train.static.l2,
                                 vector_l2=opt.train.static.vl2,
                                 max_grad_norm=opt.train.static.clip)

    trainer = train.make_trainer(opt, meta, data_loader, model,
                                 model_knowledge, optimizer_m, optimizer_k)

    trainer.set_generator(opt, model, model_knowledge, data_loader)
    trainer.set_evaluator(opt, model, model_knowledge, data_loader)

    trainer.run()
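
Example #9 swaps the repo's TextEncoder for Hugging Face's GPT2Tokenizer; the essential round-trip is registering special tokens and resizing the model's embedding matrix to match. A minimal runnable sketch with the standard transformers API (the knowledge/story model split above is repo-specific):

from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
# Registering special tokens grows len(tokenizer) accordingly.
tokenizer.add_special_tokens({"cls_token": "[CLS]", "unk_token": "[UNK]"})

model = GPT2LMHeadModel.from_pretrained("gpt2")
# Grow the (tied) embedding matrix to cover the newly added token ids.
model.resize_token_embeddings(len(tokenizer))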
Example #10
]

special = [data.start_token, data.end_token]
special += ["<{}>".format(relation) for relation in relations]

encoder_path = "model/encoder_bpe_40000.json"
bpe_path = "model/vocab_40000.bpe"

text_encoder = TextEncoder(encoder_path, bpe_path)

for special_token in special:
    text_encoder.decoder[len(text_encoder.encoder)] = special_token
    text_encoder.encoder[special_token] = len(text_encoder.encoder)

data_loader = cdata.GenerationDataLoader(opt)
data_loader.load_data("data/conceptnet/")

data_loader.make_tensors(text_encoder, special, test=False)

opt.data.maxr = data_loader.max_r

save_path = "data/conceptnet/processed/generation"
save_name = os.path.join(save_path,
                         "{}.pickle".format(utils.make_name_string(opt.data)))

utils.mkpath(save_path)

print("Data Loader will be saved to {}".format(save_name))

torch.save(data_loader, save_name)
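
The save above pairs with a plain torch.load on the consuming side, presumably what the load_data(path) calls in the earlier examples wrap; a sketch of the round-trip:

# Round-trip sketch: restore the pickled loader and check one field.
restored = torch.load(save_name)
assert restored.max_r == data_loader.max_r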
Example #11
special = [data.start_token, data.end_token]  # , data.sep_token]
special += ["<{}>".format(cat) for cat in categories]
special += [data.blank_token]

for special_token in special:
    text_encoder.decoder[len(encoder)] = special_token
    encoder[special_token] = len(encoder)

save_path = "data/atomic/processed/{}".format(opt.exp)
utils.mkpath(save_path)

if opt.pickled_data:
    save_name = opt.pickled_data
else:
    save_name = os.path.join(save_path, "{}.pickle".format(utils.make_name_string(opt.data)))

data_loader = data.make_data_loader(opt, categories)
data_loader.load_data("data/atomic/")
random.shuffle(data_loader.data["dev"]["total"])

data_loader.make_tensors(text_encoder, special, test=False)
data_loader.reset_offsets()


opt.data.maxe1 = data_loader.max_event
opt.data.maxe2 = data_loader.max_effect
opt.data.maxr = 1

if opt.pickled_data:
    save_name = opt.pickled_data