Code Example #1
File: functions.py Project: deepmipt/deepy
def make_model(opt, n_vocab, n_ctx, state_dict):
    model = models.make_model(opt, n_vocab, n_ctx, return_acts=True, return_probs=False)

    models.load_state_dict(model, state_dict)

    model.eval()
    return model
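A minimal usage sketch of this helper is shown below. It assumes a checkpoint saved with a "state_dict" entry, mirroring the loading pattern in Code Example #8; the file path and the source of opt, n_vocab, and n_ctx are placeholders, not part of the example above.

import torch

# Hypothetical checkpoint path; a real run would point at a trained model file.
model_file = torch.load("pretrained_models/atomic_pretrained_model.pickle",
                        map_location=torch.device("cpu"))

# opt, n_vocab, and n_ctx are assumed to come from the experiment
# configuration, as computed in Code Example #2.
model = make_model(opt, n_vocab, n_ctx, model_file["state_dict"])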
Code Example #2
def main(num):
    # Generate configuration files depending on experiment being run
    utils.generate_config_files("atomic", num)

    # Loads the correct configuration file
    config_file = "config/atomic/config_{}.json".format(num)

    print(config_file)

    # Read config file to option
    config = cfg.read_config(cfg.load_config(config_file))
    opt, meta = cfg.get_parameters(config)

    # Set the random seeds
    torch.manual_seed(opt.train.static.seed)
    random.seed(opt.train.static.seed)
    if config.gpu_mode:
        torch.cuda.manual_seed_all(opt.train.static.seed)

    # Where to find the data
    splits = ["train", "dev", "test"]

    opt.train.dynamic.epoch = 0

    print("Loading Data")

    categories = opt.data.categories

    path = "data/atomic/processed/{}/{}.pickle".format(
        opt.exp, utils.make_name_string(opt.data))

    data_loader = data.make_data_loader(opt, categories)
    loaded = data_loader.load_data(path)
    print(data_loader.sequences["train"]["total"].size(0))
    data_loader.opt = opt
    data_loader.batch_size = opt.train.dynamic.bs

    print("Done.")

    # Initialize text_encoder
    text_encoder = TextEncoder(config.encoder_path, config.bpe_path)

    special = [data.start_token, data.end_token]
    special += ["<{}>".format(cat) for cat in categories]
    special += [data.blank_token]

    text_encoder.encoder = data_loader.vocab_encoder
    text_encoder.decoder = data_loader.vocab_decoder

    opt.data.maxe1 = data_loader.max_event
    opt.data.maxe2 = data_loader.max_effect
    opt.data.maxr = data.atomic_data.num_delimiter_tokens["category"]

    n_special = len(special)
    n_ctx = opt.data.maxe1 + opt.data.maxe2
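    # Note: n_ctx is added to the vocabulary size because, in this GPT-style
    # implementation, position indices share the token embedding matrix.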
    n_vocab = len(text_encoder.encoder) + n_ctx

    print(data_loader.__dict__.keys())
    opt.net.vSize = n_vocab

    print("Building Model")

    model = models.make_model(opt,
                              n_vocab,
                              n_ctx,
                              n_special,
                              load=(opt.net.init == "pt"))

    print("Done.")

    print("Files will be logged at: {}".format(
        utils.make_name(opt, prefix="results/losses/", is_dir=True,
                        eval_=True)))

    data_loader.reset_offsets("train")

    # Get number of examples
    data.set_max_sizes(data_loader)

    if config.gpu_mode:
        print("Pushing to GPU: {}".format(config.gpu_index))
        cfg.device = config.gpu_index
        cfg.do_gpu = True
        torch.cuda.set_device(cfg.device)
        if config.multigpu:
            model = models.multi_gpu(model, config.gpu_indices).cuda()
        else:
            model.cuda(cfg.device)
        print("Done.")

    print("Training")

    optimizer = OpenAIAdam(model.parameters(),
                           lr=opt.train.dynamic.lr,
                           schedule=opt.train.static.lrsched,
                           warmup=opt.train.static.lrwarm,
                           t_total=meta.iterations,
                           b1=opt.train.static.b1,
                           b2=opt.train.static.b2,
                           e=opt.train.static.e,
                           l2=opt.train.static.l2,
                           vector_l2=opt.train.static.vl2,
                           max_grad_norm=opt.train.static.clip)

    scorers = ["bleu", "rouge", "cider"]
    trainer = train.make_trainer(opt, meta, data_loader, model, optimizer)
    trainer.set_evaluator(opt, model, data_loader)

    trainer.run()
Code Example #3
opt.data.maxe1 = data_loader.max_event
opt.data.maxe2 = data_loader.max_effect
opt.data.maxr = data.atomic_data.num_delimiter_tokens["category"]

n_special = len(special)
n_ctx = opt.data.maxe1 + opt.data.maxe2
n_vocab = len(text_encoder.encoder) + n_ctx

print(data_loader.__dict__.keys())
opt.net.vSize = n_vocab

print("Building Model")

model = models.make_model(opt,
                          n_vocab,
                          n_ctx,
                          n_special,
                          load=(opt.net.init == "pt"))

print("Building Model")

print(opt.exp)
print(n_vocab)

model = models.make_model(opt,
                          n_vocab,
                          n_ctx,
                          0,
                          load=False,
                          return_acts=True,
                          return_probs=False)
Code Example #4
    # Prune data from data loader depending on the evaluation set
    if not all([i in opt.eval.categories for i in opt.data.categories]):
        print("Pruning Data")
        print("Original number of evaluation sequences: {}".format(
            len(data_loader.sequences[split]["total"])))

        adata.prune_data_for_evaluation(
            data_loader, ["<{}>".format(cat) for cat in opt.eval.categories],
            split)

        print("Pruned number of evaluation sequences for subset: {}".format(
            len(data_loader.sequences[split]["total"])))

    print("Building Model")

    model = models.make_model(opt, n_vocab, n_ctx, n_special, load=False)

    print("Loading Weights")
    models.load_state_dict(model, model_file["state_dict"])

    print("Done Loading Weights")

    model.eval()

    # Initialize variable for # of examples to cycle through
    data.set_max_sizes(data_loader, force_split=split)

    evaluator = evaluate.make_evaluator(opt, model, data_loader)
    evaluator.batch_variables["split"] = split
    model.cuda(cfg.device)
Code Example #5
def main(num):
    # Generate configuration files depending on experiment being run
    utils.generate_config_files("conceptnet", num)

    # Loads the correct configuration file
    config_file = "config/conceptnet/config_{}.json".format(num)

    print(config_file)

    # Read config file to option
    config = cfg.read_config(cfg.load_config(config_file))
    opt, meta = cfg.get_parameters(config)

    # config.gpu_mode = torch.cuda.is_available()

    # Set the random seeds
    torch.manual_seed(opt.train.static.seed)
    random.seed(opt.train.static.seed)
    if config.gpu_mode:
        torch.cuda.manual_seed_all(opt.train.static.seed)

    # Load the data
    splits = ["train", "dev", "test"]

    opt.train.dynamic.epoch = 0

    print("Loading Data")

    # Initialize path to pre-set data loader
    path = "data/conceptnet/processed/{}/{}.pickle".format(
        opt.exp, utils.make_name_string(opt.data))

    # Make data loader
    data_loader = data.make_data_loader(opt)
    loaded = data_loader.load_data(path)
    print(data_loader.sequences["train"]["total"].size(0))
    data_loader.opt = opt
    data_loader.batch_size = opt.train.dynamic.bs

    print("Done.")

    text_encoder = TextEncoder(config.encoder_path, config.bpe_path)

    categories = data.conceptnet_data.conceptnet_relations

    special = [data.start_token, data.end_token]
    special += ["<{}>".format(cat) for cat in categories]

    if loaded:
        text_encoder.encoder = data_loader.vocab_encoder
        text_encoder.decoder = data_loader.vocab_decoder
    else:
        for special_token in special:
            text_encoder.decoder[len(text_encoder.encoder)] = special_token
            text_encoder.encoder[special_token] = len(text_encoder.encoder)
        data_loader.make_tensors(text_encoder, special)

    # Set max size of different parts of relation
    context_size_e1 = data_loader.max_e1
    context_size_e2 = data_loader.max_e2
    context_size_r = data_loader.max_r

    opt.data.maxr = context_size_r

    n_special = len(special)
    n_ctx = context_size_e1 + context_size_r + context_size_e2
    n_vocab = len(text_encoder.encoder) + n_ctx

    print(data_loader.__dict__.keys())
    opt.net.vSize = n_vocab

    # Build Model
    print("Building Model")

    model = models.make_model(opt,
                              n_vocab,
                              n_ctx,
                              n_special,
                              load=(opt.net.init == "pt"))

    print("Done.")

    print("Files will be logged at: {}".format(
        utils.make_name(opt, prefix="results/losses/", is_dir=True,
                        eval_=True)))

    data_loader.reset_offsets("train", keys=["total"])

    data.set_max_sizes(data_loader)

    # Push to GPU
    if config.gpu_mode:
        print("Pushing to GPU: {}".format(config.gpu_index))
        cfg.device = config.gpu_index
        cfg.do_gpu = True
        torch.cuda.set_device(cfg.device)
        if config.multigpu:
            model = models.multi_gpu(model, config.gpu_indices).cuda()
        else:
            model.cuda(cfg.device)
        print("Done.")

    print("Training")

    optimizer = OpenAIAdam(model.parameters(),
                           lr=opt.train.dynamic.lr,
                           schedule=opt.train.static.lrsched,
                           warmup=opt.train.static.lrwarm,
                           t_total=meta.iterations,
                           b1=opt.train.static.b1,
                           b2=opt.train.static.b2,
                           e=opt.train.static.e,
                           l2=opt.train.static.l2,
                           vector_l2=opt.train.static.vl2,
                           max_grad_norm=opt.train.static.clip)

    trainer = train.make_trainer(opt, meta, data_loader, model, optimizer)
    print(data_loader.sequences["dev"]["total"].max())
    trainer.set_generator(opt, model, data_loader)
    trainer.set_evaluator(opt, model, data_loader)

    trainer.run()
Code Example #6
context_size_e1 = data_loader.max_e1
context_size_e2 = data_loader.max_e2
context_size_r = data_loader.max_r

n_special = len(special)
n_ctx = context_size_e1 + context_size_e2 + context_size_r
n_vocab = len(text_encoder.encoder) + n_ctx

print(data_loader.__dict__.keys())
opt.net.vSize = n_vocab

print("Building Model")

print(opt.exp)

model = models.make_model(
    opt, n_vocab, n_ctx, 0, load=False, return_acts=True, return_probs=False)

model.load_state_dict(model_stuff["state_dict"])

if config.gpu_mode:
    print("Pushing to GPU: {}".format(config.gpu_index))
    cfg.device = config.gpu_index
    cfg.do_gpu = True
    torch.cuda.set_device(cfg.device)
    model.cuda(cfg.device)
    print("Done.")

model.eval()

device = cfg.device
model.to(device)
Code Example #7
def main(num):
    # Generate configuration files depending on experiment being run
    #utils.generate_config_files("conceptnet", num)

    # Loads the correct configuration file
    config_file = "config/conceptnet/config_{}.json".format(num)

    print(config_file)

    # Read config file to option
    config = cfg.read_config(cfg.load_config(config_file))
    opt, meta = cfg.get_parameters(config)

    # config.gpu_mode = torch.cuda.is_available()

    # Set the random seeds
    torch.manual_seed(opt.train.static.seed)
    random.seed(opt.train.static.seed)
    if config.gpu_mode:
        torch.cuda.manual_seed_all(opt.train.static.seed)

    # Load the data
    splits = ["train", "dev", "test"]

    opt.train.dynamic.epoch = 0

    print("Loading Data")

    # Initialize path to pre-set data loader
    x = "data/conceptnet/processed/generation/rel_language-trainsize_100-devversion_12-maxe1_200-maxe2_200.pickle"
    path = x.format(opt.exp, utils.make_name_string(opt.data))
    print(path)

    # Make data loader
    data_loader = data.make_data_loader(opt)
    loaded = data_loader.load_data(path)
    #print(data_loader.sequences["train"]["total"].size(0))
    data_loader.opt = opt
    data_loader.batch_size = opt.train.dynamic.bs

    print("Done.")
    print(data_loader)

    #text_encoder = TextEncoder(config.encoder_path, config.bpe_path)
    text_encoder = GPT2Tokenizer.from_pretrained('gpt2')
    special_tokens = {
        "cls_token": "[CLS]",
        "unk_token": "[UNK]"
    }  #, "mask": '["MASK"]',"separator": '["SEP"]', "start_of_sentence": '["SOS"]', "end_of_sentence": '["EOS"]'}
    text_encoder = GPT2Tokenizer.from_pretrained("gpt2",
                                                 cls_token="[CLS]",
                                                 unk_token="[UNK]",
                                                 mask='["MASK"]',
                                                 separator='["SEP"]',
                                                 start_of_sentence='["SOS"]',
                                                 end_of_sentence='["EOS"]')
    text_encoder.add_special_tokens(special_tokens)

    #categories = data.conceptnet_data.conceptnet_relations

    special = [data.start_token, data.end_token]
    #special += ["<{}>".format(cat) for cat in categories]

    if loaded:
        text_encoder.encoder = data_loader.vocab_encoder
        text_encoder.decoder = data_loader.vocab_decoder
    else:
        for special_token in special:
            text_encoder.decoder[len(text_encoder.encoder)] = special_token
            text_encoder.encoder[special_token] = len(text_encoder.encoder)
        data_loader.make_tensors(text_encoder, special)

    # Set max size of different parts of relation
    context_size_i1 = data_loader.max_input1
    context_size_i2 = data_loader.max_input2
    context_size_i3 = data_loader.max_input3
    context_size_i4 = data_loader.max_input4
    context_size_o1 = data_loader.max_output1
    context_size_o2 = data_loader.max_output2
    context_size_o3 = data_loader.max_output3
    context_size_o4 = data_loader.max_output4

    #opt.data.maxr = context_size_r

    n_special = len(special)
    n_ctx = context_size_i1 + context_size_i2 + context_size_i3 + context_size_i4 + context_size_o1 + context_size_o2 + context_size_o3 + context_size_o4
    n_vocab = len(text_encoder.encoder) + n_ctx

    opt.net.vSize = n_vocab
    # Build Model
    print("Building Model")
    print(opt.net.init == "pt")
    model = models.make_model(opt, n_vocab, n_ctx, n_special)
    model.resize_token_embeddings(len(text_encoder))

    model_knowledge = model_knowledge_story.make_model(opt, n_vocab, n_ctx,
                                                       n_special)
    model_knowledge.resize_token_embeddings(len(text_encoder))

    print("Done.")

    print("Files will be logged at: {}".format(
        utils.make_name(opt, prefix="results/losses/", is_dir=True,
                        eval_=True)))

    data_loader.reset_offsets("train", keys=["total"])

    data.set_max_sizes(data_loader)

    # Push to GPU
    if config.gpu_mode:
        print("Pushing to GPU: {}".format(config.gpu_index))
        cfg.device = config.gpu_index
        cfg.do_gpu = True
        torch.cuda.set_device(cfg.device)
        if config.multigpu:
            #print("!!! I am here !!!")
            model = models.multi_gpu(model, config.gpu_indices).cuda()
            #model.to(f'cuda:{model.device_ids[0]}')
            model_knowledge = model_knowledge_story.multi_gpu(
                model_knowledge, config.gpu_indices).cuda()
            #model_knowledge.to(f'cuda:{model.device_ids[1]}')
        else:
            model.cuda(cfg.device)
            model_knowledge.cuda(cfg.device)
        print("Done.")

    print("Training")

    optimizer_m = OpenAIAdam(model.parameters(),
                             lr=opt.train.dynamic.lr,
                             schedule=opt.train.static.lrsched,
                             warmup=opt.train.static.lrwarm,
                             t_total=meta.iterations,
                             b1=opt.train.static.b1,
                             b2=opt.train.static.b2,
                             e=opt.train.static.e,
                             l2=opt.train.static.l2,
                             vector_l2=opt.train.static.vl2,
                             max_grad_norm=opt.train.static.clip)

    optimizer_k = Knowledge_Adam(model_knowledge.parameters(),
                                 lr=opt.train.dynamic.lr,
                                 schedule=opt.train.static.lrsched,
                                 warmup=opt.train.static.lrwarm,
                                 t_total=meta.iterations,
                                 b1=opt.train.static.b1,
                                 b2=opt.train.static.b2,
                                 e=opt.train.static.e,
                                 l2=opt.train.static.l2,
                                 vector_l2=opt.train.static.vl2,
                                 max_grad_norm=opt.train.static.clip)

    trainer = train.make_trainer(opt, meta, data_loader, model,
                                 model_knowledge, optimizer_m, optimizer_k)

    trainer.set_generator(opt, model, model_knowledge, data_loader)
    trainer.set_evaluator(opt, model, model_knowledge, data_loader)

    trainer.run()
Code Example #8
# Get component segmentation of sequences
# context_size_event = maximum size of an event description
# context_size_effect = maximum size of an event effect/intent/etc.
context_size_event = data_loader.max_event
context_size_effect = data_loader.max_effect

n_special = len(special_tokens)
n_ctx = context_size_event + context_size_effect
n_vocab = len(text_encoder.encoder) + n_ctx

config.net.vSize = n_vocab

print("Building Model")

model = models.make_model(config, n_vocab, n_ctx, n_special, load=False)

print("Loading Weights")
model_file = torch.load(args.model_name, map_location=torch.device("cpu"))
model.load_state_dict(model_file['state_dict'])
print("Done Loading Weights")

model.eval()

# Initialize variable for # of examples to cycle through
data.set_max_sizes(data_loader, force_split=split)

evaluator = evaluate.make_evaluator(config, model, data_loader)
evaluator.batch_variables["split"] = split
# model.cuda(cfg.device)