Пример #1
0
def load_model_file(model_file):
    """Load a serialized checkpoint and unpack it.

    Prints the checkpoint path, then returns the saved training options
    and the model weights stored under "opt" and "state_dict".
    """
    print(model_file)
    checkpoint = data.load_checkpoint(model_file)
    return checkpoint["opt"], checkpoint["state_dict"]
Пример #2
0
# --- Example #2: evaluation setup for the ATOMIC dataset ---------------------
# Fragment of a larger script: `parser`, `utils`, `cfg`, `data`, `torch`
# and `random` are defined/imported elsewhere.
args = parser.parse_args()
split = args.split  # data split to evaluate -- presumably train/dev/test; confirm against caller

# Generate configuration files depending on experiment being run
utils.generate_config_files("atomic", args.experiment_num, eval_mode=True)

# Loads the correct configuration file
config_file = "config/atomic/config_{}.json".format(args.experiment_num)

# Read config file to option
config = cfg.read_config(cfg.load_config(config_file))
cfg.device = args.device  # device comes from the CLI here (Example #3 uses config.gpu_index instead)

eval_opt = cfg.get_eval_parameters(config)

# Load the serialized checkpoint; it bundles training options with weights
model_stuff = data.load_checkpoint(args.model_name)

opt = model_stuff["opt"]
opt.eval.update(eval_opt)  # overlay evaluation-time parameters onto the saved options

# Set the random seeds
torch.manual_seed(args.seed)
random.seed(args.seed)
if config.gpu_mode:
    torch.cuda.manual_seed_all(args.seed)

# Restart epoch counting for this evaluation run
opt.train.dynamic.epoch = 0

print("Loading Data")

# NOTE(review): this handle is never closed in the visible fragment --
# prefer a `with` block if the rest of the script allows it.
f = open("atomic_generate.txt", "w")
Пример #3
0
# --- Example #3: ATOMIC evaluation with per-category perplexity --------------
# Fragment: `utils`, `args`, `cfg`, `data` are defined/imported elsewhere.
# eval_mode = True means changes are taken from config/atomic/eval_changes.json
utils.generate_config_files("atomic", args.experiment_num, eval_mode=True)

# Loads the correct configuration file
config_file = "config/atomic/config_{}.json".format(args.experiment_num)

print(config_file)

# Read config file to option
config = cfg.read_config(cfg.load_config(config_file))
cfg.device = config.gpu_index  # device taken from the config file (not the CLI, unlike Example #2)
eval_opt = cfg.get_eval_parameters(config)

# Batch multiple models
model_file = data.load_checkpoint(args.model_name)
opt = model_file["opt"]

opt.eval.update(eval_opt)  # overlay evaluation-time parameters onto the saved options

print("Loading Data")

# Do multiple sets of categories:
# compute individual perplexity of categories in addition to total perplexity
if len(opt.data.categories) == 1:
    # Only one category: the full set IS that category, nothing to break out
    set_of_categories = [opt.data.categories]
else:
    # All categories together, followed by each category as a singleton list
    set_of_categories = [opt.data.categories] + [[i]
                                                 for i in opt.data.categories]

print(set_of_categories)
Пример #4
0
# --- Example #4: interactive ConceptNet generation setup ---------------------
# Fragment: `argparse` is imported above; `data` and `utils` come from the
# surrounding project.
parser = argparse.ArgumentParser()
parser.add_argument("--device", type=int, default=0)
# Default checkpoint path encodes the full hyperparameter configuration of a
# pretrained transformer in its directory names.
parser.add_argument(
    "--model_file",
    type=str,
    default=
    "models/conceptnet-generation/iteration-500-100000/transformer/rel_language-trainsize_100-devversion_12-maxe1_10-maxe2_15/model_transformer-nL_12-nH_12-hSize_768-edpt_0.1-adpt_0.1-rdpt_0.1-odpt_0.1-pt_gpt-afn_gelu-init_pt-vSize_40545/exp_generation-seed_123-l2_0.01-vl2_T-lrsched_warmup_linear-lrwarm_0.002-clip_1-loss_nll-b2_0.999-b1_0.9-e_1e-08/bs_1-smax_40-sample_greedy-numseq_1-gs_full-es_full-categories_None/1e-05_adam_64_15500.pickle"
)
parser.add_argument("--output_file", type=str, default="tmp/output.json")
parser.add_argument("--input", type=str, default="")
parser.add_argument("--sampling_algorithm", type=str, default="greedy")

args = parser.parse_args()

# Load the checkpoint and recover the options it was trained with
model_stuff = data.load_checkpoint(args.model_file)
opt = model_stuff["opt"]

relations = data.conceptnet_data.conceptnet_relations

# Older checkpoints may predate the `maxr` option; fill in a backward-
# compatible default that depends on the relation representation.
if opt.data.get("maxr", None) is None:
    if opt.data.rel == "language":
        opt.data.maxr = 5
    else:
        opt.data.maxr = 1

# The processed-data filename is derived from the data options themselves
path = "comet-commonsense/data/conceptnet/processed/generation/{}.pickle".format(
    utils.make_name_string(opt.data))
data_loader = data.make_data_loader(opt)
loaded = data_loader.load_data(path)
Пример #5
0
# --- Example #5: evaluation setup using a default config ---------------------
# Fragment: `args`, `ac_conf`, `cfg`, `data`, `abs_path`, `torch`, `random`
# and `encode` are defined/imported elsewhere.
if args.model_name is None:  # PEP 8: compare to None with `is`, not `==`
    print("Please enter model name.")
    exit()

split = args.split

# configure evaluation run
config = ac_conf.load_default()
config.train.dynamic.bs = 32  # evaluation batch size
#config.gpu_index = int(args.gpu_num)
meta = ac_conf.get_meta(config)

eval_opt = cfg.get_eval_parameters(config)

# Load the checkpoint onto the CPU (gpu=False) and overlay eval parameters
checkpoint = data.load_checkpoint(abs_path(args.model_name), gpu=False)
opt = checkpoint["opt"]
opt.eval.update(eval_opt)

# Set the random seeds (seed comes from the checkpoint's training options)
torch.manual_seed(opt.train.static.seed)
random.seed(opt.train.static.seed)
#if config.gpu_mode:
#    torch.cuda.manual_seed_all(opt.train.static.seed)

# Restart epoch counting for this evaluation run
opt.train.dynamic.epoch = 0

print("Loading Data")

# load data
relations = encode.get_relations()
# --- Likely the start of Example #6 (its header appears lost in extraction) --
# Fragment: `parser`, `cfg`, `data`, `torch`, `random` are defined elsewhere.
args = parser.parse_args()
split = args.split  # data split to evaluate

# Generate configuration files depending on experiment being run
#utils.generate_config_files("conceptnet", args.experiment_num, eval_mode=True)

# Loads the correct configuration file
config_file = "config/conceptnet/config_{}.json".format(args.experiment_num)

# Read config file to option
config = cfg.read_config(cfg.load_config(config_file))
cfg.device = args.device
eval_opt = cfg.get_eval_parameters(config)

# Two checkpoints are loaded: the main model and a separate knowledge model
model_stuff = data.load_checkpoint(args.model_name)
model_know_stuff = data.load_checkpoint(args.model_knowledge_name)

# Options are taken from the MAIN model only; the knowledge model's options
# are not used in this fragment
opt = model_stuff["opt"]
opt.eval.update(eval_opt)

# Set the random seeds (seed comes from the checkpoint's training options)
torch.manual_seed(opt.train.static.seed)
random.seed(opt.train.static.seed)
if config.gpu_mode:
    torch.cuda.manual_seed_all(opt.train.static.seed)

# Where to find the data
splits = ["train", "dev", "test"]

# Restart epoch counting for this evaluation run
opt.train.dynamic.epoch = 0
Пример #7
0
    # Tail of a task-configuration dict (its opening lines are not in view).
    # Each value is a pair of booleans per task name -- meaning of the two
    # flags cannot be determined from this fragment; TODO confirm upstream.
    "sentence": (True, True),
    "reiss": (False, False),
    "maslow": (False, False),
    "plutchik": (False, False),
    "plutchik16": (False, False),
    "entity": (True, False)
}

# --- Example #7: classification-model evaluation setup -----------------------
# Fragment: `cfg` and `data` are imported elsewhere.
splits = ["test"]
split = splits[0]  # only the test split is evaluated here

config_file = "config/class_config.json"
config = cfg.read_config(cfg.load_config(config_file))

# The checkpoint path comes from the config file, not the CLI
print("Loading model from: {}".format(config.load_model_name))
loaded_model = data.load_checkpoint(config.load_model_name)
opt = loaded_model["opt"]

print("Doing task: {}".format(opt.task))
# Fixed typo in the status message ("Doig" -> "Doing")
print("Doing granularity: {}".format(opt.granularity))

# Pick the data-loader implementation based on the encoder architecture.
if opt.net.enc.model in ["ren", "npn"]:
    # Memory-based encoders ("ren"/"npn") use the memory data loader
    data_loader = data.MemoryModelDataLoader()
    data_loader.load_vocabs(vocab_paths, vocab_text)
    data_loader = data_loader.load_data(
        opt, splits, opt.task, dl_type="memory",
        granularity=opt.granularity, exist=True)
else:
    # All other encoders use the neural data loader
    data_loader = data.NeuralModelDataLoader()
    data_loader.load_vocabs(vocab_paths, vocab_text)
    # NOTE(review): this call is truncated in the extracted source
    data_loader = data_loader.load_data(
Пример #8
0
}

splits = ["dev", "test"]

print("Loading Data")
print opt

# Don't save models
meta.save = True

opt.epochs = 100

# Load model
print 'Loading model from: {}'.format(config["load_model_{}_{}".format(
    opt.net.enc.model, opt.task)])
loaded_model = data.load_checkpoint(config["load_model_{}_{}".format(
    opt.net.enc.model, opt.task)])

# Load configuration
old_opt = loaded_model["opt"]

if old_opt.net.enc.model != opt.net.enc.model:
    print "Not the same model being run. Ending"
    raise

# Save number of epochs trained for pretrained model
# Good for tracking the pretrained models we used
opt.net.enc["prev#"] = config["load_model_{}_{}".format(
    opt.net.enc.model, opt.task)][:-7].split("_")[-1]

opt.net.gendec = old_opt.net.gendec