for bias in biases:
    f.write(str(bias) + "\n")

# Arguments for this script
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--corelm-model", dest="corelm_model", required=True, help="The input CoreLM model file")
parser.add_argument("-v", "--vocab-file", dest="vocab_path", required=True, help="The input vocabulary")
parser.add_argument("-dir", "--directory", dest="out_dir", help="The output directory for log file, model, etc.")
args = parser.parse_args()

U.set_theano_device('cpu', 1)
from dlm.models.mlp import MLP

if args.out_dir is None:
    args.out_dir = 'corelm_convert-' + U.curr_time()
U.mkdir_p(args.out_dir)

# Loading the CoreLM model and creating the classifier
L.info("Loading CoreLM model")
classifier = MLP(model_path=args.corelm_model)
args_nn = classifier.args
params_nn = classifier.params
U.xassert(len(params_nn) == 7, "CoreLM model is not compatible with the NPLM architecture: two hidden layers and an output linear layer are required.")

# Transpose the hidden-layer weight matrices to match the NPLM layout
embeddings = params_nn[0].get_value()
W1 = params_nn[1].get_value()
W1 = np.transpose(W1)
b1 = params_nn[2].get_value()
W2 = params_nn[3].get_value()
W2 = np.transpose(W2)
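# A sketch of how a transposed weight matrix could then be written out, one row
# per line, paralleling the bias writer at the top of this fragment. This
# helper is illustrative only (an assumption), not part of the original script:
def write_matrix(f, matrix):
    for row in matrix:
        f.write(" ".join(str(value) for value in row) + "\n")   # one row per line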
# Arguments for this script
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--nplm-model", dest="nplm_model", required=True, help="The input NPLM model file")
parser.add_argument("-dir", "--directory", dest="out_dir", help="The output directory for log file, model, etc.")
args = parser.parse_args()

U.set_theano_device('cpu', 1)
from dlm.models.mlp import MLP

if args.out_dir is None:
    args.out_dir = 'nplm_convert-' + U.curr_time()
U.mkdir_p(args.out_dir)

# Reading the NPLM model: the file is organised into sections, each introduced
# by one of the headers below and terminated by the next header (or \end)
args_nn = argparse.Namespace()
model_dict = dict()
lines = []
req_attribs = ['\\config', '\\vocab', '\\input_vocab', '\\output_vocab', '\\input_embeddings',
               '\\hidden_weights 1', '\\hidden_biases 1', '\\hidden_weights 2', '\\hidden_biases 2',
               '\\output_weights', '\\output_biases', '\\end']
attrib = ''
with open(args.nplm_model, 'r') as f_model:
    for line in f_model:
        line = line.strip()
        if line in req_attribs:
            if attrib != '':
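# The parsing loop above is cut off at the section-boundary check. For clarity,
# a self-contained sketch of the same section-based parsing pattern, assuming
# each header line opens a new section whose body lines are collected until the
# next header appears (illustrative only; everything except req_attribs-style
# headers is made up for this sketch, not taken from the original source):
def parse_sections(path, headers):
    sections = {}
    current, body = '', []
    with open(path, 'r') as f:
        for raw in f:
            line = raw.strip()
            if line in headers:
                if current != '':
                    sections[current] = body   # close out the previous section
                current, body = line, []       # start the new section
            else:
                body.append(line)              # accumulate body lines
    return sections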
for bias in biases:
    f.write(str(bias) + "\n")

# Arguments for this script
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--primelm-model", dest="primelm_model", required=True, help="The input PrimeLM model file")
parser.add_argument("-v", "--vocab-file", dest="vocab_path", required=True, help="The input vocabulary")
parser.add_argument("-dir", "--directory", dest="out_dir", help="The output directory for log file, model, etc.")
args = parser.parse_args()

U.set_theano_device('cpu', 1)
from dlm.models.mlp import MLP

if args.out_dir is None:
    args.out_dir = 'primelm_convert-' + U.curr_time()
U.mkdir_p(args.out_dir)

# Loading the PrimeLM model and creating the classifier
L.info("Loading PrimeLM model")
classifier = MLP(model_path=args.primelm_model)
args_nn = classifier.args
params_nn = classifier.params
U.xassert(len(params_nn) == 7, "PrimeLM model is not compatible with the NPLM architecture: two hidden layers and an output linear layer are required.")

# Transpose the hidden-layer weight matrices to match the NPLM layout
embeddings = params_nn[0].get_value()
W1 = params_nn[1].get_value()
W1 = np.transpose(W1)
b1 = params_nn[2].get_value()
W2 = params_nn[3].get_value()
W2 = np.transpose(W2)
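# For reference, a sketch of the assumed ordering of the seven parameters
# checked above, mirroring the NPLM sections (\input_embeddings,
# \hidden_weights 1, \hidden_biases 1, \hidden_weights 2, \hidden_biases 2,
# \output_weights, \output_biases). Indices 0-3 appear in the fragment; the
# names and the meaning of indices 4-6 below are assumptions, not the source:
param_names = ['input_embeddings',
               'hidden_weights 1', 'hidden_biases 1',
               'hidden_weights 2', 'hidden_biases 2',
               'output_weights', 'output_biases']
for name, param in zip(param_names, params_nn):
    L.info(name + ': ' + str(param.get_value().shape))   # log each parameter's shape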
parser.add_argument("--clip-threshold", dest="clip_threshold", default=0, type=float, help="If threshold > 0, clips gradients to [-threshold, +threshold]. Default: 0 (disabled)") parser.add_argument("--weighted-emb", dest="weighted_emb", action='store_true', help="Use this flag to add per-word weights to embeddings.") parser.add_argument("--threads", dest="threads", default=8, type=int, help="Number of threads when device is CPU. Default: 8") parser.add_argument("--emb-path", dest="emb_path", help="(optional) Word embeddings file.") parser.add_argument("--vocab", dest="vocab", help="(optional) Only needed if --emb-path is used.") parser.add_argument("--quiet", dest="quiet", action='store_true', help="Use this flag to disable the logger.") parser.add_argument( "--adjust-learning-rate", dest="enable_lr_adjust", action='store_true', help="Enable learning rate adjustment") #parser.add_argument("-m","--model-file", dest="model_path", help="The file path to load the model from") args = parser.parse_args() args.cwd = os.getcwd() if args.out_dir is None: args.out_dir = 'corelm-' + U.curr_time() U.mkdir_p(args.out_dir) L.quiet = args.quiet L.set_file_path(os.path.abspath(args.out_dir) + "/log.txt") L.info('Command: ' + ' '.join(sys.argv)) curr_version = U.curr_version() if curr_version: L.info("Version: " + curr_version) if args.emb_path: U.xassert(args.vocab, 'When --emb-path is used, vocab file must be given too (using --vocab).') if args.loss_function == "nll":
"--vocab-file", dest="vocab_path", required=True, help="The input vocabulary") parser.add_argument("-dir", "--directory", dest="out_dir", help="The output directory for log file, model, etc.") args = parser.parse_args() U.set_theano_device('cpu', 1) from dlm.models.mlp import MLP if args.out_dir is None: args.out_dir = 'primelm_convert-' + U.curr_time() U.mkdir_p(args.out_dir) # Loading PrimeLM model and creating classifier class L.info("Loading PrimeLM model") classifier = MLP(model_path=args.primelm_model) args_nn = classifier.args params_nn = classifier.params U.xassert( len(params_nn) == 7, "PrimeLM model is not compatible with NPLM architecture. 2 hidden layers and an output linear layer is required." ) embeddings = params_nn[0].get_value() W1 = params_nn[1].get_value() W1 = np.transpose(W1)
dest="enable_lr_adjust", action='store_true', help="Enable learning rate adjustment") parser.add_argument("-bm", "--base-model", dest="base_model_path", help="Base model used for adaptation") #parser.add_argument("-m","--model-file", dest="model_path", help="The file path to load the model from") args = parser.parse_args() args.cwd = os.getcwd() if args.out_dir is None: args.out_dir = 'primelm-' + U.curr_time() U.mkdir_p(args.out_dir) L.quiet = args.quiet L.set_file_path(os.path.abspath(args.out_dir) + "/log.txt") L.info('Command: ' + ' '.join(sys.argv)) curr_version = U.curr_version() if curr_version: L.info("Version: " + curr_version) if args.emb_path: U.xassert( args.vocab, 'When --emb-path is used, vocab file must be given too (using --vocab).'
dest="quiet", action='store_true', help="Use this flag to disable the logger.") parser.add_argument("--adjust-learning-rate", dest="enable_lr_adjust", action='store_true', help="Enable learning rate adjustment") #parser.add_argument("-m","--model-file", dest="model_path", help="The file path to load the model from") args = parser.parse_args() args.cwd = os.getcwd() if args.out_dir is None: args.out_dir = 'corelm-' + U.curr_time() U.mkdir_p(args.out_dir) L.quiet = args.quiet L.set_file_path(os.path.abspath(args.out_dir) + "/log.txt") L.info('Command: ' + ' '.join(sys.argv)) curr_version = U.curr_version() if curr_version: L.info("Version: " + curr_version) if args.emb_path: U.xassert( args.vocab, 'When --emb-path is used, vocab file must be given too (using --vocab).'
"--vocab-file", dest="vocab_path", required=True, help="The input vocabulary") parser.add_argument("-dir", "--directory", dest="out_dir", help="The output directory for log file, model, etc.") args = parser.parse_args() U.set_theano_device('cpu', 1) from dlm.models.mlp import MLP if args.out_dir is None: args.out_dir = 'corelm_convert-' + U.curr_time() U.mkdir_p(args.out_dir) # Loading CoreLM model and creating classifier class L.info("Loading CoreLM model") classifier = MLP(model_path=args.corelm_model) args_nn = classifier.args params_nn = classifier.params U.xassert( len(params_nn) == 7, "CoreLM model is not compatible with NPLM architecture. 2 hidden layers and an output linear layer is required." ) embeddings = params_nn[0].get_value() W1 = params_nn[1].get_value() W1 = np.transpose(W1)