def main():
    """Convert a bilm checkpoint directory into a deployable ELMo bundle.

    Reads ``--save_dir`` (checkpoint files, ``vocabs.txt``, ``options.json``),
    dumps the weights to ``<out_dir>/weights.hdf5``, copies the vocabulary,
    and writes a corrected ``options.json`` into ``--out_dir``.
    """
    import shutil  # local import so the file-level import block is untouched

    parser = argparse.ArgumentParser()
    parser.add_argument('--save_dir', help='Location of checkpoint files')
    parser.add_argument('--out_dir', help='Location to keep vocabs, weights and options')
    args = parser.parse_args()

    # os.makedirs instead of os.system("mkdir ..."): portable, no shell
    # injection through the path, and idempotent when the directory exists.
    os.makedirs(args.out_dir, exist_ok=True)

    # Dump the trained weights into a single HDF5 file.
    dw(args.save_dir, os.path.join(args.out_dir, 'weights.hdf5'))

    # shutil.copy instead of os.system("cp ..."): same portability/injection
    # reasoning as above, and it raises on failure instead of silently ignoring it.
    shutil.copy(os.path.join(args.save_dir, 'vocabs.txt'), args.out_dir)

    options = load_options(os.path.join(args.save_dir, 'options.json'))
    # fix "n_characters" in options.json — inference needs one extra
    # character id compared to training (padding), hence the +1.
    options['char_cnn']["n_characters"] += 1
    save_options(options, os.path.join(args.out_dir, 'options.json'))
"""CLI: dump bilm checkpoint weights into an HDF5 file."""
import argparse
import os
import sys

# Make the current working directory importable so the local `bilm`
# package can be found (must happen before the bilm import below).
sys.path.append(os.getcwd())

from bilm.training import dump_weights as dw

if __name__ == '__main__':
    cli = argparse.ArgumentParser()
    cli.add_argument('--save_dir', help='Location of checkpoint files')
    cli.add_argument('--outfile', help='Output hdf5 file with weights')
    parsed = cli.parse_args()
    dw(parsed.save_dir, parsed.outfile)
#!/usr/bin/env python
# coding: utf-8

# In[ ]:

# Install the ELMo training library and the razdel tokenizer.
get_ipython().system('pip install bilm razdel')

# In[ ]:

from bilm.training import dump_weights as dw

# Dump the checkpointed ELMo weights into a single HDF5 file.
dw('../input/model-elmo/', 'weights.hdf5')

# In[ ]:

import pandas as pd

# Load the first 25k rows of the Avito dataset and normalize the
# column names to the generic text/label pair used downstream.
df = pd.read_csv(
    '../input/avito-dataset/train.csv',
    usecols=['description', 'category_id'],
    nrows=25000,
)
df = df.rename(columns={'description': 'text', 'category_id': 'label'})

# In[ ]:

import json

# Read the model hyper-parameters saved next to the checkpoint.
with open('../input/model-elmo/options.json', 'r') as file:
    options = json.load(file)
"""CLI: dump bilm checkpoint weights into an HDF5 file."""
import argparse

from bilm.training import dump_weights as dw


def _parse_cli():
    # One flag for the checkpoint directory, one for the output file.
    p = argparse.ArgumentParser()
    p.add_argument('--save_dir', help='Location of checkpoint files')
    p.add_argument('--outfile', help='Output hdf5 file with weights')
    return p.parse_args()


if __name__ == '__main__':
    opts = _parse_cli()
    dw(opts.save_dir, opts.outfile)
def main(args):
    """Dump the weights found in ``args.save_dir`` to ``args.out``."""
    checkpoint_dir = args.save_dir
    weights_path = args.out
    dw(checkpoint_dir, weights_path)