Beispiel #1
0
def load_model(path, args=None):
    if not args:
        if not os.path.exists(path):
            raise Exception("Model " + path + " does not exist")
        with open(path + "/args", "r") as f:
            args = pickle.load(f)

    model = dynet.Model()
    RNNModel = rnnlm.get_lm(args.model)

    vocab = load_vocab(args)

    if args.s2s:
        print "loading s2s..."
        s2s = seq2seq.get_s2s(args.s2s_type).load(model, args.s2s)

        pron_dict = util.PronDict(model, s2s)
        #print "getting prons for train data"
        #pron_dict.add_prons(train_data)
        #print "getting prons for valid data"
        #pron_dict.add_prons(valid_data)

        lm = RNNModel(model, vocab, pron_dict, s2s, args)
    else:
        lm = RNNModel(model, vocab, args)

    if not args:
        lm.m.load(path + "/params")
    else:
        lm.m.load(path)

    return lm, vocab
Beispiel #2
0
    def __init__(self, model, vocab, args):
        self.m = model
        self.vocab = vocab
        self.args = args

        if args.s2s:
            print "loading s2s..."
            self.s2s = seq2seq.get_s2s(args.s2s_type).load(model, args.s2s)

        self.rnn = args.rnn(args.layers, self.s2s.args.hidden_dim * 2,
                            args.hidden_dim, model)

        self.R = model.add_parameters((vocab.size, args.hidden_dim))
        self.bias = model.add_parameters((vocab.size, ))
Beispiel #3
0
## model-specific parameters
parser.add_argument("--beam_size", default=3, type=int)

args = parser.parse_args()
print "ARGS:", args

# Sentence-boundary markers handed to the corpus reader below.
BEGIN_TOKEN = '<s>'
END_TOKEN = '<e>'

# define model

model = dynet.Model()
sgd = dynet.SimpleSGDTrainer(model)

# Look up the seq2seq model class by name (factory defined elsewhere).
S2SModel = seq2seq.get_s2s(args.model)
if args.load:
    # Resuming from a checkpoint: the saved model carries its own
    # source/target vocabularies.
    print "Loading model..."
    s2s = S2SModel.load(model, args.load)
    src_vocab = s2s.src_vocab
    tgt_vocab = s2s.tgt_vocab
else:
    # Fresh model: build the source vocabulary from the training corpus.
    # NOTE(review): this branch continues past this excerpt -- tgt_vocab is
    # presumably built the same way further down; confirm in the full file.
    print "fresh model. getting vocab...",
    src_reader = util.get_reader(args.reader_mode)(args.train,
                                                   mode=args.reader_mode,
                                                   begin=BEGIN_TOKEN,
                                                   end=END_TOKEN)
    src_vocab = util.Vocab.load_from_corpus(src_reader,
                                            remake=args.rebuild_vocab,
                                            src_or_tgt="src")
    src_vocab.START_TOK = src_vocab[BEGIN_TOKEN]
Beispiel #4
0
# NOTE(review): this `else` pairs with an `if` above this excerpt
# (presumably a branch that loads an explicit validation set).
else:
    # percent_valid > 1 is treated as an absolute sentence count,
    # otherwise as a fraction of the training data.
    if args.percent_valid > 1: cutoff = args.percent_valid
    else: cutoff = int(len(train_data)*(args.percent_valid))
    # Carve the validation set off the tail of the training data.
    valid_data = train_data[-cutoff:]
    train_data = train_data[:-cutoff]
    #valid_data = train_data[-int(len(train_data)*(args.percent_valid+args.percent_test)):-int(len(train_data)*args.percent_test)]
    #test_data = train_data[-int(len(train_data)*args.percent_test):]
    #train_data = train_data[:-int(len(train_data)*(args.percent_valid+args.percent_test))]

print "finished loading data"

# Look up the LM class by name (factory defined elsewhere).
RNNModel = rnnlm.get_lm(args.model)

if args.s2s:
    # Pronunciation-conditioned LM: restore the seq2seq model and
    # precompute pronunciations for both data splits.
    print "loading s2s..."
    s2s = seq2seq.get_s2s(args.s2s_type).load(model, args.s2s)

    pron_dict = util.PronDict(model, s2s)
    print "getting prons for train data"
    pron_dict.add_prons(train_data)
    print "getting prons for valid data"
    pron_dict.add_prons(valid_data)

    lm = RNNModel(model, vocab, pron_dict, s2s, args)
else:
    lm = RNNModel(model, vocab, args)

if args.load:
    # Restore previously trained parameters into the dynet model.
    print "loading params..."
    model.load(args.load)