# Build reverse lookup: word index -> word string (Python 2 `iteritems`).
idx2word = dict((k, v) for v, k in dic['words2idx'].iteritems())
# Unpack the three dataset splits; each is (word indices, named-entity tags, labels).
train_lex, train_ne, train_y = train_set
valid_lex, valid_ne, valid_y = valid_set
test_lex, test_ne, test_y = test_set
vocsize = len(dic['words2idx'])    # vocabulary size for the embedding layer
nclasses = len(dic['labels2idx'])  # number of output label classes
nsentences = len(train_lex)        # training sentences per epoch

# instantiate the model (seed both RNGs so runs are reproducible)
numpy.random.seed(s['seed'])
random.seed(s['seed'])
# `model` is defined elsewhere; nh=hidden units, nc=classes, ne=vocab,
# de=embedding dimension, cs=context-window size.
rnn = model(nh=s['nhidden'], nc=nclasses, ne=vocsize, de=s['emb_dimension'], cs=s['win'])

# train with early stopping on validation set
best_f1 = -numpy.inf
s['clr'] = s['lr']  # current learning rate, starts at the base rate
for e in xrange(s['nepochs']):
    # shuffle all three parallel lists with the same seed so they stay aligned
    shuffle([train_lex, train_ne, train_y], s['seed'])
    s['ce'] = e  # record current epoch (presumably for logging/decay — confirm)
    tic = time.time()
    for i in xrange(nsentences):
        # Expand sentence i into per-word context windows of width s['win'].
        cwords = contextwin(train_lex[i], s['win'])
        # Group the windows into minibatches of size s['bs'] as int32 arrays.
        words = map(lambda x: numpy.asarray(x).astype('int32'),\
                    minibatch(cwords, s['bs']))
        # NOTE(review): fragment truncated here in source — the training call
        # (rnn.train on each batch) that should follow is missing.
# NOTE(review): fragment begins mid-expression — the opening of this statement
# (presumably `vocsize = len(set(reduce(...` over all word indices) is missing
# from the visible source.
lambda x, y: list(x)+list(y),\
train_lex+valid_lex+test_lex)))
# Number of classes = distinct labels observed across all three splits.
nclasses = len(set(reduce(\
    lambda x, y: list(x)+list(y),\
    train_y+test_y+valid_y)))
nsentences = len(train_lex)  # training sentences per epoch

# instantiate the model (this variant uses attribute-style config access: s.key)
numpy.random.seed(s.seed)
random.seed(s.seed)
# Variant with external-memory parameters (memory_size, n_memory_slots);
# `model` is defined elsewhere.
rnn = model(
    nh = s.hidden_size,
    nc = nclasses,
    ne = vocsize,
    de = s.emb_size,
    cs = s.win,
    memory_size = s.memory_size,
    n_memory_slots = s.n_memory_slots
)

# train with early stopping on validation set
best_f1 = -numpy.inf
s.clr = s.lr  # current learning rate, starts at the base rate
for e in xrange(s.n_epochs):
    # shuffle all three parallel lists with the same seed so they stay aligned
    shuffle([train_lex, train_ne, train_y], s.seed)
    s.ce = e  # record current epoch
    tic = time.time()
    for i in xrange(nsentences):
        cwords = contextwin(train_lex[i], s.win)
        # NOTE(review): fragment truncated on a line continuation — the
        # second argument of this map() call is missing from the source.
        words = map(lambda x: numpy.asarray(x).astype('int32'),\
# Reverse lookups: index -> label string and index -> word string.
idx2label = dict((k,v) for v,k in dic['labels2idx'].iteritems())
idx2word = dict((k,v) for v,k in dic['words2idx'].iteritems())
# This variant carries a third "cue" sequence per split instead of NE tags.
train_lex, train_y, train_cue = train_set
valid_lex, valid_y, valid_cue = valid_set
vocsize = len(dic['words2idx'])    # vocabulary size for the embedding layer
nclasses = len(dic['labels2idx'])  # number of output label classes
nsentences = len(train_lex)        # training sentences per epoch

# instantiate the model (seed both RNGs so runs are reproducible)
numpy.random.seed(s['seed'])
random.seed(s['seed'])
# `model` is defined elsewhere; `cue=args.c` switches on cue handling
# (exact semantics live in the model definition — confirm there).
rnn = model(
    nh = s['nhidden'],
    nc = nclasses,
    ne = vocsize,
    de = s['emb_dimension'],
    cs = s['win'],
    cue = args.c)

# train with early stopping on validation set
best_f1 = -numpy.inf
s['clr'] = s['lr']  # current learning rate, starts at the base rate
for e in xrange(s['nepochs']):
    # shuffle the three parallel lists with the same seed so they stay aligned
    shuffle([train_lex,train_y,train_cue], s['seed'])
    print '[learning] epoch %d' % e
    s['ce'] = e  # record current epoch
    tic = time.time()
    for i in xrange(nsentences):
        # take the context win of both
        # merge the results
        # NOTE(review): loop body truncated in source — only the two planning
        # comments above survive; the context-window/training code is missing.
# Load ATIS fold s["fold"]: three (lex, ne, label) splits plus index dicts.
train_set, valid_set, test_set, dic = load.atisfold(s["fold"])
# Reverse lookups: index -> label string and index -> word string.
idx2label = dict((k, v) for v, k in dic["labels2idx"].iteritems())
idx2word = dict((k, v) for v, k in dic["words2idx"].iteritems())
# Unpack the splits; each is (word indices, named-entity tags, labels).
train_lex, train_ne, train_y = train_set
valid_lex, valid_ne, valid_y = valid_set
test_lex, test_ne, test_y = test_set
vocsize = len(dic["words2idx"])    # vocabulary size for the embedding layer
nclasses = len(dic["labels2idx"])  # number of output label classes
nsentences = len(train_lex)        # training sentences per epoch

# instantiate the model (seed both RNGs so runs are reproducible)
numpy.random.seed(s["seed"])
random.seed(s["seed"])
# `model` is defined elsewhere; nh=hidden units, nc=classes, ne=vocab,
# de=embedding dimension, cs=context-window size.
rnn = model(nh=s["nhidden"], nc=nclasses, ne=vocsize, de=s["emb_dimension"], cs=s["win"])

# train with early stopping on validation set
best_f1 = -numpy.inf
s["clr"] = s["lr"]  # current learning rate, starts at the base rate
for e in xrange(s["nepochs"]):
    # shuffle all three parallel lists with the same seed so they stay aligned
    shuffle([train_lex, train_ne, train_y], s["seed"])
    s["ce"] = e  # record current epoch
    tic = time.time()
    for i in xrange(nsentences):
        # Expand sentence i into per-word context windows of width s["win"],
        # then group them into int32 minibatches of size s["bs"].
        cwords = contextwin(train_lex[i], s["win"])
        words = map(lambda x: numpy.asarray(x).astype("int32"), minibatch(cwords, s["bs"]))
        labels = train_y[i]
        # One gradient step per (batch, label) pair at the current rate s["clr"].
        for word_batch, label_last_word in zip(words, labels):
            rnn.train(word_batch, label_last_word, s["clr"])