import cPickle as pickle

import numpy as np

import coco_proc
import trainer

# MLBL is assumed to be defined in this file or importable from your model
# module (e.g. `from mlbl import MLBL`); adjust the import to your layout.


def test_MLBL_implementation():
    """ Test Your Implementation of Forward and Backward """
    z, zd, zt = coco_proc.process(context=5)

    # Hyperparameters
    d = {}
    d['name'] = 'testrun'
    d['loc'] = 'models/mlbl_model.pkl'
    d['context'] = 5
    d['learning_rate'] = 0.43
    d['momentum'] = 0.23
    d['batch_size'] = 40
    d['maxepoch'] = 10
    d['hidden_size'] = 441
    d['word_decay'] = 3e-7
    d['context_decay'] = 1e-8
    d['factors'] = 50

    # Load the word embeddings
    embed_map = trainer.load_embeddings()

    # Unpack the training data
    train_ngrams = z['ngrams']
    train_labels = z['labels']
    train_instances = z['instances']
    word_dict = z['word_dict']
    index_dict = z['index_dict']
    context = z['context']
    vocabsize = len(z['word_dict'])
    trainIM = z['IM']
    train_index = z['index']

    net = MLBL(name=d['name'],
               loc=d['loc'],
               seed=1234,
               V=vocabsize,
               K=100,
               D=trainIM.shape[1],
               h=d['hidden_size'],
               context=d['context'],
               batchsize=1,
               maxepoch=d['maxepoch'],
               eta_t=d['learning_rate'],
               gamma_r=d['word_decay'],
               gamma_c=d['context_decay'],
               f=0.99,
               p_i=d['momentum'],
               p_f=d['momentum'],
               T=20.0,
               verbose=1)

    # Initialize the network parameters
    X = train_instances
    indX = train_index
    Y = train_labels
    net.init_params(embed_map, index_dict)

    # Build a small test batch
    context_size = d['context']
    batchX = X[0:context_size]
    batchY = Y[0:context_size].toarray()
    batchindX = indX[0:context_size].astype(int).flatten()
    batchindX = np.floor(batchindX / 5).astype(int)
    batchIm = trainIM[batchindX]

    # Check the forward implementation against a saved ground-truth feature
    ft = net.forward(net.params, batchX, batchIm, test=True)
    with open('data/val_implementation.p', 'rb') as f:
        ft_gt = pickle.load(f)

    # The L2 difference should be less than 1.0e-5
    print 'Difference (L2 norm) between implemented and ground truth feature = {}'.format(
        np.linalg.norm(ft_gt - ft))
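# ---------------------------------------------------------------------------
# Hedged sketch: numerical check for the backward pass. The test above only
# verifies the forward feature, although the docstring also mentions the
# backward pass. The helper below is a standard central finite-difference
# gradient check. It ASSUMES an API that may differ from yours:
#   * net.params is a dict-like mapping of names to numpy arrays
#   * net.objective(params, X, Im, Y) -> scalar loss           (hypothetical)
#   * net.backward(params, X, Im, Y)  -> gradients, keyed like (hypothetical)
#     net.params
# Adapt the calls to whatever your MLBL implementation actually exposes.
# ---------------------------------------------------------------------------
def check_MLBL_gradients(net, batchX, batchIm, batchY,
                         eps=1e-6, tol=1e-4, n_checks=10):
    """ Compare analytic gradients against central finite differences """
    rng = np.random.RandomState(0)
    grads = net.backward(net.params, batchX, batchIm, batchY)  # hypothetical API
    for name, param in net.params.items():
        for _ in range(n_checks):
            # Perturb one randomly chosen entry of this parameter
            idx = tuple(rng.randint(s) for s in param.shape)
            orig = param[idx]
            param[idx] = orig + eps
            f_plus = net.objective(net.params, batchX, batchIm, batchY)  # hypothetical API
            param[idx] = orig - eps
            f_minus = net.objective(net.params, batchX, batchIm, batchY)
            param[idx] = orig
            numeric = (f_plus - f_minus) / (2.0 * eps)
            analytic = grads[name][idx]
            rel_err = abs(numeric - analytic) / max(1e-8, abs(numeric) + abs(analytic))
            status = 'OK' if rel_err < tol else 'MISMATCH'
            print '{}: {}[{}] analytic={:.6g} numeric={:.6g} rel_err={:.2g}'.format(
                status, name, idx, analytic, numeric, rel_err)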
import coco_proc
import trainer

if __name__ == '__main__':
    z, zd, zt = coco_proc.process(context=5)
    trainer.trainer(z, zd)