Example #1
 def _doTest (self, W, X, model, queryState, trainPlan):
     D,_ = W.shape
     recons = queryState.means.dot(model.vocab)
     reconsErr = 1./D * np.sum((np.asarray(W.todense()) - recons) * (np.asarray(W.todense()) - recons))
     
     print ("Initial bound is %f\n\n" % ctm.var_bound(W, model, queryState))
     print ("Initial reconstruction error is %f\n\n" % reconsErr)
     
     model, query, (bndItrs, bndVals) = stm.train (W, X, model, queryState, trainPlan)
         
     # Plot the bound
     plt.plot(bndItrs[5:], bndVals[5:])
     plt.xlabel("Iterations")
     plt.ylabel("Variational Bound")
     plt.show()
     
     # Plot the vocab
     ones = np.ones((3,3))
     for k in range(model.K):
         plt.subplot(2, 3, k + 1)   # subplot indices are 1-based
         plt.imshow(ones - model.vocab[k,:].reshape((3,3)), interpolation="none", cmap = cm.Greys_r)
     plt.show()
     
     recons = queryState.means.dot(model.vocab)
     reconsErr = 1./D * np.sum((np.asarray(W.todense()) - recons) * (np.asarray(W.todense()) - recons))
     print ("Final reconstruction error is %f\n\n" % reconsErr)
Example #2
 def _doTest (self, W, X, model, queryState, trainPlan):
     D,_ = W.shape
     recons = queryState.means.dot(model.vocab)
     reconsErr = 1./D * np.sum((np.asarray(W.todense()) - recons) * (np.asarray(W.todense()) - recons))
     
     print ("Initial bound is %f\n\n" % ctm.var_bound(W, model, queryState))
     print ("Initial reconstruction error is %f\n\n" % reconsErr)
     
     model, query, (bndItrs, bndVals, bndLikes) = stm.train (W, X, model, queryState, trainPlan)
         
     # Plot the evolution of the bound during training.
     fig, ax1 = plt.subplots()
     ax1.plot(bndItrs, bndVals, 'b-')
     ax1.set_xlabel('Iterations')
     ax1.set_ylabel('Bound', color='b')
     
     ax2 = ax1.twinx()
     ax2.plot(bndItrs, bndLikes, 'r-')
     ax2.set_ylabel('Likelihood', color='r')
     
     plt.show()
     
     # Plot the vocabulary
     ones = np.ones((3,3))
     for k in range(model.K):
         plt.subplot(2, 3, k + 1)   # subplot indices are 1-based
         plt.imshow(ones - model.vocab[k,:].reshape((3,3)), interpolation="none", cmap = cm.Greys_r)
     plt.show()
     
     recons = queryState.means.dot(model.vocab)
     reconsErr = 1./D * np.sum((np.asarray(W.todense()) - recons) * (np.asarray(W.todense()) - recons))
     print ("Final reconstruction error is %f\n\n" % reconsErr)
Example #3
 def _doTest (self, W, model, queryState, trainPlan):
     D,_ = W.shape
     recons = rowwise_softmax(queryState.means).dot(model.vocab)
     reconsErr = 1./D * np.sum((np.asarray(W.todense()) - recons) * (np.asarray(W.todense()) - recons))
     
     print ("Initial bound is %f\n\n" % ctm.var_bound(W, model, queryState))
     print ("Initial reconstruction error is %f\n\n" % reconsErr)
     
     model, query, (bndItrs, bndVals) = ctm.train (W, None, model, queryState, trainPlan)
         
     # Plot the bound
     plt.plot(bndItrs[5:], bndVals[5:])
     plt.xlabel("Iterations")
     plt.ylabel("Variational Bound")
     plt.show()
     
     # Plot the inferred vocab
     plt.imshow(model.vocab, interpolation="none", cmap = cm.Greys_r)
     plt.show()
     
     recons = rowwise_softmax(queryState.means).dot(model.vocab)
     reconsErr = 1./D * np.sum((np.asarray(W.todense()) - recons) * (np.asarray(W.todense()) - recons))
     print ("Final reconstruction error is %f\n\n" % reconsErr)