# Build the HSMM variant to be fit and attach the observed sequence.
# (obs_distns, dur_distns, data, truemodel, progprint_xrange, plt are
# assumed to be defined earlier in the file — not visible in this chunk.)
model = LibraryHSMMIntNegBinVariant(
    init_state_concentration=10.,
    alpha=6.,
    gamma=6.,
    obs_distns=obs_distns,
    dur_distns=dur_distns,
)
model.add_data(data, left_censoring=True)

##################
#  infer things  #
##################

# Gibbs sampling sweeps to explore the posterior...
for itr in progprint_xrange(50):
    model.resample_model()

# ...then Viterbi EM to refine; presumably returns per-iteration
# likelihoods (it is plotted as a curve below) — verify against pyhsmm.
likes = model.Viterbi_EM_fit()

# Side-by-side diagnostics: ground truth, fit progress, inferred model.
plt.figure()
truemodel.plot()
plt.gcf().suptitle('truth')

plt.figure()
plt.plot(likes)
plt.gcf().suptitle('likes')

plt.figure()
model.plot()
plt.gcf().suptitle('inferred')

plt.show()
##################
#  infer things  #
##################

# Track in-sample and held-out log likelihood at every Gibbs sweep.
# (model, test_data, progprint_xrange are defined earlier in the file.)
train_likes = []
test_likes = []

# NOTE: short run for quick iteration; the commented count is the
# original full-length run.
for itr in progprint_xrange(5):
# for itr in progprint_xrange(50):
    model.resample_model()
    train_likes.append(model.log_likelihood())
    test_likes.append(model.log_likelihood(test_data, left_censoring=True))

# Reduce the model to 10 states (mode='random', destructive — see
# truncate_num_states for exact semantics), then polish with Viterbi EM.
model.truncate_num_states(10, destructive=True, mode='random')
model.Viterbi_EM_fit()

# Optional diagnostics, kept from the original for reference:
# print 'training data likelihood when in the model: %g' % model.log_likelihood()
# print 'training data likelihood passed in externally: %g' % sum(model.log_likelihood(data,left_censoring=True) for data in training_datas)

# plt.figure()
# truemodel.plot()
# plt.gcf().suptitle('truth')

# plt.figure()
# model.plot()
# plt.gcf().suptitle('inferred')

# plt.figure()
# plt.plot(train_likes,label='training')
# plt.plot(test_likes,label='test')