Example #1
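(Note: these examples are excerpts. They assume module-level imports such as numpy, math, os, logging, cPickle, and pylab, plus the project helpers referenced below — parse_args, prototype_state, DialogEncoderDecoder, get_test_iterator, a module-level logger, and a stopwords string — none of which are shown on this page.)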
def main():
    args = parse_args()
    state = prototype_state()

    state_path = args.model_prefix + "_state.pkl"
    model_path = args.model_prefix + "_model.npz"

    with open(state_path) as src:
        state.update(cPickle.load(src))

    logging.basicConfig(
        level=getattr(logging, state['level']),
        format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")

    model = DialogEncoderDecoder(state)
    if os.path.isfile(model_path):
        logger.debug("Loading previous model")
        model.load(model_path)
    else:
        raise Exception("Must specify a valid model path")

    eval_batch = model.build_eval_function()
    eval_misclass_batch = model.build_eval_misclassification_function()

    if args.test_path:
        state['test_triples'] = args.test_path

    # Initialize list of stopwords to remove
    if args.exclude_stop_words:
        logger.debug("Initializing stop-word list")
        stopwords_lowercase = stopwords.lower().split(' ')
        stopwords_indices = []
        for word in stopwords_lowercase:
            if word in model.str_to_idx:
                stopwords_indices.append(model.str_to_idx[word])

    test_data = get_test_iterator(state)
    test_data.start()

    # Load document ids
    if args.document_ids:
        with open(args.document_ids, 'r') as labels_file:
            labels_text = labels_file.readlines()
        document_ids = numpy.zeros((len(labels_text)), dtype='int32')
        for i in range(len(labels_text)):
            document_ids[i] = int(labels_text[i].split('\t')[0])

        unique_document_ids = numpy.unique(document_ids)

        assert (test_data.data_len == document_ids.shape[0])

    else:
        print 'Warning: no file with document ids given... standard deviations cannot be computed.'
        document_ids = numpy.zeros((test_data.data_len), dtype='int32')
        unique_document_ids = numpy.unique(document_ids)

    # Variables to store test statistics
    test_cost = 0  # negative log-likelihood
    test_cost_first_utterances = 0  # marginal negative log-likelihood of first two utterances
    test_cost_last_utterance_marginal = 0  # marginal (approximate) negative log-likelihood of last utterances
    test_misclass = 0  # misclassification error-rate
    test_misclass_first_utterances = 0  # misclassification error-rate of first two utterances
    test_empirical_mutual_information = 0  # empirical mutual information between first two utterances and third utterance, where the marginal P(U_3) is approximated by P(U_3, empty, empty).

    if model.bootstrap_from_semantic_information:
        test_semantic_cost = 0
        test_semantic_misclass = 0

    test_wordpreds_done = 0  # number of words in total
    test_wordpreds_done_last_utterance = 0  # number of words in last utterances
    test_triples_done = 0  # number of triples evaluated

    # Variables to compute negative log-likelihood and empirical mutual information per genre
    compute_genre_specific_metrics = False
    if hasattr(model, 'semantic_information_dim'):
        compute_genre_specific_metrics = True
        test_cost_per_genre = numpy.zeros((model.semantic_information_dim, 1),
                                          dtype='float32')
        test_mi_per_genre = numpy.zeros((model.semantic_information_dim, 1),
                                        dtype='float32')
        test_wordpreds_done_per_genre = numpy.zeros(
            (model.semantic_information_dim, 1), dtype='float32')
        test_triples_done_per_genre = numpy.zeros(
            (model.semantic_information_dim, 1), dtype='float32')

    # Number of triples in dataset
    test_data_len = test_data.data_len

    # These correspond to the same statistics as above, but now stored per triple,
    # e.g. test_cost_list is a numpy array with the negative log-likelihood of each triple in the test set.
    test_cost_list = numpy.zeros((test_data_len, ))
    test_pmi_list = numpy.zeros((test_data_len, ))
    test_cost_last_utterance_marginal_list = numpy.zeros((test_data_len, ))
    test_misclass_list = numpy.zeros((test_data_len, ))
    test_misclass_last_utterance_list = numpy.zeros((test_data_len, ))

    # Array containing number of words in each triple
    words_in_triples_list = numpy.zeros((test_data_len, ))

    # Array containing number of words in last utterance of each triple
    words_in_last_utterance_list = numpy.zeros((test_data_len, ))

    # Prepare variables for printing the test examples the model performs best and worst on
    test_extrema_setsize = min(state['track_extrema_samples_count'],
                               test_data_len)
    test_extrema_samples_to_print = min(state['print_extrema_samples_count'],
                                        test_extrema_setsize)

    test_lowest_costs = numpy.ones((test_extrema_setsize, )) * 1000
    test_lowest_triples = numpy.ones(
        (test_extrema_setsize, state['seqlen'])) * 1000
    test_highest_costs = numpy.ones((test_extrema_setsize, )) * (-1000)
    test_highest_triples = numpy.ones(
        (test_extrema_setsize, state['seqlen'])) * (-1000)

    logger.debug("[TEST START]")

    while True:
        batch = test_data.next()
        # Test set finished
        if not batch:
            break

        logger.debug("[TEST] - Got batch %d,%d" %
                     (batch['x'].shape[1], batch['max_length']))

        x_data = batch['x']
        x_data_reversed = batch['x_reversed']
        max_length = batch['max_length']
        x_cost_mask = batch['x_mask']
        x_semantic = batch['x_semantic']
        x_semantic_nonempty_indices = numpy.where(x_semantic >= 0)

        # Hack to get rid of start of sentence token.
        if args.exclude_sos and model.sos_sym != -1:
            x_cost_mask[x_data == model.sos_sym] = 0

        if args.exclude_stop_words:
            for word_index in stopwords_indices:
                x_cost_mask[x_data == word_index] = 0

        batch['num_preds'] = numpy.sum(x_cost_mask)

        c, c_list = eval_batch(x_data, x_data_reversed, max_length,
                               x_cost_mask, x_semantic)

        c_list = c_list.reshape((batch['x'].shape[1], max_length),
                                order=(1, 0))
        c_list = numpy.sum(c_list, axis=1)
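        # c is the summed cost over the batch; c_list holds the per-word costs,
        # reshaped above to (triples in batch, time steps) and summed over time
        # to give one cost per triple.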

        # Compute genre specific stats...
        if compute_genre_specific_metrics:
            non_nan_entries = numpy.array(c_list >= 0, dtype=int)
            c_list[numpy.where(non_nan_entries == 0)] = 0
            test_cost_per_genre += (numpy.asmatrix(non_nan_entries * c_list) *
                                    numpy.asmatrix(x_semantic)).T
            test_wordpreds_done_per_genre += (numpy.asmatrix(
                non_nan_entries * numpy.sum(x_cost_mask, axis=0)) *
                                              numpy.asmatrix(x_semantic)).T

        if numpy.isinf(c) or numpy.isnan(c):
            continue

        test_cost += c

        # Store test costs in list
        nxt = min((test_triples_done + batch['x'].shape[1]), test_data_len)
        triples_in_batch = nxt - test_triples_done

        words_in_triples = numpy.sum(x_cost_mask, axis=0)
        words_in_triples_list[(
            nxt - triples_in_batch):nxt] = words_in_triples[0:triples_in_batch]

        # We don't need to normalize by the number of words here; the per-document
        # normalization happens later when computing the standard deviations.
        test_cost_list[(nxt -
                        triples_in_batch):nxt] = c_list[0:triples_in_batch]

        # Store best and worst test costs
        con_costs = numpy.concatenate(
            [test_lowest_costs, c_list[0:triples_in_batch]])
        con_triples = numpy.concatenate(
            [test_lowest_triples, x_data[:, 0:triples_in_batch].T], axis=0)
        con_indices = con_costs.argsort()[0:test_extrema_setsize][::1]
        test_lowest_costs = con_costs[con_indices]
        test_lowest_triples = con_triples[con_indices]

        con_costs = numpy.concatenate(
            [test_highest_costs, c_list[0:triples_in_batch]])
        con_triples = numpy.concatenate(
            [test_highest_triples, x_data[:, 0:triples_in_batch].T], axis=0)
        con_indices = con_costs.argsort()[-test_extrema_setsize:][::-1]
        test_highest_costs = con_costs[con_indices]
        test_highest_triples = con_triples[con_indices]
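        # The two blocks above keep a running set of the lowest- and highest-cost
        # triples seen so far, by concatenating the new batch costs with the current
        # extrema and re-selecting the top test_extrema_setsize entries.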

        # Compute word-error rate
        miscl, miscl_list = eval_misclass_batch(x_data, x_data_reversed,
                                                max_length, x_cost_mask,
                                                x_semantic)
        if numpy.isinf(miscl) or numpy.isnan(miscl):
            continue

        test_misclass += miscl

        # Store misclassification errors in list
        miscl_list = miscl_list.reshape((batch['x'].shape[1], max_length),
                                        order=(1, 0))
        miscl_list = numpy.sum(miscl_list, axis=1)
        test_misclass_list[(
            nxt - triples_in_batch):nxt] = miscl_list[0:triples_in_batch]

        # Equations to compute empirical mutual information

        # Compute marginal log-likelihood of last utterance in triple:
        # We approximate it with the marginal log-probability of the utterance being observed first in the triple
        x_data_last_utterance = batch['x_last_utterance']
        x_data_last_utterance_reversed = batch['x_last_utterance_reversed']
        x_cost_mask_last_utterance = batch['x_mask_last_utterance']
        x_start_of_last_utterance = batch['x_start_of_last_utterance']

        # Hack to get rid of start of sentence token.
        if args.exclude_sos and model.sos_sym != -1:
            x_cost_mask_last_utterance[x_data_last_utterance ==
                                       model.sos_sym] = 0

        if args.exclude_stop_words:
            for word_index in stopwords_indices:
                x_cost_mask_last_utterance[x_data_last_utterance ==
                                           word_index] = 0

        words_in_last_utterance = numpy.sum(x_cost_mask_last_utterance, axis=0)
        words_in_last_utterance_list[(
            nxt - triples_in_batch
        ):nxt] = words_in_last_utterance[0:triples_in_batch]

        batch['num_preds_at_utterance'] = numpy.sum(x_cost_mask_last_utterance)

        marginal_last_utterance_loglikelihood, marginal_last_utterance_loglikelihood_list = eval_batch(
            x_data_last_utterance, x_data_last_utterance_reversed, max_length,
            x_cost_mask_last_utterance, x_semantic)

        marginal_last_utterance_loglikelihood_list = marginal_last_utterance_loglikelihood_list.reshape(
            (batch['x'].shape[1], max_length), order=(1, 0))
        marginal_last_utterance_loglikelihood_list = numpy.sum(
            marginal_last_utterance_loglikelihood_list, axis=1)
        test_cost_last_utterance_marginal_list[(
            nxt - triples_in_batch
        ):nxt] = marginal_last_utterance_loglikelihood_list[0:triples_in_batch]

        # Compute marginal log-likelihood of first utterances in triple by masking the last utterance
        x_cost_mask_first_utterances = numpy.copy(x_cost_mask)
        for i in range(batch['x'].shape[1]):
            x_cost_mask_first_utterances[
                x_start_of_last_utterance[i]:max_length, i] = 0

        marginal_first_utterances_loglikelihood, marginal_first_utterances_loglikelihood_list = eval_batch(
            x_data, x_data_reversed, max_length, x_cost_mask_first_utterances,
            x_semantic)

        marginal_first_utterances_loglikelihood_list = marginal_first_utterances_loglikelihood_list.reshape(
            (batch['x'].shape[1], max_length), order=(1, 0))
        marginal_first_utterances_loglikelihood_list = numpy.sum(
            marginal_first_utterances_loglikelihood_list, axis=1)

        # Compute empirical mutual information and pointwise empirical mutual information
        test_empirical_mutual_information += -c + marginal_first_utterances_loglikelihood + marginal_last_utterance_loglikelihood
        test_pmi_list[(nxt - triples_in_batch):nxt] = (
            -c_list * words_in_triples +
            marginal_first_utterances_loglikelihood_list +
            marginal_last_utterance_loglikelihood_list)[0:triples_in_batch]
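        # Both quantities follow log P(U_1,U_2,U_3) - log P(U_1,U_2) - log P(U_3)
        # = log [ P(U_3 | U_1, U_2) / P(U_3) ], i.e. the (pointwise) mutual
        # information between the first two utterances and the last one, with the
        # marginal P(U_3) approximated as described above.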

        # Compute genre specific stats...
        if compute_genre_specific_metrics:
            if triples_in_batch == batch['x'].shape[1]:
                mi_list = (-c_list * words_in_triples +
                           marginal_first_utterances_loglikelihood_list +
                           marginal_last_utterance_loglikelihood_list
                           )[0:triples_in_batch]
                # NaN values cannot be detected with !=; use numpy.isnan instead.
                non_nan_entries = numpy.array(
                    mi_list >= 0, dtype=int) * numpy.array(
                        ~numpy.isnan(mi_list), dtype=int)
                test_mi_per_genre += (
                    numpy.asmatrix(non_nan_entries * mi_list) *
                    numpy.asmatrix(x_semantic)).T
                test_triples_done_per_genre += numpy.reshape(
                    numpy.sum(x_semantic, axis=0),
                    test_triples_done_per_genre.shape)

        # Store the log P(U_1, U_2) cost computed during the mutual information calculation
        test_cost_first_utterances += marginal_first_utterances_loglikelihood

        # Store marginal log P(U_3)
        test_cost_last_utterance_marginal += marginal_last_utterance_loglikelihood

        # Compute word-error rate for first utterances
        miscl_first_utterances, miscl_first_utterances_list = eval_misclass_batch(
            x_data, x_data_reversed, max_length, x_cost_mask_first_utterances,
            x_semantic)
        if numpy.isinf(miscl_first_utterances) or numpy.isnan(
                miscl_first_utterances):
            continue
        test_misclass_first_utterances += miscl_first_utterances

        # Store misclassification for last utterance
        miscl_first_utterances_list = miscl_first_utterances_list.reshape(
            (batch['x'].shape[1], max_length), order=(1, 0))
        miscl_first_utterances_list = numpy.sum(miscl_first_utterances_list,
                                                axis=1)

        miscl_last_utterance_list = miscl_list - miscl_first_utterances_list

        test_misclass_last_utterance_list[(
            nxt - triples_in_batch
        ):nxt] = miscl_last_utterance_list[0:triples_in_batch]

        if model.bootstrap_from_semantic_information:
            # Compute cross-entropy error on predicting the semantic class and retrieve predictions
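            # Note: eval_semantic_batch is assumed to be built elsewhere in the
            # original script; it is not defined in this excerpt.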
            sem_eval = eval_semantic_batch(x_data, x_data_reversed, max_length,
                                           x_cost_mask, x_semantic)

            # Evaluate only non-empty triples (empty triples are created to fill
            #   the whole batch sometimes).
            sem_cost = sem_eval[0][-1, :, :]
            test_semantic_cost += numpy.sum(
                sem_cost[x_semantic_nonempty_indices])

            # Compute misclassified predictions on last timestep over all labels
            sem_preds = sem_eval[1][-1, :, :]
            sem_preds_misclass = len(
                numpy.where(
                    ((x_semantic - 0.5) *
                     (sem_preds - 0.5))[x_semantic_nonempty_indices] < 0)[0])
            test_semantic_misclass += sem_preds_misclass

        test_wordpreds_done += batch['num_preds']
        test_wordpreds_done_last_utterance += batch['num_preds_at_utterance']
        test_triples_done += batch['num_triples']

    logger.debug("[TEST END]")

    test_cost_last_utterance_marginal /= test_wordpreds_done_last_utterance
    test_cost_last_utterance = (test_cost - test_cost_first_utterances
                                ) / test_wordpreds_done_last_utterance
    test_cost /= test_wordpreds_done
    test_cost_first_utterances /= float(test_wordpreds_done -
                                        test_wordpreds_done_last_utterance)

    test_misclass_last_utterance = float(
        test_misclass - test_misclass_first_utterances) / float(
            test_wordpreds_done_last_utterance)
    test_misclass_first_utterances /= float(test_wordpreds_done -
                                            test_wordpreds_done_last_utterance)
    test_misclass /= float(test_wordpreds_done)
    test_empirical_mutual_information /= float(test_triples_done)

    if model.bootstrap_from_semantic_information:
        test_semantic_cost /= float(test_triples_done)
        test_semantic_misclass /= float(test_triples_done)
        print "** test semantic cost = %.4f, test semantic misclass error = %.4f" % (
            float(test_semantic_cost), float(test_semantic_misclass))

    print "** test cost (NLL) = %.4f, test word-perplexity = %.4f, test word-perplexity last utterance = %.4f, test word-perplexity marginal last utterance = %.4f, test mean word-error = %.4f, test mean word-error last utterance = %.4f, test emp. mutual information = %.4f" % (
        float(test_cost), float(
            math.exp(test_cost)), float(math.exp(test_cost_last_utterance)),
        float(
            math.exp(test_cost_last_utterance_marginal)), float(test_misclass),
        float(test_misclass_last_utterance), test_empirical_mutual_information)

    if compute_genre_specific_metrics:
        print '** test perplexity per genre', numpy.exp(
            test_cost_per_genre / test_wordpreds_done_per_genre)
        print '** test_mi_per_genre', test_mi_per_genre

        print '** words per genre', test_wordpreds_done_per_genre

    # Plot histogram over test costs
    if args.plot_graphs:
        try:
            pylab.figure()
            bins = range(0, 50, 1)
            pylab.hist(numpy.exp(test_cost_list), bins=bins, normed=1,
                       histtype='bar')
            pylab.savefig(model.state['save_dir'] + '/' +
                          model.state['run_id'] + "_" + model.state['prefix'] +
                          'Test_WordPerplexities.png')
        except:
            pass

    # Print a random subset of the tracked test samples with the highest and lowest log-likelihood
    if args.plot_graphs:
        print " highest word log-likelihood test samples: "
        numpy.random.shuffle(test_lowest_triples)
        for i in range(test_extrema_samples_to_print):
            print "      Sample: {}".format(" ".join(
                model.indices_to_words(numpy.ravel(
                    test_lowest_triples[i, :]))))

        print " lowest word log-likelihood test samples: "
        numpy.random.shuffle(test_highest_triples)
        for i in range(test_extrema_samples_to_print):
            print "      Sample: {}".format(" ".join(
                model.indices_to_words(numpy.ravel(
                    test_highest_triples[i, :]))))

    # Plot histogram over empirical pointwise mutual informations
    if args.plot_graphs:
        try:
            pylab.figure()
            bins = range(0, 100, 1)
            pylab.hist(test_pmi_list, bins=bins, normed=1, histtype='bar')
            pylab.savefig(model.state['save_dir'] + '/' +
                          model.state['run_id'] + "_" + model.state['prefix'] +
                          'Test_PMI.png')
        except:
            pass

    # To estimate the standard deviations, we assume that triples across documents (movies) are independent.
    # We compute the mean metric for each document, and then the variance between documents.
    # We then use the between-document variance to compute the variance of each metric.
    # Let m be a metric:
    # Var[m] = Var[1/(words in total) \sum_d \sum_i m_{di}]
    #        = Var[1/(words in total) \sum_d (words in doc d)/(words in doc d) \sum_i m_{di}]
    #        = \sum_d (words in doc d)^2/(words in total)^2 Var[1/(words in doc d) \sum_i m_{di}]
    #        = \sum_d (words in doc d)^2/(words in total)^2 sigma^2
    #
    # where sigma^2 is the variance computed for the means across documents.
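    # Illustrative example (hypothetical numbers): with two documents of 100 and
    # 300 words and a between-document variance sigma^2 = 0.25, the formula gives
    # Var[m] = (100^2 + 300^2) / 400^2 * 0.25 = 0.625 * 0.25 = 0.15625,
    # i.e. a standard deviation of roughly 0.40.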

    # negative log-likelihood for each document (movie)
    per_document_test_cost = numpy.zeros((len(unique_document_ids)),
                                         dtype='float32')
    # negative log-likelihood for last utterance for each document (movie)
    per_document_test_cost_last_utterance = numpy.zeros(
        (len(unique_document_ids)), dtype='float32')
    # misclassification error for each document (movie)
    per_document_test_misclass = numpy.zeros((len(unique_document_ids)),
                                             dtype='float32')
    # misclassification error for last utterance for each document (movie)
    per_document_test_misclass_last_utterance = numpy.zeros(
        (len(unique_document_ids)), dtype='float32')

    # Compute standard deviations based on means across documents (sigma^2 above)
    all_words_squared = 0  # \sum_d (words in doc d)^2
    all_words_in_last_utterance_squared = 0  # \sum_d (words in last utterance of doc d)^2
    for doc_id in range(len(unique_document_ids)):
        doc_indices = numpy.where(document_ids == unique_document_ids[doc_id])

        per_document_test_cost[doc_id] = numpy.sum(
            test_cost_list[doc_indices]) / numpy.sum(
                words_in_triples_list[doc_indices])
        per_document_test_cost_last_utterance[doc_id] = numpy.sum(
            test_cost_last_utterance_marginal_list[doc_indices]) / numpy.sum(
                words_in_last_utterance_list[doc_indices])

        per_document_test_misclass[doc_id] = numpy.sum(
            test_misclass_list[doc_indices]) / numpy.sum(
                words_in_triples_list[doc_indices])
        per_document_test_misclass_last_utterance[doc_id] = numpy.sum(
            test_misclass_last_utterance_list[doc_indices]) / numpy.sum(
                words_in_last_utterance_list[doc_indices])

        all_words_squared += float(
            numpy.sum(words_in_triples_list[doc_indices]))**2
        all_words_in_last_utterance_squared += float(
            numpy.sum(words_in_last_utterance_list[doc_indices]))**2

    # Sanity check that all documents are being used in the standard deviation calculations
    assert (numpy.sum(words_in_triples_list) == test_wordpreds_done)
    assert (numpy.sum(words_in_last_utterance_list) ==
            test_wordpreds_done_last_utterance)

    # Compute final standard deviation equation and print the standard deviations
    per_document_test_cost_variance = numpy.var(
        per_document_test_cost) * float(all_words_squared) / float(
            test_wordpreds_done**2)
    per_document_test_cost_last_utterance_variance = numpy.var(
        per_document_test_cost_last_utterance) * float(
            all_words_in_last_utterance_squared) / float(
                test_wordpreds_done_last_utterance**2)
    per_document_test_misclass_variance = numpy.var(
        per_document_test_misclass) * float(all_words_squared) / float(
            test_wordpreds_done**2)
    per_document_test_misclass_last_utterance_variance = numpy.var(
        per_document_test_misclass_last_utterance) * float(
            all_words_in_last_utterance_squared) / float(
                test_wordpreds_done_last_utterance**2)

    print 'Standard deviations:'
    print "** test cost (NLL) = ", math.sqrt(per_document_test_cost_variance)
    print "** test perplexity (NLL) = ", math.sqrt(
        (math.exp(per_document_test_cost_variance) - 1) *
        math.exp(2 * test_cost + per_document_test_cost_variance))

    print "** test cost last utterance (NLL) = ", math.sqrt(
        per_document_test_cost_last_utterance_variance)
    print "** test perplexity last utterance  (NLL) = ", math.sqrt(
        (math.exp(per_document_test_cost_last_utterance_variance) - 1) *
        math.exp(2 * test_cost +
                 per_document_test_cost_last_utterance_variance))

    print "** test word-error = ", math.sqrt(
        per_document_test_misclass_variance)
    print "** test last utterance word-error = ", math.sqrt(
        per_document_test_misclass_last_utterance_variance)

    logger.debug("All done, exiting...")
Example #2
def main():
    args = parse_args()
    state = prototype_state()
   
    state_path = args.model_prefix + "_state.pkl"
    model_path = args.model_prefix + "_model.npz"

    with open(state_path) as src:
        state.update(cPickle.load(src)) 
    
    logging.basicConfig(level=getattr(logging, state['level']), format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")

    state['lr'] = 0.01
    state['bs'] = 2

    state['compute_training_updates'] = False
    state['apply_meanfield_inference'] = True

    model = DialogEncoderDecoder(state)
    if os.path.isfile(model_path):
        logger.debug("Loading previous model")
        model.load(model_path)
    else:
        raise Exception("Must specify a valid model path")
    


    mf_update_batch = model.build_mf_update_function()
    mf_reset_batch = model.build_mf_reset_function()

    saliency_batch = model.build_saliency_eval_function()

    test_dialogues = open(args.test_dialogues, 'r').readlines()
    for test_dialogue_idx, test_dialogue in enumerate(test_dialogues):
        #print 'Visualizing dialogue: ', test_dialogue
        test_dialogue_split = test_dialogue.split()
        # Convert dialogue into list of ids
        dialogue = []
        if len(test_dialogue_split) == 0:
            dialogue = [model.eos_sym]
        else:
            sentence_ids = model.words_to_indices(test_dialogue_split)
            # Add eos tokens
            if len(sentence_ids) > 0:
                if not sentence_ids[0] == model.eos_sym:
                    sentence_ids = [model.eos_sym] + sentence_ids
                if not sentence_ids[-1] == model.eos_sym:
                    sentence_ids += [model.eos_sym]
            else:
                sentence_ids = [model.eos_sym]


            dialogue += sentence_ids

        if len(dialogue) > 3:
            if ((dialogue[-1] == model.eos_sym)
             and (dialogue[-2] == model.eod_sym)
             and (dialogue[-3] == model.eos_sym)):
                del dialogue[-1]
                del dialogue[-1]

        
        dialogue = numpy.asarray(dialogue, dtype='int32').reshape((len(dialogue), 1))
        #print 'dialogue', dialogue
        dialogue_reversed = model.reverse_utterances(dialogue)

        max_batch_sequence_length = len(dialogue)
        bs = state['bs']

        # Initialize batch with zeros
        batch_dialogues = numpy.zeros((max_batch_sequence_length, bs), dtype='int32')
        batch_dialogues_reversed = numpy.zeros((max_batch_sequence_length, bs), dtype='int32')
        batch_dialogues_mask = numpy.zeros((max_batch_sequence_length, bs), dtype='float32')
        batch_dialogues_reset_mask = numpy.zeros((bs), dtype='float32')
        batch_dialogues_drop_mask = numpy.ones((max_batch_sequence_length, bs), dtype='float32')

        # Fill in batch with values
        batch_dialogues[:,0]  = dialogue[:, 0]
        batch_dialogues_reversed[:,0] = dialogue_reversed[:, 0]
        #batch_dialogues  = dialogue
        #batch_dialogues_reversed = dialogue_reversed


        batch_dialogues_ran_gaussian_vectors = numpy.zeros((max_batch_sequence_length, bs, model.latent_gaussian_per_utterance_dim), dtype='float32')
        batch_dialogues_ran_uniform_vectors = numpy.zeros((max_batch_sequence_length, bs, model.latent_piecewise_per_utterance_dim), dtype='float32')

        eos_sym_list = numpy.where(batch_dialogues[:, 0] == state['eos_sym'])[0]
        if len(eos_sym_list) > 1:
            second_last_eos_sym = eos_sym_list[-2]
        else:
            print 'WARNING: dialogue does not have at least two EOS tokens! Skipping it.'
            # Without two EOS tokens there is no context/response split, so skip this dialogue.
            continue

        batch_dialogues_mask[:, 0] = 1.0
        batch_dialogues_mask[0:second_last_eos_sym+1, 0] = 0.0
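        # The mask set above zeroes out everything up to and including the
        # second-last EOS token, so the cost/saliency is computed only over the
        # final utterance (the response), not the preceding context.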

        print '###'
        print '###'
        print '###'
        # Optionally run mean-field (SGD) refinement of the latent variables before computing saliency; disabled by default.
        if False:
            mf_reset_batch()
            for i in range(10):
                print  '  SGD Update', i

                batch_dialogues_ran_gaussian_vectors[second_last_eos_sym:, 0, :] = model.rng.normal(loc=0, scale=1, size=model.latent_gaussian_per_utterance_dim)
                batch_dialogues_ran_uniform_vectors[second_last_eos_sym:, 0, :] = model.rng.uniform(low=0.0, high=1.0, size=model.latent_piecewise_per_utterance_dim)


                training_cost, kl_divergence_cost_acc, kl_divergences_between_piecewise_prior_and_posterior, kl_divergences_between_gaussian_prior_and_posterior = mf_update_batch(batch_dialogues, batch_dialogues_reversed, max_batch_sequence_length, batch_dialogues_mask, batch_dialogues_reset_mask, batch_dialogues_ran_gaussian_vectors, batch_dialogues_ran_uniform_vectors, batch_dialogues_drop_mask)

                print '     training_cost', training_cost
                print '     kl_divergence_cost_acc', kl_divergence_cost_acc
                print '     kl_divergences_between_gaussian_prior_and_posterior',  numpy.sum(kl_divergences_between_gaussian_prior_and_posterior)
                print '     kl_divergences_between_piecewise_prior_and_posterior', numpy.sum(kl_divergences_between_piecewise_prior_and_posterior)


        batch_dialogues_ran_gaussian_vectors[second_last_eos_sym:, 0, :] = model.rng.normal(loc=0, scale=1, size=model.latent_gaussian_per_utterance_dim)
        batch_dialogues_ran_uniform_vectors[second_last_eos_sym:, 0, :] = model.rng.uniform(low=0.0, high=1.0, size=model.latent_piecewise_per_utterance_dim)

        gaussian_saliency, piecewise_saliency = saliency_batch(batch_dialogues, batch_dialogues_reversed, max_batch_sequence_length, batch_dialogues_mask, batch_dialogues_reset_mask, batch_dialogues_ran_gaussian_vectors, batch_dialogues_ran_uniform_vectors, batch_dialogues_drop_mask)

        if test_dialogue_idx < 2:
            print 'gaussian_saliency', gaussian_saliency.shape, gaussian_saliency
            print 'piecewise_saliency', piecewise_saliency.shape, piecewise_saliency

        gaussian_sum = 0.0
        piecewise_sum = 0.0
        for i in range(second_last_eos_sym+1, max_batch_sequence_length):
            gaussian_sum += gaussian_saliency[dialogue[i, 0]]
            piecewise_sum += piecewise_saliency[dialogue[i, 0]]

        gaussian_sum = max(gaussian_sum, 0.0000000000001)
        piecewise_sum = max(piecewise_sum, 0.0000000000001)
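        # The clamps above guard against dividing by zero when the per-word
        # saliency scores are normalized below.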


        print '###'
        print '###'
        print '###'

        print 'Topic: ', ' '.join(test_dialogue_split[0:second_last_eos_sym])

        print ''
        print 'Response', ' '.join(test_dialogue_split[second_last_eos_sym+1:max_batch_sequence_length])
        gaussian_str = ''
        piecewise_str = ''
        for i in range(second_last_eos_sym+1, max_batch_sequence_length):
            gaussian_str += str(gaussian_saliency[dialogue[i, 0]]/gaussian_sum) + ' '
            piecewise_str += str(piecewise_saliency[dialogue[i, 0]]/piecewise_sum) + ' '




        print 'Gaussian_saliency', gaussian_str
        print 'Piecewise_saliency', piecewise_str

        #print ''
        #print 'HEY', gaussian_saliency[:, 0].argsort()[-3:][::-1]
        print ''
        print 'Gaussian Top 3 Words: ', model.indices_to_words(list(gaussian_saliency[:].argsort()[-3:][::-1]))
        print 'Piecewise Top 3 Words: ', model.indices_to_words(list(piecewise_saliency[:].argsort()[-3:][::-1]))

        print 'Gaussian Top 5 Words: ', model.indices_to_words(list(gaussian_saliency[:].argsort()[-5:][::-1]))
        print 'Piecewise Top 5 Words: ', model.indices_to_words(list(piecewise_saliency[:].argsort()[-5:][::-1]))

        print 'Gaussian Top 7 Words: ', model.indices_to_words(list(gaussian_saliency[:].argsort()[-7:][::-1]))
        print 'Piecewise Top 7 Words: ', model.indices_to_words(list(piecewise_saliency[:].argsort()[-7:][::-1]))


    logger.debug("All done, exiting...")