# Example No. 1 (score: 0) — snippet separator from the scrape
def rlTraining(dataset, summaries, groups, models_list, docs_list, topic_list,
               rewards_dic):
    """Train one RL summariser per topic and print per-model evaluation.

    For each topic, builds a Vectoriser over its documents, trains a
    TDAgent against the topic's (normalised) reward signal, and scores
    the produced summary against every reference model, accumulating
    metrics across topics.

    Args:
        dataset: dataset name, used only for logging.
        summaries: array-like of reference summaries; indexed with a
            boolean mask over `groups`.
        groups: group labels; a group belongs to a topic when the topic
            id is a substring of the label.
        models_list: per-topic lists of (path, ...) reference models.
        docs_list: per-topic document collections (parallel to topics).
        topic_list: topic identifiers (parallel to models_list).
        rewards_dic: topic id -> raw reward values for the RL agent.
    """
    episode = 5000
    rl_strict = 10.
    all_result_dic = OrderedDict()
    # enumerate() instead of range(len(...)); the dead triple-quoted
    # topic-filter string statement was removed.
    for ii, topic_models in enumerate(models_list):
        topic = topic_list[ii]
        print('\n{} {}th TOPIC: {}'.format(dataset, ii, topic))
        vec = Vectoriser(docs_list[ii])
        # Boolean mask selects the reference summaries of this topic.
        rl_agent = TDAgent(vec, summaries[[topic in gg for gg in groups]],
                           episode, rl_strict)
        summary = rl_agent(normaliseList(rewards_dic[topic], 10))
        print('summary length : {}'.format(len(summary.split(' '))))

        for model in topic_models:
            model_name = model[0].split('/')[-1].strip()
            result = evaluateSummary(summary, model)
            print('---model {}---'.format(model_name))
            for metric in result:
                print('{} : {}'.format(metric, result[metric]))
            addResult(all_result_dic, result)

        print('\n=====UNTIL TOPIC {}, EPISODE {}, STRICT {}====='.format(
            ii, episode, rl_strict))
        for metric in all_result_dic:
            print('{} : {}'.format(metric, np.mean(all_result_dic[metric])))
 def evaluateOnDev(self, dev_groups, dev_features, dev_targets):
     """Evaluate the current torch model on the dev split.

     Args:
         dev_groups: dev group identifiers (only its length is used).
         dev_features: list of per-group 2-D feature matrices.
         dev_targets: list of per-group target score vectors.

     Returns:
         Mean MSE over groups when the loss type contains 'mse';
         otherwise the mean of the NEGATED Pearson correlations
         (negated so that a better correlation gives a lower value).
     """
     if 'mse' in self.loss_type:
         mse_list = []
         for ii in range(len(dev_groups)):
             # Forward pass per group; detach to numpy for sklearn's MSE.
             pred = self.model(
                 Variable(torch.from_numpy(
                     dev_features[ii]).float())).data.numpy()
             mloss = mean_squared_error(pred, dev_targets[ii])
             mse_list.append(mloss)
         return np.mean(mse_list)
     else:
         pcc_list = []
         for ii in range(len(dev_groups)):
             # Read the learnt linear layer straight from the state dict
             # rather than running a forward pass.
             weights = self.model.state_dict(
             )['combination.weight'].data.numpy()
             biases = self.model.state_dict(
             )['combination.bias'].data.numpy()
             if 'entropy' in self.loss_type:
                 # Two-class cross-entropy model: the scoring direction
                 # is the difference between the two class weight rows.
                 weights = weights[1] - weights[0]
                 aa = np.dot(dev_features[ii],
                             weights) + biases[1] - biases[0]
             else:
                 aa = np.dot(dev_features[ii], weights.reshape(-1,
                                                               1)) + biases
             # NOTE(review): i[0] assumes each score is a length-1 row;
             # in the entropy branch `aa` looks 1-D — confirm shapes.
             aa = normaliseList([i[0] for i in aa])
             pcc, _ = stats.pearsonr(aa, dev_targets[ii])
             # Negate: lower is better, matching a minimised loss.
             pcc_list.append(-pcc)
             #ndcg
             #sorted_list = sorted(aa,reverse=True)
             #ll = [self.dev_targets[ii][aa.index(ele)] for ele in sorted_list]
             #ndcg = ndcg_at_k(ll,int(len(ll)*0.1))
             #pcc_list.append(ndcg)
         return np.mean(pcc_list)
 def getDevResult(self,
                  features,
                  group_num,
                  dev_groups,
                  targets,
                  init=False):
     """Cache dev-split slices, or score the current ranker on them.

     The full feature matrix is assumed to be a concatenation of
     `group_num` equally-sized groups, each spanning
     `features.shape[0] / group_num` consecutive rows — TODO confirm.

     Args:
         features: full 2-D feature matrix over all groups.
         group_num: total number of groups contained in `features`.
         dev_groups: indices of the groups forming the dev split.
         targets: full target vector aligned with `features` rows.
         init: when True, only slice and cache dev features/targets on
             `self`; when False, score the previously cached dev split.

     Returns:
         None when `init` is True; otherwise the mean NDCG@10% of the
         rank learner's predictions over the cached dev groups.
     """
     unit = features.shape[0] / group_num
     if init:
         self.dev_features = []
         self.dev_targets = []
         for gg in dev_groups:
             # Each group occupies a contiguous row range of size `unit`.
             start = int(unit * gg)
             end = int(min(start + unit, features.shape[0]))
             self.dev_features.append(features[start:end, :])
             self.dev_targets.append(targets[start:end])
     else:
         pcc_list = []
         for ii in range(len(dev_groups)):
             # Linear score from the ranker's first coefficient row.
             aa = np.dot(self.dev_features[ii],
                         np.array(self.rank_learner.coef_[0]))
             aa = normaliseList(aa)
             #pcc = stats.pearsonr(aa,self.dev_targets[ii])
             #pcc_list.append(pcc)
             # Reorder the targets by descending predicted score, then
             # compute NDCG over the top 10% of the ranking.
             # NOTE(review): aa.index() requires normaliseList to return
             # a list and assumes scores are unique — verify.
             sorted_list = sorted(aa, reverse=True)
             ll = [
                 self.dev_targets[ii][aa.index(ele)] for ele in sorted_list
             ]
             ndcg = ndcg_at_k(ll, int(len(ll) * 0.1))
             pcc_list.append(ndcg)
         return np.mean(pcc_list)
    def getMixReward(self, learnt_weight=-1):
        """Blend learnt reward values with heuristic values.

        Args:
            learnt_weight: interpolation weight for the learnt values;
                the sentinel -1 means "use self.learnt_weight".

        Returns:
            Normalised convex combination of learnt and heuristic values.
        """
        if learnt_weight == -1:
            learnt_weight = self.learnt_weight

        learnt_part = np.array(self.learnt_values) * learnt_weight
        heuristic_part = np.array(self.heuristics) * (1 - learnt_weight)

        return normaliseList(learnt_part + heuristic_part)
    def predict(self, dataset, summaries, groups, test_groups):
        """Score summaries with the best learnt linear layer weights.

        When `test_groups` is None, the topic ids are derived from the
        group labels of this dataset instead.
        """
        if test_groups is None:
            eval_groups = sorted(
                set([gg.split('-')[1] for gg in groups if dataset in gg]))
        else:
            eval_groups = test_groups
        features = readFeatures(self.feature_types, dataset, summaries,
                                groups, eval_groups)

        weights = self.best_weights['combination.weight'].data.numpy()
        biases = self.best_weights['combination.bias'].data.numpy()
        if 'entropy' in self.loss_type:
            # Two-class model: collapse to a single decision direction.
            direction = weights[1] - weights[0]
            scores = np.dot(features, direction) + biases[1] - biases[0]
        else:
            scores = np.dot(features, weights.reshape(-1, 1)) + biases
        return normaliseList([row[0] for row in scores])
    def predict(self, dataset, summaries, groups, test_groups):
        """Score summaries with the learnt linear weight vector.

        When `test_groups` is None, the topic ids are derived from the
        group labels of this dataset instead.
        """
        if test_groups is None:
            topics = sorted(
                set([gg.split('-')[1] for gg in groups if dataset in gg]))
            features = readFeatures(self.feature_types, dataset, summaries,
                                    groups, topics)
        else:
            features = readFeatures(self.feature_types, dataset, summaries,
                                    groups, test_groups)

        scores = np.dot(features, np.array(self.best_weights))
        return normaliseList(scores)
# Example No. 7 (score: 0) — snippet separator from the scrape
    def __call__(self, summary_list, raw=False):
        """LSA-style reward: project candidate summaries onto the
        documents' latent topic space and weight by singular values.

        Args:
            summary_list: iterable of sentence-index lists, one per
                candidate summary.
            raw: when True return the raw topic scores, otherwise a
                normalised list.
        """
        corpus = self.documents[:]
        for sentence_ids in summary_list:
            chosen = [self.sentences[idx] for idx in sentence_ids]
            corpus.append(' '.join(chosen))

        n_docs = len(self.documents)
        vectorizer = TfidfVectorizer(stop_words=LANGUAGE.lower(),
                                     use_idf=True,
                                     smooth_idf=True)
        # Fit the vocabulary/IDF on the documents only, then map the
        # candidate summaries into the same term space.
        doc_term_matrix = vectorizer.fit_transform(corpus[:n_docs])
        sum_term_matrix = vectorizer.transform(corpus[n_docs:])
        u, s, vh = svd(doc_term_matrix.toarray())
        summ_topic = np.matmul(sum_term_matrix.toarray(),
                               np.transpose(vh[:s.shape[0], :]))
        values = np.matmul(summ_topic, s)

        if raw:
            return values
        return normaliseList(values, 1)
# Example No. 8 (score: 0) — snippet separator from the scrape
 def getReward(self):
     """Score each summary vector against the learnt reward weights
     and return the normalised scores."""
     learnt_w = self.reward_learner.weights
     scores = []
     for summary_vec in self.summary_vectors:
         scores.append(np.dot(learnt_w, summary_vec))
     return normaliseList(scores)
# Load the cached COALA vectors/predictions for this topic.
# `topic` is presumably defined earlier in the script — confirm upstream.
fname = 'data/coala_vec_pred/qa_vec_coala/se_%s_coala.qa_vec_pred' % topic
# NOTE(review): pickle.load executes arbitrary code on malicious input —
# safe only because the cache file is produced locally.
qa_list, vec_list, pred_list = pickle.load(open(fname, 'rb'),
                                           encoding='latin1')

# Collect up to five correctly- and five wrongly-ranked examples.
max_no_egs = 5  # find five examples
no_egs_correct = 0
no_egs_wrong = 0

for question_id in range(len(qa_list)):

    if no_egs_correct == max_no_egs and no_egs_wrong == max_no_egs:
        break

    # the last item in these lists corresponds to the gold answer, so cut it out
    summary_vectors = vec_list[question_id][:-1]
    heuristic_list = normaliseList(pred_list[question_id][:-1])

    original_gold_answer = qa_list[question_id]['gold_answer']
    pool_answers = qa_list[question_id]['pooled_answers']
    # compute reference values as overlap with gold answer

    gold_answer = re.sub('<[^<]+>', "", original_gold_answer)
    # cache it to file
    gold_filename = 'data/cache/coala_cache_%s_%i.txt' % (topic, question_id)
    if not os.path.exists(gold_filename):
        with open(gold_filename, 'w') as fh:
            fh.writelines(gold_answer)

    gold_idx = np.where(np.array(pool_answers) == original_gold_answer)[0][0]

    # compute whether heuristic is correct