```python
# Pad / truncate each sentence to a common length, then reshape to
# (1 document, n_sentences, max_sent_size) for the HAN layers.
max_sent_size = min(max(len(s) for s in sents_idxs), max_sent_size_overall)
sents_idxs_padded = [
    s + [padding_idx] * (max_sent_size - len(s)) if len(s) < max_sent_size else s[:max_sent_size]
    for s in sents_idxs
]
reshaped_sentences = np.reshape(np.array(sents_idxs_padded), (1, len(sents), max_sent_size))
reshaped_sentences_tensor = _to_tensor(
    reshaped_sentences, dtype='float32')  # a layer, unlike a model, requires a tf tensor as input

print('== attention over words ==')
# Word-level attention coefficients, one distribution per sentence.
sents_att_coeffs = TimeDistributed(get_sent_att_coeffs)(reshaped_sentences_tensor)
word_coeffs = sents_att_coeffs.eval(session=sess)
word_coeffs = np.reshape(word_coeffs, (len(sents), max_sent_size))

# Sentence-level (document) attention coefficients and the final HAN output.
doc_att_tensor = get_doc_attention_coeffs(reshaped_sentences_tensor)
doc_att = doc_att_tensor.eval(session=sess)[0]
res_tensor = han(reshaped_sentences_tensor)
res = res_tensor.eval(session=sess)
print(doc_att)

# Collect, for each sentence, its tokens and their rounded attention weights.
my_wcs = []
my_values_array = []
my_keys_array = []
for my_idx, wc in enumerate(word_coeffs):
    my_keys = sents_tokenized[my_idx]
    my_values = [round(elt, 2) for elt in wc.tolist()[:len(my_keys)]]
    my_values_array.append(my_values)
    my_keys_array.append(my_keys)
```
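To make the purpose of the final loop concrete, here is a minimal sketch (not part of the original code) of how the collected `my_keys_array` and `my_values_array` could be inspected once the snippet above has run: it simply prints each token next to its rounded word-attention weight, sentence by sentence.

```python
# Hypothetical inspection step: assumes my_keys_array / my_values_array
# were filled by the loop above.
for keys, values in zip(my_keys_array, my_values_array):
    print(' '.join('{}:{}'.format(k, v) for k, v in zip(keys, values)))
```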