def __call__(self, session):
    """Run the model token-by-token over each test file and print, for each
    position, the current token next to the model's predicted token.

    Args:
        session: an active session used to evaluate the model's ops.
    """
    test_file_containers = pyreader.get_data(self.data_path, self.test_files,
                                             1, self.map)
    # Pair the flattened token stream with its flattened variable mask,
    # one (tokens, masks) pair per test file.
    data = list(
        zip((list(flatmap(identity_map, c.inputs)) for c in test_file_containers),
            (list(flatmap(identity_map, c.masks)) for c in test_file_containers)))
    for testcase, var_mask in data:
        print("----------Test case----------\n")
        evals = get_evals([self.prediction_op], self.model)
        state, att_states, att_counts = get_initial_state(self.model)
        predicted_tokens = []
        for token, _mask in zip(testcase, var_mask):
            # Single-token step (batch 1, length 1). Renamed from `data` to
            # avoid shadowing the dataset being iterated above.
            step_data = (np.array([[token]]), np.array([[1]]),
                         np.array([0]), np.array([1]))
            feed_dict = construct_feed_dict(self.model, step_data, state,
                                            att_states, att_counts)
            results = session.run(evals, feed_dict=feed_dict)
            prediction, state, att_states, att_counts, att_vec, lambda_vec = \
                extract_results(results, evals, 1, self.model)
            predicted_token = self.inverse_map[prediction[0][0]].replace(
                "\n", "<newline>")
            current_token = self.inverse_map[token].replace("\n", "<newline>")
            predicted_tokens.append(current_token + " ; " + predicted_token)
        print("\n".join(predicted_tokens))
def preprocess(data_path, config):
    """Load the vocabulary and raw data, partition it, and write the
    partitions to the configured output file.

    Args:
        data_path: directory containing the vocab file, list file and output.
        config: flags/config object with vocab_file, list_file, seq_length,
            num_partitions and output_file attributes.
    """
    if config.output_file is None:
        print("Output file parameter needed, aborting...")
        # Fix: exit with a nonzero status on error — bare sys.exit() exits 0,
        # which would make this abort look like success to callers.
        sys.exit(1)
    word_to_id_path = os.path.join(data_path, config.vocab_file)
    with open(word_to_id_path, "rb") as f:
        word_to_id = pickle.load(f)
    list_file = os.path.join(data_path, config.list_file)
    data = pyreader.get_data(data_path, list_file, config.seq_length,
                             word_to_id)
    write_partitions(pyreader.partition_data(data, config.num_partitions),
                     os.path.join(data_path, config.output_file))
def __call__(self, session):
    """Run the model token-by-token over each test file and print, for each
    position, the current token next to the model's predicted token.

    Args:
        session: an active session used to evaluate the model's ops.
    """
    test_file_containers = pyreader.get_data(self.data_path, self.test_files,
                                             1, self.map)
    # Pair the flattened token stream with its flattened variable mask,
    # one (tokens, masks) pair per test file.
    data = list(
        zip((list(flatmap(identity_map, c.inputs)) for c in test_file_containers),
            (list(flatmap(identity_map, c.masks)) for c in test_file_containers)))
    for testcase, var_mask in data:
        print("----------Test case----------\n")
        evals = get_evals([self.prediction_op], self.model)
        state, att_states, att_counts = get_initial_state(self.model)
        predicted_tokens = []
        for token, _mask in zip(testcase, var_mask):
            # Single-token step (batch 1, length 1). Renamed from `data` to
            # avoid shadowing the dataset being iterated above.
            step_data = (np.array([[token]]), np.array([[1]]),
                         np.array([0]), np.array([1]))
            feed_dict = construct_feed_dict(self.model, step_data, state,
                                            att_states, att_counts)
            results = session.run(evals, feed_dict=feed_dict)
            prediction, state, att_states, att_counts, att_vec, lambda_vec = \
                extract_results(results, evals, 1, self.model)
            predicted_token = self.inverse_map[prediction[0][0]].replace(
                "\n", "<newline>")
            current_token = self.inverse_map[token].replace("\n", "<newline>")
            predicted_tokens.append(current_token + " ; " + predicted_token)
        print("\n".join(predicted_tokens))
def __call__(self, session):
    """Run the model over each test case and render a "lagged attention"
    heatmap: one row per input position showing the attention weights over
    previously seen tokens, annotated with those tokens.

    Writes ./out/lagged_attention.png and prints "current ; predicted" pairs.

    Args:
        session: an active session used to evaluate the model's ops.
    """
    test_file_containers = pyreader.get_data(self.data_path, self.test_files,
                                             1, self.map)
    data = list(
        zip((list(flatmap(identity_map, c.inputs)) for c in test_file_containers),
            (list(flatmap(identity_map, c.masks)) for c in test_file_containers)))
    for testcase, var_mask in data:
        print("----------Test case----------\n")
        evals = get_evals([self.model.predict], self.model)
        state, att_states, att_counts = get_initial_state(self.model)
        accumulated_tokens = []
        plot_data = np.zeros([len(testcase), self.max_display])
        annotations = np.empty([len(testcase), self.max_display], dtype=object)
        y_labels = []
        predicted_labels = []
        for i, (token, mask) in enumerate(zip(testcase, var_mask)):
            att_mask = attention_masks(self.attns, np.array([mask]), 1)
            # Single-token step (batch 1, length 1). Renamed from `data` to
            # avoid shadowing the dataset being iterated above.
            step_data = (np.array([[token]]), np.array([[1]]),
                         np.array([att_mask]), np.array([1]))
            feed_dict = construct_feed_dict(self.model, step_data, state,
                                            att_states, att_counts)
            results = session.run(evals, feed_dict=feed_dict)
            prediction, state, att_states, att_counts, att_vec, lambda_vec = \
                extract_results(results, evals, 1, self.model)
            predicted = np.argmax(prediction)
            current_token = self.inverse_map[token]
            predicted_token = self.inverse_map[predicted]
            # Bound the token history to the attention window size.
            if len(accumulated_tokens) > self.max_attention:
                accumulated_tokens.pop(0)
            # att_vec[0] is 2-D; take the most recent `take` attention weights.
            m = att_vec[0].shape[1]
            take = min(m, len(accumulated_tokens))
            alphas = att_vec[0][0, m - take:]
            labels = np.array([clean_token(t) for t in accumulated_tokens])
            y_labels.append(current_token.replace("\n", "<newline>"))
            predicted_labels.append(predicted_token.replace("\n", "<newline>"))
            print("%s ; %s" % (current_token.replace("\n", "<newline>"),
                               predicted_token.replace("\n", "<newline>")))
            # Right-align weights/labels within the display width.
            begin = max(self.max_display - take, 0)
            plot_data[i, begin:] = alphas
            annotations[i, begin:] = labels
            if begin != 0:
                annotations[i, 0:begin] = ""
            accumulated_tokens.append(current_token)
        # Mark rows whose token was correctly predicted at the previous step.
        for i in range(1, len(y_labels)):
            if y_labels[i] == predicted_labels[i - 1]:
                y_labels[i] = "** " + y_labels[i]
        x_labels = [""] * self.max_display
        sns.set(font_scale=1.2)
        sns.set_style({"savefig.dpi": 100})
        ax = sns.heatmap(plot_data, cmap=plt.cm.Blues, linewidths=.1,
                         annot=annotations, fmt="", vmin=0, vmax=1, cbar=False,
                         xticklabels=x_labels, yticklabels=y_labels,
                         annot_kws={"size": 10})
        plt.yticks(rotation=0)
        fig = ax.get_figure()
        # Scale the figure with the number of columns/rows drawn, then save.
        fig.set_size_inches(int(self.max_display) * 1.3,
                            int(len(plot_data) / 3))
        fig.savefig('./out/lagged_attention.png')
        print("Generated file lagged_attention.png")
def __call__(self, session):
    """Run the model over the configured test cases (plus any test files) and
    render two heatmaps side by side: per-position attention weights over the
    recently seen masked tokens, and the lambda mixture weights ("LM"/"Att").

    Writes ./out/attention2.png.

    Args:
        session: an active session used to evaluate the model's ops.

    Raises:
        ValueError: if a test case and its variable mask differ in length.
    """
    test_file_containers = []
    if self.test_files:
        test_file_containers = pyreader.get_data(self.data_path,
                                                 self.test_files, 1, self.map)
    # Explicit in-memory test cases first, then flattened test-file data.
    data = list(zip(self.test_cases, self.var_masks)) + \
        list(zip((list(flatmap(identity_map, c.inputs))
                  for c in test_file_containers),
                 (list(flatmap(identity_map, c.masks))
                  for c in test_file_containers)))
    for testcase, var_mask in data:
        if len(testcase) != len(var_mask):
            raise ValueError(
                "Length of testcase does not match corresponding variable mask: %s"
                % testcase)
        print("----------Test case----------\n")
        evals = get_evals([self.model.predict], self.model)
        state, att_states, att_ids, att_counts = get_initial_state(self.model)
        prev_mask = False
        attns = []
        plot_data = np.zeros([len(testcase), self.max_attention])
        lambda_data = np.zeros([len(testcase), 2])
        annotations = np.empty([len(testcase), self.max_attention],
                               dtype=object)
        y_labels = []
        predicted_token = ""
        for i, (token, mask) in enumerate(zip(testcase, var_mask)):
            att_mask = attention_masks(self.attns, np.array([mask]), 1)
            # Single-token step. Renamed from `data` to avoid shadowing the
            # dataset being iterated above.
            step_data = (np.array([[token]]), np.array([[1]]),
                         np.array([att_mask]), np.array([[1]]), np.array([1]))
            feed_dict = construct_feed_dict(self.model, step_data, state,
                                            att_states, att_ids, att_counts)
            results = session.run(evals, feed_dict=feed_dict)
            prediction, state, att_states, att_ids, alpha_states, att_counts, lambda_vec = \
                extract_results(results, evals, 1, self.model)
            predicted = np.argmax(prediction)
            # If the previous token was masked, add it to the (bounded)
            # attention label window — one step delayed.
            if prev_mask:
                if len(attns) >= self.max_attention:
                    attns = attns[1:]
                attns.append(prev_token)
            prev_mask = mask
            prev_token = self.inverse_map[token]
            # Scale the attention row by the attention lambda only when that
            # lambda is small (< 0.1); otherwise plot the raw weights.
            plot_data[i, :] = alpha_states[0][0] * (
                lambda_vec[0, 1] if lambda_vec[0, 1] < 0.1 else 1)
            lambda_data[i, :] = lambda_vec
            labels = [""] * (self.max_attention - len(attns)) + attns
            annotations[i, :] = labels
            current_token = self.inverse_map[token]
            # "** " marks tokens the previous step predicted correctly,
            # "(*)" marks masked (variable) tokens.
            current_token = "%s%s%s" % (
                "** " if current_token == predicted_token else "",
                "(*)" if mask else "", current_token)
            y_labels.append(current_token)
            predicted_token = self.inverse_map[predicted]
        fig, (ax_data, ax_lambda) = plt.subplots(
            1, 2, gridspec_kw={'width_ratios': [self.max_attention, 2]})
        blank_x_labels = [""] * self.max_attention
        blank_y_labels = [""] * len(testcase)
        lambda_x_labels = ["LM", "Att"]
        sns.set(font_scale=1.2)
        sns.set_style({"savefig.dpi": 100})
        plt.yticks(rotation=0)
        ax_data = sns.heatmap(plot_data, ax=ax_data, cmap=plt.cm.Blues,
                              linewidths=.1, annot=annotations, fmt="",
                              vmin=0, vmax=1, cbar=False,
                              xticklabels=blank_x_labels,
                              yticklabels=y_labels, annot_kws={"size": 9})
        ax_lambda = sns.heatmap(lambda_data, ax=ax_lambda, cmap=plt.cm.Blues,
                                linewidths=.1, annot=False, fmt="",
                                vmin=0, vmax=1, cbar=False,
                                xticklabels=lambda_x_labels,
                                yticklabels=blank_y_labels,
                                annot_kws={"size": 9})
        ax_data.set_yticklabels(ax_data.yaxis.get_majorticklabels(),
                                rotation=0)
        ax_lambda.xaxis.tick_top()
        fig.set_size_inches(int(self.max_attention) * 1.3,
                            int(len(plot_data) / 3))
        fig.savefig('./out/attention2.png')
        print("Generated file attention2.png")
def __call__(self, session):
    """Run the model over each test case and render a "lagged attention"
    heatmap: one row per input position showing the attention weights over
    previously seen tokens, annotated with those tokens.

    Writes ./out/lagged_attention.png and prints "current ; predicted" pairs.

    Args:
        session: an active session used to evaluate the model's ops.
    """
    test_file_containers = pyreader.get_data(self.data_path, self.test_files,
                                             1, self.map)
    data = list(
        zip((list(flatmap(identity_map, c.inputs)) for c in test_file_containers),
            (list(flatmap(identity_map, c.masks)) for c in test_file_containers)))
    for testcase, var_mask in data:
        print("----------Test case----------\n")
        evals = get_evals([self.model.predict], self.model)
        state, att_states, att_counts = get_initial_state(self.model)
        accumulated_tokens = []
        plot_data = np.zeros([len(testcase), self.max_display])
        annotations = np.empty([len(testcase), self.max_display], dtype=object)
        y_labels = []
        predicted_labels = []
        for i, (token, mask) in enumerate(zip(testcase, var_mask)):
            att_mask = attention_masks(self.attns, np.array([mask]), 1)
            # Single-token step (batch 1, length 1). Renamed from `data` to
            # avoid shadowing the dataset being iterated above.
            step_data = (np.array([[token]]), np.array([[1]]),
                         np.array([att_mask]), np.array([1]))
            feed_dict = construct_feed_dict(self.model, step_data, state,
                                            att_states, att_counts)
            results = session.run(evals, feed_dict=feed_dict)
            prediction, state, att_states, att_counts, att_vec, lambda_vec = \
                extract_results(results, evals, 1, self.model)
            predicted = np.argmax(prediction)
            current_token = self.inverse_map[token]
            predicted_token = self.inverse_map[predicted]
            # Bound the token history to the attention window size.
            if len(accumulated_tokens) > self.max_attention:
                accumulated_tokens.pop(0)
            # att_vec[0] is 2-D; take the most recent `take` attention weights.
            m = att_vec[0].shape[1]
            take = min(m, len(accumulated_tokens))
            alphas = att_vec[0][0, m - take:]
            labels = np.array([clean_token(t) for t in accumulated_tokens])
            y_labels.append(current_token.replace("\n", "<newline>"))
            predicted_labels.append(predicted_token.replace("\n", "<newline>"))
            print("%s ; %s" % (current_token.replace("\n", "<newline>"),
                               predicted_token.replace("\n", "<newline>")))
            # Right-align weights/labels within the display width.
            begin = max(self.max_display - take, 0)
            plot_data[i, begin:] = alphas
            annotations[i, begin:] = labels
            if begin != 0:
                annotations[i, 0:begin] = ""
            accumulated_tokens.append(current_token)
        # Mark rows whose token was correctly predicted at the previous step.
        for i in range(1, len(y_labels)):
            if y_labels[i] == predicted_labels[i - 1]:
                y_labels[i] = "** " + y_labels[i]
        x_labels = [""] * self.max_display
        sns.set(font_scale=1.2)
        sns.set_style({"savefig.dpi": 100})
        ax = sns.heatmap(plot_data, cmap=plt.cm.Blues, linewidths=.1,
                         annot=annotations, fmt="", vmin=0, vmax=1, cbar=False,
                         xticklabels=x_labels, yticklabels=y_labels,
                         annot_kws={"size": 10})
        plt.yticks(rotation=0)
        fig = ax.get_figure()
        # Scale the figure with the number of columns/rows drawn, then save.
        fig.set_size_inches(int(self.max_display) * 1.3,
                            int(len(plot_data) / 3))
        fig.savefig('./out/lagged_attention.png')
        print("Generated file lagged_attention.png")
def __call__(self, session):
    """Run the model over the configured test cases (plus any test files) and
    render two heatmaps side by side: per-position attention weights over the
    recently seen masked tokens, and the lambda mixture weights ("LM"/"Att").

    Writes ./out/attention2.png.

    Args:
        session: an active session used to evaluate the model's ops.

    Raises:
        ValueError: if a test case and its variable mask differ in length.
    """
    test_file_containers = []
    if self.test_files:
        test_file_containers = pyreader.get_data(self.data_path,
                                                 self.test_files, 1, self.map)
    # Explicit in-memory test cases first, then flattened test-file data.
    data = list(zip(self.test_cases, self.var_masks)) + \
        list(zip((list(flatmap(identity_map, c.inputs))
                  for c in test_file_containers),
                 (list(flatmap(identity_map, c.masks))
                  for c in test_file_containers)))
    for testcase, var_mask in data:
        if len(testcase) != len(var_mask):
            raise ValueError(
                "Length of testcase does not match corresponding variable mask: %s"
                % testcase)
        print("----------Test case----------\n")
        evals = get_evals([self.model.predict], self.model)
        state, att_states, att_ids, att_counts = get_initial_state(self.model)
        prev_mask = False
        attns = []
        plot_data = np.zeros([len(testcase), self.max_attention])
        lambda_data = np.zeros([len(testcase), 2])
        annotations = np.empty([len(testcase), self.max_attention],
                               dtype=object)
        y_labels = []
        predicted_token = ""
        for i, (token, mask) in enumerate(zip(testcase, var_mask)):
            att_mask = attention_masks(self.attns, np.array([mask]), 1)
            # Single-token step. Renamed from `data` to avoid shadowing the
            # dataset being iterated above.
            step_data = (np.array([[token]]), np.array([[1]]),
                         np.array([att_mask]), np.array([[1]]), np.array([1]))
            feed_dict = construct_feed_dict(self.model, step_data, state,
                                            att_states, att_ids, att_counts)
            results = session.run(evals, feed_dict=feed_dict)
            prediction, state, att_states, att_ids, alpha_states, att_counts, lambda_vec = \
                extract_results(results, evals, 1, self.model)
            predicted = np.argmax(prediction)
            # If the previous token was masked, add it to the (bounded)
            # attention label window — one step delayed.
            if prev_mask:
                if len(attns) >= self.max_attention:
                    attns = attns[1:]
                attns.append(prev_token)
            prev_mask = mask
            prev_token = self.inverse_map[token]
            # Scale the attention row by the attention lambda only when that
            # lambda is small (< 0.1); otherwise plot the raw weights.
            plot_data[i, :] = alpha_states[0][0] * (
                lambda_vec[0, 1] if lambda_vec[0, 1] < 0.1 else 1)
            lambda_data[i, :] = lambda_vec
            labels = [""] * (self.max_attention - len(attns)) + attns
            annotations[i, :] = labels
            current_token = self.inverse_map[token]
            # "** " marks tokens the previous step predicted correctly,
            # "(*)" marks masked (variable) tokens.
            current_token = "%s%s%s" % (
                "** " if current_token == predicted_token else "",
                "(*)" if mask else "", current_token)
            y_labels.append(current_token)
            predicted_token = self.inverse_map[predicted]
        fig, (ax_data, ax_lambda) = plt.subplots(
            1, 2, gridspec_kw={'width_ratios': [self.max_attention, 2]})
        blank_x_labels = [""] * self.max_attention
        blank_y_labels = [""] * len(testcase)
        lambda_x_labels = ["LM", "Att"]
        sns.set(font_scale=1.2)
        sns.set_style({"savefig.dpi": 100})
        plt.yticks(rotation=0)
        ax_data = sns.heatmap(plot_data, ax=ax_data, cmap=plt.cm.Blues,
                              linewidths=.1, annot=annotations, fmt="",
                              vmin=0, vmax=1, cbar=False,
                              xticklabels=blank_x_labels,
                              yticklabels=y_labels, annot_kws={"size": 9})
        ax_lambda = sns.heatmap(lambda_data, ax=ax_lambda, cmap=plt.cm.Blues,
                                linewidths=.1, annot=False, fmt="",
                                vmin=0, vmax=1, cbar=False,
                                xticklabels=lambda_x_labels,
                                yticklabels=blank_y_labels,
                                annot_kws={"size": 9})
        ax_data.set_yticklabels(ax_data.yaxis.get_majorticklabels(),
                                rotation=0)
        ax_lambda.xaxis.tick_top()
        fig.set_size_inches(int(self.max_attention) * 1.3,
                            int(len(plot_data) / 3))
        fig.savefig('./out/attention2.png')
        print("Generated file attention2.png")
if "identifiers" in FLAGS.attention: FLAGS.attention.extend(["identifiers"] * (len(astwalker.possible_types()) - 1)) if __name__ == "__main__": adjust_flags() opts, args = getopt.getopt(sys.argv[1:], "p:", ["path="]) opt, arg = opts[0] path = arg if opt in ("-p", "--path") else None config = FLAGS word_to_id_path = os.path.join(config.vocab_file) with open(word_to_id_path, "rb") as f: word_to_id = pickle.load(f) vocab = {v: k for k, v in word_to_id.items()} data = pyreader.get_data(None, [path], config.seq_length, word_to_id)[0] inputs = np.pad(data.inputs, [(0, config.batch_size - len(data.inputs)), (0, 0)], "constant") targets = np.pad(data.targets, [(0, config.batch_size - len(data.targets)), (0, 0)], "constant") actual_lengths = np.pad(data.actual_lengths, (0, config.batch_size - len(data.actual_lengths)), "constant") masks = np.pad(data.masks, [(config.batch_size - len(data.masks), 0), (0, 0), (0, 0)], "constant") data = inputs, targets, masks, data.identifier_usage, actual_lengths with open(os.path.join(config.model_path, "config.pkl"), "rb") as config_file: model_config_dict = pickle.load(config_file) model_config_dict["batch_size"] = config.batch_size if "attention" not in model_config_dict: