def eval_model(model_type, attack_level, num_modified_words, percentage_attacked_samples):
    print("evaluate")
    print("%s white-box adversarial attack modifies %d words of %d%% of the instances: " % (
        attack_level, num_modified_words, percentage_attacked_samples))
    global model_conf
    if model_type == "lstm":
        import movieqa.conf_lstm as model_conf
    else:
        import movieqa.conf_cnn as model_conf

    if not tf.io.gfile.exists(data_conf.EVAL_DIR):
        tf.io.gfile.makedirs(data_conf.EVAL_DIR)
    util.save_config_values(data_conf, data_conf.TRAIN_DIR + "/data")
    util.save_config_values(model_conf, data_conf.TRAIN_DIR + "/model")

    filepath = data_conf.EVAL_RECORD_PATH + '/*'
    filenames = glob.glob(filepath)
    print("Evaluating adversarial attack on %s" % filenames)

    global_step = tf.contrib.framework.get_or_create_global_step()
    dataset = tf.contrib.data.TFRecordDataset(filenames)
    dataset = dataset.map(get_single_sample)
    batch_size = 1
    dataset = dataset.padded_batch(batch_size, padded_shapes=(
        [None], [ANSWER_COUNT, None], [None], (), [None, None], ()))
    iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
    next_q, next_a, next_l, next_plot_ids, next_plots, next_q_types = iterator.get_next()

    _, w_atts, s_atts, _ = predict_batch(model_type, [next_q, next_a, next_plots], training=False)
    if attack_level == "sentence":
        m_p = tf.compat.v1.py_func(remove_plot_sentence, [next_plots, s_atts, next_l], [tf.int64])[0]
    elif attack_level == "word":
        m_p = tf.compat.v1.py_func(modify_plot_sentence,
                                   [next_plots, w_atts, s_atts, next_l, num_modified_words,
                                    percentage_attacked_samples], [tf.int64])[0]
    logits, atts, sent_atts, pl_d = predict_batch(model_type, [next_q, next_a, m_p], training=False)

    next_q_types = tf.reshape(next_q_types, ())
    probabs = model.compute_probabilities(logits=logits)
    loss_example = model.compute_batch_mean_loss(logits, next_l, model_conf.LOSS_FUNC)
    accuracy_example = tf.reduce_mean(input_tensor=model.compute_accuracies(logits=logits, labels=next_l, dim=1))

    to_restore = tf.contrib.slim.get_variables_to_restore(exclude=["embeddings"])
    saver = tf.compat.v1.train.Saver(to_restore)
    summary_writer = tf.compat.v1.summary.FileWriter(data_conf.TRAIN_DIR)

    step = 0
    total_acc = 0.0
    total_prec = 0.0
    total_rank = 0.0
    total_loss = 0.0
    type_counts = np.zeros(6, dtype=np.int32)
    type_accs = np.zeros(6)
    max_sent_atts = {}
    max_atts = {}
    p_counts = 0
    last_p = ''

    with tf.compat.v1.Session() as sess:
        init_op = tf.group(tf.compat.v1.global_variables_initializer(),
                           tf.compat.v1.local_variables_initializer())
        sess.run(init_op)
        ckpt = tf.train.get_checkpoint_state(data_conf.TRAIN_DIR)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint file found')
        _ = sess.run(set_embeddings_op, feed_dict={place: vectors})
        coord = tf.train.Coordinator()
        threads = tf.compat.v1.train.start_queue_runners(sess=sess, coord=coord)
        try:
            while not coord.should_stop():
                loss_val, acc_val, probs_val, gs_val, q_type_val, q_val, atts_val, sent_atts_val, \
                    labels_val, p_val, a_val, p_id_val = sess.run(
                        [loss_example, accuracy_example, probabs, global_step, next_q_types, next_q,
                         atts, sent_atts, next_l, pl_d, next_a, next_plot_ids])
                type_accs[q_type_val + 1] += acc_val
                type_counts[q_type_val + 1] += 1
                predicted_probabilities = probs_val[0]
                sentence_attentions = sent_atts_val[0]
                total_loss += loss_val
                total_acc += acc_val
                pred_index = np.argmax(probs_val[0])
                labels = labels_val[0]
                gold = np.argmax(labels)
                filename = ''
                q_s = ''
                for index in q_val[0]:
                    word = (vocab[index])
                    q_s += (word + ' ')
                    filename += (word + '_')
                p_id = str(p_id_val[0].decode("utf-8"))
                path = data_conf.EVAL_DIR + "/plots/" + p_id + "/" + filename
                corr_ans = np.argmax(labels_val[0])
                max_att_val = np.argmax(sent_atts_val[0][corr_ans])
                att_row = np.max(atts_val[0][corr_ans][max_att_val], 1)
                red = np.max(atts_val[0][corr_ans][max_att_val], 1)
                att_inds = np.argsort(red)[::-1]
                if (p_id != last_p and p_counts < 20):
                    for i, a_att in enumerate(atts_val[0]):
                        a_att = np.mean(a_att, 2)
                        qa_s = q_s + "? (acc: " + str(acc_val) + ")\n "
                        for index in a_val[0][i]:
                            qa_s += (vocab[index] + ' ')
                        lv = " (label: " + str(int(labels_val[0][i])) + " - prediction: " + (
                            str("%.2f" % (probs_val[0][i] * 100))) + "%)"
                        qa_s += lv
                        a_sents = []
                        y_labels = []
                        for j, att in enumerate(a_att):
                            a_s = []
                            y_labels.append(str("%.2f" % (sent_atts_val[0][i][j] * 100)) + "%")
                            for index in p_val[0][j]:
                                a_s.append(vocab[index])
                            a_sents.append(a_s)
                        # util.plot_attention(np.array(a_att), np.array(a_sents), qa_s, y_labels, path, filename)
                    last_p = p_id
                    p_counts += 1
                m_ap = util.example_precision(probs_val[0], labels_val[0], 5)
                rank = util.example_rank(probs_val[0], labels_val[0], 5)
                total_prec += m_ap
                total_rank += rank
                print("Sample loss: " + str(loss_val))
                print("Sample acc: " + str(acc_val))
                print("Sample prec: " + str(m_ap))
                print("Sample rank: " + str(rank))
                util.print_predictions(data_conf.EVAL_DIR, step, gold, predicted_probabilities, data_conf.MODE)
                util.print_sentence_attentions(data_conf.EVAL_DIR, step, sentence_attentions)
                step += 1
                print("Total acc: " + str(total_acc / step))
                print("Total prec: " + str(total_prec / step))
                print("Total rank: " + str(total_rank / step))
                print("Local_step: " + str(step * batch_size))
                print("Global_step: " + str(gs_val))
                if attack_level == "word":
                    print("%d modified word(s)" % num_modified_words)
                print("===========================================")
        except tf.errors.OutOfRangeError:
            summary = tf.compat.v1.Summary()
            summary.value.add(tag='validation_loss', simple_value=total_loss / step)
            summary.value.add(tag='validation_accuracy', simple_value=(total_acc / step))
            summary_writer.add_summary(summary, gs_val)
            keys = util.get_question_keys()
            with open(data_conf.EVAL_DIR + "/accuracy.txt", "a") as file:
                file.write("global step: " + str(gs_val) + " - total accuracy: " + str(total_acc / step)
                           + "- total loss: " + str(total_loss / step) + str(num_modified_words) + "\n")
                file.write("Types (name / count / correct / accuracy):\n")
                for entry in zip(keys, type_counts, type_accs, (type_accs / type_counts)):
                    file.write(str(entry) + "\n")
                file.write("===================================================================" + "\n")
            util.save_eval_score(
                "global step: " + str(gs_val) + " - acc : " + str(total_acc / step)
                + " - total loss: " + str(total_loss / step) + " - " + data_conf.TRAIN_DIR + "_" + str(gs_val))
        finally:
            coord.request_stop()
            coord.join(threads)
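# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# eval_model() above restores the latest checkpoint from data_conf.TRAIN_DIR, perturbs the
# plot input on the fly, and evaluates one instance per step (batch_size = 1) until the
# TFRecord dataset is exhausted. The argument values below are assumptions, not taken from
# this code; attack_level can also be "sentence", which calls remove_plot_sentence instead.
def _example_whitebox_eval():
    eval_model(model_type="lstm",
               attack_level="word",
               num_modified_words=2,
               percentage_attacked_samples=50)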
def train_model(model_type, attack_level, num_modified_words, percentage_attacked_samples):
    print("train")
    print("%s white-box adversarial attack modifies %d words of %d%% of the instances: " % (
        attack_level, num_modified_words, percentage_attacked_samples))
    global model_conf
    if model_type == "lstm":
        import movieqa.conf_lstm as model_conf
    else:
        import movieqa.conf_cnn as model_conf

    global_step = tf.contrib.framework.get_or_create_global_step()
    init = False
    if not tf.io.gfile.exists(data_conf.TRAIN_DIR):
        init = True
        print("RESTORING WEIGHTS")
        tf.io.gfile.makedirs(data_conf.TRAIN_DIR)
    util.save_config_values(data_conf, data_conf.TRAIN_DIR + "/data")
    util.save_config_values(model_conf, data_conf.TRAIN_DIR + "/model")

    filenames = glob.glob(data_conf.TRAIN_RECORD_PATH + '/*')
    print("Reading training dataset from %s" % filenames)
    dataset = tf.contrib.data.TFRecordDataset(filenames)
    dataset = dataset.map(get_single_sample)
    dataset = dataset.shuffle(buffer_size=9000)
    dataset = dataset.repeat(data_conf.NUM_EPOCHS)
    batch_size = data_conf.BATCH_SIZE
    dataset = dataset.padded_batch(data_conf.BATCH_SIZE, padded_shapes=(
        [None], [ANSWER_COUNT, None], [None], (), [None, None], ()))
    iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
    next_q, next_a, next_l, next_plot_ids, next_plots, next_q_types = iterator.get_next()

    _, w_atts, s_atts, _ = predict_batch(model_type, [next_q, next_a, next_plots], training=True)
    if attack_level == "sentence":
        m_p = tf.compat.v1.py_func(remove_plot_sentence, [next_plots, s_atts, next_l], [tf.int64])[0]
    elif attack_level == "word":
        m_p = tf.compat.v1.py_func(modify_plot_sentence, [next_plots, w_atts, s_atts, next_l], [tf.int64])[0]
    logits, _, _, _ = predict_batch(model_type, [next_q, next_a, m_p], training=True)

    probabs = model.compute_probabilities(logits=logits)
    loss_batch = model.compute_batch_mean_loss(logits, next_l, model_conf.LOSS_FUNC)
    accuracy = model.compute_accuracies(logits=logits, labels=next_l, dim=1)
    accuracy_batch = tf.reduce_mean(input_tensor=accuracy)
    tf.compat.v1.summary.scalar("train_accuracy", accuracy_batch)
    tf.compat.v1.summary.scalar("train_loss", loss_batch)
    training_op = update_op(loss_batch, global_step, model_conf.OPTIMIZER, model_conf.INITIAL_LEARNING_RATE)

    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    with tf.compat.v1.train.MonitoredTrainingSession(
            checkpoint_dir=data_conf.TRAIN_DIR,
            save_checkpoint_secs=60,
            save_summaries_steps=5,
            hooks=[tf.estimator.StopAtStepHook(last_step=model_conf.MAX_STEPS), ],
            config=config) as sess:
        step = 0
        total_acc = 0.0
        if init:
            _ = sess.run(set_embeddings_op, feed_dict={place: vectors})
        while not sess.should_stop():
            _, loss_val, acc_val, probs_val, lab_val, gs_val = sess.run(
                [training_op, loss_batch, accuracy_batch, probabs, next_l, global_step])
            print(probs_val)
            print(lab_val)
            print("Batch loss: " + str(loss_val))
            print("Batch acc: " + str(acc_val))
            step += 1
            total_acc += acc_val
            print("Total acc: " + str(total_acc / step))
            print("Local_step: " + str(step * batch_size))
            print("Global_step: " + str(gs_val))
            print("===========================================")
    util.copy_model(data_conf.TRAIN_DIR, gs_val)
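# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# train_model() above re-trains the selected model on plots that are modified by the same
# white-box attack during training. Note that its word-level branch calls
# modify_plot_sentence without num_modified_words / percentage_attacked_samples, so within
# this function those two parameters only appear in the printed description. Example call
# (argument values are assumptions):
def _example_whitebox_train():
    train_model(model_type="cnn",
                attack_level="sentence",
                num_modified_words=0,
                percentage_attacked_samples=100)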
def run_creation(model_type, attack, model_folder, examples_folder, instances_to_attack):
    print("store created examples in %s" % examples_folder)
    if model_type == "lstm":
        import movieqa.run_lstm as runner
    else:
        import movieqa.run_cnn as runner
    runner.data_conf.TRAIN_DIR = model_folder

    load = False
    check_sents = []
    check_found = []
    check_num = 0
    corr_probs = []
    if not tf.gfile.Exists(examples_folder):
        tf.gfile.MakeDirs(examples_folder)
    else:
        checkpoints = glob.glob(examples_folder + "/[!accuracies]*")
        checkpoints = sorted(checkpoints, reverse=True)
        latest = checkpoints[0]
        splitted = latest.split(".txt")[0]
        check_num = int(splitted[len(splitted) - 1]) + 1
        check = open(latest, encoding="utf8")
        for line in check:
            parts = line.replace('\n', '').split("\t")
            check_words = parts[0].split(" ")
            check_sents.append(check_words)
            last_prob = float(parts[1])
            found = parts[2]
            if found == 'True':
                b_found = True
            else:
                b_found = False
            corr_probs.append(last_prob)
            check_found.append(b_found)
        load = True

    emb_dir = runner.data_conf.EMBEDDING_DIR
    vectors, vocab = util.load_embeddings(emb_dir)
    rev_vocab = dict(zip(vocab.values(), vocab.keys()))
    # print(rev_vocab)

    filename = "adversarial_addAny/common_english.txt"
    # length of the distractor sentence
    d = 10
    # pool size of common words to sample from for each word in the distractor sentence
    poolsize = 10

    common_words = {}
    fin = open(filename, encoding="utf8")
    for line in fin:
        word = line.replace('\n', '')
        # print(word)
        if word in rev_vocab:
            common_words[word] = rev_vocab[word]
        else:
            print('ERROR: word "%s" not in vocab. Run add_common_words_to_vocab.py first.' % word)
            exit(1)

    with open(instances_to_attack + '/val.pickle', 'rb') as handle:
        qa = pickle.load(handle)

    w_s = []
    w_choices = []
    w_found = []
    q_inds = []
    pools = []
    with open(examples_folder + "/" + str(0 + check_num) + ".txt", "a") as file:
        for k, question in enumerate(qa):
            # load question indices
            q_words = util.normalize_text(question.question)
            q_ind = []
            for word in q_words:
                q_ind.append(rev_vocab[word])
            a_words = []
            for i, answer in enumerate(question.answers):
                if not i == int(question.correct_index):
                    words = util.normalize_text(answer)
                    a_words.extend(words)
            w = []
            w_choice = []
            rand_sent = ""
            for i in range(0, d):
                if load:
                    c_word = check_sents[k][i]
                    w_index = rev_vocab[c_word]
                    rand_sent += (c_word + " ")
                else:
                    w_index = random.choice(list(common_words.values()))
                    rand_sent += (vocab[w_index] + " ")
                    w_found.append(False)
                w.append(w_index)
                w_choice.append(i)
            if load:
                found = check_found[k]
                w_found.append(found)
                # file.write(rand_sent+"\t"+str(corr_probs[k])+"\t"+str(found)+"\n")
            else:
                found = False
                w_found.append(found)
                file.write(rand_sent + "\t" + "1.0" + "\t" + str(found) + "\n")
            shuffle(w_choice)
            w_choices.append(w_choice)
            w_s.append(w)
            d_pools = []
            for j, dj in enumerate(w):
                pool = []
                random_common_words = np.random.choice(list(common_words.values()), poolsize, replace=False)
                print("Adding common words")
                pool.extend(random_common_words)
                if attack == 'addQ' or attack == "addQA":
                    print("Adding question words")
                    for word in q_words:
                        pool.append(rev_vocab[word])
                if attack == "addA" or attack == "addQA":
                    print("Adding answer words")
                    for word in a_words:
                        pool.append(rev_vocab[word])
                shuffle(pool)
                d_pools.append(pool)
            pools.append(d_pools)

    filepath = instances_to_attack + "/*.tfrecords"
    filenames = glob.glob(filepath)
    global_step = tf.contrib.framework.get_or_create_global_step()
    dataset = tf.contrib.data.TFRecordDataset(filenames)
    dataset = dataset.map(runner.get_single_sample)
    dataset = dataset.repeat(poolsize * d)
    batch_size = 1
    dataset = dataset.padded_batch(batch_size, padded_shapes=([None], [5, None], [None], (), [None, None], ()))
    iterator = dataset.make_one_shot_iterator()
    next_q, next_a, next_l, next_plot_ids, next_plots, next_q_types = iterator.get_next()

    add_sent = tf.placeholder(tf.int64, shape=[None])
    # sent_exp = tf.expand_dims(add_sent,0)
    m_p = tf.py_func(add_plot_sentence, [next_plots, add_sent], [tf.int64])[0]
    # m_p = next_plots
    # m_p = tf.concat([next_plots,sent_exp],axis=0)
    logits, atts, sent_atts, _ = runner.predict_batch([next_q, next_a, m_p], training=False)
    probabs = model.compute_probabilities(logits=logits)
    accuracy_example = tf.reduce_mean(model.compute_accuracies(logits=logits, labels=next_l, dim=1))
    to_restore = tf.contrib.slim.get_variables_to_restore(exclude=["embeddings"])
    saver = tf.train.Saver(to_restore)

    p_counts = 0
    last_p = ''
    p_id = 0
    f_counter = 0

    with tf.Session() as sess:
        init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        sess.run(init_op)
        ckpt = tf.train.get_checkpoint_state(runner.data_conf.TRAIN_DIR)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint file found')
        _ = sess.run(runner.set_embeddings_op, feed_dict={runner.place: vectors})
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        if not load:
            accs = np.ones(shape=(len(qa)))
        else:
            accs = corr_probs

        for w_counter in range(0, d):
            words = np.zeros(shape=(len(qa)), dtype=np.int64)
            # select next word to optimize greedily
            next_inds = []
            for k, question in enumerate(qa):
                next_word = w_choices[k].pop()
                next_inds.append(next_word)
                words[k] = w_s[k][next_word]
            # go through whole pool for every question
            next_ind = 0
            for pool_counter in range(0, poolsize):
                total_acc = 0.0
                info = ""
                for k, question in enumerate(qa):
                    w_copy = [x for x in w_s[k]]
                    print("==============")
                    next_ind = next_inds[k]
                    pool = pools[k][next_ind]
                    pool_ind = pool.pop()
                    print("setting " + str(w_s[k][next_ind]) + " to " + str(pool_ind))
                    w_copy[next_ind] = pool_ind
                    info = "wordcounter: " + str(w_counter) + " - poolcounter: " + str(
                        pool_counter) + " - question: " + str(k)
                    print(info)
                    acc_val, probs_val, gs_val, q_type_val, q_val, atts_val, sent_atts_val, \
                        labels_val, p_val, a_val, p_id_val = sess.run(
                            [accuracy_example, probabs, global_step, next_q_types, next_q, atts,
                             sent_atts, next_l, m_p, next_a, next_plot_ids],
                            feed_dict={add_sent: w_copy})
                    sent = ""
                    for word in w_copy:
                        sent += (" " + vocab[word])
                    print(sent + " - acc: " + str(acc_val))
                    corr = np.argmax(labels_val[0])
                    pred_val = probs_val[0][corr]
                    if (pred_val < accs[k]):
                        word_s = vocab[words[k]]
                        pool_s = vocab[pool_ind]
                        print(pool_s + " (" + str(pred_val) + ") < " + word_s + " (" + str(accs[k]) + ")")
                        words[k] = pool_ind
                        accs[k] = pred_val
                        if acc_val == 0:
                            print("setting" + str(k) + " to true with acc" + str(acc_val)
                                  + " and pred " + str(pred_val))
                            w_found[k] = True
                            f_counter += 1
                    filename = ''
                    q_s = ''
                    for index in q_val[0]:
                        word = (vocab[index])
                        q_s += (word + ' ')
                        filename += (word + '_')
                    predicted_probabilities = probs_val[0]
                    labels = labels_val[0]
                    p_id = 'test'
                    path = runner.data_conf.EVAL_DIR + "/plots/" + p_id + "/" + filename
                    if (p_counts < 20):
                        for i, a_att in enumerate(atts_val[0]):
                            # a_att = np.max(a_att, 1)
                            qa_s = q_s + "? (acc: " + str(acc_val) + ")\n "
                            for index in a_val[0][i]:
                                qa_s += (vocab[index] + ' ')
                            lv = " (label: " + str(int(labels[i])) + " - prediction: " + (
                                str("%.2f" % (predicted_probabilities[i] * 100))) + "%)"
                            qa_s += lv
                            a_sents = []
                            y_labels = []
                            for j, att in enumerate(a_att):
                                a_s = []
                                y_labels.append(str("%.2f" % (sent_atts_val[0][i][j] * 100)) + "%")
                                for index in p_val[0][j]:
                                    a_s.append(vocab[index])
                                a_sents.append(a_s)
                            util.plot_attention(np.array(a_att), np.array(a_sents), qa_s, y_labels, path, filename)
                        last_p = p_id
                        p_counts += 1
                    total_acc += acc_val
                    print(total_acc / (k + 1))
                with open(examples_folder + "/accuracies.txt", "a") as file:
                    file.write(info + " - " + str(total_acc / (len(qa))) + "\n")
            with open(examples_folder + "/" + str(w_counter + check_num + 1) + ".txt", "a") as file:
                for k, question in enumerate(qa):
                    w_s[k][next_ind] = words[k]
                    sent = ""
                    for word in w_s[k]:
                        sent += (vocab[word] + " ")
                    file.write(sent + "\t" + str(accs[k]) + "\t" + str(w_found[k]) + "\n")
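# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# run_creation() above greedily builds a d=10-word distractor sentence per question: for
# each word position it tries poolsize candidates from a shuffled pool and keeps a
# candidate whenever it lowers the predicted probability of the correct answer. With
# attack set to "addQ"/"addA"/"addQA" the pool also includes question and/or (incorrect)
# answer words; any other value uses the sampled common English words only. The folder
# arguments below are placeholder assumptions; instances_to_attack must contain
# val.pickle and the *.tfrecords files of the instances to attack.
def _example_addany_creation():
    run_creation(model_type="cnn",
                 attack="addQA",
                 model_folder="trained_models/cnn",             # checkpoint dir (assumed path)
                 examples_folder="created_examples/cnn_addQA",  # output dir (assumed path)
                 instances_to_attack="records/val")             # val.pickle + *.tfrecords (assumed path)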
def eval_model():
    if not tf.io.gfile.exists(data_conf.EVAL_DIR):
        tf.io.gfile.makedirs(data_conf.EVAL_DIR)
    util.save_config_values(data_conf, data_conf.EVAL_DIR + "/data_")
    util.save_config_values(model_conf, data_conf.EVAL_DIR + "/model_")

    filepath = data_conf.EVAL_RECORD_PATH + '/*'
    filenames = glob.glob(filepath)
    global_step = tf.compat.v1.train.get_or_create_global_step()
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.map(get_single_sample)
    batch_size = 1
    dataset = dataset.padded_batch(batch_size, padded_shapes=(
        [None], [ANSWER_COUNT, None], [None], (), [None, None], ()))
    iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
    next_q, next_a, next_l, next_plot_ids, next_plots, next_q_types = iterator.get_next()

    logits, word_atts, sent_atts, pl_d = predict_batch([next_q, next_a, next_plots], training=False)
    next_q_types = tf.reshape(next_q_types, ())
    probabs = model.compute_probabilities(logits=logits)
    loss_example = model.compute_batch_mean_loss(logits, next_l, model_conf.LOSS_FUNC)
    accuracy_example = tf.reduce_mean(input_tensor=model.compute_accuracies(logits=logits, labels=next_l, dim=1))

    # do not restore embeddings in case the vocabulary size has changed
    # to_restore = tf.contrib.slim.get_variables_to_restore(exclude=["embeddings"])
    saver = tf.compat.v1.train.Saver()
    summary_writer = tf.compat.v1.summary.FileWriter(data_conf.TRAIN_DIR)

    step = 0
    total_acc = 0.0
    total_loss = 0.0
    p_counts = 0
    last_p = ''
    type_counts = np.zeros(6, dtype=np.int32)
    type_accs = np.zeros(6)

    with tf.compat.v1.Session() as sess:
        init_op = tf.group(tf.compat.v1.global_variables_initializer(),
                           tf.compat.v1.local_variables_initializer())
        sess.run(init_op)
        ckpt = tf.train.get_checkpoint_state(data_conf.TRAIN_DIR)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint file found')
        coord = tf.train.Coordinator()
        threads = tf.compat.v1.train.start_queue_runners(sess=sess, coord=coord)
        sess.run(set_embeddings_op, feed_dict={place: vectors})
        try:
            while not coord.should_stop():
                loss_val, acc_val, probs_val, gs_val, q_type_val, q_val, labels_val, p_val, a_val, p_id_val, \
                    atts_val, sent_atts_val = sess.run(
                        [loss_example, accuracy_example, probabs, global_step, next_q_types, next_q,
                         next_l, next_plots, next_a, next_plot_ids, word_atts, sent_atts])
                total_loss += loss_val
                total_acc += acc_val
                predicted_probabilities = probs_val[0]
                sentence_attentions = sent_atts_val[0]
                pred_index = np.argmax(probs_val[0])
                labels = labels_val[0]
                gold = np.argmax(labels)
                filename = ''
                q_s = ''
                for index in q_val[0]:
                    word = (vocab[index])
                    q_s += (word + ' ')
                    filename += (word + '_')
                filename += "?"
                p_id = str(p_id_val[0].decode("utf-8"))
                path = data_conf.EVAL_DIR + "/plots/" + p_id + "_" + str(step) + "/"  # + filename
                # write attention heat-map
                if (p_id != last_p and p_counts < data_conf.PLOT_SAMPLES_NUM):
                    # if True:
                    for i, a_att in enumerate(atts_val[0]):
                        # a_att = np.mean(a_att, 2)
                        qa_s = q_s + "? (acc: " + str(acc_val) + ")\n "
                        for index in a_val[0][i]:
                            word = vocab[index]
                            qa_s += (word + ' ')
                            filename += word + "_"
                        lv = " (label: " + str(int(labels[i])) + " - prediction: " + (
                            str("%.2f" % (predicted_probabilities[i] * 100))) + "%)"
                        qa_s += lv
                        a_sents = []
                        y_labels = []
                        for j, att in enumerate(a_att):
                            a_s = []
                            y_labels.append(str("%.2f" % (sent_atts_val[0][i][j] * 100)) + "%")
                            for index in p_val[0][j]:
                                a_s.append(vocab[index])
                            a_sents.append(a_s)
                        util.plot_attention(np.array(a_att), np.array(a_sents), qa_s, y_labels, path, filename)
                    last_p = p_id
                    p_counts += 1
                print("Sample loss: " + str(loss_val))
                print("Sample labels: " + str(labels))
                print("Sample probabilities: " + str(predicted_probabilities))
                print("Sample acc: " + str(acc_val))
                util.print_predictions(data_conf.EVAL_DIR, step, gold, predicted_probabilities, data_conf.MODE)
                util.print_sentence_attentions(data_conf.EVAL_DIR, step, sentence_attentions)
                step += 1
                print("Total acc: " + str(total_acc / step))
                print("Local_step: " + str(step * batch_size))
                print("Global_step: " + str(gs_val))
                print("===========================================")
        except tf.errors.OutOfRangeError:
            summary = tf.compat.v1.Summary()
            summary.value.add(tag='validation_loss', simple_value=total_loss / step)
            summary.value.add(tag='validation_accuracy', simple_value=(total_acc / step))
            summary_writer.add_summary(summary, gs_val)
            keys = util.get_question_keys()
            if data_conf.MODE == "val":
                with open(data_conf.EVAL_DIR + "/val_accuracy.txt", "a") as file:
                    file.write("global step: " + str(gs_val) + " - total accuracy: " + str(total_acc / step)
                               + "- total loss: " + str(total_loss / step) + "\n")
                    file.write("Types (name / count / correct / accuracy):\n")
                    for entry in zip(keys, type_counts, type_accs, (type_accs / type_counts)):
                        file.write(str(entry) + "\n")
                    file.write("===================================================================" + "\n")
            util.save_eval_score(
                "global step: " + str(gs_val) + " - acc : " + str(total_acc / step)
                + " - total loss: " + str(total_loss / step) + " - " + data_conf.TRAIN_DIR + "_" + str(gs_val))
        finally:
            coord.request_stop()
            coord.join(threads)
def train_model():
    print("train RNN-LSTM model")
    global_step = tf.compat.v1.train.get_or_create_global_step()
    if not tf.io.gfile.exists(data_conf.TRAIN_DIR):
        print("RESTORING WEIGHTS")
        tf.io.gfile.makedirs(data_conf.TRAIN_DIR)
    util.save_config_values(data_conf, data_conf.TRAIN_DIR + "/data")
    util.save_config_values(model_conf, data_conf.TRAIN_DIR + "/model")

    filenames = glob.glob(data_conf.TRAIN_RECORD_PATH + '/*')
    print("Reading training dataset from %s" % filenames)
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.map(get_single_sample)
    dataset = dataset.shuffle(buffer_size=9000)
    dataset = dataset.repeat(model_conf.NUM_EPOCHS)
    batch_size = model_conf.BATCH_SIZE
    dataset = dataset.padded_batch(model_conf.BATCH_SIZE, padded_shapes=(
        [None], [ANSWER_COUNT, None], [None], (), [None, None], ()))
    iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
    next_q, next_a, next_l, next_plot_ids, next_plots, next_q_types = iterator.get_next()

    logits, _, _, _ = predict_batch([next_q, next_a, next_plots], training=True)
    probabs = model.compute_probabilities(logits=logits)
    loss_batch = model.compute_batch_mean_loss(logits, next_l, model_conf.LOSS_FUNC)
    accuracy = model.compute_accuracies(logits=logits, labels=next_l, dim=1)
    accuracy_batch = tf.reduce_mean(input_tensor=accuracy)
    tf.compat.v1.summary.scalar("train_accuracy", accuracy_batch)
    tf.compat.v1.summary.scalar("train_loss", loss_batch)
    training_op = update_op(loss_batch, global_step, model_conf.OPTIMIZER, model_conf.INITIAL_LEARNING_RATE)

    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.OFF

    with tf.compat.v1.train.MonitoredTrainingSession(
            checkpoint_dir=data_conf.TRAIN_DIR,
            save_checkpoint_secs=60,
            save_summaries_steps=5,
            hooks=[tf.estimator.StopAtStepHook(last_step=model_conf.MAX_STEPS), ],
            config=config) as sess:
        step = 0
        total_acc = 0.0
        # print("Feeding embeddings %s of size %s" % (str(vectors), len(vectors)))
        sess.run(set_embeddings_op, feed_dict={place: vectors})
        while not sess.should_stop():
            _, loss_val, acc_val, probs_val, lab_val, gs_val = sess.run(
                [training_op, loss_batch, accuracy_batch, probabs, next_l, global_step])
            print(probs_val)
            print(lab_val)
            print("Batch loss: " + str(loss_val))
            print("Batch acc: " + str(acc_val))
            step += 1
            total_acc += acc_val
            print("Total acc: " + str(total_acc / step))
            print("Local_step: " + str(step * batch_size))
            print("Global_step: " + str(gs_val))
            print("===========================================")
    util.copy_model(data_conf.TRAIN_DIR, gs_val)
def eval_model():
    if not tf.gfile.Exists(data_conf.EVAL_DIR):
        tf.gfile.MakeDirs(data_conf.EVAL_DIR)
    util.save_config_values(data_conf, data_conf.EVAL_DIR + "/data")
    util.save_config_values(model_conf, data_conf.EVAL_DIR + "/model")

    filepath = data_conf.EVAL_RECORD_PATH + '/*'
    filenames = glob.glob(filepath)
    print("Evaluate model on %s" % str(filenames))
    global_step = tf.contrib.framework.get_or_create_global_step()
    dataset = tf.contrib.data.TFRecordDataset(filenames)
    dataset = dataset.map(get_single_sample)
    batch_size = 1
    dataset = dataset.padded_batch(batch_size, padded_shapes=(
        [None], [ANSWER_COUNT, None], [None], (), [None], ()))
    iterator = dataset.make_one_shot_iterator()
    next_q, next_a, next_l, next_plot_ids, next_plots, next_q_types = iterator.get_next()

    logits = predict_batch([next_q, next_a, next_plots], training=False)
    next_q_types = tf.reshape(next_q_types, ())
    probabs = model.compute_probabilities(logits=logits)
    loss_example = model.compute_batch_mean_loss(logits, next_l, model_conf.LOSS_FUNC)
    accuracy_example = tf.reduce_mean(model.compute_accuracies(logits=logits, labels=next_l, dim=1))

    saver = tf.train.Saver()
    summary_writer = tf.summary.FileWriter(data_conf.TRAIN_DIR)

    step = 0
    total_acc = 0.0
    total_loss = 0.0
    type_counts = np.zeros(6, dtype=np.int32)
    type_accs = np.zeros(6)

    with tf.Session() as sess:
        init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        sess.run(init_op)
        ckpt = tf.train.get_checkpoint_state(data_conf.TRAIN_DIR)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint file found')
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            while not coord.should_stop():
                loss_val, acc_val, probs_val, gs_val, q_type_val, labels_val = sess.run(
                    [loss_example, accuracy_example, probabs, global_step, next_q_types, next_l])
                predicted_probabilities = probs_val[0]
                pred_index = np.argmax(probs_val[0])
                labels = labels_val[0]
                gold = np.argmax(labels)
                type_accs[q_type_val + 1] += acc_val
                type_counts[q_type_val + 1] += 1
                total_loss += loss_val
                total_acc += acc_val
                print("Sample loss: " + str(loss_val))
                print("Sample acc: " + str(acc_val))
                util.print_predictions(data_conf.EVAL_DIR, step, gold, predicted_probabilities, data_conf.MODE)
                step += 1
                print("Total acc: " + str(total_acc / step))
                print("Local_step: " + str(step * batch_size))
                print("Global_step: " + str(gs_val))
                print("===========================================")
        except tf.errors.OutOfRangeError:
            summary = tf.Summary()
            summary.value.add(tag='validation_loss', simple_value=total_loss / step)
            summary.value.add(tag='validation_accuracy', simple_value=(total_acc / step))
            summary_writer.add_summary(summary, gs_val)
            keys = util.get_question_keys()
            if data_conf.MODE == "val":
                with open(data_conf.EVAL_DIR + "/val_accuracy.txt", "a") as file:
                    file.write("global step: " + str(gs_val) + " - total accuracy: " + str(total_acc / step)
                               + "- total loss: " + str(total_loss / step) + "\n")
                    file.write("Types (name / count / correct / accuracy):\n")
                    for entry in zip(keys, type_counts, type_accs, (type_accs / type_counts)):
                        file.write(str(entry) + "\n")
                    file.write("===================================================================" + "\n")
            util.save_eval_score(
                "global step: " + str(gs_val) + " - acc : " + str(total_acc / step)
                + " - total loss: " + str(total_loss / step) + " - " + data_conf.TRAIN_DIR + "_" + str(gs_val))
        finally:
            coord.request_stop()
            coord.join(threads)