import copy
import time

import numpy as np
import tensorflow as tf
import keras

# `Augmenter` and `NeuralCoverage` are project-local classes; import them from
# their modules in this repo before using the helpers below.


def get_misclassified_perturbations(model, md, x_part, y_part):
    """Print loss, coverage diff, and prediction for each worst-of-10 round."""
    au = Augmenter()
    temp_x_original_train = copy.deepcopy(x_part)
    x_aug, y_train = au.worst_of_10(temp_x_original_train, y_part)

    # Preprocess the 10 perturbed variants of every image.
    x_aug_after_preprocess = []
    for i in range(len(x_aug)):
        temp = copy.deepcopy(x_aug[i])
        x_aug_after_preprocess.append(md.preprocess_original_imgs(temp))

    fault_index = 0
    nc = NeuralCoverage(model)
    o1 = nc.get_layer_output(md.preprocess_original_imgs(x_part))
    y_true = np.argmax(y_part, axis=1)

    for i in range(10):
        print("========================= Round", i, "============================")
        set_i = np.array(x_aug_after_preprocess)[:, i]
        y_predict = model.predict(set_i)

        # Per-sample cross-entropy loss for this perturbation round.
        y_true1 = tf.convert_to_tensor(np.array(y_part, dtype='float32'))
        y_pred1 = tf.convert_to_tensor(y_predict)
        loss1 = keras.losses.categorical_crossentropy(y_true1, y_pred1)
        loss1 = keras.backend.get_value(loss1)

        j = 745  # hard-coded sample index used for debugging output
        print(loss1[j])
        y_predict = np.argmax(y_predict, axis=1)

        # Coverage difference of this round against the original images.
        o2 = nc.get_layer_output(set_i)
        diffs = nc.compare_output(o2, o1, y_part)
        print("coverage:", diffs[j])
        print("true_" + str(y_true[j]) + "_pred_" + str(y_predict[j]))
def memory_test(original_target=None, model=None, x=None, x_10=None):
    """Select the worst image per sample based on neural coverage."""
    nc = NeuralCoverage(model)
    cov_diff = []
    for i in range(len(x_10)):
        x_10[i] = original_target.preprocess_original_imgs(x_10[i])
    x = original_target.preprocess_original_imgs(x)
    origin_cov = nc.get_layer_output(x)

    for i in range(10):
        set_i = np.array(x_10)[:, i]
        start_time = time.time()
        cov = nc.get_layer_output(set_i)
        print("--- first %s seconds ---" % (time.time() - start_time))
        # Note: arguments are passed as (origin_cov, cov) here, the reverse of
        # the other call sites, which pass the perturbed output first.
        cov_diff_i = nc.compare_output(origin_cov, cov)
        print("--- %s seconds ---" % (time.time() - start_time))
        del cov[:]
        del cov
        cov_diff.append(cov_diff_i)

    y_argmax = np.argmax(cov_diff, axis=0)
    print(y_argmax)
    del origin_cov[:]
    del origin_cov
    for j in range(len(y_argmax)):
        index = y_argmax[j]
        x[j] = x_10[j][index]  # update x_train in place (not good design)
    return x
def select_worst_cov(md, model, original_x=None, x_10=None, y=None):
    """Select the worst image per sample based on neural coverage."""
    nc = NeuralCoverage(model)
    cov_diff = []
    loss = []
    for i in range(len(x_10)):
        x_10[i] = md.preprocess_original_imgs(x_10[i])
    origin_cov = nc.get_layer_output(original_x)
    # class_cov = nc.generate_cov_for_class(origin_cov, y)
    # del origin_cov[:]
    # del origin_cov
    print(np.argmax(y, axis=1))

    def categorical_crossentropy_wrapper(y_true, y_pred):
        # Clip predictions to avoid log(0) in the cross-entropy.
        y_pred = keras.backend.clip(y_pred, 1e-8, 1 - 1e-8)
        return keras.losses.categorical_crossentropy(y_true, y_pred)

    for i in range(10):
        set_i = np.array(x_10)[:, i]
        cov = nc.get_layer_output(set_i)
        # cov_diff_i = nc.compare_class_output(cov, class_cov, y)
        cov_diff_i = nc.compare_output(cov, origin_cov, y)
        del cov[:]
        del cov
        cov_diff.append(cov_diff_i)

        # Per-sample loss for this perturbation round.
        y_predict_temp = model.predict(set_i)
        y_true1 = tf.convert_to_tensor(np.array(y, dtype='float32'))
        y_pred1 = tf.convert_to_tensor(y_predict_temp)
        # loss1 = keras.losses.categorical_crossentropy(y_true1, y_pred1)
        loss1 = categorical_crossentropy_wrapper(y_true1, y_pred1)
        loss1 = keras.backend.get_value(loss1)
        loss.append(loss1)
        print(np.argmax(y_predict_temp, axis=1))
        print(cov_diff_i)
        print(loss1)

    # Compare the round picked by coverage diff with the one picked by loss.
    y_argmax = np.argmax(cov_diff, axis=0)
    y_argmax_loss = np.argmax(loss, axis=0)
    for j in range(len(y_argmax)):
        print(y_argmax[j], y_argmax_loss[j])
def generate_cov(model, origin_x, x, y):
    nc = NeuralCoverage(model)
    o1 = nc.get_layer_output(origin_x)
    o2 = nc.get_layer_output(x)
    diffs = nc.compare_output(o2, o1, y)
    return diffs
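
# Illustrative sketch (not part of the original module): one way generate_cov()
# could drive worst-of-10 selection end to end, picking, for every image, the
# perturbed variant whose layer outputs diverge most from the original.
# Assumes Augmenter.worst_of_10() and md.preprocess_original_imgs() behave as
# in the callers above; the helper name itself is hypothetical.
def select_worst_by_cov_example(model, md, x, y):
    au = Augmenter()
    # x_10[i] holds the 10 perturbed variants of image i.
    x_10, _ = au.worst_of_10(copy.deepcopy(x), y)
    origin_x = md.preprocess_original_imgs(x)
    x_10 = [md.preprocess_original_imgs(variants) for variants in x_10]
    # Coverage difference of each perturbation round w.r.t. the original images.
    cov_diff = [generate_cov(model, origin_x, np.array(x_10)[:, i], y)
                for i in range(10)]
    worst_idx = np.argmax(cov_diff, axis=0)  # per-image index of the worst round
    return np.array([x_10[j][idx] for j, idx in enumerate(worst_idx)])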