Code Example #1
def _set_config():
    """
    A helper function to set global options.
    """
    utils.set_up_environment()
    tf.config.optimizer.set_jit(FLAGS.use_xla)
    tf.config.optimizer.set_experimental_options({"auto_mixed_precision": FLAGS.use_amp})
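
Every snippet on this page reads its options from a module-level FLAGS object and is wired up as an absl entry point. The sketch below shows one way such flags could be declared; the flag names use_xla and use_amp come from the snippet above, but the defaults, help strings, and the main wrapper are illustrative assumptions, not the project's actual definitions.

# Minimal sketch (assumption: defaults and help texts are illustrative only).
from absl import app, flags

flags.DEFINE_boolean('use_xla', False, 'Enable XLA JIT compilation.')
flags.DEFINE_boolean('use_amp', False, 'Enable automatic mixed precision.')
FLAGS = flags.FLAGS


def main(argv=None):
    _set_config()  # helper shown in Code Example #1


if __name__ == '__main__':
    app.run(main)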
Code Example #2
File: train.py Project: yucunlu/path_explain
def train(argv=None):
    set_up_environment(visible_devices=FLAGS.visible_devices)

    print("Loading data...")
    train_set, vald_set = sentiment_dataset(batch_size=FLAGS.batch_size,
                                            max_sequence_length=FLAGS.sequence_length)
    encoder = tfds.features.text.TokenTextEncoder.load_from_file('encoder')


    if FLAGS.eval_only:
        model = tf.keras.models.load_model('model.h5')
        print('Evaluating on the training set...')
        model.evaluate(train_set, verbose=1)
        print('Evaluating on the validation set...')
        model.evaluate(vald_set, verbose=1)
        return

    print('Building model...')
    model = cnn_model(encoder.vocab_size,
                      FLAGS.embedding_dim,
                      FLAGS.sequence_length,
                      FLAGS.dropout_rate,
                      FLAGS.num_filters,
                      FLAGS.hidden_units)

    model.compile(loss=tf.keras.losses.BinaryCrossentropy(),
                  optimizer=tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate),
                  metrics=[tf.keras.metrics.BinaryAccuracy(),
                           tf.keras.metrics.AUC()])

    model.fit(train_set,
              epochs=FLAGS.num_epochs,
              validation_data=vald_set,
              verbose=1)
    tf.keras.models.save_model(model, 'model.h5')
Code Example #3
def main(argv=None):
    utils.set_up_environment(visible_devices=FLAGS.visible_devices)

    model_dict = {
        'vgg16': tf.keras.applications.vgg16.VGG16,
        'inception_v3': tf.keras.applications.inception_v3.InceptionV3,
        'mobilenet_v2': tf.keras.applications.mobilenet_v2.MobileNetV2
    }
    sizes = {'vgg16': 224, 'inception_v3': 299, 'mobilenet_v2': 224}
    model = model_dict[FLAGS.model]()
    image_size = sizes[FLAGS.model]

    test_image = np.random.uniform(-1.0,
                                   1.0,
                                   size=(1, image_size, image_size,
                                         3)).astype(np.float32)
    baseline_image = np.zeros(
        (1, image_size, image_size, 3)).astype(np.float32)
    elapsed_time = time_model(model, test_image, baseline_image)

    data_dictionary = {
        'Model': [FLAGS.model],
        'Type': [FLAGS.type],
        'Time': [elapsed_time]
    }

    data = pd.DataFrame(data_dictionary)
    data.to_csv('{}_{}.csv'.format(FLAGS.model, FLAGS.type), index=False)
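
The snippet above relies on a time_model helper whose definition is not shown. The sketch below is only an assumption about what such a helper could look like: it times a single attribution pass with PathExplainerTF over the random test image against the zero baseline. The project's actual helper presumably varies with FLAGS.type, and the num_samples default here is a placeholder.

import time

from path_explain import PathExplainerTF


def time_model(model, test_image, baseline_image, num_samples=100):
    # Hypothetical sketch: time one attribution computation for the given model.
    explainer = PathExplainerTF(model)
    start = time.perf_counter()
    explainer.attributions(inputs=test_image,
                           baseline=baseline_image,
                           batch_size=1,
                           num_samples=num_samples,
                           use_expectation=False,
                           output_indices=0,
                           verbose=False)
    return time.perf_counter() - start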
Code Example #4
def main(argv=None):
    set_up_environment(visible_devices=FLAGS.visible_devices)
    x_train, y_train, x_test, y_test, spec_df = get_data(FLAGS.dataset)

    if FLAGS.train_interaction_model:
        train_interaction_model(x_train, y_train, x_test, y_test)

    model, random_weights = load_interaction_model()

    interaction_types = [
        'integrated_hessians', 'expected_hessians', 'hessians',
        'hessians_times_inputs', 'shapley_sampling',
        'contextual_decomposition', 'neural_interaction_detection'
    ]
    if FLAGS.interaction_type not in interaction_types:
        raise ValueError('Invalid interaction type `{}`'.format(
            FLAGS.interaction_type))

    print('Evaluating {}'.format(FLAGS.interaction_type))
    print('Getting interactions')
    interaction_function = return_interaction_function(FLAGS.interaction_type)

    interactions_train = interaction_function(model, x_train, baseline=x_test)
    interactions_test = interaction_function(model, x_test, baseline=x_train)

    mean_performances, sd_performances = \
        get_performance_curve(x_train,
                              x_test,
                              model,
                              spec_df,
                              interactions_train,
                              interactions_test,
                              random_weights)

    num_removed = np.arange(len(mean_performances))
    type_list = [FLAGS.interaction_type] * len(mean_performances)

    data = pd.DataFrame({
        'interaction_type': type_list,
        'mean_perf': mean_performances,
        'sd_perf': sd_performances,
        'num_interactions_removed': num_removed
    })
    if FLAGS.use_random_draw:
        data.to_csv('results_random_draw/{}_{}.csv'.format(
            FLAGS.dataset, FLAGS.interaction_type))
    else:
        data.to_csv('results/{}_{}.csv'.format(FLAGS.dataset,
                                               FLAGS.interaction_type))
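
The snippet looks up an interaction method by name via return_interaction_function, whose implementation is not shown. The sketch below is only an assumption about its shape: a dictionary dispatch from method name to a callable matching the signature used above, f(model, inputs, baseline=...). The batch_size and num_samples values are placeholders, and only two of the seven method names are filled in.

def return_interaction_function(interaction_type):
    # Hypothetical sketch of the dispatcher (assumption, not the project's code).
    def make_path_explainer(use_expectation):
        def interaction_fn(model, x, baseline):
            explainer = PathExplainerTF(model)
            return explainer.interactions(inputs=x,
                                          baseline=baseline,
                                          batch_size=100,
                                          num_samples=200,
                                          use_expectation=use_expectation,
                                          output_indices=0)
        return interaction_fn

    dispatch = {
        'integrated_hessians': make_path_explainer(use_expectation=False),
        'expected_hessians': make_path_explainer(use_expectation=True),
        # Remaining entries ('hessians', 'shapley_sampling', ...) omitted.
    }
    return dispatch[interaction_type]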
Code Example #5
def train(argv=None):
    set_up_environment(visible_devices=FLAGS.visible_devices)

    train_set, test_set, vald_set = higgs_dataset(batch_size=FLAGS.batch_size,
                                                  num_parallel_calls=8,
                                                  buffer_size=10000,
                                                  seed=0,
                                                  scale=True,
                                                  include_vald=True,
                                                  flip_indices=FLAGS.flip_indices)

    if FLAGS.evaluate:
        print('Evaluating model with flip indices set to {}'.format(FLAGS.flip_indices))
        model = tf.keras.models.load_model('model.h5')
        print('---------- Train Set ----------')
        model.evaluate(train_set, verbose=2)
        print('---------- Vald Set ----------')
        model.evaluate(vald_set, verbose=2)
        return

    model = build_model(weight_decay=FLAGS.weight_decay,
                        num_layers=FLAGS.num_layers,
                        hidden_units=FLAGS.hidden_units,
                        for_interpretation=False)

    steps_per_epoch = int(10000000 / FLAGS.batch_size)
    learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate=FLAGS.learning_rate,
                                                                   decay_steps=steps_per_epoch,
                                                                   decay_rate=FLAGS.decay_rate,
                                                                   staircase=True)
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                        momentum=FLAGS.momentum,
                                        nesterov=True)

    model.compile(loss=tf.keras.losses.BinaryCrossentropy(),
                  optimizer=optimizer,
                  metrics=[tf.keras.metrics.BinaryAccuracy(),
                           tf.keras.metrics.AUC()])

    callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
    history = model.fit(train_set,
                        epochs=FLAGS.epochs,
                        validation_data=vald_set,
                        callbacks=[callback])
    tf.keras.models.save_model(model, 'model.h5')
    joblib.dump(history.history, 'history.pickle')
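
The training history dumped above can be reloaded later with joblib. A minimal sketch follows; the 'loss' and 'val_loss' keys follow from the compile call above, while the exact names of the metric keys depend on Keras' metric naming.

import joblib

history = joblib.load('history.pickle')
print(history['loss'][-1], history['val_loss'][-1])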
Code Example #6
def interpret(argv=None):
    set_up_environment(visible_devices=FLAGS.visible_devices)

    print('Reading data...')
    x_train, y_train, x_test, y_test = mitbih_dataset()
    print('Dataset shape: {}'.format(x_train.shape))
    print('Loading model...')
    original_model = tf.keras.models.load_model('model.h5')

    interpret_model = cnn_model(for_interpretation=True)
    interpret_model.load_weights('model.h5', by_name=True)

    y_pred = original_model.predict(x_test)
    y_pred_max = np.argmax(y_pred, axis=-1)

    explainer = PathExplainerTF(interpret_model)

    for c in range(5):
        print('Interpreting class {}'.format(c))
        class_mask = np.logical_and(y_test == c, y_pred_max == y_test)
        class_indices = np.where(class_mask)[0][:FLAGS.num_examples]

        batch_samples = x_test[class_indices]

        attributions = explainer.attributions(inputs=batch_samples,
                                              baseline=x_train,
                                              batch_size=FLAGS.batch_size,
                                              num_samples=FLAGS.num_samples,
                                              use_expectation=True,
                                              output_indices=c,
                                              verbose=True)
        np.save('attributions_{}.npy'.format(c), attributions)

        interactions = explainer.interactions(inputs=batch_samples,
                                              baseline=x_train,
                                              batch_size=FLAGS.batch_size,
                                              num_samples=FLAGS.num_samples,
                                              use_expectation=True,
                                              output_indices=c,
                                              verbose=True)
        np.save('interactions_{}.npy'.format(c), interactions)
Code Example #7
File: train.py Project: yucunlu/path_explain
def train(argv=None):
    set_up_environment(visible_devices=FLAGS.visible_devices)

    print('Reading data...')
    x_train, y_train, x_test, y_test = mitbih_dataset()
    print('Dataset shape: {}'.format(x_train.shape))

    if FLAGS.evaluate:
        model = tf.keras.models.load_model('model.h5')
        print('Evaluating on the training data...')
        model.evaluate(x_train, y_train, verbose=2)
        print('Evaluating on the test data...')
        model.evaluate(x_test, y_test, verbose=2)
        return

    print('Building model...')
    model = cnn_model()

    learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=FLAGS.learning_rate,
        decay_steps=int(x_train.shape[0] / FLAGS.batch_size),
        decay_rate=FLAGS.decay_rate,
        staircase=True)
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate,
                                         beta_1=FLAGS.beta_1,
                                         beta_2=FLAGS.beta_2)
    loss = tf.keras.losses.SparseCategoricalCrossentropy()
    metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]
    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    print('Training model...')
    model.fit(x_train,
              y_train,
              epochs=FLAGS.epochs,
              batch_size=FLAGS.batch_size,
              verbose=1,
              validation_data=(x_test, y_test))

    tf.keras.models.save_model(model, 'model.h5')
Code Example #8
File: interpret.py Project: yucunlu/path_explain
def interpret(argv=None):
    print('Setting up environment...')
    utils.set_up_environment(visible_devices=FLAGS.visible_devices)

    print("Loading data...")
    train_set, vald_set = sentiment_dataset(
        batch_size=FLAGS.batch_size, max_sequence_length=FLAGS.sequence_length)
    encoder = tfds.features.text.TokenTextEncoder.load_from_file('encoder')

    print('Loading model...')
    interpret_model = cnn_model(encoder.vocab_size,
                                FLAGS.embedding_dim,
                                FLAGS.sequence_length,
                                FLAGS.dropout_rate,
                                FLAGS.num_filters,
                                FLAGS.hidden_units,
                                for_interpretation=True)

    model = tf.keras.models.load_model('model.h5')
    embedding_model = tf.keras.models.Model(model.input,
                                            model.layers[1].output)

    interpret_model.load_weights('model.h5', by_name=True)

    explainer = PathExplainerTF(interpret_model)

    # `use_custom_sentences` was undefined in the original snippet; assuming it
    # refers to an absl flag. Note `custom_sentences` is not used further here.
    if FLAGS.use_custom_sentences:
        custom_sentences = ['This movie was good', 'This movie was not good']

    num_accumulated = 0
    accumulated_inputs = []
    accumulated_embeddings = []
    for i, (batch_input, batch_label) in enumerate(vald_set):
        batch_embedding = embedding_model(batch_input)

        batch_pred = model(batch_input)
        batch_pred_max = (batch_pred[:, 0].numpy() > 0.5).astype(int)

        correct_mask = batch_pred_max == batch_label

        accumulated_inputs.append(batch_input[correct_mask])
        accumulated_embeddings.append(batch_embedding[correct_mask])
        num_accumulated += np.sum(correct_mask)
        if num_accumulated >= FLAGS.num_sentences:
            break

    accumulated_inputs = tf.concat(accumulated_inputs, axis=0)
    accumulated_embeddings = tf.concat(accumulated_embeddings, axis=0)
    np.save('accumulated_inputs.npy', accumulated_inputs.numpy())
    np.save('accumulated_embeddings.npy', accumulated_embeddings.numpy())

    baseline_input = np.zeros(accumulated_inputs[0:1].shape)
    baseline_embedding = embedding_model(baseline_input)

    print('Getting attributions...')
    # Get word-level attributions
    embedding_attributions = explainer.attributions(
        accumulated_embeddings,
        baseline_embedding,
        batch_size=FLAGS.batch_size,
        num_samples=FLAGS.num_samples,
        use_expectation=False,
        output_indices=0,
        verbose=True)
    np.save('embedding_attributions.npy', embedding_attributions)

    print('Getting interactions...')
    # Get pairwise word interactions
    max_indices = np.max(np.sum(accumulated_inputs != 0, axis=-1))
    interaction_matrix = np.zeros(
        (accumulated_embeddings.shape[0], max_indices, FLAGS.embedding_dim,
         FLAGS.sequence_length, FLAGS.embedding_dim))

    indices = np.indices((max_indices, FLAGS.embedding_dim))
    indices = indices.reshape(2, -1)
    indices = indices.swapaxes(0, 1)
    for interaction_index in tqdm(indices):
        embedding_interactions = explainer.interactions(
            accumulated_embeddings,
            baseline_embedding,
            batch_size=FLAGS.batch_size,
            num_samples=FLAGS.num_samples,
            use_expectation=False,
            output_indices=0,
            verbose=False,
            interaction_index=interaction_index)
        interaction_matrix[:, interaction_index[0],
                           interaction_index[1], :, :] = embedding_interactions
    np.save('interaction_matrix.npy', interaction_matrix)
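
The interaction_matrix above collects pairwise interactions one (word, embedding-dimension) index at a time, giving a tensor of shape (num_sentences, max_indices, embedding_dim, sequence_length, embedding_dim). A common follow-up step, shown here as an assumption rather than the project's own post-processing, is to sum out both embedding axes to obtain a word-by-word interaction matrix.

# Sketch (assumption): collapse the two embedding axes into word-level interactions.
word_interactions = interaction_matrix.sum(axis=(2, 4))
# Resulting shape: (num_sentences, max_indices, sequence_length).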
Code Example #9
File: interpret.py Project: yucunlu/path_explain
def interpret(argv=None):
    set_up_environment(visible_devices=FLAGS.visible_devices)

    train_set, test_set, vald_set = higgs_dataset(batch_size=FLAGS.batch_size,
                                                  num_parallel_calls=8,
                                                  buffer_size=10000,
                                                  seed=0,
                                                  scale=True,
                                                  include_vald=True)

    print('Loading model...')
    model = build_model(weight_decay=FLAGS.weight_decay,
                        num_layers=FLAGS.num_layers,
                        hidden_units=FLAGS.hidden_units,
                        for_interpretation=True)
    model.load_weights('model.h5', by_name=True)

    print('Gathering inputs...')
    training_iters = int(10000 / FLAGS.batch_size)
    training_samples = []
    for i, (x_batch, _) in enumerate(train_set):
        training_samples.append(x_batch)
        if i >= training_iters:
            break
    training_samples = tf.concat(training_samples, axis=0)

    input_samples = []
    true_labels = []
    pred_output = []
    num_accumulated = 0
    for x_batch, label_batch in test_set:
        pred_labels = model(x_batch)
        correct_mask = (pred_labels[:, 0].numpy() > 0.5).astype(int) == label_batch

        input_samples.append(x_batch.numpy()[correct_mask])
        pred_output.append(pred_labels.numpy()[correct_mask, 0])
        true_labels.append(label_batch.numpy()[correct_mask])
        num_accumulated += np.sum(correct_mask)

        if num_accumulated >= FLAGS.num_examples:
            break

    input_samples = np.concatenate(input_samples, axis=0).astype(np.float32)
    true_labels = np.concatenate(true_labels, axis=0)
    pred_output = np.concatenate(pred_output, axis=0)

    np.save('input_samples.npy', input_samples)
    np.save('pred_output.npy', pred_output)
    np.save('true_labels.npy', true_labels)

    explainer = PathExplainerTF(model)
    print('Computing attributions...')
    attributions = explainer.attributions(inputs=input_samples,
                                          baseline=np.zeros((1, input_samples.shape[1]), dtype=np.float32),
                                          batch_size=FLAGS.batch_size,
                                          num_samples=FLAGS.num_samples,
                                          use_expectation=False,
                                          output_indices=0,
                                          verbose=True)
    np.save('attributions.npy', attributions)

    print('Computing interactions...')
    interactions = explainer.interactions(inputs=input_samples,
                                          baseline=np.zeros((1, input_samples.shape[1]), dtype=np.float32),
                                          batch_size=FLAGS.batch_size,
                                          num_samples=FLAGS.num_samples,
                                          use_expectation=False,
                                          output_indices=0,
                                          verbose=True)
    np.save('interactions.npy', interactions)
Code Example #10
File: interpret.py Project: yucunlu/path_explain
def interpret(argv=None):
    print('Setting up environment...')
    utils.set_up_environment(visible_devices=FLAGS.visible_devices)

    print('Loading data...')
    x_train, y_train, x_test, y_test, vocabulary_inv = load_data(
        FLAGS.max_words, FLAGS.sequence_length)

    lengths = np.sum(x_test != 0, axis=1)
    min_indices = np.argsort(lengths)

    print('Loading model...')
    if FLAGS.model_type == 'cnn':
        interpret_model = cnn_model(len(vocabulary_inv),
                                    FLAGS.embedding_dim,
                                    FLAGS.sequence_length,
                                    FLAGS.dropout_rate,
                                    FLAGS.num_filters,
                                    FLAGS.hidden_units,
                                    for_interpretation=True)
    elif FLAGS.model_type == 'lstm':
        interpret_model = lstm_model(vocab_length=len(vocabulary_inv),
                                     embedding_dim=FLAGS.embedding_dim,
                                     sequence_length=FLAGS.sequence_length,
                                     dropout_rate=FLAGS.dropout_rate,
                                     lstm_units=FLAGS.num_filters,
                                     hidden_units=FLAGS.hidden_units,
                                     for_interpretation=True)
    else:
        raise ValueError(
            'Unrecognized value `{}` for argument model_type'.format(
                FLAGS.model_type))

    model = tf.keras.models.load_model('{}.h5'.format(FLAGS.model_type))
    embedding_model = tf.keras.models.Model(model.input,
                                            model.layers[1].output)

    interpret_model.load_weights('{}.h5'.format(FLAGS.model_type),
                                 by_name=True)

    explainer = PathExplainerTF(interpret_model)

    batch_input = x_test[min_indices[:FLAGS.num_sentences]]
    batch_embedding = embedding_model(batch_input)
    batch_pred = model(batch_input)

    baseline_input = np.zeros(x_test[0:1].shape)
    baseline_embedding = embedding_model(baseline_input)

    print('Getting attributions...')
    # Get word-level attributions
    embedding_attributions = explainer.attributions(
        batch_embedding,
        baseline_embedding,
        batch_size=FLAGS.batch_size,
        num_samples=FLAGS.num_samples,
        use_expectation=False,
        output_indices=0,
        verbose=True)
    np.save('embedding_attributions_{}.npy'.format(FLAGS.model_type),
            embedding_attributions)

    print('Getting interactions...')
    # Get pairwise word interactions
    max_indices = np.sum(batch_input[-1] != 0)
    interaction_matrix = np.zeros(
        (FLAGS.num_sentences, max_indices, FLAGS.embedding_dim,
         FLAGS.sequence_length, FLAGS.embedding_dim))

    indices = np.indices((max_indices, FLAGS.embedding_dim))
    indices = indices.reshape(2, -1)
    indices = indices.swapaxes(0, 1)
    for interaction_index in tqdm(indices):
        embedding_interactions = explainer.interactions(
            batch_embedding,
            baseline_embedding,
            batch_size=FLAGS.batch_size,
            num_samples=FLAGS.num_samples,
            use_expectation=False,
            output_indices=0,
            verbose=False,
            interaction_index=interaction_index)
        interaction_matrix[:, interaction_index[0],
                           interaction_index[1], :, :] = embedding_interactions
    np.save('interaction_matrix_{}.npy'.format(FLAGS.model_type),
            interaction_matrix)
Code Example #11
File: interpret.py Project: yucunlu/path_explain
def interpret(argv=None):
    if FLAGS.visible_device is not None:
        set_up_environment(visible_devices=str(FLAGS.visible_device))
    else:
        set_up_environment()

    print('Loading data...')
    (x_train, y_train), (x_test, y_test) = load_mnist()

    if FLAGS.interpret_pca:
        print('Fitting PCA...')
        reshaped_x_train = np.reshape(x_train, (x_train.shape[0], -1))
        pca_model = PCA()
        pca_model.fit(reshaped_x_train)
        dump(pca_model, 'pca_model.pickle')

        transformed_x_train = pca_model.transform(reshaped_x_train).astype(
            np.float32)
        baseline = transformed_x_train

        reshaped_x_test = np.reshape(x_test, (x_test.shape[0], -1))
        transformed_x_test = pca_model.transform(reshaped_x_test).astype(
            np.float32)
    else:
        baseline = x_train
        transformed_x_test = x_test

    print('Loading model...')
    original_model = build_model(for_interpretation=True)
    original_model.load_weights('model.h5', by_name=True)

    if FLAGS.interpret_pca:
        interpret_model = tf.keras.models.Sequential()
        interpret_model.add(tf.keras.layers.Input(shape=(784,)))
        pca_layer = tf.keras.layers.Dense(
            units=784,
            activation=None,
            use_bias=True,
            kernel_initializer=tf.keras.initializers.Constant(
                pca_model.components_),
            bias_initializer=tf.keras.initializers.Constant(pca_model.mean_))
        interpret_model.add(pca_layer)
        interpret_model.add(tf.keras.layers.Reshape((28, 28, 1)))
        interpret_model.add(original_model)
    else:
        interpret_model = original_model

    flag_name = ''
    if FLAGS.interpret_pca:
        flag_name += '_pca'

    print('Getting attributions...')
    explainer = PathExplainerTF(interpret_model)
    explained_inputs = transformed_x_test[:FLAGS.max_images]
    explained_labels = y_test[:FLAGS.max_images].astype(int)
    attributions = explainer.attributions(inputs=explained_inputs,
                                          baseline=baseline,
                                          batch_size=FLAGS.batch_size,
                                          num_samples=FLAGS.num_samples,
                                          use_expectation=True,
                                          output_indices=explained_labels,
                                          verbose=True)
    np.save('attributions{}.npy'.format(flag_name), attributions)

    print('Getting interactions...')
    interactions = explainer.interactions(inputs=explained_inputs,
                                          baseline=baseline,
                                          batch_size=FLAGS.batch_size,
                                          num_samples=FLAGS.num_samples,
                                          use_expectation=True,
                                          output_indices=explained_labels,
                                          verbose=True)
    np.save('interactions{}.npy'.format(flag_name), interactions)
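
In the PCA branch above, pca_layer re-implements PCA().inverse_transform as a Dense layer: with the kernel initialized to pca_model.components_ (shape (n_components, n_features)) and the bias initialized to pca_model.mean_, it computes z @ components_ + mean_, so attributions over PCA coordinates flow through a differentiable reconstruction into the original image model. The toy check below uses random data (an illustrative assumption, not the MNIST pipeline above) to confirm the equivalence for a PCA fit without whitening (the default).

import numpy as np
from sklearn.decomposition import PCA

# Toy check on random data (assumption: illustrative only).
rng = np.random.RandomState(0)
x = rng.normal(size=(32, 784))
pca = PCA().fit(x)
z = pca.transform(x)

# Dense-layer equivalent of the inverse transform: z @ components_ + mean_.
reconstructed = z @ pca.components_ + pca.mean_
assert np.allclose(reconstructed, pca.inverse_transform(z), atol=1e-6)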