Example 1
def optimize_input_for_neuron(model_object,
                              layer_name,
                              neuron_indices,
                              init_function_or_matrices,
                              num_iterations=DEFAULT_NUM_ITERATIONS,
                              learning_rate=DEFAULT_LEARNING_RATE,
                              ideal_activation=DEFAULT_IDEAL_ACTIVATION):
    """Creates synthetic input example to maximize activation of neuron.

    :param model_object: Trained instance of `keras.models.Model` or
        `keras.models.Sequential`.
    :param layer_name: Name of layer containing the relevant neuron.
    :param neuron_indices: 1-D numpy array with indices of the relevant neuron.
        Must have length D - 1, where D = number of dimensions in layer output.
        The first dimension of layer output is the example dimension, for which
        the index in this case is always 0.
    :param init_function_or_matrices: See doc for `_do_gradient_descent`.
    :param num_iterations: Same.
    :param learning_rate: Same.
    :param ideal_activation: If this value is specified, the loss function will
        be (neuron_activation - ideal_activation)^2.

        If this value is None, the loss function will be
        -sign(neuron_activation) * neuron_activation^2.

    :return: list_of_optimized_matrices: See doc for `_do_gradient_descent`.
    """

    model_interpretation.check_component_metadata(
        component_type_string=(
            model_interpretation.NEURON_COMPONENT_TYPE_STRING),
        layer_name=layer_name,
        neuron_indices=neuron_indices)

    _check_input_args(num_iterations=num_iterations,
                      learning_rate=learning_rate,
                      ideal_activation=ideal_activation)

    neuron_indices_as_tuple = (0,) + tuple(neuron_indices)

    neuron_activation_tensor = model_object.get_layer(
        name=layer_name).output[neuron_indices_as_tuple]

    if ideal_activation is None:
        loss_tensor = -K.sign(neuron_activation_tensor) * (
            neuron_activation_tensor**2)
    else:
        loss_tensor = (neuron_activation_tensor - ideal_activation)**2

    return _do_gradient_descent(
        model_object=model_object,
        loss_tensor=loss_tensor,
        init_function_or_matrices=init_function_or_matrices,
        num_iterations=num_iterations,
        learning_rate=learning_rate)
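
A minimal usage sketch follows, assuming a trained CNN readable by `cnn.read_model` (the reader referenced in `write_standard_file` below). The import path, model file, layer name, input shape, and neuron indices are all hypothetical placeholders.

import numpy

from gewittergefahr.deep_learning import cnn  # assumed import path

# Hypothetical model file -- substitute your own trained network.
model_object = cnn.read_model('pretrained_cnn/model.h5')

# One init matrix per model-input tensor, with a leading example dimension.
# The shape (1, 32, 32, 4) is purely illustrative.
init_matrices = [numpy.random.normal(loc=0., scale=1., size=(1, 32, 32, 4))]

# Excite the neuron at spatial position (0, 0), channel 0, in a hypothetical
# layer named "conv2d_3".  Indices exclude the example dimension.
optimized_matrices = optimize_input_for_neuron(
    model_object=model_object,
    layer_name='conv2d_3',
    neuron_indices=numpy.array([0, 0, 0], dtype=int),
    init_function_or_matrices=init_matrices,
    ideal_activation=None)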
Example 2
def optimize_input_for_class(model_object,
                             target_class,
                             init_function_or_matrices,
                             num_iterations=DEFAULT_NUM_ITERATIONS,
                             learning_rate=DEFAULT_LEARNING_RATE):
    """Creates synthetic input example to maximize probability of target class.

    :param model_object: Trained instance of `keras.models.Model` or
        `keras.models.Sequential`.
    :param target_class: Input data will be optimized for this class.  Must be
        an integer in 0...(K - 1), where K = number of classes.
    :param init_function_or_matrices: See doc for `_do_gradient_descent`.
    :param num_iterations: Same.
    :param learning_rate: Same.
    :return: list_of_optimized_matrices: Same.
    """

    model_interpretation.check_component_metadata(
        component_type_string=model_interpretation.CLASS_COMPONENT_TYPE_STRING,
        target_class=target_class)

    _check_input_args(num_iterations=num_iterations,
                      learning_rate=learning_rate)

    num_output_neurons = (
        model_object.layers[-1].output.get_shape().as_list()[-1])

    if num_output_neurons == 1:
        error_checking.assert_is_leq(target_class, 1)

        if target_class == 1:
            loss_tensor = K.mean(
                (model_object.layers[-1].output[..., 0] - 1)**2)
        else:
            loss_tensor = K.mean(model_object.layers[-1].output[..., 0]**2)
    else:
        error_checking.assert_is_less_than(target_class, num_output_neurons)

        loss_tensor = K.mean(
            (model_object.layers[-1].output[..., target_class] - 1)**2)

    return _do_gradient_descent(
        model_object=model_object,
        loss_tensor=loss_tensor,
        init_function_or_matrices=init_function_or_matrices,
        num_iterations=num_iterations,
        learning_rate=learning_rate)
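
A companion sketch for the class-based optimizer, reusing the hypothetical `model_object` and `init_matrices` from the previous sketch and assuming a binary classifier with a single sigmoid output neuron.

# target_class = 1 pulls the output probability towards 1 via the loss
# K.mean((output - 1)**2); target_class = 0 pulls it towards 0 instead.
optimized_matrices = optimize_input_for_class(
    model_object=model_object,
    target_class=1,
    init_function_or_matrices=init_matrices)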
Example 3
def write_standard_file(pickle_file_name,
                        init_function_name_or_matrices,
                        list_of_optimized_matrices,
                        model_file_name,
                        num_iterations,
                        learning_rate,
                        component_type_string,
                        target_class=None,
                        layer_name=None,
                        neuron_indices=None,
                        channel_index=None,
                        ideal_activation=None,
                        storm_ids=None,
                        storm_times_unix_sec=None):
    """Writes optimized learning examples to Pickle file.

    E = number of examples (storm objects)

    :param pickle_file_name: Path to output file.
    :param init_function_name_or_matrices: See doc for `_do_gradient_descent`.
        The only difference here is that, if a function was used, the input
        argument must be the function *name* rather than the function itself.
    :param list_of_optimized_matrices: List of numpy arrays created by
        `_do_gradient_descent`.
    :param model_file_name: Path to file with trained model (readable by
        `cnn.read_model`).
    :param num_iterations: See doc for `_do_gradient_descent`.
    :param learning_rate: Same.
    :param component_type_string: See doc for
        `model_interpretation.check_component_metadata`.
    :param target_class: Same.
    :param layer_name: Same.
    :param neuron_indices: Same.
    :param channel_index: Same.
    :param ideal_activation: See doc for `optimize_input_for_neuron` or
        `optimize_input_for_channel`.
    :param storm_ids:
        [used only if `init_function_name_or_matrices` is list of matrices]
        length-E list of storm IDs (strings).
    :param storm_times_unix_sec:
        [used only if `init_function_name_or_matrices` is list of matrices]
        length-E numpy array of storm times.
    :raises: ValueError: if `init_function_name_or_matrices` is a list of numpy
        arrays and has a different length than `list_of_optimized_matrices`.
    """

    model_interpretation.check_component_metadata(
        component_type_string=component_type_string,
        target_class=target_class,
        layer_name=layer_name,
        neuron_indices=neuron_indices,
        channel_index=channel_index)

    _check_input_args(num_iterations=num_iterations,
                      learning_rate=learning_rate,
                      ideal_activation=ideal_activation)

    error_checking.assert_is_string(model_file_name)
    error_checking.assert_is_list(list_of_optimized_matrices)

    if isinstance(init_function_name_or_matrices, str):
        num_storm_objects = None
    else:
        num_init_matrices = len(init_function_name_or_matrices)
        num_optimized_matrices = len(list_of_optimized_matrices)

        if num_init_matrices != num_optimized_matrices:
            error_string = (
                'Number of input matrices ({0:d}) should equal number of output'
                ' matrices ({1:d}).').format(num_init_matrices,
                                             num_optimized_matrices)

            raise ValueError(error_string)

        error_checking.assert_is_string_list(storm_ids)
        error_checking.assert_is_numpy_array(numpy.array(storm_ids),
                                             num_dimensions=1)

        num_storm_objects = len(storm_ids)
        these_expected_dim = numpy.array([num_storm_objects], dtype=int)

        error_checking.assert_is_integer_numpy_array(storm_times_unix_sec)
        error_checking.assert_is_numpy_array(
            storm_times_unix_sec, exact_dimensions=these_expected_dim)

    num_matrices = len(list_of_optimized_matrices)

    for i in range(num_matrices):
        error_checking.assert_is_numpy_array_without_nan(
            list_of_optimized_matrices[i])

        if num_storm_objects is not None:
            these_expected_dim = numpy.array(
                (num_storm_objects, ) +
                list_of_optimized_matrices[i].shape[1:],
                dtype=int)
            error_checking.assert_is_numpy_array(
                list_of_optimized_matrices[i],
                exact_dimensions=these_expected_dim)

        if not isinstance(init_function_name_or_matrices, str):
            error_checking.assert_is_numpy_array_without_nan(
                init_function_name_or_matrices[i])

            these_expected_dim = numpy.array(
                list_of_optimized_matrices[i].shape, dtype=int)

            error_checking.assert_is_numpy_array(
                init_function_name_or_matrices[i],
                exact_dimensions=these_expected_dim)

    optimization_dict = {
        INIT_FUNCTION_KEY: init_function_name_or_matrices,
        OPTIMIZED_MATRICES_KEY: list_of_optimized_matrices,
        MODEL_FILE_NAME_KEY: model_file_name,
        NUM_ITERATIONS_KEY: num_iterations,
        LEARNING_RATE_KEY: learning_rate,
        COMPONENT_TYPE_KEY: component_type_string,
        TARGET_CLASS_KEY: target_class,
        LAYER_NAME_KEY: layer_name,
        IDEAL_ACTIVATION_KEY: ideal_activation,
        NEURON_INDICES_KEY: neuron_indices,
        CHANNEL_INDEX_KEY: channel_index,
        STORM_IDS_KEY: storm_ids,
        STORM_TIMES_KEY: storm_times_unix_sec
    }

    file_system_utils.mkdir_recursive_if_necessary(file_name=pickle_file_name)
    with open(pickle_file_name, 'wb') as pickle_file_handle:
        pickle.dump(optimization_dict, pickle_file_handle)
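
Because the writer pickles a single dictionary, reading the file back is a one-liner. A minimal sketch, assuming the same module-level key constants (`OPTIMIZED_MATRICES_KEY` and friends) are in scope; a production reader would also validate that all expected keys are present.

import pickle

def read_standard_file(pickle_file_name):
    """Reads optimized learning examples from Pickle file.

    This sketch simply inverts `write_standard_file`.
    """
    with open(pickle_file_name, 'rb') as pickle_file_handle:
        optimization_dict = pickle.load(pickle_file_handle)

    return optimization_dict

# For example, pull out the optimized matrices:
optimization_dict = read_standard_file('optimization/example.p')
optimized_matrices = optimization_dict[OPTIMIZED_MATRICES_KEY]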
Example 4
def optimize_input_for_channel(model_object,
                               layer_name,
                               channel_index,
                               init_function_or_matrices,
                               stat_function_for_neuron_activations,
                               num_iterations=DEFAULT_NUM_ITERATIONS,
                               learning_rate=DEFAULT_LEARNING_RATE,
                               ideal_activation=DEFAULT_IDEAL_ACTIVATION):
    """Creates synthetic input example to maxx activation of neurons in channel.

    :param model_object: Trained instance of `keras.models.Model` or
        `keras.models.Sequential`.
    :param layer_name: Name of layer containing the relevant channel.
    :param channel_index: Index of the relevant channel.  Will optimize for
        activation of [j]th channel in layer, where j = `channel_index`.
    :param init_function_or_matrices: See doc for `_do_gradient_descent`.
    :param stat_function_for_neuron_activations: Function used to convert all
        neuron activations into a single number.  Some examples are
        `keras.backend.max` and `keras.backend.mean`.  The exact format of this
        function is given below.

        Input: Keras tensor of neuron activations.
        Output: Single number.

    :param num_iterations: See doc for `_do_gradient_descent`.
    :param learning_rate: Same.
    :param ideal_activation: If this value is specified, the loss function will
        be abs[stat_function_for_neuron_activations(neuron_activations) -
        ideal_activation].

        If this value is None, the loss function will be
        -abs[stat_function_for_neuron_activations(neuron_activations)].

    :return: list_of_optimized_matrices: See doc for `_do_gradient_descent`.
    """

    model_interpretation.check_component_metadata(
        component_type_string=(
            model_interpretation.CHANNEL_COMPONENT_TYPE_STRING),
        layer_name=layer_name,
        channel_index=channel_index)

    _check_input_args(num_iterations=num_iterations,
                      learning_rate=learning_rate,
                      ideal_activation=ideal_activation)

    channel_activation_tensor = stat_function_for_neuron_activations(
        model_object.get_layer(name=layer_name).output[0, ..., channel_index])

    if ideal_activation is None:
        loss_tensor = -K.abs(channel_activation_tensor)
    else:
        error_checking.assert_is_greater(ideal_activation, 0.)
        loss_tensor = K.abs(channel_activation_tensor - ideal_activation)

    return _do_gradient_descent(
        model_object=model_object,
        loss_tensor=loss_tensor,
        init_function_or_matrices=init_function_or_matrices,
        num_iterations=num_iterations,
        learning_rate=learning_rate)
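
Usage mirrors the neuron-based optimizer, except that a stat function collapses the channel's activations to one scalar. A sketch reusing the hypothetical `model_object`, layer name, and `init_matrices` from above.

from keras import backend as K

# Any callable mapping a Keras tensor of activations to a scalar tensor works
# here: K.max rewards one strongly activated neuron in the channel, whereas
# K.mean rewards broad activation across the whole channel.
optimized_matrices = optimize_input_for_channel(
    model_object=model_object,
    layer_name='conv2d_3',
    channel_index=0,
    init_function_or_matrices=init_matrices,
    stat_function_for_neuron_activations=K.max,
    ideal_activation=None)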
Example 5
def optimize_input_for_channel(model_object,
                               layer_name,
                               channel_index,
                               init_function_or_matrices,
                               stat_function_for_neuron_activations,
                               num_iterations=DEFAULT_NUM_ITERATIONS,
                               learning_rate=DEFAULT_LEARNING_RATE,
                               l2_weight=DEFAULT_L2_WEIGHT,
                               ideal_activation=DEFAULT_IDEAL_ACTIVATION,
                               radar_constraint_weight=None,
                               minmax_constraint_weight=None,
                               model_metadata_dict=None):
    """Creates synthetic input example to maxx activation of neurons in channel.

    :param model_object: Trained instance of `keras.models.Model` or
        `keras.models.Sequential`.
    :param layer_name: Name of layer containing the relevant channel.
    :param channel_index: Index of the relevant channel.  Will optimize for
        activation of [j]th channel in layer, where j = `channel_index`.
    :param init_function_or_matrices: See doc for `_do_gradient_descent`.
    :param stat_function_for_neuron_activations: Function used to convert all
        neuron activations into a single number.  Some examples are
        `keras.backend.max` and `keras.backend.mean`.  The exact format of this
        function is given below.

        Input: Keras tensor of neuron activations.
        Output: Single number.

    :param num_iterations: See doc for `_do_gradient_descent`.
    :param learning_rate: Same.
    :param l2_weight: Same.
    :param ideal_activation: If this value is specified, the loss function will
        be (stat_function_for_neuron_activations(neuron_activations) -
        ideal_activation)^2.

        If this value is None, the loss function will be
        -sign(a) * a^2, where
        a = stat_function_for_neuron_activations(neuron_activations).

    :param radar_constraint_weight: See doc for `_radar_constraints_to_loss_fn`.
    :param minmax_constraint_weight: See doc for
        `_minmax_constraints_to_loss_fn`.
    :param model_metadata_dict: Same.
    :return: list_of_optimized_matrices: See doc for `_do_gradient_descent`.
    :return: initial_activation: Same.
    :return: final_activation: Same.
    """

    model_interpretation.check_component_metadata(
        component_type_string=(
            model_interpretation.CHANNEL_COMPONENT_TYPE_STRING),
        layer_name=layer_name,
        channel_index=channel_index)

    _check_input_args(num_iterations=num_iterations,
                      learning_rate=learning_rate,
                      l2_weight=l2_weight,
                      radar_constraint_weight=radar_constraint_weight,
                      minmax_constraint_weight=minmax_constraint_weight)

    radar_constraint_loss_tensor = _radar_constraints_to_loss_fn(
        model_object=model_object,
        model_metadata_dict=model_metadata_dict,
        weight=radar_constraint_weight)

    minmax_constraint_loss_tensor = _minmax_constraints_to_loss_fn(
        model_object=model_object,
        model_metadata_dict=model_metadata_dict,
        weight=minmax_constraint_weight)

    activation_tensor = stat_function_for_neuron_activations(
        model_object.get_layer(name=layer_name).output[0, ..., channel_index])

    if ideal_activation is None:
        loss_tensor = -K.sign(activation_tensor) * activation_tensor**2
    else:
        loss_tensor = (activation_tensor - ideal_activation)**2

    if radar_constraint_loss_tensor is not None:
        loss_tensor += radar_constraint_loss_tensor
    if minmax_constraint_loss_tensor is not None:
        loss_tensor += minmax_constraint_loss_tensor

    return _do_gradient_descent(
        model_object=model_object,
        activation_tensor=activation_tensor,
        loss_tensor=loss_tensor,
        init_function_or_matrices=init_function_or_matrices,
        num_iterations=num_iterations,
        learning_rate=learning_rate,
        l2_weight=l2_weight)
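
This extended variant returns three values and accepts extra penalty weights. A sketch with illustrative weights, again reusing the hypothetical objects from above and assuming a `model_metadata_dict` in whatever format `_radar_constraints_to_loss_fn` expects (not shown here).

from keras import backend as K

# The weight magnitudes below are illustrative only; sensible values depend
# on the scale of the base loss relative to the constraint terms.
optimized_matrices, initial_activation, final_activation = (
    optimize_input_for_channel(
        model_object=model_object,
        layer_name='conv2d_3',
        channel_index=0,
        init_function_or_matrices=init_matrices,
        stat_function_for_neuron_activations=K.max,
        l2_weight=0.001,
        radar_constraint_weight=1.,
        minmax_constraint_weight=1.,
        model_metadata_dict=model_metadata_dict))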
Example 6
def optimize_input_for_class(model_object,
                             target_class,
                             init_function_or_matrices,
                             num_iterations=DEFAULT_NUM_ITERATIONS,
                             learning_rate=DEFAULT_LEARNING_RATE,
                             l2_weight=DEFAULT_L2_WEIGHT,
                             radar_constraint_weight=None,
                             minmax_constraint_weight=None,
                             model_metadata_dict=None):
    """Creates synthetic input example to maximize probability of target class.

    :param model_object: Trained instance of `keras.models.Model` or
        `keras.models.Sequential`.
    :param target_class: Input data will be optimized for this class.  Must be
        an integer in 0...(K - 1), where K = number of classes.
    :param init_function_or_matrices: See doc for `_do_gradient_descent`.
    :param num_iterations: Same.
    :param learning_rate: Same.
    :param l2_weight: Same.
    :param radar_constraint_weight: See doc for `_radar_constraints_to_loss_fn`.
    :param minmax_constraint_weight: See doc for
        `_minmax_constraints_to_loss_fn`.
    :param model_metadata_dict: Same.
    :return: list_of_optimized_matrices: Same.
    :return: initial_activation: Same.
    :return: final_activation: Same.
    """

    model_interpretation.check_component_metadata(
        component_type_string=model_interpretation.CLASS_COMPONENT_TYPE_STRING,
        target_class=target_class)

    _check_input_args(num_iterations=num_iterations,
                      learning_rate=learning_rate,
                      l2_weight=l2_weight,
                      radar_constraint_weight=radar_constraint_weight,
                      minmax_constraint_weight=minmax_constraint_weight)

    radar_constraint_loss_tensor = _radar_constraints_to_loss_fn(
        model_object=model_object,
        model_metadata_dict=model_metadata_dict,
        weight=radar_constraint_weight)

    minmax_constraint_loss_tensor = _minmax_constraints_to_loss_fn(
        model_object=model_object,
        model_metadata_dict=model_metadata_dict,
        weight=minmax_constraint_weight)

    num_output_neurons = (
        model_object.layers[-1].output.get_shape().as_list()[-1])

    if num_output_neurons == 1:
        error_checking.assert_is_leq(target_class, 1)

        activation_tensor = model_object.layers[-1].output[..., 0]

        if target_class == 1:
            loss_tensor = K.mean((activation_tensor - 1)**2)
        else:
            loss_tensor = K.mean(activation_tensor**2)
    else:
        error_checking.assert_is_less_than(target_class, num_output_neurons)

        activation_tensor = model_object.layers[-1].output[..., target_class]
        loss_tensor = K.mean((activation_tensor - 1)**2)

    if radar_constraint_loss_tensor is not None:
        loss_tensor += radar_constraint_loss_tensor
    if minmax_constraint_loss_tensor is not None:
        loss_tensor += minmax_constraint_loss_tensor

    return _do_gradient_descent(
        model_object=model_object,
        activation_tensor=activation_tensor,
        loss_tensor=loss_tensor,
        init_function_or_matrices=init_function_or_matrices,
        num_iterations=num_iterations,
        learning_rate=learning_rate,
        l2_weight=l2_weight)
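
To see the loss construction in isolation, here is a self-contained sketch built around a toy 3-class softmax model (old-style Keras, matching the `K` backend usage above). It reproduces the loss term from the multi-class branch; the toy architecture is purely illustrative.

from keras import backend as K
from keras.layers import Dense, Input
from keras.models import Model

# Toy 3-class softmax model, built only to show the loss-tensor construction.
input_layer = Input(shape=(8,))
output_layer = Dense(3, activation='softmax')(input_layer)
toy_model = Model(inputs=input_layer, outputs=output_layer)

target_class = 2
activation_tensor = toy_model.layers[-1].output[..., target_class]

# Pushing the target-class probability towards 1 is framed as minimizing its
# squared distance from 1, exactly as in the multi-class branch above.
loss_tensor = K.mean((activation_tensor - 1)**2)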