def get_weights_linear_model(model, X, y):
    """Probe every layer of *model* with an L1 logistic regression.

    For each layer (Sequential containers and Dropout layers are skipped)
    the layer's activations on ``X`` are computed and an L1-penalised
    ``LogisticRegression`` is fitted against ``y``; the flattened
    coefficient vector of each fit is collected.

    Args:
        model: Keras model whose intermediate activations are probed.
        X: input samples fed to the model.
        y: target labels; if a list is passed only its first element is used.

    Returns:
        list of 1-D numpy arrays, one coefficient vector per probed layer.
    """
    layer_weights = []
    layers = get_layers(model)
    inp = model.input
    for i, l in enumerate(layers):
        # Containers and Dropout layers carry no useful activations.
        if isinstance(l, (Sequential, Dropout)):
            continue
        print(type(l))
        if isinstance(l, InputLayer):
            # The input layer's "activation" is just the raw data.
            layer_out = X
        else:
            out = l.output
            print('{} {} {}'.format(i, l, out))
            # Evaluate the layer in test mode (learning_phase = 0).
            func = K.function([inp] + [K.learning_phase()], [out])
            layer_out = func([X, 0.])[0]
        linear_model = LogisticRegression(penalty='l1')
        if isinstance(y, list):
            y = y[0]
        linear_model.fit(layer_out, y.ravel())
        layer_weights.append(linear_model.coef_.T.ravel())
    return layer_weights
def get_skf_weights(model, X, y, importance_type):
    """Score features of the input layer and each hidden ('h*') layer.

    For the input layer the raw ``X`` is used; for layers whose name starts
    with ``'h'`` the layer activations on ``X`` are evaluated.  A
    ``FeatureSelectionModel`` of the given ``importance_type`` is fitted on
    each and its coefficient vector collected.  All other layers are skipped.

    Args:
        model: Keras model to probe.
        X: input samples.
        y: target labels; if a list is passed only its first element is used.
        importance_type: selector type forwarded to FeatureSelectionModel.

    Returns:
        list of coefficient arrays, one per probed layer.
    """
    from features_processing.feature_selection import FeatureSelectionModel
    layers = get_layers(model)
    inp = model.input
    layer_weights = []
    for i, l in enumerate(layers):

        if isinstance(l, InputLayer):
            layer_out = X
        elif l.name.startswith('h'):
            out = l.output
            print('{} {} {}'.format(i, l, out))
            # Evaluate the layer in test mode (learning_phase = 0).
            func = K.function([inp] + [K.learning_phase()], [out])
            layer_out = func([X, 0.])[0]
        else:
            continue

        if isinstance(y, list):
            y = y[0]

        p = {'type': importance_type, 'params': {}}
        fs_model = FeatureSelectionModel(p)
        fs_model = fs_model.fit(layer_out, y.ravel())
        fs_coef = fs_model.get_coef()
        # Some selectors can return infinite scores; zero them out.
        fs_coef[fs_coef == np.inf] = 0
        layer_weights.append(fs_coef)
    return layer_weights
def get_shap_scores(model, X_train, y_train, target=-1, method_name='deepexplainer', detailed=False):
    """Compute SHAP attributions for every hidden/input layer of *model*.

    Layers whose name starts with 'h' or 'inputs' (project naming
    convention for hidden/input layers) are attributed via
    ``get_shap_scores_layer``; Sequential, Dropout and BatchNormalization
    layers are skipped.

    Args:
        model: Keras model.
        X_train: samples to attribute.
        y_train: unused here; kept for interface parity with the other
            scoring helpers in this module.
        target: model output to explain; ``None`` explains output ``i``
            (the running layer index) instead of a fixed target.
        method_name: explainer backend name, e.g. 'deepexplainer'.
        detailed: if True also return per-sample attributions.

    Returns:
        list of per-layer aggregated feature weights, and additionally the
        per-sample attribution arrays when ``detailed`` is True.
    """
    gradients_list = []
    gradients_list_sample_level = []
    i = 0
    for l in get_layers(model):
        if isinstance(l, (Sequential, Dropout, BatchNormalization)):
            continue
        if l.name.startswith('h') or l.name.startswith('inputs'):  # hidden layers (this is just a convention )
            if target is None:
                output = i
            else:
                output = target
            print('layer # {}, layer name {},  output name {}'.format(i, l.name, output))
            i += 1
            gradients = get_shap_scores_layer(model, X_train, l.name, output, method_name=method_name)
            # Aggregate over the sample axis to get one weight per feature.
            if gradients.ndim > 1:
                feature_weights = np.sum(gradients, axis=-2)
            else:
                feature_weights = np.abs(gradients)
            gradients_list.append(feature_weights)
            gradients_list_sample_level.append(gradients)
    if detailed:
        return gradients_list, gradients_list_sample_level
    return gradients_list
# --- Example #4 ---
 def get_layer_outputs(self, X):
     """Return a dict mapping layer name -> activation array for input X.

     The first layer returned by ``get_layers`` is skipped (presumably the
     input layer — TODO confirm).  All remaining layer outputs are
     evaluated in a single Keras backend call, in test mode
     (learning_phase = 0).
     """
     inp = self.model.input
     layers = get_layers(self.model)[1:]
     layer_names = [layer.name for layer in layers]
     outputs = [layer.get_output_at(0) for layer in layers]  # all layer outputs
     # One evaluation function covering every layer at once.
     functor = K.function(inputs=[inp, K.learning_phase()], outputs=outputs)
     layer_outs = functor([X, 0.])
     return dict(zip(layer_names, layer_outs))
def get_deep_explain_scores(model, X_train, y_train, target=-1, method_name='grad*input', detailed=False, **kwargs):
    """Compute DeepExplain attributions for every hidden/input layer.

    Layers whose name starts with 'h' or 'inputs' (project naming
    convention) are attributed via ``get_deep_explain_score_layer``;
    Sequential, Dropout and BatchNormalization layers are skipped.

    Args:
        model: Keras model.
        X_train: samples to attribute.
        y_train: unused here; kept for interface parity with the other
            scoring helpers in this module.
        target: model output to explain; ``None`` explains output ``i``
            (the running layer index) instead of a fixed target.
        method_name: DeepExplain method, e.g. 'grad*input'.
        detailed: if True also return per-sample attributions.
        **kwargs: accepted for interface compatibility; not used.

    Returns:
        dict mapping layer name -> aggregated feature weights, and
        additionally a layer name -> per-sample attributions dict when
        ``detailed`` is True.
    """
    gradients_list = {}
    gradients_list_sample_level = {}

    i = 0
    for l in get_layers(model):
        if isinstance(l, (Sequential, Dropout, BatchNormalization)):
            continue
        if l.name.startswith('h') or l.name.startswith('inputs'):  # hidden layers (this is just a convention )

            if target is None:
                output = i
            else:
                output = target

            print('layer # {}, layer name {},  output name {}'.format(i, l.name, output))
            i += 1
            gradients = get_deep_explain_score_layer(model, X_train, l.name, output, method_name=method_name)
            if gradients.ndim > 1:
                print('gradients.shape', gradients.shape)
                # Signed sum over the sample axis: one weight per feature.
                feature_weights = np.sum(gradients, axis=-2)
                print('feature_weights.shape', feature_weights.shape)
                print('feature_weights min max', min(feature_weights), max(feature_weights))
            else:
                feature_weights = gradients
            gradients_list[l.name] = feature_weights
            gradients_list_sample_level[l.name] = gradients
    if detailed:
        return gradients_list, gradients_list_sample_level
    return gradients_list
def get_gradient_weights_with_repeated_output(model, X, y):
    """Collect the (flattened) kernel weights of the per-layer output heads.

    Layers whose name starts with 'o' but not 'o_' are treated as output
    heads (project naming convention) and their first weight tensor is
    flattened and collected.  Sequential, Dropout and BatchNormalization
    layers are skipped.

    Args:
        model: Keras model.
        X: unused; kept for interface parity with the other helpers.
        y: unused; kept for interface parity with the other helpers.

    Returns:
        list of 1-D weight arrays, one per output-head layer.
    """
    gradients_list = []

    for l in get_layers(model):

        if isinstance(l, (Sequential, Dropout, BatchNormalization)):
            continue

        if l.name.startswith('o') and not l.name.startswith('o_'):
            print(l.name)
            print(l.weights)
            # get_weights()[0] is the kernel (first weight tensor).
            weights = l.get_weights()[0]
            gradients_list.append(weights.ravel())

    return gradients_list
def get_gradient_weights(model, X, y, signed=False, detailed=False, normalize=True):
    """Compute gradient-based feature weights for each hidden/input layer.

    Layers whose name starts with 'h' or 'inputs' (project naming
    convention) have the gradient of the model w.r.t. their output computed
    via ``get_gradient_layer``; Sequential, Dropout and BatchNormalization
    layers are skipped.

    Args:
        model: Keras model.
        X: input samples.
        y: target labels forwarded to ``get_gradient_layer``.
        signed: if True sum raw gradients over samples, otherwise sum
            absolute gradients.
        detailed: if True also return the per-sample gradient arrays.
        normalize: forwarded to ``get_gradient_layer``.

    Returns:
        list of per-layer feature-weight arrays, and additionally the
        per-sample gradients when ``detailed`` is True.
    """
    gradients_list = []
    gradients_list_sample_level = []
    for l in get_layers(model):
        if isinstance(l, (Sequential, Dropout, BatchNormalization)):
            continue
        if l.name.startswith('h') or l.name.startswith('inputs'):  # hidden layers (this is just a convention )
            w = l.get_output_at(0)
            gradients = get_gradient_layer(model, X, y, w, normalize)
            if gradients.ndim > 1:
                # Collapse the sample axis into one weight per feature.
                if signed:
                    feature_weights = np.sum(gradients, axis=-2)
                else:
                    feature_weights = np.sum(np.abs(gradients), axis=-2)
            else:
                feature_weights = np.abs(gradients)
            gradients_list.append(feature_weights)
            gradients_list_sample_level.append(gradients)
    if detailed:
        return gradients_list, gradients_list_sample_level
    return gradients_list
def get_weights_gradient_outcome(model, x_train, y_train, detailed=False, target=-1, multiply_by_input=False,
                                 signed=True):
    """Compute gradients of a chosen model output w.r.t. each hidden layer.

    Layers whose name starts with 'h' or 'inputs' (project naming
    convention) have the gradient of the selected output with respect to
    their own output computed via ``get_gradeint`` (sic — external helper,
    name kept as defined elsewhere).

    Args:
        model: Keras model.
        x_train: input samples.
        y_train: labels forwarded to the gradient helper.
        detailed: if True also return the per-sample gradient arrays.
        target: which output to differentiate: ``None`` uses
            ``model.output[i]`` for the running layer index, a str selects a
            layer by name, otherwise an index into ``model.outputs``.
        multiply_by_input: forwarded to the gradient helper
            (grad*input-style attribution).
        signed: if True sum raw gradients over samples, otherwise sum
            absolute gradients.

    Returns:
        list of per-layer aggregated gradient arrays, and additionally the
        per-sample gradients when ``detailed`` is True.
    """
    print(model.output)
    layers = get_layers(model)
    gradients_list = []
    gradients_list_sample_level = []
    i = 0
    for l in layers:
        if l.name.startswith('h') or l.name.startswith('inputs'):  # hidden layers (this is just an ad hoc convention )

            if target is None:
                # NOTE(review): indexes model.output here but model.outputs
                # below — confirm both are valid for this model class.
                output = model.output[i]
            else:
                if isinstance(target, str):
                    output = model.get_layer(target).output
                else:
                    output = model.outputs[target]

            print('layer # {}, layer name {},  output name {}'.format(i, l.name, output.name))
            i += 1
            print('{} {} {} {} {}'.format(i, l.name, output.name, output, l.get_output_at(0)))
            gradients = get_gradeint(model, l.output, output, x_train, y_train, multiply_by_input=multiply_by_input)

            print('gradients', len(gradients), gradients[0].shape)
            # Collapse the sample axis into one weight per feature.
            if signed:
                g = np.sum(gradients[0], axis=0)
            else:
                g = np.sum(np.abs(gradients[0]), axis=0)
            gradients_list_sample_level.append(gradients[0])
            print('gradients', gradients[0].shape)
            gradients_list.append(g)

    if detailed:
        return gradients_list, gradients_list_sample_level

    return gradients_list