def setUp(self):
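        # Fixture: a channels_first Conv2D -> MaxPool2D -> AvgPool2D ->
        # Flatten -> Dense -> sigmoid Keras model. Also compiles a Keras
        # forward-prop function and an input-gradient function for later
        # comparison, and saves the model to an .h5 file for the
        # conversion tests.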
        self.inp = (np.random.randn(10*10*51*51)
                    .reshape(10,10,51,51))
        self.keras_model = keras.models.Sequential()
        conv_layer = keras.layers.convolutional.Conv2D(
                        filters=2, kernel_size=(4,4), strides=(2,2),
                        activation="relu", input_shape=(10,51,51),
                        data_format="channels_first")
        self.keras_model.add(conv_layer)
        self.keras_model.add(keras.layers.pooling.MaxPooling2D(
                             pool_size=(4,4), strides=(2,2),
                             data_format="channels_first")) 
        self.keras_model.add(keras.layers.pooling.AveragePooling2D(
                             pool_size=(4,4), strides=(2,2),
                             data_format="channels_first")) 
        self.keras_model.add(keras.layers.Flatten())
        self.keras_model.add(keras.layers.Dense(output_dim=1))
        self.keras_model.add(keras.layers.core.Activation("sigmoid"))
        self.keras_model.compile(loss="mse", optimizer="sgd")
        self.keras_output_fprop_func = compile_func(
                        [self.keras_model.layers[0].input,
                         K.learning_phase()],
                        self.keras_model.layers[-1].output)

        grad = tf.gradients(tf.reduce_sum(
            self.keras_model.layers[-2].output[:,0]),
            [self.keras_model.layers[0].input])[0]
        self.grad_func = compile_func(
            [self.keras_model.layers[0].input,
             K.learning_phase()], grad)

        self.saved_file_path = "conv2model_channelsfirst.h5"
        if (os.path.isfile(self.saved_file_path)):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example #2
    def set_up_prediction_func_and_deeplift_func(self, out_layer):
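        # Stack out_layer on the dense layer, compile a DeepLIFT forward
        # function and a multipliers (bprop) function, then collect the
        # multipliers for every output task using an all-zeros reference.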

        out_layer.set_inputs(self.dense_layer)
        out_layer.build_fwd_pass_vars()
        self.input_layer.reset_mxts_updated()
        out_layer.set_scoring_mode(layers.ScoringMode.OneAndZeros)
        out_layer.set_active()
        self.input_layer.update_mxts()

        fprop_func = compile_func([self.input_layer.get_activation_vars()],
                                  out_layer.get_activation_vars())
        fprop_results = [list(x) for x in fprop_func(self.inp)]

        bprop_func = compile_func([
            self.input_layer.get_activation_vars(),
            self.input_layer.get_reference_vars()
        ], self.input_layer.get_mxts())
        bprop_results_each_task = []
        for task_idx in range(len(fprop_results[0])):
            out_layer.update_task_index(task_index=task_idx)
            bprop_results_task = [
                list(x)
                for x in bprop_func([self.inp,
                                     np.zeros_like(self.inp)])
            ]
            bprop_results_each_task.append(bprop_results_task)

        out_layer.set_inactive()
        return fprop_results, bprop_results_each_task
Example #3
    def setUp(self):
        self.inp = (np.random.randn(10*10*51*51)
                    .reshape(10,10,51,51)).transpose(0,2,3,1)
        self.keras_model = keras.models.Sequential()
        conv_layer = keras.layers.convolutional.Convolution2D(
                        nb_filter=2, nb_row=4, nb_col=4, subsample=(2,2),
                        activation="relu", input_shape=(51,51,10))
        self.keras_model.add(conv_layer)
        self.keras_model.add(keras.layers.pooling.MaxPooling2D(
                             pool_size=(4,4), strides=(2,2))) 
        self.keras_model.add(keras.layers.pooling.AveragePooling2D(
                             pool_size=(4,4), strides=(2,2))) 
        self.keras_model.add(keras.layers.Flatten())
        self.keras_model.add(keras.layers.Dense(output_dim=1))
        self.keras_model.add(keras.layers.core.Activation("sigmoid"))
        self.keras_model.compile(loss="mse", optimizer="sgd")
        self.keras_output_fprop_func = compile_func(
                        [self.keras_model.layers[0].input,
                         K.learning_phase()],
                        self.keras_model.layers[-1].output)

        grad = tf.gradients(tf.reduce_sum(
            self.keras_model.layers[-2].output[:,0]),
            [self.keras_model.layers[0].input])[0]
        self.grad_func = compile_func(
            [self.keras_model.layers[0].input,
             K.learning_phase()], grad)

        self.saved_file_path = "conv2model_validpadding.h5"
        if (os.path.isfile(self.saved_file_path)):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example #4
    def set_up_prediction_func_and_deeplift_func(self, out_layer):
        
        out_layer.set_inputs(self.dense_layer)
        out_layer.build_fwd_pass_vars()
        self.input_layer.reset_mxts_updated()
        out_layer.set_scoring_mode(layers.ScoringMode.OneAndZeros)
        out_layer.set_active()
        self.input_layer.update_mxts()

        fprop_func = compile_func([self.input_layer.get_activation_vars()],
                                out_layer.get_activation_vars())
        fprop_results = [list(x) for x in fprop_func(self.inp)] 

        bprop_func = compile_func(
                          [self.input_layer.get_activation_vars(),
                           self.input_layer.get_reference_vars()],
                          self.input_layer.get_mxts())
        bprop_results_each_task = []
        for task_idx in range(len(fprop_results[0])):
            out_layer.update_task_index(task_index=task_idx)
            bprop_results_task = [list(x) for x in bprop_func(
                                   [self.inp, np.zeros_like(self.inp)])]
            bprop_results_each_task.append(bprop_results_task)

        out_layer.set_inactive()
        return fprop_results, bprop_results_each_task
Example #5
    def setUp(self):
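        # Fixture: BatchNormalization with hand-chosen gamma/beta/mean/
        # variance followed by a unit-weight Dense head, so activations
        # and gradients can be checked against closed-form values.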

        self.axis = 3
        self.inp = (np.arange(16).reshape(2, 2, 2,
                                          2).transpose(0, 2, 3,
                                                       1).astype("float32"))
        self.keras_model = keras.models.Sequential()
        self.epsilon = 10**(-3)
        self.gamma = np.array([2.0, 3.0]).astype("float32")
        self.beta = np.array([4.0, 5.0]).astype("float32")
        self.mean = np.array([3.0, 3.0]).astype("float32")
        self.var = np.array([4.0, 9.0]).astype("float32")
        batch_norm_layer = keras.layers.normalization.BatchNormalization(
            axis=self.axis, input_shape=(2, 2, 2))
        self.keras_model.add(batch_norm_layer)
        batch_norm_layer.set_weights(
            np.array([
                self.gamma,  #gamma (scaling)
                self.beta,  #beta (shift)
                self.mean,  #mean
                self.var  #variance
            ]))
        self.keras_model.add(keras.layers.Flatten())
        dense_layer = keras.layers.Dense(output_dim=1)
        self.keras_model.add(dense_layer)
        dense_layer.set_weights([
            np.ones((1, 8)).astype("float32").T,
            np.zeros(1).astype("float32")
        ])
        self.keras_model.compile(loss="mse", optimizer="sgd")

        self.keras_batchnorm_fprop_func = compile_func(
            [self.keras_model.layers[0].input,
             K.learning_phase()], self.keras_model.layers[0].output)
        self.keras_output_fprop_func = compile_func(
            [self.keras_model.layers[0].input,
             K.learning_phase()], self.keras_model.layers[-1].output)

        grad = tf.gradients(
            tf.reduce_sum(self.keras_model.layers[-1].output[:, 0]),
            [self.keras_model.layers[0].input])[0]
        self.grad_func = compile_func(
            [self.keras_model.layers[0].input,
             K.learning_phase()], grad)

        self.saved_file_path = "batchnorm_model.h5"
        if (os.path.isfile(self.saved_file_path)):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example #6
    def setUp(self):
        self.inp = (np.random.randn(10 * 10 * 51).reshape(10, 10,
                                                          51)).transpose(
                                                              0, 2, 1)
        self.keras_model = keras.models.Sequential()
        #self.keras_model.add(keras.layers.InputLayer((51,10)))
        conv_layer1 = keras.layers.convolutional.Convolution1D(
            nb_filter=20,
            filter_length=4,
            subsample_length=2,
            padding='same',
            input_shape=(51, 10))
        self.keras_model.add(conv_layer1)
        self.keras_model.add(
            keras.layers.advanced_activations.PReLU(shared_axes=[1],
                                                    alpha_initializer="ones"))
        conv_layer2 = keras.layers.convolutional.Convolution1D(
            nb_filter=10,
            filter_length=4,
            subsample_length=2,
            activation="relu",
            padding='same')
        self.keras_model.add(conv_layer2)
        self.keras_model.add(
            keras.layers.pooling.MaxPooling1D(pool_length=4,
                                              stride=2,
                                              padding='same'))
        self.keras_model.add(
            keras.layers.pooling.AveragePooling1D(pool_length=4,
                                                  stride=2,
                                                  padding='same'))
        self.keras_model.add(keras.layers.Flatten())
        self.keras_model.add(keras.layers.Dense(output_dim=1))
        self.keras_model.add(keras.layers.core.Activation("sigmoid"))
        self.keras_model.compile(loss="mse", optimizer="sgd")
        self.keras_output_fprop_func = compile_func(
            [self.keras_model.layers[0].input,
             K.learning_phase()], self.keras_model.layers[-1].output)

        grad = tf.gradients(
            tf.reduce_sum(self.keras_model.layers[-2].output[:, 0]),
            [self.keras_model.layers[0].input])[0]
        self.grad_func = compile_func(
            [self.keras_model.layers[0].input,
             K.learning_phase()], grad)

        self.saved_file_path = "conv1model_samepadding.h5"
        if (os.path.isfile(self.saved_file_path)):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example #7
    def setUp(self):
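        # Fixture: a two-input functional model whose Conv1D and
        # MaxPooling1D weights are shared between both inputs; the pooled
        # outputs are concatenated before the dense head.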
        self.inp1 = (np.random.randn(10 * 10 * 51).reshape(10, 10,
                                                           51).transpose(
                                                               0, 2, 1))
        self.inp2 = (np.random.randn(10 * 10 * 51).reshape(10, 10,
                                                           51).transpose(
                                                               0, 2, 1))
        inp1 = keras.layers.Input(batch_shape=(None, 51, 10), name="inp1")
        inp2 = keras.layers.Input(batch_shape=(None, 51, 10), name="inp2")
        conv = keras.layers.convolutional.Convolution1D(nb_filter=2,
                                                        filter_length=4,
                                                        subsample_length=2,
                                                        activation="relu")
        maxpool = keras.layers.pooling.MaxPooling1D(pool_length=4,
                                                    stride=2)
        conv1_out = conv(inp1)
        conv2_out = conv(inp2)
        maxpool1_out = maxpool(conv1_out)
        maxpool2_out = maxpool(conv2_out)
        merge_out = keras.layers.Concatenate(axis=2)(
            [maxpool1_out, maxpool2_out])
        flatten_out = keras.layers.core.Flatten()(merge_out)
        dense1_out = keras.layers.core.Dense(output_dim=5)(flatten_out)
        dense1relu_out = keras.layers.core.Activation("relu")(dense1_out)
        output_preact = keras.layers.core.Dense(
            output_dim=1, name="output_preact")(dense1relu_out)
        output = keras.layers.core.Activation(
            "sigmoid", name="output_postact")(output_preact)
        self.keras_model = keras.models.Model(input=[inp1, inp2],
                                              output=output)
        self.keras_model.compile(optimizer='rmsprop',
                                 loss='binary_crossentropy',
                                 metrics=['accuracy'])

        keras_output_fprop_func = compile_func(
            [inp1, inp2, keras.backend.learning_phase()],
            self.keras_model.layers[-1].output)
        self.keras_output_fprop_func =\
            lambda x,y: keras_output_fprop_func([x,y,False])

        grad = tf.gradients(tf.reduce_sum(output_preact[:, 0]), [inp1, inp2])
        grad_func = compile_func(
            [inp1, inp2, keras.backend.learning_phase()], grad)
        self.grad_func = lambda x, y: grad_func([x, y, False])

        self.saved_file_path = "funcmodel.h5"
        if (os.path.isfile(self.saved_file_path)):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example #8
    def test_fprop_maxpool2d(self): 
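        # Forward pass of MaxPool2D (2x2 pool, stride 1, valid padding,
        # channels_last); the expected 3x3 output per channel is
        # hard-coded below.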

        pool_layer = layers.MaxPool2D(pool_size=(2,2),
                          strides=(1,1),
                          padding=PaddingMode.valid,
                          maxpool_deeplift_mode=MaxPoolDeepLiftMode.gradient,
                          data_format="channels_last")
        self.create_small_net_with_pool_layer(pool_layer,
                                              outputs_per_channel=9)

        func = compile_func([self.input_layer.get_activation_vars()],
                           self.pool_layer.get_activation_vars())
        np.testing.assert_almost_equal(func([self.reference_inps[0],
                                             self.reference_inps[0]-1]),
                                       np.array(
                                       [[[[1,2,3],
                                          [5,5,4],
                                          [6,7,8]],
                                         [[2,3,4],
                                          [6,6,5],
                                          [7,8,9]]],
                                        [[[0,1,2],
                                          [4,4,3],
                                          [5,6,7]],
                                         [[1,2,3],
                                          [5,5,4],
                                          [6,7,8]]]]).transpose(0,2,3,1))
Example #9
 def test_concat(self):
     func = compile_func([
         self.input_layer1.get_activation_vars(),
         self.input_layer2.get_activation_vars()
     ], self.concat_layer.get_activation_vars())
     np.testing.assert_allclose(func([self.inp1, self.inp2]),
                                np.array([[[[1]], [[1]]], [[[2]], [[2]]]]))
Example #10
    def test_fprop_avgpool2d(self): 

        pool_layer = layers.AvgPool2D(pool_size=(2,2),
                                  strides=(1,1),
                                  padding=PaddingMode.valid,
                                  data_format="channels_last")
        self.create_small_net_with_pool_layer(pool_layer,
                                              outputs_per_channel=9)

        func = compile_func([self.input_layer.get_activation_vars()],
                             self.pool_layer.get_activation_vars())
        np.testing.assert_almost_equal(func([self.reference_inps[0],
                                             self.reference_inps[0]-1]),
                                       0.25*np.array(
                                       [[[[ 1, 3, 5],
                                          [ 6,10, 4],
                                          [11,16,19]],
                                         [[ 5, 7, 9],
                                          [10,14, 8],
                                          [15,20,23]]],
                                        [[[-3,-1, 1],
                                          [ 2, 6, 0],
                                          [ 7,12,15]],
                                         [[ 1, 3, 5],
                                          [ 6,10, 4],
                                          [11,16,19]]]]).transpose(0,2,3,1))
Example #11
    def test_backprop_maxpool2d_gradients(self):
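        # Multipliers through MaxPool2D in gradient mode: each input
        # position gets the per-channel dense weight (2 or 3) times the
        # number of pooling windows in which it attains the max.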
        pool_layer = layers.MaxPool2D(pool_size=(2,2),
                  strides=(1,1),
                  padding=PaddingMode.valid,
                  maxpool_deeplift_mode=MaxPoolDeepLiftMode.gradient,
                  data_format="channels_last")
        self.create_small_net_with_pool_layer(pool_layer,
                                              outputs_per_channel=9)

        self.dense_layer.update_task_index(task_index=0)
        func = compile_func([
                self.input_layer.get_activation_vars(),
                self.input_layer.get_reference_vars()],
                                   self.input_layer.get_mxts())
        np.testing.assert_almost_equal(
            func([self.backprop_test_inps,
                  np.ones_like(self.backprop_test_inps)*self.reference_inps]),
                                  np.array(
                                  [[np.array([[1, 0, 0, 0],
                                     [0, 0, 2, 0],
                                     [2, 1, 1, 0],
                                     [0, 0, 1, 1]])*2,
                                    np.array([[0, 0, 1, 1],
                                     [0, 1, 0, 0],
                                     [0, 2, 1, 0],
                                     [1, 0, 1, 1]])*3], 
                                   [np.array([[0, 0, 1, 1],
                                     [0, 1, 0, 0],
                                     [0, 2, 1, 0],
                                     [1, 0, 1, 1]])*2,
                                    np.array([[1, 0, 0, 0],
                                     [0, 0, 2, 0],
                                     [2, 1, 1, 0],
                                     [0, 0, 1, 1]])*3]]).transpose(0,2,3,1))
Example #12
    def test_fprop(self): 

        conv_layer = layers.Conv2D(kernel=self.conv_W, bias=self.conv_b,
                                   strides=(1,1),
                                   padding=PaddingMode.valid,
                                   data_format="channels_last",
                                   conv_mxts_mode="Linear")
        self.create_small_net_with_conv_layer(conv_layer,
                                              outputs_per_channel=9)

        func = compile_func([self.input_layer.get_activation_vars()],
                                self.conv_layer.get_activation_vars())
        np.testing.assert_almost_equal(func(self.inp),
                               np.array(
                               [[[[439, 467, 495],
                                  [551, 579, 607],
                                  [663, 691, 719]],
                                 [[-439, -467, -495],
                                  [-551, -579, -607],
                                  [-663, -691, -719]],],
                               [[[1335, 1363, 1391],
                                 [1447, 1475, 1503],
                                 [1559, 1587, 1615],],
                                [[-1335, -1363, -1391],
                                 [-1447, -1475, -1503],
                                 [-1559, -1587, -1615]]]]).transpose(0,2,3,1))
Example #13
 def test_fprop(self): 
     conv_layer = layers.Conv1D(kernel=self.conv_W, bias=self.conv_b,
                               stride=1,
                               padding=PaddingMode.valid,
                               conv_mxts_mode="Linear")
     self.create_small_net_with_conv_layer(conv_layer,
                                           outputs_per_channel=3)
     func = compile_func([self.input_layer.get_activation_vars()],
                             self.conv_layer.get_activation_vars())
      # input:
      #      [[[-8,-7,-6,-5],
      #        [-4,-3,-2,-1]],
      #       [[ 0, 1, 2, 3],
      #        [ 4, 5, 6, 7]]]
      # W:
      # [-2,-1
      #   0, 1]
      # first window of example 1: 16+7+0-3 = 20, minus bias (1.0) -> 19
      # first window of example 2: 0-1+0+5 = 4, minus bias (1.0) -> 3
     np.testing.assert_almost_equal(func(self.inp),
                            np.array(
                            [[[ 19, 17, 15],
                              [-19,-17,-15]],
                             [[ 3, 1,-1],
                              [-3,-1, 1]]]).transpose(0,2,1))
Example #14
    def test_dense_backprop(self):
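        # Multipliers propagated back through Conv2D (Linear mxts mode)
        # to the input with an all-zeros reference.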
        conv_layer = layers.Conv2D(kernel=self.conv_W,
                                   bias=self.conv_b,
                                   strides=(1, 1),
                                   padding=PaddingMode.valid,
                                   data_format="channels_last",
                                   conv_mxts_mode="Linear")
        self.create_small_net_with_conv_layer(conv_layer,
                                              outputs_per_channel=9)

        self.dense_layer.update_task_index(task_index=0)
        func = compile_func([
            self.input_layer.get_activation_vars(),
            self.input_layer.get_reference_vars()
        ], self.input_layer.get_mxts())
        np.testing.assert_almost_equal(
            func([self.inp, np.zeros_like(self.inp)]),
            np.array([[[[0, 2, 2, 2], [4, 12, 12, 8], [4, 12, 12, 8],
                        [4, 10, 10, 6]],
                       [[8, 18, 18, 10], [20, 44, 44, 24], [20, 44, 44, 24],
                        [12, 26, 26, 14]]],
                      [[[0, 2, 2, 2], [4, 12, 12, 8], [4, 12, 12, 8],
                        [4, 10, 10, 6]],
                       [[8, 18, 18, 10], [20, 44, 44, 24], [20, 44, 44, 24],
                        [12, 26, 26, 14]]]]).transpose(0, 2, 3, 1))
Example #15
    def test_backprop_maxpool2d_gradients(self):
        pool_layer = layers.MaxPool2D(
            pool_size=(2, 2),
            strides=(1, 1),
            padding=PaddingMode.valid,
            maxpool_deeplift_mode=MaxPoolDeepLiftMode.gradient,
            data_format="channels_last")
        self.create_small_net_with_pool_layer(pool_layer,
                                              outputs_per_channel=9)

        self.dense_layer.update_task_index(task_index=0)
        func = compile_func([
            self.input_layer.get_activation_vars(),
            self.input_layer.get_reference_vars()
        ], self.input_layer.get_mxts())
        np.testing.assert_almost_equal(
            func([
                self.backprop_test_inps,
                np.ones_like(self.backprop_test_inps) * self.reference_inps
            ]),
            np.array([[
                np.array([[1, 0, 0, 0], [0, 0, 2, 0], [2, 1, 1, 0],
                          [0, 0, 1, 1]]) * 2,
                np.array([[0, 0, 1, 1], [0, 1, 0, 0], [0, 2, 1, 0],
                          [1, 0, 1, 1]]) * 3
            ],
                      [
                          np.array([[0, 0, 1, 1], [0, 1, 0, 0], [0, 2, 1, 0],
                                    [1, 0, 1, 1]]) * 2,
                          np.array([[1, 0, 0, 0], [0, 0, 2, 0], [2, 1, 1, 0],
                                    [0, 0, 1, 1]]) * 3
                      ]]).transpose(0, 2, 3, 1))
Example #16
    def setUp(self):
        self.inp1 = (np.random.randn(10*10*51)
                    .reshape(10,10,51).transpose(0,2,1))
        self.inp2 = (np.random.randn(10*10*51)
                    .reshape(10,10,51).transpose(0,2,1))
        inp1 = keras.layers.Input(batch_shape=(None,51,10), name="inp1")
        inp2 = keras.layers.Input(batch_shape=(None,51,10), name="inp2")
        conv = keras.layers.convolutional.Convolution1D(
                 nb_filter=2, filter_length=4,
                 subsample_length=2, activation="relu")
        maxpool = keras.layers.pooling.MaxPooling1D(
                        pool_length=4, stride=2)
        conv1_out = conv(inp1)
        conv2_out = conv(inp2)
        maxpool1_out = maxpool(conv1_out)
        maxpool2_out = maxpool(conv2_out)
        merge_out = keras.layers.Concatenate(axis=2)([maxpool1_out, maxpool2_out])
        flatten_out = keras.layers.core.Flatten()(merge_out)
        dense1_out = keras.layers.core.Dense(output_dim=5)(flatten_out)
        dense1relu_out = keras.layers.core.Activation("relu")(dense1_out)
        output_preact = keras.layers.core.Dense(
                         output_dim=1, name="output_preact")(dense1relu_out)
        output = keras.layers.core.Activation("sigmoid",
                        name="output_postact")(output_preact)
        self.keras_model = keras.models.Model(input=[inp1, inp2],
                                              output=output)
        self.keras_model.compile(optimizer='rmsprop',
                              loss='binary_crossentropy',
                              metrics=['accuracy'])
 
        keras_output_fprop_func = compile_func(
            [inp1, inp2, keras.backend.learning_phase()],
            self.keras_model.layers[-1].output)
        self.keras_output_fprop_func =\
            lambda x,y: keras_output_fprop_func([x,y,False])


        grad = tf.gradients(tf.reduce_sum(
                output_preact[:,0]), [inp1, inp2])
        grad_func = compile_func(
            [inp1, inp2, keras.backend.learning_phase()], grad) 
        self.grad_func = lambda x,y: grad_func([x,y,False])

        self.saved_file_path = "funcmodel.h5"
        if (os.path.isfile(self.saved_file_path)):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example #17
    def setUp(self):
         
        self.axis=3
        self.inp = (np.arange(16).reshape(2,2,2,2)
                    .transpose(0,2,3,1).astype("float32"))
        self.keras_model = keras.models.Sequential()
        self.epsilon = 10**(-3)
        self.gamma = np.array([2.0, 3.0]).astype("float32") 
        self.beta = np.array([4.0, 5.0]).astype("float32")
        self.mean = np.array([3.0, 3.0]).astype("float32")
        self.var = np.array([4.0, 9.0]).astype("float32")
        batch_norm_layer = keras.layers.normalization.BatchNormalization(
                           axis=self.axis, input_shape=(2,2,2))
        self.keras_model.add(batch_norm_layer)
        batch_norm_layer.set_weights(np.array([
                                      self.gamma, #gamma (scaling)
                                      self.beta, #beta (shift)
                                      self.mean, #mean
                                      self.var])) #variance
        self.keras_model.add(keras.layers.Flatten())
        dense_layer = keras.layers.Dense(output_dim=1)
        self.keras_model.add(dense_layer)
        dense_layer.set_weights([np.ones((1,8)).astype("float32").T,
                                 np.zeros(1).astype("float32")])
        self.keras_model.compile(loss="mse", optimizer="sgd")

        self.keras_batchnorm_fprop_func = compile_func(
            [self.keras_model.layers[0].input, K.learning_phase()],
            self.keras_model.layers[0].output)
        self.keras_output_fprop_func = compile_func(
            [self.keras_model.layers[0].input, K.learning_phase()],
            self.keras_model.layers[-1].output)

        grad = tf.gradients(tf.reduce_sum(
                   self.keras_model.layers[-1].output[:,0]),
                   [self.keras_model.layers[0].input])[0]
        self.grad_func = compile_func(
            [self.keras_model.layers[0].input, K.learning_phase()], grad)

        self.saved_file_path = "batchnorm_model.h5"
        if (os.path.isfile(self.saved_file_path)):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example #18
 def test_batch_norm_negative_axis_fwd_prop(self):
     self.prepare_batch_norm_deeplift_model(axis=self.axis - 4)
     deeplift_fprop_func = compile_func(
         [self.input_layer.get_activation_vars()],
         self.batch_norm_layer.get_activation_vars())
     np.testing.assert_almost_equal(deeplift_fprop_func(self.inp),
                                    self.keras_batchnorm_fprop_func(
                                        [self.inp, 0]),
                                    decimal=5)
Example #19
 def test_batch_norm_negative_axis_fwd_prop(self):
     self.prepare_batch_norm_deeplift_model(axis=self.axis-4)
     deeplift_fprop_func = compile_func(
                               [self.input_layer.get_activation_vars()],
                               self.batch_norm_layer.get_activation_vars())
     np.testing.assert_almost_equal(
         deeplift_fprop_func(self.inp),
         self.keras_batchnorm_fprop_func([self.inp, 0]),
         decimal=5)
Example #20
 def test_batch_norm_positive_axis_backprop(self):
     self.prepare_batch_norm_deeplift_model(axis=self.axis)
     deeplift_multipliers_func = compile_func(
                         [self.input_layer.get_activation_vars(),
                          self.input_layer.get_reference_vars()],
                          self.input_layer.get_mxts())
     np.testing.assert_almost_equal(
             deeplift_multipliers_func([self.inp, np.zeros_like(self.inp)]),
             self.grad_func([self.inp, 0]), decimal=5)
Example #21
 def test_batch_norm_negative_axis_backprop(self):
     self.prepare_batch_norm_deeplift_model(axis=self.axis - 4)
     deeplift_multipliers_func = compile_func([
         self.input_layer.get_activation_vars(),
         self.input_layer.get_reference_vars()
     ], self.input_layer.get_mxts())
     np.testing.assert_almost_equal(deeplift_multipliers_func(
         [self.inp, np.zeros_like(self.inp)]),
                                    self.grad_func([self.inp, 0]),
                                    decimal=5)
Example #22
 def test_dense_backprop(self):
     func = compile_func([self.input_layer.get_activation_vars(),
                          self.input_layer.get_reference_vars()],
                          self.dense_layer.get_pos_mxts())
     self.dense_layer.update_task_index(task_index=1)
     self.assertListEqual([list(x) for x in func(
                           [self.inp, np.zeros_like(self.inp)])],
                          [[0.0, 1.0], [0.0, 1.0]])
     func = compile_func([self.input_layer.get_activation_vars(),
                          self.input_layer.get_reference_vars()],
                          self.input_layer.get_mxts())
     self.dense_layer.update_task_index(task_index=0)
     self.assertListEqual([list(x) for x in func(
                          [self.inp, np.zeros_like(self.inp)])],
                          [self.w1, self.w1])
     self.dense_layer.update_task_index(task_index=1)
     self.assertListEqual([list(x) for x in func([self.inp,
                           np.zeros_like(self.inp)])],
                          [self.w2, self.w2])
Example #23
 def test_convert_conv2d_model_forward_prop(self): 
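      # Convert the saved .h5 file with kc.convert_model_from_saved_files
      # and check that the DeepLIFT forward pass matches the Keras output.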
     deeplift_model =\
         kc.convert_model_from_saved_files(self.saved_file_path) 
     deeplift_fprop_func = compile_func(
                 [deeplift_model.get_layers()[0].get_activation_vars()],
                  deeplift_model.get_layers()[-1].get_activation_vars())
     np.testing.assert_almost_equal(
         deeplift_fprop_func(self.inp),
         self.keras_output_fprop_func([self.inp, 0]),
         decimal=6)
Example #24
 def test_convert_conv2d_model_forward_prop(self):
     deeplift_model =\
         kc.convert_model_from_saved_files(self.saved_file_path)
     deeplift_fprop_func = compile_func(
         [deeplift_model.get_layers()[0].get_activation_vars()],
         deeplift_model.get_layers()[-1].get_activation_vars())
     np.testing.assert_almost_equal(deeplift_fprop_func(self.inp),
                                    self.keras_output_fprop_func(
                                        [self.inp, 0]),
                                    decimal=6)
Example #25
 def test_batch_norm_convert_model_fprop(self):
     deeplift_model =\
         kc.convert_model_from_saved_files(
             self.saved_file_path,
             nonlinear_mxts_mode=NonlinearMxtsMode.Rescale)
     deeplift_fprop_func = compile_func(
         [deeplift_model.get_layers()[0].get_activation_vars()],
         deeplift_model.get_layers()[-1].get_activation_vars())
     np.testing.assert_almost_equal(deeplift_fprop_func(self.inp),
                                    self.keras_output_fprop_func(
                                        [self.inp, 0]),
                                    decimal=5)
Example #26
 def test_convert_conv1d_model_forward_prop(self): 
     deeplift_model =\
         kc.convert_model_from_saved_files(
             self.saved_file_path,
             nonlinear_mxts_mode=NonlinearMxtsMode.Rescale) 
     deeplift_fprop_func = compile_func(
             inputs=[deeplift_model.get_layers()[0].get_activation_vars()],
             outputs=deeplift_model.get_layers()[-1].get_activation_vars())
     np.testing.assert_almost_equal(
         deeplift_fprop_func(self.inp),
         self.keras_output_fprop_func([self.inp, 0]),
         decimal=6)
Example #27
 def test_convert_conv1d_model_forward_prop(self):
     deeplift_model =\
         kc.convert_model_from_saved_files(
             self.saved_file_path,
             nonlinear_mxts_mode=NonlinearMxtsMode.Gradient)
     deeplift_fprop_func = compile_func(
         inputs=[deeplift_model.get_layers()[0].get_activation_vars()],
         outputs=deeplift_model.get_layers()[-1].get_activation_vars())
     np.testing.assert_almost_equal(deeplift_fprop_func(self.inp),
                                    self.keras_output_fprop_func(
                                        [self.inp, 0]),
                                    decimal=6)
Example #28
    def setUp(self):
        self.inp = (np.random.randn(10*10*51)
                    .reshape(10,10,51)).transpose(0,2,1)
        self.keras_model = keras.models.Sequential()
        #self.keras_model.add(keras.layers.InputLayer((51,10)))
        conv_layer1 = keras.layers.convolutional.Convolution1D(
                        nb_filter=20, filter_length=4, subsample_length=2,
                        padding='same', input_shape=(51,10))
        self.keras_model.add(conv_layer1)
        self.keras_model.add(keras.layers.advanced_activations.PReLU(
                              shared_axes=[1], alpha_initializer="ones"))
        conv_layer2 = keras.layers.convolutional.Convolution1D(
                        nb_filter=10, filter_length=4, subsample_length=2,
                        activation="relu",
                        padding='same')
        self.keras_model.add(conv_layer2)
        self.keras_model.add(keras.layers.pooling.MaxPooling1D(
                             pool_length=4, stride=2, padding='same')) 
        self.keras_model.add(keras.layers.pooling.AveragePooling1D(
                             pool_length=4, stride=2, padding='same')) 
        self.keras_model.add(keras.layers.Flatten())
        self.keras_model.add(keras.layers.Dense(output_dim=1))
        self.keras_model.add(keras.layers.core.Activation("sigmoid"))
        self.keras_model.compile(loss="mse", optimizer="sgd")
        self.keras_output_fprop_func = compile_func(
                        [self.keras_model.layers[0].input,
                         K.learning_phase()],
                        self.keras_model.layers[-1].output)

        grad = tf.gradients(tf.reduce_sum(
            self.keras_model.layers[-2].output[:,0]),
            [self.keras_model.layers[0].input])[0]
        self.grad_func = compile_func(
            [self.keras_model.layers[0].input,
             K.learning_phase()], grad) 

        self.saved_file_path = "conv1model_samepadding.h5"
        if (os.path.isfile(self.saved_file_path)):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example #29
 def test_fprop_pos_and_neg_contribs(self): 
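      # Separate the Conv1D output into positive and negative contributions
      # relative to an all-ones reference and compare to values worked out
      # by hand below.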
     conv_layer = layers.Conv1D(kernel=self.conv_W, bias=self.conv_b,
                               stride=1,
                               padding=PaddingMode.valid,
                               conv_mxts_mode="Linear")
     self.create_small_net_with_conv_layer(conv_layer,
                                           outputs_per_channel=3)
     pos_contribs, neg_contribs = self.conv_layer.get_pos_and_neg_contribs() 
     func_pos = compile_func([self.input_layer.get_activation_vars(),
                              self.input_layer.get_reference_vars()],
                          pos_contribs)
     func_neg = compile_func([self.input_layer.get_activation_vars(),
                              self.input_layer.get_reference_vars()],
                          neg_contribs)
      # diff from ref:
      #      [[[-9,-8,-7,-6],
      #        [-5,-4,-3,-2]],
      #       [[-1, 0, 1, 2],
      #        [ 3, 4, 5, 6]]]
      # W:
      # [-2,-1
      #   0, 1]
      # first window, first filter: pos = 18+8 = 26, neg = -4
      # (the second filter is the negation of the first, so the signs flip)
     np.testing.assert_almost_equal(func_pos([self.inp,
                                              np.ones_like(self.inp)]),
                            np.array(
                            [[[ 26, 23, 20],
                              [  4,  3,  2]],
                             [[  6,  5,  6],
                              [  0,  1,  4]]]).transpose(0,2,1))
     np.testing.assert_almost_equal(func_neg([self.inp,
                                              np.ones_like(self.inp)]),
                            np.array(
                            [[[ -4, -3, -2],
                              [-26,-23,-20]],
                             [[  0, -1, -4],
                              [ -6, -5, -6]]]).transpose(0,2,1))
Example #30
    def test_convert_conv1d_model_forward_prop(self): 
        deeplift_model =\
            kc.convert_model_from_saved_files(
                self.saved_file_path,
                nonlinear_mxts_mode=NonlinearMxtsMode.Rescale) 
        print(deeplift_model.get_name_to_layer().keys())
        deeplift_fprop_func = compile_func(
            [deeplift_model.get_name_to_layer()['inp1_0'].get_activation_vars(),
             deeplift_model.get_name_to_layer()['inp2_0'].get_activation_vars()],
            deeplift_model.get_name_to_layer()['output_postact_0']
                .get_activation_vars())
        np.testing.assert_almost_equal(
            deeplift_fprop_func([self.inp1, self.inp2]),
            self.keras_output_fprop_func(self.inp1, self.inp2),
            decimal=6)
Example #31
 def test_running_of_different_dense_modes(self):
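      # Smoke test: every DenseMxtsMode should compile and run end to end.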
     for mode in DenseMxtsMode.vals:
         input_layer = layers.Input(batch_shape=(None,4))
         W = np.array([self.w1, self.w2]).T
         b = np.array([-1.0, 1.0])
         dense_layer = layers.Dense(kernel=W, bias=b,
                                    dense_mxts_mode=mode)
         dense_layer.set_inputs(input_layer)
         dense_layer.build_fwd_pass_vars()
         dense_layer.set_scoring_mode(layers.ScoringMode.OneAndZeros)
         dense_layer.set_active()
         input_layer.update_mxts()
         func = compile_func([input_layer.get_activation_vars(),
                              input_layer.get_reference_vars()],
                              input_layer.get_mxts())
         dense_layer.update_task_index(task_index=0)
         func([self.inp, np.zeros_like(self.inp)])
Example #32
 def test_convert_conv1d_model_forward_prop(self):
     deeplift_model =\
         kc.convert_model_from_saved_files(
             self.saved_file_path,
             nonlinear_mxts_mode=NonlinearMxtsMode.Rescale)
     print(deeplift_model.get_name_to_layer().keys())
      deeplift_fprop_func = compile_func(
          [deeplift_model.get_name_to_layer()['inp1_0'].get_activation_vars(),
           deeplift_model.get_name_to_layer()['inp2_0'].get_activation_vars()],
          deeplift_model.get_name_to_layer()['output_postact_0']
              .get_activation_vars())
     np.testing.assert_almost_equal(
         deeplift_fprop_func([self.inp1, self.inp2]),
         self.keras_output_fprop_func(self.inp1, self.inp2),
         decimal=6)
Example #33
 def test_concat_backprop(self):
     func = compile_func([
             self.input_layer1.get_activation_vars(),
             self.input_layer2.get_activation_vars(),
             self.input_layer1.get_reference_vars(),
             self.input_layer2.get_reference_vars()],
             [self.input_layer1.get_mxts(),
              self.input_layer2.get_mxts()]
             )
     print(func([self.inp1, self.inp2,
                 np.zeros_like(self.inp1), np.zeros_like(self.inp2)]))
     self.dense_layer.update_task_index(task_index=0)
     np.testing.assert_allclose(func([self.inp1, self.inp2,
                                      np.zeros_like(self.inp1),
                                      np.zeros_like(self.inp2)]),
                                [np.array([[[[1]]],[[[1]]]]),
                                 np.array([[[[2]]],[[[2]]]])])
Example #34
 def test_dense_backprop_stride(self):
     conv_layer = layers.Conv1D(kernel=self.conv_W, bias=self.conv_b,
                                stride=2,
                                padding=PaddingMode.valid,
                                conv_mxts_mode="Linear")
     self.create_small_net_with_conv_layer(conv_layer,
                                           outputs_per_channel=2)
     self.dense_layer.update_task_index(task_index=0)
     func = compile_func([self.input_layer.get_activation_vars(),
                          self.input_layer.get_reference_vars()],
                         self.input_layer.get_mxts())
     np.testing.assert_almost_equal(
         func([self.inp, np.zeros_like(self.inp)]),
         np.array(
          [[[ -4,  -2,  -4, -2],
            [  0,   2,   0,  2]],
           [[ -4,  -2,  -4, -2],
            [  0,   2,   0,  2]]]).transpose(0,2,1))
Example #35
    def test_fprop_avgpool(self): 

        pool_layer = layers.AvgPool1D(pool_length=2,
                                      stride=1,
                                      padding=PaddingMode.valid)
        self.create_small_net_with_pool_layer(pool_layer,
                                              outputs_per_channel=3)

        func = compile_func([self.input_layer.get_activation_vars()],
                           self.pool_layer.get_activation_vars())
        np.testing.assert_almost_equal(func(self.backprop_test_inps),
                                        np.array(
                                        [[
                                          [0.5,2.5,3.5],
                                          [2.5,1.5,0.5]],
                                         [[-0.5,-1.5,-2.5],
                                          [-2.5,-1.5,-0.5]
                                         ]]).transpose(0,2,1))
Example #36
    def test_fprop_avgpool2d(self):

        pool_layer = layers.AvgPool2D(pool_size=(2, 2),
                                      strides=(1, 1),
                                      padding=PaddingMode.valid,
                                      data_format="channels_last")
        self.create_small_net_with_pool_layer(pool_layer,
                                              outputs_per_channel=9)

        func = compile_func([self.input_layer.get_activation_vars()],
                            self.pool_layer.get_activation_vars())
        np.testing.assert_almost_equal(
            func([self.reference_inps[0], self.reference_inps[0] - 1]),
            0.25 * np.array([[[[1, 3, 5], [6, 10, 4], [11, 16, 19]],
                              [[5, 7, 9], [10, 14, 8], [15, 20, 23]]],
                             [[[-3, -1, 1], [2, 6, 0], [7, 12, 15]],
                              [[1, 3, 5], [6, 10, 4], [11, 16, 19]]]
                             ]).transpose(0, 2, 3, 1))
Example #37
    def test_fprop_maxpool1d(self): 

        pool_layer = layers.MaxPool1D(pool_length=2,
                          stride=1,
                          padding=PaddingMode.valid,
                          maxpool_deeplift_mode=MaxPoolDeepLiftMode.gradient)
        self.create_small_net_with_pool_layer(pool_layer,
                                              outputs_per_channel=3)

        func = compile_func([self.input_layer.get_activation_vars()],
                            self.pool_layer.get_activation_vars())
        np.testing.assert_almost_equal(func(self.backprop_test_inps),
                                       np.array(
                                        [[
                                         [1,4,4],
                                         [3,2,1]],
                                        [[ 0,-1,-2],
                                         [-2,-1, 0]
                                        ]]).transpose(0,2,1))
Example #38
    def test_backprop_avgpool(self):
        pool_layer = layers.AvgPool1D(pool_length=2, stride=1,
                                      padding=PaddingMode.valid)
        self.create_small_net_with_pool_layer(pool_layer,
                                              outputs_per_channel=3)

        self.dense_layer.update_task_index(task_index=0)
        func = compile_func([self.input_layer.get_activation_vars(), 
                           self.input_layer.get_reference_vars()],
                           self.input_layer.get_mxts())
        avg_pool_grads = np.array([1, 2, 2, 1]).astype("float32")*0.5 
        np.testing.assert_almost_equal(func(
                  [self.backprop_test_inps,
                   np.ones_like(self.backprop_test_inps)*self.reference_inps]),
                              np.array([
                              [avg_pool_grads*2,
                                avg_pool_grads*3], 
                              [avg_pool_grads*2,
                               avg_pool_grads*3]]).transpose(0,2,1))
Example #39
    def predict_on_batch(self, input_batch):
        """
        Function that can be used to check the successful model conversion.
           The output of this function should match the output of the original model when executing .predict(input_batch)
        Args:
          input_batch: Model input data 
        Returns:
          Model predictions
        """
        from deeplift.util import run_function_in_batches
        from deeplift.util import compile_func
        x_standardized = self.model._batch_to_list(input_batch)
        if self.fwd_predict_fn is None:
            # TODO: Once DeepLIFT layer annotation works integrate it here too:
            """
            # identify model output layers:
            self.output_layers_idxs = []
            for output_name in self.model.model.output_names:
                for i, l in enumerate(self.model.model.layers):
                    if l.name == output_name:
                        self.output_layers_idxs.append(i)
            """
            inputs = [
                self.deeplift_model.get_layers()[i].get_activation_vars()
                for i in self.input_layer_idxs
            ]
            outputs = [
                self.deeplift_model.get_layers()[i].get_activation_vars()
                for i in self.output_layers_idxs
            ]
            self.fwd_predict_fn = compile_func(inputs, outputs)

        preds = run_function_in_batches(input_data_list=x_standardized,
                                        func=self.fwd_predict_fn,
                                        batch_size=self.batch_size,
                                        progress_update=None)

        preds = np.array(preds)
        if len(self.output_layers_idxs) == 1:
            preds = preds[0, ...]

        return preds
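
A minimal usage sketch for predict_on_batch (hypothetical names, not from the source: `wrapper` is an instance of the class above, `keras_model` is the original Keras model, and `input_batch` is a matching input array):

import numpy as np

# Hypothetical usage: the converted model's predictions should match the
# original Keras model's .predict output if the conversion succeeded.
converted_preds = wrapper.predict_on_batch(input_batch)
original_preds = keras_model.predict(input_batch)
np.testing.assert_almost_equal(converted_preds, original_preds, decimal=5)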
Example #40
    def test_fprop_maxpool2d(self):

        pool_layer = layers.MaxPool2D(
            pool_size=(2, 2),
            strides=(1, 1),
            padding=PaddingMode.valid,
            maxpool_deeplift_mode=MaxPoolDeepLiftMode.gradient,
            data_format="channels_last")
        self.create_small_net_with_pool_layer(pool_layer,
                                              outputs_per_channel=9)

        func = compile_func([self.input_layer.get_activation_vars()],
                            self.pool_layer.get_activation_vars())
        np.testing.assert_almost_equal(
            func([self.reference_inps[0], self.reference_inps[0] - 1]),
            np.array([[[[1, 2, 3], [5, 5, 4], [6, 7, 8]],
                       [[2, 3, 4], [6, 6, 5], [7, 8, 9]]],
                      [[[0, 1, 2], [4, 4, 3], [5, 6, 7]],
                       [[1, 2, 3], [5, 5, 4], [6, 7,
                                               8]]]]).transpose(0, 2, 3, 1))
Example #41
 def test_concat_backprop(self):
     func = compile_func([
         self.input_layer1.get_activation_vars(),
         self.input_layer2.get_activation_vars(),
         self.input_layer1.get_reference_vars(),
         self.input_layer2.get_reference_vars()
     ], [self.input_layer1.get_mxts(),
         self.input_layer2.get_mxts()])
     print(
         func([
             self.inp1, self.inp2,
             np.zeros_like(self.inp1),
             np.zeros_like(self.inp2)
         ]))
     self.dense_layer.update_task_index(task_index=0)
     np.testing.assert_allclose(
         func([
             self.inp1, self.inp2,
             np.zeros_like(self.inp1),
             np.zeros_like(self.inp2)
         ]), [np.array([[[[1]]], [[[1]]]]),
              np.array([[[[2]]], [[[2]]]])])
Example #42
    def test_backprop_avgpool2d(self):
        pool_layer = layers.AvgPool2D(pool_size=(2, 2),
                                      strides=(1, 1),
                                      padding=PaddingMode.valid,
                                      data_format="channels_last")
        self.create_small_net_with_pool_layer(pool_layer,
                                              outputs_per_channel=9)

        self.dense_layer.update_task_index(task_index=0)
        func = compile_func([
            self.input_layer.get_activation_vars(),
            self.input_layer.get_reference_vars()
        ], self.input_layer.get_mxts())
        avg_pool_grads = np.array([[1, 2, 2, 1], [2, 4, 4, 2], [2, 4, 4, 2],
                                   [1, 2, 2, 1]]).astype("float32")
        np.testing.assert_almost_equal(
            func([
                self.backprop_test_inps,
                np.ones_like(self.backprop_test_inps) * self.reference_inps
            ]),
            np.array([[avg_pool_grads * 2 * 0.25, avg_pool_grads * 3 * 0.25],
                      [avg_pool_grads * 2 * 0.25,
                       avg_pool_grads * 3 * 0.25]]).transpose(0, 2, 3, 1))
Example #43
    def test_backprop_avgpool2d(self):
        pool_layer = layers.AvgPool2D(pool_size=(2,2),
                  strides=(1,1),
                  padding=PaddingMode.valid,
                  data_format="channels_last")
        self.create_small_net_with_pool_layer(pool_layer,
                                              outputs_per_channel=9)

        self.dense_layer.update_task_index(task_index=0)
        func = compile_func([self.input_layer.get_activation_vars(), 
                           self.input_layer.get_reference_vars()],
                           self.input_layer.get_mxts())
        avg_pool_grads = np.array([[1, 2, 2, 1],
                                   [2, 4, 4, 2],
                                   [2, 4, 4, 2],
                                   [1, 2, 2, 1]]).astype("float32") 
        np.testing.assert_almost_equal(func([
                  self.backprop_test_inps,
                  np.ones_like(self.backprop_test_inps)*self.reference_inps]),
                              np.array(
                              [[avg_pool_grads*2*0.25,
                                avg_pool_grads*3*0.25], 
                               [avg_pool_grads*2*0.25,
                                avg_pool_grads*3*0.25]]).transpose(0,2,3,1))
Example #44
 def test_concat(self): 
     func = compile_func([self.input_layer1.get_activation_vars(),
                             self.input_layer2.get_activation_vars()],
                             self.concat_layer.get_activation_vars())
     np.testing.assert_allclose(func([self.inp1, self.inp2]),
                                np.array([[[[1]],[[1]]],[[[2]],[[2]]]]))
Example #45
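# Convert the same saved Keras model under three nonlinear_mxts_mode settings
# (RevealCancel, Gradient, GuidedBackprop), then check that the converted
# RevealCancel model reproduces the original Keras predictions.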
revealcancel_model = kc.convert_model_from_saved_files(
    h5_file=saved_model_file,
    nonlinear_mxts_mode=NonlinearMxtsMode.RevealCancel)
grad_model = kc.convert_model_from_saved_files(
    h5_file=saved_model_file, nonlinear_mxts_mode=NonlinearMxtsMode.Gradient)
guided_backprop_model = kc.convert_model_from_saved_files(
    h5_file=saved_model_file,
    nonlinear_mxts_mode=NonlinearMxtsMode.GuidedBackprop)

import deeplift.util
from deeplift.util import compile_func
import numpy as np
from keras import backend as K

deeplift_model = revealcancel_model
deeplift_prediction_func = compile_func(
    [deeplift_model.get_layers()[0].get_activation_vars()],
    deeplift_model.get_layers()[-1].get_activation_vars())
original_model_predictions = keras_model.predict(X_test, batch_size=200)
converted_model_predictions = deeplift.util.run_function_in_batches(
    input_data_list=[X_test],
    func=deeplift_prediction_func,
    batch_size=200,
    progress_update=None)
print(
    "difference in predictions:",
    np.max(
        np.array(converted_model_predictions) -
        np.array(original_model_predictions)))
assert np.max(
    np.array(converted_model_predictions) -
    np.array(original_model_predictions)) < 10**-5
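
A follow-up sketch, assuming deeplift's usual scoring entry point get_target_contribs_func (an assumption; it is not shown in this snippet) and reusing `revealcancel_model` and `X_test` from above:

# Assumed deeplift API: get_target_contribs_func returns a scoring function
# whose signature matches the inner func of _get_func in Example #47
# (task_idx, input_data_list, batch_size, progress_update, ...).
contribs_func = revealcancel_model.get_target_contribs_func(
    find_scores_layer_idx=0,   # score with respect to the input layer
    target_layer_idx=-2)       # the pre-activation output layer
scores = np.array(contribs_func(task_idx=0,
                                input_data_list=[X_test],
                                input_references_list=[np.zeros_like(X_test)],
                                batch_size=200,
                                progress_update=None))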
Example #46
 def test_dense_fprop(self): 
     func = compile_func([self.input_layer.get_activation_vars()],
                          self.dense_layer.get_activation_vars())
     self.assertListEqual([list(x) for x in func(self.inp)],
                          [[9.0,-9.0], [19.0, -19.0]])
Example #47
 def _get_func(self, find_scores_layers, 
                     target_layer,
                     input_layers, func_type,
                     slice_objects=None):
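      # Compile one function mapping every input layer's (activations,
      # references) to the requested score type, then wrap it in a helper
      # that activates the target layer, selects a task, and runs batched.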
      if not isinstance(find_scores_layers, list):
         remove_list_wrapper_on_return = True
         find_scores_layers = [find_scores_layers] 
     else:
         remove_list_wrapper_on_return = False
     for find_scores_layer in find_scores_layers:
         find_scores_layer.reset_mxts_updated()
     self._set_scoring_mode_for_target_layer(target_layer)
     for find_scores_layer in find_scores_layers:
         find_scores_layer.update_mxts()
     if (func_type == FuncType.contribs):
         output_symbolic_vars = [
          find_scores_layer.get_target_contrib_vars() for find_scores_layer
          in find_scores_layers]
     elif (func_type == FuncType.multipliers):
         output_symbolic_vars = [
          find_scores_layer.get_mxts() for find_scores_layer in
          find_scores_layers]
     elif (func_type == FuncType.contribs_of_input_with_filter_refs):
         output_symbolic_vars =\
          [find_scores_layer.get_contribs_of_inputs_with_filter_refs()
           for find_scores_layer in find_scores_layers]
     else:
         raise RuntimeError("Unsupported func_type: "+func_type)
     if (slice_objects is not None):
         output_symbolic_vars = output_symbolic_vars[slice_objects]
     core_function = compile_func([input_layer.get_activation_vars()
                                 for input_layer in input_layers]+
                                [input_layer.get_reference_vars()
                                 for input_layer in input_layers],
                                output_symbolic_vars)
     def func(task_idx, input_data_list,
              batch_size, progress_update,
              input_references_list=None):
         if (isinstance(input_data_list, dict)):
             assert hasattr(self, '_input_layer_names'),\
              ("Dictionary supplied for input_data_list but model does "
               "not have an attribute '_input_layer_names")
             input_data_list = [input_data_list[x] for x in
                                self._input_layer_names]
         if (input_references_list is None):
             print("No reference provided - using zeros")
             input_references_list = [0.0 for x in input_data_list]
         if (isinstance(input_references_list, dict)):
             assert hasattr(self, '_input_layer_names'),\
              ("Dictionary supplied for input_references_list but model "
               "does not have an attribute '_input_layer_names")
             input_references_list = [input_references_list[x] for x in
                                      self._input_layer_names]
         input_references_list = [
             np.ones_like(input_data)*reference
             for (input_data, reference) in
             zip(input_data_list, input_references_list)]
         #WARNING: this is not thread-safe. Do not try to
         #parallelize or you can end up with multiple target_layers
         #active at once
         target_layer.set_active()
         target_layer.update_task_index(task_idx)
         to_return = deeplift.util.run_function_in_batches(
                 func = core_function,
                 input_data_list = input_data_list+input_references_list,
                 batch_size = batch_size,
                 progress_update = progress_update,
                 multimodal_output=True)
         target_layer.set_inactive()
         if (remove_list_wrapper_on_return):
             #remove the enclosing []; should be only one element
             assert len(to_return)==1
             to_return = to_return[0]
         return to_return
     return func