Example #1
def test_running_of_different_activation_modes(self):
     #just tests that things run, not a test for values
     for mode in blobs.NonlinearMxtsMode.vals:
         out_layer = blobs.ReLU(nonlinear_mxts_mode=mode)
         fprop_results, bprop_results_each_task =\
             self.set_up_prediction_func_and_deeplift_func(out_layer)
Example #2
 def test_relu_after_dense(self):
     input_layer = blobs.Input(num_dims=None, shape=(None, 4))
     dense_layer = blobs.Dense(W=np.random.random((2, 4)),
                               b=np.random.random((2, )),
                               dense_mxts_mode=DenseMxtsMode.Linear)
     dense_layer.set_inputs(input_layer)
     relu_after_dense = blobs.ReLU(
         nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
     relu_after_dense.set_inputs(dense_layer)
     relu_after_dense.build_fwd_pass_vars()
     self.assertEqual(relu_after_dense.nonlinear_mxts_mode,
                      NonlinearMxtsMode.RevealCancel)
Example #3
    def test_relu_gradient(self):
        out_layer = blobs.ReLU(
            nonlinear_mxts_mode=blobs.NonlinearMxtsMode.Gradient)
        fprop_results, bprop_results_each_task =\
            self.set_up_prediction_func_and_deeplift_func(out_layer)

        np.testing.assert_almost_equal(
            np.array(bprop_results_each_task[0]),
            np.array([np.array(self.w1), np.array(self.w1)]))
        np.testing.assert_almost_equal(
            np.array(bprop_results_each_task[1]),
            np.array([np.zeros_like(self.w2),
                      np.zeros_like(self.w2)]))
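A note on the expected values above: in Gradient mode the ReLU multiplier is just the local derivative, 1 where the pre-activation is positive and 0 elsewhere, so the input-level multipliers collapse to the dense weights for the active output and to zeros for the inactive one. A minimal numpy sketch of that rule (the values here are illustrative, not the fixture data from this test class):

import numpy as np

pre_act = np.array([9.0, -1.0])          # dense outputs before the ReLU
relu_grad = (pre_act > 0).astype(float)  # derivative: 1 if active, else 0
w = np.array([[1.0, 2.0], [3.0, 4.0]])   # rows standing in for w1 and w2
mults = relu_grad[:, None] * w           # row 0 -> [1.0, 2.0], row 1 -> zeros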
Example #4
    def test_relu_intgrad(self):
        out_layer = blobs.ReLU(
            nonlinear_mxts_mode=blobs.NonlinearMxtsMode.Gradient)
        out_layer.set_inputs(self.dense_layer)
        out_layer.build_fwd_pass_vars()
        self.input_layer.reset_mxts_updated()
        out_layer.set_scoring_mode(blobs.ScoringMode.OneAndZeros)
        out_layer.set_active()
        self.input_layer.update_mxts()

        fprop_func = compile_func([self.input_layer.get_activation_vars()],
                                  out_layer.get_activation_vars())
        fprop_results = [list(x) for x in fprop_func(self.inp)]

        grad_func_temp = compile_func([
            self.input_layer.get_activation_vars(),
            self.input_layer.get_reference_vars()
        ], self.input_layer.get_mxts())
        grad_func = (
            lambda input_data_list, input_references_list, **kwargs:
            #index [0] below retrieves the data for the first (and only)
            #input mode, to be passed into grad_func_temp
            grad_func_temp(input_data_list[0], input_references_list[0]))
        integrated_grads_func = (
            deeplift.util.get_integrated_gradients_function(
                gradient_computation_function=grad_func, num_intervals=10))
        bprop_results_each_task = []
        for task_idx in range(len(fprop_results[0])):
            out_layer.update_task_index(task_index=task_idx)  #set task
            bprop_results_task = [
                list(x) for x in integrated_grads_func(
                    task_idx=None,  #task setting handled manually
                    #in line above
                    input_data_list=np.array([self.inp]),
                    input_references_list=np.array(
                        [0.1 * np.ones_like(self.inp)]),
                    #batch_size and progress_update
                    #are ignored by grad_func
                    batch_size=20,
                    progress_update=10)
            ]
            bprop_results_each_task.append(bprop_results_task)

        out_layer.set_inactive()

        print(bprop_results_each_task)
        np.testing.assert_almost_equal(
            np.array(bprop_results_each_task[0]),
            np.array([-2.0 / 5.0 * np.array([1.0, 4.0])]))
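For context, deeplift.util.get_integrated_gradients_function approximates the integral of the gradients along the straight line from the reference to the input. A self-contained sketch of that Riemann-sum recipe, assuming a hypothetical grad_fn(x) that returns d(output)/d(input) at x (this mirrors the standard integrated-gradients formula, not necessarily deeplift's exact return convention):

import numpy as np

def integrated_gradients(grad_fn, inp, ref, num_intervals=10):
    # average the gradient at the midpoints of num_intervals evenly
    # spaced segments between the reference and the input
    alphas = (np.arange(num_intervals) + 0.5) / num_intervals
    avg_grad = np.mean(
        [grad_fn(ref + a * (inp - ref)) for a in alphas], axis=0)
    # scale the averaged gradient by the displacement from the reference
    return avg_grad * (inp - ref)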
Example #5
 def test_relu_after_conv2d(self):
     input_layer = blobs.Input(num_dims=None, shape=(None, 2, 2, 2))
     conv_layer = blobs.Conv2D(W=np.random.random((2, 2, 2, 2)),
                               b=np.random.random((2, )),
                               conv_mxts_mode=ConvMxtsMode.Linear,
                               strides=(1, 1),
                               border_mode=PaddingMode.valid,
                               channels_come_last=True)
     conv_layer.set_inputs(input_layer)
     relu_after_conv = blobs.ReLU(
         nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
     relu_after_conv.set_inputs(conv_layer)
     relu_after_conv.build_fwd_pass_vars()
     self.assertEqual(relu_after_conv.nonlinear_mxts_mode,
                      NonlinearMxtsMode.Rescale)
Example #6
 def test_relu_after_dense_batchnorm(self):
     input_layer = blobs.Input(num_dims=None, shape=(None, 4))
     dense_layer = blobs.Dense(W=np.random.random((4, 2)),
                               b=np.random.random((2, )),
                               dense_mxts_mode=DenseMxtsMode.Linear)
     dense_layer.set_inputs(input_layer)
     batch_norm = blobs.BatchNormalization(gamma=np.array([1.0, 1.0]),
                                           beta=np.array([-0.5, 0.5]),
                                           axis=-1,
                                           mean=np.array([-0.5, 0.5]),
                                           std=np.array([1.0, 1.0]),
                                           epsilon=0.001)
     batch_norm.set_inputs(dense_layer)
     relu_after_bn = blobs.ReLU(
         nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
     relu_after_bn.set_inputs(batch_norm)
     relu_after_bn.build_fwd_pass_vars()
     self.assertEqual(relu_after_bn.nonlinear_mxts_mode,
                      NonlinearMxtsMode.RevealCancel)
Example #7
 def test_relu_deeplift(self):
     out_layer = blobs.ReLU(
         nonlinear_mxts_mode=blobs.NonlinearMxtsMode.DeepLIFT)
     fprop_results, bprop_results_each_task =\
         self.set_up_prediction_func_and_deeplift_func(out_layer)
     self.assertListEqual(fprop_results, [[9.0, 0.0], [19.0, 0.0]])
     #post-activation at the reference would be [0.0, 1.0]
     #post-activation diff-from-reference = [9.0, -1.0], [19.0, -1.0]
     #pre-activation at the reference would be [-1.0, 1.0]
     #pre-activation diff-from-reference is [10.0, -10.0], [20.0, -20.0]
     #scale factors: [[9.0/10.0, -1.0/-10.0], [19.0/20.0, -1.0/-20.0]]
     print(bprop_results_each_task)
     np.testing.assert_almost_equal(
         np.array(bprop_results_each_task[0]),
         np.array([(9.0 / 10.0) * np.array(self.w1),
                   (19.0 / 20.0) * np.array(self.w1)]))
     np.testing.assert_almost_equal(
         np.array(bprop_results_each_task[1]),
         np.array([(-1.0 / -10.0) * np.array(self.w2),
                   (-1.0 / -20.0) * np.array(self.w2)]))
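The scale factors asserted above are the Rescale rule, multiplier = (post-activation diff-from-reference) / (pre-activation diff-from-reference), applied per unit and per sample. Carrying out the commented arithmetic as a standalone check:

post_diff = [[9.0, -1.0], [19.0, -1.0]]    # from the comments above
pre_diff = [[10.0, -10.0], [20.0, -20.0]]
scales = [[post / pre for post, pre in zip(po, pr)]
          for po, pr in zip(post_diff, pre_diff)]
# scales == [[0.9, 0.1], [0.95, 0.05]], i.e. 9/10, 1/10, 19/20, 1/20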
Example #8
 def test_relu_after_conv1d_batchnorm(self):
     input_layer = blobs.Input(num_dims=None, shape=(None, 2, 2))
     conv_layer = blobs.Conv1D(W=np.random.random((2, 2, 2)),
                               b=np.random.random((2, )),
                               conv_mxts_mode=ConvMxtsMode.Linear,
                               stride=1,
                               border_mode=PaddingMode.valid,
                               channels_come_last=True)
     conv_layer.set_inputs(input_layer)
     batch_norm = blobs.BatchNormalization(gamma=np.array([1.0, 1.0]),
                                           beta=np.array([-0.5, 0.5]),
                                           axis=-1,
                                           mean=np.array([-0.5, 0.5]),
                                           std=np.array([1.0, 1.0]),
                                           epsilon=0.001)
     batch_norm.set_inputs(conv_layer)
     relu_after_bn = blobs.ReLU(
         nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
     relu_after_bn.set_inputs(batch_norm)
     relu_after_bn.build_fwd_pass_vars()
     self.assertEqual(relu_after_bn.nonlinear_mxts_mode,
                      NonlinearMxtsMode.Rescale)
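Taken together, Examples #2, #5, #6, and #8 pin down the DeepLIFT_GenomicsDefault heuristic: a ReLU whose nearest weight-bearing ancestor is a dense layer resolves to RevealCancel, while one fed (possibly through batch normalization) by a conv layer resolves to Rescale. A hedged sketch of that dispatch, where resolve_genomics_default and ancestor_type are hypothetical names rather than deeplift's internal API:

def resolve_genomics_default(ancestor_type):
    # ancestor_type: hypothetical stand-in for deeplift's lookup of the
    # nearest weight-bearing layer feeding the ReLU (batch norm is skipped)
    if ancestor_type in ("Conv1D", "Conv2D"):
        return NonlinearMxtsMode.Rescale
    return NonlinearMxtsMode.RevealCancel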
Example #9
 def test_relu_revealcancel(self):
     out_layer = blobs.ReLU(
         nonlinear_mxts_mode=blobs.NonlinearMxtsMode.RevealCancel)
     fprop_results, bprop_results_each_task =\
         self.set_up_prediction_func_and_deeplift_func(out_layer)
     self.assertListEqual(fprop_results, [[3.0]])
     #per-input pre-activation contributions: -1.0, 2.0, -3.0, 4.0
     #split by sign: negative sum -4.0, positive sum 6.0
     #post-activation at the reference: 1.0
     #pre-activation diff-from-reference: -4.0 (neg), 6.0 (pos)
     #post-activation diff-from-reference = neg: 0.5((-1.0) + (-4.0)) = -2.5
     #                                      pos: 0.5(3.0 + 6.0) = 4.5
     #multipliers: 2.5/4.0 (neg) and 4.5/6.0 (pos)
     neg_mult = 2.5 / 4.0
     pos_mult = 4.5 / 6.0
     print(bprop_results_each_task)
     np.testing.assert_almost_equal(
         np.array(bprop_results_each_task[0]),
         np.array([
             np.array([neg_mult, pos_mult, neg_mult, pos_mult]) *
             np.array(self.w1)
         ]))
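The neg_mult and pos_mult values are the commented derivation carried out: RevealCancel averages the post-activation change from adding the negative (or positive) pre-activation diff both before and after the opposite-signed diff has been added. Spelling out the arithmetic with the reference pre-activation of 1.0 noted in the comments:

relu = lambda x: max(x, 0.0)
ref, neg, pos = 1.0, -4.0, 6.0   # reference pre-activation and signed diffs
neg_post = 0.5 * ((relu(ref + neg) - relu(ref)) +
                  (relu(ref + neg + pos) - relu(ref + pos)))   # -2.5
pos_post = 0.5 * ((relu(ref + pos) - relu(ref)) +
                  (relu(ref + pos + neg) - relu(ref + neg)))   # 4.5
neg_mult, pos_mult = neg_post / neg, pos_post / pos            # 0.625, 0.75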
Example #10
def relu_conversion(layer, name, verbose, nonlinear_mxts_mode):
    return [
        blobs.ReLU(name=name,
                   verbose=verbose,
                   nonlinear_mxts_mode=nonlinear_mxts_mode)
    ]
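Conversion functions like the one above return a list so that a single source layer can expand into several deeplift blobs if needed. A hypothetical call site (keras_layer is illustrative; only the signature defined above is assumed):

deeplift_layers = relu_conversion(
    layer=keras_layer,
    name="relu_1",
    verbose=True,
    nonlinear_mxts_mode=blobs.NonlinearMxtsMode.DeepLIFT_GenomicsDefault)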
Example #11
def relu_conversion(layer, name, mxts_mode):
    return [blobs.ReLU(name=name, mxts_mode=mxts_mode)]
Example #12
 def test_relu_after_other_layer(self):
     input_layer = blobs.Input(num_dims=None, shape=(None, 4))
     relu_layer = blobs.ReLU(
         nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
     relu_layer.set_inputs(input_layer)
     relu_layer.build_fwd_pass_vars()
Example #13
 def test_running_of_different_activation_modes(self):
     #just tests that things run, not a test for values
     for mode in blobs.NonlinearMxtsMode.vals:
         out_layer = blobs.ReLU(nonlinear_mxts_mode=mode)
         fprop_results, bprop_results_each_task =\
             self.set_up_prediction_func_and_deeplift_func(out_layer)