Example #1
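These snippets are test methods from a unittest.TestCase subclass in the deeplift test suite, so they assume self plus a handful of module-level imports. A minimal, assumed set of imports that makes the names below resolve (exact module paths may differ between deeplift versions):

 import numpy as np
 import deeplift.layers as layers
 from deeplift.layers import (NonlinearMxtsMode, DenseMxtsMode,
                              ConvMxtsMode, PaddingMode)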
 def test_relu_after_conv2d_batchnorm(self):
     #DeepLIFT_GenomicsDefault should resolve to Rescale when the ReLU
     #follows a conv layer, even with a BatchNormalization in between
     input_layer = layers.Input(batch_shape=(None, 2, 2, 2))
     conv_layer = layers.Conv2D(kernel=np.random.random(
         (2, 2, 2, 2)).astype("float32"),
                                bias=np.random.random(
                                    (2, )).astype("float32"),
                                conv_mxts_mode=ConvMxtsMode.Linear,
                                strides=(1, 1),
                                padding=PaddingMode.valid,
                                data_format="channels_last")
     conv_layer.set_inputs(input_layer)
     batch_norm = layers.BatchNormalization(
         gamma=np.array([1.0, 1.0]).astype("float32"),
         beta=np.array([-0.5, 0.5]).astype("float32"),
         axis=-1,
         mean=np.array([-0.5, 0.5]).astype("float32"),
         var=np.array([1.0, 1.0]).astype("float32"),
         epsilon=0.001)
     batch_norm.set_inputs(conv_layer)
     relu_after_bn = layers.ReLU(
         nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
     relu_after_bn.set_inputs(batch_norm)
     relu_after_bn.build_fwd_pass_vars()
     self.assertEqual(relu_after_bn.nonlinear_mxts_mode,
                      NonlinearMxtsMode.Rescale)
 def test_relu_after_other_layer(self):
     #when the preceding layer is neither conv nor dense (here the ReLU
     #sits directly on the input), GenomicsDefault falls back to
     #RevealCancel
     input_layer = layers.Input(batch_shape=(None, 4))
     relu_layer = layers.ReLU(
         nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
     relu_layer.set_inputs(input_layer)
     relu_layer.build_fwd_pass_vars()
     self.assertEqual(relu_layer.nonlinear_mxts_mode,
                      NonlinearMxtsMode.RevealCancel)
Example #3
 def test_relu_after_dense(self):
     #DeepLIFT_GenomicsDefault should resolve to RevealCancel when the
     #ReLU follows a dense layer
     input_layer = layers.Input(batch_shape=(None, 4))
     dense_layer = layers.Dense(kernel=np.random.random((4, 2)),
                                bias=np.random.random((2, )),
                                dense_mxts_mode=DenseMxtsMode.Linear)
     dense_layer.set_inputs(input_layer)
     relu_after_dense = layers.ReLU(
         nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
     relu_after_dense.set_inputs(dense_layer)
     relu_after_dense.build_fwd_pass_vars()
     self.assertEqual(relu_after_dense.nonlinear_mxts_mode,
                      NonlinearMxtsMode.RevealCancel)
 def test_relu_after_conv1d(self):
     input_layer = layers.Input(batch_shape=(None, 2, 2))
     conv_layer = layers.Conv1D(
         kernel=np.random.random((2, 2, 2)).astype("float32"),
         bias=np.random.random((2, )).astype("float32"),
         conv_mxts_mode=ConvMxtsMode.Linear,
         stride=1,
         padding=PaddingMode.valid)
     conv_layer.set_inputs(input_layer)
     relu_after_conv = layers.ReLU(
         nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
     relu_after_conv.set_inputs(conv_layer)
     relu_after_conv.build_fwd_pass_vars()
     self.assertEqual(relu_after_conv.nonlinear_mxts_mode,
                      NonlinearMxtsMode.Rescale)
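Taken together, these tests pin down the DeepLIFT_GenomicsDefault heuristic: Rescale for a ReLU that follows a conv layer (1D or 2D), RevealCancel otherwise. A hypothetical sketch of that resolution rule (the helper name and isinstance checks are illustrative, not deeplift's actual internals):

 def resolve_genomics_default(preceding_layer):
     #illustrative only: Rescale after conv layers, RevealCancel after
     #dense layers or anything else
     if isinstance(preceding_layer, (layers.Conv1D, layers.Conv2D)):
         return NonlinearMxtsMode.Rescale
     return NonlinearMxtsMode.RevealCancel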
Example #5
 def test_relu_gradient(self):
     #Gradient mode backpropagates the plain ReLU gradient: task 0 has
     #positive pre-activations, so its multipliers are w1; task 1 has
     #negative pre-activations, so its multipliers are zero
     out_layer = layers.ReLU(
         nonlinear_mxts_mode=layers.NonlinearMxtsMode.Gradient)
     fprop_results, bprop_results_each_task =\
         self.set_up_prediction_func_and_deeplift_func(out_layer)

     np.testing.assert_almost_equal(
         np.array(bprop_results_each_task[0]),
         np.array([np.array(self.w1), np.array(self.w1)]),
         decimal=5)
     np.testing.assert_almost_equal(
         np.array(bprop_results_each_task[1]),
         np.array([np.zeros_like(self.w2),
                   np.zeros_like(self.w2)]),
         decimal=5)
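Since Gradient mode uses the ordinary ReLU derivative, the expected multipliers can be reproduced with plain numpy. A minimal sketch under assumed values (w1 stands in for the fixture's task-0 weight vector, and the pre-activations are the ones the Example #7 comments imply):

 import numpy as np
 w1 = np.array([1.0, 2.0, 3.0, 4.0])  #hypothetical task-0 weights
 pre_acts = np.array([9.0, 19.0])     #task-0 pre-activations, one per sample
 relu_grad = (pre_acts > 0).astype("float32")    #1.0 where active, 0.0 where not
 multipliers = relu_grad[:, None] * w1[None, :]  #one copy of w1 per active sample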
Example #6
 def test_relu_after_dense_batchnorm_noop_noop(self):
     #the GenomicsDefault walk-back should skip the NoOp and
     #BatchNormalization layers, find the dense layer, and resolve to
     #RevealCancel
     input_layer = layers.Input(batch_shape=(None, 4))
     dense_layer = layers.Dense(kernel=np.random.random((4, 2)),
                                bias=np.random.random((2, )),
                                dense_mxts_mode=DenseMxtsMode.Linear)
     dense_layer.set_inputs(input_layer)
     batch_norm = layers.BatchNormalization(
         gamma=np.array([1.0, 1.0]).astype("float32"),
         beta=np.array([-0.5, 0.5]).astype("float32"),
         axis=-1,
         mean=np.array([-0.5, 0.5]).astype("float32"),
         var=np.array([1.0, 1.0]).astype("float32"),
         epsilon=0.001)
     batch_norm.set_inputs(dense_layer)
     noop_layer1 = layers.NoOp()
     noop_layer1.set_inputs(batch_norm)
     noop_layer2 = layers.NoOp()
     noop_layer2.set_inputs(noop_layer1)
     relu_after_bn = layers.ReLU(
         nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
     relu_after_bn.set_inputs(noop_layer2)
     relu_after_bn.build_fwd_pass_vars()
     self.assertEqual(relu_after_bn.nonlinear_mxts_mode,
                      NonlinearMxtsMode.RevealCancel)
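Together with the batchnorm test in Example #1, this shows the heuristic looks through pass-through layers before classifying the predecessor. A hypothetical sketch of that walk-back (the helper and the get_inputs() accessor are illustrative, not deeplift's actual internals):

 def walk_back_through_passthrough(layer):
     #illustrative only: skip layers that leave the character of the
     #preceding computation unchanged, then apply the conv/dense rule
     while isinstance(layer, (layers.NoOp, layers.BatchNormalization)):
         layer = layer.get_inputs()  #assumed single-input accessor
     return layer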
Example #7
 def test_relu_rescale(self):
     out_layer = layers.ReLU(
         nonlinear_mxts_mode=layers.NonlinearMxtsMode.Rescale)
     fprop_results, bprop_results_each_task =\
         self.set_up_prediction_func_and_deeplift_func(out_layer)
     self.assertListEqual(fprop_results, [[9.0, 0.0], [19.0, 0.0]])
     #post-activation under default would be [0.0, 1.0]
     #post-activation diff-from-default is [9.0, -1.0], [19.0, -1.0]
     #pre-activation under default would be [-1.0, 1.0]
     #pre-activation diff-from-default is [10.0, -10.0], [20.0, -20.0]
     #scale factors: [9.0/10.0, -1.0/-10.0], [19.0/20.0, -1.0/-20.0]
     np.testing.assert_almost_equal(np.array(bprop_results_each_task[0]),
                                    np.array([
                                        (9.0 / 10.0) * np.array(self.w1),
                                        (19.0 / 20.0) * np.array(self.w1)
                                    ]),
                                    decimal=5)
     np.testing.assert_almost_equal(np.array(bprop_results_each_task[1]),
                                    np.array([
                                        (-1.0 / -10.0) * np.array(self.w2),
                                        (-1.0 / -20.0) * np.array(self.w2)
                                    ]),
                                    decimal=5)
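The Rescale rule the comments walk through is just elementwise division of the output's diff-from-reference by the input's diff-from-reference. A standalone numpy check of that arithmetic, with the values copied from the comments above:

 import numpy as np
 pre_diff = np.array([[10.0, -10.0], [20.0, -20.0]])  #input diff-from-reference
 post_diff = np.array([[9.0, -1.0], [19.0, -1.0]])    #output diff-from-reference
 print(post_diff / pre_diff)  #[[0.9, 0.1], [0.95, 0.05]]: the scale factors asserted above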
Example #8
 def test_running_of_different_activation_modes(self):
     #smoke test: just checks that every mode runs; values are not asserted
     for mode in layers.NonlinearMxtsMode.vals:
         out_layer = layers.ReLU(nonlinear_mxts_mode=mode)
         fprop_results, bprop_results_each_task =\
             self.set_up_prediction_func_and_deeplift_func(out_layer)