Example #1
def test_relu_after_conv2d_batchnorm(self):
    # Conv2D -> BatchNormalization -> ReLU. The GenomicsDefault mode
    # should resolve to Rescale, because the nearest parameterized
    # layer upstream of the ReLU is convolutional.
    input_layer = layers.Input(batch_shape=(None, 2, 2, 2))
    conv_layer = layers.Conv2D(
        kernel=np.random.random((2, 2, 2, 2)).astype("float32"),
        bias=np.random.random((2,)).astype("float32"),
        conv_mxts_mode=ConvMxtsMode.Linear,
        strides=(1, 1),
        padding=PaddingMode.valid,
        data_format="channels_last")
    conv_layer.set_inputs(input_layer)
    batch_norm = layers.BatchNormalization(
        gamma=np.array([1.0, 1.0]).astype("float32"),
        beta=np.array([-0.5, 0.5]).astype("float32"),
        axis=-1,
        mean=np.array([-0.5, 0.5]).astype("float32"),
        var=np.array([1.0, 1.0]).astype("float32"),
        epsilon=0.001)
    batch_norm.set_inputs(conv_layer)
    relu_after_bn = layers.ReLU(
        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
    relu_after_bn.set_inputs(batch_norm)
    relu_after_bn.build_fwd_pass_vars()
    self.assertEqual(relu_after_bn.nonlinear_mxts_mode,
                     NonlinearMxtsMode.Rescale)
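
These snippets are methods of a unittest.TestCase and omit their imports. A minimal preamble they appear to assume, with module paths taken from the kundajelab/deeplift package layout (the exact re-export paths are an assumption):

import numpy as np
from deeplift import layers
# NonlinearMxtsMode and DenseMxtsMode are defined in deeplift.layers.core,
# ConvMxtsMode and PaddingMode in deeplift.layers.convolutional; all four
# are assumed to be re-exported from deeplift.layers.
from deeplift.layers import (NonlinearMxtsMode, DenseMxtsMode,
                             ConvMxtsMode, PaddingMode)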
Example #2
def prepare_batch_norm_deeplift_model(self, axis):
    # Helper: Input -> BatchNormalization -> Flatten -> Dense(1 unit),
    # wired for DeepLIFT scoring so the calling tests can inspect the
    # multipliers propagated back to the input layer.
    self.input_layer = layers.Input(batch_shape=(None, 2, 2, 2))
    self.batch_norm_layer = layers.BatchNormalization(
        gamma=self.gamma,
        beta=self.beta,
        axis=axis,
        mean=self.mean,
        var=self.var,
        epsilon=self.epsilon)
    self.batch_norm_layer.set_inputs(self.input_layer)
    self.flatten_layer = layers.Flatten()
    self.flatten_layer.set_inputs(self.batch_norm_layer)
    # An (8, 1) all-ones kernel: the single dense output is the sum of
    # all eight flattened batch-norm outputs.
    self.dense_layer = layers.Dense(
        kernel=np.ones((1, 8)).astype("float32").T,
        bias=np.zeros(1).astype("float32"),
        dense_mxts_mode=DenseMxtsMode.Linear)
    self.dense_layer.set_inputs(self.flatten_layer)
    self.dense_layer.build_fwd_pass_vars()
    # Score task 0 with the one-and-zeros convention, then propagate
    # the multipliers back to the input layer.
    self.dense_layer.set_scoring_mode(layers.ScoringMode.OneAndZeros)
    self.dense_layer.set_active()
    self.dense_layer.update_task_index(0)
    self.input_layer.update_mxts()
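
Example #2 only wires up the graph; the tests that call this helper then compile and evaluate the input-layer multipliers. A hedged sketch of that follow-up step inside the same TestCase (the method name is hypothetical, and compile_func is assumed to live in deeplift.util):

def test_batch_norm_mxts_sketch(self):
    from deeplift.util import compile_func  # assumed location
    # Build the graph, normalizing over the channel axis.
    self.prepare_batch_norm_deeplift_model(axis=-1)
    # Compile a function mapping (inputs, references) to the input-layer
    # multipliers; the exact call signature is an assumption.
    func = compile_func([self.input_layer.get_activation_vars(),
                         self.input_layer.get_reference_vars()],
                        self.input_layer.get_mxts())
    inputs = np.random.random((10, 2, 2, 2)).astype("float32")
    multipliers = func([inputs, np.zeros_like(inputs)])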
Example #3
def test_relu_after_dense_batchnorm_noop_noop(self):
    # Dense -> BatchNormalization -> NoOp -> NoOp -> ReLU. The
    # GenomicsDefault mode should look through the batch norm and both
    # NoOp layers, find a dense layer, and resolve to RevealCancel.
    input_layer = layers.Input(batch_shape=(None, 4))
    # Note: unlike Examples #1 and #2, the kernel and bias here are
    # left as float64 (numpy's default dtype).
    dense_layer = layers.Dense(
        kernel=np.random.random((4, 2)),
        bias=np.random.random((2,)),
        dense_mxts_mode=DenseMxtsMode.Linear)
    dense_layer.set_inputs(input_layer)
    batch_norm = layers.BatchNormalization(
        gamma=np.array([1.0, 1.0]).astype("float32"),
        beta=np.array([-0.5, 0.5]).astype("float32"),
        axis=-1,
        mean=np.array([-0.5, 0.5]).astype("float32"),
        var=np.array([1.0, 1.0]).astype("float32"),
        epsilon=0.001)
    batch_norm.set_inputs(dense_layer)
    noop_layer1 = layers.NoOp()
    noop_layer1.set_inputs(batch_norm)
    noop_layer2 = layers.NoOp()
    noop_layer2.set_inputs(noop_layer1)
    relu_after_bn = layers.ReLU(
        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
    relu_after_bn.set_inputs(noop_layer2)
    relu_after_bn.build_fwd_pass_vars()
    self.assertEqual(relu_after_bn.nonlinear_mxts_mode,
                     NonlinearMxtsMode.RevealCancel)
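
Taken together, Examples #1 and #3 pin down the DeepLIFT_GenomicsDefault resolution rule: a ReLU adopts Rescale when the nearest parameterized layer upstream is convolutional and RevealCancel when it is dense, looking through BatchNormalization and NoOp layers along the way. A minimal standalone sketch of the dense case, runnable outside a TestCase with the preamble shown after Example #1 (a plain assert stands in for assertEqual):

input_layer = layers.Input(batch_shape=(None, 4))
dense_layer = layers.Dense(kernel=np.random.random((4, 2)),
                           bias=np.random.random((2,)),
                           dense_mxts_mode=DenseMxtsMode.Linear)
dense_layer.set_inputs(input_layer)
relu = layers.ReLU(
    nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
relu.set_inputs(dense_layer)
relu.build_fwd_pass_vars()
assert relu.nonlinear_mxts_mode == NonlinearMxtsMode.RevealCancel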