def test_relu_after_other_layer(self):
    input_layer = layers.Input(batch_shape=(None, 4))
    relu_layer = layers.ReLU(
        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
    relu_layer.set_inputs(input_layer)
    relu_layer.build_fwd_pass_vars()
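
All of these snippets follow the deeplift test-suite conventions and assume the same imports. A minimal sketch; the exact module paths (a deeplift 0.6-style layout) are an assumption:

import numpy as np
import deeplift.layers as layers
from deeplift.layers import (NonlinearMxtsMode, DenseMxtsMode,
                             ConvMxtsMode, PaddingMode)
from deeplift.util import compile_func
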
Example #2
def test_relu_after_conv2d_batchnorm(self):
    input_layer = layers.Input(batch_shape=(None, 2, 2, 2))
    conv_layer = layers.Conv2D(
        kernel=np.random.random((2, 2, 2, 2)).astype("float32"),
        bias=np.random.random((2,)).astype("float32"),
        conv_mxts_mode=ConvMxtsMode.Linear,
        strides=(1, 1),
        padding=PaddingMode.valid,
        data_format="channels_last")
    conv_layer.set_inputs(input_layer)
    batch_norm = layers.BatchNormalization(
        gamma=np.array([1.0, 1.0]).astype("float32"),
        beta=np.array([-0.5, 0.5]).astype("float32"),
        axis=-1,
        mean=np.array([-0.5, 0.5]).astype("float32"),
        var=np.array([1.0, 1.0]).astype("float32"),
        epsilon=0.001)
    batch_norm.set_inputs(conv_layer)
    relu_after_bn = layers.ReLU(
        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
    relu_after_bn.set_inputs(batch_norm)
    relu_after_bn.build_fwd_pass_vars()
    # GenomicsDefault resolves to Rescale after a conv layer
    self.assertEqual(relu_after_bn.nonlinear_mxts_mode,
                     NonlinearMxtsMode.Rescale)
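
The DeepLIFT_GenomicsDefault mode used above is a placeholder that gets resolved when build_fwd_pass_vars() walks the graph: a ReLU sitting downstream of a conv layer resolves to Rescale, while one downstream of a dense layer resolves to RevealCancel (see Example #5 below). Intervening BatchNormalization and NoOp layers are looked through, which is what this example and Example #13 exercise.
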
Example #3
def setUp(self):
    # swap axes for tensorflow
    self.input_layer = layers.Input(batch_shape=(None, 4, 2))
    # tensorflow, shockingly, does not flip the weights of a conv
    self.w1 = np.arange(4).reshape(2, 2).astype("float32") - 2.0
    self.w2 = -(np.arange(4).reshape(2, 2).astype("float32") - 2.0)
    self.conv_W = (np.array([self.w1, self.w2])
                   .transpose(2, 1, 0).astype("float32"))
    self.conv_b = np.array([-1.0, 1.0]).astype("float32")
Example #4
def setUp(self):
    self.input_layer = layers.Input(batch_shape=(None, 4))
    self.w1 = [1.0, 2.0, 3.0, 4.0]
    self.w2 = [-1.0, -2.0, -3.0, -4.0]
    W = np.array([self.w1, self.w2]).T
    b = np.array([-1.0, 1.0])
    self.dense_layer = layers.Dense(kernel=W,
                                    bias=b,
                                    dense_mxts_mode=DenseMxtsMode.Linear)
    self.dense_layer.set_inputs(self.input_layer)
    self.inp = [[1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0]]
Example #5
def test_relu_after_dense(self):
    input_layer = layers.Input(batch_shape=(None, 4))
    dense_layer = layers.Dense(kernel=np.random.random((4, 2)),
                               bias=np.random.random((2,)),
                               dense_mxts_mode=DenseMxtsMode.Linear)
    dense_layer.set_inputs(input_layer)
    relu_after_dense = layers.ReLU(
        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
    relu_after_dense.set_inputs(dense_layer)
    relu_after_dense.build_fwd_pass_vars()
    # GenomicsDefault resolves to RevealCancel after a dense layer
    self.assertEqual(relu_after_dense.nonlinear_mxts_mode,
                     NonlinearMxtsMode.RevealCancel)
Example #6
def setUp(self):
    self.input_layer1 = layers.Input(batch_shape=(None, 1, 1, 1))
    self.input_layer2 = layers.Input(batch_shape=(None, 1, 1, 1))
    self.concat_layer = layers.Concat(axis=1)
    self.concat_layer.set_inputs([self.input_layer1, self.input_layer2])
    self.flatten_layer = layers.Flatten()
    self.flatten_layer.set_inputs(self.concat_layer)
    self.dense_layer = layers.Dense(kernel=np.array([[1, 2]]).T,
                                    bias=[1],
                                    dense_mxts_mode=DenseMxtsMode.Linear)
    self.dense_layer.set_inputs(self.flatten_layer)
    self.dense_layer.build_fwd_pass_vars()

    self.input_layer1.reset_mxts_updated()
    self.input_layer2.reset_mxts_updated()
    self.dense_layer.set_scoring_mode(layers.ScoringMode.OneAndZeros)
    self.dense_layer.set_active()
    self.input_layer1.update_mxts()
    self.input_layer2.update_mxts()

    self.inp1 = np.arange(2).reshape((2, 1, 1, 1)) + 1
    self.inp2 = np.arange(2).reshape((2, 1, 1, 1)) + 1
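
With this setup, the per-input multipliers can be compiled the same way Example #10 below does for a single input. A hedged sketch (the compile_func pattern is copied from Example #10; the helper name and the two-input argument ordering are assumptions, not part of the original test):

def get_input_mxts(self):
    # hypothetical helper for illustration only
    self.dense_layer.update_task_index(task_index=0)
    func = compile_func(
        [self.input_layer1.get_activation_vars(),
         self.input_layer2.get_activation_vars(),
         self.input_layer1.get_reference_vars(),
         self.input_layer2.get_reference_vars()],
        [self.input_layer1.get_mxts(), self.input_layer2.get_mxts()])
    return func([self.inp1, self.inp2,
                 np.zeros_like(self.inp1), np.zeros_like(self.inp2)])
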
Example #7
def setUp(self):
    # theano dimensional ordering assumed here... would need to
    # swap axes for tensorflow
    self.reference_inps = np.array([[[0, 0, 0, 0],
                                     [0, 0, 0, 0]]]).transpose(0, 2, 1)

    self.backprop_test_inps = np.array([[[0, 1, 4, 3],
                                         [3, 2, 1, 0]],
                                        [[0, -1, -2, -3],
                                         [-3, -2, -1, 0]]]).transpose(0, 2, 1)
    self.input_layer = layers.Input(batch_shape=(None, 4, 2))
Example #8

def test_relu_after_conv1d(self):
    input_layer = layers.Input(batch_shape=(None, 2, 2))
    conv_layer = layers.Conv1D(
        kernel=np.random.random((2, 2, 2)).astype("float32"),
        bias=np.random.random((2,)).astype("float32"),
        conv_mxts_mode=ConvMxtsMode.Linear,
        stride=1,
        padding=PaddingMode.valid)
    conv_layer.set_inputs(input_layer)
    relu_after_conv = layers.ReLU(
        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
    relu_after_conv.set_inputs(conv_layer)
    relu_after_conv.build_fwd_pass_vars()
    # GenomicsDefault resolves to Rescale after a conv layer
    self.assertEqual(relu_after_conv.nonlinear_mxts_mode,
                     NonlinearMxtsMode.Rescale)
Example #9
def setUp(self):
    self.input_layer = layers.Input(batch_shape=(None, 4))
    self.w1 = [1.0, 2.0, 3.0, 4.0]
    self.w2 = [-1.0, -2.0, -3.0, -4.0]
    W = np.array([self.w1, self.w2]).T
    b = np.array([-1.0, 1.0])
    self.dense_layer = layers.Dense(kernel=W, bias=b,
                                    dense_mxts_mode=DenseMxtsMode.Linear)
    self.dense_layer.set_inputs(self.input_layer)
    self.dense_layer.build_fwd_pass_vars()
    self.dense_layer.set_scoring_mode(layers.ScoringMode.OneAndZeros)
    self.dense_layer.set_active()
    self.input_layer.update_mxts()
    self.inp = [[1.0, 1.0, 1.0, 1.0],
                [2.0, 2.0, 2.0, 2.0]]
Example #10
def test_running_of_different_dense_modes(self):
    for mode in DenseMxtsMode.vals:
        input_layer = layers.Input(batch_shape=(None, 4))
        W = np.array([self.w1, self.w2]).T
        b = np.array([-1.0, 1.0])
        dense_layer = layers.Dense(kernel=W, bias=b,
                                   dense_mxts_mode=mode)
        dense_layer.set_inputs(input_layer)
        dense_layer.build_fwd_pass_vars()
        dense_layer.set_scoring_mode(layers.ScoringMode.OneAndZeros)
        dense_layer.set_active()
        input_layer.update_mxts()
        func = compile_func([input_layer.get_activation_vars(),
                             input_layer.get_reference_vars()],
                            input_layer.get_mxts())
        dense_layer.update_task_index(task_index=0)
        func([self.inp, np.zeros_like(self.inp)])
Example #11
def setUp(self):
    # theano dimensional ordering assumed here... would need to
    # swap axes for tensorflow
    self.reference_inps = np.array(
        [[[[0, 0, 2, 3], [0, 1, 0, 0],
           [0, 5, 4, 0], [6, 0, 7, 8]],
          [[1, 1, 3, 4], [1, 2, 1, 1],
           [1, 6, 5, 1], [7, 1, 8, 9]]]]).transpose(0, 2, 3, 1)

    self.backprop_test_inps = np.array(
        [[[[2, 0, 2, 3], [0, 1, 4, 0],
           [7, 6, 5, 0], [6, 0, 8, 9]],
          [[0, 0, 2, 3], [0, 1, 0, 0],
           [0, 5, 4, 0], [6, 0, 7, 8]]],
         [[[1, 1, 3, 4], [1, 2, 1, 1],
           [1, 6, 5, 1], [7, 1, 8, 9]],
          [[3, 1, 3, 4], [1, 2, 5, 1],
           [8, 7, 6, 1], [7, 1, 9, 10]]]]).transpose(0, 2, 3, 1)
    self.input_layer = layers.Input(batch_shape=(None, 4, 4, 2))
Example #12
def prepare_batch_norm_deeplift_model(self, axis):
    self.input_layer = layers.Input(batch_shape=(None, 2, 2, 2))
    self.batch_norm_layer = layers.BatchNormalization(gamma=self.gamma,
                                                      beta=self.beta,
                                                      axis=axis,
                                                      mean=self.mean,
                                                      var=self.var,
                                                      epsilon=self.epsilon)
    self.batch_norm_layer.set_inputs(self.input_layer)
    self.flatten_layer = layers.Flatten()
    self.flatten_layer.set_inputs(self.batch_norm_layer)
    self.dense_layer = layers.Dense(
        kernel=np.ones((1, 8)).astype("float32").T,
        bias=np.zeros(1).astype("float32"),
        dense_mxts_mode=DenseMxtsMode.Linear)
    self.dense_layer.set_inputs(self.flatten_layer)
    self.dense_layer.build_fwd_pass_vars()
    self.dense_layer.set_scoring_mode(layers.ScoringMode.OneAndZeros)
    self.dense_layer.set_active()
    self.dense_layer.update_task_index(0)
    self.input_layer.update_mxts()
Example #13
def test_relu_after_dense_batchnorm_noop_noop(self):
    input_layer = layers.Input(batch_shape=(None, 4))
    dense_layer = layers.Dense(kernel=np.random.random((4, 2)),
                               bias=np.random.random((2,)),
                               dense_mxts_mode=DenseMxtsMode.Linear)
    dense_layer.set_inputs(input_layer)
    batch_norm = layers.BatchNormalization(
        gamma=np.array([1.0, 1.0]).astype("float32"),
        beta=np.array([-0.5, 0.5]).astype("float32"),
        axis=-1,
        mean=np.array([-0.5, 0.5]).astype("float32"),
        var=np.array([1.0, 1.0]).astype("float32"),
        epsilon=0.001)
    batch_norm.set_inputs(dense_layer)
    noop_layer1 = layers.NoOp()
    noop_layer1.set_inputs(batch_norm)
    noop_layer2 = layers.NoOp()
    noop_layer2.set_inputs(noop_layer1)
    relu_after_bn = layers.ReLU(
        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
    relu_after_bn.set_inputs(noop_layer2)
    relu_after_bn.build_fwd_pass_vars()
    # the NoOp and BatchNormalization layers are looked through, so
    # GenomicsDefault still sees the dense layer and picks RevealCancel
    self.assertEqual(relu_after_bn.nonlinear_mxts_mode,
                     NonlinearMxtsMode.RevealCancel)
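
Putting the pieces together, the pattern shared by all of these tests is: wire layers with set_inputs, call build_fwd_pass_vars on the output layer, select a scoring mode, then compile a function over activations and references. A hedged sketch combining Examples #5 and #10 (variable names are local to the sketch, not from the original tests):

# build a small input -> dense -> ReLU graph
input_layer = layers.Input(batch_shape=(None, 4))
dense_layer = layers.Dense(kernel=np.random.random((4, 2)),
                           bias=np.random.random((2,)),
                           dense_mxts_mode=DenseMxtsMode.Linear)
dense_layer.set_inputs(input_layer)
relu_layer = layers.ReLU(
    nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
relu_layer.set_inputs(dense_layer)
relu_layer.build_fwd_pass_vars()

# score task 0 and read multipliers back at the input
relu_layer.set_scoring_mode(layers.ScoringMode.OneAndZeros)
relu_layer.set_active()
input_layer.update_mxts()
relu_layer.update_task_index(task_index=0)
func = compile_func([input_layer.get_activation_vars(),
                     input_layer.get_reference_vars()],
                    input_layer.get_mxts())
inp = np.array([[1.0, 1.0, 1.0, 1.0]])
mxts = func([inp, np.zeros_like(inp)])  # multipliers w.r.t. a zero reference
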