Example #1
 def prepare_batch_norm_deeplift_model(self, axis):
     self.input_layer = blobs.Input(num_dims=None, shape=(None,2,2,2))
     if (self.keras_version <= 0.3):
         std = self.std
         epsilon = self.epsilon
     else:
         std = np.sqrt(self.std+self.epsilon)
         epsilon = 0
     self.batch_norm_layer = blobs.BatchNormalization(
                              gamma=self.gamma,
                              beta=self.beta,
                              axis=axis,
                              mean=self.mean,
                              std=std,
                              epsilon=epsilon)
     self.batch_norm_layer.set_inputs(self.input_layer)
     self.flatten_layer = blobs.Flatten()
     self.flatten_layer.set_inputs(self.batch_norm_layer)
     self.dense_layer = blobs.Dense(
                         W=np.ones((1,8)).T,
                         b=np.zeros(1),
                         dense_mxts_mode=DenseMxtsMode.Linear)
     self.dense_layer.set_inputs(self.flatten_layer)
     self.dense_layer.build_fwd_pass_vars()
     self.dense_layer.set_scoring_mode(blobs.ScoringMode.OneAndZeros)
     self.dense_layer.set_active()
     self.dense_layer.update_task_index(0)
     self.input_layer.update_mxts()
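The setup above ends with update_mxts(); a minimal sketch of how the resulting multipliers on the input could then be evaluated inside a test of this class, following the compile_func pattern from Example #8 (the compile_func import location and the sample batch are assumptions):

    from deeplift.util import compile_func  # adjust the import to your deeplift version

    func = compile_func([self.input_layer.get_activation_vars(),
                         self.input_layer.get_reference_vars()],
                        self.input_layer.get_mxts())
    inp = np.arange(16).reshape((2, 2, 2, 2)).astype("float32")  # hypothetical batch
    multipliers = func(inp, np.zeros_like(inp))  # multipliers w.r.t. an all-zeros reference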
Example #2
    def create_small_net_with_conv_layer(self, conv_layer,
                                               outputs_per_channel):
        self.conv_layer = conv_layer
        self.conv_layer.set_inputs(self.input_layer)

        self.flatten_layer = blobs.Flatten()
        self.flatten_layer.set_inputs(self.conv_layer)

        self.dense_layer = blobs.Dense(
                           W=(np.array(
                              [([1.0]*outputs_per_channel)+
                               ([-1.0]*outputs_per_channel)]).T
                              .astype("float32")),
                           b=np.array([1]).astype("float32"),
                           dense_mxts_mode=DenseMxtsMode.Linear)
        self.dense_layer.set_inputs(self.flatten_layer)

        self.dense_layer.build_fwd_pass_vars()
        self.input_layer.reset_mxts_updated()
        self.dense_layer.set_scoring_mode(blobs.ScoringMode.OneAndZeros)
        self.dense_layer.set_active()
        self.input_layer.update_mxts()

        self.inp = (np.arange(16).reshape((2,2,4))
                    .astype("float32"))
Example #3
 def setUp(self):
     self.input_layer = blobs.Input(num_dims=None, shape=(None, 4))
     self.w1 = [1.0, -2.0, -3.0, 4.0]
     W = np.array([self.w1]).T
     b = np.array([1.0])
     self.dense_layer = blobs.Dense(W=W,
                                    b=b,
                                    dense_mxts_mode=DenseMxtsMode.Linear)
     self.dense_layer.set_inputs(self.input_layer)
     self.inp = [[-1.0, -1.0, 1.0, 1.0]]
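This setUp only wires the Dense layer to its input; the remaining graph-building steps are presumably left to the individual tests. A sketch of that remaining wiring, mirroring the tail of Example #7:

    self.dense_layer.build_fwd_pass_vars()
    self.dense_layer.set_scoring_mode(blobs.ScoringMode.OneAndZeros)
    self.dense_layer.set_active()
    self.input_layer.update_mxts()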
Example #4
 def test_relu_after_dense(self):
     input_layer = blobs.Input(num_dims=None, shape=(None, 4))
     dense_layer = blobs.Dense(W=np.random.random((4, 2)),
                               b=np.random.random((2, )),
                               dense_mxts_mode=DenseMxtsMode.Linear)
     dense_layer.set_inputs(input_layer)
     relu_after_dense = blobs.ReLU(
         nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
     relu_after_dense.set_inputs(dense_layer)
     relu_after_dense.build_fwd_pass_vars()
     self.assertEqual(relu_after_dense.nonlinear_mxts_mode,
                      NonlinearMxtsMode.RevealCancel)
Example #5
def dense_conversion(layer, name, mxts_mode):
    # mxts_mode is only forwarded to activation_conversion; the Dense blob itself does not take it
    converted_activation = activation_conversion(layer,
                                                 name,
                                                 mxts_mode=mxts_mode)
    to_return = [
        blobs.Dense(name=("preact_" if len(converted_activation) > 0 else "") +
                    name,
                    W=layer.get_weights()[0],
                    b=layer.get_weights()[1])
    ]
    to_return.extend(converted_activation)
    return to_return
Example #6
def dense_conversion(layer, name, verbose,
                      dense_mxts_mode, nonlinear_mxts_mode, **kwargs):
    converted_activation = activation_conversion(
                                  layer, name=name, verbose=verbose,
                                  nonlinear_mxts_mode=nonlinear_mxts_mode) 
    to_return = [blobs.Dense(
                  name=("preact_" if len(converted_activation) > 0
                        else "")+name, 
                  verbose=verbose,
                  W=layer.get_weights()[0],
                  b=layer.get_weights()[1],
                  dense_mxts_mode=dense_mxts_mode)]
    to_return.extend(converted_activation)
    return to_return
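Both conversion helpers return the (pre-activation) Dense blob first, followed by the converted activation blob if there is one. A minimal usage sketch, where keras_dense_layer and previous_blob are hypothetical stand-ins for an already-built Keras Dense layer and the upstream deeplift blob:

    converted = dense_conversion(layer=keras_dense_layer,  # hypothetical built Keras Dense layer
                                 name="dense_1",
                                 verbose=True,
                                 dense_mxts_mode=DenseMxtsMode.Linear,
                                 nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
    upstream = previous_blob  # hypothetical upstream deeplift blob
    for blob in converted:    # chain in order: preact Dense, then the activation (if any)
        blob.set_inputs(upstream)
        upstream = blob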
Example #7
 def setUp(self):
     self.input_layer = blobs.Input(num_dims=None, shape=(None, 4))
     self.w1 = [1.0, 2.0, 3.0, 4.0]
     self.w2 = [-1.0, -2.0, -3.0, -4.0]
     W = np.array([self.w1, self.w2]).T
     b = np.array([-1.0, 1.0])
     self.dense_layer = blobs.Dense(W=W,
                                    b=b,
                                    dense_mxts_mode=DenseMxtsMode.Linear)
     self.dense_layer.set_inputs(self.input_layer)
     self.dense_layer.build_fwd_pass_vars()
     self.dense_layer.set_scoring_mode(blobs.ScoringMode.OneAndZeros)
     self.dense_layer.set_active()
     self.input_layer.update_mxts()
     self.inp = [[1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0]]
Example #8
 def test_running_of_different_dense_modes(self):
     for mode in DenseMxtsMode.vals:
         input_layer = blobs.Input(num_dims=None, shape=(None, 4))
         W = np.array([self.w1, self.w2]).T
         b = np.array([-1.0, 1.0])
         dense_layer = blobs.Dense(W=W, b=b, dense_mxts_mode=mode)
         dense_layer.set_inputs(input_layer)
         dense_layer.build_fwd_pass_vars()
         dense_layer.set_scoring_mode(blobs.ScoringMode.OneAndZeros)
         dense_layer.set_active()
         input_layer.update_mxts()
         func = compile_func([
             input_layer.get_activation_vars(),
             input_layer.get_reference_vars()
         ], input_layer.get_mxts())
         dense_layer.update_task_index(task_index=0)
         func(self.inp, np.zeros_like(self.inp))
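The compiled function returns the multipliers on the input layer, which should have the same shape as the input batch; a short check that could be added to the loop body (the shape expectation is an assumption based on how get_mxts() is used in the other examples):

    mxts = np.array(func(self.inp, np.zeros_like(self.inp)))
    self.assertEqual(mxts.shape, np.array(self.inp).shape)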
Example #9
 def test_relu_after_dense_batchnorm(self):
     input_layer = blobs.Input(num_dims=None, shape=(None, 4))
     dense_layer = blobs.Dense(W=np.random.random((4, 2)),
                               b=np.random.random((2, )),
                               dense_mxts_mode=DenseMxtsMode.Linear)
     dense_layer.set_inputs(input_layer)
     batch_norm = blobs.BatchNormalization(gamma=np.array([1.0, 1.0]),
                                           beta=np.array([-0.5, 0.5]),
                                           axis=-1,
                                           mean=np.array([-0.5, 0.5]),
                                           std=np.array([1.0, 1.0]),
                                           epsilon=0.001)
     batch_norm.set_inputs(dense_layer)
     relu_after_bn = blobs.ReLU(
         nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
     relu_after_bn.set_inputs(batch_norm)
     relu_after_bn.build_fwd_pass_vars()
     self.assertEqual(relu_after_bn.nonlinear_mxts_mode,
                      NonlinearMxtsMode.RevealCancel)
Example #10
    def create_small_net_with_pool_layer(self, pool_layer,
                                         outputs_per_channel):
        self.pool_layer = pool_layer
        self.pool_layer.set_inputs(self.input_layer)

        self.flatten_layer = blobs.Flatten()
        self.flatten_layer.set_inputs(self.pool_layer)

        self.dense_layer = blobs.Dense(
            W=np.array([([2] * outputs_per_channel) +
                        ([3] * outputs_per_channel)]).astype("float32").T,
            b=np.array([1]).astype("float32"),
            dense_mxts_mode=DenseMxtsMode.Linear)
        self.dense_layer.set_inputs(self.flatten_layer)

        self.dense_layer.build_fwd_pass_vars()
        self.dense_layer.set_scoring_mode(blobs.ScoringMode.OneAndZeros)
        self.dense_layer.set_active()
        self.input_layer.update_mxts()
Example #11
    def setUp(self):
        self.input_layer1 = blobs.Input(num_dims=None, shape=(None,1,1,1))
        self.input_layer2 = blobs.Input(num_dims=None, shape=(None,1,1,1))
        self.concat_layer = blobs.Concat(axis=1)
        self.concat_layer.set_inputs([self.input_layer1, self.input_layer2])
        self.flatten_layer = blobs.Flatten()
        self.flatten_layer.set_inputs(self.concat_layer)
        self.dense_layer = blobs.Dense(W=np.array([[1, 2]]).T,
                                       b=[1],
                                       dense_mxts_mode=DenseMxtsMode.Linear)
        self.dense_layer.set_inputs(self.flatten_layer)
        self.dense_layer.build_fwd_pass_vars()

        self.input_layer1.reset_mxts_updated()
        self.input_layer2.reset_mxts_updated()
        self.dense_layer.set_scoring_mode(blobs.ScoringMode.OneAndZeros)
        self.dense_layer.set_active()
        self.input_layer1.update_mxts()
        self.input_layer2.update_mxts()

        self.inp1 = np.arange(2).reshape((2,1,1,1))+1
        self.inp2 = np.arange(2).reshape((2,1,1,1))+1
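For this two-input graph, multipliers for either input can be compiled the same way as in Example #8 by passing both inputs' activation and reference variables (the compile_func import location is an assumption):

    from deeplift.util import compile_func  # adjust the import to your deeplift version

    func = compile_func([self.input_layer1.get_activation_vars(),
                         self.input_layer2.get_activation_vars(),
                         self.input_layer1.get_reference_vars(),
                         self.input_layer2.get_reference_vars()],
                        self.input_layer1.get_mxts())
    self.dense_layer.update_task_index(task_index=0)
    mxts1 = func(self.inp1, self.inp2,
                 np.zeros_like(self.inp1), np.zeros_like(self.inp2))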